// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>

static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

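/*
 * A page table entry counts as "none" here in two situations: in the early
 * pass when it is literally zero, and in the final pass when it still points
 * at the corresponding kasan_early_shadow_* table and therefore has to be
 * replaced with a real table.
 */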
#ifdef __PAGETABLE_P4D_FOLDED
#define __pgd_none(early, pgd) (0)
#else
#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
        (__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
        (__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif

#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud) (0)
#else
#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
        (__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif

#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
        (__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte) (early ? pte_none(pte) : \
        ((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))

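/*
 * Translate a kernel virtual address to its shadow address: before KASAN is
 * enabled, and for addresses at or above FIXADDR_START, everything resolves
 * to the shared kasan_early_shadow_page. Otherwise the XRANGE bits select
 * the address segment, and the shadow address is the address masked with
 * XRANGE_SHADOW_MASK, shifted right by KASAN_SHADOW_SCALE_SHIFT, plus a
 * per-segment shadow offset.
 */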
void *kasan_mem_to_shadow(const void *addr)
{
        if (!kasan_enabled()) {
                return (void *)(kasan_early_shadow_page);
        } else {
                unsigned long maddr = (unsigned long)addr;
                unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
                unsigned long offset = 0;

                if (maddr >= FIXADDR_START)
                        return (void *)(kasan_early_shadow_page);

                maddr &= XRANGE_SHADOW_MASK;
                switch (xrange) {
                case XKPRANGE_CC_SEG:
                        offset = XKPRANGE_CC_SHADOW_OFFSET;
                        break;
                case XKPRANGE_UC_SEG:
                        offset = XKPRANGE_UC_SHADOW_OFFSET;
                        break;
                case XKPRANGE_WC_SEG:
                        offset = XKPRANGE_WC_SHADOW_OFFSET;
                        break;
                case XKVRANGE_VC_SEG:
                        offset = XKVRANGE_VC_SHADOW_OFFSET;
                        break;
                default:
                        WARN_ON(1);
                        return NULL;
                }

                return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
        }
}

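/*
 * Inverse of kasan_mem_to_shadow(): reject shadow addresses outside
 * [KASAN_SHADOW_START, KASAN_SHADOW_END], then recover the originating
 * segment by comparing against the per-segment shadow offsets from highest
 * to lowest and scaling the remainder back up by KASAN_SHADOW_SCALE_SHIFT.
 */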
const void *kasan_shadow_to_mem(const void *shadow_addr)
{
        unsigned long addr = (unsigned long)shadow_addr;

        if (unlikely(addr > KASAN_SHADOW_END) ||
            unlikely(addr < KASAN_SHADOW_START)) {
                WARN_ON(1);
                return NULL;
        }

        if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
                return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
        else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
                return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
        else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
                return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
        else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
                return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
        else {
                WARN_ON(1);
                return NULL;
        }
}

/*
 * Alloc memory for shadow memory page table.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
        void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
                        __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                        __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}

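/*
 * The kasan_*_offset() helpers below share one pattern: if the entry at this
 * level is still "none", install the next-level table. The early pass points
 * the entry at the shared kasan_early_shadow_* table; the final pass
 * allocates a fresh page and seeds it from that early shadow table. The
 * entry covering @addr in the (possibly new) table is then returned.
 */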
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
        if (__pmd_none(early, pmdp_get(pmdp))) {
                phys_addr_t pte_phys = early ?
                        __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
                pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
        }

        return pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
        if (__pud_none(early, pudp_get(pudp))) {
                phys_addr_t pmd_phys = early ?
                        __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
                pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
        }

        return pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
        if (__p4d_none(early, p4dp_get(p4dp))) {
                phys_addr_t pud_phys = early ?
                        __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
                p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
        }

        return pud_offset(p4dp, addr);
}

static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
{
        if (__pgd_none(early, pgdp_get(pgdp))) {
                phys_addr_t p4d_phys = early ?
                        __pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
                pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
        }

        return p4d_offset(pgdp, addr);
}

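/*
 * The kasan_*_populate() walkers mirror each other: each level iterates over
 * [addr, end), fetches the lower-level table via the matching
 * kasan_*_offset() helper and descends. At the PTE level every page is
 * mapped either to the shared kasan_early_shadow_page (early pass) or to a
 * freshly allocated zeroed page (final pass).
 */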
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

        do {
                phys_addr_t page_phys = early ?
                        __pa_symbol(kasan_early_shadow_page)
                        : kasan_alloc_zeroed_page(node);
                next = addr + PAGE_SIZE;
                set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
        } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

        do {
                next = pmd_addr_end(addr, end);
                kasan_pte_populate(pmdp, addr, next, node, early);
        } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

        do {
                next = pud_addr_end(addr, end);
                kasan_pmd_populate(pudp, addr, next, node, early);
        } while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

        do {
                next = p4d_addr_end(addr, end);
                kasan_pud_populate(p4dp, addr, next, node, early);
        } while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
{
        unsigned long next;
        pgd_t *pgdp;

        pgdp = pgd_offset_k(addr);

        do {
                next = pgd_addr_end(addr, end);
                kasan_p4d_populate(pgdp, addr, next, node, early);
        } while (pgdp++, addr = next, addr != end);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
                                      int node)
{
        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

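/*
 * kasan_early_init() does not create any mappings; it only asserts at build
 * time that the shadow region is PGDIR_SIZE-aligned, which the PGD clearing
 * and population code above relies on.
 */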
asmlinkage void __init kasan_early_init(void)
{
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
}

static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
        WRITE_ONCE(*pgdp, pgdval);
}

static void __init clear_pgds(unsigned long start, unsigned long end)
{
        /*
         * Remove references to the KASAN page tables from swapper_pg_dir.
         * pgd_clear() can't be used here because it is a no-op on 2- and
         * 3-level page table setups.
         */
        for (; start < end; start = pgd_addr_end(start, end))
                kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

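/*
 * Overall flow of kasan_init(): bail out if the shadow region does not fit
 * the CPU's virtual address range; temporarily run on a copy of
 * swapper_pg_dir (kasan_pg_dir) while the shadow PGD entries in
 * swapper_pg_dir are cleared; map the whole shadow region to the zero page
 * (early pass); enable KASAN; then build real shadow memory for the linear
 * map and the modules range, make the early shadow PTEs read-only, and
 * switch back to swapper_pg_dir before enabling reports.
 */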
void __init kasan_init(void)
{
        u64 i;
        phys_addr_t pa_start, pa_end;

        /*
         * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
         * overflow UINTPTR_MAX and then look like a user-space address.
         * For example, PGDIR_SIZE under CONFIG_4KB_4LEVEL is 2^39, which is
         * too large for the Loongson-2K series, whose cpu_vabits = 39.
         */
        if (KASAN_SHADOW_END < vm_map_base) {
                pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
                return;
        }

        /*
         * The PGD entries were populated with invalid_pmd_table or
         * invalid_pud_table in pagetable_init(), depending on how many page
         * table levels are in use. The entries covering the KASAN shadow
         * region must be cleared here: because they are non-zero, pgd_none()
         * would be false and the populate calls below would not create any
         * new PGD entries at all.
         */
        memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
        csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
        local_flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        /* Maps everything to a single page of zeroes */
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
                                    kasan_mem_to_shadow((void *)KFENCE_AREA_END));

        /* Enable KASAN here before kasan_mem_to_shadow(). */
        kasan_init_generic();

        /* Populate the linear mapping */
        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)phys_to_virt(pa_start);
                void *end = (void *)phys_to_virt(pa_end);

                if (start >= end)
                        break;

                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
                                   (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
        }

        /* Populate modules mapping */
        kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
                           (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
        /*
         * KASAN may reuse the contents of kasan_early_shadow_pte directly,
         * so make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));

        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
        csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
        local_flush_tlb_all();

        /* At this point KASAN is fully initialized. Enable error messages. */
        init_task.kasan_depth = 0;
}