/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/scs.h>
#include <asm/xip_fixup.h>
#include <asm/usercfi.h>
#include "efi-header.S"

__HEAD
SYM_CODE_START(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!
	 */
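	/*
	 * Header layout (little endian, per asm/image.h):
	 *   code0/code1 (u32 each), text_offset (u64), image_size (u64),
	 *   flags (u64), version (u32), res1 (u32), res2 (u64),
	 *   magic "RISCV\0\0\0" (u64), magic2 "RSC\x05" (u32),
	 *   res3 (u32, PE header offset when CONFIG_EFI).
	 */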
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
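	/*
	 * "MZ" is 0x5a4d little endian, and 0x5a4d is exactly the encoding
	 * of "c.li s4, -13", so the image starts with the PE/COFF magic
	 * while remaining executable. s4 is clobbered harmlessly.
	 */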
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset (2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset (4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

	.align 2
#ifdef CONFIG_MMU
	.global relocate_enable_mmu
relocate_enable_mmu:
	/* Relocate return address */
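	/*
	 * a1 = kernel_map.virt_addr - _start, i.e. the virtual-to-physical
	 * offset of the kernel mapping; adding it to ra makes the final
	 * "ret" land at the caller's virtual address.
	 */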
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
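	/*
	 * satp is MODE (preloaded in satp_mode) OR'd with the PPN, which
	 * is the page-table physical address shifted right by PAGE_SHIFT.
	 */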
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	XIP_FIXUP_OFFSET a1
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
	.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
	load_global_pointer

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifndef CONFIG_RISCV_M_MODE
	/* Enable time CSR */
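	/* scounteren.TM (bit 1) lets user mode read the time CSR */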
	li t0, 0x2
	csrw CSR_SCOUNTEREN, t0
#endif

	/* Load the global pointer */
	load_global_pointer

	/*
	 * Disable FPU & VECTOR to detect illegal usage of
	 * floating point or vector in kernel space
	 */
	li t0, SR_FS_VS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif
	call .Lsetup_trap_vector
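	/*
	 * Ask the SBI firmware (FWFT extension) to enable and lock
	 * supervisor shadow-stack support: a7/a6 select the extension and
	 * function, a0-a2 carry the arguments, and a0 returns the error
	 * code. On failure, record that user CFI is unavailable.
	 */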
#if defined(CONFIG_RISCV_SBI) && defined(CONFIG_RISCV_USER_CFI)
	li a7, SBI_EXT_FWFT
	li a6, SBI_EXT_FWFT_SET
	li a0, SBI_FWFT_SHADOW_STACK
	li a1, 1 /* enable supervisor access to the shadow stack */
	li a2, SBI_FWFT_SET_FLAG_LOCK
	ecall
	beqz a0, 1f
	la a1, riscv_nousercfi
	li a0, CMDLINE_DISABLE_RISCV_USERCFI_BCFI
	REG_S a0, (a1)
1:
#endif
	scs_load_current
	call smp_callin
#endif /* CONFIG_SMP */

	.align 2
.Lsecondary_park:
	/*
	 * Park this hart if we:
	 * - have too many harts on CONFIG_RISCV_BOOT_SPINWAIT
	 * - receive an early trap, before setup_trap_vector finished
	 * - fail in smp_callin(), as a successful one wouldn't return
	 */
	wfi
	j .Lsecondary_park

	.align 2
.Lsetup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set the scratch register to 0, indicating to the exception
	 * vector that we are presently executing in the kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

SYM_CODE_END(_start)

SYM_CODE_START(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Set up a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
la a0, .Lpmp_done
235
csrw CSR_TVEC, a0
236
237
li a0, -1
238
csrw CSR_PMPADDR0, a0
239
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
240
csrw CSR_PMPCFG0, a0
241
.align 2
242
.Lpmp_done:
243
244
/*
245
* The hartid in a0 is expected later on, and we have no firmware
246
* to hand it to us.
247
*/
248
csrr a0, CSR_MHARTID
249
#else
250
/* Enable time CSR */
251
li t0, 0x2
252
csrw CSR_SCOUNTEREN, t0
253
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
	load_global_pointer

	/*
	 * Disable FPU & VECTOR to detect illegal usage of
	 * floating point or vector in kernel space
	 */
	li t0, SR_FS_VS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for the spinwait boot method */
#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
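	/*
	 * amoadd.w returns the previous value of hart_lottery: the first
	 * hart to increment it reads 0 and wins; all later harts read a
	 * non-zero value and take the secondary path.
	 */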
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

#endif /* CONFIG_XIP_KERNEL */
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
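
	/*
	 * Per the boot protocol, a0 holds the boot hartid and a1 the
	 * devicetree address; __copy_data clobbers them, hence the
	 * save/restore below for XIP kernels.
	 */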
#ifdef CONFIG_XIP_KERNEL
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	mv s1, a1
	call __copy_data

	/* Restore a0 & a1 */
	mv a0, s0
	mv a1, s1
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, .Lclear_bss_done
.Lclear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, .Lclear_bss
.Lclear_bss_done:
#endif
	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	addi sp, sp, -PT_SIZE_ON_STACK
	scs_load_init_stack
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	mv a0, a1
#endif /* CONFIG_BUILTIN_DTB */
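	/* a0 now holds the DTB address, setup_vm()'s only argument */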
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif /* CONFIG_MMU */

	call .Lsetup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	addi sp, sp, -PT_SIZE_ON_STACK
#if defined(CONFIG_RISCV_SBI) && defined(CONFIG_RISCV_USER_CFI)
	li a7, SBI_EXT_FWFT
	li a6, SBI_EXT_FWFT_SET
	li a0, SBI_FWFT_SHADOW_STACK
	li a1, 1 /* enable supervisor access to the shadow stack */
	li a2, SBI_FWFT_SET_FLAG_LOCK
	ecall
	beqz a0, 1f
	la a1, riscv_nousercfi
	li a0, CMDLINE_DISABLE_RISCV_USERCFI_BCFI
	REG_S a0, (a1)
1:
#endif
	scs_load_current

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel
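	/* tail is a non-returning jump; start_kernel() never returns */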

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

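	/* scale the hartid by register size to index the per-hart arrays */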
	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

SYM_CODE_END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
SYM_CODE_START_LOCAL(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0
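	/* ra, a0 (hartid) and a1 (DTB address) are deliberately preserved */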

#ifdef CONFIG_FPU
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done_fpu

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
.Lreset_regs_done_fpu:
#endif /* CONFIG_FPU */

#ifdef CONFIG_RISCV_ISA_V
	csrr t0, CSR_MISA
	li t1, COMPAT_HWCAP_ISA_V
	and t0, t0, t1
	beqz t0, .Lreset_regs_done_vector

	/*
	 * Clear vector registers and reset vcsr.
	 * VLMAX has a defined value, VLEN is a constant,
	 * and this form of vsetvli is defined to set vl to VLMAX.
	 */
	li t1, SR_VS
	csrs CSR_STATUS, t1
	csrs CSR_VCSR, x0
	vsetvli t1, x0, e8, m8, ta, ma
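	/* with LMUL=8, each vmv.v.i clears a group of 8 registers */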
	vmv.v.i v0, 0
	vmv.v.i v8, 0
	vmv.v.i v16, 0
	vmv.v.i v24, 0
	/* note that the caller must clear SR_VS */
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
	ret
SYM_CODE_END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */