/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative-macros.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"

.macro new_vmalloc_check
	REG_S a0, TASK_TI_A0(tp)
	csrr a0, CSR_CAUSE
	/* Exclude IRQs */
	blt a0, zero, .Lnew_vmalloc_restore_context_a0

	REG_S a1, TASK_TI_A1(tp)
	/* Only check new_vmalloc if we are in a page/protection fault */
	li a1, EXC_LOAD_PAGE_FAULT
	beq a0, a1, .Lnew_vmalloc_kernel_address
	li a1, EXC_STORE_PAGE_FAULT
	beq a0, a1, .Lnew_vmalloc_kernel_address
	li a1, EXC_INST_PAGE_FAULT
	bne a0, a1, .Lnew_vmalloc_restore_context_a1

.Lnew_vmalloc_kernel_address:
	/* Is it a kernel address? */
	csrr a0, CSR_TVAL
	bge a0, zero, .Lnew_vmalloc_restore_context_a1

	/* Check if a new vmalloc mapping appeared that could explain the trap */
	REG_S a2, TASK_TI_A2(tp)
	/*
	 * Computes:
	 * a0 = &new_vmalloc[BIT_WORD(cpu)]
	 * a1 = BIT_MASK(cpu)
	 */
	lw a2, TASK_TI_CPU(tp)
	/*
	 * Compute the new_vmalloc element position:
	 * (cpu / 64) * 8 = (cpu >> 6) << 3
	 */
	srli a1, a2, 6
	slli a1, a1, 3
	la a0, new_vmalloc
	add a0, a0, a1
	/*
	 * Compute the bit position in the new_vmalloc element:
	 * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - ((cpu >> 6) << 6)
	 *         = cpu - (((cpu >> 6) << 3) << 3)
	 */
	slli a1, a1, 3
	sub a1, a2, a1
	/* Compute the "get mask": 1 << bit_pos */
	li a2, 1
	sll a1, a2, a1

	/* Check the value of new_vmalloc for this cpu */
	REG_L a2, 0(a0)
	and a2, a2, a1
	beq a2, zero, .Lnew_vmalloc_restore_context

	/* Atomically reset the current cpu bit in new_vmalloc */
	amoxor.d a0, a1, (a0)

	/* Only emit a sfence.vma if the uarch caches invalid entries */
	ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)

	REG_L a0, TASK_TI_A0(tp)
	REG_L a1, TASK_TI_A1(tp)
	REG_L a2, TASK_TI_A2(tp)
	csrw CSR_SCRATCH, x0
	sret

.Lnew_vmalloc_restore_context:
	REG_L a2, TASK_TI_A2(tp)
.Lnew_vmalloc_restore_context_a1:
	REG_L a1, TASK_TI_A1(tp)
.Lnew_vmalloc_restore_context_a0:
	REG_L a0, TASK_TI_A0(tp)
.endm
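
/*
 * C-like sketch of the macro above (illustrative only; the helper names
 * is_page_fault(), is_kernel_address(), sfence_vma_if_needed() and
 * retry_faulting_access() are assumptions, not kernel APIs):
 *
 *	if (is_page_fault(cause) && is_kernel_address(tval)) {
 *		unsigned long *word = &new_vmalloc[cpu / 64];
 *		unsigned long mask = 1UL << (cpu % 64);
 *
 *		if (*word & mask) {
 *			atomic_xor(word, mask);		// amoxor.d: clear this cpu's bit
 *			sfence_vma_if_needed();		// only if the uarch caches invalid entries
 *			retry_faulting_access();	// sret straight back, no C handler involved
 *		}
 *	}
 *	// otherwise fall through to the normal exception path
 */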

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, .Lsave_context

.Lrestore_kernel_tpsp:
	csrr tp, CSR_SCRATCH

#ifdef CONFIG_64BIT
	/*
	 * The RISC-V kernel does not eagerly emit a sfence.vma after each
	 * new vmalloc mapping, which may result in exceptions:
	 * - if the uarch caches invalid entries, the new mapping would not be
	 *   observed by the page table walker and an invalidation is needed.
	 * - if the uarch does not cache invalid entries, a reordered access
	 *   could "miss" the new mapping and trap: in that case, we only need
	 *   to retry the access, no sfence.vma is required.
	 */
	new_vmalloc_check
#endif

	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
	load_global_pointer

	/* Load the kernel shadow call stack pointer if coming from userspace */
	scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_start
#endif
	move a0, sp /* pt_regs */

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	call do_irq
	j ret_from_exception
1:
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 3f
	REG_L t1, 0(t0)
2:	jalr t1
	j ret_from_exception
3:

	la t1, do_trap_unknown
	j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
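
/*
 * Dispatch logic of handle_exception above, as a C-like sketch (illustrative
 * only; nr_entries() is pseudo-code for the bounds check against
 * excp_vect_table_end):
 *
 *	if ((long)cause < 0)				// MSB set: interrupt
 *		do_irq(regs);
 *	else if (cause < nr_entries(excp_vect_table))
 *		excp_vect_table[cause](regs);		// indexed as cause << RISCV_LGPTR
 *	else
 *		do_trap_unknown(regs);
 *	ret_from_exception();
 */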

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork_kernel_asm
 *  - ret_from_fork_user_asm
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f

#ifdef CONFIG_KSTACK_ERASE
	call stackleak_erase_on_task_stack
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_end
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	restore_from_x6_to_x31

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* We reach here from kernel context, sscratch must be 0 */
	csrrw x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* Zero out x31 again and restore x31 */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31

	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Save the context to the overflow stack */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif
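
/*
 * The two ret_from_fork_* stubs below expect the fork path to have stashed
 * the kernel-thread function in s0 and its argument in s1 (callee-saved
 * registers, so the first __switch_to into the new task restores them);
 * a new user task only needs the pt_regs pointer. Both stubs exit through
 * ret_from_exception.
 */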

SYM_CODE_START(ret_from_fork_kernel_asm)
	call schedule_tail
	move a0, s1 /* fn_arg */
	move a1, s0 /* fn */
	move a2, sp /* pt_regs */
	call ret_from_fork_kernel
	j ret_from_exception
SYM_CODE_END(ret_from_fork_kernel_asm)

SYM_CODE_START(ret_from_fork_user_asm)
	call schedule_tail
	move a0, sp /* pt_regs */
	call ret_from_fork_user
	j ret_from_exception
SYM_CODE_END(ret_from_fork_user_asm)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
	/* Create a frame record to save ra and s0 (fp) */
	addi sp, sp, -STACKFRAME_SIZE_ON_STACK
	REG_S ra, STACKFRAME_RA(sp)
	REG_S s0, STACKFRAME_FP(sp)
	addi s0, sp, STACKFRAME_SIZE_ON_STACK

	/* Switch to the per-CPU shadow call stack */
	scs_save_current
	scs_load_irq_stack t0

	/* Switch to the per-CPU IRQ stack and call the handler */
	load_per_cpu t0, irq_stack_ptr, t1
	li t1, IRQ_STACK_SIZE
	add sp, t0, t1
	jalr a1

	/* Switch back to the thread shadow call stack */
	scs_load_current

	/* Switch back to the thread stack and restore ra and s0 */
	addi sp, s0, -STACKFRAME_SIZE_ON_STACK
	REG_L ra, STACKFRAME_RA(sp)
	REG_L s0, STACKFRAME_FP(sp)
	addi sp, sp, STACKFRAME_SIZE_ON_STACK

	ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */
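
/*
 * Expected C-side usage of call_on_irq_stack (illustrative sketch only; the
 * real call site lives in the irq handling code, not in this file):
 *
 *	if (on_thread_stack())
 *		call_on_irq_stack(regs, handle_riscv_irq);
 *	else
 *		handle_riscv_irq(regs);
 */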

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)

	/* Save the user space access flag */
	csrr s0, CSR_STATUS
	REG_S s0, TASK_THREAD_SUM_RA(a3)

	/* Save the kernel shadow call stack pointer */
	scs_save_current
	/* Restore context from next->thread */
	REG_L s0, TASK_THREAD_SUM_RA(a4)
	li s1, SR_SUM
	and s0, s0, s1
	csrs CSR_STATUS, s0
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	/* Switch to the next shadow call stack */
	scs_load_current
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
SYM_DATA_START_LOCAL(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
SYM_DATA_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif