/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <[email protected]>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

/* Translation granule size for TTBR0/TTBR1, selected by the page-size config */
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

/* Top-byte-ignore for TTBR1 addresses when KASAN software tagging is in use */
#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS 0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS 0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 *
 * The context is written as six 16-byte register pairs followed by x18,
 * i.e. 104 bytes starting at [x0].
 */
SYM_FUNC_START(cpu_do_suspend)
	/* Snapshot the EL0/EL1 state that cpu_do_resume restores */
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12			// per-CPU offset, restored via set_this_cpu_offset
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g.
	 * by shadow call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 *
 * Reads back the layout written by cpu_do_suspend (six 16-byte pairs
 * plus x18 at offset 96) and reprograms the corresponding registers.
 */
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]			// tpidr_el0, tpidrro_el0
	ldp	x4, x5, [x0, #16]		// contextidr_el1, osdlr_el1
	ldp	x6, x8, [x0, #32]		// cpacr_el1, tcr_el1
	ldp	x9, x10, [x0, #48]		// vbar_el1, mdscr_el1
	ldp	x11, x12, [x0, #64]		// oslsr_el1, sctlr_el1
	ldp	x13, x14, [x0, #80]		// per-CPU offset, sp_el0
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1		// extract the OS lock bit from saved oslsr_el1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr		// clear the deferred RAS error status
alternative_else_nop_endif

	/* NOTE(review): assumes sp_el0 (x14) holds the current task pointer */
	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb					// synchronize all the register writes above
	ret
SYM_FUNC_END(cpu_do_resume)
#endif

	.pushsection ".idmap.text", "a"

/*
 * Point TTBR1 at the empty reserved_pg_dir and invalidate the local TLB,
 * so the subsequent TTBR1 switch cannot conflict with stale walks.
 */
.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

/* Attributes used for the temporary fixmap entries mapping the page tables */
#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
				 PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)

	.pushsection ".idmap.text", "a"

	/* Extract the output address from a table/page entry */
	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_LOW
#ifdef CONFIG_ARM64_PA_BITS_52
	and	\pte, \pte, #PTE_ADDR_HIGH
	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
#endif
	.endm

	/*
	 * Walk \num_entries entries at cur_<type>p, setting PTE_NG on every
	 * valid global entry and descending into next-level tables (except
	 * at the pte level, which has no next level).
	 */
	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p], #8	// Load the entry and advance
	tbz	\type, #0, .Lnext_\type	// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p, #-8]	// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 2)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 2)
	lsr	pte, pte, #12
	tlbi	vaae1, pte			// invalidate the old fixmap mapping
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 2)]
	dsb	nshst
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during
 * boot.
 */
SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	/* Symbolic register roles for the page-table walk below */
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17
	cur_p4dp	.req	x19
	end_p4dp	.req	x20

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, idmap_kpti_bbml2_flag

	cbnz	cpu, __idmap_kpti_secondary	// secondaries park until we're done

#if CONFIG_PGTABLE_LEVELS > 4
	/* x19/x20 are callee-saved but used as cur_p4dp/end_p4dp below */
	stp	x29, x30, [sp, #-32]!
	mov	x29, sp
	stp	x19, x20, [sp, #16]
#endif

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus		// flag == num_cpus => all secondaries ready
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

#ifdef CONFIG_ARM64_LPA2
	/*
	 * If LPA2 support is configured, but 52-bit virtual addressing is not
	 * enabled at runtime, we will fall back to one level of paging less,
	 * and so we have to walk swapper_pg_dir as if we dereferenced its
	 * address from a PGD level entry, and terminate the PGD level loop
	 * right after.
	 */
	adrp	pgd, swapper_pg_dir	// walk &swapper_pg_dir at the next level
	mov	cur_pgdp, end_pgdp	// must be equal to terminate the PGD loop
alternative_if_not ARM64_HAS_VA52
	b	.Lderef_pgd		// skip to the next level
alternative_else_nop_endif
	/*
	 * LPA2 based 52-bit virtual addressing requires 52-bit physical
	 * addressing to be enabled as well. In this case, the shareability
	 * bits are repurposed as physical address bits, and should not be
	 * set in pte_flags.
	 */
	bic	pte_flags, pte_flags, #PTE_SHARED
#endif

	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, -1
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
#if CONFIG_PGTABLE_LEVELS > 4
	ldp	x19, x20, [sp, #16]
	ldp	x29, x30, [sp], #32
#endif
	ret

	/*
	 * The .Lderef_* targets below are entered from kpti_mk_tbl_ng when a
	 * next-level table entry is found; each maps the table through the
	 * temporary fixmap and recurses one level down.
	 */
.Lderef_pgd:
	/* P4D */
	.if		CONFIG_PGTABLE_LEVELS > 4
	p4d		.req	x30
	pte_to_phys	cur_p4dp, pgd
	kpti_map_pgtbl	p4d, 0
	kpti_mk_tbl_ng	p4d, PTRS_PER_P4D
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 4 */
	p4d		.req	pgd
	.set		.Lnext_p4d, .Lnext_pgd
	.endif

.Lderef_p4d:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, p4d
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_p4d
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b	.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid
	.unreq	cur_p4dp
	.unreq	end_p4dp
	.unreq	p4d

	/*
Secondary CPUs end up here */411__idmap_kpti_secondary:412/* Uninstall swapper before surgery begins */413__idmap_cpu_set_reserved_ttbr1 x16, x17414b scondary_cpu_wait415416.unreq swapper_ttb417.unreq flag_ptr418SYM_FUNC_END(idmap_kpti_install_ng_mappings)419.popsection420#endif421422.pushsection ".idmap.text", "a"423SYM_TYPED_FUNC_START(wait_linear_map_split_to_ptes)424/* Must be same registers as in idmap_kpti_install_ng_mappings */425swapper_ttb .req x3426flag_ptr .req x4427428mrs swapper_ttb, ttbr1_el1429adr_l flag_ptr, idmap_kpti_bbml2_flag430__idmap_cpu_set_reserved_ttbr1 x16, x17431432scondary_cpu_wait:433/* Increment the flag to let the boot CPU we're ready */4341: ldxr w16, [flag_ptr]435add w16, w16, #1436stxr w17, w16, [flag_ptr]437cbnz w17, 1b438439/* Wait for the boot CPU to finish messing around with swapper */440sevl4411: wfe442ldxr w16, [flag_ptr]443cbnz w16, 1b444445/* All done, act like nothing happened */446msr ttbr1_el1, swapper_ttb447isb448ret449450.unreq swapper_ttb451.unreq flag_ptr452SYM_FUNC_END(wait_linear_map_split_to_ptes)453.popsection454455/*456* __cpu_setup457*458* Initialise the processor for turning the MMU on.459*460* Output:461* Return in x0 the value of the SCTLR_EL1 register.462*/463.pushsection ".idmap.text", "a"464SYM_FUNC_START(__cpu_setup)465tlbi vmalle1 // Invalidate local TLB466dsb nsh467468msr cpacr_el1, xzr // Reset cpacr_el1469mov x1, MDSCR_EL1_TDCC // Reset mdscr_el1 and disable470msr mdscr_el1, x1 // access to the DCC from EL0471reset_pmuserenr_el0 x1 // Disable PMU access from EL0472reset_amuserenr_el0 x1 // Disable AMU access from EL0473474/*475* Default values for VMSA control registers. 
These will be adjusted476* below depending on detected CPU features.477*/478mair .req x17479tcr .req x16480tcr2 .req x15481mov_q mair, MAIR_EL1_SET482mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \483TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \484TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS485mov tcr2, xzr486487tcr_clear_errata_bits tcr, x9, x5488489#ifdef CONFIG_ARM64_VA_BITS_52490mov x9, #64 - VA_BITS491alternative_if ARM64_HAS_VA52492tcr_set_t1sz tcr, x9493#ifdef CONFIG_ARM64_LPA2494orr tcr, tcr, #TCR_DS495#endif496alternative_else_nop_endif497#endif498499/*500* Set the IPS bits in TCR_EL1.501*/502tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6503#ifdef CONFIG_ARM64_HW_AFDBM504/*505* Enable hardware update of the Access Flags bit.506* Hardware dirty bit management is enabled later,507* via capabilities.508*/509mrs x9, ID_AA64MMFR1_EL1510ubfx x9, x9, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, #4511cbz x9, 1f512orr tcr, tcr, #TCR_HA // hardware Access flag update513#ifdef CONFIG_ARM64_HAFT514cmp x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT515b.lt 1f516orr tcr2, tcr2, TCR2_EL1_HAFT517#endif /* CONFIG_ARM64_HAFT */5181:519#endif /* CONFIG_ARM64_HW_AFDBM */520msr mair_el1, mair521msr tcr_el1, tcr522523mrs_s x1, SYS_ID_AA64MMFR3_EL1524ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4525cbz x1, .Lskip_indirection526527mov_q x0, PIE_E0_ASM528msr REG_PIRE0_EL1, x0529mov_q x0, PIE_E1_ASM530msr REG_PIR_EL1, x0531532orr tcr2, tcr2, TCR2_EL1_PIE533534.Lskip_indirection:535536mrs_s x1, SYS_ID_AA64MMFR3_EL1537ubfx x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4538cbz x1, 1f539msr REG_TCR2_EL1, tcr25401:541542/*543* Prepare SCTLR544*/545mov_q x0, INIT_SCTLR_EL1_MMU_ON546ret // return to head.S547548.unreq mair549.unreq tcr550.unreq tcr2551SYM_FUNC_END(__cpu_setup)552553554