/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/frame.h>
#include <asm/nops.h>

	.section .text..__x86.indirect_thunk

.macro POLINE reg
	ANNOTATE_INTRA_FUNCTION_CALL
	call	.Ldo_rop_\@
	int3
.Ldo_rop_\@:
	mov	%\reg, (%_ASM_SP)
	UNWIND_HINT_FUNC
.endm

.macro RETPOLINE reg
	POLINE \reg
	RET
.endm
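
/*
 * Editor's note - an illustrative expansion (comment only, nothing here
 * is assembled): with the default RETPOLINE alternative selected and
 * rax picked as an example register, __x86_indirect_thunk_rax behaves
 * roughly like:
 *
 *	call 2f			# push return address (the int3 below)
 *	int3			# speculation trap: the RSB predicts a
 *				# return here, where it is harmlessly caught
 * 2:	mov %rax, (%rsp)	# replace the saved return address with
 *				# the real indirect-branch target
 *	ret			# architecturally jumps to *%rax
 */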

.macro THUNK reg

	.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
		      __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)
	SYM_PIC_ALIAS(__x86_indirect_thunk_\reg)

.endm

/*
 * Despite being an assembler file we can't just use .irp here
 * because __KSYM_DEPS__ only uses the C preprocessor and would
 * only see one instance of "__x86_indirect_thunk_\reg" rather
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 *
 * Worse, you can only have a single EXPORT_SYMBOL per line,
 * and CPP can't insert newlines, so we have to repeat everything
 * at least twice.
 */

#define __EXPORT_THUNK(sym)	_ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)

#define GEN(reg) THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING

.macro CALL_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR

	CALL_DEPTH_ACCOUNT
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_call_thunk_array)

#define GEN(reg) CALL_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_call_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

.macro JUMP_THUNK reg
	.align RETPOLINE_THUNK_SIZE

SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	POLINE \reg
	ANNOTATE_UNRET_SAFE
	ret
	int3
.endm

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_jump_thunk_array)

#define GEN(reg) JUMP_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_jump_thunk_array)

#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN

#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

#ifdef CONFIG_MITIGATION_ITS

.macro ITS_THUNK reg

/*
 * If CFI paranoid is used then the ITS thunk starts with opcodes (0xea; jne 1b)
 * that complete the fineibt_paranoid caller sequence.
 */
1:	.byte 0xea
SYM_INNER_LABEL(__x86_indirect_paranoid_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	jne 1b
SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	ANNOTATE_RETPOLINE_SAFE
	jmp *%\reg
	int3
	.align 32, 0xcc		/* fill to the end of the line */
	.skip 32 - (__x86_indirect_its_thunk_\reg - 1b), 0xcc /* skip to the next upper half */
.endm

/* ITS mitigation requires thunks to be aligned to the upper half of a cacheline */
	.align 64, 0xcc
	.skip 29, 0xcc

#define GEN(reg) ITS_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN

	.align 64, 0xcc
SYM_FUNC_ALIAS(__x86_indirect_its_thunk_array, __x86_indirect_its_thunk_rax)
SYM_CODE_END(__x86_indirect_its_thunk_array)

#endif /* CONFIG_MITIGATION_ITS */

#ifdef CONFIG_MITIGATION_RETHUNK

/*
 * Be careful here: that label cannot really be removed because in
 * some configurations and toolchains, the JMP __x86_return_thunk the
 * compiler issues is either a short one or the compiler doesn't use
 * relocations for same-section JMPs and that breaks the returns
 * detection logic in apply_returns() and in objtool.
 */
	.section .text..__x86.return_thunk

#ifdef CONFIG_MITIGATION_SRSO

/*
 * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
 * special addresses:
 *
 * - srso_alias_untrain_ret() is 2M aligned
 * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
 *   and 20 in its virtual address are set (while those bits in the
 *   srso_alias_untrain_ret() function are cleared).
 *
 * This guarantees that those two addresses will alias in the branch
 * target buffer of Zen3/4 generations, causing any potentially
 * poisoned entries at that BTB slot to get evicted.
 *
 * As a result, srso_alias_safe_ret() becomes a safe return.
 */
	.pushsection .text..__x86.rethunk_untrain
SYM_CODE_START_NOALIGN(srso_alias_untrain_ret)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ASM_NOP2
	lfence
	jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
	.popsection

	.pushsection .text..__x86.rethunk_safe
SYM_CODE_START_NOALIGN(srso_alias_safe_ret)
	lea 8(%_ASM_SP), %_ASM_SP
	UNWIND_HINT_FUNC
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(srso_alias_safe_ret)

SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_alias_safe_ret
	ud2
SYM_CODE_END(srso_alias_return_thunk)
	.popsection
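
/*
 * Editor's worked example of the aliasing scheme above (the addresses
 * are made up purely for illustration): if srso_alias_untrain_ret were
 * linked at 0xffffffff89000000, srso_alias_safe_ret would land at
 * 0xffffffff89104104 - the same 2M page with bits 2, 8, 14 and 20 set,
 * since 0x4 + 0x100 + 0x4000 + 0x100000 = 0x104104. Per the comment
 * above, the Zen3/4 BTB maps two such addresses to the same slot, so
 * executing one evicts any prediction trained on the other.
 */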

/*
 * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
 * below. On kernel entry, srso_untrain_ret() is executed which is a
 *
 *	movabs $0xccccc30824648d48,%rax
 *
 * and when the return thunk executes the inner label srso_safe_ret()
 * later, it is a stack manipulation and a RET which is mispredicted and
 * thus a "safe" one to use.
 */
	.align 64
	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
SYM_CODE_START_LOCAL_NOALIGN(srso_untrain_ret)
	ANNOTATE_NOENDBR
	.byte 0x48, 0xb8

/*
 * This forces the function return instruction to speculate into a trap
 * (UD2 in srso_return_thunk() below). This RET will then mispredict
 * and execution will continue at the return site read from the top of
 * the stack.
 */
SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
	lea 8(%_ASM_SP), %_ASM_SP
	ret
	int3
	int3
	/* end of movabs */
	lfence
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)

SYM_CODE_START(srso_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	call srso_safe_ret
	ud2
SYM_CODE_END(srso_return_thunk)

#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
#else /* !CONFIG_MITIGATION_SRSO */
/* Dummy for the alternative in CALL_UNTRAIN_RET. */
SYM_CODE_START(srso_alias_untrain_ret)
	ANNOTATE_UNRET_SAFE
	ANNOTATE_NOENDBR
	ret
	int3
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
#define JMP_SRSO_UNTRAIN_RET "ud2"
#endif /* CONFIG_MITIGATION_SRSO */

#ifdef CONFIG_MITIGATION_UNRET_ENTRY

/*
 * Some generic notes on the untraining sequences:
 *
 * They are interchangeable when it comes to flushing potentially wrong
 * RET predictions from the BTB.
 *
 * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
 * Retbleed sequence because the return sequence done there
 * (srso_safe_ret()) is longer and the return sequence must fully nest
 * (end before) the untraining sequence. Therefore, the untraining
 * sequence must fully overlap the return sequence.
 *
 * Regarding alignment - the instructions which need to be untrained
 * must all start at a cacheline boundary for Zen1/2 generations. That
 * is, the instruction sequences starting at srso_safe_ret() and at
 * retbleed_return_thunk() must start at a cacheline boundary.
 */

/*
 * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
 * 1) The RET at retbleed_return_thunk must be on a 64-byte boundary, for
 *    alignment within the BTB.
 * 2) The instruction at retbleed_untrain_ret must contain, and not
 *    end with, the 0xc3 byte of the RET.
 * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
 *    from re-poisoning the BTB prediction.
 */
	.align 64
	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
SYM_CODE_START_LOCAL_NOALIGN(retbleed_untrain_ret)
	ANNOTATE_NOENDBR
	/*
	 * As executed from retbleed_untrain_ret, this is:
	 *
	 *	TEST $0xcc, %bl
	 *	LFENCE
	 *	JMP retbleed_return_thunk
	 *
	 * Executing the TEST instruction has a side effect of evicting any BTB
	 * prediction (potentially attacker controlled) attached to the RET, as
	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
	 */
	.byte	0xf6

	/*
	 * As executed from retbleed_return_thunk, this is a plain RET.
	 *
	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
	 *
	 * We subsequently jump backwards and architecturally execute the RET.
	 * This creates a correct BTB prediction (type=ret), but in the
	 * meantime we suffer Straight Line Speculation (because the type was
	 * no branch) which is halted by the INT3.
	 *
	 * With SMT enabled and STIBP active, a sibling thread cannot poison
	 * RET's prediction to a type of its choice, but can evict the
	 * prediction due to competitive sharing. If the prediction is
	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
	 * which will be contained safely by the INT3.
	 */
SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
	ret
	int3
SYM_CODE_END(retbleed_return_thunk)

	/*
	 * Ensure the TEST decoding / BTB invalidation is complete.
	 */
	lfence

	/*
	 * Jump back and execute the RET in the middle of the TEST instruction.
	 * INT3 is for SLS protection.
	 */
	jmp retbleed_return_thunk
	int3
SYM_FUNC_END(retbleed_untrain_ret)
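
/*
 * Editor's byte-level sketch of the overlap above (comment only):
 *
 *	retbleed_untrain_ret:		f6 c3 cc	test $0xcc, %bl
 *	retbleed_return_thunk:		   c3		ret
 *	retbleed_return_thunk + 1:	      cc	int3
 *
 * The same three bytes decode either as a single TEST instruction
 * (entered at retbleed_untrain_ret) or as RET; INT3 (entered one byte
 * later at retbleed_return_thunk) - which is exactly the "contain, and
 * not end with, the 0xc3 byte" requirement spelled out above.
 */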

#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
#else /* !CONFIG_MITIGATION_UNRET_ENTRY */
#define JMP_RETBLEED_UNTRAIN_RET "ud2"
#endif /* CONFIG_MITIGATION_UNRET_ENTRY */

#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)

SYM_FUNC_START(entry_untrain_ret)
	ANNOTATE_NOENDBR
	ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)

#endif /* CONFIG_MITIGATION_UNRET_ENTRY || CONFIG_MITIGATION_SRSO */

#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING

	.align 64
SYM_FUNC_START(call_depth_return_thunk)
	ANNOTATE_NOENDBR
	/*
	 * Keep the hotpath in a 16-byte I-fetch for the non-debug
	 * case.
	 */
	CALL_THUNKS_DEBUG_INC_RETS
	shlq	$5, PER_CPU_VAR(__x86_call_depth)
	jz	1f
	ANNOTATE_UNRET_SAFE
	ret
	int3
1:
	CALL_THUNKS_DEBUG_INC_STUFFS
	.rept	16
	ANNOTATE_INTRA_FUNCTION_CALL
	call	2f
	int3
2:
	.endr
	add	$(8*16), %rsp

	CREDIT_CALL_DEPTH

	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(call_depth_return_thunk)

#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */

#ifdef CONFIG_MITIGATION_ITS

	.align 64, 0xcc
	.skip 32, 0xcc
SYM_CODE_START(its_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(its_return_thunk)
EXPORT_SYMBOL(its_return_thunk)

#endif /* CONFIG_MITIGATION_ITS */

/*
 * This function name is magical and is used by -mfunction-return=thunk-extern
 * for the compiler to generate JMPs to it.
 *
 * This code is only used during kernel boot or module init. All
 * 'JMP __x86_return_thunk' sites are changed to something else by
 * apply_returns().
 *
 * The ALTERNATIVE below adds a really loud warning to catch the case
 * where the insufficient default return thunk ends up getting used for
 * whatever reason like miscompilation or failure of
 * objtool/alternatives/etc to patch all the return sites.
 */
SYM_CODE_START(__x86_return_thunk)
	UNWIND_HINT_FUNC
	ANNOTATE_NOENDBR
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || \
    defined(CONFIG_MITIGATION_SRSO) || \
    defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)
	ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
		    "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
#else
	ANNOTATE_UNRET_SAFE
	ret
#endif
	int3
SYM_CODE_END(__x86_return_thunk)
SYM_PIC_ALIAS(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)

#endif /* CONFIG_MITIGATION_RETHUNK */
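
/*
 * Editor's sketch of what apply_returns() rewrites the compiler-emitted
 * 'jmp __x86_return_thunk' sites into (illustrative; the actual target
 * is chosen at boot by the mitigation selection code):
 *
 *	ret; int3			- no return thunk required
 *	jmp retbleed_return_thunk	- CONFIG_MITIGATION_UNRET_ENTRY
 *	jmp srso_return_thunk or
 *	jmp srso_alias_return_thunk	- CONFIG_MITIGATION_SRSO
 *	jmp call_depth_return_thunk	- call depth tracking
 *	jmp its_return_thunk		- CONFIG_MITIGATION_ITS
 */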