/*
 * arch/x86/kernel/cpu/microcode/internal.h
 */
/* SPDX-License-Identifier: GPL-2.0 */1#ifndef _X86_MICROCODE_INTERNAL_H2#define _X86_MICROCODE_INTERNAL_H34#include <linux/earlycpio.h>5#include <linux/initrd.h>67#include <asm/cpu.h>8#include <asm/microcode.h>910struct device;1112enum ucode_state {13UCODE_OK = 0,14UCODE_NEW,15UCODE_NEW_SAFE,16UCODE_UPDATED,17UCODE_NFOUND,18UCODE_ERROR,19UCODE_TIMEOUT,20UCODE_OFFLINE,21};2223struct microcode_ops {24enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);25void (*microcode_fini_cpu)(int cpu);2627/*28* The generic 'microcode_core' part guarantees that the callbacks29* below run on a target CPU when they are being called.30* See also the "Synchronization" section in microcode_core.c.31*/32enum ucode_state (*apply_microcode)(int cpu);33int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);34void (*finalize_late_load)(int result);35unsigned int nmi_safe : 1,36use_nmi : 1;37};3839struct early_load_data {40u32 old_rev;41u32 new_rev;42};4344extern struct early_load_data early_data;45extern struct ucode_cpu_info ucode_cpu_info[];46extern u32 microcode_rev[NR_CPUS];47extern u32 base_rev;4849struct cpio_data find_microcode_in_initrd(const char *path);5051#define MAX_UCODE_COUNT 1285253#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))54#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')55#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')56#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')57#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')58#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')59#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')6061#define CPUID_IS(a, b, c, ebx, ecx, edx) \62(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))6364/*65* In early loading microcode phase on BSP, boot_cpu_data is not set up yet.66* x86_cpuid_vendor() gets vendor id for BSP.67*68* In 32 bit AP case, accessing boot_cpu_data needs linear address. 
To simplify69* coding, we still use x86_cpuid_vendor() to get vendor id for AP.70*71* x86_cpuid_vendor() gets vendor information directly from CPUID.72*/73static inline int x86_cpuid_vendor(void)74{75u32 eax = 0x00000000;76u32 ebx, ecx = 0, edx;7778native_cpuid(&eax, &ebx, &ecx, &edx);7980if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))81return X86_VENDOR_INTEL;8283if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))84return X86_VENDOR_AMD;8586return X86_VENDOR_UNKNOWN;87}8889static inline unsigned int x86_cpuid_family(void)90{91u32 eax = 0x00000001;92u32 ebx, ecx = 0, edx;9394native_cpuid(&eax, &ebx, &ecx, &edx);9596return x86_family(eax);97}9899extern bool force_minrev;100101#ifdef CONFIG_CPU_SUP_AMD102void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);103void load_ucode_amd_ap(unsigned int family);104void reload_ucode_amd(unsigned int cpu);105struct microcode_ops *init_amd_microcode(void);106void exit_amd_microcode(void);107#else /* CONFIG_CPU_SUP_AMD */108static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }109static inline void load_ucode_amd_ap(unsigned int family) { }110static inline void reload_ucode_amd(unsigned int cpu) { }111static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }112static inline void exit_amd_microcode(void) { }113#endif /* !CONFIG_CPU_SUP_AMD */114115#ifdef CONFIG_CPU_SUP_INTEL116void load_ucode_intel_bsp(struct early_load_data *ed);117void load_ucode_intel_ap(void);118void reload_ucode_intel(void);119struct microcode_ops *init_intel_microcode(void);120#else /* CONFIG_CPU_SUP_INTEL */121static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }122static inline void load_ucode_intel_ap(void) { }123static inline void reload_ucode_intel(void) { }124static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }125#endif /* !CONFIG_CPU_SUP_INTEL */126127#define ucode_dbg(fmt, ...) 
\128({ \129if (IS_ENABLED(CONFIG_MICROCODE_DBG)) \130pr_info(fmt, ##__VA_ARGS__); \131})132133#endif /* _X86_MICROCODE_INTERNAL_H */134135136