GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/cpu/microcode/amd.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD CPU Microcode Update Driver for Linux
 *
 * This driver allows updating the microcode on F10h and later AMD CPUs.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	     2013-2018 Borislav Petkov <[email protected]>
 *
 * Author: Peter Oruba <[email protected]>
 *
 * Based on work by:
 * Tigran Aivazian <[email protected]>
 *
 * early loader:
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <[email protected]>
 * Fixes: Borislav Petkov <[email protected]>
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <crypto/sha2.h>

#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
#include <asm/tlb.h>

#include "internal.h"

struct ucode_patch {
	struct list_head plist;
	void *data;
	unsigned int size;
	u32 patch_id;
	u16 equiv_cpu;
};

static LIST_HEAD(microcode_cache);

#define UCODE_MAGIC			0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
#define UCODE_UCODE_TYPE		0x00000001

#define SECTION_HDR_SIZE	8
#define CONTAINER_HDR_SZ	12

struct equiv_cpu_entry {
	u32 installed_cpu;
	u32 fixed_errata_mask;
	u32 fixed_errata_compare;
	u16 equiv_cpu;
	u16 res;
} __packed;

struct microcode_header_amd {
	u32 data_code;
	u32 patch_id;
	u16 mc_patch_data_id;
	u8  mc_patch_data_len;
	u8  init_flag;
	u32 mc_patch_data_checksum;
	u32 nb_dev_id;
	u32 sb_dev_id;
	u16 processor_rev_id;
	u8  nb_rev_id;
	u8  sb_rev_id;
	u8  bios_api_rev;
	u8  reserved1[3];
	u32 match_reg[8];
} __packed;

struct microcode_amd {
	struct microcode_header_amd hdr;
	unsigned int mpb[];
};

static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;

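/*
 * On Zen and newer, the patch revision hardcodes the f/m/s of the target
 * CPU in its upper bitfields; ucode_rev_to_cpuid() below recovers them.
 */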
union zen_patch_rev {
	struct {
		__u32 rev	 : 8,
		      stepping	 : 4,
		      model	 : 4,
		      __reserved : 4,
		      ext_model	 : 4,
		      ext_fam	 : 8;
	};
	__u32 ucode_rev;
};

union cpuid_1_eax {
	struct {
		__u32 stepping	  : 4,
		      model	  : 4,
		      family	  : 4,
		      __reserved0 : 4,
		      ext_model	  : 4,
		      ext_fam	  : 8,
		      __reserved1 : 4;
	};
	__u32 full;
};

/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd/builtin before jettisoning its contents. @mc is
 * the microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;
	u32 psize;
	u8 *data;
	size_t size;
};

/*
 * The microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/arch/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

/*
 * This is CPUID(1).EAX on the BSP. It is used in two ways:
 *
 * 1. To ignore the equivalence table on Zen1 and newer.
 *
 * 2. To match which patches to load, because the patch revision ID
 *    already contains the f/m/s for which the microcode is destined.
 */
static u32 bsp_cpuid_1_eax __ro_after_init;

static bool sha_check = true;

struct patch_digest {
	u32 patch_id;
	u8 sha256[SHA256_DIGEST_SIZE];
};

#include "amd_shas.c"

static int cmp_id(const void *key, const void *elem)
{
	struct patch_digest *pd = (struct patch_digest *)elem;
	u32 patch_id = *(u32 *)key;

	if (patch_id == pd->patch_id)
		return 0;
	else if (patch_id < pd->patch_id)
		return -1;
	else
		return 1;
}

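/*
 * Construct the lowest possible patch revision for the given CPUID(1).EAX
 * value by transplanting its f/m/s bits into the Zen patch revision layout.
 */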
static u32 cpuid_to_ucode_rev(unsigned int val)
{
	union zen_patch_rev p = {};
	union cpuid_1_eax c;

	c.full = val;

	p.stepping  = c.stepping;
	p.model     = c.model;
	p.ext_model = c.ext_model;
	p.ext_fam   = c.ext_fam;

	return p.ucode_rev;
}

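/*
 * Each case below pairs a patch level prefix (cur_rev >> 8) with the highest
 * currently-installed revision for which an incoming patch's SHA256 digest
 * must still be verified; CPUs already running something newer skip the
 * check. Unknown prefixes err on the side of checking.
 */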
static bool need_sha_check(u32 cur_rev)
{
	if (!cur_rev) {
		cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
		pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
	}

	switch (cur_rev >> 8) {
	case 0x80012: return cur_rev <= 0x800126f; break;
	case 0x80082: return cur_rev <= 0x800820f; break;
	case 0x83010: return cur_rev <= 0x830107c; break;
	case 0x86001: return cur_rev <= 0x860010e; break;
	case 0x86081: return cur_rev <= 0x8608108; break;
	case 0x87010: return cur_rev <= 0x8701034; break;
	case 0x8a000: return cur_rev <= 0x8a0000a; break;
	case 0xa0010: return cur_rev <= 0xa00107a; break;
	case 0xa0011: return cur_rev <= 0xa0011da; break;
	case 0xa0012: return cur_rev <= 0xa001243; break;
	case 0xa0082: return cur_rev <= 0xa00820e; break;
	case 0xa1011: return cur_rev <= 0xa101153; break;
	case 0xa1012: return cur_rev <= 0xa10124e; break;
	case 0xa1081: return cur_rev <= 0xa108109; break;
	case 0xa2010: return cur_rev <= 0xa20102f; break;
	case 0xa2012: return cur_rev <= 0xa201212; break;
	case 0xa4041: return cur_rev <= 0xa404109; break;
	case 0xa5000: return cur_rev <= 0xa500013; break;
	case 0xa6012: return cur_rev <= 0xa60120a; break;
	case 0xa7041: return cur_rev <= 0xa704109; break;
	case 0xa7052: return cur_rev <= 0xa705208; break;
	case 0xa7080: return cur_rev <= 0xa708009; break;
	case 0xa70c0: return cur_rev <= 0xa70C009; break;
	case 0xaa001: return cur_rev <= 0xaa00116; break;
	case 0xaa002: return cur_rev <= 0xaa00218; break;
	case 0xb0021: return cur_rev <= 0xb002146; break;
	case 0xb1010: return cur_rev <= 0xb101046; break;
	case 0xb2040: return cur_rev <= 0xb204031; break;
	case 0xb4040: return cur_rev <= 0xb404031; break;
	case 0xb6000: return cur_rev <= 0xb600031; break;
	case 0xb7000: return cur_rev <= 0xb700031; break;
	default: break;
	}

	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
	return true;
}

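/*
 * Check @data's SHA256 digest against the one recorded for @patch_id in the
 * amd_shas.c table of known digests (phashes). Only Zen-based parts
 * (family >= 0x17) are subject to the check, and it can be waived on the
 * command line via microcode.amd_sha_check=off.
 */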
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
	struct patch_digest *pd = NULL;
	u8 digest[SHA256_DIGEST_SIZE];
	int i;

	if (x86_family(bsp_cpuid_1_eax) < 0x17)
		return true;

	if (!need_sha_check(cur_rev))
		return true;

	if (!sha_check)
		return true;

	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
	if (!pd) {
		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
		return false;
	}

	sha256(data, len, digest);

	if (memcmp(digest, pd->sha256, sizeof(digest))) {
		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);

		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			pr_cont("0x%x ", digest[i]);
		pr_info("\n");

		return false;
	}

	return true;
}

static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
	union zen_patch_rev p;
	union cpuid_1_eax c;

	p.ucode_rev = val;
	c.full = 0;

	c.stepping  = p.stepping;
	c.model     = p.model;
	c.ext_model = p.ext_model;
	c.family    = 0xf;
	c.ext_fam   = p.ext_fam;

	return c;
}

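/*
 * Read the current patch level from MSR_AMD64_PATCH_LEVEL. Under
 * CONFIG_MICROCODE_DBG, a per-CPU software revision seeded from base_rev
 * or from CPUID(1).EAX is returned instead.
 */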
static u32 get_patch_level(void)
{
	u32 rev, dummy __always_unused;

	if (IS_ENABLED(CONFIG_MICROCODE_DBG)) {
		int cpu = smp_processor_id();

		if (!microcode_rev[cpu]) {
			if (!base_rev)
				base_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);

			microcode_rev[cpu] = base_rev;

			ucode_dbg("CPU%d, base_rev: 0x%x\n", cpu, base_rev);
		}

		return microcode_rev[cpu];
	}

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	return rev;
}

static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
	unsigned int i;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return 0;

	if (!et || !et->num_entries)
		return 0;

	for (i = 0; i < et->num_entries; i++) {
		struct equiv_cpu_entry *e = &et->entry[i];

		if (sig == e->installed_cpu)
			return e->equiv_cpu;
	}
	return 0;
}

/*
 * Check whether there is a valid microcode container file at the beginning
 * of @buf of size @buf_size.
 */
static bool verify_container(const u8 *buf, size_t buf_size)
{
	u32 cont_magic;

	if (buf_size <= CONTAINER_HDR_SZ) {
		ucode_dbg("Truncated microcode container header.\n");
		return false;
	}

	cont_magic = *(const u32 *)buf;
	if (cont_magic != UCODE_MAGIC) {
		ucode_dbg("Invalid magic value (0x%08x).\n", cont_magic);
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated CPU equivalence table at the
 * beginning of @buf of size @buf_size.
 */
static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
	const u32 *hdr = (const u32 *)buf;
	u32 cont_type, equiv_tbl_len;

	if (!verify_container(buf, buf_size))
		return false;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return true;

	cont_type = hdr[1];
	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
		ucode_dbg("Wrong microcode container equivalence table type: %u.\n",
			  cont_type);
		return false;
	}

	buf_size -= CONTAINER_HDR_SZ;

	equiv_tbl_len = hdr[2];
	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
	    buf_size < equiv_tbl_len) {
		ucode_dbg("Truncated equivalence table.\n");
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated microcode patch section at
 * the beginning of @buf of size @buf_size.
 *
 * On success, the patch size according to the section header is returned to
 * the caller in @sh_psize.
 */
static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
	u32 p_type, p_size;
	const u32 *hdr;

	if (buf_size < SECTION_HDR_SIZE) {
		ucode_dbg("Truncated patch section.\n");
		return false;
	}

	hdr = (const u32 *)buf;
	p_type = hdr[0];
	p_size = hdr[1];

	if (p_type != UCODE_UCODE_TYPE) {
		ucode_dbg("Invalid type field (0x%x) in container file section header.\n",
			  p_type);
		return false;
	}

	if (p_size < sizeof(struct microcode_header_amd)) {
		ucode_dbg("Patch of size %u too short.\n", p_size);
		return false;
	}

	*sh_psize = p_size;

	return true;
}

/*
 * Check whether the remaining file size @buf_size is large enough to contain
 * a patch of the indicated size @sh_psize, and that @sh_psize does not
 * exceed the per-family maximum. @sh_psize is the size read from the
 * section header.
 */
static bool __verify_patch_size(u32 sh_psize, size_t buf_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	u32 max_size;

	if (family >= 0x15)
		goto ret;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824

	switch (family) {
	case 0x10 ... 0x12:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	default:
		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
		return false;
	}

	if (sh_psize > max_size)
		return false;

ret:
	/* Working with the whole buffer so < is ok. */
	return sh_psize <= buf_size;
}

/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct microcode_header_amd *mc_hdr;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length, so we need to subtract
	 * it before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		ucode_dbg("Patch of size %u truncated.\n", sh_psize);
		return -1;
	}

	if (!__verify_patch_size(sh_psize, buf_size)) {
		ucode_dbg("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	proc_id   = mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);

	ucode_dbg("Patch-ID 0x%08x: family: 0x%x\n", mc_hdr->patch_id, patch_fam);

	if (patch_fam != family)
		return 1;

	return 0;
}

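/*
 * A patch matches when its embedded f/m/s equals the boot CPU's
 * CPUID(1).EAX (Zen and newer), or when its processor_rev_id equals the
 * equivalence ID computed from the container's equivalence table (pre-Zen).
 */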
static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
{
	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
	else
		return eq_id == mc->hdr.processor_rev_id;
}

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns the number of bytes consumed while scanning. @desc contains all
 * the data we're going to use in later stages of the application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size))
		return 0;

	buf = ucode;

	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(buf, size, &patch_size);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next container,
			 * if there is one. Before exiting, check whether a patch
			 * has already been found in this container. If so, use it.
			 */
			goto out;
		} else if (ret > 0) {
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);

		ucode_dbg("patch_id: 0x%x\n", mc->hdr.patch_id);

		if (mc_patch_matches(mc, eq_id)) {
			desc->psize = patch_size;
			desc->mc = mc;

			ucode_dbg(" match: size: %d\n", patch_size);
		}

skip:
		/* Skip patch section header too: */
		buf  += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

out:
	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU, so return 0 to mean that
	 * @ucode already points to the proper container. Otherwise, we return
	 * the size we scanned so that we can advance to the next container in
	 * the buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	while (size) {
		size_t s = parse_container(ucode, size, desc);
		if (!s)
			return;

		/* catch wraparound */
		if (size >= s) {
			ucode += s;
			size  -= s;
		} else {
			return;
		}
	}
}

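/*
 * Hand the patch image to the CPU by writing its address to
 * MSR_AMD64_PATCH_LOADER, then confirm application by re-reading the patch
 * level and comparing it with the patch header's ID. On family 0x17 the
 * image's pages are also flushed from the TLB after the write.
 */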
static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
				  unsigned int psize)
{
	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;

	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
		return false;

	native_wrmsrq(MSR_AMD64_PATCH_LOADER, p_addr);

	if (x86_family(bsp_cpuid_1_eax) == 0x17) {
		unsigned long p_addr_end = p_addr + psize - 1;

		invlpg(p_addr);

		/*
		 * Flush next page too if patch image is crossing a page
		 * boundary.
		 */
		if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
			invlpg(p_addr_end);
	}

	if (IS_ENABLED(CONFIG_MICROCODE_DBG))
		microcode_rev[smp_processor_id()] = mc->hdr.patch_id;

	/* verify patch application was successful */
	*cur_rev = get_patch_level();

	ucode_dbg("updated rev: 0x%x\n", *cur_rev);

	if (*cur_rev != mc->hdr.patch_id)
		return false;

	return true;
}

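/* Look for microcode compiled into the kernel image itself; 64-bit only. */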
static bool get_builtin_microcode(struct cpio_data *cp)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct firmware fw;

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%02hhxh.bin", family);

	if (firmware_request_builtin(&fw, fw_name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}

	return false;
}

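/* Prefer builtin microcode; fall back to a blob prepended to the initrd. */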
static bool __init find_blobs_in_containers(struct cpio_data *ret)
{
	struct cpio_data cp;
	bool found;

	if (!get_builtin_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path);

	found = cp.data && cp.size;
	if (found)
		*ret = cp;

	return found;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in the initrd, traverse the equivalence table, look
 * for a matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP
 * load, and on 32-bit during save_microcode_in_initrd() -- we can call
 * load_microcode_amd() to save the equivalence table and microcode patches
 * in kernel heap memory.
 */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { };
	struct microcode_amd *mc;
	struct cpio_data cp = { };
	char buf[4];
	u32 rev;

	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
		if (!strncmp(buf, "off", 3)) {
			sha_check = false;
			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		}
	}

	bsp_cpuid_1_eax = cpuid_1_eax;

	rev = get_patch_level();
	ed->old_rev = rev;

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	if (!find_blobs_in_containers(&cp))
		return;

	scan_containers(cp.data, cp.size, &desc);

	mc = desc.mc;
	if (!mc)
		return;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (ed->old_rev > mc->hdr.patch_id)
		return;

	if (__apply_microcode_amd(mc, &rev, desc.psize))
		ed->new_rev = rev;
}

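/*
 * Two patches target the same CPU when their patch IDs decode to the same
 * f/m/s (Zen and newer), or when they carry the same equivalence ID
 * (pre-Zen). @ignore_stepping widens the match to all steppings of a model.
 */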
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
					 struct ucode_patch *n,
					 bool ignore_stepping)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
		union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);

		if (ignore_stepping) {
			p_cid.stepping = 0;
			n_cid.stepping = 0;
		}

		return p_cid.full == n_cid.full;
	} else {
		return p->equiv_cpu == n->equiv_cpu;
	}
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
{
	struct ucode_patch *p;
	struct ucode_patch n;

	n.equiv_cpu = equiv_cpu;
	n.patch_id  = uci->cpu_sig.rev;

	list_for_each_entry(p, &microcode_cache, plist)
		if (patch_cpus_equivalent(p, &n, false))
			return p;

	return NULL;
}

static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union zen_patch_rev zp, zn;

		zp.ucode_rev = p->patch_id;
		zn.ucode_rev = n->patch_id;

		if (zn.stepping != zp.stepping)
			return -1;

		return zn.rev > zp.rev;
	} else {
		return n->patch_id > p->patch_id;
	}
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;
	int ret;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (patch_cpus_equivalent(p, new_patch, true)) {
			ret = patch_newer(p, new_patch);
			if (ret < 0)
				continue;
			else if (!ret) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

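/*
 * Look up the cached patch for @cpu, refreshing the recorded patch level
 * first. Pre-Zen CPUs must resolve their equivalence ID before consulting
 * the cache.
 */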
static struct ucode_patch *find_patch(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u16 equiv_id = 0;

	uci->cpu_sig.rev = get_patch_level();

	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
		if (!equiv_id)
			return NULL;
	}

	return cache_find_patch(uci, equiv_id);
}

void reload_ucode_amd(unsigned int cpu)
{
	u32 rev, dummy __always_unused;
	struct microcode_amd *mc;
	struct ucode_patch *p;

	p = find_patch(cpu);
	if (!p)
		return;

	mc = p->data;

	rev = get_patch_level();
	if (rev < mc->hdr.patch_id) {
		if (__apply_microcode_amd(mc, &rev, p->size))
			pr_info_once("reload revision: 0x%08x\n", rev);
	}
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = get_patch_level();

	/*
	 * A patch could have been loaded early; set uci->mc so that
	 * mc_bp_resume() can call apply_microcode().
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	return 0;
}

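/*
 * Apply the cached patch to @cpu, which must be the CPU this code runs on.
 * Returns UCODE_NFOUND when the cache has nothing suitable, UCODE_OK when
 * the installed revision is already newer, UCODE_UPDATED on success and
 * UCODE_ERROR when the hardware rejects the patch.
 */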
static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	rev = uci->cpu_sig.rev;

	mc_amd  = p->data;
	uci->mc = p->data;

	/* need to apply patch? */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode     = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}

void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	unsigned int cpu = smp_processor_id();

	ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
	apply_microcode_amd(cpu);
}

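/*
 * Copy the container's equivalence table into kernel memory (pre-Zen only)
 * and return the number of container bytes consumed, or 0 on error.
 */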
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		goto out;

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

out:
	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return;

	vfree(equiv_table.entry);
	memset(&equiv_table, 0, sizeof(equiv_table));
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error such as a failed memory allocation, after which the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(fw, leftover, patch_size);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}
	patch->size = *patch_size;

	mc_hdr  = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	ucode_dbg("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
		  __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}

/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw += offset;
	size -= offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw += crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}

static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		cleanup();

	return ret;
}

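/*
 * Reload the patch cache from @data and return UCODE_NEW if any node's
 * first CPU now has a newer patch cached than the revision it is running,
 * signalling the core loader that an update is worth attempting.
 */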
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
	unsigned int nid, cpu;
	struct ucode_patch *p;
	enum ucode_state ret;

	ret = _load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		return ret;

	for_each_node_with_cpus(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		if (c->microcode >= p->patch_id)
			continue;

		ret = UCODE_NEW;
	}

	return ret;
}

static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	unsigned int cpuid_1_eax;
	enum ucode_state ret;
	struct cpio_data cp;

	if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	cpuid_1_eax = native_cpuid_eax(1);

	if (!find_blobs_in_containers(&cp))
		return -EINVAL;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
early_initcall(save_microcode_in_initrd);

/*
 * AMD microcode firmware naming convention: up to family 15h, patches live
 * in the legacy file:
 *
 * amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 * amd-ucode/microcode_amd_fam15h.bin
 * amd-ucode/microcode_amd_fam16h.bin
 * ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	if (force_minrev)
		return UCODE_NFOUND;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		ucode_dbg("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (!verify_container(fw->data, fw->size))
		goto fw_release;

	ret = load_microcode_amd(c->x86, fw->data, fw->size);

fw_release:
	release_firmware(fw);

out:
	return ret;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static void finalize_late_load_amd(int result)
{
	if (result)
		cleanup();
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw	= request_microcode_amd,
	.collect_cpu_info	= collect_cpu_info_amd,
	.apply_microcode	= apply_microcode_amd,
	.microcode_fini_cpu	= microcode_fini_cpu_amd,
	.finalize_late_load	= finalize_late_load_amd,
	.nmi_safe		= true,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}
	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}