Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/boot/compressed/sev.c
29271 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* AMD Encrypted Register State Support
4
*
5
* Author: Joerg Roedel <[email protected]>
6
*/
7
8
/*
9
* misc.h needs to be first because it knows how to include the other kernel
10
* headers in the pre-decompression code in a way that does not break
11
* compilation.
12
*/
13
#include "misc.h"
14
15
#include <asm/bootparam.h>
16
#include <asm/pgtable_types.h>
17
#include <asm/sev.h>
18
#include <asm/trapnr.h>
19
#include <asm/trap_pf.h>
20
#include <asm/msr-index.h>
21
#include <asm/fpu/xcr.h>
22
#include <asm/ptrace.h>
23
#include <asm/svm.h>
24
#include <asm/cpuid/api.h>
25
26
#include "error.h"
27
#include "sev.h"
28
29
/* Boot GHCB used for hypervisor communication; must be page-aligned. */
static struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
/* Points at boot_ghcb_page once early_setup_ghcb() succeeds, NULL otherwise. */
struct ghcb *boot_ghcb;

/*
 * The decompression stub has no init sections, so neutralize the __init
 * annotations used by the shared code included below.
 */
#undef __init
#define __init

/* Tell the shared SEV code it is built into the pre-decompression stub. */
#define __BOOT_COMPRESSED

/* Non-zero when not running at VMPL0 (set via SVSM detection paths below). */
u8 snp_vmpl;
/* Negotiated GHCB protocol version; must be >= 2 for shared-page termination. */
u16 ghcb_version;

/* Physical address of the SVSM Calling Area, if an SVSM is present. */
u64 boot_svsm_caa_pa;

/* Include code for early handlers */
#include "../../boot/startup/sev-shared.c"
44
45
static bool sev_snp_enabled(void)
46
{
47
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
48
}
49
50
void snp_set_page_private(unsigned long paddr)
51
{
52
struct psc_desc d = {
53
SNP_PAGE_STATE_PRIVATE,
54
(struct svsm_ca *)boot_svsm_caa_pa,
55
boot_svsm_caa_pa
56
};
57
58
if (!sev_snp_enabled())
59
return;
60
61
__page_state_change(paddr, paddr, &d);
62
}
63
64
void snp_set_page_shared(unsigned long paddr)
65
{
66
struct psc_desc d = {
67
SNP_PAGE_STATE_SHARED,
68
(struct svsm_ca *)boot_svsm_caa_pa,
69
boot_svsm_caa_pa
70
};
71
72
if (!sev_snp_enabled())
73
return;
74
75
__page_state_change(paddr, paddr, &d);
76
}
77
78
/*
 * Map the boot GHCB page decrypted, clear it and make it the active GHCB.
 *
 * Returns true on success, false if the page could not be mapped decrypted.
 * On success boot_ghcb points at the shared page and, for SNP guests, its
 * GPA has been registered with the hypervisor.
 */
bool early_setup_ghcb(void)
{
	if (set_page_decrypted((unsigned long)&boot_ghcb_page))
		return false;

	/* Page is now mapped decrypted, clear it */
	memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));

	boot_ghcb = &boot_ghcb_page;

	/* Initialize lookup tables for the instruction decoder */
	sev_insn_decode_init();

	/* SNP guest requires the GHCB GPA must be registered */
	if (sev_snp_enabled())
		snp_register_ghcb_early(__pa(&boot_ghcb_page));

	return true;
}
97
98
/*
 * Accept (make private) every page in [start, end), one PAGE_SIZE page
 * at a time. Note: unlike snp_set_page_private(), there is no SNP-enabled
 * guard here — callers only invoke this on SNP guests.
 */
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
	/* Positional init: state, CA pointer, CA PA (see __page_state_change()). */
	struct psc_desc desc = {
		SNP_PAGE_STATE_PRIVATE,
		(struct svsm_ca *)boot_svsm_caa_pa,
		boot_svsm_caa_pa
	};
	phys_addr_t pa = start;

	while (pa < end) {
		__page_state_change(pa, pa, &desc);
		pa += PAGE_SIZE;
	}
}
109
110
/*
 * Tear down the boot GHCB before jumping to the decompressed kernel:
 * fall back to the GHCB MSR protocol, re-encrypt the GHCB page, then
 * unmap it so any stray #VC GHCB use after this point faults visibly.
 * No-op if the GHCB was never set up.
 */
void sev_es_shutdown_ghcb(void)
{
	if (!boot_ghcb)
		return;

	if (!sev_es_check_cpu_features())
		error("SEV-ES CPU Features missing.");

	/*
	 * This denotes whether to use the GHCB MSR protocol or the GHCB
	 * shared page to perform a GHCB request. Since the GHCB page is
	 * being changed to encrypted, it can't be used to perform GHCB
	 * requests. Clear the boot_ghcb variable so that the GHCB MSR
	 * protocol is used to change the GHCB page over to an encrypted
	 * page.
	 */
	boot_ghcb = NULL;

	/*
	 * GHCB Page must be flushed from the cache and mapped encrypted again.
	 * Otherwise the running kernel will see strange cache effects when
	 * trying to use that page.
	 */
	if (set_page_encrypted((unsigned long)&boot_ghcb_page))
		error("Can't map GHCB page encrypted");

	/*
	 * GHCB page is mapped encrypted again and flushed from the cache.
	 * Mark it non-present now to catch bugs when #VC exceptions trigger
	 * after this point.
	 */
	if (set_page_non_present((unsigned long)&boot_ghcb_page))
		error("Can't unmap GHCB page");
}
144
145
/*
 * Request guest termination through the GHCB shared page, passing the
 * reason set/code in sw_exit_info_1 and auxiliary data (@exit_info_2)
 * to the hypervisor. Never returns; halts forever if the hypervisor
 * ignores the request.
 */
static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set,
					     unsigned int reason, u64 exit_info_2)
{
	u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason);

	/* Reset valid-bitmap, then populate only the fields used by this exit. */
	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	/* Point the hypervisor at this GHCB and issue the exit. */
	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	/* Should not come back from a termination request; halt if we do. */
	while (true)
		asm volatile("hlt\n" : : : "memory");
}
161
162
bool sev_es_check_ghcb_fault(unsigned long address)
163
{
164
/* Check whether the fault was on the GHCB page */
165
return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
166
}
167
168
/*
 * SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need
 * guest side implementation for proper functioning of the guest. If any
 * of these features are enabled in the hypervisor but are lacking guest
 * side implementation, the behavior of the guest will be undefined. The
 * guest could fail in a non-obvious way making it difficult to debug.
 *
 * As the behavior of reserved feature bits is unknown, to be on the
 * safe side add them to the required features mask.
 */
#define SNP_FEATURES_IMPL_REQ	(MSR_AMD64_SNP_VTOM |			\
				 MSR_AMD64_SNP_REFLECT_VC |		\
				 MSR_AMD64_SNP_RESTRICTED_INJ |		\
				 MSR_AMD64_SNP_ALT_INJ |		\
				 MSR_AMD64_SNP_DEBUG_SWAP |		\
				 MSR_AMD64_SNP_VMPL_SSS |		\
				 MSR_AMD64_SNP_SECURE_TSC |		\
				 MSR_AMD64_SNP_VMGEXIT_PARAM |		\
				 MSR_AMD64_SNP_VMSA_REG_PROT |		\
				 MSR_AMD64_SNP_RESERVED_BIT13 |		\
				 MSR_AMD64_SNP_RESERVED_BIT15 |		\
				 MSR_AMD64_SNP_SECURE_AVIC |		\
				 MSR_AMD64_SNP_RESERVED_MASK)

/* Secure AVIC counts as implemented only when the guest is built with it. */
#ifdef CONFIG_AMD_SECURE_AVIC
#define SNP_FEATURE_SECURE_AVIC	MSR_AMD64_SNP_SECURE_AVIC
#else
#define SNP_FEATURE_SECURE_AVIC	0
#endif

/*
 * SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
 * by the guest kernel. As and when a new feature is implemented in the
 * guest kernel, a corresponding bit should be added to the mask.
 */
#define SNP_FEATURES_PRESENT	(MSR_AMD64_SNP_DEBUG_SWAP |	\
				 MSR_AMD64_SNP_SECURE_TSC |	\
				 SNP_FEATURE_SECURE_AVIC)
206
207
/*
 * From an SEV status MSR value, compute the mask of hypervisor-enabled
 * SNP features that require guest support but are not implemented here.
 * Returns 0 when SNP is not enabled in @status.
 */
u64 snp_get_unsupported_features(u64 status)
{
	u64 missing;

	if (!(status & MSR_AMD64_SEV_SNP_ENABLED))
		return 0;

	missing = status & SNP_FEATURES_IMPL_REQ;
	missing &= ~SNP_FEATURES_PRESENT;

	return missing;
}
214
215
/*
 * Terminate the guest if the hypervisor enabled SNP features that this
 * kernel lacks (per SNP_FEATURES_IMPL_REQ / SNP_FEATURES_PRESENT).
 */
void snp_check_features(void)
{
	u64 unsupported;

	/*
	 * Terminate the boot if hypervisor has enabled any feature lacking
	 * guest side implementation. Pass on the unsupported features mask through
	 * EXIT_INFO_2 of the GHCB protocol so that those features can be reported
	 * as part of the guest boot failure.
	 */
	unsupported = snp_get_unsupported_features(sev_status);
	if (unsupported) {
		/*
		 * Reporting the mask needs the GHCB shared-page protocol (v2+)
		 * and a usable GHCB; otherwise fall back to plain MSR-protocol
		 * termination without the feature details.
		 */
		if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

		sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN,
				      GHCB_SNP_UNSUPPORTED, unsupported);
	}
}
234
235
/* Search for Confidential Computing blob in the EFI config table. */
236
static struct cc_blob_sev_info *find_cc_blob_efi(struct boot_params *bp)
237
{
238
unsigned long cfg_table_pa;
239
unsigned int cfg_table_len;
240
int ret;
241
242
ret = efi_get_conf_table(bp, &cfg_table_pa, &cfg_table_len);
243
if (ret)
244
return NULL;
245
246
return (struct cc_blob_sev_info *)efi_find_vendor_table(bp, cfg_table_pa,
247
cfg_table_len,
248
EFI_CC_BLOB_GUID);
249
}
250
251
/*
252
* Initial set up of SNP relies on information provided by the
253
* Confidential Computing blob, which can be passed to the boot kernel
254
* by firmware/bootloader in the following ways:
255
*
256
* - via an entry in the EFI config table
257
* - via a setup_data structure, as defined by the Linux Boot Protocol
258
*
259
* Scan for the blob in that order.
260
*/
261
static struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
262
{
263
struct cc_blob_sev_info *cc_info;
264
265
cc_info = find_cc_blob_efi(bp);
266
if (cc_info)
267
goto found_cc_info;
268
269
cc_info = find_cc_blob_setup_data(bp);
270
if (!cc_info)
271
return NULL;
272
273
found_cc_info:
274
if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
275
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
276
277
return cc_info;
278
}
279
280
/*
 * Indicate SNP based on presence of SNP-specific CC blob. Subsequent checks
 * will verify the SNP CPUID/MSR bits.
 *
 * Returns true when a valid CC blob was found (and the SNP CPUID table plus
 * SVSM Calling Area have been set up), false otherwise.
 */
static bool early_snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	/*
	 * If a SNP-specific Confidential Computing blob is present, then
	 * firmware/bootloader have indicated SNP support. Verifying this
	 * involves CPUID checks which will be more reliable if the SNP
	 * CPUID table is used. See comments over snp_setup_cpuid_table() for
	 * more details.
	 */
	setup_cpuid_table(cc_info);

	/*
	 * Record the SVSM Calling Area (CA) address if the guest is not
	 * running at VMPL0. The CA will be used to communicate with the
	 * SVSM and request its services.
	 */
	svsm_setup_ca(cc_info, rip_rel_ptr(&boot_ghcb_page));

	/*
	 * Pass run-time kernel a pointer to CC info via boot_params so EFI
	 * config table doesn't need to be searched again during early startup
	 * phase.
	 *
	 * NOTE(review): the address is truncated to u32 per the boot_params
	 * field type — presumably the blob always resides below 4 GiB here.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}
320
321
/*
 * sev_check_cpu_support - Check for SEV support in the CPU capabilities
 *
 * Returns < 0 if SEV is not supported, otherwise the position of the
 * encryption bit in the page table descriptors.
 *
 * Side effect: updates sev_snp_needs_sfw from CPUID Fn8000_001F[EBX] bit 31.
 */
static int sev_check_cpu_support(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return -ENODEV;

	/*
	 * Check for the SME/SEV feature:
	 * CPUID Fn8000_001F[EAX]
	 * - Bit 0 - Secure Memory Encryption support
	 * - Bit 1 - Secure Encrypted Virtualization support
	 * CPUID Fn8000_001F[EBX]
	 * - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV is supported */
	if (!(eax & BIT(1)))
		return -ENODEV;

	/*
	 * NOTE(review): EBX bit 31 clear appears to mean a software
	 * workaround ("sfw") is required — confirm the exact bit semantics
	 * against the AMD APM documentation of Fn8000_001F[EBX].
	 */
	sev_snp_needs_sfw = !(ebx & BIT(31));

	return ebx & 0x3f;
}
357
358
void sev_enable(struct boot_params *bp)
359
{
360
struct msr m;
361
int bitpos;
362
bool snp;
363
364
/*
365
* bp->cc_blob_address should only be set by boot/compressed kernel.
366
* Initialize it to 0 to ensure that uninitialized values from
367
* buggy bootloaders aren't propagated.
368
*/
369
if (bp)
370
bp->cc_blob_address = 0;
371
372
/*
373
* Do an initial SEV capability check before early_snp_init() which
374
* loads the CPUID page and the same checks afterwards are done
375
* without the hypervisor and are trustworthy.
376
*
377
* If the HV fakes SEV support, the guest will crash'n'burn
378
* which is good enough.
379
*/
380
381
if (sev_check_cpu_support() < 0)
382
return;
383
384
/*
385
* Setup/preliminary detection of SNP. This will be sanity-checked
386
* against CPUID/MSR values later.
387
*/
388
snp = early_snp_init(bp);
389
390
/* Now repeat the checks with the SNP CPUID table. */
391
392
bitpos = sev_check_cpu_support();
393
if (bitpos < 0) {
394
if (snp)
395
error("SEV-SNP support indicated by CC blob, but not CPUID.");
396
return;
397
}
398
399
/* Set the SME mask if this is an SEV guest. */
400
boot_rdmsr(MSR_AMD64_SEV, &m);
401
sev_status = m.q;
402
if (!(sev_status & MSR_AMD64_SEV_ENABLED))
403
return;
404
405
/* Negotiate the GHCB protocol version. */
406
if (sev_status & MSR_AMD64_SEV_ES_ENABLED) {
407
if (!sev_es_negotiate_protocol())
408
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_PROT_UNSUPPORTED);
409
}
410
411
/*
412
* SNP is supported in v2 of the GHCB spec which mandates support for HV
413
* features.
414
*/
415
if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
416
u64 hv_features;
417
418
hv_features = get_hv_features();
419
if (!(hv_features & GHCB_HV_FT_SNP))
420
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
421
422
/*
423
* Running at VMPL0 is required unless an SVSM is present and
424
* the hypervisor supports the required SVSM GHCB events.
425
*/
426
if (snp_vmpl && !(hv_features & GHCB_HV_FT_SNP_MULTI_VMPL))
427
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
428
}
429
430
if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
431
error("SEV-SNP supported indicated by CC blob, but not SEV status MSR.");
432
433
sme_me_mask = BIT_ULL(bitpos);
434
}
435
436
/*
437
* sev_get_status - Retrieve the SEV status mask
438
*
439
* Returns 0 if the CPU is not SEV capable, otherwise the value of the
440
* AMD64_SEV MSR.
441
*/
442
u64 sev_get_status(void)
443
{
444
struct msr m;
445
446
if (sev_check_cpu_support() < 0)
447
return 0;
448
449
boot_rdmsr(MSR_AMD64_SEV, &m);
450
return m.q;
451
}
452
453
/*
 * Identity-map the Confidential Computing blob and the CPUID table it
 * references (SNP only), then verify the C-bit position against the
 * given top-level page table.
 */
void sev_prep_identity_maps(unsigned long top_level_pgt)
{
	/*
	 * The Confidential Computing blob is used very early in uncompressed
	 * kernel to find the in-memory CPUID table to handle CPUID
	 * instructions. Make sure an identity-mapping exists so it can be
	 * accessed after switchover.
	 */
	if (sev_snp_enabled()) {
		unsigned long cc_info_pa = boot_params_ptr->cc_blob_address;
		struct cc_blob_sev_info *cc_info;

		/* Map the blob header first so its fields can be read below. */
		kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));

		cc_info = (struct cc_blob_sev_info *)cc_info_pa;
		kernel_add_identity_map(cc_info->cpuid_phys, cc_info->cpuid_phys + cc_info->cpuid_len);
	}

	sev_verify_cbit(top_level_pgt);
}
473
474
/*
 * Cached check whether this is an SEV-SNP guest.
 *
 * On the first SNP-positive call, if snp_vmpl is still zero, probe for an
 * SVSM (CPUID Fn8000_001F EAX[28]) and, when present, record the Calling
 * Area physical address and mark the guest as not running at VMPL0.
 */
bool early_is_sevsnp_guest(void)
{
	/* Latched after the first positive detection; never reset. */
	static bool sevsnp;

	if (sevsnp)
		return true;

	if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
		return false;

	sevsnp = true;

	if (!snp_vmpl) {
		unsigned int eax, ebx, ecx, edx;

		/*
		 * CPUID Fn8000_001F_EAX[28] - SVSM support
		 */
		eax = 0x8000001f;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (eax & BIT(28)) {
			struct msr m;

			/* Obtain the address of the calling area to use */
			boot_rdmsr(MSR_SVSM_CAA, &m);
			boot_svsm_caa_pa = m.q;

			/*
			 * The real VMPL level cannot be discovered, but the
			 * memory acceptance routines make no use of that so
			 * any non-zero value suffices here.
			 */
			snp_vmpl = U8_MAX;
		}
	}
	return true;
}
512
513