author    | Linus Torvalds <torvalds@linux-foundation.org> | 2025-09-30 13:40:35 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-09-30 13:40:35 -0700
commit    | 22bdd6e68bbe270a916233ec5f34a13ae5e80ed9 (patch)
tree      | 0d94e506ca828acd9939e83d1bbe8a3c8594e7a1 /arch/x86/coco/sev/vc-shared.c
parent    | 2cb8eeaf00efc037988910de17ffe592b23941a6 (diff)
parent    | 1f6113ae5ac4927fe80256154ebb0461e670fa85 (diff)
Merge tag 'x86_apic_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 SEV and APIC updates from Borislav Petkov:
- Add functionality to provide runtime firmware updates for the non-x86
parts of an AMD platform, such as the security processor (ASP) firmware,
modules, etc. The intent is for these updates to be interim, live fixups
until a proper BIOS update can be attempted
- Add guest support for AMD's Secure AVIC feature, which gives encrypted
guests the needed protection against a malicious hypervisor generating
unexpected interrupts and injecting them into such a guest, thus
interfering with its operation.
The advantage of this scheme is that the guest determines which
interrupts to accept, and when, instead of leaving that to the
benevolence (or not) of the hypervisor
- Strictly separate the startup code from the rest of the kernel, where
the former is executed from the initial 1:1 mapping of memory.
The problem was that the toolchain-generated version of the code was
being executed from a different mapping of memory than the one "assumed"
during code generation, which required an ever-growing pile of fixups for
absolute memory references that are invalid in the early 1:1 memory
mapping during boot.
The major advantage of this is that the 1:1-mapped portion of the code no
longer needs to be checked for absolute relocations, and the
RIP_REL_REF() macro sprinkled all over the place can be removed; a
minimal illustrative sketch of the problem follows the Link below.
For more info, see Ard's very detailed writeup on this [1]
- The usual cleanups and fixes
Link: https://lore.kernel.org/r/CAMj1kXEzKEuePEiHB%2BHxvfQbFz0sTiHdn4B%2B%2BzVBJ2mhkPkQ4Q@mail.gmail.com [1]
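To make the 1:1-mapping point above concrete, here is a minimal,
self-contained sketch. It is not the kernel's actual implementation: the
macro below only mimics the idea behind RIP_REL_REF(), and the symbol and
function names are made up for illustration.

/*
 * A global referenced through a link-time absolute address resolves to the
 * kernel's virtual address, which is not mapped yet while startup code runs
 * from the early 1:1 (identity) mapping. Computing the address RIP-relatively
 * instead resolves it to wherever the code is actually executing.
 */
static unsigned long boot_flag;

/* Force a RIP-relative address computation rather than an absolute one. */
#define RIP_REL_REF_SKETCH(var) (*({					\
	typeof(&(var)) __ptr;						\
	asm("leaq %c1(%%rip), %0" : "=r" (__ptr) : "i" (&(var)));	\
	__ptr;								\
}))

void startup_running_from_identity_map(void)
{
	/* A plain "boot_flag = 1" may use an absolute, still-unmapped address. */
	RIP_REL_REF_SKETCH(boot_flag) = 1;
}

With the startup code now built position-independently, such explicit
accessors are no longer needed, and the new objtool check for the absence of
absolute relocations (see the shortlog below) enforces that.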
* tag 'x86_apic_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (49 commits)
x86/boot: Drop erroneous __init annotation from early_set_pages_state()
crypto: ccp - Add AMD Seamless Firmware Servicing (SFS) driver
crypto: ccp - Add new HV-Fixed page allocation/free API
x86/sev: Add new dump_rmp parameter to snp_leak_pages() API
x86/startup/sev: Document the CPUID flow in the boot #VC handler
objtool: Ignore __pi___cfi_ prefixed symbols
x86/sev: Zap snp_abort()
x86/apic/savic: Do not use snp_abort()
x86/boot: Get rid of the .head.text section
x86/boot: Move startup code out of __head section
efistub/x86: Remap inittext read-execute when needed
x86/boot: Create a confined code area for startup code
x86/kbuild: Incorporate boot/startup/ via Kbuild makefile
x86/boot: Revert "Reject absolute references in .head.text"
x86/boot: Check startup code for absence of absolute relocations
objtool: Add action to check for absence of absolute relocations
x86/sev: Export startup routines for later use
x86/sev: Move __sev_[get|put]_ghcb() into separate noinstr object
x86/sev: Provide PIC aliases for SEV related data objects
x86/boot: Provide PIC aliases for 5-level paging related constants
...
Diffstat (limited to 'arch/x86/coco/sev/vc-shared.c')
-rw-r--r-- | arch/x86/coco/sev/vc-shared.c | 143 |
1 file changed, 142 insertions(+), 1 deletion(-)
diff --git a/arch/x86/coco/sev/vc-shared.c b/arch/x86/coco/sev/vc-shared.c
index 2c0ab0fdc060..9b01c9ad81be 100644
--- a/arch/x86/coco/sev/vc-shared.c
+++ b/arch/x86/coco/sev/vc-shared.c
@@ -409,15 +409,109 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 	return ret;
 }
 
+enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+	u32 ret;
+
+	ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
+	if (!ret)
+		return ES_OK;
+
+	if (ret == 1) {
+		u64 info = ghcb->save.sw_exit_info_2;
+		unsigned long v = info & SVM_EVTINJ_VEC_MASK;
+
+		/* Check if exception information from hypervisor is sane. */
+		if ((info & SVM_EVTINJ_VALID) &&
+		    ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
+		    ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
+			ctxt->fi.vector = v;
+
+			if (info & SVM_EVTINJ_VALID_ERR)
+				ctxt->fi.error_code = info >> 32;
+
+			return ES_EXCEPTION;
+		}
+	}
+
+	return ES_VMM_ERROR;
+}
+
+enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+				   struct es_em_ctxt *ctxt,
+				   u64 exit_code, u64 exit_info_1,
+				   u64 exit_info_2)
+{
+	/* Fill in protocol and format specifiers */
+	ghcb->protocol_version = ghcb_version;
+	ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
+
+	ghcb_set_sw_exit_code(ghcb, exit_code);
+	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
+	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
+
+	sev_es_wr_ghcb_msr(__pa(ghcb));
+	VMGEXIT();
+
+	return verify_exception_info(ghcb, ctxt);
+}
+
+static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+{
+	u32 cr4 = native_read_cr4();
+	int ret;
+
+	ghcb_set_rax(ghcb, leaf->fn);
+	ghcb_set_rcx(ghcb, leaf->subfn);
+
+	if (cr4 & X86_CR4_OSXSAVE)
+		/* Safe to read xcr0 */
+		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
+	else
+		/* xgetbv will cause #UD - use reset value for xcr0 */
+		ghcb_set_xcr0(ghcb, 1);
+
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
+	if (ret != ES_OK)
+		return ret;
+
+	if (!(ghcb_rax_is_valid(ghcb) &&
+	      ghcb_rbx_is_valid(ghcb) &&
+	      ghcb_rcx_is_valid(ghcb) &&
+	      ghcb_rdx_is_valid(ghcb)))
+		return ES_VMM_ERROR;
+
+	leaf->eax = ghcb->save.rax;
+	leaf->ebx = ghcb->save.rbx;
+	leaf->ecx = ghcb->save.rcx;
+	leaf->edx = ghcb->save.rdx;
+
+	return ES_OK;
+}
+
+struct cpuid_ctx {
+	struct ghcb *ghcb;
+	struct es_em_ctxt *ctxt;
+};
+
+static void snp_cpuid_hv_ghcb(void *p, struct cpuid_leaf *leaf)
+{
+	struct cpuid_ctx *ctx = p;
+
+	if (__sev_cpuid_hv_ghcb(ctx->ghcb, ctx->ctxt, leaf))
+		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
+}
+
 static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 {
+	struct cpuid_ctx ctx = { ghcb, ctxt };
 	struct pt_regs *regs = ctxt->regs;
 	struct cpuid_leaf leaf;
 	int ret;
 
 	leaf.fn = regs->ax;
 	leaf.subfn = regs->cx;
-	ret = snp_cpuid(ghcb, ctxt, &leaf);
+	ret = snp_cpuid(snp_cpuid_hv_ghcb, &ctx, &leaf);
 	if (!ret) {
 		regs->ax = leaf.eax;
 		regs->bx = leaf.ebx;
@@ -502,3 +596,50 @@ static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
 
 	return ES_OK;
 }
+
+void snp_register_ghcb_early(unsigned long paddr)
+{
+	unsigned long pfn = paddr >> PAGE_SHIFT;
+	u64 val;
+
+	sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn));
+	VMGEXIT();
+
+	val = sev_es_rd_ghcb_msr();
+
+	/* If the response GPA is not ours then abort the guest */
+	if ((GHCB_RESP_CODE(val) != GHCB_MSR_REG_GPA_RESP) ||
+	    (GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn))
+		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER);
+}
+
+bool __init sev_es_check_cpu_features(void)
+{
+	if (!has_cpuflag(X86_FEATURE_RDRAND)) {
+		error("RDRAND instruction not supported - no trusted source of randomness available\n");
+		return false;
+	}
+
+	return true;
+}
+
+bool sev_es_negotiate_protocol(void)
+{
+	u64 val;
+
+	/* Do the GHCB protocol version negotiation */
+	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
+	VMGEXIT();
+	val = sev_es_rd_ghcb_msr();
+
+	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
+		return false;
+
+	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
+	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
+		return false;
+
+	ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val), GHCB_PROTOCOL_MAX);
+
+	return true;
+}
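The most visible interface change in the hunks above is that snp_cpuid() now
takes a fetch callback plus an opaque context (struct cpuid_ctx) instead of
the ghcb/ctxt pair, so the shared CPUID code no longer requires a GHCB of its
own. Below is a standalone sketch of that callback-plus-context pattern as a
userspace mock; every name in it is a local stand-in, not the kernel's code.

#include <stdio.h>

struct cpuid_leaf {
	unsigned int fn, subfn;
	unsigned int eax, ebx, ecx, edx;
};

/* Same shape as the kernel callback: opaque context plus the leaf to fill. */
typedef void (*cpuid_hv_fn)(void *ctx, struct cpuid_leaf *leaf);

/* Mock "ask the hypervisor" backend; the real one issues a GHCB VMGEXIT. */
struct mock_ctx {
	unsigned int calls;
};

static void mock_cpuid_hv(void *p, struct cpuid_leaf *leaf)
{
	struct mock_ctx *ctx = p;

	ctx->calls++;
	leaf->eax = 0xd;	/* pretend the maximum standard leaf is 0xd */
	leaf->ebx = leaf->ecx = leaf->edx = 0;
}

/* Generic lookup path: table lookup elided, falls back to the callback. */
static int snp_cpuid_like(cpuid_hv_fn cpuid_hv, void *ctx, struct cpuid_leaf *leaf)
{
	cpuid_hv(ctx, leaf);
	return 0;
}

int main(void)
{
	struct mock_ctx ctx = { 0 };
	struct cpuid_leaf leaf = { .fn = 0, .subfn = 0 };

	snp_cpuid_like(mock_cpuid_hv, &ctx, &leaf);
	printf("leaf 0: eax=%#x (hypervisor calls: %u)\n", leaf.eax, ctx.calls);
	return 0;
}

The design point mirrored here is that the caller (vc_handle_cpuid_snp in
vc-shared.c) owns the GHCB state and packages it into the context, while the
generic code only sees the callback, which presumably is what allows the same
CPUID logic to be shared with callers that obtain the leaf differently.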