Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ (synced 2025-04-19 20:58:31 +09:00)
Miscellaneous x86 fixes:

 - Fix hypercall detection on Xen guests

 - Extend the AMD microcode loader SHA check to Zen5, to block loading
   of any unreleased standalone Zen5 microcode patches

 - Add new Intel CPU model number for Bartlett Lake

 - Fix the workaround for AMD erratum 1054

 - Fix buggy early memory acceptance between SEV-SNP guests and the EFI
   stub

Signed-off-by: Ingo Molnar <mingo@kernel.org>

-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmgCuUsRHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1jnuRAAqj+XgVPE25Yw0qyFjWxJwHg3duPPMBql
BTs6766d8DJTZI2x+zGmiJzxUynKyMItQnkhS7w2n/T11YKw/wW2nMOYNqBgFJwS
A7au/4PSp0LJARO5I3GxEVUFDQwawsf8ly+OeOZacKhtycmxL3Pr9pMHU98GWb5C
TvqKQlohp/YI3SKpLxSE/LCJJZr/8o7uOt3i95Rx8fH9zEp4Bat5OPpFpPZpefDS
kWnQKFq+iSwDldPMg00/SdpFDZVqhHItodeMqJdz6MMa7+sPB5dyjYNGuT9Dvf03
zMv3mjWFDTPjezvuQH+kTxhOfgFtV8VI+b35c2JqTyqkvSzrcrOV1W2EJausSt4H
D//UXzDaAcJJaq/YWBuX+DaajyRdVl6i8trtgKMM0BWRPa7wTBFiJU7Lvt73gW/s
8/c5+V0iI0tkySkqoCZJKVwVVxHDxf9z5CQomEwupf7SrI+O8gjjwi0F8NwV0Zeo
kP8InCOHVWFHKqf5G4lVsF7qqLgCSJFkKeyVXR8ZHtrcqEMDoF4eZDfky36K5d8f
OMMWF/LAh3Fa2CyQDdwZkqtDi2D+3+99Bbw3zixOZpElPB90jtRrxbu2tfVOE8nC
RCjdqLMYq7EPlrCzPiq85PbQjYLA8gDJw9WUZkeb3KJzFdv3zyV9VHc8eMF83JNq
gRnKlwXAXPU=
=cqkj
-----END PGP SIGNATURE-----

Merge tag 'x86-urgent-2025-04-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 fixes from Ingo Molnar:

 - Fix hypercall detection on Xen guests

 - Extend the AMD microcode loader SHA check to Zen5, to block loading
   of any unreleased standalone Zen5 microcode patches

 - Add new Intel CPU model number for Bartlett Lake

 - Fix the workaround for AMD erratum 1054

 - Fix buggy early memory acceptance between SEV-SNP guests and the EFI
   stub

* tag 'x86-urgent-2025-04-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot/sev: Avoid shared GHCB page for early memory acceptance
  x86/cpu/amd: Fix workaround for erratum 1054
  x86/cpu: Add CPU model number for Bartlett Lake CPUs with Raptor Cove cores
  x86/microcode/AMD: Extend the SHA check to Zen5, block loading of any unreleased standalone Zen5 microcode patches
  x86/xen: Fix __xen_hypercall_setfunc()
commit 3088d26962
arch/x86/boot/compressed/mem.c

@@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void)

 void arch_accept_memory(phys_addr_t start, phys_addr_t end)
 {
+        static bool sevsnp;
+
         /* Platform-specific memory-acceptance call goes here */
         if (early_is_tdx_guest()) {
                 if (!tdx_accept_memory(start, end))
                         panic("TDX: Failed to accept memory\n");
-        } else if (sev_snp_enabled()) {
+        } else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
+                sevsnp = true;
                 snp_accept_memory(start, end);
         } else {
                 error("Cannot accept memory: unknown platform\n");
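The mem.c hunk replaces the sev_snp_enabled() check with a direct read of the SEV status MSR, cached in a static bool so the MSR is only consulted until SNP has been confirmed once. Below is a minimal, self-contained sketch of that detect-once-and-cache pattern; the MSR constant values match the architectural definitions, while read_sev_status_msr() is a hypothetical stand-in for a raw RDMSR wrapper, not a kernel function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_AMD64_SEV                   0xc0010131
#define MSR_AMD64_SEV_SNP_ENABLED       (1ULL << 2)

/* Hypothetical stand-in for a raw RDMSR of MSR_AMD64_SEV; pretend SNP is on. */
static uint64_t read_sev_status_msr(void)
{
        return MSR_AMD64_SEV_SNP_ENABLED;
}

/* Detect SNP once and cache it, mirroring the sticky static bool in the hunk. */
static bool snp_guest(void)
{
        static bool sevsnp;

        if (!sevsnp && (read_sev_status_msr() & MSR_AMD64_SEV_SNP_ENABLED))
                sevsnp = true;

        return sevsnp;
}

int main(void)
{
        printf("SNP guest: %s\n", snp_guest() ? "yes" : "no");
        return 0;
}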
arch/x86/boot/compressed/sev.c

@@ -164,10 +164,7 @@ bool sev_snp_enabled(void)

 static void __page_state_change(unsigned long paddr, enum psc_op op)
 {
-        u64 val;
-
-        if (!sev_snp_enabled())
-                return;
+        u64 val, msr;

         /*
          * If private -> shared then invalidate the page before requesting the
@@ -176,6 +173,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
         if (op == SNP_PAGE_STATE_SHARED)
                 pvalidate_4k_page(paddr, paddr, false);

+        /* Save the current GHCB MSR value */
+        msr = sev_es_rd_ghcb_msr();
+
         /* Issue VMGEXIT to change the page state in RMP table. */
         sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
         VMGEXIT();
@@ -185,6 +185,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
         if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
                 sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

+        /* Restore the GHCB MSR value */
+        sev_es_wr_ghcb_msr(msr);
+
         /*
          * Now that page state is changed in the RMP table, validate it so that it is
          * consistent with the RMP entry.
@@ -195,11 +198,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)

 void snp_set_page_private(unsigned long paddr)
 {
+        if (!sev_snp_enabled())
+                return;
+
         __page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
 }

 void snp_set_page_shared(unsigned long paddr)
 {
+        if (!sev_snp_enabled())
+                return;
+
         __page_state_change(paddr, SNP_PAGE_STATE_SHARED);
 }

@@ -223,56 +232,10 @@ static bool early_setup_ghcb(void)
         return true;
 }

-static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
-                                       phys_addr_t pa, phys_addr_t pa_end)
-{
-        struct psc_hdr *hdr;
-        struct psc_entry *e;
-        unsigned int i;
-
-        hdr = &desc->hdr;
-        memset(hdr, 0, sizeof(*hdr));
-
-        e = desc->entries;
-
-        i = 0;
-        while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
-                hdr->end_entry = i;
-
-                e->gfn = pa >> PAGE_SHIFT;
-                e->operation = SNP_PAGE_STATE_PRIVATE;
-                if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
-                        e->pagesize = RMP_PG_SIZE_2M;
-                        pa += PMD_SIZE;
-                } else {
-                        e->pagesize = RMP_PG_SIZE_4K;
-                        pa += PAGE_SIZE;
-                }
-
-                e++;
-                i++;
-        }
-
-        if (vmgexit_psc(boot_ghcb, desc))
-                sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-
-        pvalidate_pages(desc);
-
-        return pa;
-}
-
 void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 {
-        struct snp_psc_desc desc = {};
-        unsigned int i;
-        phys_addr_t pa;
-
-        if (!boot_ghcb && !early_setup_ghcb())
-                sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-
-        pa = start;
-        while (pa < end)
-                pa = __snp_accept_memory(&desc, pa, end);
+        for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
+                __page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
 }

 void sev_es_shutdown_ghcb(void)
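The sev.c hunks move early memory acceptance off the shared GHCB page (vmgexit_psc() on a struct snp_psc_desc) and onto per-page requests over the GHCB MSR protocol, at the cost of one VMGEXIT per 4K page. __page_state_change() now saves and restores the GHCB MSR around the exchange so that whatever the MSR held beforehand (typically the GHCB page address) is preserved for later callers. The following rough sketch illustrates the MSR round trip; it follows the request/response layout that the GHCB_MSR_PSC_* macros in the hunk encode (info 0x014 for a PSC request, 0x015 for the response), but the exact bit positions are my reading of that layout and are not copied from the kernel headers.

#include <stdint.h>
#include <stdio.h>

#define GHCB_MSR_PSC_REQ        0x014ULL        /* GHCBInfo value: PSC request  */
#define GHCB_MSR_PSC_RESP       0x015ULL        /* GHCBInfo value: PSC response */

/* Compose a request: op in bits 55:52, GFN in bits 51:12, info in bits 11:0. */
static uint64_t psc_req(uint64_t gfn, unsigned int op)
{
        return ((uint64_t)(op & 0xf) << 52) |
               ((gfn & ((1ULL << 40) - 1)) << 12) |
               GHCB_MSR_PSC_REQ;
}

/* A response passes if its info field is 0x015 and its error field (63:32) is 0. */
static int psc_resp_ok(uint64_t val)
{
        return (val & 0xfffULL) == GHCB_MSR_PSC_RESP && (val >> 32) == 0;
}

int main(void)
{
        /* e.g. GFN 0x12345, op 1 ("make private" in the SNP page-state encoding) */
        uint64_t req = psc_req(0x12345, 1);

        printf("request: %#llx\n", (unsigned long long)req);
        printf("resp ok: %d\n", psc_resp_ok(GHCB_MSR_PSC_RESP));
        return 0;
}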
arch/x86/boot/compressed/sev.h

@@ -12,11 +12,13 @@

 bool sev_snp_enabled(void);
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+u64 sev_get_status(void);

 #else

 static inline bool sev_snp_enabled(void) { return false; }
 static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
+static inline u64 sev_get_status(void) { return 0; }

 #endif
arch/x86/include/asm/intel-family.h

@@ -126,6 +126,8 @@
 #define INTEL_GRANITERAPIDS_X           IFM(6, 0xAD) /* Redwood Cove */
 #define INTEL_GRANITERAPIDS_D           IFM(6, 0xAE)

+#define INTEL_BARTLETTLAKE              IFM(6, 0xD7) /* Raptor Cove */
+
 /* "Hybrid" Processors (P-Core/E-Core) */

 #define INTEL_LAKEFIELD                 IFM(6, 0x8A) /* Sunny Cove / Tremont */
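The new model define is consumed like any other entry in intel-family.h. As a hedged illustration of assumed usage (not part of this commit), driver code would typically match on it through an x86_cpu_id table:

/* Illustrative only: matching the new model in driver code. */
#include <asm/cpu_device_id.h>  /* X86_MATCH_VFM(), x86_match_cpu() */
#include <asm/intel-family.h>   /* INTEL_BARTLETTLAKE */

static const struct x86_cpu_id bartlettlake_ids[] = {
        X86_MATCH_VFM(INTEL_BARTLETTLAKE, NULL),
        {}
};

static bool is_bartlettlake(void)
{
        return x86_match_cpu(bartlettlake_ids) != NULL;
}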
arch/x86/kernel/cpu/amd.c

@@ -869,6 +869,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)

         pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
         setup_force_cpu_bug(X86_BUG_DIV0);
+
+        /*
+         * Turn off the Instructions Retired free counter on machines that are
+         * susceptible to erratum #1054 "Instructions Retired Performance
+         * Counter May Be Inaccurate".
+         */
+        if (c->x86_model < 0x30) {
+                msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+                clear_cpu_cap(c, X86_FEATURE_IRPERF);
+        }
 }

 static bool cpu_has_zenbleed_microcode(void)
@@ -1052,13 +1062,8 @@ static void init_amd(struct cpuinfo_x86 *c)
         if (!cpu_feature_enabled(X86_FEATURE_XENPV))
                 set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

-        /*
-         * Turn on the Instructions Retired free counter on machines not
-         * susceptible to erratum #1054 "Instructions Retired Performance
-         * Counter May Be Inaccurate".
-         */
-        if (cpu_has(c, X86_FEATURE_IRPERF) &&
-            (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
+        /* Enable the Instructions Retired free counter */
+        if (cpu_has(c, X86_FEATURE_IRPERF))
                 msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

         check_null_seg_clears_base(c);
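Taken together, the two amd.c hunks move the erratum #1054 decision: init_amd_zen1() now clears X86_FEATURE_IRPERF (and the HWCR enable bit) on susceptible Zen1 parts with model < 0x30, and init_amd() then enables the free-running "instructions retired" counter based purely on the feature bit. The sketch below is a distillation of the resulting policy for illustration, not a function that exists in the kernel.

#include <stdbool.h>
#include <stdio.h>

struct cpu { bool zen1; unsigned int model; bool has_irperf; };

/* Erratum #1054 path: susceptible Zen1 models lose the feature bit early. */
static void apply_zen1_erratum_1054(struct cpu *c)
{
        if (c->zen1 && c->model < 0x30)
                c->has_irperf = false;
}

/* Later, the generic init path enables the counter purely on the feature bit. */
static bool irperf_enabled(struct cpu c)
{
        apply_zen1_erratum_1054(&c);
        return c.has_irperf;
}

int main(void)
{
        struct cpu old_zen1 = { .zen1 = true,  .model = 0x01, .has_irperf = true };
        struct cpu zen3     = { .zen1 = false, .model = 0x21, .has_irperf = true };

        printf("Zen1 model 0x01: IRPERF %s\n", irperf_enabled(old_zen1) ? "on" : "off");
        printf("Zen3 model 0x21: IRPERF %s\n", irperf_enabled(zen3) ? "on" : "off");
        return 0;
}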
arch/x86/kernel/cpu/microcode/amd.c

@@ -199,6 +199,12 @@ static bool need_sha_check(u32 cur_rev)
         case 0xa70c0: return cur_rev <= 0xa70C009; break;
         case 0xaa001: return cur_rev <= 0xaa00116; break;
         case 0xaa002: return cur_rev <= 0xaa00218; break;
+        case 0xb0021: return cur_rev <= 0xb002146; break;
+        case 0xb1010: return cur_rev <= 0xb101046; break;
+        case 0xb2040: return cur_rev <= 0xb204031; break;
+        case 0xb4040: return cur_rev <= 0xb404031; break;
+        case 0xb6000: return cur_rev <= 0xb600031; break;
+        case 0xb7000: return cur_rev <= 0xb700031; break;
         default: break;
         }

@@ -214,8 +220,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
         struct sha256_state s;
         int i;

-        if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
-            x86_family(bsp_cpuid_1_eax) > 0x19)
+        if (x86_family(bsp_cpuid_1_eax) < 0x17)
                 return true;

         if (!need_sha_check(cur_rev))
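The microcode hunks extend the SHA-256 gating to Zen5: need_sha_check() gains the 0xb0021 through 0xb7000 patch-level rows, and verify_sha256_digest() drops the family > 0x19 early-out, so family 0x1a (Zen5) is no longer exempt. The compressed sketch below shows how the two checks combine; it assumes the switch keys on cur_rev >> 8 (consistent with the case/return pairs shown above), elides most table rows, and is an illustration of the flow rather than the kernel functions themselves.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One of the new Zen5 rows; the real table has many more entries. */
static bool need_sha_check(uint32_t cur_rev)
{
        switch (cur_rev >> 8) {
        case 0xb7000: return cur_rev <= 0xb700031;
        /* ... other patch-level bases elided ... */
        default: return false;
        }
}

/* The digest must now be verified for every family from 0x17 (Zen1) upward. */
static bool must_verify_digest(uint8_t family, uint32_t cur_rev)
{
        if (family < 0x17)
                return false;
        return need_sha_check(cur_rev);
}

int main(void)
{
        printf("Zen5 (0x1a), rev 0xb700030: %d\n", must_verify_digest(0x1a, 0xb700030));
        printf("Zen5 (0x1a), rev 0xb700032: %d\n", must_verify_digest(0x1a, 0xb700032));
        return 0;
}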
arch/x86/xen/enlighten.c

@@ -103,10 +103,6 @@ noinstr void *__xen_hypercall_setfunc(void)
         void (*func)(void);

         /*
-         * Xen is supported only on CPUs with CPUID, so testing for
-         * X86_FEATURE_CPUID is a test for early_cpu_init() having been
-         * run.
-         *
          * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
          * dependency chain: it is being called via the xen_hypercall static
          * call when running as a PVH or HVM guest. Hypercalls need to be
@@ -118,8 +114,7 @@ noinstr void *__xen_hypercall_setfunc(void)
          */
         instrumentation_begin();

-        if (!boot_cpu_has(X86_FEATURE_CPUID))
-                xen_get_vendor();
+        xen_get_vendor();

         if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
              boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
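The enlighten.c change makes xen_get_vendor() run unconditionally instead of only when X86_FEATURE_CPUID is not yet set; the merge summary describes this as fixing hypercall detection on Xen guests. The vendor it fills in is what the remaining context lines use to pick the hypercall stub (AMD and Hygon use VMMCALL, Intel uses VMCALL — a hardware fact, not shown in the hunk). The toy sketch below only illustrates that vendor-based selection, using simplified stand-ins rather than the kernel's xen_hypercall_* symbols.

#include <stdio.h>

enum x86_vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_HYGON };

static void hypercall_vmcall(void)  { puts("VMCALL-style stub selected");  }
static void hypercall_vmmcall(void) { puts("VMMCALL-style stub selected"); }

/* Pick the hypercall stub by CPU vendor, mirroring the condition in the diff. */
static void (*pick_hypercall_stub(enum x86_vendor v))(void)
{
        if (v == VENDOR_AMD || v == VENDOR_HYGON)
                return hypercall_vmmcall;
        return hypercall_vmcall;
}

int main(void)
{
        pick_hypercall_stub(VENDOR_AMD)();
        pick_hypercall_stub(VENDOR_INTEL)();
        return 0;
}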