Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ (synced 2025-04-19 20:58:31 +09:00)
Merge branch 'kvm-arm64/pmu-fixes' into kvmarm/next
* kvm-arm64/pmu-fixes:
  : vPMU fixes for 6.15 courtesy of Akihiko Odaki
  :
  : Various fixes to KVM's vPMU implementation, notably ensuring
  : userspace-directed changes to the PMCs are reflected in the backing perf
  : events.
  KVM: arm64: PMU: Reload when resetting
  KVM: arm64: PMU: Reload when user modifies registers
  KVM: arm64: PMU: Fix SET_ONE_REG for vPMC regs
  KVM: arm64: PMU: Assume PMU presence in pmu-emul.c
  KVM: arm64: PMU: Set raw values from user to PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR}

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
This commit is contained in:
commit 369c012268
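For context, a minimal userspace sketch (not part of this merge, and hedged accordingly): how a VMM might write the cycle counter through KVM_SET_ONE_REG. With this series applied, the value lands in the vCPU's shadow register and queues KVM_REQ_RELOAD_PMU, so the backing perf event picks up the new value before the next guest entry. The vcpu_fd is assumed to be an already-created arm64 vCPU file descriptor, and the arm64 uapi headers are assumed to be available.

#include <linux/kvm.h>
#include <asm/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* PMCCNTR_EL0 is encoded as op0=3, op1=3, CRn=9, CRm=13, op2=0 */
#define PMCCNTR_EL0_ID  ARM64_SYS_REG(3, 3, 9, 13, 0)

/* Hypothetical helper for illustration only. */
static int set_cycle_counter(int vcpu_fd, uint64_t value)
{
        struct kvm_one_reg reg = {
                .id   = PMCCNTR_EL0_ID,
                .addr = (uint64_t)(uintptr_t)&value,
        };

        /* Returns 0 on success, -1 with errno set otherwise. */
        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}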
@@ -858,9 +858,11 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
         if (ret)
                 return ret;
 
-        ret = kvm_arm_pmu_v3_enable(vcpu);
-        if (ret)
-                return ret;
+        if (kvm_vcpu_has_pmu(vcpu)) {
+                ret = kvm_arm_pmu_v3_enable(vcpu);
+                if (ret)
+                        return ret;
+        }
 
         if (is_protected_kvm_enabled()) {
                 ret = pkvm_create_hyp_vm(kvm);
@@ -1175,7 +1177,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                  */
                 preempt_disable();
 
-                kvm_pmu_flush_hwstate(vcpu);
+                if (kvm_vcpu_has_pmu(vcpu))
+                        kvm_pmu_flush_hwstate(vcpu);
 
                 local_irq_disable();
 
@@ -1194,7 +1197,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
                         vcpu->mode = OUTSIDE_GUEST_MODE;
                         isb(); /* Ensure work in x_flush_hwstate is committed */
-                        kvm_pmu_sync_hwstate(vcpu);
+                        if (kvm_vcpu_has_pmu(vcpu))
+                                kvm_pmu_sync_hwstate(vcpu);
                         if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                                 kvm_timer_sync_user(vcpu);
                         kvm_vgic_sync_hwstate(vcpu);
@@ -1224,7 +1228,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                  * that the vgic can properly sample the updated state of the
                  * interrupt line.
                  */
-                kvm_pmu_sync_hwstate(vcpu);
+                if (kvm_vcpu_has_pmu(vcpu))
+                        kvm_pmu_sync_hwstate(vcpu);
 
                 /*
                  * Sync the vgic state before syncing the timer state because
@@ -2518,7 +2518,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
         vcpu_clear_flag(vcpu, IN_NESTED_ERET);
         preempt_enable();
 
-        kvm_pmu_nested_transition(vcpu);
+        if (kvm_vcpu_has_pmu(vcpu))
+                kvm_pmu_nested_transition(vcpu);
 }
 
 static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2601,7 +2602,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
         kvm_arch_vcpu_load(vcpu, smp_processor_id());
         preempt_enable();
 
-        kvm_pmu_nested_transition(vcpu);
+        if (kvm_vcpu_has_pmu(vcpu))
+                kvm_pmu_nested_transition(vcpu);
 
         return 1;
 }
@@ -154,9 +154,6 @@ static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
  */
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 {
-        if (!kvm_vcpu_has_pmu(vcpu))
-                return 0;
-
         return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
 }
 
@@ -195,12 +192,22 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
  */
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
-        if (!kvm_vcpu_has_pmu(vcpu))
-                return;
-
         kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
 }
 
+/**
+ * kvm_pmu_set_counter_value_user - set PMU counter value from user
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+        kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
+        __vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
+        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+}
+
 /**
  * kvm_pmu_release_perf_event - remove the perf event
  * @pmc: The PMU counter pointer
@@ -251,20 +258,6 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
                 pmu->pmc[i].idx = i;
 }
 
-/**
- * kvm_pmu_vcpu_reset - reset pmu state for cpu
- * @vcpu: The vcpu pointer
- *
- */
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
-{
-        unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
-        int i;
-
-        for_each_set_bit(i, &mask, 32)
-                kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
-}
-
 /**
  * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
  * @vcpu: The vcpu pointer
@@ -354,7 +347,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
         int i;
 
-        if (!kvm_vcpu_has_pmu(vcpu) || !val)
+        if (!val)
                 return;
 
         for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -405,9 +398,6 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         bool overflow;
 
-        if (!kvm_vcpu_has_pmu(vcpu))
-                return;
-
         overflow = kvm_pmu_overflow_status(vcpu);
         if (pmu->irq_level == overflow)
                 return;
@@ -603,9 +593,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
         int i;
 
-        if (!kvm_vcpu_has_pmu(vcpu))
-                return;
-
         /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
         if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
                 val &= ~ARMV8_PMU_PMCR_LP;
@@ -793,9 +780,6 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
         struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
         u64 reg;
 
-        if (!kvm_vcpu_has_pmu(vcpu))
-                return;
-
         reg = counter_index_to_evtreg(pmc->idx);
         __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
 
@@ -901,9 +885,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
         u64 val, mask = 0;
         int base, i, nr_events;
 
-        if (!kvm_vcpu_has_pmu(vcpu))
-                return 0;
-
         if (!pmceid1) {
                 val = compute_pmceid0(cpu_pmu);
                 base = 0;
@@ -944,9 +925,6 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
-        if (!kvm_vcpu_has_pmu(vcpu))
-                return 0;
-
         if (!vcpu->arch.pmu.created)
                 return -EINVAL;
 
@@ -969,9 +947,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
                 return -EINVAL;
         }
 
-        /* One-off reload of the PMU on first run */
-        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
-
         return 0;
 }
 
@@ -1295,9 +1270,6 @@ void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
         unsigned long mask;
         int i;
 
-        if (!kvm_vcpu_has_pmu(vcpu))
-                return;
-
         mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
         for_each_set_bit(i, &mask, 32) {
                 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
@@ -196,9 +196,6 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
         vcpu->arch.reset_state.reset = false;
         spin_unlock(&vcpu->arch.mp_state_lock);
 
-        /* Reset PMU outside of the non-preemptible section */
-        kvm_pmu_vcpu_reset(vcpu);
-
         preempt_disable();
         loaded = (vcpu->cpu != -1);
         if (loaded)
@@ -967,6 +967,22 @@ static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
         return 0;
 }
 
+static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                          u64 val)
+{
+        u64 idx;
+
+        if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
+                /* PMCCNTR_EL0 */
+                idx = ARMV8_PMU_CYCLE_IDX;
+        else
+                /* PMEVCNTRn_EL0 */
+                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+
+        kvm_pmu_set_counter_value_user(vcpu, idx, val);
+        return 0;
+}
+
 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
                               struct sys_reg_params *p,
                               const struct sys_reg_desc *r)
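A worked illustration of the index decoding in set_pmu_evcntr() above (illustration only, not code from the patch): PMEVCNTR<n>_EL0 spreads n across CRm[1:0] and Op2[2:0], so PMEVCNTR10_EL0 (CRm=0b1001, Op2=0b010) decodes back to counter 10.

#include <assert.h>

/* Illustration only: same decoding as set_pmu_evcntr() above. */
static unsigned int pmevcntr_index(unsigned int crm, unsigned int op2)
{
        return ((crm & 3) << 3) | (op2 & 7);
}

int main(void)
{
        /* PMEVCNTR10_EL0 encodes as CRm=0b1001, Op2=0b010 */
        assert(pmevcntr_index(0x9, 0x2) == 10);
        return 0;
}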
@@ -1058,25 +1074,10 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
 {
-        bool set;
+        u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
 
-        val &= kvm_pmu_accessible_counter_mask(vcpu);
-
-        switch (r->reg) {
-        case PMOVSSET_EL0:
-                /* CRm[1] being set indicates a SET register, and CLR otherwise */
-                set = r->CRm & 2;
-                break;
-        default:
-                /* Op2[0] being set indicates a SET register, and CLR otherwise */
-                set = r->Op2 & 1;
-                break;
-        }
-
-        if (set)
-                __vcpu_sys_reg(vcpu, r->reg) |= val;
-        else
-                __vcpu_sys_reg(vcpu, r->reg) &= ~val;
+        __vcpu_sys_reg(vcpu, r->reg) = val & mask;
+        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 
         return 0;
 }
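A hedged sketch of the behavioural change in set_pmreg() above: a userspace write to PM{C,I}NTEN{SET,CLR} or PMOVS{SET,CLR} now stores the raw value (masked to the accessible counters) instead of setting or clearing individual bits of the previous contents. The helper and the mask value below are hypothetical, for illustration only.

#include <assert.h>
#include <stdint.h>

/* Illustration only: raw-value semantics of the new set_pmreg(). */
static uint64_t set_pmreg_user(uint64_t old, uint64_t val, uint64_t mask)
{
        (void)old;              /* previous contents are discarded */
        return val & mask;
}

int main(void)
{
        /* Assume six accessible counters, i.e. mask 0x3f (hypothetical). */
        assert(set_pmreg_user(0x21, 0x05, 0x3f) == 0x05);
        return 0;
}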
@@ -1236,6 +1237,8 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                 val |= ARMV8_PMU_PMCR_LC;
 
         __vcpu_sys_reg(vcpu, r->reg) = val;
+        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
         return 0;
 }
 
@@ -1262,6 +1265,7 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 #define PMU_PMEVCNTR_EL0(n) \
         { PMU_SYS_REG(PMEVCNTRn_EL0(n)), \
           .reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
+          .set_user = set_pmu_evcntr, \
           .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
@@ -1880,12 +1884,14 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
                                       const struct sys_reg_desc *rd)
 {
-        u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+        u8 perfmon;
         u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
 
         val &= ~ID_DFR0_EL1_PerfMon_MASK;
-        if (kvm_vcpu_has_pmu(vcpu))
+        if (kvm_vcpu_has_pmu(vcpu)) {
+                perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
                 val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
+        }
 
         val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
 
@@ -3052,7 +3058,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
           .access = access_pmceid, .reset = NULL },
         { PMU_SYS_REG(PMCCNTR_EL0),
           .access = access_pmu_evcntr, .reset = reset_unknown,
-          .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
+          .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
+          .set_user = set_pmu_evcntr },
         { PMU_SYS_REG(PMXEVTYPER_EL0),
           .access = access_pmu_evtyper, .reset = NULL },
         { PMU_SYS_REG(PMXEVCNTR_EL0),
@@ -4712,6 +4719,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
         }
 
         set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+
+        if (kvm_vcpu_has_pmu(vcpu))
+                kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
 }
 
 /**
@@ -41,11 +41,11 @@ bool kvm_supports_guest_pmuv3(void);
 #define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
 void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
@@ -109,6 +109,8 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 }
 static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
                                              u64 select_idx, u64 val) {}
+static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
+                                                   u64 select_idx, u64 val) {}
 static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 {
         return 0;
@@ -118,7 +120,6 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
         return 0;
 }
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}