arm64: KVM: Add access handler for PMCNTENSET and PMCNTENCLR register
Since the reset value of PMCNTENSET and PMCNTENCLR is UNKNOWN, use reset_unknown as their reset handler. Add a handler to emulate writes to the PMCNTENSET and PMCNTENCLR registers: a write to PMCNTENSET calls perf_event_enable to enable the corresponding perf event, while a write to PMCNTENCLR calls perf_event_disable to disable it.

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 051ff581ce
commit 96b0eebcc6

4 changed files with 107 additions and 4 deletions
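For orientation, this is the guest-visible behaviour being emulated: a guest enables or disables individual PMU counters by writing a bit mask to PMCNTENSET_EL0 or PMCNTENCLR_EL0. A minimal guest-side sketch follows (helper names are hypothetical, and it assumes EL0 access to the PMU has already been granted, e.g. via PMUSERENR_EL0):

#include <stdint.h>

/* Hypothetical guest-side helpers: bit n of the written mask targets
 * event counter n; bit 31 targets the cycle counter (PMCCNTR_EL0). */
static inline void pmu_enable_counters(uint64_t mask)
{
	/* Writing 1s to PMCNTENSET_EL0 enables the selected counters;
	 * 0 bits are ignored. This is the write that KVM now traps and
	 * forwards to kvm_pmu_enable_counter(). */
	asm volatile("msr pmcntenset_el0, %0" :: "r"(mask));
}

static inline void pmu_disable_counters(uint64_t mask)
{
	/* Writing 1s to PMCNTENCLR_EL0 disables the selected counters. */
	asm volatile("msr pmcntenclr_el0, %0" :: "r"(mask));
}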
arch/arm64/include/asm/kvm_host.h

@@ -123,6 +123,7 @@ enum vcpu_sysreg {
 	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
 	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
 	PMCCNTR_EL0,	/* Cycle Counter Register */
+	PMCNTENSET_EL0,	/* Count Enable Set Register */
 
 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
arch/arm64/kvm/sys_regs.c

@@ -563,6 +563,33 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			   const struct sys_reg_desc *r)
+{
+	u64 val, mask;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return trap_raz_wi(vcpu, p, r);
+
+	mask = kvm_pmu_valid_counter_mask(vcpu);
+	if (p->is_write) {
+		val = p->regval & mask;
+		if (r->Op2 & 0x1) {
+			/* accessing PMCNTENSET_EL0 */
+			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			kvm_pmu_enable_counter(vcpu, val);
+		} else {
+			/* accessing PMCNTENCLR_EL0 */
+			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			kvm_pmu_disable_counter(vcpu, val);
+		}
+	} else {
+		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+	}
+
+	return true;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	/* DBGBVRn_EL1 */						\
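Both registers share a single piece of vCPU state (PMCNTENSET_EL0); the handler tells them apart by bit 0 of the trapped Op2 encoding (PMCNTENSET_EL0 is Op2=0b001, PMCNTENCLR_EL0 is Op2=0b010), and reads of either encoding return the masked enable bits. A small stand-alone model of the write path, with hypothetical names and an assumed 6-counter PMU mask:

#include <stdio.h>
#include <stdint.h>

static uint64_t pmcntenset;	/* single backing store, as in the patch */

/* Model of the access_pmcnten() write path above (names hypothetical). */
static void write_pmcnten(unsigned int op2, uint64_t val, uint64_t mask)
{
	val &= mask;			/* ignore bits for absent counters */
	if (op2 & 0x1)			/* Op2 = 0b001: PMCNTENSET_EL0 */
		pmcntenset |= val;	/* 1 bits set enable bits */
	else				/* Op2 = 0b010: PMCNTENCLR_EL0 */
		pmcntenset &= ~val;	/* 1 bits clear enable bits */
}

int main(void)
{
	uint64_t mask = 0x8000003f;	/* 6 event counters + cycle counter */

	write_pmcnten(0b001, 0x5, mask);	/* enable counters 0 and 2 */
	printf("after SET: %#llx\n", (unsigned long long)pmcntenset);	/* 0x5 */
	write_pmcnten(0b010, 0x1, mask);	/* clear counter 0 only */
	printf("after CLR: %#llx\n", (unsigned long long)pmcntenset);	/* 0x4 */
	return 0;
}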
@@ -757,10 +784,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  access_pmcr, reset_pmcr, },
 	/* PMCNTENSET_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
-	  trap_raz_wi },
+	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 	/* PMCNTENCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
-	  trap_raz_wi },
+	  access_pmcnten, NULL, PMCNTENSET_EL0 },
 	/* PMOVSCLR_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
 	  trap_raz_wi },
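Note the asymmetry in the two new descriptors: both route to access_pmcnten and name PMCNTENSET_EL0 as their backing register, but only the PMCNTENSET_EL0 entry carries reset_unknown, while the PMCNTENCLR_EL0 entry passes a NULL reset handler. Since the two encodings share one piece of storage, resetting it once through the SET entry is sufficient (a NULL reset handler being, presumably, simply skipped at vCPU reset).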
@@ -1057,8 +1084,8 @@ static const struct sys_reg_desc cp15_regs[] = {
 
 	/* PMU */
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
-	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
 	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
include/kvm/arm_pmu.h

@@ -40,6 +40,9 @@ struct kvm_pmu {
 #define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
 #else
 struct kvm_pmu {
 };
@@ -52,6 +55,12 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 }
 static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
 					     u64 select_idx, u64 val) {}
+static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
 #endif
 
 #endif
virt/kvm/arm/pmu.c

@@ -61,3 +61,69 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
 	vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
 }
+
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+
+	val &= ARMV8_PMU_PMCR_N_MASK;
+	if (val == 0)
+		return BIT(ARMV8_PMU_CYCLE_IDX);
+	else
+		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
+}
+
+/**
+ * kvm_pmu_enable_counter - enable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENSET register
+ *
+ * Call perf_event_enable to start counting the perf event
+ */
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+
+	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+		return;
+
+	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+		if (!(val & BIT(i)))
+			continue;
+
+		pmc = &pmu->pmc[i];
+		if (pmc->perf_event) {
+			perf_event_enable(pmc->perf_event);
+			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+				kvm_debug("fail to enable perf event\n");
+		}
+	}
+}
+
+/**
+ * kvm_pmu_disable_counter - disable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENCLR register
+ *
+ * Call perf_event_disable to stop counting the perf event
+ */
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+
+	if (!val)
+		return;
+
+	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+		if (!(val & BIT(i)))
+			continue;
+
+		pmc = &pmu->pmc[i];
+		if (pmc->perf_event)
+			perf_event_disable(pmc->perf_event);
+	}
+}
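kvm_pmu_valid_counter_mask() turns PMCR_EL0.N, the number of implemented event counters, into a bit mask that always includes the cycle counter. A stand-alone sketch of the same arithmetic, with the ARMV8_PMU_* constants inlined as assumptions (N taken to live at bits [15:11] of PMCR_EL0, the cycle counter at bit 31 of PMCNTEN{SET,CLR}):

#include <stdio.h>
#include <stdint.h>

#define PMCR_N_SHIFT	11	/* assumed: PMCR_EL0.N at bits [15:11] */
#define PMCR_N_MASK	0x1f
#define CYCLE_IDX	31	/* assumed: cycle counter enable bit */

/* Same arithmetic as kvm_pmu_valid_counter_mask(), without kernel macros:
 * GENMASK(n - 1, 0) is the low n bits, BIT(31) the cycle counter. */
static uint64_t valid_counter_mask(uint64_t pmcr)
{
	uint64_t n = (pmcr >> PMCR_N_SHIFT) & PMCR_N_MASK;

	if (n == 0)			/* no event counters implemented */
		return 1ULL << CYCLE_IDX;
	return ((1ULL << n) - 1) | (1ULL << CYCLE_IDX);
}

int main(void)
{
	uint64_t pmcr = 6ULL << PMCR_N_SHIFT;	/* a PMU with 6 event counters */

	printf("mask = %#llx\n", (unsigned long long)valid_counter_mask(pmcr));
	/* prints 0x8000003f: bits 0-5 (event counters) plus bit 31 (PMCCNTR) */
	return 0;
}

Note also the asymmetry between the two new helpers: kvm_pmu_enable_counter() bails out unless the global PMCR_EL0.E enable bit is set, whereas kvm_pmu_disable_counter() only requires a non-zero mask, so counters can always be stopped.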