-rw-r--r--  arch/arm64/kvm/pmu-emul.c  |  8
-rw-r--r--  arch/arm64/kvm/sys_regs.c  | 12
-rw-r--r--  include/kvm/arm_pmu.h      |  4
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 28f938f145ac..fd08c4b53be3 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -244,7 +244,7 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
*/
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
- unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
+ unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
int i;
for_each_set_bit(i, &mask, 32)
@@ -283,7 +283,7 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
return idx >= hpmn;
}
-u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
@@ -592,7 +592,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
- unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
+ unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
@@ -822,7 +822,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
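
For orientation while reading the rename above: the mask in question is derived from PMCR_EL0.N (the number of implemented event counters) plus the fixed cycle counter bit. The following is a minimal userspace sketch of that derivation under those assumptions, not the kernel's kvm_pmu_implemented_counter_mask() itself, which uses the FIELD_GET/GENMASK helpers and kvm_vcpu_read_pmcr() shown in the hunks above.

/* Hedged illustration: implemented-counter mask from PMCR_EL0.N. */
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_CYCLE_IDX 31	/* fixed cycle counter index */

static uint64_t implemented_counter_mask(uint64_t pmcr_n)
{
	/* The cycle counter is always implemented. */
	uint64_t mask = (uint64_t)1 << ARMV8_PMU_CYCLE_IDX;

	/* Low N bits cover the generic event counters (GENMASK(n - 1, 0)). */
	if (pmcr_n)
		mask |= ((uint64_t)1 << pmcr_n) - 1;
	return mask;
}

int main(void)
{
	/* A PMU with 6 event counters: bits 0-5 plus bit 31. */
	printf("0x%llx\n", (unsigned long long)implemented_counter_mask(6));
	return 0;
}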
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 9115a75b2c61..03a64b72e45d 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1168,7 +1168,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
bool set;
- val &= kvm_pmu_valid_counter_mask(vcpu);
+ val &= kvm_pmu_implemented_counter_mask(vcpu);
switch (r->reg) {
case PMOVSSET_EL0:
@@ -1191,7 +1191,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
return 0;
@@ -1205,7 +1205,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (pmu_access_el0_disabled(vcpu))
return false;
- mask = kvm_pmu_valid_counter_mask(vcpu);
+ mask = kvm_pmu_implemented_counter_mask(vcpu);
if (p->is_write) {
val = p->regval & mask;
if (r->Op2 & 0x1) {
@@ -1228,7 +1228,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
if (check_pmu_access_disabled(vcpu, 0))
return false;
@@ -1252,7 +1252,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+ u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
if (pmu_access_el0_disabled(vcpu))
return false;
@@ -1282,7 +1282,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (pmu_write_swinc_el0_disabled(vcpu))
return false;
- mask = kvm_pmu_valid_counter_mask(vcpu);
+ mask = kvm_pmu_implemented_counter_mask(vcpu);
kvm_pmu_software_increment(vcpu, p->regval & mask);
return true;
}
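
All of the sys_regs.c accessors touched above apply the mask the same way: guest-supplied bits are ANDed with the implemented-counter mask before any vCPU state is updated, so bits for unimplemented counters can never become visible. A hedged sketch of that SET/CLR pattern follows; the names are hypothetical stand-ins for the __vcpu_sys_reg() storage and register semantics, not the kernel functions themselves.

/* Hedged illustration of the masked SET/CLR register-write pattern. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pmcnten;	/* stand-in for the vCPU's PMCNTEN state */

static void pmcnten_set_write(uint64_t guest_val, uint64_t impl_mask)
{
	pmcnten |= guest_val & impl_mask;	/* PMCNTENSET_EL0 semantics */
}

static void pmcnten_clr_write(uint64_t guest_val, uint64_t impl_mask)
{
	pmcnten &= ~(guest_val & impl_mask);	/* PMCNTENCLR_EL0 semantics */
}

int main(void)
{
	uint64_t impl_mask = 0x8000003f;	/* 6 counters + cycle counter */

	pmcnten_set_write(~0ULL, impl_mask);	/* unimplemented bits dropped */
	printf("0x%llx\n", (unsigned long long)pmcnten);	/* 0x8000003f */
	pmcnten_clr_write((uint64_t)1 << 31, impl_mask);
	printf("0x%llx\n", (unsigned long long)pmcnten);	/* 0x3f */
	return 0;
}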
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index feb5d1d35f0f..019ace2bb914 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -47,7 +47,7 @@ static __always_inline bool kvm_arm_support_pmu_v3(void)
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
-u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
@@ -114,7 +114,7 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx, u64 val) {}
-static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
return 0;
}
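
A note on the !CONFIG_HW_PERF_EVENTS stub above: returning 0 is safe because every caller walks the mask bit by bit (for_each_set_bit in the hunks earlier), so an empty mask turns those PMU paths into no-ops. A plain-C illustration of that property, using an ordinary loop rather than the kernel's for_each_set_bit macro:

/* Hedged illustration: a zero mask makes the counter loops no-ops. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0;	/* stub return value: no counters implemented */

	for (int i = 0; i < 32; i++)
		if (mask & ((uint64_t)1 << i))
			printf("reset counter %d\n", i);	/* never executes */
	return 0;
}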