author    Linus Torvalds <torvalds@linux-foundation.org>  2025-05-04 08:06:42 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2025-05-04 08:06:42 -0700
commit    3f3041b9e48084f9e82b9c949bf14bb7bcf214c4 (patch)
tree      bd12677b7a20958013a8a3c1cc1ec087cecb43e5
parent    5aac99c6b51cf38ef339b89600add9e508e7e2f9 (diff)
parent    58f6217e5d0132a9f14e401e62796916aa055c1b (diff)
Merge tag 'perf-urgent-2025-05-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc perf fixes from Ingo Molnar:

 - Require group events for branch counter groups and PEBS counter
   snapshotting groups to be x86 events

 - Fix the handling of counter-snapshotting of non-precise events, where
   counter values may move backwards a bit, temporarily, confusing the code

 - Restrict perf/KVM PEBS to guest-owned events

* tag 'perf-urgent-2025-05-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: KVM: Mask PEBS_ENABLE loaded for guest with vCPU's value.
  perf/x86/intel/ds: Fix counter backwards of non-precise events counters-snapshotting
  perf/x86/intel: Check the X86 leader for pebs_counter_event_group
  perf/x86/intel: Only check the group flag for X86 leader
-rw-r--r--	arch/x86/events/core.c		 2
-rw-r--r--	arch/x86/events/intel/core.c	 2
-rw-r--r--	arch/x86/events/intel/ds.c	21
-rw-r--r--	arch/x86/events/perf_event.h	11
4 files changed, 30 insertions, 6 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 3a4f031d2f44..139ad80d1df3 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -754,7 +754,7 @@ void x86_pmu_enable_all(int added)
 	}
 }
 
-static inline int is_x86_event(struct perf_event *event)
+int is_x86_event(struct perf_event *event)
 {
 	int i;
 
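The body of is_x86_event() is untouched by this hunk; only the static inline qualifier is dropped so the helper can be declared in perf_event.h and reused by the group checks added there. As a minimal standalone sketch (hypothetical types, not the kernel's definitions), the test it performs is one of PMU ownership by pointer identity, not by inspecting the event itself:

#include <stdbool.h>
#include <stdio.h>

struct pmu { const char *name; };
struct perf_event { struct pmu *pmu; };

static struct pmu x86_core_pmu = { "cpu" };      /* stand-in for the x86 PMU */
static struct pmu sw_pmu       = { "software" }; /* stand-in for another PMU */

static bool is_x86_event(const struct perf_event *event)
{
	/* An event is an x86 event iff its pmu pointer is a known x86 instance. */
	return event->pmu == &x86_core_pmu;
}

int main(void)
{
	struct perf_event hw = { &x86_core_pmu };
	struct perf_event sw = { &sw_pmu };

	printf("hw: %d, sw: %d\n", is_x86_event(&hw), is_x86_event(&sw));
	return 0;
}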
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 00dfe487bd00..c5f385413392 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4395,7 +4395,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
 	arr[pebs_enable] = (struct perf_guest_switch_msr){
 		.msr = MSR_IA32_PEBS_ENABLE,
 		.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
-		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
+		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
 	};
 
 	if (arr[pebs_enable].host) {
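The one-line change restricts the MSR_IA32_PEBS_ENABLE value loaded for the guest: previously it was derived only from host-side masks, so the guest could be handed PEBS bits for counters its vCPU never enabled. ANDing with kvm_pmu->pebs_enable limits the loaded value to guest-owned events. A standalone sketch of the mask arithmetic, with made-up bit values (hypothetical, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pebs_mask      = 0x0f; /* counters with PEBS armed (assumed) */
	uint64_t host_ctrl_mask = 0x01; /* counters owned by the host (assumed) */
	uint64_t vcpu_pebs_en   = 0x06; /* what the vCPU itself enabled (assumed) */

	uint64_t old_guest = pebs_mask & ~host_ctrl_mask;                /* 0x0e */
	uint64_t new_guest = pebs_mask & ~host_ctrl_mask & vcpu_pebs_en; /* 0x06 */

	printf("old guest PEBS_ENABLE: %#llx\n", (unsigned long long)old_guest);
	printf("new guest PEBS_ENABLE: %#llx\n", (unsigned long long)new_guest);
	return 0;
}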
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 18c3ab579b8b..9b20acc0e932 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -2379,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
 			 */
 			intel_pmu_save_and_restart_reload(event, count);
 		}
-	} else
-		intel_pmu_save_and_restart(event);
+	} else {
+		/*
+		 * For a non-precise event, it's possible that the
+		 * counters-snapshotting record reports a positive value for
+		 * the overflowed event. The HW auto-reload mechanism then
+		 * resets the counter to 0 immediately, because
+		 * pebs_event_reset is cleared when PERF_X86_EVENT_AUTO_RELOAD
+		 * is not set. The counter may therefore be observed moving
+		 * backwards in a PMI handler.
+		 *
+		 * Since the event value has already been updated when the
+		 * counters-snapshotting record was processed, only the new
+		 * period needs to be set for the counter.
+		 */
+		if (is_pebs_counter_event_group(event))
+			static_call(x86_pmu_set_period)(event);
+		else
+			intel_pmu_save_and_restart(event);
+	}
 }
 
 static __always_inline void
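Why the distinction matters: once the counters-snapshotting record has accounted for the counter's value and the hardware has reset the counter to 0, a subsequent save-and-restart would compute a delta from a previous value that is now larger than the current reading. A standalone sketch with made-up values (hypothetical, not kernel code) of the wrap a naive unsigned delta would produce:

#include <stdint.h>
#include <stdio.h>

#define CNT_MASK ((1ULL << 48) - 1)	/* assume a 48-bit counter */

int main(void)
{
	uint64_t prev = 1000;	/* value already consumed via the snapshot record */
	uint64_t now  = 0;	/* counter after the HW auto-reload reset */

	/* The delta a save-and-restart style update would accumulate: */
	uint64_t bogus = (now - prev) & CNT_MASK;
	printf("bogus delta: %llu\n", (unsigned long long)bogus);

	/* The patch instead skips the accumulation for PEBS counter groups
	 * and only re-arms the period (x86_pmu_set_period in the hunk above). */
	return 0;
}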
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 2c0ce0e9545e..46d120597bab 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -110,14 +110,21 @@ static inline bool is_topdown_event(struct perf_event *event)
 	return is_metric_event(event) || is_slots_event(event);
 }
 
+int is_x86_event(struct perf_event *event);
+
+static inline bool check_leader_group(struct perf_event *leader, int flags)
+{
+	return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
+}
+
 static inline bool is_branch_counters_group(struct perf_event *event)
 {
-	return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
+	return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
 }
 
 static inline bool is_pebs_counter_event_group(struct perf_event *event)
 {
-	return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
+	return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
 }
 
 struct amd_nb {
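Both group checks previously read x86-specific bits out of the leader's hw.flags without confirming that the leader is an x86 event at all; a leader owned by another PMU can carry unrelated values in that field, yielding false positives. A standalone sketch of the difference (hypothetical types and flag value, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

#define PERF_X86_EVENT_BRANCH_COUNTERS 0x4	/* hypothetical bit value */

struct perf_event { bool is_x86; unsigned long hw_flags; };

static bool check_leader_group(const struct perf_event *leader, unsigned long flags)
{
	/* Only trust hw_flags when the leader really is an x86 event. */
	return leader->is_x86 ? !!(leader->hw_flags & flags) : false;
}

int main(void)
{
	/* A non-x86 leader whose flags field happens to have the bit set. */
	struct perf_event sw_leader = { .is_x86 = false, .hw_flags = 0x4 };

	printf("old check: %d\n", !!(sw_leader.hw_flags & PERF_X86_EVENT_BRANCH_COUNTERS));
	printf("new check: %d\n", check_leader_group(&sw_leader, PERF_X86_EVENT_BRANCH_COUNTERS));
	return 0;
}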