Diffstat (limited to 'arch/x86/events/amd')
-rw-r--r--  arch/x86/events/amd/brs.c    |  12
-rw-r--r--  arch/x86/events/amd/core.c   |  16
-rw-r--r--  arch/x86/events/amd/ibs.c    |  33
-rw-r--r--  arch/x86/events/amd/iommu.c  |   2
-rw-r--r--  arch/x86/events/amd/lbr.c    |  21
-rw-r--r--  arch/x86/events/amd/power.c  |  11
-rw-r--r--  arch/x86/events/amd/uncore.c | 117
7 files changed, 157 insertions(+), 55 deletions(-)
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index ec3427463382..06f35a6b58a5 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -44,12 +44,12 @@ static inline unsigned int brs_to(int idx)
 static __always_inline void set_debug_extn_cfg(u64 val)
 {
 	/* bits[4:3] must always be set to 11b */
-	__wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
+	native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
 }
 
 static __always_inline u64 get_debug_extn_cfg(void)
 {
-	return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
+	return native_rdmsrq(MSR_AMD_DBG_EXTN_CFG);
 }
 
 static bool __init amd_brs_detect(void)
@@ -187,7 +187,7 @@ void amd_brs_reset(void)
 	/*
 	 * Mark first entry as poisoned
 	 */
-	wrmsrl(brs_to(0), BRS_POISON);
+	wrmsrq(brs_to(0), BRS_POISON);
 }
 
 int __init amd_brs_init(void)
@@ -325,7 +325,7 @@ void amd_brs_drain(void)
 		u32 brs_idx = tos - i;
 		u64 from, to;
 
-		rdmsrl(brs_to(brs_idx), to);
+		rdmsrq(brs_to(brs_idx), to);
 
 		/* Entry does not belong to us (as marked by kernel) */
 		if (to == BRS_POISON)
@@ -341,7 +341,7 @@ void amd_brs_drain(void)
 		if (!amd_brs_match_plm(event, to))
 			continue;
 
-		rdmsrl(brs_from(brs_idx), from);
+		rdmsrq(brs_from(brs_idx), from);
 
 		perf_clear_branch_entry_bitfields(br+nr);
 
@@ -371,7 +371,7 @@ static void amd_brs_poison_buffer(void)
 	idx = amd_brs_get_tos(&cfg);
 
 	/* Poison target of entry */
-	wrmsrl(brs_to(idx), BRS_POISON);
+	wrmsrq(brs_to(idx), BRS_POISON);
 }
 
 /*
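The set_debug_extn_cfg() change above swaps the two-argument __wrmsr() interface, which takes the 64-bit MSR value as separate low/high 32-bit halves (EDX:EAX), for native_wrmsrq(), which takes the value whole. A minimal user-space sketch of the equivalence; the helpers below are stand-ins invented for this illustration, not kernel API:

  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in for the old split interface: value passed as EDX:EAX halves. */
  static void wrmsr_split(uint32_t lo, uint32_t hi)
  {
  	printf("EDX:EAX = %08x:%08x\n", hi, lo);
  }

  /* Stand-in for the *_wrmsrq() style: value passed as one u64. */
  static void wrmsr_q(uint64_t val)
  {
  	/* The lo/hi split now happens in exactly one place. */
  	wrmsr_split((uint32_t)val, (uint32_t)(val >> 32));
  }

  int main(void)
  {
  	uint64_t val = 0x1122334455667788ULL;

  	/* Old style: every caller splits the value by hand. */
  	wrmsr_split((uint32_t)(val | 3ULL << 3), (uint32_t)(val >> 32));
  	/* New style: identical effect, whole value passed through. */
  	wrmsr_q(val | 3ULL << 3);
  	return 0;
  }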
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 30d6ceb4c8ad..b20661b8621d 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -9,6 +9,7 @@
 #include <linux/jiffies.h>
 #include <asm/apicdef.h>
 #include <asm/apic.h>
+#include <asm/msr.h>
 #include <asm/nmi.h>
 
 #include "../perf_event.h"
@@ -563,13 +564,13 @@ static void amd_pmu_cpu_reset(int cpu)
 		return;
 
 	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
-	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+	wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
 
 	/*
	 * Clear freeze and overflow bits i.e. PerfCntrGLobalStatus.LbrFreeze
 	 * and PerfCntrGLobalStatus.PerfCntrOvfl
 	 */
-	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+	wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
 	       GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
 }
 
@@ -651,7 +652,7 @@ static void amd_pmu_cpu_dead(int cpu)
 
 static __always_inline void amd_pmu_set_global_ctl(u64 ctl)
 {
-	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
+	wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
 }
 
 static inline u64 amd_pmu_get_global_status(void)
@@ -659,7 +660,7 @@ static inline u64 amd_pmu_get_global_status(void)
 	u64 status;
 
 	/* PerfCntrGlobalStatus is read-only */
-	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
+	rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
 
 	return status;
 }
@@ -672,14 +673,14 @@ static inline void amd_pmu_ack_global_status(u64 status)
 	 * clears the same bit in PerfCntrGlobalStatus
 	 */
 
-	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
+	wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
 }
 
 static bool amd_pmu_test_overflow_topbit(int idx)
 {
 	u64 counter;
 
-	rdmsrl(x86_pmu_event_addr(idx), counter);
+	rdmsrq(x86_pmu_event_addr(idx), counter);
 
 	return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
 }
@@ -1003,8 +1004,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 			perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
 
-		if (perf_event_overflow(event, &data, regs))
-			x86_pmu_stop(event, 0);
+		perf_event_overflow(event, &data, regs);
 	}
 
 	/*
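amd_pmu_get_global_status() and amd_pmu_ack_global_status() above implement a read-then-acknowledge handshake: PerfCntrGlobalStatus is read-only, and writing a bit to the STATUS_CLR register clears the same bit in the status register. A user-space sketch of that pattern, with the registers modeled as plain variables (names invented for the sketch):

  #include <stdint.h>
  #include <stdio.h>

  /* Invented stand-in for the read-only status register. */
  static uint64_t reg_status = 0x5;	/* counters 0 and 2 overflowed */

  int main(void)
  {
  	uint64_t status = reg_status;	/* rdmsrq(..._GLOBAL_STATUS, status) */
  	int bit;

  	for (bit = 0; bit < 64; bit++) {
  		if (status & (1ULL << bit))
  			printf("handle overflow on counter %d\n", bit);
  	}

  	/* wrmsrq(..._GLOBAL_STATUS_CLR, status): hardware clears the same
  	 * bits in the read-only status register. */
  	reg_status &= ~status;
  	return 0;
  }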
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 0252b7ea8bca..112f43b23ebf 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -15,6 +15,7 @@
 #include <linux/sched/clock.h>
 
 #include <asm/apic.h>
+#include <asm/msr.h>
 
 #include "../perf_event.h"
@@ -26,7 +27,7 @@ static u32 ibs_caps;
 #include <linux/hardirq.h>
 
 #include <asm/nmi.h>
-#include <asm/amd-ibs.h>
+#include <asm/amd/ibs.h>
 
 /* attr.config2 */
 #define IBS_SW_FILTER_MASK 1
@@ -424,7 +425,7 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
 	 * prev count manually on overflow.
 	 */
 	while (!perf_event_try_update(event, count, 64)) {
-		rdmsrl(event->hw.config_base, *config);
+		rdmsrq(event->hw.config_base, *config);
 		count = perf_ibs->get_count(*config);
 	}
 }
@@ -435,9 +436,9 @@ static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
 	u64 tmp = hwc->config | config;
 
 	if (perf_ibs->fetch_count_reset_broken)
-		wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
+		wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask);
 
-	wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
+	wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask);
 }
 
 /*
@@ -452,9 +453,9 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
 {
 	config &= ~perf_ibs->cnt_mask;
 	if (boot_cpu_data.x86 == 0x10)
-		wrmsrl(hwc->config_base, config);
+		wrmsrq(hwc->config_base, config);
 	config &= ~perf_ibs->enable_mask;
-	wrmsrl(hwc->config_base, config);
+	wrmsrq(hwc->config_base, config);
 }
 
 /*
@@ -513,7 +514,7 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
 	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
 		return;
 
-	rdmsrl(hwc->config_base, config);
+	rdmsrq(hwc->config_base, config);
 
 	if (stopping) {
 		/*
@@ -1256,7 +1257,7 @@ fail:
 	hwc = &event->hw;
 	msr = hwc->config_base;
 	buf = ibs_data.regs;
-	rdmsrl(msr, *buf);
+	rdmsrq(msr, *buf);
 	if (!(*buf++ & perf_ibs->valid_mask))
 		goto fail;
@@ -1274,7 +1275,7 @@ fail:
 	offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
 
 	do {
-		rdmsrl(msr + offset, *buf++);
+		rdmsrq(msr + offset, *buf++);
 		size++;
 		offset = find_next_bit(perf_ibs->offset_mask,
 				       perf_ibs->offset_max,
@@ -1304,17 +1305,17 @@ fail:
 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
 		if (perf_ibs == &perf_ibs_op) {
 			if (ibs_caps & IBS_CAPS_BRNTRGT) {
-				rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+				rdmsrq(MSR_AMD64_IBSBRTARGET, *buf++);
 				br_target_idx = size;
 				size++;
 			}
 			if (ibs_caps & IBS_CAPS_OPDATA4) {
-				rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+				rdmsrq(MSR_AMD64_IBSOPDATA4, *buf++);
 				size++;
 			}
 		}
 		if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
-			rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
+			rdmsrq(MSR_AMD64_ICIBSEXTDCTL, *buf++);
 			size++;
 		}
 	}
@@ -1373,9 +1374,7 @@ fail:
 		hwc->sample_period = perf_ibs->min_period;
 
 out:
-	if (throttle) {
-		perf_ibs_stop(event, 0);
-	} else {
+	if (!throttle) {
 		if (perf_ibs == &perf_ibs_op) {
 			if (ibs_caps & IBS_CAPS_OPCNTEXT) {
 				new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
@@ -1565,7 +1564,7 @@ static inline int ibs_eilvt_valid(void)
 
 	preempt_disable();
 
-	rdmsrl(MSR_AMD64_IBSCTL, val);
+	rdmsrq(MSR_AMD64_IBSCTL, val);
 	offset = val & IBSCTL_LVT_OFFSET_MASK;
 
 	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
@@ -1680,7 +1679,7 @@ static inline int get_ibs_lvt_offset(void)
 {
 	u64 val;
 
-	rdmsrl(MSR_AMD64_IBSCTL, val);
+	rdmsrq(MSR_AMD64_IBSCTL, val);
 	if (!(val & IBSCTL_LVT_OFFSET_VALID))
 		return -EINVAL;
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index f8228d8243f7..a721da9987dd 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -16,6 +16,8 @@
 #include <linux/slab.h>
 #include <linux/amd-iommu.h>
 
+#include <asm/msr.h>
+
 #include "../perf_event.h"
 #include "iommu.h"
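perf_ibs_enable_event() above works around parts where setting the enable bit does not reload the fetch count (fetch_count_reset_broken): the control register is first written with the enable bit clear, then rewritten with it set. A sketch of that two-write sequence, with an invented stand-in for the enable mask and the MSR write:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define ENABLE_MASK (1ULL << 17)	/* invented stand-in bit */

  /* Stand-in for the MSR write. */
  static void wrmsr_stub(uint64_t val)
  {
  	printf("ctl <- %#llx\n", (unsigned long long)val);
  }

  static void enable_event(uint64_t tmp, bool fetch_count_reset_broken)
  {
  	/* On affected parts, write once with the enable bit clear so the
  	 * following enabling write reloads the count. */
  	if (fetch_count_reset_broken)
  		wrmsr_stub(tmp & ~ENABLE_MASK);

  	wrmsr_stub(tmp | ENABLE_MASK);
  }

  int main(void)
  {
  	enable_event(0x1234, true);
  	return 0;
  }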
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index c06ccca96851..d24da377df77 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/perf_event.h>
+#include <asm/msr.h>
 #include <asm/perf_event.h>
 
 #include "../perf_event.h"
@@ -61,19 +62,19 @@ struct branch_entry {
 
 static __always_inline void amd_pmu_lbr_set_from(unsigned int idx, u64 val)
 {
-	wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
+	wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
 }
 
 static __always_inline void amd_pmu_lbr_set_to(unsigned int idx, u64 val)
 {
-	wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
+	wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
 }
 
 static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx)
 {
 	u64 val;
 
-	rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
+	rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
 
 	return val;
 }
@@ -82,7 +83,7 @@ static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx)
 {
 	u64 val;
 
-	rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
+	rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
 
 	return val;
 }
@@ -333,7 +334,7 @@ void amd_pmu_lbr_reset(void)
 	cpuc->last_task_ctx = NULL;
 	cpuc->last_log_id = 0;
 
-	wrmsrl(MSR_AMD64_LBR_SELECT, 0);
+	wrmsrq(MSR_AMD64_LBR_SELECT, 0);
 }
 
 void amd_pmu_lbr_add(struct perf_event *event)
@@ -396,16 +397,16 @@ void amd_pmu_lbr_enable_all(void)
 	/* Set hardware branch filter */
 	if (cpuc->lbr_select) {
 		lbr_select = cpuc->lbr_sel->config & LBR_SELECT_MASK;
-		wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
+		wrmsrq(MSR_AMD64_LBR_SELECT, lbr_select);
 	}
 
 	if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
-		rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
-		wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+		rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+		wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
 	}
 
-	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
-	wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
+	rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+	wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
 }
 
 void amd_pmu_lbr_disable_all(void)
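The accessors above encode the LBR v2 register layout: the FROM and TO MSRs for entry idx sit at consecutive addresses interleaved from a single base, which is why amd_pmu_lbr_set_to() and amd_pmu_lbr_get_to() also start from MSR_AMD_SAMP_BR_FROM. A sketch of the address arithmetic; the base value here is a stand-in chosen for the illustration:

  #include <stdint.h>
  #include <stdio.h>

  #define SAMP_BR_FROM_BASE 0xc0010300u	/* stand-in base address */

  int main(void)
  {
  	unsigned int idx;

  	/* FROM/TO pairs are interleaved: base+0, base+1, base+2, ... */
  	for (idx = 0; idx < 4; idx++) {
  		uint32_t from = SAMP_BR_FROM_BASE + idx * 2;
  		uint32_t to   = SAMP_BR_FROM_BASE + idx * 2 + 1;

  		printf("entry %u: FROM=%#x TO=%#x\n", idx, from, to);
  	}
  	return 0;
  }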
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 37d5b380516e..dad42790cf7d 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -11,6 +11,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <asm/cpu_device_id.h>
+#include <asm/msr.h>
 #include "../perf_event.h"
 
 /* Event code: LSB 8 bits, passed in attr->config any other bit is reserved. */
@@ -48,8 +49,8 @@ static void event_update(struct perf_event *event)
 
 	prev_pwr_acc = hwc->pwr_acc;
 	prev_ptsc = hwc->ptsc;
-	rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
-	rdmsrl(MSR_F15H_PTSC, new_ptsc);
+	rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
+	rdmsrq(MSR_F15H_PTSC, new_ptsc);
 
 	/*
 	 * Calculate the CU power consumption over a time period, the unit of
@@ -75,8 +76,8 @@ static void __pmu_event_start(struct perf_event *event)
 
 	event->hw.state = 0;
 
-	rdmsrl(MSR_F15H_PTSC, event->hw.ptsc);
-	rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
+	rdmsrq(MSR_F15H_PTSC, event->hw.ptsc);
+	rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
 }
 
 static void pmu_event_start(struct perf_event *event, int mode)
@@ -272,7 +273,7 @@ static int __init amd_power_pmu_init(void)
 
 	cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
 
-	if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
+	if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
 		pr_err("Failed to read max compute unit power accumulator MSR\n");
 		return -ENODEV;
 	}
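event_update() above derives power from two free-running counters: it snapshots the power accumulator and the PTSC timestamp, then reports the change in the accumulator over the change in the timestamp. A sketch of that snapshot/delta pattern with invented values; unsigned subtraction keeps each delta correct across a single wraparound:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	/* Invented snapshot values for illustration. */
  	uint64_t prev_pwr_acc = 0xfffffffffffffff0ULL;	/* about to wrap */
  	uint64_t new_pwr_acc  = 0x0000000000000010ULL;	/* wrapped */
  	uint64_t prev_ptsc = 1000000, new_ptsc = 2000000;

  	/* Unsigned modular subtraction absorbs the wrap: delta is 0x20. */
  	uint64_t delta_acc  = new_pwr_acc - prev_pwr_acc;
  	uint64_t delta_ptsc = new_ptsc - prev_ptsc;

  	printf("accumulated %llu units over %llu PTSC ticks\n",
  	       (unsigned long long)delta_acc,
  	       (unsigned long long)delta_ptsc);
  	return 0;
  }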
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 49c26ce2b115..e8b6af199c73 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -21,6 +21,7 @@
 #define NUM_COUNTERS_NB		4
 #define NUM_COUNTERS_L2		4
 #define NUM_COUNTERS_L3		6
+#define NUM_COUNTERS_MAX	64
 
 #define RDPMC_BASE_NB		6
 #define RDPMC_BASE_LLC		10
@@ -38,7 +39,10 @@ struct amd_uncore_ctx {
 	int refcnt;
 	int cpu;
 	struct perf_event **events;
-	struct hlist_node node;
+	unsigned long active_mask[BITS_TO_LONGS(NUM_COUNTERS_MAX)];
+	int nr_active;
+	struct hrtimer hrtimer;
+	u64 hrtimer_duration;
 };
 
 struct amd_uncore_pmu {
@@ -83,11 +87,51 @@ struct amd_uncore {
 
 static struct amd_uncore uncores[UNCORE_TYPE_MAX];
 
+/* Interval for hrtimer, defaults to 60000 milliseconds */
+static unsigned int update_interval = 60 * MSEC_PER_SEC;
+module_param(update_interval, uint, 0444);
+
 static struct amd_uncore_pmu *event_to_amd_uncore_pmu(struct perf_event *event)
 {
 	return container_of(event->pmu, struct amd_uncore_pmu, pmu);
 }
 
+static enum hrtimer_restart amd_uncore_hrtimer(struct hrtimer *hrtimer)
+{
+	struct amd_uncore_ctx *ctx;
+	struct perf_event *event;
+	int bit;
+
+	ctx = container_of(hrtimer, struct amd_uncore_ctx, hrtimer);
+
+	if (!ctx->nr_active || ctx->cpu != smp_processor_id())
+		return HRTIMER_NORESTART;
+
+	for_each_set_bit(bit, ctx->active_mask, NUM_COUNTERS_MAX) {
+		event = ctx->events[bit];
+		event->pmu->read(event);
+	}
+
+	hrtimer_forward_now(hrtimer, ns_to_ktime(ctx->hrtimer_duration));
+	return HRTIMER_RESTART;
+}
+
+static void amd_uncore_start_hrtimer(struct amd_uncore_ctx *ctx)
+{
+	hrtimer_start(&ctx->hrtimer, ns_to_ktime(ctx->hrtimer_duration),
+		      HRTIMER_MODE_REL_PINNED_HARD);
+}
+
+static void amd_uncore_cancel_hrtimer(struct amd_uncore_ctx *ctx)
+{
+	hrtimer_cancel(&ctx->hrtimer);
+}
+
+static void amd_uncore_init_hrtimer(struct amd_uncore_ctx *ctx)
+{
+	hrtimer_setup(&ctx->hrtimer, amd_uncore_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+}
+
 static void amd_uncore_read(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -106,9 +150,9 @@ static void amd_uncore_read(struct perf_event *event)
 	 * read counts directly from the corresponding PERF_CTR.
 	 */
 	if (hwc->event_base_rdpmc < 0)
-		rdmsrl(hwc->event_base, new);
+		rdmsrq(hwc->event_base, new);
 	else
-		rdpmcl(hwc->event_base_rdpmc, new);
+		new = rdpmc(hwc->event_base_rdpmc);
 
 	local64_set(&hwc->prev_count, new);
 	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
@@ -118,27 +162,40 @@ static void amd_uncore_read(struct perf_event *event)
 
 static void amd_uncore_start(struct perf_event *event, int flags)
 {
+	struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+	struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
 	struct hw_perf_event *hwc = &event->hw;
 
+	if (!ctx->nr_active++)
+		amd_uncore_start_hrtimer(ctx);
+
 	if (flags & PERF_EF_RELOAD)
-		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
+		wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count));
 
 	hwc->state = 0;
-	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
+	__set_bit(hwc->idx, ctx->active_mask);
+	wrmsrq(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
 	perf_event_update_userpage(event);
 }
 
 static void amd_uncore_stop(struct perf_event *event, int flags)
 {
+	struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+	struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
 	struct hw_perf_event *hwc = &event->hw;
 
-	wrmsrl(hwc->config_base, hwc->config);
+	wrmsrq(hwc->config_base, hwc->config);
 	hwc->state |= PERF_HES_STOPPED;
 
 	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
 		event->pmu->read(event);
 		hwc->state |= PERF_HES_UPTODATE;
 	}
+
+	if (!--ctx->nr_active)
+		amd_uncore_cancel_hrtimer(ctx);
+
+	__clear_bit(hwc->idx, ctx->active_mask);
 }
 
 static int amd_uncore_add(struct perf_event *event, int flags)
@@ -491,6 +548,9 @@ static int amd_uncore_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
 				goto fail;
 			}
 
+			amd_uncore_init_hrtimer(curr);
+			curr->hrtimer_duration = (u64)update_interval * NSEC_PER_MSEC;
+
 			cpumask_set_cpu(cpu, &pmu->active_mask);
 		}
@@ -880,16 +940,55 @@ static int amd_uncore_umc_event_init(struct perf_event *event)
 
 static void amd_uncore_umc_start(struct perf_event *event, int flags)
 {
+	struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+	struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
 	struct hw_perf_event *hwc = &event->hw;
 
+	if (!ctx->nr_active++)
+		amd_uncore_start_hrtimer(ctx);
+
 	if (flags & PERF_EF_RELOAD)
-		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
+		wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count));
 
 	hwc->state = 0;
-	wrmsrl(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC));
+	__set_bit(hwc->idx, ctx->active_mask);
+	wrmsrq(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC));
 	perf_event_update_userpage(event);
 }
 
+static void amd_uncore_umc_read(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	u64 prev, new, shift;
+	s64 delta;
+
+	shift = COUNTER_SHIFT + 1;
+	prev = local64_read(&hwc->prev_count);
+
+	/*
+	 * UMC counters do not have RDPMC assignments. Read counts directly
+	 * from the corresponding PERF_CTR.
+	 */
+	rdmsrl(hwc->event_base, new);
+
+	/*
+	 * Unlike the other uncore counters, UMC counters saturate and set the
+	 * Overflow bit (bit 48) on overflow. Since they do not roll over,
+	 * proactively reset the corresponding PERF_CTR when bit 47 is set so
+	 * that the counter never gets a chance to saturate.
+	 */
+	if (new & BIT_ULL(63 - COUNTER_SHIFT)) {
+		wrmsrl(hwc->event_base, 0);
+		local64_set(&hwc->prev_count, 0);
+	} else {
+		local64_set(&hwc->prev_count, new);
+	}
+
+	delta = (new << shift) - (prev << shift);
+	delta >>= shift;
+	local64_add(delta, &event->count);
+}
+
 static void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore,
 				    unsigned int cpu)
 {
@@ -968,7 +1067,7 @@ int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
 		.del	= amd_uncore_del,
 		.start	= amd_uncore_umc_start,
 		.stop	= amd_uncore_stop,
-		.read	= amd_uncore_read,
+		.read	= amd_uncore_umc_read,
 		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 		.module	= THIS_MODULE,
 	};
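The hrtimer added above exists because these uncore PMUs carry PERF_PMU_CAP_NO_INTERRUPT: with no sampling interrupt, a counter could wrap unnoticed between two reads, so the timer forces a read of every active event each update_interval milliseconds. The start/stop bookkeeping is a simple refcount pattern, sketched below with the timer calls stubbed out:

  #include <stdio.h>

  static int nr_active;

  /* Stubs standing in for hrtimer_start()/hrtimer_cancel(). */
  static void timer_arm(void)    { printf("timer armed\n"); }
  static void timer_cancel(void) { printf("timer cancelled\n"); }

  static void event_start(void)
  {
  	if (!nr_active++)	/* first active event arms the timer */
  		timer_arm();
  }

  static void event_stop(void)
  {
  	if (!--nr_active)	/* last active event disarms it */
  		timer_cancel();
  }

  int main(void)
  {
  	event_start();
  	event_start();	/* already armed: no second arm */
  	event_stop();
  	event_stop();	/* cancelled here */
  	return 0;
  }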
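Both amd_uncore_read() and the new amd_uncore_umc_read() compute deltas with the shift trick visible above: for an n-bit counter, shifting both snapshots left by 64 - n and arithmetic-shifting the difference back right discards the unimplemented upper bits while keeping the delta correct across a wrap. A self-contained check for n = 48 (COUNTER_SHIFT = 16):

  #include <stdint.h>
  #include <stdio.h>

  #define COUNTER_SHIFT 16	/* 64 - 48: counters are 48 bits wide */

  int main(void)
  {
  	uint64_t prev = 0x0000ffffffffff00ULL;	/* near the top of 48 bits */
  	uint64_t curr = 0x0000000000000100ULL;	/* after wrapping around */
  	int64_t delta;

  	/* Shift out the unimplemented upper bits, subtract, then
  	 * arithmetic-shift back so the wrap is absorbed. */
  	delta = (curr << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
  	delta >>= COUNTER_SHIFT;

  	printf("delta = %lld\n", (long long)delta);	/* prints 512 */
  	return 0;
  }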