author     Will Deacon <will@kernel.org>  2021-06-24 14:05:40 +0100
committer  Will Deacon <will@kernel.org>  2021-06-24 14:05:40 +0100
commit     2e5d34d26a906f17a773cd3c67ee91dd2118c898
tree       604e910287e852077fdb02679f2c2652ed818b15
parent     fdceddb06a5ff5ad3894cf9e8124d5af38ac5793
parent     d96b1b8c9f79b6bb234a31c80972a6f422079376
Merge branch 'for-next/perf' into for-next/core
PMU driver cleanups for managing IRQ affinity and exposing event
attributes via sysfs.
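
For context, the "Simplify EVENT ATTR macro" patches listed below all switch to the PMU_EVENT_ATTR_ID() helper that this series adds to <linux/perf_event.h>. Roughly, each per-driver macro collapses from an open-coded anonymous attribute into a one-line wrapper; a sketch based on the SMMU PMU hunk in the diff:

/* Before: every driver open-coded an anonymous perf_pmu_events_attr */
#define SMMU_EVENT_ATTR(name, config)                                  \
        (&((struct perf_pmu_events_attr) {                             \
                .attr = __ATTR(name, 0444, smmu_pmu_event_show, NULL), \
                .id = config,                                          \
        }).attr.attr)

/* After: the shared helper from <linux/perf_event.h> does the same in one line */
#define SMMU_EVENT_ATTR(name, config)                                  \
        PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)

The same conversion is applied to the arm64 core PMU and to the i.MX8 DDR, X-Gene, and Qualcomm L2/L3 cache PMU drivers in this branch.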
* for-next/perf: (36 commits)
drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe()
perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number
arm64: perf: Simplify EVENT ATTR macro in perf_event.c
drivers/perf: Simplify EVENT ATTR macro in fsl_imx8_ddr_perf.c
drivers/perf: Simplify EVENT ATTR macro in xgene_pmu.c
drivers/perf: Simplify EVENT ATTR macro in qcom_l3_pmu.c
drivers/perf: Simplify EVENT ATTR macro in qcom_l2_pmu.c
drivers/perf: Simplify EVENT ATTR macro in SMMU PMU driver
perf: Add EVENT_ATTR_ID to simplify event attributes
perf/smmuv3: Don't trample existing events with global filter
perf/hisi: Constify static attribute_group structs
perf: qcom: Remove redundant dev_err call in qcom_l3_cache_pmu_probe()
drivers/perf: hisi: Fix data source control
arm64: perf: Add more support on caps under sysfs
perf: qcom_l2_pmu: move to use request_irq by IRQF_NO_AUTOEN flag
arm_pmu: move to use request_irq by IRQF_NO_AUTOEN flag
perf: arm_spe: use DEVICE_ATTR_RO macro
perf: xgene_pmu: use DEVICE_ATTR_RO macro
perf: qcom: use DEVICE_ATTR_RO macro
perf: arm_pmu: use DEVICE_ATTR_RO macro
...
26 files changed, 156 insertions, 170 deletions
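
Before the per-file hunks, a minimal sketch of the IRQ setup pattern the affinity cleanups below converge on (the example_pmu names are hypothetical, not from the patch): the interrupt is requested with IRQF_NO_AUTOEN rather than a separate irq_set_status_flags(irq, IRQ_NOAUTOEN) call, and its affinity is managed with irq_set_affinity(), which this series exports, so drivers no longer clear an affinity hint on teardown:

/* Hypothetical driver code illustrating the pattern; not part of the patch. */
static int example_pmu_setup_irq(struct example_pmu *pmu, int irq, int cpu)
{
        int err;

        /* Request the IRQ disabled; no irq_set_status_flags(irq, IRQ_NOAUTOEN) */
        err = request_irq(irq, example_pmu_handle_irq,
                          IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_AUTOEN,
                          "example-pmu", pmu);
        if (err)
                return err;

        /* Managed affinity: no irq_set_affinity_hint(irq, NULL) needed on remove */
        return irq_set_affinity(irq, cpumask_of(cpu));
}

On CPU hotplug, the drivers below likewise call irq_set_affinity(irq, cpumask_of(target)) after perf_pmu_migrate_context(), as seen in the arm-ccn, SMMU and HiSilicon hunks.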
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 2924d7910b10..eb2190477da1 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value) pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { - asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); + asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value)); } else { armv7_pmnc_select_counter(idx); - asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value)); + asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value)); } } diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 60731f602d3e..4ef6f19331f9 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -239,6 +239,11 @@ /* PMMIR_EL1.SLOTS mask */ #define ARMV8_PMU_SLOTS_MASK 0xff +#define ARMV8_PMU_BUS_SLOTS_SHIFT 8 +#define ARMV8_PMU_BUS_SLOTS_MASK 0xff +#define ARMV8_PMU_BUS_WIDTH_SHIFT 16 +#define ARMV8_PMU_BUS_WIDTH_MASK 0xf + #ifdef CONFIG_PERF_EVENTS struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index f594957e29bd..d07788dad388 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -165,10 +165,7 @@ armv8pmu_events_sysfs_show(struct device *dev, } #define ARMV8_EVENT_ATTR(name, config) \ - (&((struct perf_pmu_events_attr) { \ - .attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL), \ - .id = config, \ - }).attr.attr) + PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config) static struct attribute *armv8_pmuv3_event_attrs[] = { ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR), @@ -312,13 +309,46 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr, struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK; - return snprintf(page, PAGE_SIZE, "0x%08x\n", slots); + return sysfs_emit(page, "0x%08x\n", slots); } static DEVICE_ATTR_RO(slots); +static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr, + char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT) + & ARMV8_PMU_BUS_SLOTS_MASK; + + return sysfs_emit(page, "0x%08x\n", bus_slots); +} + +static DEVICE_ATTR_RO(bus_slots); + +static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr, + char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); + u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT) + & ARMV8_PMU_BUS_WIDTH_MASK; + u32 val = 0; + + /* Encoded as Log2(number of bytes), plus one */ + if (bus_width > 2 && bus_width < 13) + val = 1 << (bus_width - 1); + + return sysfs_emit(page, "0x%08x\n", val); +} + +static DEVICE_ATTR_RO(bus_width); + static struct attribute *armv8_pmuv3_caps_attrs[] = { &dev_attr_slots.attr, + &dev_attr_bus_slots.attr, + &dev_attr_bus_width.attr, NULL, }; diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 666d8a9b557f..54aca3a62814 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -37,7 +37,7 @@ #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size) #define 
CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) -#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) +#define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1) #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1) #define CCI_PMU_MAX_HW_CNTRS(model) \ @@ -806,7 +806,7 @@ static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *ev return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); /* Generic code to find an unused idx from the mask */ - for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) + for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) if (!test_and_set_bit(idx, hw->used_mask)) return idx; diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 96d47cb302dd..a96c31604545 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1211,7 +1211,7 @@ static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) perf_pmu_migrate_context(&dt->pmu, cpu, target); dt->cpu = target; if (ccn->irq) - WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu))); + WARN_ON(irq_set_affinity(ccn->irq, cpumask_of(dt->cpu))); return 0; } @@ -1291,7 +1291,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) /* Also make sure that the overflow interrupt is handled by this CPU */ if (ccn->irq) { - err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu)); + err = irq_set_affinity(ccn->irq, cpumask_of(ccn->dt.cpu)); if (err) { dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); goto error_set_affinity; @@ -1325,8 +1325,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, &ccn->dt.node); - if (ccn->irq) - irq_set_affinity_hint(ccn->irq, NULL); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); writel(0, ccn->dt.base + CCN_DT_PMCR); diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 56a5c355701d..bc3cba5f8c5d 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -31,7 +31,7 @@ #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) -#define CMN_CHILD_NODE_ADDR GENMASK(27,0) +#define CMN_CHILD_NODE_ADDR GENMASK(27, 0) #define CMN_CHILD_NODE_EXTERNAL BIT(31) #define CMN_ADDR_NODE_PTR GENMASK(27, 14) @@ -1162,7 +1162,7 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) perf_pmu_migrate_context(&cmn->pmu, cpu, target); for (i = 0; i < cmn->num_dtcs; i++) - irq_set_affinity_hint(cmn->dtc[i].irq, cpumask_of(target)); + irq_set_affinity(cmn->dtc[i].irq, cpumask_of(target)); cmn->cpu = target; return 0; } @@ -1212,7 +1212,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn) irq = cmn->dtc[i].irq; for (j = i; j--; ) { if (cmn->dtc[j].irq == irq) { - cmn->dtc[j].irq_friend = j - i; + cmn->dtc[j].irq_friend = i - j; goto next; } } @@ -1222,7 +1222,7 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn) if (err) return err; - err = irq_set_affinity_hint(irq, cpumask_of(cmn->cpu)); + err = irq_set_affinity(irq, cpumask_of(cmn->cpu)); if (err) return err; next: @@ -1568,16 +1568,11 @@ static int arm_cmn_probe(struct platform_device *pdev) static int arm_cmn_remove(struct platform_device *pdev) { struct arm_cmn *cmn = platform_get_drvdata(pdev); - int i; writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL); perf_pmu_unregister(&cmn->pmu); cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node); - - for (i = 0; i < cmn->num_dtcs; i++) - irq_set_affinity_hint(cmn->dtc[i].irq, NULL); - return 0; } diff --git 
a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c index b6c2511d59af..280a6ae3e27c 100644 --- a/drivers/perf/arm_dmc620_pmu.c +++ b/drivers/perf/arm_dmc620_pmu.c @@ -421,7 +421,7 @@ static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num) if (ret) goto out_free_aff; - ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu)); + ret = irq_set_affinity(irq_num, cpumask_of(irq->cpu)); if (ret) goto out_free_irq; @@ -475,7 +475,6 @@ static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu) list_del(&irq->irqs_node); mutex_unlock(&dmc620_pmu_irqs_lock); - WARN_ON(irq_set_affinity_hint(irq->irq_num, NULL)); free_irq(irq->irq_num, irq); cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node); kfree(irq); @@ -622,7 +621,7 @@ static int dmc620_pmu_cpu_teardown(unsigned int cpu, perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target); mutex_unlock(&dmc620_pmu_irqs_lock); - WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target))); + WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target))); irq->cpu = target; return 0; diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 196faea074d0..a36698a90d2f 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -687,7 +687,7 @@ static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu) static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu) { cpumask_set_cpu(cpu, &dsu_pmu->active_cpu); - if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu)) + if (irq_set_affinity(dsu_pmu->irq, &dsu_pmu->active_cpu)) pr_warn("Failed to set irq affinity to %d\n", cpu); } @@ -769,7 +769,6 @@ static int dsu_pmu_device_probe(struct platform_device *pdev) if (rc) { cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node); - irq_set_affinity_hint(dsu_pmu->irq, NULL); } return rc; @@ -781,7 +780,6 @@ static int dsu_pmu_device_remove(struct platform_device *pdev) perf_pmu_unregister(&dsu_pmu->pmu); cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node); - irq_set_affinity_hint(dsu_pmu->irq, NULL); return 0; } @@ -840,10 +838,8 @@ static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node) dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu); /* If there are no active CPUs in the DSU, leave IRQ disabled */ - if (dst >= nr_cpu_ids) { - irq_set_affinity_hint(dsu_pmu->irq, NULL); + if (dst >= nr_cpu_ids) return 0; - } perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst); dsu_pmu_set_active_cpu(dst, dsu_pmu); diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index d4f7f1f9cc77..3cbc3baf087f 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -563,14 +563,14 @@ static int armpmu_filter_match(struct perf_event *event) return ret; } -static ssize_t armpmu_cpumask_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t cpus_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev)); return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus); } -static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL); +static DEVICE_ATTR_RO(cpus); static struct attribute *armpmu_common_attrs[] = { &dev_attr_cpus.attr, @@ -644,11 +644,9 @@ int armpmu_request_irq(int irq, int cpu) } irq_flags = IRQF_PERCPU | - IRQF_NOBALANCING | + IRQF_NOBALANCING | IRQF_NO_AUTOEN | IRQF_NO_THREAD; - irq_set_status_flags(irq, IRQ_NOAUTOEN); - err = request_nmi(irq, handler, irq_flags, "arm-pmu", per_cpu_ptr(&cpu_armpmu, cpu)); @@ -670,7 +668,7 @@ 
int armpmu_request_irq(int irq, int cpu) &cpu_armpmu); irq_ops = &percpu_pmuirq_ops; } else { - has_nmi= true; + has_nmi = true; irq_ops = &percpu_pmunmi_ops; } } else { @@ -869,10 +867,8 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags) int cpu; pmu = kzalloc(sizeof(*pmu), flags); - if (!pmu) { - pr_info("failed to allocate PMU device!\n"); + if (!pmu) goto out; - } pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags); if (!pmu->hw_events) { diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index ff6fab4bae30..226348822ab3 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -277,7 +277,7 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, struct perf_event *event, int idx) { u32 span, sid; - unsigned int num_ctrs = smmu_pmu->num_counters; + unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters; bool filter_en = !!get_filter_enable(event); span = filter_en ? get_filter_span(event) : @@ -285,17 +285,19 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, sid = filter_en ? get_filter_stream_id(event) : SMMU_PMCG_DEFAULT_FILTER_SID; - /* Support individual filter settings */ - if (!smmu_pmu->global_filter) { + cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); + /* + * Per-counter filtering, or scheduling the first globally-filtered + * event into an empty PMU so idx == 0 and it works out equivalent. + */ + if (!smmu_pmu->global_filter || cur_idx == num_ctrs) { smmu_pmu_set_event_filter(event, idx, span, sid); return 0; } - /* Requested settings same as current global settings*/ - idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); - if (idx == num_ctrs || - smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) { - smmu_pmu_set_event_filter(event, 0, span, sid); + /* Otherwise, must match whatever's currently scheduled */ + if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) { + smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event)); return 0; } @@ -509,11 +511,8 @@ static ssize_t smmu_pmu_event_show(struct device *dev, return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); } -#define SMMU_EVENT_ATTR(name, config) \ - (&((struct perf_pmu_events_attr) { \ - .attr = __ATTR(name, 0444, smmu_pmu_event_show, NULL), \ - .id = config, \ - }).attr.attr) +#define SMMU_EVENT_ATTR(name, config) \ + PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config) static struct attribute *smmu_pmu_events[] = { SMMU_EVENT_ATTR(cycles, 0), @@ -628,7 +627,7 @@ static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target); smmu_pmu->on_cpu = target; - WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target))); + WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target))); return 0; } @@ -839,15 +838,14 @@ static int smmu_pmu_probe(struct platform_device *pdev) /* Pick one CPU to be the preferred one to use */ smmu_pmu->on_cpu = raw_smp_processor_id(); - WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, - cpumask_of(smmu_pmu->on_cpu))); + WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu))); err = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); if (err) { dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", err, &res_0->start); - goto out_clear_affinity; + return err; } err = perf_pmu_register(&smmu_pmu->pmu, name, -1); @@ -866,8 +864,6 @@ static int smmu_pmu_probe(struct platform_device *pdev) out_unregister: cpuhp_state_remove_instance_nocalls(cpuhp_state_num, 
&smmu_pmu->node); -out_clear_affinity: - irq_set_affinity_hint(smmu_pmu->irq, NULL); return err; } @@ -877,7 +873,6 @@ static int smmu_pmu_remove(struct platform_device *pdev) perf_pmu_unregister(&smmu_pmu->pmu); cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); - irq_set_affinity_hint(smmu_pmu->irq, NULL); return 0; } diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 8a1e86ab2d8e..d44bcc29d99c 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -231,15 +231,14 @@ static const struct attribute_group arm_spe_pmu_format_group = { .attrs = arm_spe_pmu_formats_attr, }; -static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev); return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus); } -static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL); +static DEVICE_ATTR_RO(cpumask); static struct attribute *arm_spe_pmu_attrs[] = { &dev_attr_cpumask.attr, @@ -1044,7 +1043,6 @@ static void __arm_spe_pmu_dev_probe(void *info) spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features); spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED; - return; } static void __arm_spe_pmu_reset_local(void) @@ -1190,10 +1188,8 @@ static int arm_spe_pmu_device_probe(struct platform_device *pdev) } spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL); - if (!spe_pmu) { - dev_err(dev, "failed to allocate spe_pmu\n"); + if (!spe_pmu) return -ENOMEM; - } spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle)); if (!spe_pmu->handle) diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 2bbb93188064..94ebc1ecace7 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -222,11 +222,8 @@ ddr_pmu_event_show(struct device *dev, struct device_attribute *attr, return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); } -#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \ - (&((struct perf_pmu_events_attr[]) { \ - { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\ - .id = _id, } \ - })[0].attr.attr) +#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id) static struct attribute *ddr_perf_events_attrs[] = { IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID), @@ -674,7 +671,7 @@ static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node) perf_pmu_migrate_context(&pmu->pmu, cpu, target); pmu->cpu = target; - WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu))); + WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu))); return 0; } @@ -705,8 +702,10 @@ static int ddr_perf_probe(struct platform_device *pdev) name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", num); - if (!name) - return -ENOMEM; + if (!name) { + ret = -ENOMEM; + goto cpuhp_state_err; + } pmu->devtype_data = of_device_get_match_data(&pdev->dev); @@ -749,7 +748,7 @@ static int ddr_perf_probe(struct platform_device *pdev) } pmu->irq = irq; - ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)); + ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)); if (ret) { dev_err(pmu->dev, "Failed to set interrupt affinity!\n"); goto ddr_perf_err; @@ -777,7 +776,6 @@ static int ddr_perf_remove(struct platform_device *pdev) cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); cpuhp_remove_multi_state(pmu->cpuhp_state); - 
irq_set_affinity_hint(pmu->irq, NULL); perf_pmu_unregister(&pmu->pmu); diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 7c8a4bc21db4..62299ab5a9be 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -2,7 +2,7 @@ /* * HiSilicon SoC DDRC uncore Hardware event counters support * - * Copyright (C) 2017 Hisilicon Limited + * Copyright (C) 2017 HiSilicon Limited * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> * Anurup M <anurup.m@huawei.com> * @@ -537,7 +537,6 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n"); cpuhp_state_remove_instance_nocalls( CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node); - irq_set_affinity_hint(ddrc_pmu->irq, NULL); } return ret; @@ -550,8 +549,6 @@ static int hisi_ddrc_pmu_remove(struct platform_device *pdev) perf_pmu_unregister(&ddrc_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node); - irq_set_affinity_hint(ddrc_pmu->irq, NULL); - return 0; } diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 0316fabe32f1..393513150106 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -2,7 +2,7 @@ /* * HiSilicon SoC HHA uncore Hardware event counters support * - * Copyright (C) 2017 Hisilicon Limited + * Copyright (C) 2017 HiSilicon Limited * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> * Anurup M <anurup.m@huawei.com> * @@ -90,7 +90,7 @@ static void hisi_hha_pmu_config_ds(struct perf_event *event) val = readl(hha_pmu->base + HHA_DATSRC_CTRL); val |= HHA_DATSRC_SKT_EN; - writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL); + writel(val, hha_pmu->base + HHA_DATSRC_CTRL); } } @@ -104,7 +104,7 @@ static void hisi_hha_pmu_clear_ds(struct perf_event *event) val = readl(hha_pmu->base + HHA_DATSRC_CTRL); val &= ~HHA_DATSRC_SKT_EN; - writel(ds_skt, hha_pmu->base + HHA_DATSRC_CTRL); + writel(val, hha_pmu->base + HHA_DATSRC_CTRL); } } @@ -540,7 +540,6 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) dev_err(hha_pmu->dev, "HHA PMU register failed!\n"); cpuhp_state_remove_instance_nocalls( CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node); - irq_set_affinity_hint(hha_pmu->irq, NULL); } return ret; @@ -553,8 +552,6 @@ static int hisi_hha_pmu_remove(struct platform_device *pdev) perf_pmu_unregister(&hha_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node); - irq_set_affinity_hint(hha_pmu->irq, NULL); - return 0; } diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index bf9f7772cac9..560ab964c8b5 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -2,7 +2,7 @@ /* * HiSilicon SoC L3C uncore Hardware event counters support * - * Copyright (C) 2017 Hisilicon Limited + * Copyright (C) 2017 HiSilicon Limited * Author: Anurup M <anurup.m@huawei.com> * Shaokun Zhang <zhangshaokun@hisilicon.com> * @@ -578,7 +578,6 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) dev_err(l3c_pmu->dev, "L3C PMU register failed!\n"); cpuhp_state_remove_instance_nocalls( CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node); - irq_set_affinity_hint(l3c_pmu->irq, NULL); } return ret; @@ -591,8 +590,6 @@ static int hisi_l3c_pmu_remove(struct platform_device *pdev) 
perf_pmu_unregister(&l3c_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node); - irq_set_affinity_hint(l3c_pmu->irq, NULL); - return 0; } diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c index 14f23eb31248..83264ec0a957 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -333,7 +333,7 @@ static struct attribute *hisi_pa_pmu_identifier_attrs[] = { NULL }; -static struct attribute_group hisi_pa_pmu_identifier_group = { +static const struct attribute_group hisi_pa_pmu_identifier_group = { .attrs = hisi_pa_pmu_identifier_attrs, }; @@ -436,7 +436,6 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev) dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret); cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, &pa_pmu->node); - irq_set_affinity_hint(pa_pmu->irq, NULL); return ret; } @@ -451,8 +450,6 @@ static int hisi_pa_pmu_remove(struct platform_device *pdev) perf_pmu_unregister(&pa_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, &pa_pmu->node); - irq_set_affinity_hint(pa_pmu->irq, NULL); - return 0; } diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 13c68b5e39c4..a738aeab5c04 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -2,7 +2,7 @@ /* * HiSilicon SoC Hardware event counters support * - * Copyright (C) 2017 Hisilicon Limited + * Copyright (C) 2017 HiSilicon Limited * Author: Anurup M <anurup.m@huawei.com> * Shaokun Zhang <zhangshaokun@hisilicon.com> * @@ -488,7 +488,7 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) hisi_pmu->on_cpu = cpu; /* Overflow interrupt also should use the same CPU */ - WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(cpu))); + WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu))); return 0; } @@ -521,7 +521,7 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target); /* Use this CPU for event counting */ hisi_pmu->on_cpu = target; - WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(target))); + WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target))); return 0; } diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h index ea9d89bbc1ea..7f5841d6f592 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.h +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -2,7 +2,7 @@ /* * HiSilicon SoC Hardware event counters support * - * Copyright (C) 2017 Hisilicon Limited + * Copyright (C) 2017 HiSilicon Limited * Author: Anurup M <anurup.m@huawei.com> * Shaokun Zhang <zhangshaokun@hisilicon.com> * diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c index 46be312fa126..6aedc303ff56 100644 --- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c @@ -2,7 +2,7 @@ /* * HiSilicon SLLC uncore Hardware event counters support * - * Copyright (C) 2020 Hisilicon Limited + * Copyright (C) 2020 HiSilicon Limited * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> * * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
@@ -366,7 +366,7 @@ static struct attribute *hisi_sllc_pmu_identifier_attrs[] = { NULL }; -static struct attribute_group hisi_sllc_pmu_identifier_group = { +static const struct attribute_group hisi_sllc_pmu_identifier_group = { .attrs = hisi_sllc_pmu_identifier_attrs, }; @@ -465,7 +465,6 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev) dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret); cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, &sllc_pmu->node); - irq_set_affinity_hint(sllc_pmu->irq, NULL); return ret; } @@ -481,8 +480,6 @@ static int hisi_sllc_pmu_remove(struct platform_device *pdev) perf_pmu_unregister(&sllc_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, &sllc_pmu->node); - irq_set_affinity_hint(sllc_pmu->irq, NULL); - return 0; } diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index fc54a80f9c5c..5b093badd0f6 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -679,11 +679,8 @@ static ssize_t l2cache_pmu_event_show(struct device *dev, return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); } -#define L2CACHE_EVENT_ATTR(_name, _id) \ - (&((struct perf_pmu_events_attr[]) { \ - { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \ - .id = _id, } \ - })[0].attr.attr) +#define L2CACHE_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR_ID(_name, l2cache_pmu_event_show, _id) static struct attribute *l2_cache_pmu_events[] = { L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES), @@ -869,14 +866,14 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) irq = platform_get_irq(sdev, 0); if (irq < 0) return irq; - irq_set_status_flags(irq, IRQ_NOAUTOEN); cluster->irq = irq; cluster->l2cache_pmu = l2cache_pmu; cluster->on_cpu = -1; err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq, - IRQF_NOBALANCING | IRQF_NO_THREAD, + IRQF_NOBALANCING | IRQF_NO_THREAD | + IRQF_NO_AUTOEN, "l2-cache-pmu", cluster); if (err) { dev_err(&pdev->dev, diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c index bba078077c93..1ff2ff6582bf 100644 --- a/drivers/perf/qcom_l3_pmu.c +++ b/drivers/perf/qcom_l3_pmu.c @@ -647,10 +647,7 @@ static ssize_t l3cache_pmu_event_show(struct device *dev, } #define L3CACHE_EVENT_ATTR(_name, _id) \ - (&((struct perf_pmu_events_attr[]) { \ - { .attr = __ATTR(_name, 0444, l3cache_pmu_event_show, NULL), \ - .id = _id, } \ - })[0].attr.attr) + PMU_EVENT_ATTR_ID(_name, l3cache_pmu_event_show, _id) static struct attribute *qcom_l3_cache_pmu_events[] = { L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES), @@ -670,15 +667,15 @@ static const struct attribute_group qcom_l3_cache_pmu_events_group = { /* cpumask */ -static ssize_t qcom_l3_cache_pmu_cpumask_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev)); return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask); } -static DEVICE_ATTR(cpumask, 0444, qcom_l3_cache_pmu_cpumask_show, NULL); +static DEVICE_ATTR_RO(cpumask); static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = { &dev_attr_cpumask.attr, @@ -767,10 +764,8 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev) memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0); l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc); - if (IS_ERR(l3pmu->regs)) { - dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start); + if (IS_ERR(l3pmu->regs)) 
return PTR_ERR(l3pmu->regs); - } qcom_l3_cache__init(l3pmu); diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c index 06a6d569b0b5..fc1a376ee906 100644 --- a/drivers/perf/thunderx2_pmu.c +++ b/drivers/perf/thunderx2_pmu.c @@ -817,10 +817,8 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev, } base = devm_ioremap_resource(dev, &res); - if (IS_ERR(base)) { - dev_err(dev, "PMU type %d: Fail to map resource\n", type); + if (IS_ERR(base)) return NULL; - } tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL); if (!tx2_pmu) diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index ffe3bdeec845..2b6d476bd213 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -278,17 +278,14 @@ static const struct attribute_group mc_pmu_v3_format_attr_group = { static ssize_t xgene_pmu_event_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dev_ext_attribute *eattr; + struct perf_pmu_events_attr *pmu_attr = + container_of(attr, struct perf_pmu_events_attr, attr); - eattr = container_of(attr, struct dev_ext_attribute, attr); - return sysfs_emit(buf, "config=0x%lx\n", (unsigned long) eattr->var); + return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id); } #define XGENE_PMU_EVENT_ATTR(_name, _config) \ - (&((struct dev_ext_attribute[]) { \ - { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \ - .var = (void *) _config, } \ - })[0].attr.attr) + PMU_EVENT_ATTR_ID(_name, xgene_pmu_event_show, _config) static struct attribute *l3c_pmu_events_attrs[] = { XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), @@ -604,15 +601,15 @@ static const struct attribute_group mc_pmu_v3_events_attr_group = { /* * sysfs cpumask attributes */ -static ssize_t xgene_pmu_cpumask_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev)); return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu); } -static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL); +static DEVICE_ATTR_RO(cpumask); static struct attribute *xgene_pmu_cpumask_attrs[] = { &dev_attr_cpumask.attr, diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4777850a6dc7..35a374241515 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -319,39 +319,8 @@ struct irq_affinity_desc { extern cpumask_var_t irq_default_affinity; -/* Internal implementation. Use the helpers below */ -extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, - bool force); - -/** - * irq_set_affinity - Set the irq affinity of a given irq - * @irq: Interrupt to set affinity - * @cpumask: cpumask - * - * Fails if cpumask does not contain an online CPU - */ -static inline int -irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) -{ - return __irq_set_affinity(irq, cpumask, false); -} - -/** - * irq_force_affinity - Force the irq affinity of a given irq - * @irq: Interrupt to set affinity - * @cpumask: cpumask - * - * Same as irq_set_affinity, but without checking the mask against - * online cpus. - * - * Solely for low level cpu hotplug code, where we need to make per - * cpu interrupts affine before the cpu becomes online. 
- */ -static inline int -irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) -{ - return __irq_set_affinity(irq, cpumask, true); -} +extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); +extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask); extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f5a6a2f069ed..2d510ad750ed 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1576,6 +1576,12 @@ static struct perf_pmu_events_attr _var = { \ .event_str = _str, \ }; +#define PMU_EVENT_ATTR_ID(_name, _show, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, _show, NULL), \ + .id = _id, } \ + })[0].attr.attr) + #define PMU_FORMAT_ATTR(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 4c14356543d9..a847dd2044c8 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -441,7 +441,8 @@ out_unlock: return ret; } -int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) +static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, + bool force) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; @@ -456,6 +457,36 @@ int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) return ret; } +/** + * irq_set_affinity - Set the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + * Fails if cpumask does not contain an online CPU + */ +int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, false); +} +EXPORT_SYMBOL_GPL(irq_set_affinity); + +/** + * irq_force_affinity - Force the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + * Same as irq_set_affinity, but without checking the mask against + * online cpus. + * + * Solely for low level cpu hotplug code, where we need to make per + * cpu interrupts affine before the cpu becomes online. + */ +int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, true); +} +EXPORT_SYMBOL_GPL(irq_force_affinity); + int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) { unsigned long flags; |
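
As a worked example of the "Log2(number of bytes), plus one" encoding handled by bus_width_show() in the arch/arm64/kernel/perf_event.c hunk above, a small standalone sketch (plain userspace C, not kernel code; the helper name is made up for illustration):

#include <stdio.h>

/* PMMIR.BUS_WIDTH encodes log2(bytes) + 1; 0 means not implemented. */
static unsigned int decode_bus_width(unsigned int field)
{
        /* Valid encodings are 3..12, i.e. 4 bytes up to 2048 bytes */
        if (field > 2 && field < 13)
                return 1u << (field - 1);
        return 0;
}

int main(void)
{
        /* A field value of 4 decodes to 1 << 3 = 8 bytes */
        printf("0x%08x\n", decode_bus_width(4));
        return 0;
}

This mirrors the sysfs behaviour: caps/bus_width reports the decoded byte count (or 0), not the raw PMMIR field.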