Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/autoprobe.c    |   26
-rw-r--r--  kernel/irq/chip.c         |  631
-rw-r--r--  kernel/irq/cpuhotplug.c   |   12
-rw-r--r--  kernel/irq/debugfs.c      |    7
-rw-r--r--  kernel/irq/generic-chip.c |   47
-rw-r--r--  kernel/irq/internals.h    |   48
-rw-r--r--  kernel/irq/irqdesc.c      |  176
-rw-r--r--  kernel/irq/irqdomain.c    |  130
-rw-r--r--  kernel/irq/manage.c       | 1166
-rw-r--r--  kernel/irq/msi.c          |  192
-rw-r--r--  kernel/irq/pm.c           |   38
-rw-r--r--  kernel/irq/proc.c         |   67
-rw-r--r--  kernel/irq/resend.c       |   50
-rw-r--r--  kernel/irq/spurious.c     |  104
14 files changed, 1148 insertions, 1546 deletions
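
The recurring shape in the series below: open-coded raw_spin_lock*/unlock pairs, with their goto-based unlock exits, are replaced by the guard()/scoped_guard() helpers from <linux/cleanup.h>, which release the lock automatically when the scope is left. As a rough before/after illustration of that pattern — a sketch, not part of the patch; example_lock, example_state and my_set_state_*() are made-up names:

	/*
	 * Illustrative sketch only, assuming <linux/cleanup.h> and
	 * <linux/spinlock.h>; none of these names appear in the patch.
	 */
	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(example_lock);
	static int example_state;

	/* Before: every exit path must unlock by hand. */
	static int my_set_state_old(int val)
	{
		unsigned long flags;
		int ret = -EINVAL;

		raw_spin_lock_irqsave(&example_lock, flags);
		if (val >= 0) {
			example_state = val;
			ret = 0;
		}
		raw_spin_unlock_irqrestore(&example_lock, flags);
		return ret;
	}

	/* After: the guard unlocks automatically on any return. */
	static int my_set_state_new(int val)
	{
		guard(raw_spinlock_irqsave)(&example_lock);

		if (val < 0)
			return -EINVAL;	/* implicit unlock here */
		example_state = val;
		return 0;		/* and here */
	}

Because every early return now drops the lock implicitly (scoped_guard() does the same for a delimited block), the error paths no longer need shared out/out_unlock labels — which is what lets the patches below delete those exits wholesale.
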
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index ae60cae24e9a..d0af8a8b3ae6 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -43,18 +43,16 @@ unsigned long probe_irq_on(void) * flush such a longstanding irq before considering it as spurious. */ for_each_irq_desc_reverse(i, desc) { - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (!desc->action && irq_settings_can_probe(desc)) { /* * Some chips need to know about probing in * progress: */ if (desc->irq_data.chip->irq_set_type) - desc->irq_data.chip->irq_set_type(&desc->irq_data, - IRQ_TYPE_PROBE); + desc->irq_data.chip->irq_set_type(&desc->irq_data, IRQ_TYPE_PROBE); irq_activate_and_startup(desc, IRQ_NORESEND); } - raw_spin_unlock_irq(&desc->lock); } /* Wait for longstanding interrupts to trigger. */ @@ -66,13 +64,12 @@ unsigned long probe_irq_on(void) * happened in the previous stage, it may have masked itself) */ for_each_irq_desc_reverse(i, desc) { - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (!desc->action && irq_settings_can_probe(desc)) { desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; if (irq_activate_and_startup(desc, IRQ_NORESEND)) desc->istate |= IRQS_PENDING; } - raw_spin_unlock_irq(&desc->lock); } /* @@ -84,18 +81,16 @@ unsigned long probe_irq_on(void) * Now filter out any obviously spurious interrupts */ for_each_irq_desc(i, desc) { - raw_spin_lock_irq(&desc->lock); - + guard(raw_spinlock_irq)(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { /* It triggered already - consider it spurious. */ if (!(desc->istate & IRQS_WAITING)) { desc->istate &= ~IRQS_AUTODETECT; irq_shutdown_and_deactivate(desc); - } else - if (i < 32) - mask |= 1 << i; + } else if (i < 32) { + mask |= 1 << i; + } } - raw_spin_unlock_irq(&desc->lock); } return mask; @@ -121,7 +116,7 @@ unsigned int probe_irq_mask(unsigned long val) int i; for_each_irq_desc(i, desc) { - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { if (i < 16 && !(desc->istate & IRQS_WAITING)) mask |= 1 << i; @@ -129,7 +124,6 @@ unsigned int probe_irq_mask(unsigned long val) desc->istate &= ~IRQS_AUTODETECT; irq_shutdown_and_deactivate(desc); } - raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); @@ -160,8 +154,7 @@ int probe_irq_off(unsigned long val) struct irq_desc *desc; for_each_irq_desc(i, desc) { - raw_spin_lock_irq(&desc->lock); - + guard(raw_spinlock_irq)(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { if (!(desc->istate & IRQS_WAITING)) { if (!nr_of_irqs) @@ -171,7 +164,6 @@ int probe_irq_off(unsigned long val) desc->istate &= ~IRQS_AUTODETECT; irq_shutdown_and_deactivate(desc); } - raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 36cf1b09cc84..b0e0a7332993 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -34,98 +34,80 @@ struct irqaction chained_action = { }; /** - * irq_set_chip - set the irq chip for an irq - * @irq: irq number - * @chip: pointer to irq chip description structure + * irq_set_chip - set the irq chip for an irq + * @irq: irq number + * @chip: pointer to irq chip description structure */ int irq_set_chip(unsigned int irq, const struct irq_chip *chip) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); + int ret = -EINVAL; - if (!desc) - return -EINVAL; - - desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); - irq_put_desc_unlock(desc, flags); - /* - * For 
!CONFIG_SPARSE_IRQ make the irq show up in - * allocated_irqs. - */ - irq_mark_irq(irq); - return 0; + scoped_irqdesc_get_and_lock(irq, 0) { + scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); + ret = 0; + } + /* For !CONFIG_SPARSE_IRQ make the irq show up in allocated_irqs. */ + if (!ret) + irq_mark_irq(irq); + return ret; } EXPORT_SYMBOL(irq_set_chip); /** - * irq_set_irq_type - set the irq trigger type for an irq - * @irq: irq number - * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h + * irq_set_irq_type - set the irq trigger type for an irq + * @irq: irq number + * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h */ int irq_set_irq_type(unsigned int irq, unsigned int type) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - int ret = 0; - - if (!desc) - return -EINVAL; - - ret = __irq_set_trigger(desc, type); - irq_put_desc_busunlock(desc, flags); - return ret; + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) + return __irq_set_trigger(scoped_irqdesc, type); + return -EINVAL; } EXPORT_SYMBOL(irq_set_irq_type); /** - * irq_set_handler_data - set irq handler data for an irq - * @irq: Interrupt number - * @data: Pointer to interrupt specific data + * irq_set_handler_data - set irq handler data for an irq + * @irq: Interrupt number + * @data: Pointer to interrupt specific data * - * Set the hardware irq controller data for an irq + * Set the hardware irq controller data for an irq */ int irq_set_handler_data(unsigned int irq, void *data) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - - if (!desc) - return -EINVAL; - desc->irq_common_data.handler_data = data; - irq_put_desc_unlock(desc, flags); - return 0; + scoped_irqdesc_get_and_lock(irq, 0) { + scoped_irqdesc->irq_common_data.handler_data = data; + return 0; + } + return -EINVAL; } EXPORT_SYMBOL(irq_set_handler_data); /** - * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset - * @irq_base: Interrupt number base - * @irq_offset: Interrupt number offset - * @entry: Pointer to MSI descriptor data + * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset + * @irq_base: Interrupt number base + * @irq_offset: Interrupt number offset + * @entry: Pointer to MSI descriptor data * - * Set the MSI descriptor entry for an irq at offset + * Set the MSI descriptor entry for an irq at offset */ -int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, - struct msi_desc *entry) -{ - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - - if (!desc) - return -EINVAL; - desc->irq_common_data.msi_desc = entry; - if (entry && !irq_offset) - entry->irq = irq_base; - irq_put_desc_unlock(desc, flags); - return 0; +int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry) +{ + scoped_irqdesc_get_and_lock(irq_base + irq_offset, IRQ_GET_DESC_CHECK_GLOBAL) { + scoped_irqdesc->irq_common_data.msi_desc = entry; + if (entry && !irq_offset) + entry->irq = irq_base; + return 0; + } + return -EINVAL; } /** - * irq_set_msi_desc - set MSI descriptor data for an irq - * @irq: Interrupt number - * @entry: Pointer to MSI descriptor data + * irq_set_msi_desc - set MSI descriptor data for an irq + * @irq: Interrupt number + * @entry: Pointer to MSI descriptor data * - * Set the MSI descriptor entry for an irq + * Set the MSI descriptor entry for an 
irq */ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) { @@ -133,22 +115,19 @@ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) } /** - * irq_set_chip_data - set irq chip data for an irq - * @irq: Interrupt number - * @data: Pointer to chip specific data + * irq_set_chip_data - set irq chip data for an irq + * @irq: Interrupt number + * @data: Pointer to chip specific data * - * Set the hardware irq chip data for an irq + * Set the hardware irq chip data for an irq */ int irq_set_chip_data(unsigned int irq, void *data) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - - if (!desc) - return -EINVAL; - desc->irq_data.chip_data = data; - irq_put_desc_unlock(desc, flags); - return 0; + scoped_irqdesc_get_and_lock(irq, 0) { + scoped_irqdesc->irq_data.chip_data = data; + return 0; + } + return -EINVAL; } EXPORT_SYMBOL(irq_set_chip_data); @@ -223,6 +202,19 @@ __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, return IRQ_STARTUP_ABORT; return IRQ_STARTUP_MANAGED; } + +void irq_startup_managed(struct irq_desc *desc) +{ + /* + * Only start it up when the disable depth is 1, so that a disable, + * hotunplug, hotplug sequence does not end up enabling it during + * hotplug unconditionally. + */ + desc->depth--; + if (!desc->depth) + irq_startup(desc, IRQ_RESEND, IRQ_START_COND); +} + #else static __always_inline int __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, @@ -290,6 +282,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) ret = __irq_startup(desc); break; case IRQ_STARTUP_ABORT: + desc->depth = 1; irqd_set_managed_shutdown(d); return 0; } @@ -322,7 +315,13 @@ void irq_shutdown(struct irq_desc *desc) { if (irqd_is_started(&desc->irq_data)) { clear_irq_resend(desc); - desc->depth = 1; + /* + * Increment disable depth, so that a managed shutdown on + * CPU hotunplug preserves the actual disabled state when the + * CPU comes back online. See irq_startup_managed(). + */ + desc->depth++; + if (desc->irq_data.chip->irq_shutdown) { desc->irq_data.chip->irq_shutdown(&desc->irq_data); irq_state_set_disabled(desc); @@ -450,48 +449,6 @@ void unmask_threaded_irq(struct irq_desc *desc) unmask_irq(desc); } -/* - * handle_nested_irq - Handle a nested irq from a irq thread - * @irq: the interrupt number - * - * Handle interrupts which are nested into a threaded interrupt - * handler. The handler function is called inside the calling - * threads context. 
- */ -void handle_nested_irq(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - struct irqaction *action; - irqreturn_t action_ret; - - might_sleep(); - - raw_spin_lock_irq(&desc->lock); - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - action = desc->action; - if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - raw_spin_unlock_irq(&desc->lock); - return; - } - - kstat_incr_irqs_this_cpu(desc); - atomic_inc(&desc->threads_active); - raw_spin_unlock_irq(&desc->lock); - - action_ret = IRQ_NONE; - for_each_action_of_desc(desc, action) - action_ret |= action->thread_fn(action->irq, action->dev_id); - - if (!irq_settings_no_debug(desc)) - note_interrupt(desc, action_ret); - - wake_threads_waitq(desc); -} -EXPORT_SYMBOL_GPL(handle_nested_irq); - static bool irq_check_poll(struct irq_desc *desc) { if (!(desc->istate & IRQS_POLL_INPROGRESS)) @@ -499,7 +456,7 @@ static bool irq_check_poll(struct irq_desc *desc) return irq_wait_for_poll(desc); } -static bool irq_may_run(struct irq_desc *desc) +static bool irq_can_handle_pm(struct irq_desc *desc) { unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; @@ -524,77 +481,111 @@ static bool irq_may_run(struct irq_desc *desc) return irq_check_poll(desc); } +static inline bool irq_can_handle_actions(struct irq_desc *desc) +{ + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + + if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { + desc->istate |= IRQS_PENDING; + return false; + } + return true; +} + +static inline bool irq_can_handle(struct irq_desc *desc) +{ + if (!irq_can_handle_pm(desc)) + return false; + + return irq_can_handle_actions(desc); +} + /** - * handle_simple_irq - Simple and software-decoded IRQs. - * @desc: the interrupt description structure for this irq - * - * Simple interrupts are either sent from a demultiplexing interrupt - * handler or come from hardware, where no interrupt hardware control - * is necessary. + * handle_nested_irq - Handle a nested irq from a irq thread + * @irq: the interrupt number * - * Note: The caller is expected to handle the ack, clear, mask and - * unmask issues if necessary. + * Handle interrupts which are nested into a threaded interrupt + * handler. The handler function is called inside the calling threads + * context. */ -void handle_simple_irq(struct irq_desc *desc) +void handle_nested_irq(unsigned int irq) { - raw_spin_lock(&desc->lock); + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + irqreturn_t action_ret; - if (!irq_may_run(desc)) - goto out_unlock; + might_sleep(); - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + scoped_guard(raw_spinlock_irq, &desc->lock) { + if (!irq_can_handle_actions(desc)) + return; - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - goto out_unlock; + action = desc->action; + kstat_incr_irqs_this_cpu(desc); + atomic_inc(&desc->threads_active); } + action_ret = IRQ_NONE; + for_each_action_of_desc(desc, action) + action_ret |= action->thread_fn(action->irq, action->dev_id); + + if (!irq_settings_no_debug(desc)) + note_interrupt(desc, action_ret); + + wake_threads_waitq(desc); +} +EXPORT_SYMBOL_GPL(handle_nested_irq); + +/** + * handle_simple_irq - Simple and software-decoded IRQs. + * @desc: the interrupt description structure for this irq + * + * Simple interrupts are either sent from a demultiplexing interrupt + * handler or come from hardware, where no interrupt hardware control is + * necessary. 
+ * + * Note: The caller is expected to handle the ack, clear, mask and unmask + * issues if necessary. + */ +void handle_simple_irq(struct irq_desc *desc) +{ + guard(raw_spinlock)(&desc->lock); + + if (!irq_can_handle(desc)) + return; + kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); - -out_unlock: - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_simple_irq); /** - * handle_untracked_irq - Simple and software-decoded IRQs. - * @desc: the interrupt description structure for this irq + * handle_untracked_irq - Simple and software-decoded IRQs. + * @desc: the interrupt description structure for this irq * - * Untracked interrupts are sent from a demultiplexing interrupt - * handler when the demultiplexer does not know which device it its - * multiplexed irq domain generated the interrupt. IRQ's handled - * through here are not subjected to stats tracking, randomness, or - * spurious interrupt detection. + * Untracked interrupts are sent from a demultiplexing interrupt handler + * when the demultiplexer does not know which device it its multiplexed irq + * domain generated the interrupt. IRQ's handled through here are not + * subjected to stats tracking, randomness, or spurious interrupt + * detection. * - * Note: Like handle_simple_irq, the caller is expected to handle - * the ack, clear, mask and unmask issues if necessary. + * Note: Like handle_simple_irq, the caller is expected to handle the ack, + * clear, mask and unmask issues if necessary. */ void handle_untracked_irq(struct irq_desc *desc) { - raw_spin_lock(&desc->lock); - - if (!irq_may_run(desc)) - goto out_unlock; - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + scoped_guard(raw_spinlock, &desc->lock) { + if (!irq_can_handle(desc)) + return; - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - goto out_unlock; + desc->istate &= ~IRQS_PENDING; + irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); } - desc->istate &= ~IRQS_PENDING; - irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); - raw_spin_unlock(&desc->lock); - __handle_irq_event_percpu(desc); - raw_spin_lock(&desc->lock); - irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); - -out_unlock: - raw_spin_unlock(&desc->lock); + scoped_guard(raw_spinlock, &desc->lock) + irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); } EXPORT_SYMBOL_GPL(handle_untracked_irq); @@ -617,40 +608,26 @@ static void cond_unmask_irq(struct irq_desc *desc) } /** - * handle_level_irq - Level type irq handler - * @desc: the interrupt description structure for this irq + * handle_level_irq - Level type irq handler + * @desc: the interrupt description structure for this irq * - * Level type interrupts are active as long as the hardware line has - * the active level. This may require to mask the interrupt and unmask - * it after the associated handler has acknowledged the device, so the - * interrupt line is back to inactive. + * Level type interrupts are active as long as the hardware line has the + * active level. This may require to mask the interrupt and unmask it after + * the associated handler has acknowledged the device, so the interrupt + * line is back to inactive. 
*/ void handle_level_irq(struct irq_desc *desc) { - raw_spin_lock(&desc->lock); + guard(raw_spinlock)(&desc->lock); mask_ack_irq(desc); - if (!irq_may_run(desc)) - goto out_unlock; - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - /* - * If its disabled or no action available - * keep it masked and get out of here - */ - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - goto out_unlock; - } + if (!irq_can_handle(desc)) + return; kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); cond_unmask_irq(desc); - -out_unlock: - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_level_irq); @@ -675,42 +652,43 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) } } +static inline void cond_eoi_irq(struct irq_chip *chip, struct irq_data *data) +{ + if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) + chip->irq_eoi(data); +} + /** - * handle_fasteoi_irq - irq handler for transparent controllers - * @desc: the interrupt description structure for this irq + * handle_fasteoi_irq - irq handler for transparent controllers + * @desc: the interrupt description structure for this irq * - * Only a single callback will be issued to the chip: an ->eoi() - * call when the interrupt has been serviced. This enables support - * for modern forms of interrupt handlers, which handle the flow - * details in hardware, transparently. + * Only a single callback will be issued to the chip: an ->eoi() call when + * the interrupt has been serviced. This enables support for modern forms + * of interrupt handlers, which handle the flow details in hardware, + * transparently. */ void handle_fasteoi_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; - raw_spin_lock(&desc->lock); + guard(raw_spinlock)(&desc->lock); /* * When an affinity change races with IRQ handling, the next interrupt * can arrive on the new CPU before the original CPU has completed * handling the previous one - it may need to be resent. */ - if (!irq_may_run(desc)) { + if (!irq_can_handle_pm(desc)) { if (irqd_needs_resend_when_in_progress(&desc->irq_data)) desc->istate |= IRQS_PENDING; - goto out; + cond_eoi_irq(chip, &desc->irq_data); + return; } - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - /* - * If its disabled or no action available - * then mask it and get out of here: - */ - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; + if (!irq_can_handle_actions(desc)) { mask_irq(desc); - goto out; + cond_eoi_irq(chip, &desc->irq_data); + return; } kstat_incr_irqs_this_cpu(desc); @@ -726,13 +704,6 @@ void handle_fasteoi_irq(struct irq_desc *desc) */ if (unlikely(desc->istate & IRQS_PENDING)) check_irq_resend(desc, false); - - raw_spin_unlock(&desc->lock); - return; -out: - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) - chip->irq_eoi(&desc->irq_data); - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_irq); @@ -770,40 +741,27 @@ void handle_fasteoi_nmi(struct irq_desc *desc) EXPORT_SYMBOL_GPL(handle_fasteoi_nmi); /** - * handle_edge_irq - edge type IRQ handler - * @desc: the interrupt description structure for this irq + * handle_edge_irq - edge type IRQ handler + * @desc: the interrupt description structure for this irq * - * Interrupt occurs on the falling and/or rising edge of a hardware - * signal. The occurrence is latched into the irq controller hardware - * and must be acked in order to be reenabled. 
After the ack another - * interrupt can happen on the same source even before the first one - * is handled by the associated event handler. If this happens it - * might be necessary to disable (mask) the interrupt depending on the - * controller hardware. This requires to reenable the interrupt inside - * of the loop which handles the interrupts which have arrived while - * the handler was running. If all pending interrupts are handled, the - * loop is left. + * Interrupt occurs on the falling and/or rising edge of a hardware + * signal. The occurrence is latched into the irq controller hardware and + * must be acked in order to be reenabled. After the ack another interrupt + * can happen on the same source even before the first one is handled by + * the associated event handler. If this happens it might be necessary to + * disable (mask) the interrupt depending on the controller hardware. This + * requires to reenable the interrupt inside of the loop which handles the + * interrupts which have arrived while the handler was running. If all + * pending interrupts are handled, the loop is left. */ void handle_edge_irq(struct irq_desc *desc) { - raw_spin_lock(&desc->lock); - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - if (!irq_may_run(desc)) { - desc->istate |= IRQS_PENDING; - mask_ack_irq(desc); - goto out_unlock; - } + guard(raw_spinlock)(&desc->lock); - /* - * If its disabled or no action available then mask it and get - * out of here. - */ - if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { + if (!irq_can_handle(desc)) { desc->istate |= IRQS_PENDING; mask_ack_irq(desc); - goto out_unlock; + return; } kstat_incr_irqs_this_cpu(desc); @@ -814,7 +772,7 @@ void handle_edge_irq(struct irq_desc *desc) do { if (unlikely(!desc->action)) { mask_irq(desc); - goto out_unlock; + return; } /* @@ -830,11 +788,7 @@ void handle_edge_irq(struct irq_desc *desc) handle_irq_event(desc); - } while ((desc->istate & IRQS_PENDING) && - !irqd_irq_disabled(&desc->irq_data)); - -out_unlock: - raw_spin_unlock(&desc->lock); + } while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data)); } EXPORT_SYMBOL(handle_edge_irq); @@ -1007,35 +961,23 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, } } -void -__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, - const char *name) +void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + const char *name) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); - - if (!desc) - return; - - __irq_do_set_handler(desc, handle, is_chained, name); - irq_put_desc_busunlock(desc, flags); + scoped_irqdesc_get_and_lock(irq, 0) + __irq_do_set_handler(scoped_irqdesc, handle, is_chained, name); } EXPORT_SYMBOL_GPL(__irq_set_handler); -void -irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, - void *data) +void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, + void *data) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); - - if (!desc) - return; - - desc->irq_common_data.handler_data = data; - __irq_do_set_handler(desc, handle, 1, NULL); + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; - irq_put_desc_busunlock(desc, flags); + desc->irq_common_data.handler_data = data; + __irq_do_set_handler(desc, handle, 1, NULL); + } } EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); @@ -1050,38 +992,34 @@ 
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) { - unsigned long flags, trigger, tmp; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - - if (!desc) - return; - - /* - * Warn when a driver sets the no autoenable flag on an already - * active interrupt. - */ - WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); - - irq_settings_clr_and_set(desc, clr, set); + scoped_irqdesc_get_and_lock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; + unsigned long trigger, tmp; + /* + * Warn when a driver sets the no autoenable flag on an already + * active interrupt. + */ + WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); - trigger = irqd_get_trigger_type(&desc->irq_data); + irq_settings_clr_and_set(desc, clr, set); - irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | - IRQD_TRIGGER_MASK | IRQD_LEVEL); - if (irq_settings_has_no_balance_set(desc)) - irqd_set(&desc->irq_data, IRQD_NO_BALANCING); - if (irq_settings_is_per_cpu(desc)) - irqd_set(&desc->irq_data, IRQD_PER_CPU); - if (irq_settings_is_level(desc)) - irqd_set(&desc->irq_data, IRQD_LEVEL); + trigger = irqd_get_trigger_type(&desc->irq_data); - tmp = irq_settings_get_trigger_mask(desc); - if (tmp != IRQ_TYPE_NONE) - trigger = tmp; + irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | + IRQD_TRIGGER_MASK | IRQD_LEVEL); + if (irq_settings_has_no_balance_set(desc)) + irqd_set(&desc->irq_data, IRQD_NO_BALANCING); + if (irq_settings_is_per_cpu(desc)) + irqd_set(&desc->irq_data, IRQD_PER_CPU); + if (irq_settings_is_level(desc)) + irqd_set(&desc->irq_data, IRQD_LEVEL); - irqd_set(&desc->irq_data, trigger); + tmp = irq_settings_get_trigger_mask(desc); + if (tmp != IRQ_TYPE_NONE) + trigger = tmp; - irq_put_desc_unlock(desc, flags); + irqd_set(&desc->irq_data, trigger); + } } EXPORT_SYMBOL_GPL(irq_modify_status); @@ -1094,25 +1032,21 @@ EXPORT_SYMBOL_GPL(irq_modify_status); */ void irq_cpu_online(void) { - struct irq_desc *desc; - struct irq_chip *chip; - unsigned long flags; unsigned int irq; for_each_active_irq(irq) { - desc = irq_to_desc(irq); + struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip; + if (!desc) continue; - raw_spin_lock_irqsave(&desc->lock, flags); - + guard(raw_spinlock_irqsave)(&desc->lock); chip = irq_data_get_irq_chip(&desc->irq_data); if (chip && chip->irq_cpu_online && (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || !irqd_irq_disabled(&desc->irq_data))) chip->irq_cpu_online(&desc->irq_data); - - raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -1124,25 +1058,21 @@ void irq_cpu_online(void) */ void irq_cpu_offline(void) { - struct irq_desc *desc; - struct irq_chip *chip; - unsigned long flags; unsigned int irq; for_each_active_irq(irq) { - desc = irq_to_desc(irq); + struct irq_desc *desc = irq_to_desc(irq); + struct irq_chip *chip; + if (!desc) continue; - raw_spin_lock_irqsave(&desc->lock, flags); - + guard(raw_spinlock_irqsave)(&desc->lock); chip = irq_data_get_irq_chip(&desc->irq_data); if (chip && chip->irq_cpu_offline && (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || !irqd_irq_disabled(&desc->irq_data))) chip->irq_cpu_offline(&desc->irq_data); - - raw_spin_unlock_irqrestore(&desc->lock, flags); } } #endif @@ -1151,102 +1081,69 @@ void irq_cpu_offline(void) #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS /** - * handle_fasteoi_ack_irq - irq handler for edge hierarchy - * stacked on transparent controllers + * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on + * transparent 
controllers * - * @desc: the interrupt description structure for this irq + * @desc: the interrupt description structure for this irq * - * Like handle_fasteoi_irq(), but for use with hierarchy where - * the irq_chip also needs to have its ->irq_ack() function - * called. + * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip + * also needs to have its ->irq_ack() function called. */ void handle_fasteoi_ack_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; - raw_spin_lock(&desc->lock); - - if (!irq_may_run(desc)) - goto out; + guard(raw_spinlock)(&desc->lock); - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + if (!irq_can_handle_pm(desc)) { + cond_eoi_irq(chip, &desc->irq_data); + return; + } - /* - * If its disabled or no action available - * then mask it and get out of here: - */ - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; + if (unlikely(!irq_can_handle_actions(desc))) { mask_irq(desc); - goto out; + cond_eoi_irq(chip, &desc->irq_data); + return; } kstat_incr_irqs_this_cpu(desc); if (desc->istate & IRQS_ONESHOT) mask_irq(desc); - /* Start handling the irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); - - raw_spin_unlock(&desc->lock); - return; -out: - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) - chip->irq_eoi(&desc->irq_data); - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); /** - * handle_fasteoi_mask_irq - irq handler for level hierarchy - * stacked on transparent controllers + * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on + * transparent controllers * - * @desc: the interrupt description structure for this irq + * @desc: the interrupt description structure for this irq * - * Like handle_fasteoi_irq(), but for use with hierarchy where - * the irq_chip also needs to have its ->irq_mask_ack() function - * called. + * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip + * also needs to have its ->irq_mask_ack() function called. 
*/ void handle_fasteoi_mask_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; - raw_spin_lock(&desc->lock); + guard(raw_spinlock)(&desc->lock); mask_ack_irq(desc); - if (!irq_may_run(desc)) - goto out; - - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - - /* - * If its disabled or no action available - * then mask it and get out of here: - */ - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - desc->istate |= IRQS_PENDING; - mask_irq(desc); - goto out; + if (!irq_can_handle(desc)) { + cond_eoi_irq(chip, &desc->irq_data); + return; } kstat_incr_irqs_this_cpu(desc); - if (desc->istate & IRQS_ONESHOT) - mask_irq(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); - - raw_spin_unlock(&desc->lock); - return; -out: - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) - chip->irq_eoi(&desc->irq_data); - raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 15a7654eff68..f07529ae4895 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c @@ -177,9 +177,8 @@ void irq_migrate_all_off_this_cpu(void) bool affinity_broken; desc = irq_to_desc(irq); - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); + scoped_guard(raw_spinlock, &desc->lock) + affinity_broken = migrate_one_irq(desc); if (affinity_broken) { pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n", @@ -219,7 +218,7 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu) return; if (irqd_is_managed_and_shutdown(data)) - irq_startup(desc, IRQ_RESEND, IRQ_START_COND); + irq_startup_managed(desc); /* * If the interrupt can only be directed to a single target @@ -244,9 +243,8 @@ int irq_affinity_online_cpu(unsigned int cpu) irq_lock_sparse(); for_each_active_irq(irq) { desc = irq_to_desc(irq); - raw_spin_lock_irq(&desc->lock); - irq_restore_affinity_of_irq(desc, cpu); - raw_spin_unlock_irq(&desc->lock); + scoped_guard(raw_spinlock_irq, &desc->lock) + irq_restore_affinity_of_irq(desc, cpu); } irq_unlock_sparse(); diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index ca142b9a4db3..3527defd2890 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -160,7 +160,7 @@ static int irq_debug_show(struct seq_file *m, void *p) struct irq_desc *desc = m->private; struct irq_data *data; - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); data = irq_desc_get_irq_data(desc); seq_printf(m, "handler: %ps\n", desc->handle_irq); seq_printf(m, "device: %s\n", desc->dev_name); @@ -178,7 +178,6 @@ static int irq_debug_show(struct seq_file *m, void *p) seq_printf(m, "node: %d\n", irq_data_get_node(data)); irq_debug_show_masks(m, desc); irq_debug_show_data(m, data, 0); - raw_spin_unlock_irq(&desc->lock); return 0; } @@ -226,12 +225,12 @@ void irq_debugfs_copy_devname(int irq, struct device *dev) void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc) { - char name [10]; + char name [12]; if (!irq_dir || !desc || desc->debugfs_file) return; - sprintf(name, "%d", irq); + sprintf(name, "%u", irq); desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc, &dfs_irq_ops); } diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index c4a8bca5f2b0..bf59e37d650a 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c @@ -40,10 +40,9 @@ void irq_gc_mask_disable_reg(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = 
d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.disable); *ct->mask_cache &= ~mask; - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg); @@ -60,10 +59,9 @@ void irq_gc_mask_set_bit(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); *ct->mask_cache |= mask; irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit); @@ -80,10 +78,9 @@ void irq_gc_mask_clr_bit(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); *ct->mask_cache &= ~mask; irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit); @@ -100,10 +97,9 @@ void irq_gc_unmask_enable_reg(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.enable); *ct->mask_cache |= mask; - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg); @@ -117,9 +113,8 @@ void irq_gc_ack_set_bit(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit); @@ -133,9 +128,8 @@ void irq_gc_ack_clr_bit(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = ~d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); } /** @@ -156,11 +150,10 @@ void irq_gc_mask_disable_and_ack_set(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.disable); *ct->mask_cache &= ~mask; irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); } EXPORT_SYMBOL_GPL(irq_gc_mask_disable_and_ack_set); @@ -174,9 +167,8 @@ void irq_gc_eoi(struct irq_data *d) struct irq_chip_type *ct = irq_data_get_chip_type(d); u32 mask = d->mask; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); irq_reg_writel(gc, mask, ct->regs.eoi); - irq_gc_unlock(gc); } /** @@ -196,12 +188,11 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on) if (!(mask & gc->wake_enabled)) return -EINVAL; - irq_gc_lock(gc); + guard(raw_spinlock)(&gc->lock); if (on) gc->wake_active |= mask; else gc->wake_active &= ~mask; - irq_gc_unlock(gc); return 0; } EXPORT_SYMBOL_GPL(irq_gc_set_wake); @@ -288,7 +279,6 @@ int irq_domain_alloc_generic_chips(struct irq_domain *d, { struct irq_domain_chip_generic *dgc; struct irq_chip_generic *gc; - unsigned long flags; int numchips, i; size_t dgc_sz; size_t gc_sz; @@ -340,9 +330,8 @@ int irq_domain_alloc_generic_chips(struct irq_domain *d, goto err; } - raw_spin_lock_irqsave(&gc_lock, flags); - list_add_tail(&gc->list, &gc_list); - raw_spin_unlock_irqrestore(&gc_lock, flags); + scoped_guard (raw_spinlock_irqsave, &gc_lock) + list_add_tail(&gc->list, &gc_list); /* Calc pointer to the next generic chip */ tmp += gc_sz; } @@ -459,7 +448,6 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, struct irq_chip_generic *gc; struct irq_chip_type *ct; struct irq_chip *chip; - unsigned long flags; int idx; gc = __irq_get_domain_generic_chip(d, hw_irq); @@ -479,9 +467,8 @@ int 
irq_map_generic_chip(struct irq_domain *d, unsigned int virq, /* We only init the cache for the first mapping of a generic chip */ if (!gc->installed) { - raw_spin_lock_irqsave(&gc->lock, flags); + guard(raw_spinlock_irqsave)(&gc->lock); irq_gc_init_mask_cache(gc, dgc->gc_flags); - raw_spin_unlock_irqrestore(&gc->lock, flags); } /* Mark the interrupt as installed */ @@ -548,9 +535,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, struct irq_chip *chip = &ct->chip; unsigned int i; - raw_spin_lock(&gc_lock); - list_add_tail(&gc->list, &gc_list); - raw_spin_unlock(&gc_lock); + scoped_guard (raw_spinlock, &gc_lock) + list_add_tail(&gc->list, &gc_list); irq_gc_init_mask_cache(gc, flags); @@ -616,9 +602,8 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, { unsigned int i, virq; - raw_spin_lock(&gc_lock); - list_del(&gc->list); - raw_spin_unlock(&gc_lock); + scoped_guard (raw_spinlock, &gc_lock) + list_del(&gc->list); for (i = 0; msk; msk >>= 1, i++) { if (!(msk & 0x01)) diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index b0290849c395..aebfe225c9a6 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -87,6 +87,7 @@ extern void __enable_irq(struct irq_desc *desc); extern int irq_activate(struct irq_desc *desc); extern int irq_activate_and_startup(struct irq_desc *desc, bool resend); extern int irq_startup(struct irq_desc *desc, bool resend, bool force); +extern void irq_startup_managed(struct irq_desc *desc); extern void irq_shutdown(struct irq_desc *desc); extern void irq_shutdown_and_deactivate(struct irq_desc *desc); @@ -141,6 +142,10 @@ extern int irq_setup_affinity(struct irq_desc *desc); static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; } #endif + +#define for_each_action_of_desc(desc, act) \ + for (act = desc->action; act; act = act->next) + /* Inline functions for support of irq chips on slow busses */ static inline void chip_bus_lock(struct irq_desc *desc) { @@ -160,38 +165,33 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc) #define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) -#define for_each_action_of_desc(desc, act) \ - for (act = desc->action; act; act = act->next) - -struct irq_desc * -__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, - unsigned int check); +struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, + unsigned int check); void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); -static inline struct irq_desc * -irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check) -{ - return __irq_get_desc_lock(irq, flags, true, check); -} +__DEFINE_CLASS_IS_CONDITIONAL(irqdesc_lock, true); +__DEFINE_UNLOCK_GUARD(irqdesc_lock, struct irq_desc, + __irq_put_desc_unlock(_T->lock, _T->flags, _T->bus), + unsigned long flags; bool bus); -static inline void -irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags) +static inline class_irqdesc_lock_t class_irqdesc_lock_constructor(unsigned int irq, bool bus, + unsigned int check) { - __irq_put_desc_unlock(desc, flags, true); -} + class_irqdesc_lock_t _t = { .bus = bus, }; -static inline struct irq_desc * -irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check) -{ - return __irq_get_desc_lock(irq, flags, false, check); -} + _t.lock = __irq_get_desc_lock(irq, &_t.flags, bus, check); -static inline void -irq_put_desc_unlock(struct 
irq_desc *desc, unsigned long flags) -{ - __irq_put_desc_unlock(desc, flags, false); + return _t; } +#define scoped_irqdesc_get_and_lock(_irq, _check) \ + scoped_guard(irqdesc_lock, _irq, false, _check) + +#define scoped_irqdesc_get_and_buslock(_irq, _check) \ + scoped_guard(irqdesc_lock, _irq, true, _check) + +#define scoped_irqdesc ((struct irq_desc *)(__guard_ptr(irqdesc_lock)(&scope))) + #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) static inline unsigned int irqd_get(struct irq_data *d) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 4258cd6bd3b4..b64c57b44c20 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -246,8 +246,7 @@ static struct kobject *irq_kobj_base; #define IRQ_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) -static ssize_t per_cpu_count_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t per_cpu_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; @@ -257,112 +256,83 @@ static ssize_t per_cpu_count_show(struct kobject *kobj, for_each_possible_cpu(cpu) { unsigned int c = irq_desc_kstat_cpu(desc, cpu); - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c); + ret += sysfs_emit_at(buf, ret, "%s%u", p, c); p = ","; } - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); + ret += sysfs_emit_at(buf, ret, "\n"); return ret; } IRQ_ATTR_RO(per_cpu_count); -static ssize_t chip_name_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - - raw_spin_lock_irq(&desc->lock); - if (desc->irq_data.chip && desc->irq_data.chip->name) { - ret = scnprintf(buf, PAGE_SIZE, "%s\n", - desc->irq_data.chip->name); - } - raw_spin_unlock_irq(&desc->lock); - return ret; + guard(raw_spinlock_irq)(&desc->lock); + if (desc->irq_data.chip && desc->irq_data.chip->name) + return sysfs_emit(buf, "%s\n", desc->irq_data.chip->name); + return 0; } IRQ_ATTR_RO(chip_name); -static ssize_t hwirq_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (desc->irq_data.domain) - ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq); - raw_spin_unlock_irq(&desc->lock); - - return ret; + return sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq); + return 0; } IRQ_ATTR_RO(hwirq); -static ssize_t type_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - raw_spin_lock_irq(&desc->lock); - ret = sprintf(buf, "%s\n", - irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); - raw_spin_unlock_irq(&desc->lock); - - return ret; + guard(raw_spinlock_irq)(&desc->lock); + return sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? 
"level" : "edge"); } IRQ_ATTR_RO(type); -static ssize_t wakeup_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - - raw_spin_lock_irq(&desc->lock); - ret = sprintf(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); - raw_spin_unlock_irq(&desc->lock); - - return ret; + guard(raw_spinlock_irq)(&desc->lock); + return sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); } IRQ_ATTR_RO(wakeup); -static ssize_t name_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); - ssize_t ret = 0; - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); if (desc->name) - ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name); - raw_spin_unlock_irq(&desc->lock); - - return ret; + return sysfs_emit(buf, "%s\n", desc->name); + return 0; } IRQ_ATTR_RO(name); -static ssize_t actions_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); struct irqaction *action; ssize_t ret = 0; char *p = ""; - raw_spin_lock_irq(&desc->lock); - for_each_action_of_desc(desc, action) { - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", - p, action->name); - p = ","; + scoped_guard(raw_spinlock_irq, &desc->lock) { + for_each_action_of_desc(desc, action) { + ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name); + p = ","; + } } - raw_spin_unlock_irq(&desc->lock); if (ret) - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); - + ret += sysfs_emit_at(buf, ret, "\n"); return ret; } IRQ_ATTR_RO(actions); @@ -418,19 +388,14 @@ static int __init irq_sysfs_init(void) int irq; /* Prevent concurrent irq alloc/free */ - irq_lock_sparse(); - + guard(mutex)(&sparse_irq_lock); irq_kobj_base = kobject_create_and_add("irq", kernel_kobj); - if (!irq_kobj_base) { - irq_unlock_sparse(); + if (!irq_kobj_base) return -ENOMEM; - } /* Add the already allocated interrupts */ for_each_irq_desc(irq, desc) irq_sysfs_add(irq, desc); - irq_unlock_sparse(); - return 0; } postcore_initcall(irq_sysfs_init); @@ -573,12 +538,12 @@ err: return -ENOMEM; } -static int irq_expand_nr_irqs(unsigned int nr) +static bool irq_expand_nr_irqs(unsigned int nr) { if (nr > MAX_SPARSE_IRQS) - return -ENOMEM; + return false; nr_irqs = nr; - return 0; + return true; } int __init early_irq_init(void) @@ -656,11 +621,9 @@ EXPORT_SYMBOL(irq_to_desc); static void free_desc(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - raw_spin_lock_irqsave(&desc->lock, flags); - desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) + desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); delete_irq_desc(irq); } @@ -679,16 +642,15 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, return start; } -static int irq_expand_nr_irqs(unsigned int nr) +static inline bool irq_expand_nr_irqs(unsigned int nr) { - return -ENOMEM; + return false; } void irq_mark_irq(unsigned int irq) { - 
mutex_lock(&sparse_irq_lock); + guard(mutex)(&sparse_irq_lock); irq_insert_desc(irq, irq_desc + irq); - mutex_unlock(&sparse_irq_lock); } #ifdef CONFIG_GENERIC_IRQ_LEGACY @@ -827,11 +789,9 @@ void irq_free_descs(unsigned int from, unsigned int cnt) if (from >= nr_irqs || (from + cnt) > nr_irqs) return; - mutex_lock(&sparse_irq_lock); + guard(mutex)(&sparse_irq_lock); for (i = 0; i < cnt; i++) free_desc(from + i); - - mutex_unlock(&sparse_irq_lock); } EXPORT_SYMBOL_GPL(irq_free_descs); @@ -848,11 +808,10 @@ EXPORT_SYMBOL_GPL(irq_free_descs); * * Returns the first irq number or error code */ -int __ref -__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner, const struct irq_affinity_desc *affinity) +int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, + struct module *owner, const struct irq_affinity_desc *affinity) { - int start, ret; + int start; if (!cnt) return -EINVAL; @@ -870,22 +829,17 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, from = arch_dynirq_lower_bound(from); } - mutex_lock(&sparse_irq_lock); + guard(mutex)(&sparse_irq_lock); start = irq_find_free_area(from, cnt); - ret = -EEXIST; if (irq >=0 && start != irq) - goto unlock; + return -EEXIST; if (start + cnt > nr_irqs) { - ret = irq_expand_nr_irqs(start + cnt); - if (ret) - goto unlock; + if (!irq_expand_nr_irqs(start + cnt)) + return -ENOMEM; } - ret = alloc_descs(start, cnt, node, affinity, owner); -unlock: - mutex_unlock(&sparse_irq_lock); - return ret; + return alloc_descs(start, cnt, node, affinity, owner); } EXPORT_SYMBOL_GPL(__irq_alloc_descs); @@ -900,27 +854,27 @@ unsigned int irq_get_next_irq(unsigned int offset) return irq_find_at_or_after(offset); } -struct irq_desc * -__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, - unsigned int check) +struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, + unsigned int check) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc; - if (desc) { - if (check & _IRQ_DESC_CHECK) { - if ((check & _IRQ_DESC_PERCPU) && - !irq_settings_is_per_cpu_devid(desc)) - return NULL; - - if (!(check & _IRQ_DESC_PERCPU) && - irq_settings_is_per_cpu_devid(desc)) - return NULL; - } + desc = irq_to_desc(irq); + if (!desc) + return NULL; + + if (check & _IRQ_DESC_CHECK) { + if ((check & _IRQ_DESC_PERCPU) && !irq_settings_is_per_cpu_devid(desc)) + return NULL; - if (bus) - chip_bus_lock(desc); - raw_spin_lock_irqsave(&desc->lock, *flags); + if (!(check & _IRQ_DESC_PERCPU) && irq_settings_is_per_cpu_devid(desc)) + return NULL; } + + if (bus) + chip_bus_lock(desc); + raw_spin_lock_irqsave(&desc->lock, *flags); + return desc; } diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 9d5c8651492d..c8b6de09047b 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -480,33 +480,6 @@ struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(irq_domain_create_simple); -/** - * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. - * @of_node: pointer to interrupt controller's device tree node. - * @size: total number of irqs in legacy mapping - * @first_irq: first number of irq block assigned to the domain - * @first_hwirq: first hwirq number to use for the translation. Should normally - * be '0', but a positive integer can be used if the effective - * hwirqs numbering does not begin at zero. 
- * @ops: map/unmap domain callbacks - * @host_data: Controller private data pointer - * - * Note: the map() callback will be called before this function returns - * for all legacy interrupts except 0 (which is always the invalid irq for - * a legacy controller). - */ -struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - irq_hw_number_t first_hwirq, - const struct irq_domain_ops *ops, - void *host_data) -{ - return irq_domain_create_legacy(of_node_to_fwnode(of_node), size, - first_irq, first_hwirq, ops, host_data); -} -EXPORT_SYMBOL_GPL(irq_domain_add_legacy); - struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, @@ -885,7 +858,7 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, { int i; - fwspec->fwnode = of_node_to_fwnode(np); + fwspec->fwnode = of_fwnode_handle(np); fwspec->param_count = count; for (i = 0; i < count; i++) @@ -1133,6 +1106,31 @@ int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); /** + * irq_domain_xlate_twothreecell() - Generic xlate for direct two or three cell bindings + * @d: Interrupt domain involved in the translation + * @ctrlr: The device tree node for the device whose interrupt is translated + * @intspec: The interrupt specifier data from the device tree + * @intsize: The number of entries in @intspec + * @out_hwirq: Pointer to storage for the hardware interrupt number + * @out_type: Pointer to storage for the interrupt type + * + * Device Tree interrupt specifier translation function for two or three + * cell bindings, where the cell values map directly to the hardware + * interrupt number and the type specifier. + */ +int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type) +{ + struct irq_fwspec fwspec; + + of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec); + + return irq_domain_translate_twothreecell(d, &fwspec, out_hwirq, out_type); +} +EXPORT_SYMBOL_GPL(irq_domain_xlate_twothreecell); + +/** * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated @@ -1216,6 +1214,37 @@ int irq_domain_translate_twocell(struct irq_domain *d, } EXPORT_SYMBOL_GPL(irq_domain_translate_twocell); +/** + * irq_domain_translate_twothreecell() - Generic translate for direct two or three cell + * bindings + * @d: Interrupt domain involved in the translation + * @fwspec: The firmware interrupt specifier to translate + * @out_hwirq: Pointer to storage for the hardware interrupt number + * @out_type: Pointer to storage for the interrupt type + * + * Firmware interrupt specifier translation function for two or three cell + * specifications, where the parameter values map directly to the hardware + * interrupt number and the type specifier. 
+ */ +int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type) +{ + if (fwspec->param_count == 2) { + *out_hwirq = fwspec->param[0]; + *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + return 0; + } + + if (fwspec->param_count == 3) { + *out_hwirq = fwspec->param[1]; + *out_type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(irq_domain_translate_twothreecell); + int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, int node, const struct irq_affinity_desc *affinity) { @@ -1252,47 +1281,6 @@ void irq_domain_reset_irq_data(struct irq_data *irq_data) EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -/** - * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy - * @parent: Parent irq domain to associate with the new domain - * @flags: Irq domain flags associated to the domain - * @size: Size of the domain. See below - * @fwnode: Optional fwnode of the interrupt controller - * @ops: Pointer to the interrupt domain callbacks - * @host_data: Controller private data pointer - * - * If @size is 0 a tree domain is created, otherwise a linear domain. - * - * If successful the parent is associated to the new domain and the - * domain flags are set. - * Returns pointer to IRQ domain, or NULL on failure. - */ -struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, - unsigned int flags, - unsigned int size, - struct fwnode_handle *fwnode, - const struct irq_domain_ops *ops, - void *host_data) -{ - struct irq_domain_info info = { - .fwnode = fwnode, - .size = size, - .hwirq_max = size, - .ops = ops, - .host_data = host_data, - .domain_flags = flags, - .parent = parent, - }; - struct irq_domain *d; - - if (!info.size) - info.hwirq_max = ~0U; - - d = irq_domain_instantiate(&info); - return IS_ERR(d) ? NULL : d; -} -EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy); - static void irq_domain_insert_irq(int virq) { struct irq_data *data; @@ -2008,7 +1996,7 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain) domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; } #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -/** +/* * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain * @domain: domain to match * @virq: IRQ number to get irq_data @@ -2022,7 +2010,7 @@ struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, } EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); -/** +/* * irq_domain_set_info - Set the complete data for a @virq in @domain * @domain: Interrupt domain to match * @virq: IRQ number diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 753eef8e041c..c94837382037 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -43,8 +43,6 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) bool inprogress; do { - unsigned long flags; - /* * Wait until we're out of the critical section. This might * give the wrong answer due to the lack of memory barriers. @@ -53,7 +51,7 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) cpu_relax(); /* Ok, that indicated we're done: double-check carefully. 
*/ - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irqsave)(&desc->lock); inprogress = irqd_irq_inprogress(&desc->irq_data); /* @@ -69,33 +67,30 @@ static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE, &inprogress); } - raw_spin_unlock_irqrestore(&desc->lock, flags); - /* Oops, that failed? */ } while (inprogress); } /** - * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) - * @irq: interrupt number to wait for + * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) + * @irq: interrupt number to wait for * - * This function waits for any pending hard IRQ handlers for this - * interrupt to complete before returning. If you use this - * function while holding a resource the IRQ handler may need you - * will deadlock. It does not take associated threaded handlers - * into account. + * This function waits for any pending hard IRQ handlers for this interrupt + * to complete before returning. If you use this function while holding a + * resource the IRQ handler may need you will deadlock. It does not take + * associated threaded handlers into account. * - * Do not use this for shutdown scenarios where you must be sure - * that all parts (hardirq and threaded handler) have completed. + * Do not use this for shutdown scenarios where you must be sure that all + * parts (hardirq and threaded handler) have completed. * - * Returns: false if a threaded handler is active. + * Returns: false if a threaded handler is active. * - * This function may be called - with care - from IRQ context. + * This function may be called - with care - from IRQ context. * - * It does not check whether there is an interrupt in flight at the - * hardware level, but not serviced yet, as this might deadlock when - * called with interrupts disabled and the target CPU of the interrupt - * is the current CPU. + * It does not check whether there is an interrupt in flight at the + * hardware level, but not serviced yet, as this might deadlock when called + * with interrupts disabled and the target CPU of the interrupt is the + * current CPU. */ bool synchronize_hardirq(unsigned int irq) { @@ -121,19 +116,19 @@ static void __synchronize_irq(struct irq_desc *desc) } /** - * synchronize_irq - wait for pending IRQ handlers (on other CPUs) - * @irq: interrupt number to wait for + * synchronize_irq - wait for pending IRQ handlers (on other CPUs) + * @irq: interrupt number to wait for * - * This function waits for any pending IRQ handlers for this interrupt - * to complete before returning. If you use this function while - * holding a resource the IRQ handler may need you will deadlock. + * This function waits for any pending IRQ handlers for this interrupt to + * complete before returning. If you use this function while holding a + * resource the IRQ handler may need you will deadlock. * - * Can only be called from preemptible code as it might sleep when - * an interrupt thread is associated to @irq. + * Can only be called from preemptible code as it might sleep when + * an interrupt thread is associated to @irq. * - * It optionally makes sure (when the irq chip supports that method) - * that the interrupt is not pending in any CPU and waiting for - * service. + * It optionally makes sure (when the irq chip supports that method) + * that the interrupt is not pending in any CPU and waiting for + * service. 
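[The recurring conversion in these hunks is the <linux/cleanup.h> guard pattern: the lock is released automatically when the scope ends, so early returns no longer need unlock labels or a flags variable. A minimal sketch of the shape, reusing irqd_irq_inprogress() from above:]

	static bool foo_desc_in_progress(struct irq_desc *desc)	/* hypothetical */
	{
		/* Lock is taken here and dropped on every return path */
		guard(raw_spinlock_irqsave)(&desc->lock);
		return irqd_irq_inprogress(&desc->irq_data);
	}

[scoped_guard(raw_spinlock_irqsave, &desc->lock) { ... } is the block-scoped variant used where only part of a function needs the lock.]
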
*/ void synchronize_irq(unsigned int irq) { @@ -156,8 +151,8 @@ static bool __irq_can_set_affinity(struct irq_desc *desc) } /** - * irq_can_set_affinity - Check if the affinity of a given irq can be set - * @irq: Interrupt to check + * irq_can_set_affinity - Check if the affinity of a given irq can be set + * @irq: Interrupt to check * */ int irq_can_set_affinity(unsigned int irq) @@ -181,13 +176,13 @@ bool irq_can_set_affinity_usr(unsigned int irq) } /** - * irq_set_thread_affinity - Notify irq threads to adjust affinity - * @desc: irq descriptor which has affinity changed + * irq_set_thread_affinity - Notify irq threads to adjust affinity + * @desc: irq descriptor which has affinity changed * - * We just set IRQTF_AFFINITY and delegate the affinity setting - * to the interrupt thread itself. We can not call - * set_cpus_allowed_ptr() here as we hold desc->lock and this - * code can be called from hard interrupt context. + * Just set IRQTF_AFFINITY and delegate the affinity setting to the + * interrupt thread itself. We can not call set_cpus_allowed_ptr() here as + * we hold desc->lock and this code can be called from hard interrupt + * context. */ static void irq_set_thread_affinity(struct irq_desc *desc) { @@ -400,14 +395,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, * an interrupt which is already started or which has already been configured * as managed will also fail, as these mean invalid init state or double init. */ -int irq_update_affinity_desc(unsigned int irq, - struct irq_affinity_desc *affinity) +int irq_update_affinity_desc(unsigned int irq, struct irq_affinity_desc *affinity) { - struct irq_desc *desc; - unsigned long flags; - bool activated; - int ret = 0; - /* * Supporting this with the reservation scheme used by x86 needs * some more thought. Fail it for now. @@ -415,60 +404,50 @@ int irq_update_affinity_desc(unsigned int irq, if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) return -EOPNOTSUPP; - desc = irq_get_desc_buslock(irq, &flags, 0); - if (!desc) - return -EINVAL; - - /* Requires the interrupt to be shut down */ - if (irqd_is_started(&desc->irq_data)) { - ret = -EBUSY; - goto out_unlock; - } + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; + bool activated; - /* Interrupts which are already managed cannot be modified */ - if (irqd_affinity_is_managed(&desc->irq_data)) { - ret = -EBUSY; - goto out_unlock; - } - - /* - * Deactivate the interrupt. That's required to undo - * anything an earlier activation has established. - */ - activated = irqd_is_activated(&desc->irq_data); - if (activated) - irq_domain_deactivate_irq(&desc->irq_data); + /* Requires the interrupt to be shut down */ + if (irqd_is_started(&desc->irq_data)) + return -EBUSY; - if (affinity->is_managed) { - irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); - irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); - } + /* Interrupts which are already managed cannot be modified */ + if (irqd_affinity_is_managed(&desc->irq_data)) + return -EBUSY; + /* + * Deactivate the interrupt. That's required to undo + * anything an earlier activation has established. 
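[Caller-side, the synchronize_irq() contract documented above suggests a teardown shape like the following (foo_* names are hypothetical):]

	static void foo_stop(struct foo_dev *fd)
	{
		fd->stopping = true;		/* checked by the handler */
		synchronize_irq(fd->irq);	/* may sleep if a thread is attached */
		/*
		 * Handlers that started before this point have finished;
		 * any later invocation observes ->stopping and bails out.
		 */
		foo_release_buffers(fd);	/* hypothetical */
	}
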
+ */ + activated = irqd_is_activated(&desc->irq_data); + if (activated) + irq_domain_deactivate_irq(&desc->irq_data); - cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); + if (affinity->is_managed) { + irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); + irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); + } - /* Restore the activation state */ - if (activated) - irq_domain_activate_irq(&desc->irq_data, false); + cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); -out_unlock: - irq_put_desc_busunlock(desc, flags); - return ret; + /* Restore the activation state */ + if (activated) + irq_domain_activate_irq(&desc->irq_data, false); + return 0; + } + return -EINVAL; } static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) { struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - int ret; if (!desc) return -EINVAL; - raw_spin_lock_irqsave(&desc->lock, flags); - ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); - raw_spin_unlock_irqrestore(&desc->lock, flags); - return ret; + guard(raw_spinlock_irqsave)(&desc->lock); + return irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); } /** @@ -501,39 +480,36 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) } EXPORT_SYMBOL_GPL(irq_force_affinity); -int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, - bool setaffinity) +int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, bool setaffinity) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); + int ret = -EINVAL; - if (!desc) - return -EINVAL; - desc->affinity_hint = m; - irq_put_desc_unlock(desc, flags); - if (m && setaffinity) + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + scoped_irqdesc->affinity_hint = m; + ret = 0; + } + + if (!ret && m && setaffinity) __irq_set_affinity(irq, m, false); - return 0; + return ret; } EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint); static void irq_affinity_notify(struct work_struct *work) { - struct irq_affinity_notify *notify = - container_of(work, struct irq_affinity_notify, work); + struct irq_affinity_notify *notify = container_of(work, struct irq_affinity_notify, work); struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; - unsigned long flags; if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) goto out; - raw_spin_lock_irqsave(&desc->lock, flags); - if (irq_move_pending(&desc->irq_data)) - irq_get_pending(cpumask, desc); - else - cpumask_copy(cpumask, desc->irq_common_data.affinity); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) { + if (irq_move_pending(&desc->irq_data)) + irq_get_pending(cpumask, desc); + else + cpumask_copy(cpumask, desc->irq_common_data.affinity); + } notify->notify(notify, cpumask); @@ -543,22 +519,20 @@ out: } /** - * irq_set_affinity_notifier - control notification of IRQ affinity changes - * @irq: Interrupt for which to enable/disable notification - * @notify: Context for notification, or %NULL to disable - * notification. Function pointers must be initialised; - * the other fields will be initialised by this function. - * - * Must be called in process context. Notification may only be enabled - * after the IRQ is allocated and must be disabled before the IRQ is - * freed using free_irq(). 
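[The scoped_irqdesc_get_and_lock() / scoped_irqdesc_get_and_buslock() helpers used in this conversion bundle descriptor lookup, locking and unlocking into one scope, with the descriptor available as scoped_irqdesc. The usage shape, sketched after the irq_set_parent() conversion later in this patch:]

	int foo_set_parent_irq(unsigned int irq, int parent)	/* hypothetical */
	{
		scoped_irqdesc_get_and_lock(irq, 0) {
			/* Descriptor is valid and locked within this block */
			scoped_irqdesc->parent_irq = parent;
			return 0;
		}
		/* Reached only when the descriptor lookup failed */
		return -EINVAL;
	}
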
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes + * @irq: Interrupt for which to enable/disable notification + * @notify: Context for notification, or %NULL to disable + * notification. Function pointers must be initialised; + * the other fields will be initialised by this function. + * + * Must be called in process context. Notification may only be enabled + * after the IRQ is allocated and must be disabled before the IRQ is freed + * using free_irq(). */ -int -irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { struct irq_desc *desc = irq_to_desc(irq); struct irq_affinity_notify *old_notify; - unsigned long flags; /* The release function is promised process context */ might_sleep(); @@ -573,10 +547,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) INIT_WORK(¬ify->work, irq_affinity_notify); } - raw_spin_lock_irqsave(&desc->lock, flags); - old_notify = desc->affinity_notify; - desc->affinity_notify = notify; - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) { + old_notify = desc->affinity_notify; + desc->affinity_notify = notify; + } if (old_notify) { if (cancel_work_sync(&old_notify->work)) { @@ -597,7 +571,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); int irq_setup_affinity(struct irq_desc *desc) { struct cpumask *set = irq_default_affinity; - int ret, node = irq_desc_get_node(desc); + int node = irq_desc_get_node(desc); + static DEFINE_RAW_SPINLOCK(mask_lock); static struct cpumask mask; @@ -605,7 +580,7 @@ int irq_setup_affinity(struct irq_desc *desc) if (!__irq_can_set_affinity(desc)) return 0; - raw_spin_lock(&mask_lock); + guard(raw_spinlock)(&mask_lock); /* * Preserve the managed affinity setting and a userspace affinity * setup, but make sure that one of the targets is online. @@ -630,9 +605,7 @@ int irq_setup_affinity(struct irq_desc *desc) if (cpumask_intersects(&mask, nodemask)) cpumask_and(&mask, &mask, nodemask); } - ret = irq_do_set_affinity(&desc->irq_data, &mask, false); - raw_spin_unlock(&mask_lock); - return ret; + return irq_do_set_affinity(&desc->irq_data, &mask, false); } #else /* Wrapper for ALPHA specific affinity selector magic */ @@ -645,44 +618,36 @@ int irq_setup_affinity(struct irq_desc *desc) /** - * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt - * @irq: interrupt number to set affinity - * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU - * specific data for percpu_devid interrupts - * - * This function uses the vCPU specific data to set the vCPU - * affinity for an irq. The vCPU specific data is passed from - * outside, such as KVM. One example code path is as below: - * KVM -> IOMMU -> irq_set_vcpu_affinity(). + * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt + * @irq: interrupt number to set affinity + * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU + * specific data for percpu_devid interrupts + * + * This function uses the vCPU specific data to set the vCPU affinity for + * an irq. The vCPU specific data is passed from outside, such as KVM. One + * example code path is as below: KVM -> IOMMU -> irq_set_vcpu_affinity(). 
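[A caller-side illustration of the KVM -> IOMMU -> irq_set_vcpu_affinity() path mentioned above (hedged; the wrapper and its use of a void * cookie are hypothetical):]

	static int foo_forward_to_vcpu(unsigned int host_irq, void *vcpu_info)
	{
		int ret = irq_set_vcpu_affinity(host_irq, vcpu_info);

		/* -ENOSYS: no chip in the hierarchy implements the callback */
		if (ret == -ENOSYS)
			pr_debug("irq %u cannot be forwarded to a vCPU\n", host_irq);
		return ret;
	}
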
*/ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - struct irq_data *data; - struct irq_chip *chip; - int ret = -ENOSYS; + scoped_irqdesc_get_and_lock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; + struct irq_data *data; + struct irq_chip *chip; - if (!desc) - return -EINVAL; - - data = irq_desc_get_irq_data(desc); - do { - chip = irq_data_get_irq_chip(data); - if (chip && chip->irq_set_vcpu_affinity) - break; -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY - data = data->parent_data; -#else - data = NULL; -#endif - } while (data); + data = irq_desc_get_irq_data(desc); + do { + chip = irq_data_get_irq_chip(data); + if (chip && chip->irq_set_vcpu_affinity) + break; - if (data) - ret = chip->irq_set_vcpu_affinity(data, vcpu_info); - irq_put_desc_unlock(desc, flags); + data = irqd_get_parent_data(data); + } while (data); - return ret; + if (!data) + return -ENOSYS; + return chip->irq_set_vcpu_affinity(data, vcpu_info); + } + return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); @@ -694,26 +659,23 @@ void __disable_irq(struct irq_desc *desc) static int __disable_irq_nosync(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - - if (!desc) - return -EINVAL; - __disable_irq(desc); - irq_put_desc_busunlock(desc, flags); - return 0; + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + __disable_irq(scoped_irqdesc); + return 0; + } + return -EINVAL; } /** - * disable_irq_nosync - disable an irq without waiting - * @irq: Interrupt to disable + * disable_irq_nosync - disable an irq without waiting + * @irq: Interrupt to disable * - * Disable the selected interrupt line. Disables and Enables are - * nested. - * Unlike disable_irq(), this function does not ensure existing - * instances of the IRQ handler have completed before returning. + * Disable the selected interrupt line. Disables and Enables are + * nested. + * Unlike disable_irq(), this function does not ensure existing + * instances of the IRQ handler have completed before returning. * - * This function may be called from IRQ context. + * This function may be called from IRQ context. */ void disable_irq_nosync(unsigned int irq) { @@ -722,17 +684,17 @@ void disable_irq_nosync(unsigned int irq) EXPORT_SYMBOL(disable_irq_nosync); /** - * disable_irq - disable an irq and wait for completion - * @irq: Interrupt to disable + * disable_irq - disable an irq and wait for completion + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Enables and Disables are nested. * - * Disable the selected interrupt line. Enables and Disables are - * nested. - * This function waits for any pending IRQ handlers for this interrupt - * to complete before returning. If you use this function while - * holding a resource the IRQ handler may need you will deadlock. + * This function waits for any pending IRQ handlers for this interrupt to + * complete before returning. If you use this function while holding a + * resource the IRQ handler may need you will deadlock. * - * Can only be called from preemptible code as it might sleep when - * an interrupt thread is associated to @irq. + * Can only be called from preemptible code as it might sleep when an + * interrupt thread is associated to @irq. 
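[The nesting rule above means every disable must be balanced by an enable before the line fires again. A small sketch (hypothetical driver):]

	static void foo_reconfigure(struct foo_dev *fd)
	{
		disable_irq(fd->irq);		/* waits for in-flight handlers, may sleep */
		foo_rewrite_registers(fd);	/* hypothetical */
		enable_irq(fd->irq);		/* balances the disable above */
	}
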
* */ void disable_irq(unsigned int irq) @@ -744,40 +706,39 @@ void disable_irq(unsigned int irq) EXPORT_SYMBOL(disable_irq); /** - * disable_hardirq - disables an irq and waits for hardirq completion - * @irq: Interrupt to disable + * disable_hardirq - disables an irq and waits for hardirq completion + * @irq: Interrupt to disable * - * Disable the selected interrupt line. Enables and Disables are - * nested. - * This function waits for any pending hard IRQ handlers for this - * interrupt to complete before returning. If you use this function while - * holding a resource the hard IRQ handler may need you will deadlock. + * Disable the selected interrupt line. Enables and Disables are nested. * - * When used to optimistically disable an interrupt from atomic context - * the return value must be checked. + * This function waits for any pending hard IRQ handlers for this interrupt + * to complete before returning. If you use this function while holding a + * resource the hard IRQ handler may need you will deadlock. * - * Returns: false if a threaded handler is active. + * When used to optimistically disable an interrupt from atomic context the + * return value must be checked. * - * This function may be called - with care - from IRQ context. + * Returns: false if a threaded handler is active. + * + * This function may be called - with care - from IRQ context. */ bool disable_hardirq(unsigned int irq) { if (!__disable_irq_nosync(irq)) return synchronize_hardirq(irq); - return false; } EXPORT_SYMBOL_GPL(disable_hardirq); /** - * disable_nmi_nosync - disable an nmi without waiting - * @irq: Interrupt to disable - * - * Disable the selected interrupt line. Disables and enables are - * nested. - * The interrupt to disable must have been requested through request_nmi. - * Unlike disable_nmi(), this function does not ensure existing - * instances of the IRQ handler have completed before returning. + * disable_nmi_nosync - disable an nmi without waiting + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Disables and enables are nested. + * + * The interrupt to disable must have been requested through request_nmi. + * Unlike disable_nmi(), this function does not ensure existing + * instances of the IRQ handler have completed before returning. */ void disable_nmi_nosync(unsigned int irq) { @@ -817,41 +778,34 @@ void __enable_irq(struct irq_desc *desc) } /** - * enable_irq - enable handling of an irq - * @irq: Interrupt to enable + * enable_irq - enable handling of an irq + * @irq: Interrupt to enable * - * Undoes the effect of one call to disable_irq(). If this - * matches the last disable, processing of interrupts on this - * IRQ line is re-enabled. + * Undoes the effect of one call to disable_irq(). If this matches the + * last disable, processing of interrupts on this IRQ line is re-enabled. * - * This function may be called from IRQ context only when - * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! + * This function may be called from IRQ context only when + * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 
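[The optimistic atomic-context use of disable_hardirq() reads like this (hedged sketch; netpoll-style polling is the classic caller):]

	static void foo_atomic_poll(struct foo_dev *fd)
	{
		/* false: a threaded handler is still active, leave the device alone */
		if (disable_hardirq(fd->irq))
			foo_poll_device(fd);	/* hypothetical */
		enable_irq(fd->irq);
	}
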
*/ void enable_irq(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + struct irq_desc *desc = scoped_irqdesc; - if (!desc) - return; - if (WARN(!desc->irq_data.chip, - KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) - goto out; - - __enable_irq(desc); -out: - irq_put_desc_busunlock(desc, flags); + if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq)) + return; + __enable_irq(desc); + } } EXPORT_SYMBOL(enable_irq); /** - * enable_nmi - enable handling of an nmi - * @irq: Interrupt to enable + * enable_nmi - enable handling of an nmi + * @irq: Interrupt to enable * - * The interrupt to enable must have been requested through request_nmi. - * Undoes the effect of one call to disable_nmi(). If this - * matches the last disable, processing of interrupts on this - * IRQ line is re-enabled. + * The interrupt to enable must have been requested through request_nmi. + * Undoes the effect of one call to disable_nmi(). If this matches the last + * disable, processing of interrupts on this IRQ line is re-enabled. */ void enable_nmi(unsigned int irq) { @@ -873,65 +827,59 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) } /** - * irq_set_irq_wake - control irq power management wakeup - * @irq: interrupt to control - * @on: enable/disable power management wakeup - * - * Enable/disable power management wakeup mode, which is - * disabled by default. Enables and disables must match, - * just as they match for non-wakeup mode support. - * - * Wakeup mode lets this IRQ wake the system from sleep - * states like "suspend to RAM". - * - * Note: irq enable/disable state is completely orthogonal - * to the enable/disable state of irq wake. An irq can be - * disabled with disable_irq() and still wake the system as - * long as the irq has wake enabled. If this does not hold, - * then the underlying irq chip and the related driver need - * to be investigated. + * irq_set_irq_wake - control irq power management wakeup + * @irq: interrupt to control + * @on: enable/disable power management wakeup + * + * Enable/disable power management wakeup mode, which is disabled by + * default. Enables and disables must match, just as they match for + * non-wakeup mode support. + * + * Wakeup mode lets this IRQ wake the system from sleep states like + * "suspend to RAM". + * + * Note: irq enable/disable state is completely orthogonal to the + * enable/disable state of irq wake. An irq can be disabled with + * disable_irq() and still wake the system as long as the irq has wake + * enabled. If this does not hold, then the underlying irq chip and the + * related driver need to be investigated. */ int irq_set_irq_wake(unsigned int irq, unsigned int on) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - int ret = 0; + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + struct irq_desc *desc = scoped_irqdesc; + int ret = 0; - if (!desc) - return -EINVAL; - - /* Don't use NMIs as wake up interrupts please */ - if (irq_is_nmi(desc)) { - ret = -EINVAL; - goto out_unlock; - } + /* Don't use NMIs as wake up interrupts please */ + if (irq_is_nmi(desc)) + return -EINVAL; - /* wakeup-capable irqs can be shared between drivers that - * don't need to have the same sleep mode behaviors. 
- */ - if (on) { - if (desc->wake_depth++ == 0) { - ret = set_irq_wake_real(irq, on); - if (ret) - desc->wake_depth = 0; - else - irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); - } - } else { - if (desc->wake_depth == 0) { - WARN(1, "Unbalanced IRQ %d wake disable\n", irq); - } else if (--desc->wake_depth == 0) { - ret = set_irq_wake_real(irq, on); - if (ret) - desc->wake_depth = 1; - else - irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); + /* + * wakeup-capable irqs can be shared between drivers that + * don't need to have the same sleep mode behaviors. + */ + if (on) { + if (desc->wake_depth++ == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 0; + else + irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); + } + } else { + if (desc->wake_depth == 0) { + WARN(1, "Unbalanced IRQ %d wake disable\n", irq); + } else if (--desc->wake_depth == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 1; + else + irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); + } } + return ret; } - -out_unlock: - irq_put_desc_busunlock(desc, flags); - return ret; + return -EINVAL; } EXPORT_SYMBOL(irq_set_irq_wake); @@ -940,22 +888,17 @@ EXPORT_SYMBOL(irq_set_irq_wake); * particular irq has been exclusively allocated or is available * for driver use. */ -int can_request_irq(unsigned int irq, unsigned long irqflags) +bool can_request_irq(unsigned int irq, unsigned long irqflags) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - int canrequest = 0; + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + struct irq_desc *desc = scoped_irqdesc; - if (!desc) - return 0; - - if (irq_settings_can_request(desc)) { - if (!desc->action || - irqflags & desc->action->flags & IRQF_SHARED) - canrequest = 1; + if (irq_settings_can_request(desc)) { + if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) + return true; + } } - irq_put_desc_unlock(desc, flags); - return canrequest; + return false; } int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) @@ -1016,16 +959,11 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) #ifdef CONFIG_HARDIRQS_SW_RESEND int irq_set_parent(int irq, int parent_irq) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); - - if (!desc) - return -EINVAL; - - desc->parent_irq = parent_irq; - - irq_put_desc_unlock(desc, flags); - return 0; + scoped_irqdesc_get_and_lock(irq, 0) { + scoped_irqdesc->parent_irq = parent_irq; + return 0; + } + return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_parent); #endif @@ -1079,19 +1017,19 @@ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *a return; } - raw_spin_lock_irq(&desc->lock); - /* - * This code is triggered unconditionally. Check the affinity - * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. - */ - if (cpumask_available(desc->irq_common_data.affinity)) { - const struct cpumask *m; + scoped_guard(raw_spinlock_irq, &desc->lock) { + /* + * This code is triggered unconditionally. Check the affinity + * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 
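[Drivers normally reach irq_set_irq_wake() through the enable_irq_wake()/disable_irq_wake() wrappers, keeping the wake depth balanced across suspend/resume. A hedged sketch:]

	static int foo_suspend(struct device *dev)
	{
		struct foo_dev *fd = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			enable_irq_wake(fd->irq);	/* irq_set_irq_wake(irq, 1) */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo_dev *fd = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			disable_irq_wake(fd->irq);	/* balances the suspend path */
		return 0;
	}
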
+ */ + if (cpumask_available(desc->irq_common_data.affinity)) { + const struct cpumask *m; - m = irq_data_get_effective_affinity_mask(&desc->irq_data); - cpumask_copy(mask, m); - valid = true; + m = irq_data_get_effective_affinity_mask(&desc->irq_data); + cpumask_copy(mask, m); + valid = true; + } } - raw_spin_unlock_irq(&desc->lock); if (valid) set_cpus_allowed_ptr(current, mask); @@ -1259,9 +1197,8 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) if (WARN_ON_ONCE(!secondary)) return; - raw_spin_lock_irq(&desc->lock); + guard(raw_spinlock_irq)(&desc->lock); __irq_wake_thread(desc, secondary); - raw_spin_unlock_irq(&desc->lock); } /* @@ -1334,21 +1271,19 @@ static int irq_thread(void *data) } /** - * irq_wake_thread - wake the irq thread for the action identified by dev_id - * @irq: Interrupt line - * @dev_id: Device identity for which the thread should be woken - * + * irq_wake_thread - wake the irq thread for the action identified by dev_id + * @irq: Interrupt line + * @dev_id: Device identity for which the thread should be woken */ void irq_wake_thread(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; - unsigned long flags; if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return; - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irqsave)(&desc->lock); for_each_action_of_desc(desc, action) { if (action->dev_id == dev_id) { if (action->thread) @@ -1356,7 +1291,6 @@ void irq_wake_thread(unsigned int irq, void *dev_id) break; } } - raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL_GPL(irq_wake_thread); @@ -1987,9 +1921,8 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) * There is no interrupt on the fly anymore. Deactivate it * completely. */ - raw_spin_lock_irqsave(&desc->lock, flags); - irq_domain_deactivate_irq(&desc->irq_data); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) + irq_domain_deactivate_irq(&desc->irq_data); irq_release_resources(desc); chip_bus_sync_unlock(desc); @@ -2005,20 +1938,19 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) } /** - * free_irq - free an interrupt allocated with request_irq - * @irq: Interrupt line to free - * @dev_id: Device identity to free + * free_irq - free an interrupt allocated with request_irq + * @irq: Interrupt line to free + * @dev_id: Device identity to free * - * Remove an interrupt handler. The handler is removed and if the - * interrupt line is no longer in use by any driver it is disabled. - * On a shared IRQ the caller must ensure the interrupt is disabled - * on the card it drives before calling this function. The function - * does not return until any executing interrupts for this IRQ - * have completed. + * Remove an interrupt handler. The handler is removed and if the interrupt + * line is no longer in use by any driver it is disabled. On a shared IRQ + * the caller must ensure the interrupt is disabled on the card it drives + * before calling this function. The function does not return until any + * executing interrupts for this IRQ have completed. * - * This function must not be called from interrupt context. + * This function must not be called from interrupt context. * - * Returns the devname argument passed to request_irq. + * Returns the devname argument passed to request_irq. 
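[Caller-side shape of the free path described above (foo_* names hypothetical; note the devname return documented in the kernel-doc):]

	static void foo_remove(struct foo_dev *fd)
	{
		const void *name;

		foo_mask_device_irq(fd);	/* shared line: quiesce the card first */
		name = free_irq(fd->irq, fd);	/* returns devname from request time */
		pr_debug("released %s\n", (const char *)name);
	}
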
*/ const void *free_irq(unsigned int irq, void *dev_id) { @@ -2075,8 +2007,6 @@ static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) const void *free_nmi(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); - unsigned long flags; - const void *devname; if (!desc || WARN_ON(!irq_is_nmi(desc))) return NULL; @@ -2088,53 +2018,46 @@ const void *free_nmi(unsigned int irq, void *dev_id) if (WARN_ON(desc->depth == 0)) disable_nmi_nosync(irq); - raw_spin_lock_irqsave(&desc->lock, flags); - + guard(raw_spinlock_irqsave)(&desc->lock); irq_nmi_teardown(desc); - devname = __cleanup_nmi(irq, desc); - - raw_spin_unlock_irqrestore(&desc->lock, flags); - - return devname; + return __cleanup_nmi(irq, desc); } /** - * request_threaded_irq - allocate an interrupt line - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * Primary handler for threaded interrupts. - * If handler is NULL and thread_fn != NULL - * the default primary handler is installed. - * @thread_fn: Function called from the irq handler thread - * If NULL, no irq thread is created - * @irqflags: Interrupt type flags - * @devname: An ascii name for the claiming device - * @dev_id: A cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt line and IRQ handling. From the point this - * call is made your handler function may be invoked. Since - * your handler function must clear any interrupt the board - * raises, you must take care both to initialise your hardware - * and to set up the interrupt handler in the right order. - * - * If you want to set up a threaded irq handler for your device - * then you need to supply @handler and @thread_fn. @handler is - * still called in hard interrupt context and has to check - * whether the interrupt originates from the device. If yes it - * needs to disable the interrupt on the device and return - * IRQ_WAKE_THREAD which will wake up the handler thread and run - * @thread_fn. This split handler design is necessary to support - * shared interrupts. - * - * Dev_id must be globally unique. Normally the address of the - * device data structure is used as the cookie. Since the handler - * receives this value it makes sense to use it. - * - * If your interrupt is shared you must pass a non NULL dev_id - * as this is required when freeing the interrupt. - * - * Flags: + * request_threaded_irq - allocate an interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * Primary handler for threaded interrupts. + * If handler is NULL and thread_fn != NULL + * the default primary handler is installed. + * @thread_fn: Function called from the irq handler thread + * If NULL, no irq thread is created + * @irqflags: Interrupt type flags + * @devname: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the interrupt line + * and IRQ handling. From the point this call is made your handler function + * may be invoked. Since your handler function must clear any interrupt the + * board raises, you must take care both to initialise your hardware and to + * set up the interrupt handler in the right order. + * + * If you want to set up a threaded irq handler for your device then you + * need to supply @handler and @thread_fn. 
@handler is still called in hard + * interrupt context and has to check whether the interrupt originates from + * the device. If yes it needs to disable the interrupt on the device and + * return IRQ_WAKE_THREAD which will wake up the handler thread and run + * @thread_fn. This split handler design is necessary to support shared + * interrupts. + * + * @dev_id must be globally unique. Normally the address of the device data + * structure is used as the cookie. Since the handler receives this value + * it makes sense to use it. + * + * If your interrupt is shared you must pass a non NULL dev_id as this is + * required when freeing the interrupt. + * + * Flags: * * IRQF_SHARED Interrupt is shared * IRQF_TRIGGER_* Specify active edge(s) or level @@ -2232,21 +2155,20 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, EXPORT_SYMBOL(request_threaded_irq); /** - * request_any_context_irq - allocate an interrupt line - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * Threaded handler for threaded interrupts. - * @flags: Interrupt type flags - * @name: An ascii name for the claiming device - * @dev_id: A cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt line and IRQ handling. It selects either a - * hardirq or threaded handling method depending on the - * context. - * - * On failure, it returns a negative value. On success, - * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. + * request_any_context_irq - allocate an interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * Threaded handler for threaded interrupts. + * @flags: Interrupt type flags + * @name: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the interrupt line + * and IRQ handling. It selects either a hardirq or threaded handling + * method depending on the context. + * + * Returns: On failure, it returns a negative value. On success, it returns either + * IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */ int request_any_context_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev_id) @@ -2273,37 +2195,35 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler, EXPORT_SYMBOL_GPL(request_any_context_irq); /** - * request_nmi - allocate an interrupt line for NMI delivery - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * Threaded handler for threaded interrupts. - * @irqflags: Interrupt type flags - * @name: An ascii name for the claiming device - * @dev_id: A cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt line and IRQ handling. It sets up the IRQ line - * to be handled as an NMI. - * - * An interrupt line delivering NMIs cannot be shared and IRQ handling - * cannot be threaded. - * - * Interrupt lines requested for NMI delivering must produce per cpu - * interrupts and have auto enabling setting disabled. - * - * Dev_id must be globally unique. Normally the address of the - * device data structure is used as the cookie. Since the handler - * receives this value it makes sense to use it. - * - * If the interrupt line cannot be used to deliver NMIs, function - * will fail and return a negative value. 
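[The split handler design described above, as a hedged sketch (hypothetical foo driver; the primary handler runs in hard interrupt context, the thread function in process context):]

	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo_dev *fd = dev_id;

		if (!foo_irq_pending(fd))	/* shared line: is this our device? */
			return IRQ_NONE;

		foo_mask_device_irq(fd);	/* quiet the device */
		return IRQ_WAKE_THREAD;		/* schedule foo_thread_fn() */
	}

	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
	{
		struct foo_dev *fd = dev_id;

		foo_process_events(fd);		/* may sleep here */
		foo_unmask_device_irq(fd);
		return IRQ_HANDLED;
	}

	/* dev_id (fd) doubles as the unique cookie required when freeing */
	err = request_threaded_irq(fd->irq, foo_hardirq, foo_thread_fn,
				   IRQF_SHARED, "foo", fd);
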
+ * request_nmi - allocate an interrupt line for NMI delivery + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * Threaded handler for threaded interrupts. + * @irqflags: Interrupt type flags + * @name: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the interrupt line + * and IRQ handling. It sets up the IRQ line to be handled as an NMI. + * + * An interrupt line delivering NMIs cannot be shared and IRQ handling + * cannot be threaded. + * + * Interrupt lines requested for NMI delivering must produce per cpu + * interrupts and have auto enabling setting disabled. + * + * @dev_id must be globally unique. Normally the address of the device data + * structure is used as the cookie. Since the handler receives this value + * it makes sense to use it. + * + * If the interrupt line cannot be used to deliver NMIs, function will fail + * and return a negative value. */ int request_nmi(unsigned int irq, irq_handler_t handler, unsigned long irqflags, const char *name, void *dev_id) { struct irqaction *action; struct irq_desc *desc; - unsigned long flags; int retval; if (irq == IRQ_NOTCONNECTED) @@ -2345,21 +2265,17 @@ int request_nmi(unsigned int irq, irq_handler_t handler, if (retval) goto err_irq_setup; - raw_spin_lock_irqsave(&desc->lock, flags); - - /* Setup NMI state */ - desc->istate |= IRQS_NMI; - retval = irq_nmi_setup(desc); - if (retval) { - __cleanup_nmi(irq, desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); - return -EINVAL; + scoped_guard(raw_spinlock_irqsave, &desc->lock) { + /* Setup NMI state */ + desc->istate |= IRQS_NMI; + retval = irq_nmi_setup(desc); + if (retval) { + __cleanup_nmi(irq, desc); + return -EINVAL; + } + return 0; } - raw_spin_unlock_irqrestore(&desc->lock, flags); - - return 0; - err_irq_setup: irq_chip_pm_put(&desc->irq_data); err_out: @@ -2370,35 +2286,25 @@ err_out: void enable_percpu_irq(unsigned int irq, unsigned int type) { - unsigned int cpu = smp_processor_id(); - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { + struct irq_desc *desc = scoped_irqdesc; - if (!desc) - return; - - /* - * If the trigger type is not specified by the caller, then - * use the default for this interrupt. - */ - type &= IRQ_TYPE_SENSE_MASK; - if (type == IRQ_TYPE_NONE) - type = irqd_get_trigger_type(&desc->irq_data); - - if (type != IRQ_TYPE_NONE) { - int ret; - - ret = __irq_set_trigger(desc, type); - - if (ret) { - WARN(1, "failed to set type for IRQ%d\n", irq); - goto out; + /* + * If the trigger type is not specified by the caller, then + * use the default for this interrupt. 
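[Going by the contract documented above (no sharing, no threading, auto-enable off), an NMI request would look roughly like this; treat it as an assumption-laden sketch, since in-tree users are rare:]

	static int foo_setup_nmi(unsigned int irq, struct foo_dev *fd)
	{
		int err;

		irq_set_status_flags(irq, IRQ_NOAUTOEN);	/* no auto enable */
		err = request_nmi(irq, foo_nmi_handler, 0, "foo-nmi", fd);
		if (err)
			return err;

		enable_nmi(irq);	/* explicit enable, mirrored by disable_nmi() */
		return 0;
	}
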
+ */ + type &= IRQ_TYPE_SENSE_MASK; + if (type == IRQ_TYPE_NONE) + type = irqd_get_trigger_type(&desc->irq_data); + + if (type != IRQ_TYPE_NONE) { + if (__irq_set_trigger(desc, type)) { + WARN(1, "failed to set type for IRQ%d\n", irq); + return; + } } + irq_percpu_enable(desc, smp_processor_id()); } - - irq_percpu_enable(desc, cpu); -out: - irq_put_desc_unlock(desc, flags); } EXPORT_SYMBOL_GPL(enable_percpu_irq); @@ -2416,33 +2322,16 @@ void enable_percpu_nmi(unsigned int irq, unsigned int type) */ bool irq_percpu_is_enabled(unsigned int irq) { - unsigned int cpu = smp_processor_id(); - struct irq_desc *desc; - unsigned long flags; - bool is_enabled; - - desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); - if (!desc) - return false; - - is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); - irq_put_desc_unlock(desc, flags); - - return is_enabled; + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) + return cpumask_test_cpu(smp_processor_id(), scoped_irqdesc->percpu_enabled); + return false; } EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); void disable_percpu_irq(unsigned int irq) { - unsigned int cpu = smp_processor_id(); - unsigned long flags; - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); - - if (!desc) - return; - - irq_percpu_disable(desc, cpu); - irq_put_desc_unlock(desc, flags); + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) + irq_percpu_disable(scoped_irqdesc, smp_processor_id()); } EXPORT_SYMBOL_GPL(disable_percpu_irq); @@ -2458,71 +2347,47 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_ { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; - unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); if (!desc) return NULL; - raw_spin_lock_irqsave(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) { + action = desc->action; + if (!action || action->percpu_dev_id != dev_id) { + WARN(1, "Trying to free already-free IRQ %d\n", irq); + return NULL; + } - action = desc->action; - if (!action || action->percpu_dev_id != dev_id) { - WARN(1, "Trying to free already-free IRQ %d\n", irq); - goto bad; - } + if (!cpumask_empty(desc->percpu_enabled)) { + WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", + irq, cpumask_first(desc->percpu_enabled)); + return NULL; + } - if (!cpumask_empty(desc->percpu_enabled)) { - WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", - irq, cpumask_first(desc->percpu_enabled)); - goto bad; + /* Found it - now remove it from the list of entries: */ + desc->action = NULL; + desc->istate &= ~IRQS_NMI; } - /* Found it - now remove it from the list of entries: */ - desc->action = NULL; - - desc->istate &= ~IRQS_NMI; - - raw_spin_unlock_irqrestore(&desc->lock, flags); - unregister_handler_proc(irq, action); - irq_chip_pm_put(&desc->irq_data); module_put(desc->owner); return action; - -bad: - raw_spin_unlock_irqrestore(&desc->lock, flags); - return NULL; } /** - * remove_percpu_irq - free a per-cpu interrupt - * @irq: Interrupt line to free - * @act: irqaction for the interrupt + * free_percpu_irq - free an interrupt allocated with request_percpu_irq + * @irq: Interrupt line to free + * @dev_id: Device identity to free * - * Used to remove interrupts statically setup by the early boot process. 
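[Per-CPU interrupts are enabled on each CPU individually, typically from CPU hotplug callbacks. A hedged sketch (foo_ppi_irq is a hypothetical per-CPU interrupt number):]

	static int foo_cpu_online(unsigned int cpu)
	{
		/* IRQ_TYPE_NONE keeps the default trigger, per the comment above */
		enable_percpu_irq(foo_ppi_irq, IRQ_TYPE_NONE);
		return 0;
	}

	static int foo_cpu_offline(unsigned int cpu)
	{
		disable_percpu_irq(foo_ppi_irq);
		return 0;
	}
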
- */ -void remove_percpu_irq(unsigned int irq, struct irqaction *act) -{ - struct irq_desc *desc = irq_to_desc(irq); - - if (desc && irq_settings_is_per_cpu_devid(desc)) - __free_percpu_irq(irq, act->percpu_dev_id); -} - -/** - * free_percpu_irq - free an interrupt allocated with request_percpu_irq - * @irq: Interrupt line to free - * @dev_id: Device identity to free + * Remove a percpu interrupt handler. The handler is removed, but the + * interrupt line is not disabled. This must be done on each CPU before + * calling this function. The function does not return until any executing + * interrupts for this IRQ have completed. * - * Remove a percpu interrupt handler. The handler is removed, but - * the interrupt line is not disabled. This must be done on each - * CPU before calling this function. The function does not return - * until any executing interrupts for this IRQ have completed. - * - * This function must not be called from interrupt context. + * This function must not be called from interrupt context. */ void free_percpu_irq(unsigned int irq, void __percpu *dev_id) { @@ -2551,9 +2416,9 @@ void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) } /** - * setup_percpu_irq - setup a per-cpu interrupt - * @irq: Interrupt line to setup - * @act: irqaction for the interrupt + * setup_percpu_irq - setup a per-cpu interrupt + * @irq: Interrupt line to setup + * @act: irqaction for the interrupt * * Used to statically setup per-cpu interrupts in the early boot process. */ @@ -2578,21 +2443,20 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act) } /** - * __request_percpu_irq - allocate a percpu interrupt line - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. - * @flags: Interrupt type flags (IRQF_TIMER only) - * @devname: An ascii name for the claiming device - * @dev_id: A percpu cookie passed back to the handler function - * - * This call allocates interrupt resources and enables the - * interrupt on the local CPU. If the interrupt is supposed to be - * enabled on other CPUs, it has to be done on each CPU using - * enable_percpu_irq(). - * - * Dev_id must be globally unique. It is a per-cpu variable, and - * the handler gets called with the interrupted CPU's instance of - * that variable. + * __request_percpu_irq - allocate a percpu interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * @flags: Interrupt type flags (IRQF_TIMER only) + * @devname: An ascii name for the claiming device + * @dev_id: A percpu cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the interrupt on the + * local CPU. If the interrupt is supposed to be enabled on other CPUs, it + * has to be done on each CPU using enable_percpu_irq(). + * + * @dev_id must be globally unique. It is a per-cpu variable, and + * the handler gets called with the interrupted CPU's instance of + * that variable. */ int __request_percpu_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *devname, @@ -2640,32 +2504,31 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler, EXPORT_SYMBOL_GPL(__request_percpu_irq); /** - * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery - * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs. 
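[The percpu cookie contract above in code form (hedged sketch): the handler receives the interrupted CPU's instance of the percpu variable as dev_id:]

	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_state);	/* hypothetical */

	static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
	{
		struct foo_pcpu *p = dev_id;	/* this CPU's instance */

		p->count++;
		return IRQ_HANDLED;
	}

	err = request_percpu_irq(irq, foo_percpu_handler, "foo-percpu",
				 &foo_pcpu_state);
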
- * @name: An ascii name for the claiming device - * @dev_id: A percpu cookie passed back to the handler function + * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. + * @name: An ascii name for the claiming device + * @dev_id: A percpu cookie passed back to the handler function * - * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs - * have to be setup on each CPU by calling prepare_percpu_nmi() before - * being enabled on the same CPU by using enable_percpu_nmi(). + * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs + * have to be setup on each CPU by calling prepare_percpu_nmi() before + * being enabled on the same CPU by using enable_percpu_nmi(). * - * Dev_id must be globally unique. It is a per-cpu variable, and - * the handler gets called with the interrupted CPU's instance of - * that variable. + * @dev_id must be globally unique. It is a per-cpu variable, and the + * handler gets called with the interrupted CPU's instance of that + * variable. * - * Interrupt lines requested for NMI delivering should have auto enabling - * setting disabled. + * Interrupt lines requested for NMI delivering should have auto enabling + * setting disabled. * - * If the interrupt line cannot be used to deliver NMIs, function - * will fail returning a negative value. + * If the interrupt line cannot be used to deliver NMIs, function + * will fail returning a negative value. */ int request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name, void __percpu *dev_id) { struct irqaction *action; struct irq_desc *desc; - unsigned long flags; int retval; if (!handler) @@ -2701,10 +2564,8 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler, if (retval) goto err_irq_setup; - raw_spin_lock_irqsave(&desc->lock, flags); - desc->istate |= IRQS_NMI; - raw_spin_unlock_irqrestore(&desc->lock, flags); - + scoped_guard(raw_spinlock_irqsave, &desc->lock) + desc->istate |= IRQS_NMI; return 0; err_irq_setup: @@ -2716,79 +2577,55 @@ err_out: } /** - * prepare_percpu_nmi - performs CPU local setup for NMI delivery - * @irq: Interrupt line to prepare for NMI delivery + * prepare_percpu_nmi - performs CPU local setup for NMI delivery + * @irq: Interrupt line to prepare for NMI delivery * - * This call prepares an interrupt line to deliver NMI on the current CPU, - * before that interrupt line gets enabled with enable_percpu_nmi(). + * This call prepares an interrupt line to deliver NMI on the current CPU, + * before that interrupt line gets enabled with enable_percpu_nmi(). * - * As a CPU local operation, this should be called from non-preemptible - * context. + * As a CPU local operation, this should be called from non-preemptible + * context. * - * If the interrupt line cannot be used to deliver NMIs, function - * will fail returning a negative value. + * If the interrupt line cannot be used to deliver NMIs, function will fail + * returning a negative value. 
*/ int prepare_percpu_nmi(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc; - int ret = 0; + int ret = -EINVAL; WARN_ON(preemptible()); - desc = irq_get_desc_lock(irq, &flags, - IRQ_GET_DESC_CHECK_PERCPU); - if (!desc) - return -EINVAL; - - if (WARN(!irq_is_nmi(desc), - KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", - irq)) { - ret = -EINVAL; - goto out; - } + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { + if (WARN(!irq_is_nmi(scoped_irqdesc), + "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", irq)) + return -EINVAL; - ret = irq_nmi_setup(desc); - if (ret) { - pr_err("Failed to setup NMI delivery: irq %u\n", irq); - goto out; + ret = irq_nmi_setup(scoped_irqdesc); + if (ret) + pr_err("Failed to setup NMI delivery: irq %u\n", irq); } - -out: - irq_put_desc_unlock(desc, flags); return ret; } /** - * teardown_percpu_nmi - undoes NMI setup of IRQ line - * @irq: Interrupt line from which CPU local NMI configuration should be - * removed - * - * This call undoes the setup done by prepare_percpu_nmi(). + * teardown_percpu_nmi - undoes NMI setup of IRQ line + * @irq: Interrupt line from which CPU local NMI configuration should be removed * - * IRQ line should not be enabled for the current CPU. + * This call undoes the setup done by prepare_percpu_nmi(). * - * As a CPU local operation, this should be called from non-preemptible - * context. + * IRQ line should not be enabled for the current CPU. + * As a CPU local operation, this should be called from non-preemptible + * context. */ void teardown_percpu_nmi(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc; - WARN_ON(preemptible()); - desc = irq_get_desc_lock(irq, &flags, - IRQ_GET_DESC_CHECK_PERCPU); - if (!desc) - return; - - if (WARN_ON(!irq_is_nmi(desc))) - goto out; - - irq_nmi_teardown(desc); -out: - irq_put_desc_unlock(desc, flags); + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { + if (WARN_ON(!irq_is_nmi(scoped_irqdesc))) + return; + irq_nmi_teardown(scoped_irqdesc); + } } static int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) @@ -2815,87 +2652,62 @@ static int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state } /** - * irq_get_irqchip_state - returns the irqchip state of a interrupt. - * @irq: Interrupt line that is forwarded to a VM - * @which: One of IRQCHIP_STATE_* the caller wants to know about - * @state: a pointer to a boolean where the state is to be stored + * irq_get_irqchip_state - returns the irqchip state of a interrupt. + * @irq: Interrupt line that is forwarded to a VM + * @which: One of IRQCHIP_STATE_* the caller wants to know about + * @state: a pointer to a boolean where the state is to be stored * - * This call snapshots the internal irqchip state of an - * interrupt, returning into @state the bit corresponding to - * stage @which + * This call snapshots the internal irqchip state of an interrupt, + * returning into @state the bit corresponding to stage @which * - * This function should be called with preemption disabled if the - * interrupt controller has per-cpu registers. + * This function should be called with preemption disabled if the interrupt + * controller has per-cpu registers. 
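[Putting the documented per-CPU NMI pieces together (hedged sketch, reusing the hypothetical percpu cookie from above):]

	err = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", &foo_pcpu_state);

	/* Then on each target CPU, from non-preemptible context: */
	err = prepare_percpu_nmi(irq);
	if (!err)
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);

	/* Teardown mirrors the setup, again per CPU: */
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
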
*/ -int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, - bool *state) +int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool *state) { - struct irq_desc *desc; - struct irq_data *data; - unsigned long flags; - int err = -EINVAL; - - desc = irq_get_desc_buslock(irq, &flags, 0); - if (!desc) - return err; + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); - data = irq_desc_get_irq_data(desc); - - err = __irq_get_irqchip_state(data, which, state); - - irq_put_desc_busunlock(desc, flags); - return err; + return __irq_get_irqchip_state(data, which, state); + } + return -EINVAL; } EXPORT_SYMBOL_GPL(irq_get_irqchip_state); /** - * irq_set_irqchip_state - set the state of a forwarded interrupt. - * @irq: Interrupt line that is forwarded to a VM - * @which: State to be restored (one of IRQCHIP_STATE_*) - * @val: Value corresponding to @which + * irq_set_irqchip_state - set the state of a forwarded interrupt. + * @irq: Interrupt line that is forwarded to a VM + * @which: State to be restored (one of IRQCHIP_STATE_*) + * @val: Value corresponding to @which * - * This call sets the internal irqchip state of an interrupt, - * depending on the value of @which. + * This call sets the internal irqchip state of an interrupt, depending on + * the value of @which. * - * This function should be called with migration disabled if the - * interrupt controller has per-cpu registers. + * This function should be called with migration disabled if the interrupt + * controller has per-cpu registers. */ -int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, - bool val) +int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool val) { - struct irq_desc *desc; - struct irq_data *data; - struct irq_chip *chip; - unsigned long flags; - int err = -EINVAL; + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); + struct irq_chip *chip; - desc = irq_get_desc_buslock(irq, &flags, 0); - if (!desc) - return err; + do { + chip = irq_data_get_irq_chip(data); - data = irq_desc_get_irq_data(desc); + if (WARN_ON_ONCE(!chip)) + return -ENODEV; - do { - chip = irq_data_get_irq_chip(data); - if (WARN_ON_ONCE(!chip)) { - err = -ENODEV; - goto out_unlock; - } - if (chip->irq_set_irqchip_state) - break; -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY - data = data->parent_data; -#else - data = NULL; -#endif - } while (data); + if (chip->irq_set_irqchip_state) + break; - if (data) - err = chip->irq_set_irqchip_state(data, which, val); + data = irqd_get_parent_data(data); + } while (data); -out_unlock: - irq_put_desc_busunlock(desc, flags); - return err; + if (data) + return chip->irq_set_irqchip_state(data, which, val); + } + return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_irqchip_state); diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index c05ba7ca00fa..9febe797a5f6 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -59,7 +59,8 @@ struct msi_ctrl { static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl); static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid); static inline int msi_sysfs_create_group(struct device *dev); - +static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg); /** * msi_alloc_desc - Allocate an initialized msi_desc @@ -343,26 +344,30 @@ int msi_setup_device_data(struct device *dev) } /** - * msi_lock_descs - Lock the MSI 
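[A caller-side sketch of the snapshot/restore pair, as used when forwarding a line to a VM (hedged):]

	bool pending;
	int err;

	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
	if (!err && pending) {
		/* Replay it elsewhere, then clear the source's pending bit */
		err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
	}
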
descriptor storage of a device + * __msi_lock_descs - Lock the MSI descriptor storage of a device * @dev: Device to operate on + * + * Internal function for guard(msi_descs_lock). Don't use in code. */ -void msi_lock_descs(struct device *dev) +void __msi_lock_descs(struct device *dev) { mutex_lock(&dev->msi.data->mutex); } -EXPORT_SYMBOL_GPL(msi_lock_descs); +EXPORT_SYMBOL_GPL(__msi_lock_descs); /** - * msi_unlock_descs - Unlock the MSI descriptor storage of a device + * __msi_unlock_descs - Unlock the MSI descriptor storage of a device * @dev: Device to operate on + * + * Internal function for guard(msi_descs_lock). Don't use in code. */ -void msi_unlock_descs(struct device *dev) +void __msi_unlock_descs(struct device *dev) { /* Invalidate the index which was cached by the iterator */ dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX; mutex_unlock(&dev->msi.data->mutex); } -EXPORT_SYMBOL_GPL(msi_unlock_descs); +EXPORT_SYMBOL_GPL(__msi_unlock_descs); static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid, enum msi_desc_filter filter) @@ -448,7 +453,6 @@ EXPORT_SYMBOL_GPL(msi_next_desc); unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index) { struct msi_desc *desc; - unsigned int ret = 0; bool pcimsi = false; struct xarray *xa; @@ -462,7 +466,7 @@ unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigne if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN) pcimsi = to_pci_dev(dev)->msi_enabled; - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); xa = &dev->msi.data->__domains[domid].store; desc = xa_load(xa, pcimsi ? 0 : index); if (desc && desc->irq) { @@ -471,16 +475,12 @@ unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigne * PCI-MSIX and platform MSI use a descriptor per * interrupt. 
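[Caller side, the lookup above reduces to the following (hedged; dev and index are whatever the driver has at hand):]

	unsigned int virq = msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, index);

	if (!virq)	/* 0 means no Linux interrupt is mapped at that index */
		return -ENOENT;
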
*/ - if (pcimsi) { - if (index < desc->nvec_used) - ret = desc->irq + index; - } else { - ret = desc->irq; - } + if (!pcimsi) + return desc->irq; + if (index < desc->nvec_used) + return desc->irq + index; } - - msi_unlock_descs(dev); - return ret; + return 0; } EXPORT_SYMBOL_GPL(msi_domain_get_virq); @@ -796,6 +796,10 @@ static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, return 0; } +static void msi_domain_ops_teardown(struct irq_domain *domain, msi_alloc_info_t *arg) +{ +} + static void msi_domain_ops_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { @@ -821,6 +825,7 @@ static struct msi_domain_ops msi_domain_ops_default = { .get_hwirq = msi_domain_ops_get_hwirq, .msi_init = msi_domain_ops_init, .msi_prepare = msi_domain_ops_prepare, + .msi_teardown = msi_domain_ops_teardown, .set_desc = msi_domain_ops_set_desc, }; @@ -842,6 +847,8 @@ static void msi_domain_update_dom_ops(struct msi_domain_info *info) ops->msi_init = msi_domain_ops_default.msi_init; if (ops->msi_prepare == NULL) ops->msi_prepare = msi_domain_ops_default.msi_prepare; + if (ops->msi_teardown == NULL) + ops->msi_teardown = msi_domain_ops_default.msi_teardown; if (ops->set_desc == NULL) ops->set_desc = msi_domain_ops_default.set_desc; } @@ -905,6 +912,32 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, } /** + * msi_create_parent_irq_domain - Create an MSI-parent interrupt domain + * @info: MSI irqdomain creation info + * @msi_parent_ops: MSI parent callbacks and configuration + * + * Return: pointer to the created &struct irq_domain or %NULL on failure + */ +struct irq_domain *msi_create_parent_irq_domain(struct irq_domain_info *info, + const struct msi_parent_ops *msi_parent_ops) +{ + struct irq_domain *d; + + info->hwirq_max = max(info->hwirq_max, info->size); + info->size = info->hwirq_max; + info->domain_flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; + info->bus_token = msi_parent_ops->bus_select_token; + + d = irq_domain_instantiate(info); + if (IS_ERR(d)) + return NULL; + + d->msi_parent_ops = msi_parent_ops; + return d; +} +EXPORT_SYMBOL_GPL(msi_create_parent_irq_domain); + +/** * msi_parent_init_dev_msi_info - Delegate initialization of device MSI info down * in the domain hierarchy * @dev: The device for which the domain should be created @@ -998,9 +1031,8 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, void *chip_data) { struct irq_domain *domain, *parent = dev->msi.domain; - struct fwnode_handle *fwnode, *fwnalloced = NULL; - struct msi_domain_template *bundle; const struct msi_parent_ops *pops; + struct fwnode_handle *fwnode; if (!irq_domain_is_msi_parent(parent)) return false; @@ -1008,7 +1040,8 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, if (domid >= MSI_MAX_DEVICE_IRQDOMAINS) return false; - bundle = kmemdup(template, sizeof(*bundle), GFP_KERNEL); + struct msi_domain_template *bundle __free(kfree) = + kmemdup(template, sizeof(*bundle), GFP_KERNEL); if (!bundle) return false; @@ -1017,6 +1050,7 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, bundle->info.ops = &bundle->ops; bundle->info.data = domain_data; bundle->info.chip_data = chip_data; + bundle->info.alloc_data = &bundle->alloc_info; pops = parent->msi_parent_ops; snprintf(bundle->name, sizeof(bundle->name), "%s%s-%s", @@ -1031,41 +1065,43 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, * node as they are not guaranteed to have a fwnode. 
They are never * looked up and always handled in the context of the device. */ - if (bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE) - fwnode = dev->fwnode; + struct fwnode_handle *fwnode_alloced __free(irq_domain_free_fwnode) = NULL; + + if (!(bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE)) + fwnode = fwnode_alloced = irq_domain_alloc_named_fwnode(bundle->name); else - fwnode = fwnalloced = irq_domain_alloc_named_fwnode(bundle->name); + fwnode = dev->fwnode; if (!fwnode) - goto free_bundle; + return false; if (msi_setup_device_data(dev)) - goto free_fwnode; - - msi_lock_descs(dev); + return false; + guard(msi_descs_lock)(dev); if (WARN_ON_ONCE(msi_get_device_domain(dev, domid))) - goto fail; + return false; if (!pops->init_dev_msi_info(dev, parent, parent, &bundle->info)) - goto fail; + return false; domain = __msi_create_irq_domain(fwnode, &bundle->info, IRQ_DOMAIN_FLAG_MSI_DEVICE, parent); if (!domain) - goto fail; + return false; domain->dev = dev; dev->msi.data->__domains[domid].domain = domain; - msi_unlock_descs(dev); - return true; -fail: - msi_unlock_descs(dev); -free_fwnode: - irq_domain_free_fwnode(fwnalloced); -free_bundle: - kfree(bundle); - return false; + if (msi_domain_prepare_irqs(domain, dev, hwsize, &bundle->alloc_info)) { + dev->msi.data->__domains[domid].domain = NULL; + irq_domain_remove(domain); + return false; + } + + /* @bundle and @fwnode_alloced are now in use. Prevent cleanup */ + retain_and_null_ptr(bundle); + retain_and_null_ptr(fwnode_alloced); + return true; } /** @@ -1079,23 +1115,21 @@ void msi_remove_device_irq_domain(struct device *dev, unsigned int domid) struct msi_domain_info *info; struct irq_domain *domain; - msi_lock_descs(dev); - + guard(msi_descs_lock)(dev); domain = msi_get_device_domain(dev, domid); - if (!domain || !irq_domain_is_msi_device(domain)) - goto unlock; + return; dev->msi.data->__domains[domid].domain = NULL; info = domain->host_data; + + info->ops->msi_teardown(domain, info->alloc_data); + if (irq_domain_is_msi_device(domain)) fwnode = domain->fwnode; irq_domain_remove(domain); irq_domain_free_fwnode(fwnode); kfree(container_of(info, struct msi_domain_template, info)); - -unlock: - msi_unlock_descs(dev); } /** @@ -1111,16 +1145,14 @@ bool msi_match_device_irq_domain(struct device *dev, unsigned int domid, { struct msi_domain_info *info; struct irq_domain *domain; - bool ret = false; - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); domain = msi_get_device_domain(dev, domid); if (domain && irq_domain_is_msi_device(domain)) { info = domain->host_data; - ret = info->bus_token == bus_token; + return info->bus_token == bus_token; } - msi_unlock_descs(dev); - return ret; + return false; } static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, @@ -1238,6 +1270,24 @@ static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflag return 0; } +static int populate_alloc_info(struct irq_domain *domain, struct device *dev, + unsigned int nirqs, msi_alloc_info_t *arg) +{ + struct msi_domain_info *info = domain->host_data; + + /* + * If the caller has provided a template alloc info, use that. Once + * all users of msi_create_irq_domain() have been eliminated, this + * should be the only source of allocation information, and the + * prepare call below should be finally removed. 
+ */ + if (!info->alloc_data) + return msi_domain_prepare_irqs(domain, dev, nirqs, arg); + + *arg = *info->alloc_data; + return 0; +} + static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain, struct msi_ctrl *ctrl) { @@ -1250,7 +1300,7 @@ static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain unsigned long idx; int i, ret, virq; - ret = msi_domain_prepare_irqs(domain, dev, ctrl->nirqs, &arg); + ret = populate_alloc_info(domain, dev, ctrl->nirqs, &arg); if (ret) return ret; @@ -1391,12 +1441,9 @@ int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid, int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid, unsigned int first, unsigned int last) { - int ret; - msi_lock_descs(dev); - ret = msi_domain_alloc_irqs_range_locked(dev, domid, first, last); - msi_unlock_descs(dev); - return ret; + guard(msi_descs_lock)(dev); + return msi_domain_alloc_irqs_range_locked(dev, domid, first, last); } EXPORT_SYMBOL_GPL(msi_domain_alloc_irqs_range); @@ -1500,12 +1547,8 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u const struct irq_affinity_desc *affdesc, union msi_instance_cookie *icookie) { - struct msi_map map; - - msi_lock_descs(dev); - map = __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie); - msi_unlock_descs(dev); - return map; + guard(msi_descs_lock)(dev); + return __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie); } /** @@ -1542,13 +1585,11 @@ int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, icookie.value = ((u64)type << 32) | hwirq; - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); if (WARN_ON_ONCE(msi_get_device_domain(dev, domid) != domain)) map.index = -EINVAL; else map = __msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie); - msi_unlock_descs(dev); - return map.index >= 0 ? 
map.virq : map.index; } @@ -1641,9 +1682,8 @@ void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid, void msi_domain_free_irqs_range(struct device *dev, unsigned int domid, unsigned int first, unsigned int last) { - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); msi_domain_free_irqs_range_locked(dev, domid, first, last); - msi_unlock_descs(dev); } EXPORT_SYMBOL_GPL(msi_domain_free_irqs_all); @@ -1673,9 +1713,8 @@ void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid) */ void msi_domain_free_irqs_all(struct device *dev, unsigned int domid) { - msi_lock_descs(dev); + guard(msi_descs_lock)(dev); msi_domain_free_irqs_all_locked(dev, domid); - msi_unlock_descs(dev); } /** @@ -1694,12 +1733,11 @@ void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq) if (WARN_ON_ONCE(!dev || !desc || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI)) return; - msi_lock_descs(dev); - if (!WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) { - msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index, - desc->msi_index); - } - msi_unlock_descs(dev); + guard(msi_descs_lock)(dev); + if (WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) + return; + msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index, + desc->msi_index); } /** diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index c556bc49d213..445912d51033 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -46,8 +46,7 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) desc->cond_suspend_depth++; WARN_ON_ONCE(desc->no_suspend_depth && - (desc->no_suspend_depth + - desc->cond_suspend_depth) != desc->nr_actions); + (desc->no_suspend_depth + desc->cond_suspend_depth) != desc->nr_actions); } /* @@ -134,14 +133,12 @@ void suspend_device_irqs(void) int irq; for_each_irq_desc(irq, desc) { - unsigned long flags; bool sync; if (irq_settings_is_nested_thread(desc)) continue; - raw_spin_lock_irqsave(&desc->lock, flags); - sync = suspend_device_irq(desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irqsave, &desc->lock) + sync = suspend_device_irq(desc); if (sync) synchronize_irq(irq); @@ -186,18 +183,15 @@ static void resume_irqs(bool want_early) int irq; for_each_irq_desc(irq, desc) { - unsigned long flags; - bool is_early = desc->action && - desc->action->flags & IRQF_EARLY_RESUME; + bool is_early = desc->action && desc->action->flags & IRQF_EARLY_RESUME; if (!is_early && want_early) continue; if (irq_settings_is_nested_thread(desc)) continue; - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irqsave)(&desc->lock); resume_irq(desc); - raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -207,22 +201,16 @@ static void resume_irqs(bool want_early) */ void rearm_wake_irq(unsigned int irq) { - unsigned long flags; - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + struct irq_desc *desc = scoped_irqdesc; - if (!desc) - return; - - if (!(desc->istate & IRQS_SUSPENDED) || - !irqd_is_wakeup_set(&desc->irq_data)) - goto unlock; - - desc->istate &= ~IRQS_SUSPENDED; - irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); - __enable_irq(desc); + if (!(desc->istate & IRQS_SUSPENDED) || !irqd_is_wakeup_set(&desc->irq_data)) + return; -unlock: - irq_put_desc_busunlock(desc, flags); + desc->istate &= ~IRQS_SUSPENDED; + irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); + 
__enable_irq(desc); + } } /** diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 8e29809de38d..29c2404e743b 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -81,20 +81,18 @@ static int show_irq_affinity(int type, struct seq_file *m) static int irq_affinity_hint_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); - unsigned long flags; cpumask_var_t mask; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; - raw_spin_lock_irqsave(&desc->lock, flags); - if (desc->affinity_hint) - cpumask_copy(mask, desc->affinity_hint); - raw_spin_unlock_irqrestore(&desc->lock, flags); + scoped_guard(raw_spinlock_irq, &desc->lock) { + if (desc->affinity_hint) + cpumask_copy(mask, desc->affinity_hint); + } seq_printf(m, "%*pb\n", cpumask_pr_args(mask)); free_cpumask_var(mask); - return 0; } @@ -295,32 +293,26 @@ static int irq_spurious_proc_show(struct seq_file *m, void *v) #define MAX_NAMELEN 128 -static int name_unique(unsigned int irq, struct irqaction *new_action) +static bool name_unique(unsigned int irq, struct irqaction *new_action) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; - unsigned long flags; - int ret = 1; - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irq)(&desc->lock); for_each_action_of_desc(desc, action) { if ((action != new_action) && action->name && - !strcmp(new_action->name, action->name)) { - ret = 0; - break; - } + !strcmp(new_action->name, action->name)) + return false; } - raw_spin_unlock_irqrestore(&desc->lock, flags); - return ret; + return true; } void register_handler_proc(unsigned int irq, struct irqaction *action) { - char name [MAX_NAMELEN]; + char name[MAX_NAMELEN]; struct irq_desc *desc = irq_to_desc(irq); - if (!desc->dir || action->dir || !action->name || - !name_unique(irq, action)) + if (!desc->dir || action->dir || !action->name || !name_unique(irq, action)) return; snprintf(name, MAX_NAMELEN, "%s", action->name); @@ -347,17 +339,16 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) * added, not when the descriptor is created, so multiple * tasks might try to register at the same time. 
*/ - mutex_lock(®ister_lock); + guard(mutex)(®ister_lock); if (desc->dir) - goto out_unlock; - - sprintf(name, "%d", irq); + return; /* create /proc/irq/1234 */ + sprintf(name, "%u", irq); desc->dir = proc_mkdir(name, root_irq_dir); if (!desc->dir) - goto out_unlock; + return; #ifdef CONFIG_SMP umode_t umode = S_IRUGO; @@ -366,31 +357,27 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) umode |= S_IWUSR; /* create /proc/irq/<irq>/smp_affinity */ - proc_create_data("smp_affinity", umode, desc->dir, - &irq_affinity_proc_ops, irqp); + proc_create_data("smp_affinity", umode, desc->dir, &irq_affinity_proc_ops, irqp); /* create /proc/irq/<irq>/affinity_hint */ proc_create_single_data("affinity_hint", 0444, desc->dir, - irq_affinity_hint_proc_show, irqp); + irq_affinity_hint_proc_show, irqp); /* create /proc/irq/<irq>/smp_affinity_list */ proc_create_data("smp_affinity_list", umode, desc->dir, &irq_affinity_list_proc_ops, irqp); - proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, - irqp); + proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, irqp); # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK proc_create_single_data("effective_affinity", 0444, desc->dir, - irq_effective_aff_proc_show, irqp); + irq_effective_aff_proc_show, irqp); proc_create_single_data("effective_affinity_list", 0444, desc->dir, - irq_effective_aff_list_proc_show, irqp); + irq_effective_aff_list_proc_show, irqp); # endif #endif proc_create_single_data("spurious", 0444, desc->dir, - irq_spurious_proc_show, (void *)(long)irq); + irq_spurious_proc_show, (void *)(long)irq); -out_unlock: - mutex_unlock(®ister_lock); } void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) @@ -468,7 +455,6 @@ int show_interrupts(struct seq_file *p, void *v) int i = *(loff_t *) v, j; struct irqaction *action; struct irq_desc *desc; - unsigned long flags; if (i > ACTUAL_NR_IRQS) return 0; @@ -487,13 +473,13 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - rcu_read_lock(); + guard(rcu)(); desc = irq_to_desc(i); if (!desc || irq_settings_is_hidden(desc)) - goto outsparse; + return 0; if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs) - goto outsparse; + return 0; seq_printf(p, "%*d:", prec, i); for_each_online_cpu(j) { @@ -503,7 +489,7 @@ int show_interrupts(struct seq_file *p, void *v) } seq_putc(p, ' '); - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irq)(&desc->lock); if (desc->irq_data.chip) { if (desc->irq_data.chip->irq_print_chip) desc->irq_data.chip->irq_print_chip(&desc->irq_data, p); @@ -532,9 +518,6 @@ int show_interrupts(struct seq_file *p, void *v) } seq_putc(p, '\n'); - raw_spin_unlock_irqrestore(&desc->lock, flags); -outsparse: - rcu_read_unlock(); return 0; } #endif diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 1b7fa72968bd..ca9cc1b806a9 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -30,18 +30,17 @@ static DEFINE_RAW_SPINLOCK(irq_resend_lock); */ static void resend_irqs(struct tasklet_struct *unused) { - struct irq_desc *desc; - - raw_spin_lock_irq(&irq_resend_lock); + guard(raw_spinlock_irq)(&irq_resend_lock); while (!hlist_empty(&irq_resend_list)) { - desc = hlist_entry(irq_resend_list.first, struct irq_desc, - resend_node); + struct irq_desc *desc; + + desc = hlist_entry(irq_resend_list.first, struct irq_desc, resend_node); hlist_del_init(&desc->resend_node); + raw_spin_unlock(&irq_resend_lock); desc->handle_irq(desc); raw_spin_lock(&irq_resend_lock); } - 
raw_spin_unlock_irq(&irq_resend_lock); } /* Tasklet to handle resend: */ @@ -75,19 +74,18 @@ static int irq_sw_resend(struct irq_desc *desc) } /* Add to resend_list and activate the softirq: */ - raw_spin_lock(&irq_resend_lock); - if (hlist_unhashed(&desc->resend_node)) - hlist_add_head(&desc->resend_node, &irq_resend_list); - raw_spin_unlock(&irq_resend_lock); + scoped_guard(raw_spinlock, &irq_resend_lock) { + if (hlist_unhashed(&desc->resend_node)) + hlist_add_head(&desc->resend_node, &irq_resend_list); + } tasklet_schedule(&resend_tasklet); return 0; } void clear_irq_resend(struct irq_desc *desc) { - raw_spin_lock(&irq_resend_lock); + guard(raw_spinlock)(&irq_resend_lock); hlist_del_init(&desc->resend_node); - raw_spin_unlock(&irq_resend_lock); } void irq_resend_init(struct irq_desc *desc) @@ -172,30 +170,24 @@ int check_irq_resend(struct irq_desc *desc, bool inject) */ int irq_inject_interrupt(unsigned int irq) { - struct irq_desc *desc; - unsigned long flags; - int err; + int err = -EINVAL; /* Try the state injection hardware interface first */ if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true)) return 0; /* That failed, try via the resend mechanism */ - desc = irq_get_desc_buslock(irq, &flags, 0); - if (!desc) - return -EINVAL; + scoped_irqdesc_get_and_buslock(irq, 0) { + struct irq_desc *desc = scoped_irqdesc; - /* - * Only try to inject when the interrupt is: - * - not NMI type - * - activated - */ - if (irq_is_nmi(desc) || !irqd_is_activated(&desc->irq_data)) - err = -EINVAL; - else - err = check_irq_resend(desc, true); - - irq_put_desc_busunlock(desc, flags); + /* + * Only try to inject when the interrupt is: + * - not NMI type + * - activated + */ + if (!irq_is_nmi(desc) && irqd_is_activated(&desc->irq_data)) + err = check_irq_resend(desc, true); + } return err; } EXPORT_SYMBOL_GPL(irq_inject_interrupt); diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 02b2daf07441..8f26982e7300 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -34,8 +34,9 @@ static atomic_t irq_poll_active; * true and let the handler run. */ bool irq_wait_for_poll(struct irq_desc *desc) - __must_hold(&desc->lock) { + lockdep_assert_held(&desc->lock); + if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), "irq poll in progress on cpu %d for irq %d\n", smp_processor_id(), desc->irq_data.irq)) @@ -59,37 +60,35 @@ bool irq_wait_for_poll(struct irq_desc *desc) /* * Recovery handler for misrouted interrupts. */ -static int try_one_irq(struct irq_desc *desc, bool force) +static bool try_one_irq(struct irq_desc *desc, bool force) { - irqreturn_t ret = IRQ_NONE; struct irqaction *action; + bool ret = false; - raw_spin_lock(&desc->lock); + guard(raw_spinlock)(&desc->lock); /* * PER_CPU, nested thread interrupts and interrupts explicitly * marked polled are excluded from polling. */ - if (irq_settings_is_per_cpu(desc) || - irq_settings_is_nested_thread(desc) || + if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc) || irq_settings_is_polled(desc)) - goto out; + return false; /* * Do not poll disabled interrupts unless the spurious * disabled poller asks explicitly. */ if (irqd_irq_disabled(&desc->irq_data) && !force) - goto out; + return false; /* * All handlers must agree on IRQF_SHARED, so we test just the * first. 
*/ action = desc->action; - if (!action || !(action->flags & IRQF_SHARED) || - (action->flags & __IRQF_TIMER)) - goto out; + if (!action || !(action->flags & IRQF_SHARED) || (action->flags & __IRQF_TIMER)) + return false; /* Already running on another processor */ if (irqd_irq_inprogress(&desc->irq_data)) { @@ -98,21 +97,19 @@ static int try_one_irq(struct irq_desc *desc, bool force) * CPU to go looking for our mystery interrupt too */ desc->istate |= IRQS_PENDING; - goto out; + return false; } /* Mark it poll in progress */ desc->istate |= IRQS_POLL_INPROGRESS; do { if (handle_irq_event(desc) == IRQ_HANDLED) - ret = IRQ_HANDLED; + ret = true; /* Make sure that there is still a valid action */ action = desc->action; } while ((desc->istate & IRQS_PENDING) && action); desc->istate &= ~IRQS_POLL_INPROGRESS; -out: - raw_spin_unlock(&desc->lock); - return ret == IRQ_HANDLED; + return ret; } static int misrouted_irq(int irq) @@ -157,8 +154,7 @@ static void poll_spurious_irqs(struct timer_list *unused) continue; /* Racy but it doesn't matter */ - state = desc->istate; - barrier(); + state = READ_ONCE(desc->istate); if (!(state & IRQS_SPURIOUS_DISABLED)) continue; @@ -168,8 +164,7 @@ static void poll_spurious_irqs(struct timer_list *unused) } out: atomic_dec(&irq_poll_active); - mod_timer(&poll_spurious_irq_timer, - jiffies + POLL_SPURIOUS_IRQ_INTERVAL); + mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); } static inline int bad_action_ret(irqreturn_t action_ret) @@ -193,17 +188,13 @@ static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) { unsigned int irq = irq_desc_get_irq(desc); struct irqaction *action; - unsigned long flags; - if (bad_action_ret(action_ret)) { - printk(KERN_ERR "irq event %d: bogus return value %x\n", - irq, action_ret); - } else { - printk(KERN_ERR "irq %d: nobody cared (try booting with " - "the \"irqpoll\" option)\n", irq); - } + if (bad_action_ret(action_ret)) + pr_err("irq event %d: bogus return value %x\n", irq, action_ret); + else + pr_err("irq %d: nobody cared (try booting with the \"irqpoll\" option)\n", irq); dump_stack(); - printk(KERN_ERR "handlers:\n"); + pr_err("handlers:\n"); /* * We need to take desc->lock here. note_interrupt() is called @@ -211,15 +202,13 @@ static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) * with something else removing an action. It's ok to take * desc->lock here. See synchronize_irq(). */ - raw_spin_lock_irqsave(&desc->lock, flags); + guard(raw_spinlock_irqsave)(&desc->lock); for_each_action_of_desc(desc, action) { - printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler); + pr_err("[<%p>] %ps", action->handler, action->handler); if (action->thread_fn) - printk(KERN_CONT " threaded [<%p>] %ps", - action->thread_fn, action->thread_fn); - printk(KERN_CONT "\n"); + pr_cont(" threaded [<%p>] %ps", action->thread_fn, action->thread_fn); + pr_cont("\n"); } - raw_spin_unlock_irqrestore(&desc->lock, flags); } static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) @@ -232,18 +221,17 @@ static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) } } -static inline int -try_misrouted_irq(unsigned int irq, struct irq_desc *desc, - irqreturn_t action_ret) +static inline bool try_misrouted_irq(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret) { struct irqaction *action; if (!irqfixup) - return 0; + return false; /* We didn't actually handle the IRQ - see if it was misrouted? 
*/
 	if (action_ret == IRQ_NONE)
-		return 1;
+		return true;
 
 	/*
 	 * But for 'irqfixup == 2' we also do it for handled interrupts if
@@ -251,19 +239,16 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 	 * traditional PC timer interrupt.. Legacy)
 	 */
 	if (irqfixup < 2)
-		return 0;
+		return false;
 
 	if (!irq)
-		return 1;
+		return true;
 
 	/*
 	 * Since we don't get the descriptor lock, "action" can
-	 * change under us. We don't really care, but we don't
-	 * want to follow a NULL pointer. So tell the compiler to
-	 * just load it once by using a barrier.
+	 * change under us.
 	 */
-	action = desc->action;
-	barrier();
+	action = READ_ONCE(desc->action);
 
 	return action && (action->flags & IRQF_IRQPOLL);
 }
@@ -273,8 +258,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
 {
 	unsigned int irq;
 
-	if (desc->istate & IRQS_POLL_INPROGRESS ||
-	    irq_settings_is_polled(desc))
+	if (desc->istate & IRQS_POLL_INPROGRESS || irq_settings_is_polled(desc))
 		return;
 
 	if (bad_action_ret(action_ret)) {
@@ -420,13 +404,12 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
 		/*
 		 * Now kill the IRQ
 		 */
-		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+		pr_emerg("Disabling IRQ #%d\n", irq);
 		desc->istate |= IRQS_SPURIOUS_DISABLED;
 		desc->depth++;
 		irq_disable(desc);
 
-		mod_timer(&poll_spurious_irq_timer,
-			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+		mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 	}
 	desc->irqs_unhandled = 0;
 }
@@ -436,11 +419,9 @@ bool noirqdebug __read_mostly;
 int noirqdebug_setup(char *str)
 {
 	noirqdebug = 1;
-	printk(KERN_INFO "IRQ lockup detection disabled\n");
-
+	pr_info("IRQ lockup detection disabled\n");
 	return 1;
 }
-
 __setup("noirqdebug", noirqdebug_setup);
 module_param(noirqdebug, bool, 0644);
 MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
@@ -452,12 +433,10 @@ static int __init irqfixup_setup(char *str)
 		return 1;
 	}
 	irqfixup = 1;
-	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
-	printk(KERN_WARNING "This may impact system performance.\n");
-
+	pr_warn("Misrouted IRQ fixup support enabled.\n");
+	pr_warn("This may impact system performance.\n");
 	return 1;
 }
-
 __setup("irqfixup", irqfixup_setup);
 module_param(irqfixup, int, 0644);
@@ -468,11 +447,8 @@ static int __init irqpoll_setup(char *str)
 		return 1;
 	}
 	irqfixup = 2;
-	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
-	       "enabled\n");
-	printk(KERN_WARNING "This may significantly impact system "
-	       "performance\n");
+	pr_warn("Misrouted IRQ fixup and polling support enabled\n");
+	pr_warn("This may significantly impact system performance\n");
 	return 1;
 }
-
 __setup("irqpoll", irqpoll_setup);
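
Every hunk above applies the same scope-based cleanup pattern from <linux/cleanup.h>: guard(type)(lock) acquires a lock and releases it automatically on every exit from the enclosing scope, scoped_guard(type, lock) { ... } confines that to one block, and __free(kfree) together with retain_and_null_ptr() does the equivalent for allocations on error paths. That is what lets the goto/unlock ladders and the out_unlock labels disappear. For readers unfamiliar with the mechanism, here is a minimal, self-contained userspace sketch of how such a guard works, built on __attribute__((cleanup)) — the same compiler feature <linux/cleanup.h> uses — and a pthread mutex. guard(), scoped_guard() and DEFINE_GUARD() are the real kernel names; everything else below (mutex_guard, demo_lock, demo_update, ...) is invented for the illustration and is not kernel API, and the demo also hard-codes one lock class where the kernel form is guard(type)(lock).

/*
 * guard_demo.c - userspace sketch of the guard() pattern used in this
 * patch. Assumes GCC or Clang for __attribute__((cleanup)).
 * Build: cc -pthread -o guard_demo guard_demo.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_state;

/* Roughly what the kernel's DEFINE_GUARD(mutex, ...) generates. */
struct mutex_guard { pthread_mutex_t *lock; };

static inline struct mutex_guard mutex_guard_init(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (struct mutex_guard){ .lock = lock };
}

/* Called by the compiler when the guard variable goes out of scope. */
static inline void mutex_guard_exit(struct mutex_guard *g)
{
	pthread_mutex_unlock(g->lock);
}

/*
 * guard(): the lock is held from this statement until the enclosing
 * scope is left, no matter which return statement leaves it.
 */
#define guard(lock)							\
	struct mutex_guard __guard					\
	__attribute__((cleanup(mutex_guard_exit))) = mutex_guard_init(lock)

static int demo_update(int val)
{
	guard(&demo_lock);

	if (val < 0)
		return -1;	/* early exit: unlock runs automatically */

	demo_state = val;
	return 0;		/* normal exit: unlock runs here too */
}

int main(void)
{
	printf("update(5)  -> %d\n", demo_update(5));
	printf("update(-1) -> %d\n", demo_update(-1));
	printf("state      -> %d\n", demo_state);
	return 0;
}

The payoff is visible in, e.g., rearm_wake_irq() and register_irq_proc() above: every early bail-out becomes a plain return, the compiler emits the unlock on each path, and a newly added error branch can no longer leak the lock.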