Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r--  include/linux/interrupt.h | 77
1 files changed, 70 insertions, 7 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index da3e0dbe61d4..d5afee95fd43 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -10,6 +10,7 @@
 #include <linux/irqreturn.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
+#include <linux/irqflags.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -80,12 +81,64 @@ extern int request_irq(unsigned int,
 		       unsigned long, const char *, void *);
 extern void free_irq(unsigned int, void *);
 
+/*
+ * On lockdep we dont want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we dont seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq()	do { } while (0)
+#else
+# define local_irq_enable_in_hardirq()	local_irq_enable()
+#endif
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 
+/*
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used for locking constructs that
+ * know that a particular irq context which is disabled,
+ * and which is the only irq-context user of a lock,
+ * that it's safe to take the lock in the irq-disabled
+ * section without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
+ */
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+	disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
+	local_irq_enable();
+#endif
+	enable_irq(irq);
+}
+
 /* IRQ wakeup (PM) control: */
 extern int set_irq_wake(unsigned int irq, unsigned int on);
 
@@ -99,7 +152,19 @@ static inline int disable_irq_wake(unsigned int irq)
 	return set_irq_wake(irq, 0);
 }
 
-#endif
+#else /* !CONFIG_GENERIC_HARDIRQS */
+/*
+ * NOTE: non-genirq architectures, if they want to support the lock
+ * validator need to define the methods below in their asm/irq.h
+ * files, under an #ifdef CONFIG_LOCKDEP section.
+ */
+# ifndef CONFIG_LOCKDEP
+#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
+#  define disable_irq_lockdep(irq)		disable_irq(irq)
+#  define enable_irq_lockdep(irq)		enable_irq(irq)
+# endif
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
@@ -135,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
-/* SoftIRQ primitives.  */
-#define local_bh_disable() \
-		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
-		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
 
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
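
The disable_irq_lockdep()/enable_irq_lockdep() helpers added above are meant for code that takes a lock whose only irq-context user is the device's own, now-disabled, interrupt handler. A minimal usage sketch follows; it is not part of this patch, and the foo_dev structure and foo_reconfigure() function are invented for illustration:

/*
 * HYPOTHETICAL driver fragment - foo_dev and foo_reconfigure() are not
 * part of this patch; they only illustrate the intended usage pattern.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct foo_dev {
	unsigned int irq;	/* the device's interrupt line */
	spinlock_t lock;	/* shared with the interrupt handler */
	/* ... device state ... */
};

static void foo_reconfigure(struct foo_dev *dev)
{
	/*
	 * dev->irq's handler is assumed to be the only irq-context user of
	 * dev->lock, so once that line is masked the lock can be taken with
	 * a plain spin_lock() rather than spin_lock_irq*().  With
	 * CONFIG_LOCKDEP=y the _lockdep variants also disable/enable
	 * hardirqs around the section, so the validator does not flag the
	 * pattern; without lockdep they are plain disable_irq()/enable_irq().
	 */
	disable_irq_lockdep(dev->irq);

	spin_lock(&dev->lock);
	/* ... update state shared with the interrupt handler ... */
	spin_unlock(&dev->lock);

	enable_irq_lockdep(dev->irq);
}

The final hunk of the diff replaces the local_bh_disable()/__local_bh_enable() preempt-count macros with out-of-line functions, and adds _local_bh_enable() and local_bh_enable_ip(), so that softirq disable/enable transitions can carry the validator's annotations in their definitions in kernel/softirq.c.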
