-rw-r--r--  drivers/leds/leds-lp8860.c           |  4
-rw-r--r--  drivers/spi/spi-nxp-fspi.c           |  4
-rw-r--r--  include/linux/local_lock.h           | 20
-rw-r--r--  include/linux/local_lock_internal.h  | 30
-rw-r--r--  include/linux/lockdep_types.h        |  2
-rw-r--r--  include/linux/mutex.h                | 11
-rw-r--r--  kernel/locking/lockdep.c             | 39
-rw-r--r--  kernel/locking/lockdep_internals.h   | 18
-rw-r--r--  kernel/locking/lockdep_proc.c        |  2
-rw-r--r--  kernel/locking/mutex.c               |  4
-rw-r--r--  kernel/locking/rwsem.c               |  4
-rw-r--r--  rust/kernel/sync/lock.rs             |  2
12 files changed, 75 insertions, 65 deletions
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 52b97c9f2a03..0962c00c215a 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -307,7 +307,9 @@ static int lp8860_probe(struct i2c_client *client)
 	led->client = client;
 	led->led_dev.brightness_set_blocking = lp8860_brightness_set;
 
-	devm_mutex_init(&client->dev, &led->lock);
+	ret = devm_mutex_init(&client->dev, &led->lock);
+	if (ret)
+		return dev_err_probe(&client->dev, ret, "Failed to initialize lock\n");
 
 	led->regmap = devm_regmap_init_i2c(client, &lp8860_regmap_config);
 	if (IS_ERR(led->regmap)) {
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index c7d4827f1bf1..b92bfef47371 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -1272,7 +1272,9 @@ static int nxp_fspi_probe(struct platform_device *pdev)
 	if (ret)
 		return dev_err_probe(dev, ret, "Failed to request irq\n");
 
-	devm_mutex_init(dev, &f->lock);
+	ret = devm_mutex_init(dev, &f->lock);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to initialize lock\n");
 
 	ctlr->bus_num = -1;
 	ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 16a2ee4f8310..2ba846419524 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -13,13 +13,13 @@
  * local_lock - Acquire a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_lock(lock)		__local_lock(lock)
+#define local_lock(lock)		__local_lock(this_cpu_ptr(lock))
 
 /**
  * local_lock_irq - Acquire a per CPU local lock and disable interrupts
  * @lock:	The lock variable
  */
-#define local_lock_irq(lock)		__local_lock_irq(lock)
+#define local_lock_irq(lock)		__local_lock_irq(this_cpu_ptr(lock))
 
 /**
  * local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -28,19 +28,19 @@
  * @flags:	Storage for interrupt flags
  */
 #define local_lock_irqsave(lock, flags)				\
-	__local_lock_irqsave(lock, flags)
+	__local_lock_irqsave(this_cpu_ptr(lock), flags)
 
 /**
  * local_unlock - Release a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_unlock(lock)		__local_unlock(lock)
+#define local_unlock(lock)		__local_unlock(this_cpu_ptr(lock))
 
 /**
  * local_unlock_irq - Release a per CPU local lock and enable interrupts
  * @lock:	The lock variable
  */
-#define local_unlock_irq(lock)		__local_unlock_irq(lock)
+#define local_unlock_irq(lock)		__local_unlock_irq(this_cpu_ptr(lock))
 
 /**
  * local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -49,7 +49,7 @@
  * @flags:	Interrupt flags to restore
  */
 #define local_unlock_irqrestore(lock, flags)			\
-	__local_unlock_irqrestore(lock, flags)
+	__local_unlock_irqrestore(this_cpu_ptr(lock), flags)
 
 /**
  * local_lock_init - Runtime initialize a lock instance
@@ -64,7 +64,7 @@
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define local_trylock(lock)		__local_trylock(lock)
+#define local_trylock(lock)		__local_trylock(this_cpu_ptr(lock))
 
 /**
  * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
@@ -77,7 +77,7 @@
  * HARDIRQ context on PREEMPT_RT.
  */
 #define local_trylock_irqsave(lock, flags)			\
-	__local_trylock_irqsave(lock, flags)
+	__local_trylock_irqsave(this_cpu_ptr(lock), flags)
 
 DEFINE_GUARD(local_lock, local_lock_t __percpu*,
 	     local_lock(_T),
@@ -91,10 +91,10 @@ DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
 		    unsigned long flags)
 
 #define local_lock_nested_bh(_lock)				\
-	__local_lock_nested_bh(_lock)
+	__local_lock_nested_bh(this_cpu_ptr(_lock))
 
 #define local_unlock_nested_bh(_lock)				\
-	__local_unlock_nested_bh(_lock)
+	__local_unlock_nested_bh(this_cpu_ptr(_lock))
 
 DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
 	     local_lock_nested_bh(_T),
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8d5ac16a9b17..d80b5306a2c0 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -99,14 +99,14 @@ do {								\
 	local_trylock_t *tl;					\
 	local_lock_t *l;					\
 								\
-	l = (local_lock_t *)this_cpu_ptr(lock);			\
+	l = (local_lock_t *)(lock);				\
 	tl = (local_trylock_t *)l;				\
 	_Generic((lock),					\
-		 __percpu local_trylock_t *: ({			\
+		 local_trylock_t *: ({				\
 			 lockdep_assert(tl->acquired == 0);	\
 			 WRITE_ONCE(tl->acquired, 1);		\
 		 }),						\
-		 __percpu local_lock_t *: (void)0);		\
+		 local_lock_t *: (void)0);			\
 	local_lock_acquire(l);					\
 } while (0)
@@ -133,7 +133,7 @@ do {								\
 	local_trylock_t *tl;					\
 								\
 	preempt_disable();					\
-	tl = this_cpu_ptr(lock);				\
+	tl = (lock);						\
 	if (READ_ONCE(tl->acquired)) {				\
 		preempt_enable();				\
 		tl = NULL;					\
@@ -150,7 +150,7 @@ do {								\
 	local_trylock_t *tl;					\
 								\
 	local_irq_save(flags);					\
-	tl = this_cpu_ptr(lock);				\
+	tl = (lock);						\
 	if (READ_ONCE(tl->acquired)) {				\
 		local_irq_restore(flags);			\
 		tl = NULL;					\
@@ -167,15 +167,15 @@ do {								\
 	local_trylock_t *tl;					\
 	local_lock_t *l;					\
 								\
-	l = (local_lock_t *)this_cpu_ptr(lock);			\
+	l = (local_lock_t *)(lock);				\
 	tl = (local_trylock_t *)l;				\
 	local_lock_release(l);					\
 	_Generic((lock),					\
-		 __percpu local_trylock_t *: ({			\
+		 local_trylock_t *: ({				\
 			 lockdep_assert(tl->acquired == 1);	\
 			 WRITE_ONCE(tl->acquired, 0);		\
 		 }),						\
-		 __percpu local_lock_t *: (void)0);		\
+		 local_lock_t *: (void)0);			\
 } while (0)
 
 #define __local_unlock(lock)					\
@@ -199,11 +199,11 @@ do {								\
 #define __local_lock_nested_bh(lock)				\
 	do {							\
 		lockdep_assert_in_softirq();			\
-		local_lock_acquire(this_cpu_ptr(lock));		\
+		local_lock_acquire((lock));			\
 	} while (0)
 
 #define __local_unlock_nested_bh(lock)				\
-	local_lock_release(this_cpu_ptr(lock))
+	local_lock_release((lock))
 
 #else /* !CONFIG_PREEMPT_RT */
@@ -227,7 +227,7 @@ typedef spinlock_t local_trylock_t;
 #define __local_lock(__lock)					\
 	do {							\
 		migrate_disable();				\
-		spin_lock(this_cpu_ptr((__lock)));		\
+		spin_lock((__lock));				\
 	} while (0)
 
 #define __local_lock_irq(lock)			__local_lock(lock)
@@ -241,7 +241,7 @@ typedef spinlock_t local_trylock_t;
 
 #define __local_unlock(__lock)					\
 	do {							\
-		spin_unlock(this_cpu_ptr((__lock)));		\
+		spin_unlock((__lock));				\
 		migrate_enable();				\
 	} while (0)
@@ -252,12 +252,12 @@ typedef spinlock_t local_trylock_t;
 #define __local_lock_nested_bh(lock)				\
 	do {							\
 		lockdep_assert_in_softirq_func();		\
-		spin_lock(this_cpu_ptr(lock));			\
+		spin_lock((lock));				\
 	} while (0)
 
 #define __local_unlock_nested_bh(lock)				\
 	do {							\
-		spin_unlock(this_cpu_ptr((lock)));		\
+		spin_unlock((lock));				\
 	} while (0)
 
 #define __local_trylock(lock)					\
@@ -268,7 +268,7 @@ do {								\
 		__locked = 0;					\
 	} else {						\
 		migrate_disable();				\
-		__locked = spin_trylock(this_cpu_ptr((lock)));	\
+		__locked = spin_trylock((lock));		\
 		if (!__locked)					\
 			migrate_enable();			\
 	}							\
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 9f361d3ab9d9..eae115a26488 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -175,7 +175,7 @@ struct lock_class_stats {
 	unsigned long			bounces[nr_bounce_types];
 };
 
-struct lock_class_stats lock_stats(struct lock_class *class);
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats);
 void clear_lock_stats(struct lock_class *class);
 
 #endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a039fa8c1780..00afd341d293 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -126,11 +126,11 @@ do {							\
 
 #ifdef CONFIG_DEBUG_MUTEXES
 
-int __devm_mutex_init(struct device *dev, struct mutex *lock);
+int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock);
 
 #else
 
-static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
+static inline int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock)
 {
 	/*
 	 * When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop so
@@ -141,14 +141,17 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
 
 #endif
 
-#define devm_mutex_init(dev, mutex)			\
+#define __mutex_init_ret(mutex)				\
 ({							\
 	typeof(mutex) mutex_ = (mutex);			\
 							\
 	mutex_init(mutex_);				\
-	__devm_mutex_init(dev, mutex_);			\
+	mutex_;						\
 })
 
+#define devm_mutex_init(dev, mutex)			\
+	__devm_mutex_init(dev, __mutex_init_ret(mutex))
+
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/locking/mutex-design.rst.
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index dd2bbf73718b..2d4c5bab5af8 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -297,33 +297,30 @@ static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 	dst->nr += src->nr;
 }
 
-struct lock_class_stats lock_stats(struct lock_class *class)
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats)
 {
-	struct lock_class_stats stats;
 	int cpu, i;
 
-	memset(&stats, 0, sizeof(struct lock_class_stats));
+	memset(stats, 0, sizeof(struct lock_class_stats));
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *pcs =
 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
-			stats.contention_point[i] += pcs->contention_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contention_point); i++)
+			stats->contention_point[i] += pcs->contention_point[i];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
-			stats.contending_point[i] += pcs->contending_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contending_point); i++)
+			stats->contending_point[i] += pcs->contending_point[i];
 
-		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
-		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
+		lock_time_add(&pcs->read_waittime, &stats->read_waittime);
+		lock_time_add(&pcs->write_waittime, &stats->write_waittime);
 
-		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
-		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+		lock_time_add(&pcs->read_holdtime, &stats->read_holdtime);
+		lock_time_add(&pcs->write_holdtime, &stats->write_holdtime);
 
-		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
-			stats.bounces[i] += pcs->bounces[i];
+		for (i = 0; i < ARRAY_SIZE(stats->bounces); i++)
+			stats->bounces[i] += pcs->bounces[i];
 	}
-
-	return stats;
 }
 
 void clear_lock_stats(struct lock_class *class)
@@ -6619,8 +6616,16 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (need_callback)
 		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
-	synchronize_rcu();
+	/*
+	 * Wait until is_dynamic_key() has finished accessing k->hash_entry.
+	 *
+	 * Some operations like __qdisc_destroy() will call this in a debug
+	 * kernel, and the network traffic is disabled while waiting, hence
+	 * the delay of the wait matters in debugging cases. Currently use a
+	 * synchronize_rcu_expedited() to speed up the wait at the cost of
+	 * system IPIs. TODO: Replace RCU with hazptr for this.
+	 */
+	synchronize_rcu_expedited();
 }
 EXPORT_SYMBOL_GPL(lockdep_unregister_key);
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 82156caf77d1..0e5e6ffe91a3 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -47,29 +47,31 @@ enum {
 	__LOCKF(USED_READ)
 };
 
+enum {
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
-static const unsigned long LOCKF_ENABLED_IRQ =
+	LOCKF_ENABLED_IRQ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
-static const unsigned long LOCKF_USED_IN_IRQ =
+	LOCKF_USED_IN_IRQ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
-static const unsigned long LOCKF_ENABLED_IRQ_READ =
+	LOCKF_ENABLED_IRQ_READ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
-static const unsigned long LOCKF_USED_IN_IRQ_READ =
+	LOCKF_USED_IN_IRQ_READ =
 #include "lockdep_states.h"
-	0;
+	0,
 #undef LOCKDEP_STATE
+};
 
 #define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
 #define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index b52c07c4707c..1916db9aa46b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -657,7 +657,7 @@ static int lock_stat_open(struct inode *inode, struct file *file)
 			if (!test_bit(idx, lock_classes_in_use))
 				continue;
 			iter->class = class;
-			iter->stats = lock_stats(class);
+			lock_stats(class, &iter->stats);
 			iter++;
 		}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 80d778fedd60..de7d6702cd96 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -191,9 +191,7 @@ static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
-#endif
 	debug_mutex_add_waiter(lock, waiter, current);
 
 	list_add_tail(&waiter->list, list);
@@ -209,9 +207,7 @@ __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
 		__mutex_clear_flag(lock, MUTEX_FLAGS);
 
 	debug_mutex_remove_waiter(lock, waiter, current);
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	hung_task_clear_blocker();
-#endif
 }
 
 /*
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 2ddb827e3bea..8572dba95af4 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -727,8 +727,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	return ret;
 }
 
-#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)
-
 static inline enum owner_state
 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
 {
@@ -835,7 +833,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		enum owner_state owner_state;
 
 		owner_state = rwsem_spin_on_owner(sem);
-		if (!(owner_state & OWNER_SPINNABLE))
+		if (owner_state == OWNER_NONSPINNABLE)
 			break;
 
 		/*
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index e82fa5be289c..27202beef90c 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -175,6 +175,8 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
     /// Tries to acquire the lock.
     ///
     /// Returns a guard that can be used to access the data protected by the lock if successful.
+    // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
+    #[must_use = "if unused, the lock will be immediately unlocked"]
     pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
         // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
         // that `init` was called.
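
Note on the local_lock.h / local_lock_internal.h hunks above: caller-side usage does not change; the this_cpu_ptr() resolution simply moves out of the __local_lock*() internals and into the local_lock*() wrapper macros. A minimal, hedged sketch of a caller (not taken from this series; the my_lock and my_count names are invented for illustration):

/* Hedged illustration only. Callers keep passing the per-CPU lock variable;
 * after this series local_lock(&my_lock) expands to
 * __local_lock(this_cpu_ptr(&my_lock)) instead of resolving the pointer
 * inside __local_lock() itself. */
#include <linux/local_lock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(local_lock_t, my_lock) = INIT_LOCAL_LOCK(my_lock);
static DEFINE_PER_CPU(unsigned long, my_count);

static void my_count_inc(void)
{
	local_lock(&my_lock);		/* wrapper resolves this_cpu_ptr(&my_lock) */
	this_cpu_inc(my_count);
	local_unlock(&my_lock);
}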
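
Similarly, for the mutex.h change: with __devm_mutex_init() now marked __must_check and devm_mutex_init() evaluating to its return value, probe functions are expected to check the result, exactly as the lp8860 and nxp-fspi hunks do. A hedged sketch of the pattern (struct my_chip and my_probe() are made-up names, not from the diff):

/* Hedged illustration of the new devm_mutex_init() calling convention.
 * Dropping the return value now produces an unused-result warning. */
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_chip {
	struct mutex lock;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_chip *chip;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	ret = devm_mutex_init(&pdev->dev, &chip->lock);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "Failed to initialize lock\n");

	return 0;
}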
