author		Linus Torvalds <torvalds@linux-foundation.org>	2025-04-16 20:07:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-04-16 20:07:32 -0700
commit		cfb2e2c57aef75a414c0f18445c7441df5bc13be
tree		9583e76dd9ce5fbaf263b8ffc38934e0b8e3b9e0 /include
parent		c1336865c4c90fcc649df0435a7c86c30030a723
parent		a727a83ef22591d47e2d219cd8e01bd3616f4611
Merge tag 'mm-hotfixes-stable-2025-04-16-19-59' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc hotfixes from Andrew Morton:
 "31 hotfixes. 9 are cc:stable and the remainder address post-6.15
  issues or aren't considered necessary for -stable kernels.

  22 patches are for MM, 9 are otherwise"

* tag 'mm-hotfixes-stable-2025-04-16-19-59' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (31 commits)
  MAINTAINERS: update HUGETLB reviewers
  mm: fix apply_to_existing_page_range()
  selftests/mm: fix compiler -Wmaybe-uninitialized warning
  alloc_tag: handle incomplete bulk allocations in vm_module_tags_populate
  mailmap: add entry for Jean-Michel Hautbois
  mm: (un)track_pfn_copy() fix + doc improvements
  mm: fix filemap_get_folios_contig returning batches of identical folios
  mm/hugetlb: add a line break at the end of the format string
  selftests: mincore: fix tmpfs mincore test failure
  mm/hugetlb: fix set_max_huge_pages() when there are surplus pages
  mm/cma: report base address of single range correctly
  mm: page_alloc: speed up fallbacks in rmqueue_bulk()
  kunit: slub: add module description
  mm/kasan: add module description
  ucs2_string: add module description
  zlib: add module description
  fpga: tests: add module descriptions
  samples/livepatch: add module descriptions
  ASN.1: add module description
  mm/vma: add give_up_on_oom option on modify/merge, use in uffd release
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/linux/local_lock.h	58
-rw-r--r--	include/linux/local_lock_internal.h	207
-rw-r--r--	include/linux/pgtable.h	9
3 files changed, 101 insertions, 173 deletions
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 1a0bc35839e3..16a2ee4f8310 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -52,44 +52,23 @@
__local_unlock_irqrestore(lock, flags)
/**
- * localtry_lock_init - Runtime initialize a lock instance
- */
-#define localtry_lock_init(lock) __localtry_lock_init(lock)
-
-/**
- * localtry_lock - Acquire a per CPU local lock
- * @lock: The lock variable
- */
-#define localtry_lock(lock) __localtry_lock(lock)
-
-/**
- * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
- * @lock: The lock variable
- */
-#define localtry_lock_irq(lock) __localtry_lock_irq(lock)
-
-/**
- * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
- * interrupts
- * @lock: The lock variable
- * @flags: Storage for interrupt flags
+ * local_trylock_init - Runtime initialize a lock instance
*/
-#define localtry_lock_irqsave(lock, flags) \
- __localtry_lock_irqsave(lock, flags)
+#define local_trylock_init(lock) __local_trylock_init(lock)
/**
- * localtry_trylock - Try to acquire a per CPU local lock.
+ * local_trylock - Try to acquire a per CPU local lock
* @lock: The lock variable
*
* The function can be used in any context such as NMI or HARDIRQ. Due to
* locking constraints it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define localtry_trylock(lock) __localtry_trylock(lock)
+#define local_trylock(lock) __local_trylock(lock)
/**
- * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
- * interrupts if acquired
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * interrupts if acquired
* @lock: The lock variable
* @flags: Storage for interrupt flags
*
@@ -97,29 +76,8 @@
* locking constraints it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define localtry_trylock_irqsave(lock, flags) \
- __localtry_trylock_irqsave(lock, flags)
-
-/**
- * local_unlock - Release a per CPU local lock
- * @lock: The lock variable
- */
-#define localtry_unlock(lock) __localtry_unlock(lock)
-
-/**
- * local_unlock_irq - Release a per CPU local lock and enable interrupts
- * @lock: The lock variable
- */
-#define localtry_unlock_irq(lock) __localtry_unlock_irq(lock)
-
-/**
- * localtry_unlock_irqrestore - Release a per CPU local lock and restore
- * interrupt flags
- * @lock: The lock variable
- * @flags: Interrupt flags to restore
- */
-#define localtry_unlock_irqrestore(lock, flags) \
- __localtry_unlock_irqrestore(lock, flags)
+#define local_trylock_irqsave(lock, flags) \
+ __local_trylock_irqsave(lock, flags)
DEFINE_GUARD(local_lock, local_lock_t __percpu*,
local_lock(_T),
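
The net effect for users of this header: trylock-capable locks get their own local_trylock_t type, while local_lock()/local_unlock() keep working on both lock types. A minimal usage sketch under that reading; the struct, per-CPU variable, and function below are hypothetical, not part of this patch:

	struct foo_pcpu {
		local_trylock_t lock;
		u64 events;
	};

	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu) = {
		.lock = INIT_LOCAL_TRYLOCK(lock),
	};

	/* Safe in NMI/HARDIRQ context: when the lock is already held on
	 * this CPU it fails instead of deadlocking. */
	static bool foo_account_event(void)
	{
		if (!local_trylock(&foo_pcpu.lock))
			return false;
		this_cpu_inc(foo_pcpu.events);
		local_unlock(&foo_pcpu.lock);
		return true;
	}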
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 67bd13d142fa..bf2bf40d7b18 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -15,10 +15,11 @@ typedef struct {
#endif
} local_lock_t;
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
typedef struct {
local_lock_t llock;
- unsigned int acquired;
-} localtry_lock_t;
+ u8 acquired;
+} local_trylock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
@@ -29,6 +30,9 @@ typedef struct {
}, \
.owner = NULL,
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
+ .llock = { LOCAL_LOCK_DEBUG_INIT((lockname).llock) },
+
static inline void local_lock_acquire(local_lock_t *l)
{
lock_map_acquire(&l->dep_map);
@@ -56,6 +60,7 @@ static inline void local_lock_debug_init(local_lock_t *l)
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
@@ -63,7 +68,7 @@ static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
-#define INIT_LOCALTRY_LOCK(lockname) { .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
+#define INIT_LOCAL_TRYLOCK(lockname) { LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
#define __local_lock_init(lock) \
do { \
@@ -76,6 +81,8 @@ do { \
local_lock_debug_init(lock); \
} while (0)
+#define __local_trylock_init(lock) __local_lock_init(lock.llock)
+
#define __spinlock_nested_bh_init(lock) \
do { \
static struct lock_class_key __key; \
@@ -87,149 +94,117 @@ do { \
local_lock_debug_init(lock); \
} while (0)
+#define __local_lock_acquire(lock) \
+ do { \
+ local_trylock_t *tl; \
+ local_lock_t *l; \
+ \
+ l = (local_lock_t *)this_cpu_ptr(lock); \
+ tl = (local_trylock_t *)l; \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(tl->acquired == 0); \
+ WRITE_ONCE(tl->acquired, 1); \
+ }), \
+ default:(void)0); \
+ local_lock_acquire(l); \
+ } while (0)
+
#define __local_lock(lock) \
do { \
preempt_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
- local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
-#define __local_unlock(lock) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
- preempt_enable(); \
+ __local_lock_acquire(lock); \
} while (0)
-#define __local_unlock_irq(lock) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
- local_irq_enable(); \
- } while (0)
-
-#define __local_unlock_irqrestore(lock, flags) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
- local_irq_restore(flags); \
- } while (0)
-
-#define __local_lock_nested_bh(lock) \
- do { \
- lockdep_assert_in_softirq(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
-#define __local_unlock_nested_bh(lock) \
- local_lock_release(this_cpu_ptr(lock))
-
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock) \
-do { \
- __local_lock_init(&(lock)->llock); \
- WRITE_ONCE((lock)->acquired, 0); \
-} while (0)
-
-#define __localtry_lock(lock) \
- do { \
- localtry_lock_t *lt; \
- preempt_disable(); \
- lt = this_cpu_ptr(lock); \
- local_lock_acquire(&lt->llock); \
- WRITE_ONCE(lt->acquired, 1); \
- } while (0)
-
-#define __localtry_lock_irq(lock) \
- do { \
- localtry_lock_t *lt; \
- local_irq_disable(); \
- lt = this_cpu_ptr(lock); \
- local_lock_acquire(&lt->llock); \
- WRITE_ONCE(lt->acquired, 1); \
- } while (0)
-
-#define __localtry_lock_irqsave(lock, flags) \
- do { \
- localtry_lock_t *lt; \
- local_irq_save(flags); \
- lt = this_cpu_ptr(lock); \
- local_lock_acquire(&lt->llock); \
- WRITE_ONCE(lt->acquired, 1); \
- } while (0)
-
-#define __localtry_trylock(lock) \
+#define __local_trylock(lock) \
({ \
- localtry_lock_t *lt; \
- bool _ret; \
+ local_trylock_t *tl; \
\
preempt_disable(); \
- lt = this_cpu_ptr(lock); \
- if (!READ_ONCE(lt->acquired)) { \
- WRITE_ONCE(lt->acquired, 1); \
- local_trylock_acquire(&lt->llock); \
- _ret = true; \
- } else { \
- _ret = false; \
+ tl = this_cpu_ptr(lock); \
+ if (READ_ONCE(tl->acquired)) { \
preempt_enable(); \
+ tl = NULL; \
+ } else { \
+ WRITE_ONCE(tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)tl); \
} \
- _ret; \
+ !!tl; \
})
-#define __localtry_trylock_irqsave(lock, flags) \
+#define __local_trylock_irqsave(lock, flags) \
({ \
- localtry_lock_t *lt; \
- bool _ret; \
+ local_trylock_t *tl; \
\
local_irq_save(flags); \
- lt = this_cpu_ptr(lock); \
- if (!READ_ONCE(lt->acquired)) { \
- WRITE_ONCE(lt->acquired, 1); \
- local_trylock_acquire(&lt->llock); \
- _ret = true; \
- } else { \
- _ret = false; \
+ tl = this_cpu_ptr(lock); \
+ if (READ_ONCE(tl->acquired)) { \
local_irq_restore(flags); \
+ tl = NULL; \
+ } else { \
+ WRITE_ONCE(tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)tl); \
} \
- _ret; \
+ !!tl; \
})
-#define __localtry_unlock(lock) \
+#define __local_lock_release(lock) \
+ do { \
+ local_trylock_t *tl; \
+ local_lock_t *l; \
+ \
+ l = (local_lock_t *)this_cpu_ptr(lock); \
+ tl = (local_trylock_t *)l; \
+ local_lock_release(l); \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(tl->acquired == 1); \
+ WRITE_ONCE(tl->acquired, 0); \
+ }), \
+ default:(void)0); \
+ } while (0)
+
+#define __local_unlock(lock) \
do { \
- localtry_lock_t *lt; \
- lt = this_cpu_ptr(lock); \
- WRITE_ONCE(lt->acquired, 0); \
- local_lock_release(&lt->llock); \
+ __local_lock_release(lock); \
preempt_enable(); \
} while (0)
-#define __localtry_unlock_irq(lock) \
+#define __local_unlock_irq(lock) \
do { \
- localtry_lock_t *lt; \
- lt = this_cpu_ptr(lock); \
- WRITE_ONCE(lt->acquired, 0); \
- local_lock_release(&lt->llock); \
+ __local_lock_release(lock); \
local_irq_enable(); \
} while (0)
-#define __localtry_unlock_irqrestore(lock, flags) \
+#define __local_unlock_irqrestore(lock, flags) \
do { \
- localtry_lock_t *lt; \
- lt = this_cpu_ptr(lock); \
- WRITE_ONCE(lt->acquired, 0); \
- local_lock_release(&lt->llock); \
+ __local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
+#define __local_lock_nested_bh(lock) \
+ do { \
+ lockdep_assert_in_softirq(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_unlock_nested_bh(lock) \
+ local_lock_release(this_cpu_ptr(lock))
+
#else /* !CONFIG_PREEMPT_RT */
/*
@@ -237,16 +212,18 @@ do { \
* critical section while staying preemptible.
*/
typedef spinlock_t local_lock_t;
-typedef spinlock_t localtry_lock_t;
+typedef spinlock_t local_trylock_t;
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
-#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)
+#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define __local_lock_init(l) \
do { \
local_spin_lock_init((l)); \
} while (0)
+#define __local_trylock_init(l) __local_lock_init(l)
+
#define __local_lock(__lock) \
do { \
migrate_disable(); \
@@ -283,17 +260,7 @@ do { \
spin_unlock(this_cpu_ptr((lock))); \
} while (0)
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock) __local_lock_init(lock)
-#define __localtry_lock(lock) __local_lock(lock)
-#define __localtry_lock_irq(lock) __local_lock(lock)
-#define __localtry_lock_irqsave(lock, flags) __local_lock_irqsave(lock, flags)
-#define __localtry_unlock(lock) __local_unlock(lock)
-#define __localtry_unlock_irq(lock) __local_unlock(lock)
-#define __localtry_unlock_irqrestore(lock, flags) __local_unlock_irqrestore(lock, flags)
-
-#define __localtry_trylock(lock) \
+#define __local_trylock(lock) \
({ \
int __locked; \
\
@@ -308,11 +275,11 @@ do { \
__locked; \
})
-#define __localtry_trylock_irqsave(lock, flags) \
+#define __local_trylock_irqsave(lock, flags) \
({ \
typecheck(unsigned long, flags); \
flags = 0; \
- __localtry_trylock(lock); \
+ __local_trylock(lock); \
})
#endif /* CONFIG_PREEMPT_RT */
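
A detail worth noting in the non-RT implementation above: __local_lock_acquire() and __local_lock_release() use C11 _Generic, so the acquired-flag bookkeeping is compiled in only when the argument is a local_trylock_t pointer; plain local_lock_t users pay nothing extra. A standalone sketch of that dispatch pattern, with hypothetical types so it compiles outside the kernel:

	#include <stdio.h>

	typedef struct { int dummy; } lock_t;
	typedef struct { lock_t llock; unsigned char acquired; } trylock_t;

	/* The bookkeeping branch is selected at compile time by pointer
	 * type; the unselected branch is never evaluated. */
	#define generic_lock(p)						\
		do {							\
			_Generic((p),					\
				trylock_t *: printf("mark acquired\n"),	\
				default: (void)0);			\
			printf("take lock\n");				\
		} while (0)

	int main(void)
	{
		lock_t a;
		trylock_t b;

		generic_lock(&a);	/* prints only "take lock" */
		generic_lock(&b);	/* prints "mark acquired", then "take lock" */
		return 0;
	}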
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e2b705c14945..b50447ef1c92 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1511,8 +1511,9 @@ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
/*
* track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page
- * tables copied during copy_page_range(). On success, stores the pfn to be
- * passed to untrack_pfn_copy().
+ * tables copied during copy_page_range(). Will store the pfn to be
+ * passed to untrack_pfn_copy() only if there is something to be untracked.
+ * Callers should initialize the pfn to 0.
*/
static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, unsigned long *pfn)
@@ -1522,7 +1523,9 @@ static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
/*
* untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during
- * copy_page_range(), but after track_pfn_copy() was already called.
+ * copy_page_range(), but after track_pfn_copy() was already called. Can
+ * be called even if track_pfn_copy() did not actually track anything:
+ * handled internally.
*/
static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma,
unsigned long pfn)
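
These two comment fixes document the contract established by the "(un)track_pfn_copy() fix + doc improvements" patch in this merge: the pfn is stored only when something was actually tracked, so callers must pre-initialize it to 0, and untrack_pfn_copy() tolerates the nothing-tracked case. A hedged sketch of the resulting caller pattern; copy_one_pfnmap() and copy_the_page_tables() are hypothetical names, not kernel functions:

	static int copy_one_pfnmap(struct vm_area_struct *dst_vma,
				   struct vm_area_struct *src_vma)
	{
		unsigned long pfn = 0;	/* must start at 0, per the comment */
		int ret;

		ret = track_pfn_copy(dst_vma, src_vma, &pfn);
		if (ret)
			return ret;

		ret = copy_the_page_tables(dst_vma, src_vma);
		if (ret)
			/* Safe even if track_pfn_copy() tracked nothing. */
			untrack_pfn_copy(dst_vma, pfn);
		return ret;
	}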