-rw-r--r--  .mailmap                       |  1
-rw-r--r--  MAINTAINERS                    | 66
-rw-r--r--  include/linux/alloc_tag.h      | 12
-rw-r--r--  include/linux/codetag.h        |  8
-rw-r--r--  include/linux/highmem.h        | 10
-rw-r--r--  include/linux/hugetlb.h        |  5
-rw-r--r--  include/linux/mm.h             |  2
-rw-r--r--  include/linux/mman.h           |  2
-rw-r--r--  include/linux/page-flags.h     |  7
-rw-r--r--  include/linux/percpu.h         |  4
-rw-r--r--  include/uapi/linux/taskstats.h | 47
-rw-r--r--  kernel/module/main.c           |  1
-rw-r--r--  lib/alloc_tag.c                | 87
-rw-r--r--  lib/codetag.c                  |  5
-rw-r--r--  mm/cma.c                       |  5
-rw-r--r--  mm/hugetlb.c                   | 24
-rw-r--r--  mm/kasan/shadow.c              | 92
-rw-r--r--  mm/memcontrol.c                |  6
-rw-r--r--  mm/mremap.c                    |  3
-rw-r--r--  mm/page_alloc.c                |  8
-rw-r--r--  mm/truncate.c                  | 20
-rw-r--r--  mm/vma.c                       |  1
-rw-r--r--  mm/vmalloc.c                   | 13
23 files changed, 338 insertions, 91 deletions
diff --git a/.mailmap b/.mailmap
index 9531546fcac9..a885e2eefc69 100644
--- a/.mailmap
+++ b/.mailmap
@@ -313,6 +313,7 @@ Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jan Kuliga <jtkuliga.kdev@gmail.com> <jankul@alatek.krakow.pl>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
+Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@opinsys.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index 3c6f9e70ace0..dd844ac8d910 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11106,6 +11106,14 @@ L: linuxppc-dev@lists.ozlabs.org
S: Odd Fixes
F: drivers/tty/hvc/
+HUNG TASK DETECTOR
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Lance Yang <lance.yang@linux.dev>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: include/linux/hung_task.h
+F: kernel/hung_task.c
+
I2C ACPI SUPPORT
M: Mika Westerberg <westeri@kernel.org>
L: linux-i2c@vger.kernel.org
@@ -15561,6 +15569,41 @@ W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
F: mm/gup.c
+MEMORY MANAGEMENT - KSM (Kernel Samepage Merging)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Xu Xin <xu.xin16@zte.com.cn>
+R: Chengming Zhou <chengming.zhou@linux.dev>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/admin-guide/mm/ksm.rst
+F: Documentation/mm/ksm.rst
+F: include/linux/ksm.h
+F: include/trace/events/ksm.h
+F: mm/ksm.c
+
+MEMORY MANAGEMENT - MEMORY POLICY AND MIGRATION
+M: Andrew Morton <akpm@linux-foundation.org>
+M: David Hildenbrand <david@redhat.com>
+R: Zi Yan <ziy@nvidia.com>
+R: Matthew Brost <matthew.brost@intel.com>
+R: Joshua Hahn <joshua.hahnjy@gmail.com>
+R: Rakie Kim <rakie.kim@sk.com>
+R: Byungchul Park <byungchul@sk.com>
+R: Gregory Price <gourry@gourry.net>
+R: Ying Huang <ying.huang@linux.alibaba.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/linux/mempolicy.h
+F: include/linux/migrate.h
+F: mm/mempolicy.c
+F: mm/migrate.c
+F: mm/migrate_device.c
+
MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA EMULATION
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15573,7 +15616,7 @@ F: mm/numa_memblks.c
MEMORY MANAGEMENT - PAGE ALLOCATOR
M: Andrew Morton <akpm@linux-foundation.org>
-R: Vlastimil Babka <vbabka@suse.cz>
+M: Vlastimil Babka <vbabka@suse.cz>
R: Suren Baghdasaryan <surenb@google.com>
R: Michal Hocko <mhocko@suse.com>
R: Brendan Jackman <jackmanb@google.com>
@@ -15581,10 +15624,25 @@ R: Johannes Weiner <hannes@cmpxchg.org>
R: Zi Yan <ziy@nvidia.com>
L: linux-mm@kvack.org
S: Maintained
+F: include/linux/compaction.h
+F: include/linux/gfp.h
+F: include/linux/page-isolation.h
F: mm/compaction.c
F: mm/page_alloc.c
-F: include/linux/gfp.h
-F: include/linux/compaction.h
+F: mm/page_isolation.c
+
+MEMORY MANAGEMENT - RECLAIM
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Johannes Weiner <hannes@cmpxchg.org>
+R: David Hildenbrand <david@redhat.com>
+R: Michal Hocko <mhocko@kernel.org>
+R: Qi Zheng <zhengqi.arch@bytedance.com>
+R: Shakeel Butt <shakeel.butt@linux.dev>
+R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/pt_reclaim.c
+F: mm/vmscan.c
MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
M: Andrew Morton <akpm@linux-foundation.org>
@@ -25912,7 +25970,7 @@ F: tools/testing/vsock/
VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
-R: Uladzislau Rezki <urezki@gmail.com>
+M: Uladzislau Rezki <urezki@gmail.com>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index a946e0203e6d..8f7931eb7d16 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -104,6 +104,16 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
#else /* ARCH_NEEDS_WEAK_PER_CPU */
+#ifdef MODULE
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = NULL };
+
+#else /* MODULE */
+
#define DEFINE_ALLOC_TAG(_alloc_tag) \
static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
static struct alloc_tag _alloc_tag __used __aligned(8) \
@@ -111,6 +121,8 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
.ct = CODE_TAG_INIT, \
.counters = &_alloc_tag_cntr };
+#endif /* MODULE */
+
#endif /* ARCH_NEEDS_WEAK_PER_CPU */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
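
With this split, a module build emits its tags with .counters = NULL instead of a DEFINE_PER_CPU companion, and the percpu counters are attached only when the module is loaded (see the new module_load hook wired up in lib/alloc_tag.c further down); that is also why the enlarged PERCPU_MODULE_RESERVE in include/linux/percpu.h can go away. A rough, illustrative sketch of what a profiled allocation site in a module now boils down to (simplified; the real expansion goes through the alloc_hooks()/_noprof wrappers):

	/* Roughly what a profiled allocation in a module expands to: a static
	 * tag in the alloc_tags section whose percpu counters start out NULL
	 * and are allocated by the module_load hook. */
	static struct alloc_tag _alloc_tag __used __aligned(8)
		__section(ALLOC_TAG_SECTION_NAME) = {
		.ct = CODE_TAG_INIT,
		.counters = NULL,	/* filled in at module load time */
	};

	void *buf = alloc_hooks_tag(&_alloc_tag,
				    kmalloc_noprof(size, GFP_KERNEL));
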
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index d14dbd26b370..0ee4c21c6dbc 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -36,10 +36,10 @@ union codetag_ref {
struct codetag_type_desc {
const char *section;
size_t tag_size;
- void (*module_load)(struct codetag_type *cttype,
- struct codetag_module *cmod);
- void (*module_unload)(struct codetag_type *cttype,
- struct codetag_module *cmod);
+ void (*module_load)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+ void (*module_unload)(struct module *mod,
+ struct codetag *start, struct codetag *end);
#ifdef CONFIG_MODULES
void (*module_replaced)(struct module *mod, struct module *new_mod);
bool (*needs_section_mem)(struct module *mod, unsigned long size);
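
The new signatures hand the hooks the owning module plus the raw [start, stop) tag range, so implementers no longer need to reach into codetag_type/codetag_module internals. A minimal, hypothetical consumer of the updated interface would look roughly like this (placeholder names; the real user is the alloc_tag code below):

	static void my_tags_module_load(struct module *mod,
					struct codetag *start, struct codetag *stop)
	{
		struct codetag *ct;

		/* Per-tag setup for a freshly loaded module's tag section. */
		for (ct = start; ct != stop; ct++)
			pr_debug("%s: tag at %s:%u\n",
				 mod->name, ct->filename, ct->lineno);
	}

	static void my_tags_module_unload(struct module *mod,
					  struct codetag *start, struct codetag *stop)
	{
		/* Mirror of module_load(): tear down any per-tag state. */
	}

	static const struct codetag_type_desc my_tags_desc = {
		.section	= "my_tags",
		.tag_size	= sizeof(struct codetag),
		.module_load	= my_tags_module_load,
		.module_unload	= my_tags_module_unload,
	};
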
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 5c6bea81a90e..c698f8415675 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -461,7 +461,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
const char *from = kmap_local_folio(folio, offset);
size_t chunk = len;
- if (folio_test_highmem(folio) &&
+ if (folio_test_partial_kmap(folio) &&
chunk > PAGE_SIZE - offset_in_page(offset))
chunk = PAGE_SIZE - offset_in_page(offset);
memcpy(to, from, chunk);
@@ -489,7 +489,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
char *to = kmap_local_folio(folio, offset);
size_t chunk = len;
- if (folio_test_highmem(folio) &&
+ if (folio_test_partial_kmap(folio) &&
chunk > PAGE_SIZE - offset_in_page(offset))
chunk = PAGE_SIZE - offset_in_page(offset);
memcpy(to, from, chunk);
@@ -522,7 +522,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio,
{
size_t len = folio_size(folio) - offset;
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) {
@@ -560,7 +560,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset,
VM_BUG_ON(offset + len > folio_size(folio));
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) {
@@ -597,7 +597,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
size_t offset = offset_in_folio(folio, pos);
char *from = kmap_local_folio(folio, offset);
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
offset = offset_in_page(offset);
len = min_t(size_t, len, PAGE_SIZE - offset);
} else
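
folio_test_partial_kmap() (added in page-flags.h below) is what makes these helpers honor CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP: with that option, kmap_local_folio() maps only a single page even on !HIGHMEM systems, so each copy must stay within one page per mapping. The page-at-a-time loop these helpers implement, shown here as an illustrative sketch of the memcpy_from_folio() pattern rather than the verbatim function:

	/* Page-at-a-time copy pattern used by memcpy_from_folio() and friends
	 * when only one page of the folio can be mapped at once. */
	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_partial_kmap(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
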
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8f3ac832ee7f..4861a7e304bb 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -275,6 +275,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
bool is_hugetlb_entry_migration(pte_t pte);
bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
@@ -468,6 +469,10 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
#endif /* !CONFIG_HUGETLB_PAGE */
#ifndef pgd_write
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf55206935c4..fdda6b16263b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -385,7 +385,7 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT 38
+# define VM_UFFD_MINOR_BIT 41
# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR VM_NONE
diff --git a/include/linux/mman.h b/include/linux/mman.h
index bce214fece16..f4c6346a8fcd 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -155,7 +155,9 @@ calc_vm_flag_bits(struct file *file, unsigned long flags)
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
_calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
+#endif
arch_calc_vm_flag_bits(file, flags);
}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e6a21b62dcce..3b814ce08331 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -615,6 +615,13 @@ FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
PAGEFLAG_FALSE(HighMem, highmem)
#endif
+/* Does kmap_local_folio() only allow access to one page of the folio? */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+#define folio_test_partial_kmap(f) true
+#else
+#define folio_test_partial_kmap(f) folio_test_highmem(f)
+#endif
+
#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 52b5ea663b9f..85bf8dd9f087 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -15,11 +15,7 @@
/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-#define PERCPU_MODULE_RESERVE (8 << 13)
-#else
#define PERCPU_MODULE_RESERVE (8 << 10)
-#endif
#else
#define PERCPU_MODULE_RESERVE 0
#endif
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 95762232e018..5929030d4e8b 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 15
+#define TASKSTATS_VERSION 16
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -72,8 +72,6 @@ struct taskstats {
*/
__u64 cpu_count __attribute__((aligned(8)));
__u64 cpu_delay_total;
- __u64 cpu_delay_max;
- __u64 cpu_delay_min;
/* Following four fields atomically updated using task->delays->lock */
@@ -82,14 +80,10 @@ struct taskstats {
*/
__u64 blkio_count;
__u64 blkio_delay_total;
- __u64 blkio_delay_max;
- __u64 blkio_delay_min;
/* Delay waiting for page fault I/O (swap in only) */
__u64 swapin_count;
__u64 swapin_delay_total;
- __u64 swapin_delay_max;
- __u64 swapin_delay_min;
/* cpu "wall-clock" running time
* On some architectures, value will adjust for cpu time stolen
@@ -172,14 +166,11 @@ struct taskstats {
/* Delay waiting for memory reclaim */
__u64 freepages_count;
__u64 freepages_delay_total;
- __u64 freepages_delay_max;
- __u64 freepages_delay_min;
+
/* Delay waiting for thrashing page */
__u64 thrashing_count;
__u64 thrashing_delay_total;
- __u64 thrashing_delay_max;
- __u64 thrashing_delay_min;
/* v10: 64-bit btime to avoid overflow */
__u64 ac_btime64; /* 64-bit begin time */
@@ -187,8 +178,6 @@ struct taskstats {
/* v11: Delay waiting for memory compact */
__u64 compact_count;
__u64 compact_delay_total;
- __u64 compact_delay_max;
- __u64 compact_delay_min;
/* v12 begin */
__u32 ac_tgid; /* thread group ID */
@@ -210,15 +199,37 @@ struct taskstats {
/* v13: Delay waiting for write-protect copy */
__u64 wpcopy_count;
__u64 wpcopy_delay_total;
- __u64 wpcopy_delay_max;
- __u64 wpcopy_delay_min;
/* v14: Delay waiting for IRQ/SOFTIRQ */
__u64 irq_count;
__u64 irq_delay_total;
- __u64 irq_delay_max;
- __u64 irq_delay_min;
- /* v15: add Delay max */
+
+ /* v15: add Delay max and Delay min */
+
+ /* v16: move Delay max and Delay min to the end of taskstat */
+ __u64 cpu_delay_max;
+ __u64 cpu_delay_min;
+
+ __u64 blkio_delay_max;
+ __u64 blkio_delay_min;
+
+ __u64 swapin_delay_max;
+ __u64 swapin_delay_min;
+
+ __u64 freepages_delay_max;
+ __u64 freepages_delay_min;
+
+ __u64 thrashing_delay_max;
+ __u64 thrashing_delay_min;
+
+ __u64 compact_delay_max;
+ __u64 compact_delay_min;
+
+ __u64 wpcopy_delay_max;
+ __u64 wpcopy_delay_min;
+
+ __u64 irq_delay_max;
+ __u64 irq_delay_min;
};
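
Moving the v15 *_delay_max/*_delay_min fields to the tail restores the layout of everything before them to what pre-v15 userspace expects, and future additions can keep appending at the end. A hedged userspace-side sketch (not part of this patch) of how a taskstats consumer stays compatible across versions: copy no more than it understands and gate the relocated fields on the reported version.

	/* Userspace sketch: 'payload'/'payload_len' are the TASKSTATS_TYPE_STATS
	 * netlink attribute received from the kernel. */
	#include <string.h>
	#include <stdio.h>
	#include <linux/taskstats.h>

	static void print_cpu_delays(const void *payload, size_t payload_len)
	{
		struct taskstats ts = { 0 };
		size_t n = payload_len < sizeof(ts) ? payload_len : sizeof(ts);

		memcpy(&ts, payload, n);	/* never read past either struct */

		/* cpu_delay_total has kept its offset across versions. */
		printf("cpu delay total: %llu ns\n",
		       (unsigned long long)ts.cpu_delay_total);

		/* The max/min pairs only sit at this (tail) offset from v16 on. */
		if (ts.version >= 16)
			printf("cpu delay max/min: %llu/%llu ns\n",
			       (unsigned long long)ts.cpu_delay_max,
			       (unsigned long long)ts.cpu_delay_min);
	}
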
diff --git a/kernel/module/main.c b/kernel/module/main.c
index a2859dc3eea6..5c6ab20240a6 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -2829,6 +2829,7 @@ static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
module_arch_freeing_init(mod);
+ codetag_free_module_sections(mod);
free_mod_mem(mod);
}
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 25ecc1334b67..c7f602fa7b23 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -350,18 +350,28 @@ static bool needs_section_mem(struct module *mod, unsigned long size)
return size >= sizeof(struct alloc_tag);
}
-static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
+static bool clean_unused_counters(struct alloc_tag *start_tag,
+ struct alloc_tag *end_tag)
{
- while (from <= to) {
+ struct alloc_tag *tag;
+ bool ret = true;
+
+ for (tag = start_tag; tag <= end_tag; tag++) {
struct alloc_tag_counters counter;
- counter = alloc_tag_read(from);
- if (counter.bytes)
- return from;
- from++;
+ if (!tag->counters)
+ continue;
+
+ counter = alloc_tag_read(tag);
+ if (!counter.bytes) {
+ free_percpu(tag->counters);
+ tag->counters = NULL;
+ } else {
+ ret = false;
+ }
}
- return NULL;
+ return ret;
}
/* Called with mod_area_mt locked */
@@ -371,12 +381,16 @@ static void clean_unused_module_areas_locked(void)
struct module *val;
mas_for_each(&mas, val, module_tags.size) {
+ struct alloc_tag *start_tag;
+ struct alloc_tag *end_tag;
+
if (val != &unloaded_mod)
continue;
/* Release area if all tags are unused */
- if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
- (struct alloc_tag *)(module_tags.start_addr + mas.last)))
+ start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
+ end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
+ if (clean_unused_counters(start_tag, end_tag))
mas_erase(&mas);
}
}
@@ -561,7 +575,8 @@ unlock:
static void release_module_tags(struct module *mod, bool used)
{
MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
- struct alloc_tag *tag;
+ struct alloc_tag *start_tag;
+ struct alloc_tag *end_tag;
struct module *val;
mas_lock(&mas);
@@ -575,15 +590,22 @@ static void release_module_tags(struct module *mod, bool used)
if (!used)
goto release_area;
- /* Find out if the area is used */
- tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
- (struct alloc_tag *)(module_tags.start_addr + mas.last));
- if (tag) {
- struct alloc_tag_counters counter = alloc_tag_read(tag);
+ start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
+ end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
+ if (!clean_unused_counters(start_tag, end_tag)) {
+ struct alloc_tag *tag;
+
+ for (tag = start_tag; tag <= end_tag; tag++) {
+ struct alloc_tag_counters counter;
+
+ if (!tag->counters)
+ continue;
- pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
- tag->ct.filename, tag->ct.lineno, tag->ct.modname,
- tag->ct.function, counter.bytes);
+ counter = alloc_tag_read(tag);
+ pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
+ tag->ct.filename, tag->ct.lineno, tag->ct.modname,
+ tag->ct.function, counter.bytes);
+ }
} else {
used = false;
}
@@ -596,6 +618,34 @@ out:
mas_unlock(&mas);
}
+static void load_module(struct module *mod, struct codetag *start, struct codetag *stop)
+{
+ /* Allocate module alloc_tag percpu counters */
+ struct alloc_tag *start_tag;
+ struct alloc_tag *stop_tag;
+ struct alloc_tag *tag;
+
+ if (!mod)
+ return;
+
+ start_tag = ct_to_alloc_tag(start);
+ stop_tag = ct_to_alloc_tag(stop);
+ for (tag = start_tag; tag < stop_tag; tag++) {
+ WARN_ON(tag->counters);
+ tag->counters = alloc_percpu(struct alloc_tag_counters);
+ if (!tag->counters) {
+ while (--tag >= start_tag) {
+ free_percpu(tag->counters);
+ tag->counters = NULL;
+ }
+ shutdown_mem_profiling(true);
+ pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n",
+ mod->name);
+ break;
+ }
+ }
+}
+
static void replace_module(struct module *mod, struct module *new_mod)
{
MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
@@ -757,6 +807,7 @@ static int __init alloc_tag_init(void)
.needs_section_mem = needs_section_mem,
.alloc_section_mem = reserve_module_tags,
.free_section_mem = release_module_tags,
+ .module_load = load_module,
.module_replaced = replace_module,
#endif
};
diff --git a/lib/codetag.c b/lib/codetag.c
index 42aadd6c1454..de332e98d6f5 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -194,7 +194,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
if (err >= 0) {
cttype->count += range_size(cttype, &range);
if (cttype->desc.module_load)
- cttype->desc.module_load(cttype, cmod);
+ cttype->desc.module_load(mod, range.start, range.stop);
}
up_write(&cttype->mod_lock);
@@ -333,7 +333,8 @@ void codetag_unload_module(struct module *mod)
}
if (found) {
if (cttype->desc.module_unload)
- cttype->desc.module_unload(cttype, cmod);
+ cttype->desc.module_unload(cmod->mod,
+ cmod->range.start, cmod->range.stop);
cttype->count -= range_size(cttype, &cmod->range);
idr_remove(&cttype->mod_idr, mod_id);
diff --git a/mm/cma.c b/mm/cma.c
index 15632939f20a..c04be488b099 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -608,7 +608,10 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
* complain. Find the boundary by adding one to the last valid
* address.
*/
- highmem_start = __pa(high_memory - 1) + 1;
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ highmem_start = __pa(high_memory - 1) + 1;
+ else
+ highmem_start = memblock_end_of_DRAM();
pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
__func__, &size, &base, &limit, &alignment);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7ae38bfb9096..6a3cf7935c14 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1250,7 +1250,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
/*
* Reset and decrement one ref on hugepage private reservation.
* Called with mm->mmap_lock writer semaphore held.
- * This function should be only used by move_vma() and operate on
+ * This function should be only used by mremap and operate on
* same sized vma. It should never come here with last ref on the
* reservation.
*/
@@ -2949,12 +2949,20 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
while (start_pfn < end_pfn) {
folio = pfn_folio(start_pfn);
+
+ /*
+ * The folio might have been dissolved from under our feet, so make sure
+ * to carefully check the state under the lock.
+ */
+ spin_lock_irq(&hugetlb_lock);
if (folio_test_hugetlb(folio)) {
h = folio_hstate(folio);
} else {
+ spin_unlock_irq(&hugetlb_lock);
start_pfn++;
continue;
}
+ spin_unlock_irq(&hugetlb_lock);
if (!folio_ref_count(folio)) {
ret = alloc_and_dissolve_hugetlb_folio(h, folio,
@@ -7931,3 +7939,17 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}
+
+/*
+ * For hugetlb, mremap() is an odd edge case - while the VMA copying is
+ * performed, we permit both the old and new VMAs to reference the same
+ * reservation.
+ *
+ * We fix this up after the operation succeeds, or if a newly allocated VMA
+ * is closed as a result of a failure to allocate memory.
+ */
+void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+ if (is_vm_hugetlb_page(vma))
+ clear_vma_resv_huge_pages(vma);
+}
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 88d1c9dcb507..d2c70cd2afb1 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -292,33 +292,99 @@ void __init __weak kasan_populate_early_vm_area_shadow(void *start,
{
}
+struct vmalloc_populate_data {
+ unsigned long start;
+ struct page **pages;
+};
+
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
- void *unused)
+ void *_data)
{
- unsigned long page;
+ struct vmalloc_populate_data *data = _data;
+ struct page *page;
pte_t pte;
+ int index;
if (likely(!pte_none(ptep_get(ptep))))
return 0;
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- __memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
- pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
+ index = PFN_DOWN(addr - data->start);
+ page = data->pages[index];
+ __memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
+ pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
spin_lock(&init_mm.page_table_lock);
if (likely(pte_none(ptep_get(ptep)))) {
set_pte_at(&init_mm, addr, ptep, pte);
- page = 0;
+ data->pages[index] = NULL;
}
spin_unlock(&init_mm.page_table_lock);
- if (page)
- free_page(page);
+
+ return 0;
+}
+
+static void ___free_pages_bulk(struct page **pages, int nr_pages)
+{
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pages[i]) {
+ __free_pages(pages[i], 0);
+ pages[i] = NULL;
+ }
+ }
+}
+
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+{
+ unsigned long nr_populated, nr_total = nr_pages;
+ struct page **page_array = pages;
+
+ while (nr_pages) {
+ nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+ if (!nr_populated) {
+ ___free_pages_bulk(page_array, nr_total - nr_pages);
+ return -ENOMEM;
+ }
+ pages += nr_populated;
+ nr_pages -= nr_populated;
+ }
+
return 0;
}
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+{
+ unsigned long nr_pages, nr_total = PFN_UP(end - start);
+ struct vmalloc_populate_data data;
+ int ret = 0;
+
+ data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (!data.pages)
+ return -ENOMEM;
+
+ while (nr_total) {
+ nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
+ ret = ___alloc_pages_bulk(data.pages, nr_pages);
+ if (ret)
+ break;
+
+ data.start = start;
+ ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
+ kasan_populate_vmalloc_pte, &data);
+ ___free_pages_bulk(data.pages, nr_pages);
+ if (ret)
+ break;
+
+ start += nr_pages * PAGE_SIZE;
+ nr_total -= nr_pages;
+ }
+
+ free_page((unsigned long)data.pages);
+
+ return ret;
+}
+
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
unsigned long shadow_start, shadow_end;
@@ -348,9 +414,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
shadow_start = PAGE_ALIGN_DOWN(shadow_start);
shadow_end = PAGE_ALIGN(shadow_end);
- ret = apply_to_page_range(&init_mm, shadow_start,
- shadow_end - shadow_start,
- kasan_populate_vmalloc_pte, NULL);
+ ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
if (ret)
return ret;
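
For scale (illustrative arithmetic, assuming a 64-bit kernel with 4 KiB pages and generic KASAN's one-shadow-byte-per-8-bytes ratio): one page of page pointers batches 512 shadow pages, so each loop iteration can populate 2 MiB of shadow, covering 16 MiB of vmalloc address space, with alloc_pages_bulk() standing in for 512 individual __get_free_page() calls.

	/* Batch sizing in __kasan_populate_vmalloc() above, spelled out. */
	#define PTRS_PER_BATCH_PAGE	(PAGE_SIZE / sizeof(struct page *))	/* 512 */
	#define SHADOW_PER_BATCH	(PTRS_PER_BATCH_PAGE * PAGE_SIZE)	/* 2 MiB */
	#define VMALLOC_PER_BATCH	(SHADOW_PER_BATCH * KASAN_SHADOW_SCALE_SIZE) /* 16 MiB */
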
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c96c1f2b9cf5..2d4d65f25fec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1168,7 +1168,6 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
{
struct mem_cgroup *iter;
int ret = 0;
- int i = 0;
BUG_ON(mem_cgroup_is_root(memcg));
@@ -1178,10 +1177,9 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
while (!ret && (task = css_task_iter_next(&it))) {
- /* Avoid potential softlockup warning */
- if ((++i & 1023) == 0)
- cond_resched();
ret = fn(task, arg);
+ /* Avoid potential softlockup warning */
+ cond_resched();
}
css_task_iter_end(&it);
if (ret) {
diff --git a/mm/mremap.c b/mm/mremap.c
index 7db9da609c84..0d4948b720e2 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1188,8 +1188,7 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm,
mremap_userfaultfd_prep(new_vma, vrm->uf);
}
- if (is_vm_hugetlb_page(vma))
- clear_vma_resv_huge_pages(vma);
+ fixup_hugetlb_reservations(vma);
/* Tell pfnmap has moved from this vma */
if (unlikely(vma->vm_flags & VM_PFNMAP))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8258349e49ac..47fa713ccb4d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4562,6 +4562,14 @@ restart:
}
retry:
+ /*
+ * Deal with possible cpuset update races or zonelist updates to avoid
+ * infinite retries.
+ */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+ check_retry_zonelist(zonelist_iter_cookie))
+ goto restart;
+
/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
if (alloc_flags & ALLOC_KSWAPD)
wake_all_kswapds(order, gfp_mask, ac);
diff --git a/mm/truncate.c b/mm/truncate.c
index 5d98054094d1..f2aaf99f2990 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -191,6 +191,7 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
loff_t pos = folio_pos(folio);
+ size_t size = folio_size(folio);
unsigned int offset, length;
struct page *split_at, *split_at2;
@@ -198,14 +199,13 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
offset = start - pos;
else
offset = 0;
- length = folio_size(folio);
- if (pos + length <= (u64)end)
- length = length - offset;
+ if (pos + size <= (u64)end)
+ length = size - offset;
else
length = end + 1 - pos - offset;
folio_wait_writeback(folio);
- if (length == folio_size(folio)) {
+ if (length == size) {
truncate_inode_folio(folio->mapping, folio);
return true;
}
@@ -224,16 +224,20 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
return true;
split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
- split_at2 = folio_page(folio,
- PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
-
if (!try_folio_split(folio, split_at, NULL)) {
/*
* try to split at offset + length to make sure folios within
* the range can be dropped, especially to avoid memory waste
* for shmem truncate
*/
- struct folio *folio2 = page_folio(split_at2);
+ struct folio *folio2;
+
+ if (offset + length == size)
+ goto no_split;
+
+ split_at2 = folio_page(folio,
+ PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
+ folio2 = page_folio(split_at2);
if (!folio_try_get(folio2))
goto no_split;
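
Computing split_at2 only after the offset + length == size check also avoids forming a page pointer one past the folio. A worked example of the case the new early exit handles (hypothetical numbers, 4 KiB base pages):

	/* 2 MiB (512-page) folio at pos 0, truncating from 1 MiB to the end:
	 *   offset = 1 MiB, length = 1 MiB, size = 2 MiB
	 *   offset + length == size  ->  nothing beyond the range survives,
	 *   so the second split is skipped via no_split.  Previously
	 *   split_at2 = folio_page(folio, PAGE_ALIGN_DOWN(2 MiB) / PAGE_SIZE)
	 *             = folio_page(folio, 512), i.e. one page past the folio,
	 *   was computed before that was known. */
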
diff --git a/mm/vma.c b/mm/vma.c
index 839d12f02c88..a468d4c29c0c 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -1834,6 +1834,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return new_vma;
out_vma_link:
+ fixup_hugetlb_reservations(new_vma);
vma_close(new_vma);
if (new_vma->vm_file)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2d7511654831..00cf1b575c89 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4093,8 +4093,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
* would be a good heuristic for when to shrink the vm_area?
*/
if (size <= old_size) {
- /* Zero out "freed" memory. */
- if (want_init_on_free())
+ /* Zero out "freed" memory, potentially for future realloc. */
+ if (want_init_on_free() || want_init_on_alloc(flags))
memset((void *)p + size, 0, old_size - size);
vm->requested_size = size;
kasan_poison_vmalloc(p + size, old_size - size);
@@ -4107,10 +4107,13 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
if (size <= alloced_size) {
kasan_unpoison_vmalloc(p + old_size, size - old_size,
KASAN_VMALLOC_PROT_NORMAL);
- /* Zero out "alloced" memory. */
- if (want_init_on_alloc(flags))
- memset((void *)p + old_size, 0, size - old_size);
+ /*
+ * No need to zero memory here, as unused memory will have
+ * already been zeroed at initial allocation time or during
+ * realloc shrink time.
+ */
vm->requested_size = size;
+ return (void *)p;
}
/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
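
With shrink now zeroing the tail whenever either init_on_free or init_on_alloc applies, a later in-place grow can rely on that memory already being clear. An illustrative sequence under init_on_alloc (sketch only, not from the patch):

	char *p = vmalloc(2 * PAGE_SIZE);	/* both pages zeroed by init_on_alloc */

	p = vrealloc(p, PAGE_SIZE, GFP_KERNEL);		/* shrink: tail page zeroed
							 * here, requested_size drops */
	p = vrealloc(p, 2 * PAGE_SIZE, GFP_KERNEL);	/* grow back within alloced_size:
							 * no memset needed, the second
							 * page is still all zeroes */
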