author     Linus Torvalds <torvalds@linux-foundation.org>    2025-11-11 09:49:56 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2025-11-11 09:49:56 -0800
commit     537d196186e0a0ce28e494ca1881885accc35a12 (patch)
tree       abf40db764abef2dfd1209f5e3e473ff9ddd0719 /include/linux
parent     4427259cc7f7571a157fbc9b5011e1ef6fe0a4a8 (diff)
parent     b05addf6f0596edb1f82ab4059438c7ef2d2686d (diff)
Merge tag 'mm-hotfixes-stable-2025-11-10-19-30' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "26 hotfixes. 22(!) are cc:stable, 22 are MM.

  - address some Kexec Handover issues (Pasha Tatashin)

  - fix handling of large folios which are mapped outside i_size
    (Kiryl Shutsemau)

  - fix some DAMON time issues on 32-bit machines (Quanmin Yan)

  Plus the usual shower of singletons"

* tag 'mm-hotfixes-stable-2025-11-10-19-30' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (26 commits)
  kho: warn and exit when unpreserved page wasn't preserved
  kho: fix unpreservation of higher-order vmalloc preservations
  kho: fix out-of-bounds access of vmalloc chunk
  MAINTAINERS: add Chris and Kairui as the swap maintainer
  mm/secretmem: fix use-after-free race in fault handler
  mm/huge_memory: initialise the tags of the huge zero folio
  nilfs2: avoid having an active sc_timer before freeing sci
  scripts/decode_stacktrace.sh: fix build ID and PC source parsing
  mm/damon/sysfs: change next_update_jiffies to a global variable
  mm/damon/stat: change last_refresh_jiffies to a global variable
  maple_tree: fix tracepoint string pointers
  codetag: debug: handle existing CODETAG_EMPTY in mark_objexts_empty for slabobj_ext
  mm/mremap: honour writable bit in mremap pte batching
  gcov: add support for GCC 15
  mm/mm_init: fix hash table order logging in alloc_large_system_hash()
  mm/truncate: unmap large folio on split failure
  mm/memory: do not populate page table entries beyond i_size
  fs/proc: fix uaf in proc_readdir_de()
  mm/huge_memory: preserve PG_has_hwpoisoned if a folio is split to >0 order
  ksm: use range-walk function to jump over holes in scan_get_next_rmap_item
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/gfp.h        3
-rw-r--r--   include/linux/huge_mm.h   55
2 files changed, 26 insertions(+), 32 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0ceb4e09306c..623bee335383 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -7,6 +7,7 @@
 #include <linux/mmzone.h>
 #include <linux/topology.h>
 #include <linux/alloc_tag.h>
+#include <linux/cleanup.h>
 #include <linux/sched.h>
 
 struct vm_area_struct;
@@ -463,4 +464,6 @@ static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
 /* This should be paired with folio_put() rather than free_contig_range(). */
 #define folio_alloc_gigantic(...)	alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
 
+DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
+
 #endif /* __LINUX_GFP_H */
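
The new DEFINE_FREE(free_page, ...) helper pairs with the __free() attribute from <linux/cleanup.h> to release a single page automatically when the owning variable goes out of scope. Below is a minimal usage sketch, not part of this patch: the caller function and its buffer handling are hypothetical, and only DEFINE_FREE(free_page, ...), __free(), __get_free_page() and GFP_KERNEL come from the kernel headers touched above.

#include <linux/cleanup.h>
#include <linux/gfp.h>

/* Hypothetical caller: a scratch page that must not leak on any return path. */
static int example_use_scratch_page(void)
{
	/* Freed automatically when 'buf' leaves scope, on every return path. */
	void *buf __free(free_page) = (void *)__get_free_page(GFP_KERNEL);

	if (!buf)
		return -ENOMEM;	/* cleanup runs with NULL; free_page(0) is a no-op */

	/* ... fill and consume the page here; no explicit free_page() call ... */
	return 0;
}

With the scoped cleanup attached at allocation time, early-exit error paths no longer need a goto-based unwind just to free the page.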
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f327d62fc985..71ac78b9f834 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -376,45 +376,30 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
 		struct list_head *list);
 /*
- * try_folio_split - try to split a @folio at @page using non uniform split.
+ * try_folio_split_to_order - try to split a @folio at @page to @new_order using
+ * non uniform split.
  * @folio: folio to be split
- * @page: split to order-0 at the given page
- * @list: store the after-split folios
+ * @page: split to @new_order at the given page
+ * @new_order: the target split order
  *
- * Try to split a @folio at @page using non uniform split to order-0, if
- * non uniform split is not supported, fall back to uniform split.
+ * Try to split a @folio at @page using non uniform split to @new_order, if
+ * non uniform split is not supported, fall back to uniform split. After-split
+ * folios are put back to LRU list. Use min_order_for_split() to get the lower
+ * bound of @new_order.
  *
  * Return: 0: split is successful, otherwise split failed.
  */
-static inline int try_folio_split(struct folio *folio, struct page *page,
-		struct list_head *list)
+static inline int try_folio_split_to_order(struct folio *folio,
+		struct page *page, unsigned int new_order)
 {
-	int ret = min_order_for_split(folio);
-
-	if (ret < 0)
-		return ret;
-
-	if (!non_uniform_split_supported(folio, 0, false))
-		return split_huge_page_to_list_to_order(&folio->page, list,
-				ret);
-	return folio_split(folio, ret, page, list);
+	if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
+		return split_huge_page_to_list_to_order(&folio->page, NULL,
+				new_order);
+	return folio_split(folio, new_order, page, NULL);
 }
 static inline int split_huge_page(struct page *page)
 {
-	struct folio *folio = page_folio(page);
-	int ret = min_order_for_split(folio);
-
-	if (ret < 0)
-		return ret;
-
-	/*
-	 * split_huge_page() locks the page before splitting and
-	 * expects the same page that has been split to be locked when
-	 * returned. split_folio(page_folio(page)) cannot be used here
-	 * because it converts the page to folio and passes the head
-	 * page to be split.
-	 */
-	return split_huge_page_to_list_to_order(page, NULL, ret);
+	return split_huge_page_to_list_to_order(page, NULL, 0);
 }
 
 void deferred_split_folio(struct folio *folio, bool partially_mapped);
@@ -597,14 +582,20 @@ static inline int split_huge_page(struct page *page)
 	return -EINVAL;
 }
 
+static inline int min_order_for_split(struct folio *folio)
+{
+	VM_WARN_ON_ONCE_FOLIO(1, folio);
+	return -EINVAL;
+}
+
 static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
 {
 	VM_WARN_ON_ONCE_FOLIO(1, folio);
 	return -EINVAL;
 }
 
-static inline int try_folio_split(struct folio *folio, struct page *page,
-		struct list_head *list)
+static inline int try_folio_split_to_order(struct folio *folio,
+		struct page *page, unsigned int new_order)
 {
 	VM_WARN_ON_ONCE_FOLIO(1, folio);
 	return -EINVAL;
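
For context, here is a minimal sketch of how the reworked split API fits together, following the kernel-doc above; the caller function is hypothetical and assumes the folio is locked. min_order_for_split() reports the lowest order the folio may be split to, and try_folio_split_to_order() attempts a non-uniform split to that order, falling back to a uniform split when non-uniform splitting is unsupported and putting the after-split folios back on the LRU list.

#include <linux/huge_mm.h>

/* Hypothetical caller; assumes 'folio' is locked and 'page' belongs to it. */
static int example_split_at(struct folio *folio, struct page *page)
{
	int min_order = min_order_for_split(folio);

	if (min_order < 0)
		return min_order;	/* splitting is not possible; propagate the error */

	/* Split around @page down to the smallest order the mapping allows. */
	return try_folio_split_to_order(folio, page, min_order);
}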