author     Linus Torvalds <torvalds@linux-foundation.org>    2025-02-01 09:49:20 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2025-02-01 09:49:20 -0800
commit     03cc3579bc617ba7615068bedbb5331043e62142 (patch)
tree       bea008de0ba840a38d23a6b5011d99402e187649 /mm/compaction.c
parent     c6fe03a3f92efdaecff085591fcd18cac9850855 (diff)
parent     e5b2a356dc8a88708d97bd47cca3b8f7ed7af6cb (diff)
Merge tag 'mm-hotfixes-stable-2025-02-01-03-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "21 hotfixes. 8 are cc:stable and the remainder address post-6.13
  issues. 13 are for MM and 8 are for non-MM. All are singletons,
  please see the changelogs for details"

* tag 'mm-hotfixes-stable-2025-02-01-03-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (21 commits)
  MAINTAINERS: include linux-mm for xarray maintenance
  revert "xarray: port tests to kunit"
  MAINTAINERS: add lib/test_xarray.c
  mailmap, MAINTAINERS, docs: update Carlos's email address
  mm/hugetlb: fix hugepage allocation for interleaved memory nodes
  mm: gup: fix infinite loop within __get_longterm_locked
  mm, swap: fix reclaim offset calculation error during allocation
  .mailmap: update email address for Christopher Obbard
  kfence: skip __GFP_THISNODE allocations on NUMA systems
  nilfs2: fix possible int overflows in nilfs_fiemap()
  mm: compaction: use the proper flag to determine watermarks
  kernel: be more careful about dup_mmap() failures and uprobe registering
  mm/fake-numa: handle cases with no SRAT info
  mm: kmemleak: fix upper boundary check for physical address objects
  mailmap: add an entry for Hamza Mahfooz
  MAINTAINERS: mailmap: update Yosry Ahmed's email address
  scripts/gdb: fix aarch64 userspace detection in get_current_task
  mm/vmscan: accumulate nr_demoted for accurate demotion statistics
  ocfs2: fix incorrect CPU endianness conversion causing mount failure
  mm/zsmalloc: add __maybe_unused attribute for is_first_zpdesc()
  ...
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--   mm/compaction.c   29
1 file changed, 25 insertions, 4 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index bcc0df0066dc..12ed8425fa17 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2491,7 +2491,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
  */
 static enum compact_result
 compaction_suit_allocation_order(struct zone *zone, unsigned int order,
-                                 int highest_zoneidx, unsigned int alloc_flags)
+                                 int highest_zoneidx, unsigned int alloc_flags,
+                                 bool async)
 {
         unsigned long watermark;
 
@@ -2500,6 +2501,23 @@ compaction_suit_allocation_order(struct zone *zone, unsigned int order,
                               alloc_flags))
                 return COMPACT_SUCCESS;
 
+        /*
+         * For unmovable allocations (without ALLOC_CMA), check if there is enough
+         * free memory in the non-CMA pageblocks. Otherwise compaction could form
+         * the high-order page in CMA pageblocks, which would not help the
+         * allocation to succeed. However, limit the check to costly order async
+         * compaction (such as opportunistic THP attempts) because there is the
+         * possibility that compaction would migrate pages from non-CMA to CMA
+         * pageblock.
+         */
+        if (order > PAGE_ALLOC_COSTLY_ORDER && async &&
+            !(alloc_flags & ALLOC_CMA)) {
+                watermark = low_wmark_pages(zone) + compact_gap(order);
+                if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
+                                         0, zone_page_state(zone, NR_FREE_PAGES)))
+                        return COMPACT_SKIPPED;
+        }
+
         if (!compaction_suitable(zone, order, highest_zoneidx))
                 return COMPACT_SKIPPED;
 
@@ -2535,7 +2553,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
         if (!is_via_compact_memory(cc->order)) {
                 ret = compaction_suit_allocation_order(cc->zone, cc->order,
                                                        cc->highest_zoneidx,
-                                                       cc->alloc_flags);
+                                                       cc->alloc_flags,
+                                                       cc->mode == MIGRATE_ASYNC);
                 if (ret != COMPACT_CONTINUE)
                         return ret;
         }
@@ -3038,7 +3057,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
 
                 ret = compaction_suit_allocation_order(zone,
                                 pgdat->kcompactd_max_order,
-                                highest_zoneidx, ALLOC_WMARK_MIN);
+                                highest_zoneidx, ALLOC_WMARK_MIN,
+                                false);
                 if (ret == COMPACT_CONTINUE)
                         return true;
         }
@@ -3079,7 +3099,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                         continue;
 
                 ret = compaction_suit_allocation_order(zone,
-                                cc.order, zoneid, ALLOC_WMARK_MIN);
+                                cc.order, zoneid, ALLOC_WMARK_MIN,
+                                false);
                 if (ret != COMPACT_CONTINUE)
                         continue;
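
Note on the new check above: compaction is now skipped early only for costly-order (order > PAGE_ALLOC_COSTLY_ORDER, i.e. > 3) async requests that lack ALLOC_CMA, since forming the high-order page inside CMA pageblocks would not help such an allocation. The sketch below is a minimal userspace model of that decision, not kernel code: compact_gap(order) == 2UL << order matches mm/internal.h, but the ALLOC_CMA bit value and the zone numbers in main() are made-up inputs, and free_non_cma stands in for the non-CMA free page count that __zone_watermark_ok() derives internally when ALLOC_CMA is absent from alloc_flags.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3
#define ALLOC_CMA 0x80          /* illustrative bit, not necessarily the kernel's value */

/* Matches mm/internal.h: headroom of twice the requested page count. */
static unsigned long compact_gap(unsigned int order)
{
        return 2UL << order;
}

/*
 * Model of the skip decision: a costly-order async request without
 * ALLOC_CMA is skipped when non-CMA free memory cannot clear the low
 * watermark plus the compaction gap.
 */
static bool skip_cma_unsuitable(unsigned int order, bool async,
                                unsigned int alloc_flags,
                                unsigned long low_wmark,
                                unsigned long free_non_cma)
{
        if (order > PAGE_ALLOC_COSTLY_ORDER && async &&
            !(alloc_flags & ALLOC_CMA)) {
                unsigned long watermark = low_wmark + compact_gap(order);

                if (free_non_cma <= watermark)
                        return true;    /* COMPACT_SKIPPED */
        }
        return false;                   /* fall through to compaction_suitable() */
}

int main(void)
{
        unsigned long low_wmark = 2048; /* made-up zone low watermark, in pages */

        printf("%d\n", skip_cma_unsuitable(9, true, 0, low_wmark, 2500));  /* 1: skip */
        printf("%d\n", skip_cma_unsuitable(9, true, 0, low_wmark, 8192));  /* 0: proceed */
        printf("%d\n", skip_cma_unsuitable(9, false, 0, low_wmark, 2500)); /* 0: sync, check not applied */
        return 0;
}

For order 9 (a 2 MiB THP with 4 KiB pages) the gap is 2UL << 9 = 1024 pages, i.e. 4 MiB of headroom on top of the low watermark. The kcompactd call sites in the diff pass false for async, so background compaction is never skipped by this check; compact_zone() applies it only when cc->mode == MIGRATE_ASYNC.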