author     Matthew Wilcox (Oracle) <willy@infradead.org>    2024-11-25 21:01:38 +0000
committer  Andrew Morton <akpm@linux-foundation.org>        2025-01-13 22:40:32 -0800
commit     efabfe1420f5245938871a9578160f32277c8e21
tree       5a4ad755017421da6f8e370ecc290bbba4fc39c8    /mm/page_alloc.c
parent     ee66e9c34fd3aeab3a7c2cda300f852aa5363609
mm/page_alloc: move set_page_refcounted() to callers of get_page_from_freelist()
In preparation for allocating frozen pages, stop initialising the page
refcount in get_page_from_freelist().

Link: https://lkml.kernel.org/r/20241125210149.2976098-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
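The calling convention this patch establishes can be modelled outside the kernel. Below is a minimal, self-contained C sketch (toy stand-ins, not the kernel's real struct page or function signatures): get_page_from_freelist() now hands back a page whose refcount is still zero, and each caller that is about to expose the page performs the 0 -> 1 transition itself via set_page_refcounted().

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for struct page; the real set_page_refcounted() lives in
 * mm/internal.h and boils down to set_page_count(page, 1). */
struct page {
	int _refcount;
};

/* Models the post-patch behaviour: the freelist hands out a page whose
 * refcount is still zero ("frozen"); it no longer initialises the
 * refcount internally. */
static struct page *get_page_from_freelist(void)
{
	static struct page p;
	p._refcount = 0;	/* fresh page, not yet counted */
	return &p;
}

/* Models set_page_refcounted(): the count must still be zero, then
 * becomes one. */
static void set_page_refcounted(struct page *page)
{
	assert(page->_refcount == 0);
	page->_refcount = 1;
}

int main(void)
{
	/* The caller now owns the 0 -> 1 transition, mirroring the
	 * "if (page) set_page_refcounted(page);" pattern this patch
	 * adds at each get_page_from_freelist() call site. */
	struct page *page = get_page_from_freelist();
	if (page)
		set_page_refcounted(page);
	printf("refcount = %d\n", page->_refcount);	/* prints 1 */
	return 0;
}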
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--    mm/page_alloc.c    25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08cc1e0bd950..2786117a50ee 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3473,7 +3473,6 @@ try_this_zone:
gfp_mask, alloc_flags, ac->migratetype);
if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
- set_page_refcounted(page);
/*
* If this is a high-order atomic allocation then check
@@ -3568,6 +3567,8 @@ __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist(gfp_mask, order,
alloc_flags, ac);
+ if (page)
+ set_page_refcounted(page);
return page;
}
@@ -3606,8 +3607,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
~__GFP_DIRECT_RECLAIM, order,
ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
- if (page)
+ if (page) {
+ set_page_refcounted(page);
goto out;
+ }
/* Coredumps can quickly deplete all memory reserves */
if (current->flags & PF_DUMPCORE)
@@ -3698,10 +3701,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
count_vm_event(COMPACTSTALL);
/* Prep a captured page if available */
- if (page) {
+ if (page)
prep_new_page(page, order, gfp_mask, alloc_flags);
- set_page_refcounted(page);
- }
/* Try get a page from the freelist if available */
if (!page)
@@ -3710,6 +3711,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
if (page) {
struct zone *zone = page_zone(page);
+ set_page_refcounted(page);
zone->compact_blockskip_flush = false;
compaction_defer_reset(zone, order, true);
count_vm_event(COMPACTSUCCESS);
@@ -3968,6 +3970,7 @@ retry:
drained = true;
goto retry;
}
+ set_page_refcounted(page);
out:
psi_memstall_leave(&pflags);
@@ -4288,8 +4291,10 @@ restart:
* that first
*/
page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
- if (page)
+ if (page) {
+ set_page_refcounted(page);
goto got_pg;
+ }
/*
* For costly allocations, try direct compaction first, as it's likely
@@ -4369,8 +4374,10 @@ retry:
/* Attempt with potentially adjusted zonelist and alloc_flags */
page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
- if (page)
+ if (page) {
+ set_page_refcounted(page);
goto got_pg;
+ }
/* Caller is not willing to reclaim, we can't balance anything */
if (!can_direct_reclaim)
@@ -4754,8 +4761,10 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
/* First allocation attempt */
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
- if (likely(page))
+ if (likely(page)) {
+ set_page_refcounted(page);
goto out;
+ }
alloc_gfp = gfp;
ac.spread_dirty_pages = false;
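For context on where the series is heading: once no caller relies on get_page_from_freelist() initialising the refcount, an allocation path can keep a page "frozen" at refcount zero for its whole lifetime. A hypothetical sketch of such a consumer, continuing the toy model above; alloc_frozen_page() is illustrative only and not part of this patch:

/* Hypothetical frozen-page consumer, reusing the toy model above.
 * Nothing here is kernel API; it only illustrates why the refcount
 * initialisation had to move out of get_page_from_freelist(). */
static struct page *alloc_frozen_page(void)
{
	struct page *page = get_page_from_freelist();

	/* Deliberately no set_page_refcounted(): the page stays at
	 * refcount 0 for its entire lifetime. */
	return page;
}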