author	Mike Rapoport (Microsoft) <rppt@kernel.org>	2025-02-25 10:30:16 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2025-03-21 22:03:11 -0700
commit	09bdc4fe700d1c499d94452d7a20e69c26a8c007 (patch)
tree	8efb0b7549631400cf8b08600336e85ea2698a42 /mm
parent	24ac6fb6e3647fff3646b3ea1811095441380560 (diff)
mm/mm_init: rename __init_reserved_page_zone to __init_page_from_nid
The __init_reserved_page_zone() function finds the zone for the given pfn and nid and initializes the struct page with that zone and nid. There is nothing in the function specific to reserved pages, so the name is misleading. Rename it to __init_page_from_nid() to better reflect what the function does.

Link: https://lkml.kernel.org/r/20250225083017.567649-2-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Frank van der Linden <fvdl@google.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
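For reference, the renamed helper looks up which zone on the given node spans the pfn and then delegates to __init_single_page() with that zone. The sketch below illustrates that flow as described in the changelog; it is not a verbatim copy of the body in mm/mm_init.c, which may differ in detail.

/*
 * Illustrative sketch of __init_page_from_nid(): find the zone on node
 * @nid whose PFN range contains @pfn, then initialize the struct page
 * with that zone and nid via the common single-page initializer.
 */
void __meminit __init_page_from_nid(unsigned long pfn, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int zid;

	/* Walk the node's zones until one spans @pfn. */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}

	/* Reuse the common initializer with the zone found above. */
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}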
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	2
-rw-r--r--	mm/internal.h	2
-rw-r--r--	mm/mm_init.c	4
3 files changed, 4 insertions, 4 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index af9b8c1fca67..6fccfe6d046c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3407,7 +3407,7 @@ static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
 	while (npages--) {
 		pfn = page_to_pfn(page);
-		__init_reserved_page_zone(pfn, nid);
+		__init_page_from_nid(pfn, nid);
 		free_reserved_page(page);
 		page++;
 	}
diff --git a/mm/internal.h b/mm/internal.h
index 286520a424fe..21f2643f3d95 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1518,7 +1518,7 @@ static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte
 void __meminit __init_single_page(struct page *page, unsigned long pfn,
 				unsigned long zone, int nid);
-void __meminit __init_reserved_page_zone(unsigned long pfn, int nid);
+void __meminit __init_page_from_nid(unsigned long pfn, int nid);
 
 /* shrinker related functions */
 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
diff --git a/mm/mm_init.c b/mm/mm_init.c
index c82b0162f1cb..16a96aaf65c4 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -668,7 +668,7 @@ static inline void fixup_hashdist(void) {}
 /*
  * Initialize a reserved page unconditionally, finding its zone first.
  */
-void __meminit __init_reserved_page_zone(unsigned long pfn, int nid)
+void __meminit __init_page_from_nid(unsigned long pfn, int nid)
 {
 	pg_data_t *pgdat;
 	int zid;
@@ -748,7 +748,7 @@ static void __meminit init_reserved_page(unsigned long pfn, int nid)
 	if (early_page_initialised(pfn, nid))
 		return;
 
-	__init_reserved_page_zone(pfn, nid);
+	__init_page_from_nid(pfn, nid);
 }
 #else
 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
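As a usage illustration only: a caller that initializes a reserved PFN range could pair the renamed helper with __SetPageReserved(), roughly as sketched below. init_reserved_range_sketch() is a hypothetical name invented for this example; it is not part of this patch or of the kernel.

/*
 * Hypothetical example: initialize every valid page in a reserved
 * physical range on node @nid and mark it reserved so it is never
 * handed to the page allocator.
 */
static void __meminit init_reserved_range_sketch(phys_addr_t start,
						 phys_addr_t end, int nid)
{
	unsigned long pfn;

	for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
		if (!pfn_valid(pfn))
			continue;

		/* Set up the struct page for this pfn on node @nid ... */
		__init_page_from_nid(pfn, nid);
		/* ... and flag it as reserved. */
		__SetPageReserved(pfn_to_page(pfn));
	}
}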