path: root/mm/cma.c
author	Frank van der Linden <fvdl@google.com>	2025-02-28 18:29:25 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2025-03-16 22:06:30 -0700
commit	85abcd023640067fcbb6e23c45e8a0014dbba11d (patch)
tree	ef34812467b4290f36fecd1bf93378847e21792b /mm/cma.c
parent	9320fa2717810a7d3451dd1a18c31f986a1d4068 (diff)
mm/cma: introduce interface for early reservations
It can be desirable to reserve memory in a CMA area before it is activated, early in boot. Such reservations would effectively be memblock allocations, but they can be returned to the CMA area later. This functionality can be used to allow hugetlb bootmem allocations from a hugetlb CMA area.

A new interface, cma_reserve_early, is introduced. This allows for pageblock-aligned reservations. These reservations are skipped during the initial handoff of pages in a CMA area to the buddy allocator. The caller is responsible for making sure that the page structures are set up, and that the migrate type is set correctly, as with other memblock allocations that stick around. If the CMA area fails to activate (because it intersects with multiple zones), the reserved memory is not given to the buddy allocator; the caller needs to take care of that.

Link: https://lkml.kernel.org/r/20250228182928.2645936-25-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
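As a rough illustration of how the new interface is meant to be called, here is a minimal caller-side sketch. The wrapper name (hugetlb_early_cma_reserve) and the memblock fallback path are assumptions made for this example; they are not part of the patch:

static void __init *hugetlb_early_cma_reserve(struct cma *cma,
					      unsigned long nr_pages)
{
	/* cma_reserve_early() only accepts pageblock-aligned sizes. */
	unsigned long size = ALIGN(nr_pages << PAGE_SHIFT,
				   CMA_MIN_ALIGNMENT_BYTES);
	void *mem;

	/* Must run early in boot, before the CMA area is activated. */
	mem = cma_reserve_early(cma, size);

	/* If the CMA area cannot satisfy it, fall back to plain memblock. */
	if (!mem)
		mem = memblock_alloc(size, CMA_MIN_ALIGNMENT_BYTES);

	return mem;
}

The returned memory behaves like any other memblock allocation: the caller still has to initialize the page structures covering it, as the commit message notes.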
Diffstat (limited to 'mm/cma.c')
-rw-r--r--	mm/cma.c	83
1 file changed, 76 insertions(+), 7 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 5e1d169e24fa..09322b8284bd 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -144,9 +144,10 @@ bool cma_validate_zones(struct cma *cma)
static void __init cma_activate_area(struct cma *cma)
{
- unsigned long pfn, base_pfn;
+ unsigned long pfn, end_pfn;
int allocrange, r;
struct cma_memrange *cmr;
+ unsigned long bitmap_count, count;
for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
cmr = &cma->ranges[allocrange];
@@ -161,8 +162,13 @@ static void __init cma_activate_area(struct cma *cma)
for (r = 0; r < cma->nranges; r++) {
cmr = &cma->ranges[r];
- base_pfn = cmr->base_pfn;
- for (pfn = base_pfn; pfn < base_pfn + cmr->count;
+ if (cmr->early_pfn != cmr->base_pfn) {
+ count = cmr->early_pfn - cmr->base_pfn;
+ bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+ bitmap_set(cmr->bitmap, 0, bitmap_count);
+ }
+
+ for (pfn = cmr->early_pfn; pfn < cmr->base_pfn + cmr->count;
pfn += pageblock_nr_pages)
init_cma_reserved_pageblock(pfn_to_page(pfn));
}
@@ -173,6 +179,7 @@ static void __init cma_activate_area(struct cma *cma)
INIT_HLIST_HEAD(&cma->mem_head);
spin_lock_init(&cma->mem_head_lock);
#endif
+ set_bit(CMA_ACTIVATED, &cma->flags);
return;
@@ -184,9 +191,8 @@ cleanup:
if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
for (r = 0; r < allocrange; r++) {
cmr = &cma->ranges[r];
- for (pfn = cmr->base_pfn;
- pfn < cmr->base_pfn + cmr->count;
- pfn++)
+ end_pfn = cmr->base_pfn + cmr->count;
+ for (pfn = cmr->early_pfn; pfn < end_pfn; pfn++)
free_reserved_page(pfn_to_page(pfn));
}
}
@@ -290,6 +296,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
return ret;
cma->ranges[0].base_pfn = PFN_DOWN(base);
+ cma->ranges[0].early_pfn = PFN_DOWN(base);
cma->ranges[0].count = cma->count;
cma->nranges = 1;
cma->nid = NUMA_NO_NODE;
@@ -509,6 +516,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size,
nr, (u64)mlp->base, (u64)mlp->base + size);
cmrp = &cma->ranges[nr++];
cmrp->base_pfn = PHYS_PFN(mlp->base);
+ cmrp->early_pfn = cmrp->base_pfn;
cmrp->count = size >> PAGE_SHIFT;
sizeleft -= size;
@@ -540,7 +548,6 @@ out:
pr_info("Reserved %lu MiB in %d range%s\n",
(unsigned long)total_size / SZ_1M, nr,
nr > 1 ? "s" : "");
-
return ret;
}
@@ -1034,3 +1041,65 @@ bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
return false;
}
+
+/*
+ * Very basic function to reserve memory from a CMA area that has not
+ * yet been activated. This is expected to be called early, when the
+ * system is single-threaded, so there is no locking. The alignment
+ * checking is restrictive - only pageblock-aligned areas
+ * (CMA_MIN_ALIGNMENT_BYTES) may be reserved through this function.
+ * This keeps things simple, and is enough for the current use case.
+ *
+ * The CMA bitmaps have not yet been allocated, so just start
+ * reserving from the bottom up, using a PFN to keep track
+ * of what has been reserved. Unreserving is not possible.
+ *
+ * The caller is responsible for initializing the page structures
+ * in the area properly, since this just points to memblock-allocated
+ * memory. The caller should subsequently use init_cma_pageblock to
+ * set the migrate type and CMA stats for the pageblocks that were reserved.
+ *
+ * If the CMA area fails to activate later, memory obtained through
+ * this interface is not handed to the page allocator; this is
+ * the responsibility of the caller (e.g. like normal memblock-allocated
+ * memory).
+ */
+void __init *cma_reserve_early(struct cma *cma, unsigned long size)
+{
+ int r;
+ struct cma_memrange *cmr;
+ unsigned long available;
+ void *ret = NULL;
+
+ if (!cma || !cma->count)
+ return NULL;
+ /*
+ * Can only be called early in init.
+ */
+ if (test_bit(CMA_ACTIVATED, &cma->flags))
+ return NULL;
+
+ if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES))
+ return NULL;
+
+ if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
+ return NULL;
+
+ size >>= PAGE_SHIFT;
+
+ if (size > cma->available_count)
+ return NULL;
+
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ available = cmr->count - (cmr->early_pfn - cmr->base_pfn);
+ if (size <= available) {
+ ret = phys_to_virt(PFN_PHYS(cmr->early_pfn));
+ cmr->early_pfn += size;
+ cma->available_count -= size;
+ return ret;
+ }
+ }
+
+ return ret;
+}
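
As the comment above spells out, the caller is expected to mark the pageblocks it reserved once their page structures are set up. A hedged sketch of that follow-up step; it assumes init_cma_pageblock() takes the first struct page of each pageblock, which is not visible in this diff:

/*
 * Illustration only: after cma_reserve_early() returned 'mem' of 'size'
 * bytes, and once the page structures covering it have been initialized,
 * mark the covered pageblocks as CMA.
 */
static void __init mark_early_cma_reservation(void *mem, unsigned long size)
{
	unsigned long pfn = PHYS_PFN(virt_to_phys(mem));
	unsigned long end_pfn = pfn + (size >> PAGE_SHIFT);

	/* init_cma_pageblock() signature assumed: one pageblock at a time. */
	for (; pfn < end_pfn; pfn += pageblock_nr_pages)
		init_cma_pageblock(pfn_to_page(pfn));
}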