summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVasily Gorbik <gor@linux.ibm.com>2025-11-28 23:01:02 +0100
committerHeiko Carstens <hca@linux.ibm.com>2025-12-07 16:15:19 +0100
commit1442bb87b878f889442c7e8e83d9125e31ef5072 (patch)
tree50c1ff82bc59ccb795ebede4b8e0f49400c1ac05
parentd9f59178011265344a4864757cf21523668a2a75 (diff)
s390/boot: Use entire page for PTEs
Make boot_pte_alloc() always allocate a full PAGE_SIZE page for PTE tables, instead of carving two 2K PTE tables out of a single 4K page, similar to commit daa8af80d283 ("s390/mm: Allocate page table with PAGE_SIZE granularity"). This mirrors the change in the vmem code and ensures that boot page tables backing the early KASAN shadow can later be fully freed by the vmem page-table teardown helpers (e.g. when unmapping early KASAN shadow on memory hotplug). The leftover-based allocation was originally added to reduce physmem allocator fragmentation when EDAT was disabled. On current hardware EDAT1 is available on all production systems, so the complexity is no longer justified and gets in the way of freeing the shadow mappings. Signed-off-by: Vasily Gorbik <gor@linux.ibm.com> Reviewed-by: Heiko Carstens <hca@linux.ibm.com> Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
-rw-r--r--arch/s390/boot/vmem.c16
1 file changed, 2 insertions, 14 deletions
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index fbe64ffdfb96..7d6cc4c85af0 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -244,22 +244,10 @@ static void *boot_crst_alloc(unsigned long val)
static pte_t *boot_pte_alloc(void)
{
- static void *pte_leftover;
pte_t *pte;
- /*
- * handling pte_leftovers this way helps to avoid memory fragmentation
- * during POPULATE_KASAN_MAP_SHADOW when EDAT is off
- */
- if (!pte_leftover) {
- pte_leftover = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
- pte = pte_leftover + _PAGE_TABLE_SIZE;
- __arch_set_page_dat(pte, 1);
- } else {
- pte = pte_leftover;
- pte_leftover = NULL;
- }
-
+ pte = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
+ __arch_set_page_dat(pte, 1);
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}