Diffstat (limited to 'arch/riscv/mm/init.c')
-rw-r--r-- | arch/riscv/mm/init.c | 146
1 file changed, 76 insertions, 70 deletions
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 8d167e09f1fe..8d0374d7ce8e 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -20,15 +20,13 @@
 #include <linux/dma-map-ops.h>
 #include <linux/crash_dump.h>
 #include <linux/hugetlb.h>
-#ifdef CONFIG_RELOCATABLE
-#include <linux/elf.h>
-#endif
 #include <linux/kfence.h>
 #include <linux/execmem.h>
 
 #include <asm/fixmap.h>
 #include <asm/io.h>
 #include <asm/kasan.h>
+#include <asm/module.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
@@ -171,7 +169,7 @@ static void __init print_vm_layout(void)
 static void print_vm_layout(void) { }
 #endif /* CONFIG_DEBUG_VM */
 
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
 {
         bool swiotlb = max_pfn > PFN_DOWN(dma32_phys_limit);
 #ifdef CONFIG_FLATMEM
@@ -192,7 +190,6 @@ void __init mem_init(void)
         }
 
         swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
-        memblock_free_all();
 
         print_vm_layout();
 }
@@ -268,8 +265,12 @@ static void __init setup_bootmem(void)
          */
         if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
                 max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
-                memblock_cap_memory_range(phys_ram_base,
-                                          max_mapped_addr - phys_ram_base);
+                if (memblock_end_of_DRAM() > max_mapped_addr) {
+                        memblock_cap_memory_range(phys_ram_base,
+                                                  max_mapped_addr - phys_ram_base);
+                        pr_warn("Physical memory overflows the linear mapping size: region above %pa removed",
+                                &max_mapped_addr);
+                }
         }
 
         /*
@@ -291,10 +292,8 @@ static void __init setup_bootmem(void)
         phys_ram_end = memblock_end_of_DRAM();
         min_low_pfn = PFN_UP(phys_ram_base);
         max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
-        high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
 
         dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
-        set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
 
         reserve_initrd_mem();
 
@@ -319,6 +318,44 @@ static void __init setup_bootmem(void)
                 hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 }
 
+#ifdef CONFIG_RELOCATABLE
+extern unsigned long __rela_dyn_start, __rela_dyn_end;
+
+static void __init relocate_kernel(void)
+{
+        Elf_Rela *rela = (Elf_Rela *)&__rela_dyn_start;
+        /*
+         * This holds the offset between the linked virtual address and the
+         * relocated virtual address.
+         */
+        uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
+        /*
+         * This holds the offset between kernel linked virtual address and
+         * physical address.
+         */
+        uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
+
+        for ( ; rela < (Elf_Rela *)&__rela_dyn_end; rela++) {
+                Elf_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
+                Elf_Addr relocated_addr = rela->r_addend;
+
+                if (rela->r_info != R_RISCV_RELATIVE)
+                        continue;
+
+                /*
+                 * Make sure to not relocate vdso symbols like rt_sigreturn
+                 * which are linked from the address 0 in vmlinux since
+                 * vdso symbol addresses are actually used as an offset from
+                 * mm->context.vdso in VDSO_OFFSET macro.
+                 */
+                if (relocated_addr >= KERNEL_LINK_ADDR)
+                        relocated_addr += reloc_offset;
+
+                *(Elf_Addr *)addr = relocated_addr;
+        }
+}
+#endif /* CONFIG_RELOCATABLE */
+
 #ifdef CONFIG_MMU
 struct pt_alloc_ops pt_ops __meminitdata;
 
@@ -405,7 +442,12 @@ static phys_addr_t __meminit alloc_pte_late(uintptr_t va)
 {
         struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
 
-        BUG_ON(!ptdesc || !pagetable_pte_ctor(ptdesc));
+        /*
+         * We do not know which mm the PTE page is associated to at this point.
+         * Passing NULL to the ctor is the safe option, though it may result
+         * in unnecessary work (e.g. initialising the ptlock for init_mm).
+         */
+        BUG_ON(!ptdesc || !pagetable_pte_ctor(NULL, ptdesc));
         return __pa((pte_t *)ptdesc_address(ptdesc));
 }
 
@@ -485,7 +527,8 @@ static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)
 {
         struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
 
-        BUG_ON(!ptdesc || !pagetable_pmd_ctor(ptdesc));
+        /* See comment in alloc_pte_late() regarding the NULL passed to the ctor */
+        BUG_ON(!ptdesc || !pagetable_pmd_ctor(NULL, ptdesc));
         return __pa((pmd_t *)ptdesc_address(ptdesc));
 }
 
@@ -547,11 +590,11 @@ static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
 
 static phys_addr_t __meminit alloc_pud_late(uintptr_t va)
 {
-        unsigned long vaddr;
+        struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
 
-        vaddr = __get_free_page(GFP_KERNEL);
-        BUG_ON(!vaddr);
-        return __pa(vaddr);
+        BUG_ON(!ptdesc);
+        pagetable_pud_ctor(ptdesc);
+        return __pa((pud_t *)ptdesc_address(ptdesc));
 }
 
 static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
@@ -585,11 +628,11 @@ static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
 
 static phys_addr_t __meminit alloc_p4d_late(uintptr_t va)
 {
-        unsigned long vaddr;
+        struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, 0);
 
-        vaddr = __get_free_page(GFP_KERNEL);
-        BUG_ON(!vaddr);
-        return __pa(vaddr);
+        BUG_ON(!ptdesc);
+        pagetable_p4d_ctor(ptdesc);
+        return __pa((p4d_t *)ptdesc_address(ptdesc));
 }
 
 static void __meminit create_pud_mapping(pud_t *pudp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
@@ -819,6 +862,8 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
         uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
         u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa);
 
+        kernel_map.page_offset = PAGE_OFFSET_L5;
+
         if (satp_mode_cmdline == SATP_MODE_57) {
                 disable_pgtable_l5();
         } else if (satp_mode_cmdline == SATP_MODE_48) {
@@ -889,44 +934,6 @@ retry:
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-#ifdef CONFIG_RELOCATABLE
-extern unsigned long __rela_dyn_start, __rela_dyn_end;
-
-static void __init relocate_kernel(void)
-{
-        Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
-        /*
-         * This holds the offset between the linked virtual address and the
-         * relocated virtual address.
-         */
-        uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
-        /*
-         * This holds the offset between kernel linked virtual address and
-         * physical address.
-         */
-        uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
-
-        for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
-                Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
-                Elf64_Addr relocated_addr = rela->r_addend;
-
-                if (rela->r_info != R_RISCV_RELATIVE)
-                        continue;
-
-                /*
-                 * Make sure to not relocate vdso symbols like rt_sigreturn
-                 * which are linked from the address 0 in vmlinux since
-                 * vdso symbol addresses are actually used as an offset from
-                 * mm->context.vdso in VDSO_OFFSET macro.
-                 */
-                if (relocated_addr >= KERNEL_LINK_ADDR)
-                        relocated_addr += reloc_offset;
-
-                *(Elf64_Addr *)addr = relocated_addr;
-        }
-}
-#endif /* CONFIG_RELOCATABLE */
-
 #ifdef CONFIG_XIP_KERNEL
 static void __init create_kernel_page_table(pgd_t *pgdir,
                                             __always_unused bool early)
@@ -1104,11 +1111,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
         kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
 
 #ifdef CONFIG_XIP_KERNEL
-#ifdef CONFIG_64BIT
-        kernel_map.page_offset = PAGE_OFFSET_L3;
-#else
-        kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
-#endif
         kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
         kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
 
@@ -1123,7 +1125,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
         kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr +
                                                   (uintptr_t)&_sdata - (uintptr_t)&_start;
 #else
-        kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
         kernel_map.phys_addr = (uintptr_t)(&_start);
         kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
         kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
@@ -1170,7 +1171,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
          * makes the kernel cross over a PUD_SIZE boundary, raise a bug
          * since a part of the kernel would not get mapped.
          */
-        BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
+        if (IS_ENABLED(CONFIG_64BIT))
+                BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
 
         relocate_kernel();
 #endif
@@ -1374,6 +1376,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
         dtb_early_va = (void *)dtb_pa;
         dtb_early_pa = dtb_pa;
+
+#ifdef CONFIG_RELOCATABLE
+        kernel_map.virt_addr = (uintptr_t)_start;
+        kernel_map.phys_addr = (uintptr_t)_start;
+        relocate_kernel();
+#endif
 }
 
 static inline void setup_vm_final(void)
@@ -1392,21 +1400,19 @@ static void __init arch_reserve_crashkernel(void)
 {
         unsigned long long low_size = 0;
         unsigned long long crash_base, crash_size;
-        char *cmdline = boot_command_line;
         bool high = false;
         int ret;
 
         if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
                 return;
 
-        ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+        ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                                 &crash_size, &crash_base,
                                 &low_size, &high);
         if (ret)
                 return;
 
-        reserve_crashkernel_generic(cmdline, crash_size, crash_base,
-                                    low_size, high);
+        reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
 }
 
 void __init paging_init(void)
@@ -1573,7 +1579,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
                 return;
         }
 
-        pagetable_pte_dtor(ptdesc);
+        pagetable_dtor(ptdesc);
         if (PageReserved(page))
                 free_reserved_page(page);
         else
@@ -1595,7 +1601,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemm
         }
 
         if (!is_vmemmap)
-                pagetable_dtor(ptdesc);
+                pagetable_dtor(ptdesc);
         if (PageReserved(page))
                 free_reserved_page(page);
         else
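A note on the arithmetic in relocate_kernel() above: r_offset in each R_RISCV_RELATIVE entry is a link-time virtual address, but the loop runs with the MMU off, so the slot to patch must be reached through its physical address. The stand-alone sketch below reproduces the same two offset computations with made-up addresses; the kmap struct, the KERNEL_LINK_ADDR value, and every constant here are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in; not the kernel's definition. */
#define KERNEL_LINK_ADDR 0xffffffff80000000ULL

struct kmap {
        uint64_t virt_addr;     /* runtime (relocated) virtual base */
        uint64_t phys_addr;     /* physical load address */
};

int main(void)
{
        struct kmap kernel_map = {
                .virt_addr = 0xffffffff80200000ULL, /* loaded 2 MiB above the link address */
                .phys_addr = 0x80200000ULL,
        };

        /* Offset between the linked VA and the relocated VA. */
        uint64_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;

        /* Offset between the linked VA and the physical load address. */
        uint64_t va_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;

        /* One hypothetical R_RISCV_RELATIVE entry: both fields are linked VAs. */
        uint64_t r_offset = KERNEL_LINK_ADDR + 0x1000;  /* slot to patch */
        uint64_t r_addend = KERNEL_LINK_ADDR + 0x2000;  /* value to store */

        /*
         * The slot is addressed physically (the MMU is off when
         * relocate_kernel() runs), hence the subtraction. The stored value
         * is the addend shifted by the load offset; addends below
         * KERNEL_LINK_ADDR (vDSO symbols) are left untouched.
         */
        uint64_t slot_pa = r_offset - va_link_pa_offset;
        uint64_t value = r_addend >= KERNEL_LINK_ADDR ?
                         r_addend + reloc_offset : r_addend;

        printf("patch pa %#llx with %#llx\n",
               (unsigned long long)slot_pa, (unsigned long long)value);
        return 0;
}

With these numbers the sketch patches physical address 0x80201000 with 0xffffffff80202000, i.e. the linked slot and addend both shifted by the 2 MiB load offset.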
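Similarly, the BUG_ON() now guarded by IS_ENABLED(CONFIG_64BIT) checks that the relocated image does not straddle a PUD_SIZE boundary: PUD_SIZE - (virt_addr & (PUD_SIZE - 1)) is the room left in the PUD region containing virt_addr. A minimal sketch of that check, assuming the 1 GiB PUD_SIZE of sv39/sv48 (all values illustrative):

#include <assert.h>
#include <stdint.h>

#define PUD_SIZE (1ULL << 30)  /* 1 GiB; illustrative sv39/sv48 value */

/* Bytes left between vaddr and the end of its PUD region. */
static uint64_t room_to_pud_boundary(uint64_t vaddr)
{
        return PUD_SIZE - (vaddr & (PUD_SIZE - 1));
}

int main(void)
{
        uint64_t kernel_size = 24ULL << 20;     /* a 24 MiB image */

        /* Placed 8 MiB into a PUD region, the image fits... */
        assert(room_to_pud_boundary(0x80000000ULL + (8ULL << 20)) >= kernel_size);

        /* ...placed 8 MiB before the next boundary, it would cross it. */
        assert(room_to_pud_boundary(0xc0000000ULL - (8ULL << 20)) < kernel_size);
        return 0;
}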