author      Kevin Brodsky <kevin.brodsky@arm.com>  2025-09-12 08:39:08 +0100
committer   Will Deacon <will@kernel.org>          2025-09-25 14:47:19 +0100
commit      200b0d25084d6e99f9f08229283b14b60a84c657 (patch)
tree        3b77b594b9adeeb079b9c922886503ecd70046de
parent      fd2f74f8f3d3c1a524637caf5bead9757fae4332 (diff)
arm64: mm: Move KPTI helpers to mmu.c
create_kpti_ng_temp_pgd() is currently defined (as an alias) in mmu.c
without a matching declaration in a header; instead, cpufeature.c makes
its own declaration. This is clearly not pretty, and as commit
ceca927c86e6 ("arm64: mm: Fix CFI failure due to kpti_ng_pgd_alloc
function signature") showed, it also makes it very easy for the
prototypes to go out of sync.

All this would be much simpler if kpti_install_ng_mappings() and
associated functions lived in mmu.c, where they logically belong. This
is what this patch does:

- Move kpti_install_ng_mappings() and associated functions from
  cpufeature.c to mmu.c, and add a declaration to <asm/mmu.h>

- Remove create_kpti_ng_temp_pgd() and just call
  __create_pgd_mapping_locked() directly instead

- Mark all these functions __init

- Move __initdata after kpti_ng_temp_alloc (as suggested by checkpatch)

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Reviewed-by: Yeoreum Yun <yeoreum.yun@arm.com>
[will: Fix conflicts with init_idmap_kpti_bbml2_flag()]
Signed-off-by: Will Deacon <will@kernel.org>
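For readers unfamiliar with the pattern being removed: the old code re-exported a static function under a second linker symbol via the compiler's alias attribute, then re-declared that symbol by hand in another file, with nothing checking the two prototypes against each other. A minimal user-space sketch of why that invites drift, using hypothetical names (do_map_locked() and create_temp_pgd() stand in for __create_pgd_mapping_locked() and create_kpti_ng_temp_pgd()):

/* alias_drift.c: build with "gcc -Wall alias_drift.c". Hypothetical
 * names; this only illustrates the alias-plus-manual-declaration
 * pattern, not the kernel's real prototypes. */
#include <stdio.h>

/* The real implementation, kept static just like
 * __create_pgd_mapping_locked() in mmu.c. */
static void do_map_locked(unsigned long virt, unsigned long size)
{
	printf("map 0x%lx, %lu bytes\n", virt, size);
}

/* Re-export it under a second symbol, as mmu.c did with __alias().
 * No header carries this prototype. */
extern void create_temp_pgd(unsigned long virt, unsigned long size)
	__attribute__((alias("do_map_locked")));

/* The kernel's hand-written declaration lived in a different file
 * (cpufeature.c), where the compiler cannot compare it against the
 * alias above; it is repeated here only so the sketch builds as a
 * single file. If the two prototypes drift apart, callers still link,
 * which is how the CFI breakage fixed by ceca927c86e6 crept in. */
extern void create_temp_pgd(unsigned long virt, unsigned long size);

int main(void)
{
	create_temp_pgd(0xffff800000000000UL, 4096);
	return 0;
}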
-rw-r--r--  arch/arm64/include/asm/mmu.h     7
-rw-r--r--  arch/arm64/kernel/cpufeature.c  98
-rw-r--r--  arch/arm64/mm/mmu.c             98
3 files changed, 94 insertions, 109 deletions
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index ff6fd0bbd7d2..78a4dbf75e60 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -79,7 +79,6 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern int split_kernel_leaf_mapping(unsigned long start, unsigned long end);
-extern void init_idmap_kpti_bbml2_flag(void);
extern void linear_map_maybe_split_to_ptes(void);
/*
@@ -107,5 +106,11 @@ static inline bool kaslr_requires_kpti(void)
return true;
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+void kpti_install_ng_mappings(void);
+#else
+static inline void kpti_install_ng_mappings(void) {}
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif
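The <asm/mmu.h> hunk above follows the usual declare-or-stub convention, so callers outside mmu.c need no #ifdef of their own. A minimal sketch of the same convention with hypothetical names (CONFIG_FEATURE_X, feature_x_install()):

/* declare_or_stub.c: build with "gcc -Wall declare_or_stub.c", or with
 * -DCONFIG_FEATURE_X plus a real definition elsewhere. Hypothetical
 * names, standing in for CONFIG_UNMAP_KERNEL_AT_EL0 and
 * kpti_install_ng_mappings(). */
#include <stdio.h>

#ifdef CONFIG_FEATURE_X
void feature_x_install(void);                   /* real version, defined in one .c file */
#else
static inline void feature_x_install(void) {}   /* no-op when the feature is out */
#endif

int main(void)
{
	/* Callers never need their own #ifdef; the stub compiles away. */
	feature_x_install();
	printf("done\n");
	return 0;
}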
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 10362b296f2d..ecb83ab0700e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1941,104 +1941,6 @@ static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
}
#endif
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT))
-
-extern
-void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags);
-
-static phys_addr_t __initdata kpti_ng_temp_alloc;
-
-static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type)
-{
- kpti_ng_temp_alloc -= PAGE_SIZE;
- return kpti_ng_temp_alloc;
-}
-
-static int __init __kpti_install_ng_mappings(void *__unused)
-{
- typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
- extern kpti_remap_fn idmap_kpti_install_ng_mappings;
- kpti_remap_fn *remap_fn;
-
- int cpu = smp_processor_id();
- int levels = CONFIG_PGTABLE_LEVELS;
- int order = order_base_2(levels);
- u64 kpti_ng_temp_pgd_pa = 0;
- pgd_t *kpti_ng_temp_pgd;
- u64 alloc = 0;
-
- if (levels == 5 && !pgtable_l5_enabled())
- levels = 4;
- else if (levels == 4 && !pgtable_l4_enabled())
- levels = 3;
-
- remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
-
- if (!cpu) {
- alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
- kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
- kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd);
-
- //
- // Create a minimal page table hierarchy that permits us to map
- // the swapper page tables temporarily as we traverse them.
- //
- // The physical pages are laid out as follows:
- //
- // +--------+-/-------+-/-------+-/-------+-\\\--------+
- // :  PTE[] : | PMD[] : | PUD[] : | P4D[] : ||| PGD[]  :
- // +--------+-\-------+-\-------+-\-------+-///--------+
- //      ^
- // The first page is mapped into this hierarchy at a PMD_SHIFT
- // aligned virtual address, so that we can manipulate the PTE
- // level entries while the mapping is active. The first entry
- // covers the PTE[] page itself, the remaining entries are free
- // to be used as a ad-hoc fixmap.
- //
- create_kpti_ng_temp_pgd(kpti_ng_temp_pgd, __pa(alloc),
- KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL,
- kpti_ng_pgd_alloc, 0);
- }
-
- cpu_install_idmap();
- remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
- cpu_uninstall_idmap();
-
- if (!cpu) {
- free_pages(alloc, order);
- arm64_use_ng_mappings = true;
- }
-
- return 0;
-}
-
-static void __init kpti_install_ng_mappings(void)
-{
- /* Check whether KPTI is going to be used */
- if (!arm64_kernel_unmapped_at_el0())
- return;
-
- /*
- * We don't need to rewrite the page-tables if either we've done
- * it already or we have KASLR enabled and therefore have not
- * created any global mappings at all.
- */
- if (arm64_use_ng_mappings)
- return;
-
- init_idmap_kpti_bbml2_flag();
- stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
-}
-
-#else
-static inline void kpti_install_ng_mappings(void)
-{
-}
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
-
static void cpu_enable_kpti(struct arm64_cpu_capabilities const *cap)
{
if (__this_cpu_read(this_cpu_vector) == vectors) {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3a444a5fe469..38a8a775a1db 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -470,14 +470,6 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
mutex_unlock(&fixmap_lock);
}
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-extern __alias(__create_pgd_mapping_locked)
-void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(enum pgtable_type),
- int flags);
-#endif
-
#define INVALID_PHYS_ADDR (-1ULL)
static phys_addr_t __pgd_pgtable_alloc(struct mm_struct *mm, gfp_t gfp,
@@ -823,7 +815,7 @@ static bool linear_map_requires_bbml2 __initdata;
u32 idmap_kpti_bbml2_flag;
-void __init init_idmap_kpti_bbml2_flag(void)
+static void __init init_idmap_kpti_bbml2_flag(void)
{
WRITE_ONCE(idmap_kpti_bbml2_flag, 1);
/* Must be visible to other CPUs before stop_machine() is called. */
@@ -1135,7 +1127,93 @@ static void __init declare_vma(struct vm_struct *vma,
}
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-static pgprot_t kernel_exec_prot(void)
+#define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT))
+
+static phys_addr_t kpti_ng_temp_alloc __initdata;
+
+static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type)
+{
+ kpti_ng_temp_alloc -= PAGE_SIZE;
+ return kpti_ng_temp_alloc;
+}
+
+static int __init __kpti_install_ng_mappings(void *__unused)
+{
+ typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
+ extern kpti_remap_fn idmap_kpti_install_ng_mappings;
+ kpti_remap_fn *remap_fn;
+
+ int cpu = smp_processor_id();
+ int levels = CONFIG_PGTABLE_LEVELS;
+ int order = order_base_2(levels);
+ u64 kpti_ng_temp_pgd_pa = 0;
+ pgd_t *kpti_ng_temp_pgd;
+ u64 alloc = 0;
+
+ if (levels == 5 && !pgtable_l5_enabled())
+ levels = 4;
+ else if (levels == 4 && !pgtable_l4_enabled())
+ levels = 3;
+
+ remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+
+ if (!cpu) {
+ alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
+ kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
+ kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd);
+
+ //
+ // Create a minimal page table hierarchy that permits us to map
+ // the swapper page tables temporarily as we traverse them.
+ //
+ // The physical pages are laid out as follows:
+ //
+ // +--------+-/-------+-/-------+-/-------+-\\\--------+
+ // :  PTE[] : | PMD[] : | PUD[] : | P4D[] : ||| PGD[]  :
+ // +--------+-\-------+-\-------+-\-------+-///--------+
+ //      ^
+ // The first page is mapped into this hierarchy at a PMD_SHIFT
+ // aligned virtual address, so that we can manipulate the PTE
+ // level entries while the mapping is active. The first entry
+ // covers the PTE[] page itself, the remaining entries are free
+ // to be used as a ad-hoc fixmap.
+ //
+ __create_pgd_mapping_locked(kpti_ng_temp_pgd, __pa(alloc),
+ KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL,
+ kpti_ng_pgd_alloc, 0);
+ }
+
+ cpu_install_idmap();
+ remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
+ cpu_uninstall_idmap();
+
+ if (!cpu) {
+ free_pages(alloc, order);
+ arm64_use_ng_mappings = true;
+ }
+
+ return 0;
+}
+
+void __init kpti_install_ng_mappings(void)
+{
+ /* Check whether KPTI is going to be used */
+ if (!arm64_kernel_unmapped_at_el0())
+ return;
+
+ /*
+ * We don't need to rewrite the page-tables if either we've done
+ * it already or we have KASLR enabled and therefore have not
+ * created any global mappings at all.
+ */
+ if (arm64_use_ng_mappings)
+ return;
+
+ init_idmap_kpti_bbml2_flag();
+ stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
+}
+
+static pgprot_t __init kernel_exec_prot(void)
{
return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}
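As a side note on the moved code: KPTI_NG_TEMP_VA is defined above as -(1UL << PMD_SHIFT), a PMD-aligned virtual address at the very top of the 64-bit range, matching the in-code comment's requirement that the temporary window be mapped at a PMD_SHIFT-aligned address. A quick user-space check of that arithmetic, assuming a 4K translation granule (where PMD_SHIFT is 21 and a PMD-sized block is 2 MiB; other granules use a different shift, but the idea is the same):

/* kpti_temp_va.c: build with "gcc -Wall kpti_temp_va.c".
 * PMD_SHIFT == 21 is the 4K-granule value; treat it as an assumption. */
#include <stdio.h>

#define PMD_SHIFT        21
#define KPTI_NG_TEMP_VA  (-(1UL << PMD_SHIFT))

int main(void)
{
	unsigned long block = 1UL << PMD_SHIFT;

	printf("PMD block size : %lu MiB\n", block >> 20);       /* 2 MiB */
	printf("KPTI_NG_TEMP_VA: 0x%016lx\n", KPTI_NG_TEMP_VA);  /* 0xffffffffffe00000 */
	return 0;
}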