Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/Makefile                  |   2
-rw-r--r--   arch/powerpc/mm/book3s64/hash_native.c    |  13
-rw-r--r--   arch/powerpc/mm/book3s64/hash_utils.c     |  22
-rw-r--r--   arch/powerpc/mm/book3s64/pgtable.c        |   5
-rw-r--r--   arch/powerpc/mm/book3s64/radix_pgtable.c  |  20
-rw-r--r--   arch/powerpc/mm/book3s64/slice.c          |   6
-rw-r--r--   arch/powerpc/mm/copro_fault.c             |  11
-rw-r--r--   arch/powerpc/mm/fault.c                   |   5
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c             |   1
-rw-r--r--   arch/powerpc/mm/init_64.c                 |   4
-rw-r--r--   arch/powerpc/mm/ioremap.c                 |  13
-rw-r--r--   arch/powerpc/mm/ioremap_64.c              |   2
-rw-r--r--   arch/powerpc/mm/mem.c                     |  42
-rw-r--r--   arch/powerpc/mm/nohash/8xx.c              |  32
-rw-r--r--   arch/powerpc/mm/numa.c                    |   2
15 files changed, 65 insertions(+), 115 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 0fe2f085c05a..8c1582b2987d 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -15,5 +15,5 @@ obj-$(CONFIG_NUMA) += numa.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
-obj-$(CONFIG_PTDUMP_CORE) += ptdump/
+obj-$(CONFIG_PTDUMP) += ptdump/
 obj-$(CONFIG_KASAN) += kasan/
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 430d1d935a7c..e9e2dd70c060 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -27,8 +27,6 @@
 #include <asm/ppc-opcode.h>
 #include <asm/feature-fixups.h>
 
-#include <misc/cxl-base.h>
-
 #ifdef DEBUG_LOW
 #define DBG_LOW(fmt...) udbg_printf(fmt)
 #else
@@ -217,11 +215,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 static inline void tlbie(unsigned long vpn, int psize, int apsize,
              int ssize, int local)
 {
-    unsigned int use_local;
+    unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
     int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
-    use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
-
     if (use_local)
         use_local = mmu_psize_defs[psize].tlbiel;
     if (lock_tlbie && !use_local)
@@ -789,10 +785,6 @@ static void native_flush_hash_range(unsigned long number, int local)
     unsigned long psize = batch->psize;
     int ssize = batch->ssize;
     int i;
-    unsigned int use_local;
-
-    use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
-        mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
 
     local_irq_save(flags);
 
@@ -827,7 +819,8 @@ static void native_flush_hash_range(unsigned long number, int local)
         } pte_iterate_hashed_end();
     }
 
-    if (use_local) {
+    if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+        mmu_psize_defs[psize].tlbiel && local) {
         asm volatile("ptesync":::"memory");
         for (i = 0; i < number; i++) {
             vpn = batch->vpn[i];
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index c8b4fa71d4a7..5158aefe4873 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -56,7 +56,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
-#include <asm/copro.h>
+#include <asm/spu.h>
 #include <asm/udbg.h>
 #include <asm/text-patching.h>
 #include <asm/fadump.h>
@@ -1358,18 +1358,6 @@ static void __init htab_initialize(void)
     } else {
         unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;
 
-#ifdef CONFIG_PPC_CELL
-        /*
-         * Cell may require the hash table down low when using the
-         * Axon IOMMU in order to fit the dynamic region over it, see
-         * comments in cell/iommu.c
-         */
-        if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
-            limit = 0x80000000;
-            pr_info("Hash table forced below 2G for Axon IOMMU\n");
-        }
-#endif /* CONFIG_PPC_CELL */
-
         table = memblock_phys_alloc_range(htab_size_bytes,
                           htab_size_bytes, 0, limit);
 
@@ -1612,7 +1600,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
     if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
         return;
     slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
-    copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+    spu_flush_all_slbs(mm);
+#endif
     if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
 
         copy_mm_to_paca(mm);
@@ -1881,7 +1871,9 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                        "to 4kB pages because of "
                        "non-cacheable mapping\n");
             psize = mmu_vmalloc_psize = MMU_PAGE_4K;
-            copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+            spu_flush_all_slbs(mm);
+#endif
         }
     }
 
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index ce64abea9e3e..8f7d41ce2ca1 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -10,7 +10,6 @@
 #include <linux/pkeys.h>
 #include <linux/debugfs.h>
 #include <linux/proc_fs.h>
-#include <misc/cxl-base.h>
 
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -587,7 +586,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
 /*
  * Does the CPU support tlbie?
  */
-bool tlbie_capable __read_mostly = true;
+bool tlbie_capable __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
 EXPORT_SYMBOL(tlbie_capable);
 
 /*
@@ -595,7 +594,7 @@ EXPORT_SYMBOL(tlbie_capable);
  * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
  * guest address spaces.
  */
-bool tlbie_enabled __read_mostly = true;
+bool tlbie_enabled __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
 
 static int __init setup_disable_tlbie(char *str)
 {
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 311e2112d782..9f764bc42b8c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -976,7 +976,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 	return 0;
 }
 
-
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 {
     if (radix_enabled())
@@ -984,6 +984,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 
     return false;
 }
+#endif
 
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
                 unsigned long addr, unsigned long next)
@@ -1120,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
     pmd_t *pmd;
     pte_t *pte;
 
+    /*
+     * Make sure we align the start vmemmap addr so that we calculate
+     * the correct start_pfn in the altmap boundary check, used to decide
+     * whether we should use altmap or RAM based backing memory allocation.
+     * The address also needs to be aligned for the set_pte operation.
+     *
+     * If the start addr is already PMD_SIZE aligned we will try to use
+     * a pmd mapping. We don't want to be too aggressive here because
+     * that will cause more allocations in RAM. So only if the namespace
+     * vmemmap start addr is PMD_SIZE aligned will we use a PMD mapping.
+     */
+
+    start = ALIGN_DOWN(start, PAGE_SIZE);
     for (addr = start; addr < end; addr = next) {
         next = pmd_addr_end(addr, end);
 
@@ -1145,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
              * in altmap block allocation failures, in which case
              * we fallback to RAM for vmemmap allocation.
              */
-            if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
-                       altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+            if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+                altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
                 /*
                  * make sure we don't create altmap mappings
                  * covering things outside the device.
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index bc9a39821d1c..28bec5bc7879 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -22,7 +22,7 @@
 #include <linux/security.h>
 #include <asm/mman.h>
 #include <asm/mmu.h>
-#include <asm/copro.h>
+#include <asm/spu.h>
 #include <asm/hugetlb.h>
 #include <asm/mmu_context.h>
 
@@ -248,7 +248,9 @@ static void slice_convert(struct mm_struct *mm,
 
     spin_unlock_irqrestore(&slice_convert_lock, flags);
 
-    copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+    spu_flush_all_slbs(mm);
+#endif
 }
 
 /*
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index f49fd873df8d..f5f8692e2c69 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -12,8 +12,6 @@
 #include <linux/export.h>
 #include <asm/reg.h>
 #include <asm/copro.h>
-#include <asm/spu.h>
-#include <misc/cxl-base.h>
 
 /*
  * This ought to be kept in sync with the powerpc specific do_page_fault
@@ -135,13 +133,4 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
     return 0;
 }
 EXPORT_SYMBOL_GPL(copro_calculate_slb);
-
-void copro_flush_all_slbs(struct mm_struct *mm)
-{
-#ifdef CONFIG_SPU_BASE
-    spu_flush_all_slbs(mm);
-#endif
-    cxl_slbia(mm);
-}
-EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
 #endif
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index c156fe0d53c3..806c74e0d5ab 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/string_choices.h>
 #include <linux/types.h>
 #include <linux/pagemap.h>
 #include <linux/ptrace.h>
@@ -218,7 +219,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
     // Read/write fault blocked by KUAP is bad, it can never succeed.
     if (bad_kuap_fault(regs, address, is_write)) {
         pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
-                    is_write ? "write" : "read", address,
+                    str_write_read(is_write), address,
                     from_kuid(&init_user_ns, current_uid()));
 
         // Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
@@ -625,7 +626,7 @@ static void __bad_page_fault(struct pt_regs *regs, int sig)
     case INTERRUPT_DATA_STORAGE:
     case INTERRUPT_H_DATA_STORAGE:
         pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
-             is_write ? "write" : "read", regs->dar);
+             str_write_read(is_write), regs->dar);
         break;
     case INTERRUPT_DATA_SEGMENT:
         pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6b043180220a..d3c1b749dcfc 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -113,6 +113,7 @@ static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
     gpage_freearray[nr_gpages] = 0;
     list_add(&m->list, &huge_boot_pages[0]);
     m->hstate = hstate;
+    m->flags = 0;
     return 1;
 }
 
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d96bbc001e73..b6f3ae03ca9e 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,7 @@
 #include <linux/libfdt.h>
 #include <linux/memremap.h>
 #include <linux/memory.h>
+#include <linux/bootmem_info.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -386,10 +387,13 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 }
 #endif
 
 
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
 void register_page_bootmem_memmap(unsigned long section_nr,
                   struct page *start_page, unsigned long size)
 {
 }
+#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
 
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 7b0afcabd89f..4b4feba9873b 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -4,7 +4,6 @@
 #include <linux/slab.h>
 #include <linux/mmzone.h>
 #include <linux/vmalloc.h>
-#include <asm/io-workarounds.h>
 
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);
@@ -14,8 +13,6 @@ void __iomem *ioremap(phys_addr_t addr, unsigned long size)
     pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
     void *caller = __builtin_return_address(0);
 
-    if (iowa_is_active())
-        return iowa_ioremap(addr, size, prot, caller);
     return __ioremap_caller(addr, size, prot, caller);
 }
 EXPORT_SYMBOL(ioremap);
@@ -25,8 +22,6 @@ void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
     pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
     void *caller = __builtin_return_address(0);
 
-    if (iowa_is_active())
-        return iowa_ioremap(addr, size, prot, caller);
     return __ioremap_caller(addr, size, prot, caller);
 }
 EXPORT_SYMBOL(ioremap_wc);
@@ -36,22 +31,18 @@ void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
     pgprot_t prot = pgprot_cached(PAGE_KERNEL);
     void *caller = __builtin_return_address(0);
 
-    if (iowa_is_active())
-        return iowa_ioremap(addr, size, prot, caller);
     return __ioremap_caller(addr, size, prot, caller);
 }
 
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot)
 {
-    pte_t pte = __pte(flags);
+    pte_t pte = __pte(pgprot_val(prot));
     void *caller = __builtin_return_address(0);
 
     /* writeable implies dirty for kernel addresses */
     if (pte_write(pte))
         pte = pte_mkdirty(pte);
 
-    if (iowa_is_active())
-        return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
     return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
 }
 EXPORT_SYMBOL(ioremap_prot);
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index d24e5f166723..fb8b55bd2cd5 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -52,6 +52,6 @@ void iounmap(volatile void __iomem *token)
     if (!slab_is_available())
         return;
 
-    generic_iounmap(PCI_FIX_ADDR(token));
+    generic_iounmap(token);
 }
 EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c7708c8fad29..3ddbfdbfa941 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -273,7 +273,7 @@ void __init paging_init(void)
     mark_nonram_nosave();
 }
 
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
 {
     /*
      * book3s is limited to 16 page sizes due to encoding this in
@@ -295,22 +295,6 @@ void __init mem_init(void)
 
     kasan_late_init();
 
-    memblock_free_all();
-
-#ifdef CONFIG_HIGHMEM
-    {
-        unsigned long pfn, highmem_mapnr;
-
-        highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
-        for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
-            phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
-            struct page *page = pfn_to_page(pfn);
-            if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr))
-                free_highmem_page(page);
-        }
-    }
-#endif /* CONFIG_HIGHMEM */
-
 #if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
     /*
      * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
@@ -319,28 +303,6 @@ void __init mem_init(void)
     per_cpu(next_tlbcam_idx, smp_processor_id()) =
         (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 #endif
-
-#ifdef CONFIG_PPC32
-    pr_info("Kernel virtual memory layout:\n");
-#ifdef CONFIG_KASAN
-    pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
-        KASAN_SHADOW_START, KASAN_SHADOW_END);
-#endif
-    pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
-#ifdef CONFIG_HIGHMEM
-    pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
-        PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
-#endif /* CONFIG_HIGHMEM */
-    if (ioremap_bot != IOREMAP_TOP)
-        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
-            ioremap_bot, IOREMAP_TOP);
-    pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
-        VMALLOC_START, VMALLOC_END);
-#ifdef MODULES_VADDR
-    pr_info("  * 0x%08lx..0x%08lx  : modules\n",
-        MODULES_VADDR, MODULES_END);
-#endif
-#endif /* CONFIG_PPC32 */
 }
 
 void free_initmem(void)
@@ -376,7 +338,7 @@ static int __init add_system_ram_resources(void)
          */
         res->end = end - 1;
         res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-        WARN_ON(request_resource(&iomem_resource, res) < 0);
+        WARN_ON(insert_resource(&iomem_resource, res) < 0);
         }
     }
 
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 8b54f12d1889..ab1505cf42bf 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -54,20 +54,13 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
 {
     pmd_t *pmdp = pmd_off_k(va);
     pte_t *ptep;
-
-    if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
-        return -EINVAL;
+    unsigned int shift = mmu_psize_to_shift(psize);
 
     if (new) {
         if (WARN_ON(slab_is_available()))
             return -EINVAL;
 
-        if (psize == MMU_PAGE_512K) {
-            ptep = early_pte_alloc_kernel(pmdp, va);
-            /* The PTE should never be already present */
-            if (WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
-                return -EINVAL;
-        } else {
+        if (psize == MMU_PAGE_8M) {
             if (WARN_ON(!pmd_none(*pmdp) || !pmd_none(*(pmdp + 1))))
                 return -EINVAL;
 
@@ -78,20 +71,25 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
             pmd_populate_kernel(&init_mm, pmdp + 1, ptep);
 
             ptep = (pte_t *)pmdp;
+        } else {
+            ptep = early_pte_alloc_kernel(pmdp, va);
+            /* The PTE should never be already present */
+            if (WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
+                return -EINVAL;
         }
     } else {
-        if (psize == MMU_PAGE_512K)
-            ptep = pte_offset_kernel(pmdp, va);
-        else
+        if (psize == MMU_PAGE_8M)
             ptep = (pte_t *)pmdp;
+        else
+            ptep = pte_offset_kernel(pmdp, va);
     }
 
     if (WARN_ON(!ptep))
         return -ENOMEM;
 
     set_huge_pte_at(&init_mm, va, ptep,
-            pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)),
-            1UL << mmu_psize_to_shift(psize));
+            arch_make_huge_pte(pfn_pte(pa >> PAGE_SHIFT, prot), shift, 0),
+            1UL << shift);
 
     return 0;
 }
@@ -123,14 +121,18 @@ static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
     unsigned long p = offset;
     int err = 0;
 
-    WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+    WARN_ON(!IS_ALIGNED(offset, SZ_16K) || !IS_ALIGNED(top, SZ_16K));
 
+    for (; p < ALIGN(p, SZ_512K) && p < top && !err; p += SZ_16K, v += SZ_16K)
+        err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new);
     for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
        err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
     for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
        err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
     for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
        err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+    for (; p < ALIGN_DOWN(top, SZ_16K) && p < top && !err; p += SZ_16K, v += SZ_16K)
+        err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_16K, new);
 
     if (!new)
         flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3c1da08304d0..603a0f652ba6 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1336,7 +1336,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
     return nid;
 }
 
-static u64 hot_add_drconf_memory_max(void)
+u64 hot_add_drconf_memory_max(void)
 {
     struct device_node *memory = NULL;
     struct device_node *dn = NULL;
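Note on the fault.c hunks above: they replace the open-coded is_write ? "write" : "read" ternaries with the str_write_read() helper pulled in via the new <linux/string_choices.h> include. Below is a minimal userspace sketch of the helper's semantics, modeled on the kernel header; the function definition here is a stand-in written for illustration, not the kernel's own code.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Stand-in for str_write_read() from <linux/string_choices.h>:
     * true maps to "write", false maps to "read" -- the same strings
     * the removed ternaries produced.
     */
    static const char *str_write_read(bool v)
    {
        return v ? "write" : "read";
    }

    int main(void)
    {
        bool is_write = true;

        printf("Kernel attempted to %s user page\n", str_write_read(is_write));
        return 0;
    }

The helper reads the same as the ternary but avoids re-typing the string pair at every call site.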
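Similarly, the nohash/8xx.c change to mmu_mapin_ram_chunk() relaxes the chunk alignment requirement from 512K to 16K by adding 16K-page loops at the head and tail around the existing 512K and 8M loops. The sketch below is a standalone model of just that size-selection order; the SZ_* constants and ALIGN()/ALIGN_DOWN() macros are re-declared for illustration, and unlike the kernel version it only prints the plan rather than mapping pages, advancing a virtual address, or stopping on errors.

    #include <stdio.h>

    #define SZ_16K  0x4000UL
    #define SZ_512K 0x80000UL
    #define SZ_8M   0x800000UL

    /* Power-of-two rounding, as the kernel's ALIGN()/ALIGN_DOWN() macros do. */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a))

    /*
     * Model of the page-size selection in the patched mmu_mapin_ram_chunk():
     * 16K pages cover the head up to the first 512K boundary, 512K pages
     * run up to the first 8M boundary, 8M pages fill the middle, then
     * 512K and 16K pages cover the tail below top.
     */
    static void plan_mapping(unsigned long p, unsigned long top)
    {
        for (; p < ALIGN(p, SZ_512K) && p < top; p += SZ_16K)
            printf("16K  page at %#lx\n", p);
        for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K)
            printf("512K page at %#lx\n", p);
        for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)
            printf("8M   page at %#lx\n", p);
        for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)
            printf("512K page at %#lx\n", p);
        for (; p < ALIGN_DOWN(top, SZ_16K) && p < top; p += SZ_16K)
            printf("16K  page at %#lx\n", p);
    }

    int main(void)
    {
        /* A 16K-aligned chunk that is not 512K-aligned at either end. */
        plan_mapping(0x1c000, 0xff8000);
        return 0;
    }

For 0x1c000..0xff8000, 16K pages run up to 0x80000, 512K pages up to 0x800000, an 8M page would overshoot top, so the tail falls back to 512K pages up to 0xf80000 and 16K pages up to 0xff8000. Re-evaluating ALIGN(p, ...) in the loop condition is what stops each head loop exactly at the next larger boundary.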