Diffstat (limited to 'arch/powerpc/mm/book3s64')
-rw-r--r--  arch/powerpc/mm/book3s64/pgtable.c        |  7 +------
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c  | 20 +++++++++++++++++---
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 8f7d41ce2ca1..0db01e10a3f8 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -269,11 +269,6 @@ pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
 	return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
 }
 
-pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
-{
-	return pfn_pmd(page_to_pfn(page), pgprot);
-}
-
 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
 	unsigned long pmdv;
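
The removed mk_pmd() is a pure composition of page_to_pfn() and pfn_pmd(), with nothing book3s64-specific in it, which is presumably why the arch copy can be dropped in favour of a common definition. A standalone sketch of that composition; the types and the toy mem_map below are local stand-ins, not the kernel's:

#include <stdio.h>

/* Local stand-ins; the real pmd_t, pgprot_t and struct page live in kernel headers. */
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long prot; } pgprot_t;
struct page { int _pad; };

static struct page mem_map[16];			/* toy flat memory map */

static unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long)(page - mem_map);	/* index into mem_map */
}

static pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	return (pmd_t){ (pfn << 12) | pgprot.prot };	/* assume 4K pages */
}

/* The removed helper: nothing arch-specific, just composition. */
static pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

int main(void)
{
	pmd_t pmd = mk_pmd(&mem_map[3], (pgprot_t){ 0x5 });
	printf("pmd value: 0x%lx\n", pmd.pmd);
	return 0;
}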
@@ -422,7 +417,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 	ptdesc = pagetable_alloc(gfp, 0);
 	if (!ptdesc)
 		return NULL;
-	if (!pagetable_pmd_ctor(ptdesc)) {
+	if (!pagetable_pmd_ctor(mm, ptdesc)) {
 		pagetable_free(ptdesc);
 		return NULL;
 	}
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 311e2112d782..9f764bc42b8c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -976,7 +976,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 
 	return 0;
 }
-
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 {
 	if (radix_enabled())
@@ -984,6 +984,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 
 	return false;
 }
+#endif
 
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 				unsigned long addr, unsigned long next)
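
The #ifdef/#endif pair added above compiles the radix override of vmemmap_can_optimize() only when the architecture opts in to DAX vmemmap optimization. A minimal userspace sketch of this config-gated override pattern; radix_enabled() and arch_can_optimize() are stubs standing in for the kernel functions:

#include <stdio.h>
#include <stdbool.h>

#define CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP	/* pretend the arch opts in */

static bool radix_enabled(void) { return true; }	/* stub */
static bool arch_can_optimize(void) { return true; }	/* stub for __vmemmap_can_optimize() */

#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
/* Arch override, compiled only when the config gate is set. */
static bool vmemmap_can_optimize(void)
{
	if (radix_enabled())
		return arch_can_optimize();

	return false;
}
#else
/* Generic fallback when the arch does not opt in. */
static bool vmemmap_can_optimize(void) { return false; }
#endif

int main(void)
{
	printf("vmemmap optimization possible: %d\n", vmemmap_can_optimize());
	return 0;
}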
@@ -1120,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 	pmd_t *pmd;
 	pte_t *pte;
 
+	/*
+	 * Make sure we align the start vmemmap addr so that we calculate
+	 * the correct start_pfn in the altmap boundary check, to decide
+	 * whether to use altmap- or RAM-based backing memory allocation.
+	 * The address also needs to be aligned for the set_pte operation.
+	 *
+	 * If the start addr is already PMD_SIZE aligned we will try to use
+	 * a PMD mapping. We don't want to be too aggressive here, because
+	 * that would cause more allocations in RAM. So only if the namespace
+	 * vmemmap start addr is PMD_SIZE aligned do we use a PMD mapping.
+	 */
+
+	start = ALIGN_DOWN(start, PAGE_SIZE);
 	for (addr = start; addr < end; addr = next) {
 		next = pmd_addr_end(addr, end);
 
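
The new ALIGN_DOWN() guarantees a page-aligned start for set_pte(), while the PMD_SIZE alignment test described in the comment decides between a PMD mapping and base pages. A standalone sketch of the arithmetic, using local stand-ins for the kernel macros and an assumed 4K page / 2M PMD geometry:

#include <stdio.h>

/* Local stand-ins for the kernel macros; valid for power-of-2 alignments. */
#define PAGE_SIZE	4096UL
#define PMD_SIZE	(2UL * 1024 * 1024)		/* assumed 2M PMD */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long start = 0xc00c000001219c38UL;	/* arbitrary vmemmap addr */

	start = ALIGN_DOWN(start, PAGE_SIZE);		/* safe for set_pte() */
	printf("page-aligned start: 0x%lx\n", start);
	printf("PMD mapping usable: %s\n",
	       IS_ALIGNED(start, PMD_SIZE) ? "yes" : "no, use base pages");
	return 0;
}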
@@ -1145,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 		 * in altmap block allocation failures, in which case
 		 * we fall back to RAM for vmemmap allocation.
 		 */
-		if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
-			       altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+		if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+		    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 			/*
 			 * make sure we don't create altmap mappings
 			 * covering things outside the device.
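
The reordering changes the guard's outcome in exactly one case: with no altmap and a non-PMD-aligned address, the old condition was false, so a PMD mapping could be attempted at an unaligned address, while the new one is true and falls back to the base mapping. A standalone truth-table comparison; names are local, and the altmap=0, cross=1 rows are not reachable in practice:

#include <stdio.h>
#include <stdbool.h>

/* Old guard: only an altmap could force the base-page fallback. */
static bool old_cond(bool altmap, bool pmd_aligned, bool cross)
{
	return altmap && (!pmd_aligned || cross);
}

/* New guard: any unaligned address forces the fallback, too. */
static bool new_cond(bool altmap, bool pmd_aligned, bool cross)
{
	return !pmd_aligned || (altmap && cross);
}

int main(void)
{
	puts("altmap aligned cross | old new   (1 = use base PAGE_SIZE mapping)");
	for (int a = 0; a <= 1; a++)
		for (int p = 0; p <= 1; p++)
			for (int c = 0; c <= 1; c++)
				printf("   %d      %d      %d  |  %d   %d\n",
				       a, p, c, old_cond(a, p, c), new_cond(a, p, c));
	return 0;
}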