|author||Matthew Wilcox (Oracle) <firstname.lastname@example.org>||2022-02-03 11:40:17 -0500|
|committer||Matthew Wilcox (Oracle) <email@example.com>||2022-03-21 12:59:02 -0400|
mm: Convert page_vma_mapped_walk to work on PFNs
page_mapped_in_vma() really just wants to walk one page, but as the code stands, if passed the head page of a compound page, it will walk every page in the compound page. Extract pfn/nr_pages/pgoff from the struct page early, so they can be overridden by page_mapped_in_vma(). Signed-off-by: Matthew Wilcox (Oracle) <firstname.lastname@example.org>
Diffstat (limited to 'mm/internal.h')
1 file changed, 9 insertions, 6 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 6047268076e7..3b652444f070 100644
@@ -10,6 +10,7 @@
@@ -475,18 +476,20 @@ vma_address(struct page *page, struct vm_area_struct *vma)
- * Then at what user virtual address will none of the page be found in vma?
+ * Then at what user virtual address will none of the range be found in vma?
* Assumes that vma_address() already returned a good starting address.
- * If page is a compound head, the entire compound page is considered.
-static inline unsigned long
-vma_address_end(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
+ struct vm_area_struct *vma = pvmw->vma;
unsigned long address;
- VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
- pgoff = page_to_pgoff(page) + compound_nr(page);
+ /* Common case, plus ->pgoff is invalid for KSM */
+ if (pvmw->nr_pages == 1)
+ return pvmw->address + PAGE_SIZE;
+ pgoff = pvmw->pgoff + pvmw->nr_pages;
address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
/* Check for address beyond vma (or wrapped through 0?) */
if (address < vma->vm_start || address > vma->vm_end)