Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	46	++++++++++++++++++++++++++++------------------
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index fb63d9256f09..1320b88fab74 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1845,23 +1845,32 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page,
 #endif
 }
 
-/* We support batch unmapping of PTEs for lazyfree large folios */
-static inline bool can_batch_unmap_folio_ptes(unsigned long addr,
-			struct folio *folio, pte_t *ptep)
+static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
+			struct page_vma_mapped_walk *pvmw,
+			enum ttu_flags flags, pte_t pte)
 {
 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
-	int max_nr = folio_nr_pages(folio);
-	pte_t pte = ptep_get(ptep);
+	unsigned long end_addr, addr = pvmw->address;
+	struct vm_area_struct *vma = pvmw->vma;
+	unsigned int max_nr;
+
+	if (flags & TTU_HWPOISON)
+		return 1;
+	if (!folio_test_large(folio))
+		return 1;
+
+	/* We may only batch within a single VMA and a single page table. */
+	end_addr = pmd_addr_end(addr, vma->vm_end);
+	max_nr = (end_addr - addr) >> PAGE_SHIFT;
 
+	/* We only support lazyfree batching for now ... */
 	if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
-		return false;
+		return 1;
 	if (pte_unused(pte))
-		return false;
-	if (pte_pfn(pte) != folio_pfn(folio))
-		return false;
+		return 1;
 
-	return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
-			       NULL, NULL) == max_nr;
+	return folio_pte_batch(folio, addr, pvmw->pte, pte, max_nr, fpb_flags,
+			       NULL, NULL, NULL);
 }
 
 /*
@@ -2024,9 +2033,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			if (pte_dirty(pteval))
 				folio_mark_dirty(folio);
 		} else if (likely(pte_present(pteval))) {
-			if (folio_test_large(folio) && !(flags & TTU_HWPOISON) &&
-			    can_batch_unmap_folio_ptes(address, folio, pvmw.pte))
-				nr_pages = folio_nr_pages(folio);
+			nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval);
 
 			end_addr = address + nr_pages * PAGE_SIZE;
 			flush_cache_range(vma, address, end_addr);
@@ -2206,13 +2213,16 @@ discard:
 			hugetlb_remove_rmap(folio);
 		} else {
 			folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
-			folio_ref_sub(folio, nr_pages - 1);
 		}
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_drain_local();
-		folio_put(folio);
-		/* We have already batched the entire folio */
-		if (nr_pages > 1)
+		folio_put_refs(folio, nr_pages);
+
+		/*
+		 * If we are sure that we batched the entire folio and cleared
+		 * all PTEs, we can just optimize and stop right here.
+		 */
+		if (nr_pages == folio_nr_pages(folio))
 			goto walk_done;
 		continue;
 walk_abort:
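
A note on the new clamping logic: folio_unmap_pte_batch() now caps the batch at whichever comes first, the end of the VMA or the end of the PMD-covered region that the current PTE belongs to, so folio_pte_batch() can never walk past the page table that pvmw->pte points into. The standalone C sketch below mirrors that arithmetic outside the kernel; the PAGE_SHIFT/PMD_SHIFT values and the sample addresses are illustrative assumptions (x86-64 with 4 KiB base pages), not part of the patch.

/*
 * Userspace sketch of the batch clamping in folio_unmap_pte_batch().
 * Assumes x86-64 layout: 4 KiB pages, 2 MiB per PMD-covered region.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Mirrors the kernel's pmd_addr_end(): next PMD boundary, capped at 'end'. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* Hypothetical mapping: addr sits two pages below a PMD boundary. */
	unsigned long vm_end = 0x7f0000800000UL;
	unsigned long addr   = 0x7f00003fe000UL;

	unsigned long end_addr = pmd_addr_end(addr, vm_end);
	unsigned int max_nr = (end_addr - addr) >> PAGE_SHIFT;

	/*
	 * Prints "max_nr = 2": even if the folio spans 16 pages, only the
	 * 2 PTEs left in this page table may be batched; the remaining
	 * pages are unmapped in later iterations of the rmap walk.
	 */
	printf("max_nr = %u\n", max_nr);
	return 0;
}

This partial-batch possibility is presumably also why the tail of try_to_unmap_one() switches from folio_ref_sub(folio, nr_pages - 1) plus folio_put(folio) to a single folio_put_refs(folio, nr_pages), and why the early walk_done exit now checks nr_pages == folio_nr_pages(folio) rather than nr_pages > 1: the walk may legitimately revisit the same large folio when a batch stops at a VMA or page-table boundary.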
