path: root/mm/internal.h
diff options
authorMatthew Wilcox (Oracle) <>2020-09-01 23:17:50 -0400
committerMatthew Wilcox (Oracle) <>2022-01-08 00:28:41 -0500
commit0e499ed3d7a216706e02eeded562627d3e69dcfd (patch)
tree0bf33ceb48d5530c29c332c99def3b47aeadf53f /mm/internal.h
parent25d6a23e8d280861dfe81193e18143afb2c0d777 (diff)
filemap: Return only folios from find_get_entries()
The callers have all been converted to work on folios, so convert find_get_entries() to return a batch of folios instead of pages. We also now return multiple large folios in a single call. Signed-off-by: Matthew Wilcox (Oracle) <> Reviewed-by: Jan Kara <> Reviewed-by: William Kucharski <> Reviewed-by: Christoph Hellwig <>
Diffstat (limited to 'mm/internal.h')
1 file changed, 4 insertions, 0 deletions
diff --git a/mm/internal.h b/mm/internal.h
index e5f3ff3ae24e..07124e95e790 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -12,6 +12,8 @@
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>
+struct folio_batch;
* The set of flags that only affect watermark checking and reclaim
* behaviour. This is used by the MM to obey the caller constraints
@@ -92,6 +94,8 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+ pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);