author     Christian Brauner <brauner@kernel.org>  2025-05-27 21:06:30 +0200
committer  Christian Brauner <brauner@kernel.org>  2025-05-27 21:06:30 +0200
commit     5722bcd7d373768f9a20517ce271f661bb9bb258 (patch)
tree       8f8859d612b8fda307882b6b2488919c4e1d7871
parent     015a99fa76650e7d6efa3e36f20c0f5b346fe9ce (diff)
parent     a1d98e4ffb972ab007f5de850ef53c2a46cacf15 (diff)
Merge patch series "dropbehind fixes and cleanups"
Jens Axboe <axboe@kernel.dk> says:

As per the thread here:

https://lore.kernel.org/linux-fsdevel/20250525083209.GS2023217@ZenIV/

there was an issue with the dropbehind support, and hence it got
reverted (effectively) for the 6.15 kernel release.

The problem stems from the fact that the folio can get redirtied and/or
scheduled for writeback after the initial dropbehind test, and before
we have it locked again for invalidation.

Patches 1+2 add a generic helper that both the read and write side can
use, and which checks for !dirty && !writeback before going ahead with
the invalidation. Patch 3 reverts the FOP_DONTCACHE disable, and
patches 4 and 5 do a bit of cleanup work to further unify how the read
and write side handling works.

This can reasonably be considered a 2 part series, as 1-3 fix the issue
and could go to stable, while 4-5 just cleanup the code.

* patches from https://lore.kernel.org/20250527133255.452431-1-axboe@kernel.dk:
  mm/filemap: unify dropbehind flag testing and clearing
  mm/filemap: unify read/write dropbehind naming
  Revert "Disable FOP_DONTCACHE for now due to bugs"
  mm/filemap: use filemap_end_dropbehind() for read invalidation
  mm/filemap: gate dropbehind invalidate on folio !dirty && !writeback

Link: https://lore.kernel.org/20250527133255.452431-1-axboe@kernel.dk
Signed-off-by: Christian Brauner <brauner@kernel.org>
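[Editor's note] For illustration only, not part of the patch: a minimal
userspace C sketch of the ordering rule the series enforces — the
dropbehind flag is only acted on after re-checking the dirty and
writeback state under the folio lock, so a folio redirtied in the
window is left alone. All types and names below are hypothetical
stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the folio state involved in the race. */
struct folio_state {
	bool locked;
	bool dirty;
	bool writeback;
	bool dropbehind;
};

/*
 * Mirrors the shape of the new filemap_end_dropbehind() helper:
 * invalidate only if the folio is still clean and not under writeback
 * while the lock is held, and let exactly one caller win the
 * test-and-clear of the dropbehind flag.
 */
static bool end_dropbehind(struct folio_state *f)
{
	if (!f->locked)
		return false;	/* caller must hold the folio lock */
	if (f->writeback || f->dirty)
		return false;	/* redirtied in the window: skip */
	if (!f->dropbehind)
		return false;	/* flag already cleared elsewhere */
	f->dropbehind = false;
	return true;		/* safe to invalidate now */
}

int main(void)
{
	struct folio_state f = { .locked = true, .dropbehind = true };

	/* Simulate the bug: the folio gets redirtied after the initial
	 * dropbehind test but before the locked re-check. */
	f.dirty = true;
	printf("invalidate: %s\n", end_dropbehind(&f) ? "yes" : "no");
	return 0;
}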
-rw-r--r--  include/linux/fs.h   2
-rw-r--r--  mm/filemap.c        39
2 files changed, 25 insertions(+), 16 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0db87f8e676c..57c3db3ef6ad 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2207,7 +2207,7 @@ struct file_operations {
 /* Supports asynchronous lock callbacks */
 #define FOP_ASYNC_LOCK		((__force fop_flags_t)(1 << 6))
 /* File system supports uncached read/write buffered IO */
-#define FOP_DONTCACHE		0 /* ((__force fop_flags_t)(1 << 7)) */
+#define FOP_DONTCACHE		((__force fop_flags_t)(1 << 7))
 
 /* Wrap a directory iterator that needs exclusive inode access */
 int wrap_directory_iterator(struct file *, struct dir_context *,
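[Editor's note] The hunk above re-enables the opt-in bit whose disable
patch 3 reverts. For illustration only (a hypothetical "myfs", not
from this series), a filesystem advertises uncached buffered I/O by
setting the flag in its file_operations:

/* Hypothetical example, not part of this patch: a filesystem opts in
 * to uncached read/write buffered I/O via FOP_DONTCACHE. */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fop_flags	= FOP_DONTCACHE,
};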
diff --git a/mm/filemap.c b/mm/filemap.c
index 7b90cbeb4a1a..eef44d7ea12e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1589,13 +1589,30 @@ int folio_wait_private_2_killable(struct folio *folio)
 }
 EXPORT_SYMBOL(folio_wait_private_2_killable);
 
+static void filemap_end_dropbehind(struct folio *folio)
+{
+	struct address_space *mapping = folio->mapping;
+
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+
+	if (folio_test_writeback(folio) || folio_test_dirty(folio))
+		return;
+	if (!folio_test_clear_dropbehind(folio))
+		return;
+	if (mapping)
+		folio_unmap_invalidate(mapping, folio, 0);
+}
+
 /*
  * If folio was marked as dropbehind, then pages should be dropped when writeback
  * completes. Do that now. If we fail, it's likely because of a big folio -
  * just reset dropbehind for that case and latter completions should invalidate.
  */
-static void folio_end_dropbehind_write(struct folio *folio)
+static void filemap_end_dropbehind_write(struct folio *folio)
 {
+	if (!folio_test_dropbehind(folio))
+		return;
+
 	/*
 	 * Hitting !in_task() should not happen off RWF_DONTCACHE writeback,
 	 * but can happen if normal writeback just happens to find dirty folios
@@ -1604,8 +1621,7 @@ static void folio_end_dropbehind_write(struct folio *folio)
 	 * invalidation in that case.
 	 */
 	if (in_task() && folio_trylock(folio)) {
-		if (folio->mapping)
-			folio_unmap_invalidate(folio->mapping, folio, 0);
+		filemap_end_dropbehind(folio);
 		folio_unlock(folio);
 	}
 }
@@ -1620,8 +1636,6 @@ static void folio_end_dropbehind_write(struct folio *folio)
  */
 void folio_end_writeback(struct folio *folio)
 {
-	bool folio_dropbehind = false;
-
 	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
 
 	/*
@@ -1643,14 +1657,11 @@ void folio_end_writeback(struct folio *folio)
 	 * reused before the folio_wake_bit().
 	 */
 	folio_get(folio);
-	if (!folio_test_dirty(folio))
-		folio_dropbehind = folio_test_clear_dropbehind(folio);
 	if (__folio_end_writeback(folio))
 		folio_wake_bit(folio, PG_writeback);
-	acct_reclaim_writeback(folio);
 
-	if (folio_dropbehind)
-		folio_end_dropbehind_write(folio);
+	filemap_end_dropbehind_write(folio);
+	acct_reclaim_writeback(folio);
 	folio_put(folio);
 }
 EXPORT_SYMBOL(folio_end_writeback);
@@ -2635,16 +2646,14 @@ static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
 	return (pos1 >> shift == pos2 >> shift);
 }
 
-static void filemap_end_dropbehind_read(struct address_space *mapping,
-					struct folio *folio)
+static void filemap_end_dropbehind_read(struct folio *folio)
 {
 	if (!folio_test_dropbehind(folio))
 		return;
 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
 		return;
 	if (folio_trylock(folio)) {
-		if (folio_test_clear_dropbehind(folio))
-			folio_unmap_invalidate(mapping, folio, 0);
+		filemap_end_dropbehind(folio);
 		folio_unlock(folio);
 	}
 }
@@ -2765,7 +2774,7 @@ put_folios:
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
-			filemap_end_dropbehind_read(mapping, folio);
+			filemap_end_dropbehind_read(folio);
 			folio_put(folio);
 		}
 		folio_batch_init(&fbatch);
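[Editor's note] For context, not part of this diff: on the userspace
side, uncached buffered I/O is requested per-I/O by passing
RWF_DONTCACHE to preadv2()/pwritev2(). A minimal sketch, assuming a
kernel and filesystem with FOP_DONTCACHE support; the fallback #define
is an assumption for libc headers that do not yet expose the flag, and
"some-file" is a placeholder path.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE	0x00000080	/* assumed uapi value */
#endif

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("some-file", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Read through the page cache, but drop the folios behind the
	 * read once it completes instead of leaving them cached. */
	ssize_t ret = preadv2(fd, &iov, 1, 0, RWF_DONTCACHE);
	printf("read %zd bytes\n", ret);
	close(fd);
	return 0;
}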