author     Linus Torvalds <torvalds@linux-foundation.org>  2025-03-20 14:13:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-03-20 14:13:50 -0700
commit     b5329d5a35582abbef57562f9fb6cb26a643f252 (patch)
tree       90eaf9f414282761c5be3d2a15a46ec239bc039d
parent     f45f8f0ed4c6d3a9be27ff27347408e1c1bbb364 (diff)
parent     f70681e9e6066ab7b102e6b46a336a8ed67812ae (diff)
Merge tag 'vfs-6.14-final.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
Pull vfs fixes from Christian Brauner:
 "A final set of fixes for this cycle:

  VFS:

   - Ensure that the stable offset api doesn't return duplicate
     directory entries when userspace has to perform the getdents call
     multiple times on large directories

  afs:

   - Prevent invalid pointer dereference during get_link RCU pathwalk

  fuse:

   - Fix deadlock caused by uninitialized rings when using io_uring
     with fuse

   - Handle race condition when using io_uring with fuse to prevent
     NULL dereference

  libnetfs:

   - Ensure that invalidate_cache is only called if implemented

   - Fix collection of results during pause when collection is
     offloaded

   - Ensure rolling_buffer_load_from_ra() doesn't clear mark bits

   - Make netfs_unbuffered_read() return ssize_t rather than int"

* tag 'vfs-6.14-final.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  libfs: Fix duplicate directory entry in offset_dir_lookup
  fuse: fix possible deadlock if rings are never initialized
  netfs: Fix netfs_unbuffered_read() to return ssize_t rather than int
  netfs: Fix rolling_buffer_load_from_ra() to not clear mark bits
  netfs: Call `invalidate_cache` only if implemented
  netfs: Fix collection of results during pause when collection offloaded
  fuse: fix uring race condition for null dereference of fc
  afs: Fix afs_atcell_get_link() to check if ws_cell is unset first
-rw-r--r--  fs/afs/dynroot.c           6
-rw-r--r--  fs/fuse/dev.c              2
-rw-r--r--  fs/fuse/dev_uring.c        4
-rw-r--r--  fs/libfs.c                 2
-rw-r--r--  fs/netfs/direct_read.c     6
-rw-r--r--  fs/netfs/read_collect.c   18
-rw-r--r--  fs/netfs/rolling_buffer.c  4
-rw-r--r--  fs/netfs/write_collect.c   3
8 files changed, 22 insertions(+), 23 deletions(-)
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 008698d706ca..7d997f7a8028 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -314,6 +314,9 @@ static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inod
const char *name;
bool dotted = vnode->fid.vnode == 3;
+ if (!rcu_access_pointer(net->ws_cell))
+ return ERR_PTR(-ENOENT);
+
if (!dentry) {
/* We're in RCU-pathwalk. */
cell = rcu_dereference(net->ws_cell);
@@ -325,9 +328,6 @@ static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inod
return name;
}
- if (!rcu_access_pointer(net->ws_cell))
- return ERR_PTR(-ENOENT);
-
down_read(&net->cells_lock);
cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
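The hoisted check relies on the split between RCU's two accessors: rcu_access_pointer() only tests the pointer value and needs no read-side lock, while the pointer returned by rcu_dereference() may be dereferenced, but only under rcu_read_lock(). A minimal sketch of that split, using a hypothetical RCU-protected pointer 'gp' and helper do_something() that are not part of the patch:

struct foo;
extern void do_something(struct foo *p);
struct foo __rcu *gp;

int check_and_use(void)
{
        struct foo *p;

        if (!rcu_access_pointer(gp))    /* value test only, no dereference */
                return -ENOENT;

        rcu_read_lock();
        p = rcu_dereference(gp);        /* dereference allowed under the lock */
        if (p)                          /* may have gone NULL since the test */
                do_something(p);
        rcu_read_unlock();
        return 0;
}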
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 2c3a4d09e500..51e31df4c546 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -77,7 +77,7 @@ void fuse_set_initialized(struct fuse_conn *fc)
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
return !fc->initialized || (for_background && fc->blocked) ||
- (fc->io_uring && !fuse_uring_ready(fc));
+ (fc->io_uring && fc->connected && !fuse_uring_ready(fc));
}
static void fuse_drop_waiting(struct fuse_conn *fc)
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index ebd2931b4f2a..82bf458fa9db 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -208,11 +208,11 @@ static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
init_waitqueue_head(&ring->stop_waitq);
- fc->ring = ring;
ring->nr_queues = nr_queues;
ring->fc = fc;
ring->max_payload_sz = max_payload_size;
atomic_set(&ring->queue_refs, 0);
+ smp_store_release(&fc->ring, ring);
spin_unlock(&fc->lock);
return ring;
@@ -1041,7 +1041,7 @@ static int fuse_uring_register(struct io_uring_cmd *cmd,
unsigned int issue_flags, struct fuse_conn *fc)
{
const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
- struct fuse_ring *ring = fc->ring;
+ struct fuse_ring *ring = smp_load_acquire(&fc->ring);
struct fuse_ring_queue *queue;
struct fuse_ring_ent *ent;
int err;
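These two hunks pair up: fc->ring is now published only after every field is initialized, and the registration path reads it back with an acquire load. The release store orders the initializing stores before the pointer becomes visible; the acquire load pairs with it, so a reader that sees a non-NULL ring is guaranteed to see initialized fields. A minimal sketch of the publication pattern, with a hypothetical use() and error code on the reader side:

/* Writer: complete all initialization, then publish with release semantics. */
ring->nr_queues = nr_queues;
ring->fc = fc;
smp_store_release(&fc->ring, ring);              /* publish only after init */

/* Reader: the acquire load pairs with the release store above. */
struct fuse_ring *r = smp_load_acquire(&fc->ring);
if (!r)
        return -EAGAIN;                          /* not set up yet (hypothetical) */
use(r->nr_queues);                               /* fields guaranteed visible */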
diff --git a/fs/libfs.c b/fs/libfs.c
index 8444f5cc4064..dc042a975a56 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -496,7 +496,7 @@ offset_dir_lookup(struct dentry *parent, loff_t offset)
found = find_positive_dentry(parent, NULL, false);
else {
rcu_read_lock();
- child = mas_find(&mas, DIR_OFFSET_MAX);
+ child = mas_find_rev(&mas, DIR_OFFSET_MIN);
found = find_positive_dentry(parent, child, false);
rcu_read_unlock();
}
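The duplicate entry came from the search direction: mas_find() walks forward from the cursor toward its limit, so when nothing lives exactly at the resume offset it can return the next entry, one the previous getdents() call already emitted. mas_find_rev() walks backward and lands on the entry at or before the offset. A sketch of the fixed lookup, assuming a maple tree 'mt' keyed by directory offset:

MA_STATE(mas, &mt, offset, offset);     /* cursor positioned at the resume offset */
struct dentry *child;

rcu_read_lock();
/* mas_find(&mas, DIR_OFFSET_MAX) would search forward and could land on
 * the entry *after* offset, re-emitting one already returned; searching
 * backward yields the entry at or before offset instead. */
child = mas_find_rev(&mas, DIR_OFFSET_MIN);
rcu_read_unlock();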
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 0bf3c2f5a710..5e3f0aeb51f3 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -125,9 +125,9 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
* Perform a read to an application buffer, bypassing the pagecache and the
* local disk cache.
*/
-static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
{
- int ret;
+ ssize_t ret;
_enter("R=%x %llx-%llx",
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
@@ -155,7 +155,7 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
else
ret = -EIOCBQUEUED;
out:
- _leave(" = %d", ret);
+ _leave(" = %zd", ret);
return ret;
}
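The int return was a truncation bug: a direct read can transfer more than INT_MAX bytes, and squeezing that count through an int wraps it, potentially into a negative value that callers would mistake for an error code. A standalone C illustration of the wrap, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
        long long transferred = 3LL << 30;      /* 3 GiB transferred */
        int as_int = (int)transferred;          /* what an int return would carry */

        /* Prints "3221225472 -> -1073741824": the count now looks like an error. */
        printf("%lld -> %d\n", transferred, as_int);
        return 0;
}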
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index 636cc5a98ef5..23c75755ad4e 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -682,14 +682,16 @@ void netfs_wait_for_pause(struct netfs_io_request *rreq)
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
- subreq = list_first_entry_or_null(&stream->subrequests,
- struct netfs_io_subrequest, rreq_link);
- if (subreq &&
- (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
- test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
- __set_current_state(TASK_RUNNING);
- netfs_read_collection(rreq);
- continue;
+ if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+ subreq = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ if (subreq &&
+ (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
+ test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+ __set_current_state(TASK_RUNNING);
+ netfs_read_collection(rreq);
+ continue;
+ }
}
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
diff --git a/fs/netfs/rolling_buffer.c b/fs/netfs/rolling_buffer.c
index 75d97af14b4a..207b6a326651 100644
--- a/fs/netfs/rolling_buffer.c
+++ b/fs/netfs/rolling_buffer.c
@@ -146,10 +146,6 @@ ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
/* Store the counter after setting the slot. */
smp_store_release(&roll->next_head_slot, to);
-
- for (; ix < folioq_nr_slots(fq); ix++)
- folioq_clear(fq, ix);
-
return size;
}
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 294f67795f79..3fca59e6475d 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -400,7 +400,8 @@ void netfs_write_collection_worker(struct work_struct *work)
trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
if (wreq->io_streams[1].active &&
- wreq->io_streams[1].failed) {
+ wreq->io_streams[1].failed &&
+ ictx->ops->invalidate_cache) {
/* Cache write failure doesn't prevent writeback completion
* unless we're in disconnected mode.
*/
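invalidate_cache is an optional hook in the netfs ops table, so the collector must test it before routing a failed cache write down the invalidation path; a filesystem that leaves the hook NULL would otherwise oops on a NULL function pointer call. The guard is the usual optional-op pattern, sketched here with hypothetical types that are not the netfs API:

struct my_request;

struct my_ops {
        /* Optional: may be NULL if there is no cache to invalidate. */
        void (*invalidate_cache)(struct my_request *req);
};

static void maybe_invalidate(const struct my_ops *ops, struct my_request *req)
{
        if (ops->invalidate_cache)              /* call only if implemented */
                ops->invalidate_cache(req);
}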