Diffstat (limited to 'fs')
-rw-r--r--  fs/bcachefs/bcachefs.h  1
-rw-r--r--  fs/bcachefs/btree_gc.c  95
-rw-r--r--  fs/bcachefs/btree_io.c  26
-rw-r--r--  fs/bcachefs/btree_locking.c  2
-rw-r--r--  fs/bcachefs/btree_locking.h  6
-rw-r--r--  fs/bcachefs/btree_types.h  29
-rw-r--r--  fs/bcachefs/btree_update_interior.c  33
-rw-r--r--  fs/bcachefs/btree_update_interior.h  7
-rw-r--r--  fs/bcachefs/chardev.c  4
-rw-r--r--  fs/bcachefs/disk_accounting.c  4
-rw-r--r--  fs/bcachefs/error.c  5
-rw-r--r--  fs/bcachefs/fs.c  8
-rw-r--r--  fs/bcachefs/io_read.c  11
-rw-r--r--  fs/bcachefs/io_read.h  1
-rw-r--r--  fs/bcachefs/movinggc.c  22
-rw-r--r--  fs/bcachefs/namei.c  10
-rw-r--r--  fs/bcachefs/rcu_pending.c  22
-rw-r--r--  fs/bcachefs/recovery.c  27
-rw-r--r--  fs/bcachefs/recovery_passes.c  14
-rw-r--r--  fs/bcachefs/sb-downgrade.c  5
-rw-r--r--  fs/bcachefs/sb-errors_format.h  10
-rw-r--r--  fs/bcachefs/sb-members.c  34
-rw-r--r--  fs/bcachefs/super.c  47
-rw-r--r--  fs/bcachefs/util.c  10
-rw-r--r--  fs/bcachefs/util.h  2
-rw-r--r--  fs/ceph/addr.c  9
-rw-r--r--  fs/ceph/export.c  21
-rw-r--r--  fs/ceph/file.c  2
-rw-r--r--  fs/ceph/super.c  4
-rw-r--r--  fs/dlm/lock.c  2
-rw-r--r--  fs/ext4/super.c  2
-rw-r--r--  fs/jbd2/journal.c  2
-rw-r--r--  fs/jffs2/erase.c  4
-rw-r--r--  fs/jffs2/scan.c  4
-rw-r--r--  fs/jffs2/summary.c  7
-rw-r--r--  fs/namespace.c  113
-rw-r--r--  fs/nilfs2/segment.c  2
-rw-r--r--  fs/ocfs2/cluster/tcp.c  3
-rw-r--r--  fs/overlayfs/file.c  4
-rw-r--r--  fs/overlayfs/namei.c  98
-rw-r--r--  fs/overlayfs/ovl_entry.h  2
-rw-r--r--  fs/overlayfs/params.c  40
-rw-r--r--  fs/overlayfs/readdir.c  4
-rw-r--r--  fs/overlayfs/util.c  9
-rw-r--r--  fs/resctrl/rdtgroup.c  4
-rw-r--r--  fs/smb/client/cached_dir.h  8
-rw-r--r--  fs/smb/client/cifs_debug.c  23
-rw-r--r--  fs/smb/client/cifsfs.h  4
-rw-r--r--  fs/smb/client/cifsglob.h  1
-rw-r--r--  fs/smb/client/connect.c  16
-rw-r--r--  fs/smb/client/file.c  9
-rw-r--r--  fs/smb/client/readdir.c  28
-rw-r--r--  fs/smb/client/smb2ops.c  14
-rw-r--r--  fs/smb/client/smb2pdu.c  50
-rw-r--r--  fs/smb/client/smbdirect.c  389
-rw-r--r--  fs/smb/client/smbdirect.h  71
-rw-r--r--  fs/smb/client/transport.c  14
-rw-r--r--  fs/smb/common/smbdirect/smbdirect.h  37
-rw-r--r--  fs/smb/common/smbdirect/smbdirect_pdu.h  55
-rw-r--r--  fs/smb/common/smbdirect/smbdirect_socket.h  43
-rw-r--r--  fs/ubifs/journal.c  2
61 files changed, 969 insertions, 566 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 3651a296d506..5a1cede2febf 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -296,7 +296,6 @@ do { \
#define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n")
void bch2_print_str(struct bch_fs *, const char *, const char *);
-void bch2_print_str_nonblocking(struct bch_fs *, const char *, const char *);
__printf(2, 3)
void bch2_print_opts(struct bch_opts *, const char *, ...);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 9ddcbe1bda78..e92cf3928c63 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -397,7 +397,11 @@ again:
continue;
}
- ret = btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan);
+ ret = lockrestart_do(trans,
+ btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan));
+ if (ret < 0)
+ goto err;
+
if (ret == DID_FILL_FROM_SCAN) {
new_pass = true;
ret = 0;
@@ -438,7 +442,8 @@ again:
if (!ret && !IS_ERR_OR_NULL(prev)) {
BUG_ON(cur);
- ret = btree_repair_node_end(trans, b, prev, pulled_from_scan);
+ ret = lockrestart_do(trans,
+ btree_repair_node_end(trans, b, prev, pulled_from_scan));
if (ret == DID_FILL_FROM_SCAN) {
new_pass = true;
ret = 0;
@@ -519,6 +524,46 @@ fsck_err:
bch2_bkey_buf_exit(&prev_k, c);
bch2_bkey_buf_exit(&cur_k, c);
printbuf_exit(&buf);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int bch2_check_root(struct btree_trans *trans, enum btree_id i,
+ bool *reconstructed_root)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_root *r = bch2_btree_id_root(c, i);
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ bch2_btree_id_to_text(&buf, i);
+
+ if (r->error) {
+ bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf);
+
+ r->alive = false;
+ r->error = 0;
+
+ if (!bch2_btree_has_scanned_nodes(c, i)) {
+ __fsck_err(trans,
+ FSCK_CAN_FIX|(!btree_id_important(i) ? FSCK_AUTOFIX : 0),
+ btree_root_unreadable_and_scan_found_nothing,
+ "no nodes found for btree %s, continue?", buf.buf);
+ bch2_btree_root_alloc_fake_trans(trans, i, 0);
+ } else {
+ bch2_btree_root_alloc_fake_trans(trans, i, 1);
+ bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
+ if (ret)
+ goto err;
+ }
+
+ *reconstructed_root = true;
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ bch_err_fn(c, ret);
return ret;
}
@@ -526,42 +571,18 @@ int bch2_check_topology(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct bpos pulled_from_scan = POS_MIN;
- struct printbuf buf = PRINTBUF;
int ret = 0;
bch2_trans_srcu_unlock(trans);
for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
- struct btree_root *r = bch2_btree_id_root(c, i);
bool reconstructed_root = false;
+recover:
+ ret = lockrestart_do(trans, bch2_check_root(trans, i, &reconstructed_root));
+ if (ret)
+ break;
- printbuf_reset(&buf);
- bch2_btree_id_to_text(&buf, i);
-
- if (r->error) {
-reconstruct_root:
- bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf);
-
- r->alive = false;
- r->error = 0;
-
- if (!bch2_btree_has_scanned_nodes(c, i)) {
- __fsck_err(trans,
- FSCK_CAN_FIX|(!btree_id_important(i) ? FSCK_AUTOFIX : 0),
- btree_root_unreadable_and_scan_found_nothing,
- "no nodes found for btree %s, continue?", buf.buf);
- bch2_btree_root_alloc_fake_trans(trans, i, 0);
- } else {
- bch2_btree_root_alloc_fake_trans(trans, i, 1);
- bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
- ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
- if (ret)
- break;
- }
-
- reconstructed_root = true;
- }
-
+ struct btree_root *r = bch2_btree_id_root(c, i);
struct btree *b = r->b;
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
@@ -575,17 +596,21 @@ reconstruct_root:
r->b = NULL;
- if (!reconstructed_root)
- goto reconstruct_root;
+ if (!reconstructed_root) {
+ r->error = -EIO;
+ goto recover;
+ }
+ struct printbuf buf = PRINTBUF;
+ bch2_btree_id_to_text(&buf, i);
bch_err(c, "empty btree root %s", buf.buf);
+ printbuf_exit(&buf);
bch2_btree_root_alloc_fake_trans(trans, i, 0);
r->alive = false;
ret = 0;
}
}
-fsck_err:
- printbuf_exit(&buf);
+
bch2_trans_put(trans);
return ret;
}
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 57eff3012a7b..d8f3c4c65e90 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -741,16 +741,22 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
BCH_VERSION_MAJOR(version),
BCH_VERSION_MINOR(version));
- if (btree_err_on(version < c->sb.version_min,
+ if (c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes &&
+ btree_err_on(version < c->sb.version_min,
-BCH_ERR_btree_node_read_err_fixable,
c, NULL, b, i, NULL,
btree_node_bset_older_than_sb_min,
"bset version %u older than superblock version_min %u",
version, c->sb.version_min)) {
- mutex_lock(&c->sb_lock);
- c->disk_sb.sb->version_min = cpu_to_le16(version);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
+ if (bch2_version_compatible(version)) {
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->version_min = cpu_to_le16(version);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ } else {
+ /* We have no idea what's going on: */
+ i->version = cpu_to_le16(c->sb.version);
+ }
}
if (btree_err_on(BCH_VERSION_MAJOR(version) >
@@ -1045,6 +1051,7 @@ got_good_key:
le16_add_cpu(&i->u64s, -next_good_key);
memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
set_btree_node_need_rewrite(b);
+ set_btree_node_need_rewrite_error(b);
}
fsck_err:
printbuf_exit(&buf);
@@ -1305,6 +1312,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
(u64 *) vstruct_end(i) - (u64 *) k);
set_btree_bset_end(b, b->set);
set_btree_node_need_rewrite(b);
+ set_btree_node_need_rewrite_error(b);
continue;
}
if (ret)
@@ -1329,12 +1337,16 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
- if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
+ if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw) {
set_btree_node_need_rewrite(b);
+ set_btree_node_need_rewrite_degraded(b);
+ }
}
- if (!ptr_written)
+ if (!ptr_written) {
set_btree_node_need_rewrite(b);
+ set_btree_node_need_rewrite_ptr_written_zero(b);
+ }
fsck_err:
mempool_free(iter, &c->fill_iter);
printbuf_exit(&buf);
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 47035aae232e..91a51aef82f1 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -213,7 +213,7 @@ static noinline __noreturn void break_cycle_fail(struct lock_graph *g)
prt_newline(&buf);
}
- bch2_print_str_nonblocking(g->g->trans->c, KERN_ERR, buf.buf);
+ bch2_print_str(g->g->trans->c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
BUG();
}
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 9adca77e2580..f2173a3316f4 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -417,8 +417,10 @@ static inline void btree_path_set_should_be_locked(struct btree_trans *trans, st
EBUG_ON(!btree_node_locked(path, path->level));
EBUG_ON(path->uptodate);
- path->should_be_locked = true;
- trace_btree_path_should_be_locked(trans, path);
+ if (!path->should_be_locked) {
+ path->should_be_locked = true;
+ trace_btree_path_should_be_locked(trans, path);
+ }
}
static inline void __btree_path_set_level_up(struct btree_trans *trans,
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index c61c4171ae50..3aa4a602bd02 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -617,6 +617,9 @@ enum btree_write_type {
x(dying) \
x(fake) \
x(need_rewrite) \
+ x(need_rewrite_error) \
+ x(need_rewrite_degraded) \
+ x(need_rewrite_ptr_written_zero) \
x(never_write) \
x(pinned)
@@ -641,6 +644,32 @@ static inline void clear_btree_node_ ## flag(struct btree *b) \
BTREE_FLAGS()
#undef x
+#define BTREE_NODE_REWRITE_REASON() \
+ x(none) \
+ x(unknown) \
+ x(error) \
+ x(degraded) \
+ x(ptr_written_zero)
+
+enum btree_node_rewrite_reason {
+#define x(n) BTREE_NODE_REWRITE_##n,
+ BTREE_NODE_REWRITE_REASON()
+#undef x
+};
+
+static inline enum btree_node_rewrite_reason btree_node_rewrite_reason(struct btree *b)
+{
+ if (btree_node_need_rewrite_ptr_written_zero(b))
+ return BTREE_NODE_REWRITE_ptr_written_zero;
+ if (btree_node_need_rewrite_degraded(b))
+ return BTREE_NODE_REWRITE_degraded;
+ if (btree_node_need_rewrite_error(b))
+ return BTREE_NODE_REWRITE_error;
+ if (btree_node_need_rewrite(b))
+ return BTREE_NODE_REWRITE_unknown;
+ return BTREE_NODE_REWRITE_none;
+}
+
static inline struct btree_write *btree_current_write(struct btree *b)
{
return b->writes + btree_node_write_idx(b);
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index d2ecb782919b..e77584607f0d 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1138,6 +1138,13 @@ static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *
start_time);
}
+static const char * const btree_node_reawrite_reason_strs[] = {
+#define x(n) #n,
+ BTREE_NODE_REWRITE_REASON()
+#undef x
+ NULL,
+};
+
static struct btree_update *
bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
unsigned level_start, bool split,
@@ -1232,6 +1239,15 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
list_add_tail(&as->list, &c->btree_interior_update_list);
mutex_unlock(&c->btree_interior_update_lock);
+ struct btree *b = btree_path_node(path, path->level);
+ as->node_start = b->data->min_key;
+ as->node_end = b->data->max_key;
+ as->node_needed_rewrite = btree_node_rewrite_reason(b);
+ as->node_written = b->written;
+ as->node_sectors = btree_buf_bytes(b) >> 9;
+ as->node_remaining = __bch2_btree_u64s_remaining(b,
+ btree_bkey_last(b, bset_tree_last(b)));
+
/*
* We don't want to allocate if we're in an error state, that can cause
* deadlock on emergency shutdown due to open buckets getting stuck in
@@ -2108,6 +2124,9 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
if (ret)
goto err;
+ as->node_start = prev->data->min_key;
+ as->node_end = next->data->max_key;
+
trace_and_count(c, btree_node_merge, trans, b);
n = bch2_btree_node_alloc(as, trans, b->c.level);
@@ -2681,9 +2700,19 @@ static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update
prt_str(out, " ");
bch2_btree_id_to_text(out, as->btree_id);
- prt_printf(out, " l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
+ prt_printf(out, " l=%u-%u ",
as->update_level_start,
- as->update_level_end,
+ as->update_level_end);
+ bch2_bpos_to_text(out, as->node_start);
+ prt_char(out, ' ');
+ bch2_bpos_to_text(out, as->node_end);
+ prt_printf(out, "\nwritten %u/%u u64s_remaining %u need_rewrite %s",
+ as->node_written,
+ as->node_sectors,
+ as->node_remaining,
+ btree_node_reawrite_reason_strs[as->node_needed_rewrite]);
+
+ prt_printf(out, "\nmode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
bch2_btree_update_modes[as->mode],
as->nodes_written,
closure_nr_remaining(&as->cl),
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 7fe793788a79..b649c36c3fbb 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -57,6 +57,13 @@ struct btree_update {
unsigned took_gc_lock:1;
enum btree_id btree_id;
+ struct bpos node_start;
+ struct bpos node_end;
+ enum btree_node_rewrite_reason node_needed_rewrite;
+ u16 node_written;
+ u16 node_sectors;
+ u16 node_remaining;
+
unsigned update_level_start;
unsigned update_level_end;
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 2d38466eddfd..fde3c2380e28 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -399,7 +399,7 @@ static long bch2_ioctl_data(struct bch_fs *c,
return ret;
}
-static long bch2_ioctl_fs_usage(struct bch_fs *c,
+static noinline_for_stack long bch2_ioctl_fs_usage(struct bch_fs *c,
struct bch_ioctl_fs_usage __user *user_arg)
{
struct bch_ioctl_fs_usage arg = {};
@@ -469,7 +469,7 @@ err:
}
/* obsolete, didn't allow for new data types: */
-static long bch2_ioctl_dev_usage(struct bch_fs *c,
+static noinline_for_stack long bch2_ioctl_dev_usage(struct bch_fs *c,
struct bch_ioctl_dev_usage __user *user_arg)
{
struct bch_ioctl_dev_usage arg;
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index 3d59a57a5256..f7528cd69c73 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -618,7 +618,9 @@ int bch2_gc_accounting_done(struct bch_fs *c)
for (unsigned j = 0; j < nr; j++)
src_v[j] -= dst_v[j];
- if (fsck_err(trans, accounting_mismatch, "%s", buf.buf)) {
+ bch2_trans_unlock_long(trans);
+
+ if (fsck_err(c, accounting_mismatch, "%s", buf.buf)) {
percpu_up_write(&c->mark_lock);
ret = commit_do(trans, NULL, NULL, 0,
bch2_disk_accounting_mod(trans, &acc_k, src_v, nr, false));
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 63951e293c47..a8ec6aae5738 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -69,7 +69,7 @@ static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *tra
if (trans)
bch2_trans_updates_to_text(&buf, trans);
bool ret = __bch2_inconsistent_error(c, &buf);
- bch2_print_str_nonblocking(c, KERN_ERR, buf.buf);
+ bch2_print_str(c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
return ret;
@@ -620,6 +620,9 @@ print:
if (s)
s->ret = ret;
+
+ if (trans)
+ ret = bch2_trans_log_str(trans, bch2_sb_error_strs[err]) ?: ret;
err_unlock:
mutex_unlock(&c->fsck_error_msgs_lock);
err:
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 85d13f800165..3063a8ddc2df 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -2490,6 +2490,14 @@ static int bch2_fs_get_tree(struct fs_context *fc)
if (ret)
goto err_stop_fs;
+ /*
+ * We might be doing a RO mount because other options required it, or we
+ * have no alloc info and it's a small image with no room to regenerate
+ * it
+ */
+ if (c->opts.read_only)
+ fc->sb_flags |= SB_RDONLY;
+
sb = sget(fc->fs_type, NULL, bch2_set_super, fc->sb_flags|SB_NOSEC, c);
ret = PTR_ERR_OR_ZERO(sb);
if (ret)
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index a77779afad01..04bbdcf58e40 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -343,6 +343,10 @@ static struct bch_read_bio *promote_alloc(struct btree_trans *trans,
*bounce = true;
*read_full = promote_full;
+
+ if (have_io_error(failed))
+ orig->self_healing = true;
+
return promote;
nopromote:
trace_io_read_nopromote(c, ret);
@@ -635,12 +639,15 @@ static void bch2_rbio_retry(struct work_struct *work)
prt_str(&buf, "(internal move) ");
prt_str(&buf, "data read error, ");
- if (!ret)
+ if (!ret) {
prt_str(&buf, "successful retry");
- else
+ if (rbio->self_healing)
+ prt_str(&buf, ", self healing");
+ } else
prt_str(&buf, bch2_err_str(ret));
prt_newline(&buf);
+
if (!bkey_deleted(&sk.k->k)) {
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(sk.k));
prt_newline(&buf);
diff --git a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h
index 45c959018919..9c5ddbf861b3 100644
--- a/fs/bcachefs/io_read.h
+++ b/fs/bcachefs/io_read.h
@@ -44,6 +44,7 @@ struct bch_read_bio {
have_ioref:1,
narrow_crcs:1,
saw_error:1,
+ self_healing:1,
context:2;
};
u16 _state;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 6d7b1d5f7697..27e68d470ad0 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -28,7 +28,7 @@
#include <linux/wait.h>
struct buckets_in_flight {
- struct rhashtable table;
+ struct rhashtable *table;
struct move_bucket *first;
struct move_bucket *last;
size_t nr;
@@ -98,7 +98,7 @@ out:
static void move_bucket_free(struct buckets_in_flight *list,
struct move_bucket *b)
{
- int ret = rhashtable_remove_fast(&list->table, &b->hash,
+ int ret = rhashtable_remove_fast(list->table, &b->hash,
bch_move_bucket_params);
BUG_ON(ret);
kfree(b);
@@ -133,7 +133,7 @@ static void move_buckets_wait(struct moving_context *ctxt,
static bool bucket_in_flight(struct buckets_in_flight *list,
struct move_bucket_key k)
{
- return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
+ return rhashtable_lookup_fast(list->table, &k, bch_move_bucket_params);
}
static int bch2_copygc_get_buckets(struct moving_context *ctxt,
@@ -185,7 +185,7 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
goto err;
}
- ret2 = rhashtable_lookup_insert_fast(&buckets_in_flight->table, &b_i->hash,
+ ret2 = rhashtable_lookup_insert_fast(buckets_in_flight->table, &b_i->hash,
bch_move_bucket_params);
BUG_ON(ret2);
@@ -350,10 +350,13 @@ static int bch2_copygc_thread(void *arg)
struct buckets_in_flight buckets = {};
u64 last, wait;
- int ret = rhashtable_init(&buckets.table, &bch_move_bucket_params);
+ buckets.table = kzalloc(sizeof(*buckets.table), GFP_KERNEL);
+ int ret = !buckets.table
+ ? -ENOMEM
+ : rhashtable_init(buckets.table, &bch_move_bucket_params);
bch_err_msg(c, ret, "allocating copygc buckets in flight");
if (ret)
- return ret;
+ goto err;
set_freezable();
@@ -421,11 +424,12 @@ static int bch2_copygc_thread(void *arg)
}
move_buckets_wait(&ctxt, &buckets, true);
- rhashtable_destroy(&buckets.table);
+ rhashtable_destroy(buckets.table);
bch2_moving_ctxt_exit(&ctxt);
bch2_move_stats_exit(&move_stats, c);
-
- return 0;
+err:
+ kfree(buckets.table);
+ return ret;
}
void bch2_copygc_stop(struct bch_fs *c)
diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c
index 24120037c031..779c22eb3979 100644
--- a/fs/bcachefs/namei.c
+++ b/fs/bcachefs/namei.c
@@ -175,6 +175,16 @@ int bch2_create_trans(struct btree_trans *trans,
new_inode->bi_dir_offset = dir_offset;
}
+ if (S_ISDIR(mode)) {
+ ret = bch2_maybe_propagate_has_case_insensitive(trans,
+ (subvol_inum) {
+ new_inode->bi_subvol ?: dir.subvol,
+ new_inode->bi_inum },
+ new_inode);
+ if (ret)
+ goto err;
+ }
+
if (S_ISDIR(mode) &&
!new_inode->bi_subvol)
new_inode->bi_depth = dir_u->bi_depth + 1;
diff --git a/fs/bcachefs/rcu_pending.c b/fs/bcachefs/rcu_pending.c
index bef2aa1b8bcd..b1438be9d690 100644
--- a/fs/bcachefs/rcu_pending.c
+++ b/fs/bcachefs/rcu_pending.c
@@ -182,11 +182,6 @@ static inline void kfree_bulk(size_t nr, void ** p)
while (nr--)
kfree(*p);
}
-
-#define local_irq_save(flags) \
-do { \
- flags = 0; \
-} while (0)
#endif
static noinline void __process_finished_items(struct rcu_pending *pending,
@@ -429,9 +424,15 @@ __rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head,
BUG_ON((ptr != NULL) != (pending->process == RCU_PENDING_KVFREE_FN));
- local_irq_save(flags);
- p = this_cpu_ptr(pending->p);
- spin_lock(&p->lock);
+ /* We could technically be scheduled before taking the lock and end up
+ * using a different cpu's rcu_pending_pcpu: that's ok, it needs a lock
+ * anyways
+ *
+ * And we have to do it this way to avoid breaking PREEMPT_RT, which
+ * redefines how spinlocks work:
+ */
+ p = raw_cpu_ptr(pending->p);
+ spin_lock_irqsave(&p->lock, flags);
rcu_gp_poll_state_t seq = __get_state_synchronize_rcu(pending->srcu);
restart:
if (may_sleep &&
@@ -520,9 +521,8 @@ check_expired:
goto free_node;
}
- local_irq_save(flags);
- p = this_cpu_ptr(pending->p);
- spin_lock(&p->lock);
+ p = raw_cpu_ptr(pending->p);
+ spin_lock_irqsave(&p->lock, flags);
goto restart;
}
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 1e68e61f08e8..0b21fa6ff062 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -99,9 +99,11 @@ int bch2_btree_lost_data(struct bch_fs *c,
goto out;
case BTREE_ID_snapshots:
ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_reconstruct_snapshots, 0) ?: ret;
+ ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret;
ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0) ?: ret;
goto out;
default:
+ ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_check_topology, 0) ?: ret;
ret = __bch2_run_explicit_recovery_pass(c, msg, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0) ?: ret;
goto out;
}
@@ -271,13 +273,24 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
goto out;
struct btree_path *path = btree_iter_path(trans, &iter);
- if (unlikely(!btree_path_node(path, k->level))) {
+ if (unlikely(!btree_path_node(path, k->level) &&
+ !k->allocated)) {
+ struct bch_fs *c = trans->c;
+
+ if (!(c->recovery.passes_complete & (BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes)|
+ BIT_ULL(BCH_RECOVERY_PASS_check_topology)))) {
+ bch_err(c, "have key in journal replay for btree depth that does not exist, confused");
+ ret = -EINVAL;
+ }
+#if 0
bch2_trans_iter_exit(trans, &iter);
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, 0, iter_flags);
ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_increase_depth(trans, iter.path, 0) ?:
-BCH_ERR_transaction_restart_nested;
+#endif
+ k->overwritten = true;
goto out;
}
@@ -739,9 +752,11 @@ int bch2_fs_recovery(struct bch_fs *c)
? min(c->opts.recovery_pass_last, BCH_RECOVERY_PASS_snapshots_read)
: BCH_RECOVERY_PASS_snapshots_read;
c->opts.nochanges = true;
- c->opts.read_only = true;
}
+ if (c->opts.nochanges)
+ c->opts.read_only = true;
+
mutex_lock(&c->sb_lock);
struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
bool write_sb = false;
@@ -1093,9 +1108,6 @@ use_clean:
out:
bch2_flush_fsck_errs(c);
- if (!IS_ERR(clean))
- kfree(clean);
-
if (!ret &&
test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
!c->opts.nochanges) {
@@ -1104,6 +1116,9 @@ out:
}
bch_err_fn(c, ret);
+final_out:
+ if (!IS_ERR(clean))
+ kfree(clean);
return ret;
err:
fsck_err:
@@ -1117,7 +1132,7 @@ fsck_err:
bch2_print_str(c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
}
- return ret;
+ goto final_out;
}
int bch2_fs_initialize(struct bch_fs *c)
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index 605588e33fb3..35ac0d64d73a 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -294,8 +294,13 @@ static bool recovery_pass_needs_set(struct bch_fs *c,
enum bch_run_recovery_pass_flags *flags)
{
struct bch_fs_recovery *r = &c->recovery;
- bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags);
- bool persistent = !in_recovery || !(*flags & RUN_RECOVERY_PASS_nopersistent);
+
+ /*
+ * Never run scan_for_btree_nodes persistently: check_topology will run
+ * it if required
+ */
+ if (pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
+ *flags |= RUN_RECOVERY_PASS_nopersistent;
if ((*flags & RUN_RECOVERY_PASS_ratelimit) &&
!bch2_recovery_pass_want_ratelimit(c, pass))
@@ -310,6 +315,8 @@ static bool recovery_pass_needs_set(struct bch_fs *c,
* Otherwise, we run run_explicit_recovery_pass when we find damage, so
* it should run again even if it's already run:
*/
+ bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags);
+ bool persistent = !in_recovery || !(*flags & RUN_RECOVERY_PASS_nopersistent);
if (persistent
? !(c->sb.recovery_passes_required & BIT_ULL(pass))
@@ -334,6 +341,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
struct bch_fs_recovery *r = &c->recovery;
int ret = 0;
+
lockdep_assert_held(&c->sb_lock);
bch2_printbuf_make_room(out, 1024);
@@ -446,7 +454,7 @@ int bch2_require_recovery_pass(struct bch_fs *c,
int bch2_run_print_explicit_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
- enum bch_run_recovery_pass_flags flags = RUN_RECOVERY_PASS_nopersistent;
+ enum bch_run_recovery_pass_flags flags = 0;
if (!recovery_pass_needs_set(c, pass, &flags))
return 0;
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index b61f88450a6d..1506d05e0665 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -253,6 +253,7 @@ DOWNGRADE_TABLE()
static int downgrade_table_extra(struct bch_fs *c, darray_char *table)
{
+ unsigned dst_offset = table->nr;
struct bch_sb_field_downgrade_entry *dst = (void *) &darray_top(*table);
unsigned bytes = sizeof(*dst) + sizeof(dst->errors[0]) * le16_to_cpu(dst->nr_errors);
int ret = 0;
@@ -268,6 +269,9 @@ static int downgrade_table_extra(struct bch_fs *c, darray_char *table)
if (ret)
return ret;
+ dst = (void *) &table->data[dst_offset];
+ dst->nr_errors = cpu_to_le16(nr_errors + 1);
+
/* open coded __set_bit_le64, as dst is packed and
* dst->recovery_passes is misaligned */
unsigned b = BCH_RECOVERY_PASS_STABLE_check_allocations;
@@ -278,7 +282,6 @@ static int downgrade_table_extra(struct bch_fs *c, darray_char *table)
break;
}
- dst->nr_errors = cpu_to_le16(nr_errors);
return ret;
}
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index 6fdbf265e4c0..d06e73884871 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -134,7 +134,7 @@ enum bch_fsck_flags {
x(bucket_gens_to_invalid_buckets, 121, FSCK_AUTOFIX) \
x(bucket_gens_nonzero_for_invalid_buckets, 122, FSCK_AUTOFIX) \
x(need_discard_freespace_key_to_invalid_dev_bucket, 123, 0) \
- x(need_discard_freespace_key_bad, 124, 0) \
+ x(need_discard_freespace_key_bad, 124, FSCK_AUTOFIX) \
x(discarding_bucket_not_in_need_discard_btree, 291, 0) \
x(backpointer_bucket_offset_wrong, 125, 0) \
x(backpointer_level_bad, 294, 0) \
@@ -165,7 +165,7 @@ enum bch_fsck_flags {
x(ptr_to_missing_replicas_entry, 149, FSCK_AUTOFIX) \
x(ptr_to_missing_stripe, 150, 0) \
x(ptr_to_incorrect_stripe, 151, 0) \
- x(ptr_gen_newer_than_bucket_gen, 152, 0) \
+ x(ptr_gen_newer_than_bucket_gen, 152, FSCK_AUTOFIX) \
x(ptr_too_stale, 153, 0) \
x(stale_dirty_ptr, 154, FSCK_AUTOFIX) \
x(ptr_bucket_data_type_mismatch, 155, 0) \
@@ -236,7 +236,7 @@ enum bch_fsck_flags {
x(inode_multiple_links_but_nlink_0, 207, FSCK_AUTOFIX) \
x(inode_wrong_backpointer, 208, FSCK_AUTOFIX) \
x(inode_wrong_nlink, 209, FSCK_AUTOFIX) \
- x(inode_has_child_snapshots_wrong, 287, 0) \
+ x(inode_has_child_snapshots_wrong, 287, FSCK_AUTOFIX) \
x(inode_unreachable, 210, FSCK_AUTOFIX) \
x(inode_journal_seq_in_future, 299, FSCK_AUTOFIX) \
x(inode_i_sectors_underflow, 312, FSCK_AUTOFIX) \
@@ -279,8 +279,8 @@ enum bch_fsck_flags {
x(root_dir_missing, 239, 0) \
x(root_inode_not_dir, 240, 0) \
x(dir_loop, 241, 0) \
- x(hash_table_key_duplicate, 242, 0) \
- x(hash_table_key_wrong_offset, 243, 0) \
+ x(hash_table_key_duplicate, 242, FSCK_AUTOFIX) \
+ x(hash_table_key_wrong_offset, 243, FSCK_AUTOFIX) \
x(unlinked_inode_not_on_deleted_list, 244, FSCK_AUTOFIX) \
x(reflink_p_front_pad_bad, 245, 0) \
x(journal_entry_dup_same_device, 246, 0) \
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index 363eb0c6eb7c..6245e342a8a8 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -325,9 +325,17 @@ static void bch2_sb_members_v1_to_text(struct printbuf *out, struct bch_sb *sb,
{
struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
- unsigned i;
- for (i = 0; i < sb->nr_devices; i++)
+ if (vstruct_end(&mi->field) <= (void *) &mi->_members[0]) {
+ prt_printf(out, "field ends before start of entries");
+ return;
+ }
+
+ unsigned nr = (vstruct_end(&mi->field) - (void *) &mi->_members[0]) / sizeof(mi->_members[0]);
+ if (nr != sb->nr_devices)
+ prt_printf(out, "nr_devices mismatch: have %i entries, should be %u", nr, sb->nr_devices);
+
+ for (unsigned i = 0; i < min(sb->nr_devices, nr); i++)
member_to_text(out, members_v1_get(mi, i), gi, sb, i);
}
@@ -341,9 +349,27 @@ static void bch2_sb_members_v2_to_text(struct printbuf *out, struct bch_sb *sb,
{
struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
- unsigned i;
- for (i = 0; i < sb->nr_devices; i++)
+ if (vstruct_end(&mi->field) <= (void *) &mi->_members[0]) {
+ prt_printf(out, "field ends before start of entries");
+ return;
+ }
+
+ if (!le16_to_cpu(mi->member_bytes)) {
+ prt_printf(out, "member_bytes 0");
+ return;
+ }
+
+ unsigned nr = (vstruct_end(&mi->field) - (void *) &mi->_members[0]) / le16_to_cpu(mi->member_bytes);
+ if (nr != sb->nr_devices)
+ prt_printf(out, "nr_devices mismatch: have %i entries, should be %u", nr, sb->nr_devices);
+
+ /*
+ * We call to_text() on superblock sections that haven't passed
+ * validate, so we can't trust sb->nr_devices.
+ */
+
+ for (unsigned i = 0; i < min(sb->nr_devices, nr); i++)
member_to_text(out, members_v2_get(mi, i), gi, sb, i);
}
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 397a69da5a75..a5b97c9c5163 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -104,7 +104,7 @@ const char * const bch2_dev_write_refs[] = {
#undef x
static void __bch2_print_str(struct bch_fs *c, const char *prefix,
- const char *str, bool nonblocking)
+ const char *str)
{
#ifdef __KERNEL__
struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);
@@ -114,17 +114,12 @@ static void __bch2_print_str(struct bch_fs *c, const char *prefix,
return;
}
#endif
- bch2_print_string_as_lines(KERN_ERR, str, nonblocking);
+ bch2_print_string_as_lines(KERN_ERR, str);
}
void bch2_print_str(struct bch_fs *c, const char *prefix, const char *str)
{
- __bch2_print_str(c, prefix, str, false);
-}
-
-void bch2_print_str_nonblocking(struct bch_fs *c, const char *prefix, const char *str)
-{
- __bch2_print_str(c, prefix, str, true);
+ __bch2_print_str(c, prefix, str);
}
__printf(2, 0)
@@ -1072,12 +1067,13 @@ noinline_for_stack
static void print_mount_opts(struct bch_fs *c)
{
enum bch_opt_id i;
- struct printbuf p = PRINTBUF;
- bool first = true;
+ CLASS(printbuf, p)();
+ bch2_log_msg_start(c, &p);
prt_str(&p, "starting version ");
bch2_version_to_text(&p, c->sb.version);
+ bool first = true;
for (i = 0; i < bch2_opts_nr; i++) {
const struct bch_option *opt = &bch2_opt_table[i];
u64 v = bch2_opt_get_by_id(&c->opts, i);
@@ -1094,17 +1090,24 @@ static void print_mount_opts(struct bch_fs *c)
}
if (c->sb.version_incompat_allowed != c->sb.version) {
- prt_printf(&p, "\n allowing incompatible features above ");
+ prt_printf(&p, "\nallowing incompatible features above ");
bch2_version_to_text(&p, c->sb.version_incompat_allowed);
}
if (c->opts.verbose) {
- prt_printf(&p, "\n features: ");
+ prt_printf(&p, "\nfeatures: ");
prt_bitflags(&p, bch2_sb_features, c->sb.features);
}
- bch_info(c, "%s", p.buf);
- printbuf_exit(&p);
+ if (c->sb.multi_device) {
+ prt_printf(&p, "\nwith devices");
+ for_each_online_member(c, ca, BCH_DEV_READ_REF_bch2_online_devs) {
+ prt_char(&p, ' ');
+ prt_str(&p, ca->name);
+ }
+ }
+
+ bch2_print_str(c, KERN_INFO, p.buf);
}
static bool bch2_fs_may_start(struct bch_fs *c)
@@ -1995,6 +1998,22 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
goto err_late;
}
+ /*
+ * We just changed the superblock UUID, invalidate cache and send a
+ * uevent to update /dev/disk/by-uuid
+ */
+ invalidate_bdev(ca->disk_sb.bdev);
+
+ char uuid_str[37];
+ snprintf(uuid_str, sizeof(uuid_str), "UUID=%pUb", &c->sb.uuid);
+
+ char *envp[] = {
+ "CHANGE=uuid",
+ uuid_str,
+ NULL,
+ };
+ kobject_uevent_env(&ca->disk_sb.bdev->bd_device.kobj, KOBJ_CHANGE, envp);
+
up_write(&c->state_lock);
out:
printbuf_exit(&label);
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index dc3817f545fa..df9a6071fe18 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -262,8 +262,7 @@ static bool string_is_spaces(const char *str)
return true;
}
-void bch2_print_string_as_lines(const char *prefix, const char *lines,
- bool nonblocking)
+void bch2_print_string_as_lines(const char *prefix, const char *lines)
{
bool locked = false;
const char *p;
@@ -273,12 +272,7 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines,
return;
}
- if (!nonblocking) {
- console_lock();
- locked = true;
- } else {
- locked = console_trylock();
- }
+ locked = console_trylock();
while (*lines) {
p = strchrnul(lines, '\n');
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 0a4b1d433621..6488f098d140 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -214,7 +214,7 @@ u64 bch2_read_flag_list(const char *, const char * const[]);
void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);
-void bch2_print_string_as_lines(const char *, const char *, bool);
+void bch2_print_string_as_lines(const char *, const char *);
typedef DARRAY(unsigned long) bch_stacktrace;
int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index b95c4cb21c13..60a621b00c65 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -409,6 +409,15 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct page **pages;
size_t page_off;
+ /*
+ * FIXME: io_iter.count needs to be corrected to aligned
+ * length. Otherwise, iov_iter_get_pages_alloc2() operates
+ * with the initial unaligned length value. As a result,
+ * ceph_msg_data_cursor_init() triggers BUG_ON() in the case
+ * if msg->sparse_read_total > msg->data_length.
+ */
+ subreq->io_iter.count = len;
+
err = iov_iter_get_pages_alloc2(&subreq->io_iter, &pages, len, &page_off);
if (err < 0) {
doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 150076ced937..b2f2af104679 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -33,12 +33,19 @@ struct ceph_nfs_snapfh {
u32 hash;
} __attribute__ ((packed));
+#define BYTES_PER_U32 (sizeof(u32))
+#define CEPH_FH_BASIC_SIZE \
+ (sizeof(struct ceph_nfs_fh) / BYTES_PER_U32)
+#define CEPH_FH_WITH_PARENT_SIZE \
+ (sizeof(struct ceph_nfs_confh) / BYTES_PER_U32)
+#define CEPH_FH_SNAPPED_INODE_SIZE \
+ (sizeof(struct ceph_nfs_snapfh) / BYTES_PER_U32)
+
static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
- static const int snap_handle_length =
- sizeof(struct ceph_nfs_snapfh) >> 2;
+ static const int snap_handle_length = CEPH_FH_SNAPPED_INODE_SIZE;
struct ceph_nfs_snapfh *sfh = (void *)rawfh;
u64 snapid = ceph_snap(inode);
int ret;
@@ -88,10 +95,8 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
- static const int handle_length =
- sizeof(struct ceph_nfs_fh) >> 2;
- static const int connected_handle_length =
- sizeof(struct ceph_nfs_confh) >> 2;
+ static const int handle_length = CEPH_FH_BASIC_SIZE;
+ static const int connected_handle_length = CEPH_FH_WITH_PARENT_SIZE;
int type;
if (ceph_snap(inode) != CEPH_NOSNAP)
@@ -308,7 +313,7 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
if (fh_type != FILEID_INO32_GEN &&
fh_type != FILEID_INO32_GEN_PARENT)
return NULL;
- if (fh_len < sizeof(*fh) / 4)
+ if (fh_len < sizeof(*fh) / BYTES_PER_U32)
return NULL;
doutc(fsc->client, "%llx\n", fh->ino);
@@ -427,7 +432,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
if (fh_type != FILEID_INO32_GEN_PARENT)
return NULL;
- if (fh_len < sizeof(*cfh) / 4)
+ if (fh_len < sizeof(*cfh) / BYTES_PER_U32)
return NULL;
doutc(fsc->client, "%llx\n", cfh->parent_ino);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 851d70200c6b..a7254cab44cc 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -2616,7 +2616,7 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
s32 stripe_unit = ci->i_layout.stripe_unit;
s32 stripe_count = ci->i_layout.stripe_count;
s32 object_size = ci->i_layout.object_size;
- u64 object_set_size = object_size * stripe_count;
+ u64 object_set_size = (u64) object_size * stripe_count;
u64 nearly, t;
/* round offset up to next period boundary */
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f3951253e393..2b8438d8a324 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1033,8 +1033,7 @@ void ceph_umount_begin(struct super_block *sb)
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
doutc(fsc->client, "starting forced umount\n");
- if (!fsc)
- return;
+
fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
__ceph_umount_begin(fsc);
}
@@ -1227,6 +1226,7 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc)
s->s_time_min = 0;
s->s_time_max = U32_MAX;
s->s_flags |= SB_NODIRATIME | SB_NOATIME;
+ s->s_magic = CEPH_SUPER_MAGIC;
ceph_fscrypt_set_ops(s);
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index e01d5f29f4d2..6dd3a524cd35 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -509,7 +509,7 @@ static void add_scan(struct dlm_ls *ls, struct dlm_rsb *r)
void dlm_rsb_scan(struct timer_list *timer)
{
- struct dlm_ls *ls = from_timer(ls, timer, ls_scan_timer);
+ struct dlm_ls *ls = timer_container_of(ls, timer, ls_scan_timer);
int our_nodeid = dlm_our_nodeid();
struct dlm_rsb *r;
int rv;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a7f80ca01174..c7d39da7e733 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3627,7 +3627,7 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly)
*/
static void print_daily_error_info(struct timer_list *t)
{
- struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
+ struct ext4_sb_info *sbi = timer_container_of(sbi, t, s_err_report);
struct super_block *sb = sbi->s_sb;
struct ext4_super_block *es = sbi->s_es;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 6d5e76848733..d480b94117cd 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -134,7 +134,7 @@ static __be32 jbd2_superblock_csum(journal_superblock_t *sb)
static void commit_timeout(struct timer_list *t)
{
- journal_t *journal = from_timer(journal, t, j_commit_timer);
+ journal_t *journal = timer_container_of(journal, t, j_commit_timer);
wake_up_process(journal->j_task);
}
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index ef3a1e1b6cb0..fda9f4d6093f 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -425,7 +425,9 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
.totlen = cpu_to_je32(c->cleanmarker_size)
};
- jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ if (ret)
+ goto filebad;
marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 29671e33a171..62879c218d4b 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -256,7 +256,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
__func__, skip);
- jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+ if (ret)
+ goto out;
jffs2_scan_dirty_space(c, c->nextblock, skip);
}
#endif
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 4fe64519870f..d83372d3e1a0 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -858,7 +858,10 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
spin_unlock(&c->erase_completion_lock);
jeb = c->nextblock;
- jffs2_prealloc_raw_node_refs(c, jeb, 1);
+ ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
+
+ if (ret)
+ goto out;
if (!c->summary->sum_num || !c->summary->sum_list_head) {
JFFS2_WARNING("Empty summary info!!!\n");
@@ -872,6 +875,8 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
datasize += padsize;
ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
+
+out:
spin_lock(&c->erase_completion_lock);
return ret;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 2f2e93927f46..e13d9ab4f564 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2410,7 +2410,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
namespace_unlock();
}
-bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+static bool __has_locked_children(struct mount *mnt, struct dentry *dentry)
{
struct mount *child;
@@ -2424,6 +2424,16 @@ bool has_locked_children(struct mount *mnt, struct dentry *dentry)
return false;
}
+bool has_locked_children(struct mount *mnt, struct dentry *dentry)
+{
+ bool res;
+
+ read_seqlock_excl(&mount_lock);
+ res = __has_locked_children(mnt, dentry);
+ read_sequnlock_excl(&mount_lock);
+ return res;
+}
+
/*
* Check that there aren't references to earlier/same mount namespaces in the
* specified subtree. Such references can act as pins for mount namespaces
@@ -2468,23 +2478,27 @@ struct vfsmount *clone_private_mount(const struct path *path)
if (IS_MNT_UNBINDABLE(old_mnt))
return ERR_PTR(-EINVAL);
- if (mnt_has_parent(old_mnt)) {
- if (!check_mnt(old_mnt))
- return ERR_PTR(-EINVAL);
- } else {
- if (!is_mounted(&old_mnt->mnt))
- return ERR_PTR(-EINVAL);
-
- /* Make sure this isn't something purely kernel internal. */
- if (!is_anon_ns(old_mnt->mnt_ns))
+ /*
+ * Make sure the source mount is acceptable.
+ * Anything mounted in our mount namespace is allowed.
+ * Otherwise, it must be the root of an anonymous mount
+ * namespace, and we need to make sure no namespace
+ * loops get created.
+ */
+ if (!check_mnt(old_mnt)) {
+ if (!is_mounted(&old_mnt->mnt) ||
+ !is_anon_ns(old_mnt->mnt_ns) ||
+ mnt_has_parent(old_mnt))
return ERR_PTR(-EINVAL);
- /* Make sure we don't create mount namespace loops. */
if (!check_for_nsfs_mounts(old_mnt))
return ERR_PTR(-EINVAL);
}
- if (has_locked_children(old_mnt, path->dentry))
+ if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ if (__has_locked_children(old_mnt, path->dentry))
return ERR_PTR(-EINVAL);
new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
@@ -2930,6 +2944,10 @@ static int do_change_type(struct path *path, int ms_flags)
return -EINVAL;
namespace_lock();
+ if (!check_mnt(mnt)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
if (type == MS_SHARED) {
err = invent_group_ids(mnt, recurse);
if (err)
@@ -3021,7 +3039,7 @@ static struct mount *__do_loopback(struct path *old_path, int recurse)
if (!may_copy_tree(old_path))
return mnt;
- if (!recurse && has_locked_children(old, old_path->dentry))
+ if (!recurse && __has_locked_children(old, old_path->dentry))
return mnt;
if (recurse)
@@ -3414,7 +3432,7 @@ static int do_set_group(struct path *from_path, struct path *to_path)
goto out;
/* From mount should not have locked children in place of To's root */
- if (has_locked_children(from, to->mnt.mnt_root))
+ if (__has_locked_children(from, to->mnt.mnt_root))
goto out;
/* Setting sharing groups is only allowed on private mounts */
@@ -3428,7 +3446,7 @@ static int do_set_group(struct path *from_path, struct path *to_path)
if (IS_MNT_SLAVE(from)) {
struct mount *m = from->mnt_master;
- list_add(&to->mnt_slave, &m->mnt_slave_list);
+ list_add(&to->mnt_slave, &from->mnt_slave);
to->mnt_master = m;
}
@@ -3453,18 +3471,25 @@ out:
* Check if path is overmounted, i.e., if there's a mount on top of
* @path->mnt with @path->dentry as mountpoint.
*
- * Context: This function expects namespace_lock() to be held.
+ * Context: namespace_sem must be held at least shared.
+ * MUST NOT be called under lock_mount_hash() (there one should just
+ * call __lookup_mnt() and check if it returns NULL).
* Return: If path is overmounted true is returned, false if not.
*/
static inline bool path_overmounted(const struct path *path)
{
+ unsigned seq = read_seqbegin(&mount_lock);
+ bool no_child;
+
rcu_read_lock();
- if (unlikely(__lookup_mnt(path->mnt, path->dentry))) {
- rcu_read_unlock();
- return true;
- }
+ no_child = !__lookup_mnt(path->mnt, path->dentry);
rcu_read_unlock();
- return false;
+ if (need_seqretry(&mount_lock, seq)) {
+ read_seqlock_excl(&mount_lock);
+ no_child = !__lookup_mnt(path->mnt, path->dentry);
+ read_sequnlock_excl(&mount_lock);
+ }
+ return unlikely(!no_child);
}
/**
@@ -3623,37 +3648,41 @@ static int do_move_mount(struct path *old_path,
ns = old->mnt_ns;
err = -EINVAL;
- if (!may_use_mount(p))
- goto out;
-
/* The thing moved must be mounted... */
if (!is_mounted(&old->mnt))
goto out;
- /* ... and either ours or the root of anon namespace */
- if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
- goto out;
-
- if (is_anon_ns(ns) && ns == p->mnt_ns) {
+ if (check_mnt(old)) {
+ /* if the source is in our namespace... */
+ /* ... it should be detachable from parent */
+ if (!mnt_has_parent(old) || IS_MNT_LOCKED(old))
+ goto out;
+ /* ... and the target should be in our namespace */
+ if (!check_mnt(p))
+ goto out;
+ } else {
/*
- * Ending up with two files referring to the root of the
- * same anonymous mount namespace would cause an error
- * as this would mean trying to move the same mount
- * twice into the mount tree which would be rejected
- * later. But be explicit about it right here.
+ * otherwise the source must be the root of some anon namespace.
+ * AV: check for mount being root of an anon namespace is worth
+ * an inlined predicate...
*/
- goto out;
- } else if (is_anon_ns(p->mnt_ns)) {
+ if (!is_anon_ns(ns) || mnt_has_parent(old))
+ goto out;
/*
- * Don't allow moving an attached mount tree to an
- * anonymous mount tree.
+ * Bail out early if the target is within the same namespace -
+ * subsequent checks would've rejected that, but they lose
+ * some corner cases if we check it early.
*/
- goto out;
+ if (ns == p->mnt_ns)
+ goto out;
+ /*
+ * Target should be either in our namespace or in an acceptable
+ * anon namespace, sensu check_anonymous_mnt().
+ */
+ if (!may_use_mount(p))
+ goto out;
}
- if (old->mnt.mnt_flags & MNT_LOCKED)
- goto out;
-
if (!path_mounted(old_path))
goto out;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 61a4141f8d6b..f15ca6fc400d 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2485,7 +2485,7 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
static void nilfs_construction_timeout(struct timer_list *t)
{
- struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
+ struct nilfs_sc_info *sci = timer_container_of(sci, t, sc_timer);
wake_up_process(sci->sc_task);
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 43e652a2adaf..b05d4e9d13b2 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1488,7 +1488,8 @@ static void o2net_sc_send_keep_req(struct work_struct *work)
* where shutdown is going to be involved */
static void o2net_idle_timer(struct timer_list *t)
{
- struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout);
+ struct o2net_sock_container *sc = timer_container_of(sc, t,
+ sc_idle_timeout);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
#ifdef CONFIG_DEBUG_FS
unsigned long msecs = ktime_to_ms(ktime_get()) -
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 969b458100fe..dfea7bd800cb 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -48,8 +48,8 @@ static struct file *ovl_open_realfile(const struct file *file,
if (!inode_owner_or_capable(real_idmap, realinode))
flags &= ~O_NOATIME;
- realfile = backing_file_open(&file->f_path, flags, realpath,
- current_cred());
+ realfile = backing_file_open(file_user_path((struct file *) file),
+ flags, realpath, current_cred());
}
ovl_revert_creds(old_cred);
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index bf722daf19a9..0b8b28392eb7 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -16,6 +16,7 @@
struct ovl_lookup_data {
struct super_block *sb;
+ struct dentry *dentry;
const struct ovl_layer *layer;
struct qstr name;
bool is_dir;
@@ -24,6 +25,7 @@ struct ovl_lookup_data {
bool stop;
bool last;
char *redirect;
+ char *upperredirect;
int metacopy;
/* Referring to last redirect xattr */
bool absolute_redirect;
@@ -1024,6 +1026,31 @@ int ovl_verify_lowerdata(struct dentry *dentry)
return ovl_maybe_validate_verity(dentry);
}
+/*
+ * Following redirects/metacopy can have security consequences: it's like a
+ * symlink into the lower layer without the permission checks.
+ *
+ * This is only a problem if the upper layer is untrusted (e.g comes from an USB
+ * drive). This can allow a non-readable file or directory to become readable.
+ *
+ * Only following redirects when redirects are enabled disables this attack
+ * vector when not necessary.
+ */
+static bool ovl_check_follow_redirect(struct ovl_lookup_data *d)
+{
+ struct ovl_fs *ofs = OVL_FS(d->sb);
+
+ if (d->metacopy && !ofs->config.metacopy) {
+ pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", d->dentry);
+ return false;
+ }
+ if ((d->redirect || d->upperredirect) && !ovl_redirect_follow(ofs)) {
+ pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n", d->dentry);
+ return false;
+ }
+ return true;
+}
+
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
@@ -1039,7 +1066,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int ctr = 0;
struct inode *inode = NULL;
bool upperopaque = false;
- char *upperredirect = NULL;
+ bool check_redirect = (ovl_redirect_follow(ofs) || ofs->numdatalayer);
struct dentry *this;
unsigned int i;
int err;
@@ -1047,12 +1074,14 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
int metacopy_size = 0;
struct ovl_lookup_data d = {
.sb = dentry->d_sb,
+ .dentry = dentry,
.name = dentry->d_name,
.is_dir = false,
.opaque = false,
.stop = false,
- .last = ovl_redirect_follow(ofs) ? false : !ovl_numlower(poe),
+ .last = check_redirect ? false : !ovl_numlower(poe),
.redirect = NULL,
+ .upperredirect = NULL,
.metacopy = 0,
};
@@ -1094,8 +1123,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
if (d.redirect) {
err = -ENOMEM;
- upperredirect = kstrdup(d.redirect, GFP_KERNEL);
- if (!upperredirect)
+ d.upperredirect = kstrdup(d.redirect, GFP_KERNEL);
+ if (!d.upperredirect)
goto out_put_upper;
if (d.redirect[0] == '/')
poe = roe;
@@ -1113,7 +1142,12 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
for (i = 0; !d.stop && i < ovl_numlower(poe); i++) {
struct ovl_path lower = ovl_lowerstack(poe)[i];
- if (!ovl_redirect_follow(ofs))
+ if (!ovl_check_follow_redirect(&d)) {
+ err = -EPERM;
+ goto out_put;
+ }
+
+ if (!check_redirect)
d.last = i == ovl_numlower(poe) - 1;
else if (d.is_dir || !ofs->numdatalayer)
d.last = lower.layer->idx == ovl_numlower(roe);
@@ -1126,13 +1160,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
if (!this)
continue;
- if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
- dput(this);
- err = -EPERM;
- pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
- goto out_put;
- }
-
/*
* If no origin fh is stored in upper of a merge dir, store fh
* of lower dir and set upper parent "impure".
@@ -1185,23 +1212,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
ctr++;
}
- /*
- * Following redirects can have security consequences: it's like
- * a symlink into the lower layer without the permission checks.
- * This is only a problem if the upper layer is untrusted (e.g
- * comes from an USB drive). This can allow a non-readable file
- * or directory to become readable.
- *
- * Only following redirects when redirects are enabled disables
- * this attack vector when not necessary.
- */
- err = -EPERM;
- if (d.redirect && !ovl_redirect_follow(ofs)) {
- pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n",
- dentry);
- goto out_put;
- }
-
if (d.stop)
break;
@@ -1212,10 +1222,16 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
}
}
- /* Defer lookup of lowerdata in data-only layers to first access */
+ /*
+ * Defer lookup of lowerdata in data-only layers to first access.
+ * Don't require redirect=follow and metacopy=on in this case.
+ */
if (d.metacopy && ctr && ofs->numdatalayer && d.absolute_redirect) {
d.metacopy = 0;
ctr++;
+ } else if (!ovl_check_follow_redirect(&d)) {
+ err = -EPERM;
+ goto out_put;
}
/*
@@ -1298,20 +1314,26 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
/*
* It's safe to assign upperredirect here: the previous
- * assignment of happens only if upperdentry is non-NULL, and
+ * assignment happens only if upperdentry is non-NULL, and
* this one only if upperdentry is NULL.
*/
- upperredirect = ovl_get_redirect_xattr(ofs, &upperpath, 0);
- if (IS_ERR(upperredirect)) {
- err = PTR_ERR(upperredirect);
- upperredirect = NULL;
+ d.upperredirect = ovl_get_redirect_xattr(ofs, &upperpath, 0);
+ if (IS_ERR(d.upperredirect)) {
+ err = PTR_ERR(d.upperredirect);
+ d.upperredirect = NULL;
goto out_free_oe;
}
+
err = ovl_check_metacopy_xattr(ofs, &upperpath, NULL);
if (err < 0)
goto out_free_oe;
- uppermetacopy = err;
+ d.metacopy = uppermetacopy = err;
metacopy_size = err;
+
+ if (!ovl_check_follow_redirect(&d)) {
+ err = -EPERM;
+ goto out_free_oe;
+ }
}
if (upperdentry || ctr) {
@@ -1319,7 +1341,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
.upperdentry = upperdentry,
.oe = oe,
.index = index,
- .redirect = upperredirect,
+ .redirect = d.upperredirect,
};
/* Store lowerdata redirect for lazy lookup */
@@ -1361,7 +1383,7 @@ out_put_upper:
kfree(origin_path);
}
dput(upperdentry);
- kfree(upperredirect);
+ kfree(d.upperredirect);
out:
kfree(d.redirect);
ovl_revert_creds(old_cred);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index cb449ab310a7..afb7762f873f 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -51,7 +51,7 @@ struct ovl_path {
struct ovl_entry {
unsigned int __numlower;
- struct ovl_path __lowerstack[];
+ struct ovl_path __lowerstack[] __counted_by(__numlower);
};
/* private information held for overlayfs's superblock */
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index 6759f7d040c8..f42488c01957 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -871,18 +871,6 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
config->uuid = OVL_UUID_NULL;
}
- /* Resolve verity -> metacopy dependency */
- if (config->verity_mode && !config->metacopy) {
- /* Don't allow explicit specified conflicting combinations */
- if (set.metacopy) {
- pr_err("conflicting options: metacopy=off,verity=%s\n",
- ovl_verity_mode(config));
- return -EINVAL;
- }
- /* Otherwise automatically enable metacopy. */
- config->metacopy = true;
- }
-
/*
* This is to make the logic below simpler. It doesn't make any other
* difference, since redirect_dir=on is only used for upper.
@@ -890,18 +878,13 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
if (!config->upperdir && config->redirect_mode == OVL_REDIRECT_FOLLOW)
config->redirect_mode = OVL_REDIRECT_ON;
- /* Resolve verity -> metacopy -> redirect_dir dependency */
+ /* metacopy -> redirect_dir dependency */
if (config->metacopy && config->redirect_mode != OVL_REDIRECT_ON) {
if (set.metacopy && set.redirect) {
pr_err("conflicting options: metacopy=on,redirect_dir=%s\n",
ovl_redirect_mode(config));
return -EINVAL;
}
- if (config->verity_mode && set.redirect) {
- pr_err("conflicting options: verity=%s,redirect_dir=%s\n",
- ovl_verity_mode(config), ovl_redirect_mode(config));
- return -EINVAL;
- }
if (set.redirect) {
/*
* There was an explicit redirect_dir=... that resulted
@@ -970,7 +953,7 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
}
- /* Resolve userxattr -> !redirect && !metacopy && !verity dependency */
+ /* Resolve userxattr -> !redirect && !metacopy dependency */
if (config->userxattr) {
if (set.redirect &&
config->redirect_mode != OVL_REDIRECT_NOFOLLOW) {
@@ -982,11 +965,6 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
pr_err("conflicting options: userxattr,metacopy=on\n");
return -EINVAL;
}
- if (config->verity_mode) {
- pr_err("conflicting options: userxattr,verity=%s\n",
- ovl_verity_mode(config));
- return -EINVAL;
- }
/*
* Silently disable default setting of redirect and metacopy.
* This shall be the default in the future as well: these
@@ -1025,11 +1003,6 @@ int ovl_fs_params_verify(const struct ovl_fs_context *ctx,
*/
}
- if (ctx->nr_data > 0 && !config->metacopy) {
- pr_err("lower data-only dirs require metacopy support.\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -1078,17 +1051,16 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
seq_printf(m, ",redirect_dir=%s",
ovl_redirect_mode(&ofs->config));
if (ofs->config.index != ovl_index_def)
- seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off");
+ seq_printf(m, ",index=%s", str_on_off(ofs->config.index));
if (ofs->config.uuid != ovl_uuid_def())
seq_printf(m, ",uuid=%s", ovl_uuid_mode(&ofs->config));
if (ofs->config.nfs_export != ovl_nfs_export_def)
- seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ?
- "on" : "off");
+ seq_printf(m, ",nfs_export=%s",
+ str_on_off(ofs->config.nfs_export));
if (ofs->config.xino != ovl_xino_def() && !ovl_same_fs(ofs))
seq_printf(m, ",xino=%s", ovl_xino_mode(&ofs->config));
if (ofs->config.metacopy != ovl_metacopy_def)
- seq_printf(m, ",metacopy=%s",
- ofs->config.metacopy ? "on" : "off");
+ seq_printf(m, ",metacopy=%s", str_on_off(ofs->config.metacopy));
if (ofs->config.ovl_volatile)
seq_puts(m, ",volatile");
if (ofs->config.userxattr)
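The ovl_show_options() hunks above swap the open-coded `cond ? "on" : "off"` strings for the str_on_off() helper. A small userspace sketch of the same idea, assuming only that the helper returns "on"/"off"; the function below is a local stand-in, not the kernel's linux/string_choices.h version:

#include <stdio.h>
#include <stdbool.h>

/* Local stand-in for the kernel's str_on_off() helper. */
static inline const char *str_on_off(bool v)
{
	return v ? "on" : "off";
}

int main(void)
{
	bool metacopy = true, nfs_export = false;

	/* Mirrors the seq_printf() calls in ovl_show_options() above. */
	printf(",metacopy=%s", str_on_off(metacopy));
	printf(",nfs_export=%s\n", str_on_off(nfs_export));
	return 0;
}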
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 44e208da417c..474c80d210d1 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -13,6 +13,7 @@
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
+#include <linux/overflow.h>
#include "overlayfs.h"
struct ovl_cache_entry {
@@ -147,9 +148,8 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
u64 ino, unsigned int d_type)
{
struct ovl_cache_entry *p;
- size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
- p = kmalloc(size, GFP_KERNEL);
+ p = kmalloc(struct_size(p, name, len + 1), GFP_KERNEL);
if (!p)
return NULL;
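This hunk (and the ovl_alloc_entry() one further down) replaces an open-coded offsetof()-based size with struct_size(), which computes header-plus-flexible-array allocation sizes while guarding the multiplication against overflow. A hedged userspace sketch of the arithmetic; struct_size_sketch() below is a simplified stand-in that only approximates the saturating behaviour of the real macro:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

struct cache_entry {
	size_t len;
	char name[];			/* flexible array member */
};

/*
 * Simplified stand-in for struct_size(): header size plus count array
 * elements, clamped so the result cannot wrap around SIZE_MAX.
 */
#define struct_size_sketch(p, member, count)				\
	(sizeof(*(p)) + ((count) > SIZE_MAX / sizeof((p)->member[0]) ?	\
			 SIZE_MAX - sizeof(*(p)) :			\
			 (count) * sizeof((p)->member[0])))

int main(void)
{
	size_t len = 11;		/* e.g. strlen("hello world") */
	struct cache_entry *p;

	/* Equivalent of: p = kmalloc(struct_size(p, name, len + 1), GFP_KERNEL); */
	p = malloc(struct_size_sketch(p, name, len + 1));
	if (!p)
		return 1;

	printf("allocated %zu bytes (offsetof variant: %zu)\n",
	       struct_size_sketch(p, name, len + 1),
	       offsetof(struct cache_entry, name) + (len + 1));
	free(p);
	return 0;
}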
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 0819c739cc2f..dcccb4b4a66c 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -15,6 +15,7 @@
#include <linux/uuid.h>
#include <linux/namei.h>
#include <linux/ratelimit.h>
+#include <linux/overflow.h>
#include "overlayfs.h"
/* Get write access to upper mnt - may fail if upper sb was remounted ro */
@@ -145,9 +146,9 @@ void ovl_stack_free(struct ovl_path *stack, unsigned int n)
struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
{
- size_t size = offsetof(struct ovl_entry, __lowerstack[numlower]);
- struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
+ struct ovl_entry *oe;
+ oe = kzalloc(struct_size(oe, __lowerstack, numlower), GFP_KERNEL);
if (oe)
oe->__numlower = numlower;
@@ -305,7 +306,9 @@ enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path)
struct dentry *ovl_dentry_upper(struct dentry *dentry)
{
- return ovl_upperdentry_dereference(OVL_I(d_inode(dentry)));
+ struct inode *inode = d_inode(dentry);
+
+ return inode ? ovl_upperdentry_dereference(OVL_I(inode)) : NULL;
}
struct dentry *ovl_dentry_lower(struct dentry *dentry)
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index cc37f58b47dd..1beb124e25f6 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -536,6 +536,8 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
goto unlock;
}
+ rdt_last_cmd_clear();
+
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL;
@@ -3472,6 +3474,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
goto out_unlock;
}
+ rdt_last_cmd_clear();
+
/*
* Check that the parent directory for a monitor group is a "mon_groups"
* directory.
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index 1dfe79d947a6..bc8a812ff95f 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -21,10 +21,10 @@ struct cached_dirent {
struct cached_dirents {
bool is_valid:1;
bool is_failed:1;
- struct dir_context *ctx; /*
- * Only used to make sure we only take entries
- * from a single context. Never dereferenced.
- */
+ struct file *file; /*
+ * Used to associate the cache with a single
+ * open file instance.
+ */
struct mutex de_mutex;
int pos; /* Expected ctx->pos */
struct list_head entries;
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index e03c890de0a0..c0196be0e65f 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -362,6 +362,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
c = 0;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ struct smbdirect_socket_parameters *sp;
+#endif
+
/* channel info will be printed as a part of sessions below */
if (SERVER_IS_CHAN(server))
continue;
@@ -383,25 +387,26 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\nSMBDirect transport not available");
goto skip_rdma;
}
+ sp = &server->smbd_conn->socket.parameters;
seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
"transport status: %x",
server->smbd_conn->protocol,
- server->smbd_conn->transport_status);
+ server->smbd_conn->socket.status);
seq_printf(m, "\nConn receive_credit_max: %x "
"send_credit_target: %x max_send_size: %x",
- server->smbd_conn->receive_credit_max,
- server->smbd_conn->send_credit_target,
- server->smbd_conn->max_send_size);
+ sp->recv_credit_max,
+ sp->send_credit_target,
+ sp->max_send_size);
seq_printf(m, "\nConn max_fragmented_recv_size: %x "
"max_fragmented_send_size: %x max_receive_size:%x",
- server->smbd_conn->max_fragmented_recv_size,
- server->smbd_conn->max_fragmented_send_size,
- server->smbd_conn->max_receive_size);
+ sp->max_fragmented_recv_size,
+ sp->max_fragmented_send_size,
+ sp->max_recv_size);
seq_printf(m, "\nConn keep_alive_interval: %x "
"max_readwrite_size: %x rdma_readwrite_threshold: %x",
- server->smbd_conn->keep_alive_interval,
- server->smbd_conn->max_readwrite_size,
+				sp->keepalive_interval_msec / 1000,
+ sp->max_read_write_size,
server->smbd_conn->rdma_readwrite_threshold);
seq_printf(m, "\nDebug count_get_receive_buffer: %x "
"count_put_receive_buffer: %x count_send_empty: %x",
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index ca435a3841b8..b9ec9fe16a98 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -145,6 +145,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 54
-#define CIFS_VERSION "2.54"
+#define SMB3_PRODUCT_BUILD 55
+#define CIFS_VERSION "2.55"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index ad7dd16db3e9..45e94e18f4d5 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -1085,6 +1085,7 @@ struct cifs_chan {
};
#define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
+#define CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES (0x2)
/*
* Session structure. One of these for each uid session with a particular host
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 024817d40c5f..c4fb80b37738 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -116,13 +116,9 @@ static void smb2_query_server_interfaces(struct work_struct *work)
rc = server->ops->query_server_interfaces(xid, tcon, false);
free_xid(xid);
- if (rc) {
- if (rc == -EOPNOTSUPP)
- return;
-
+ if (rc)
cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
__func__, rc);
- }
queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
(SMB_INTERFACE_POLL_INTERVAL * HZ));
@@ -3722,9 +3718,15 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
goto out;
}
- /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
- if (tcon->posix_extensions)
+ /*
+ * if new SMB3.11 POSIX extensions are supported, do not change anything in the
+ * path (i.e., do not remap / and \ and do not map any special characters)
+ */
+ if (tcon->posix_extensions) {
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
+ cifs_sb->mnt_cifs_flags &= ~(CIFS_MOUNT_MAP_SFM_CHR |
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ }
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* tell server which Unix caps we support */
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index d2df10b8e6fd..9835672267d2 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -999,15 +999,18 @@ int cifs_open(struct inode *inode, struct file *file)
rc = cifs_get_readable_path(tcon, full_path, &cfile);
}
if (rc == 0) {
- if (file->f_flags == cfile->f_flags) {
+ unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
+ unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
+
+ if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
+ (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
file->private_data = cfile;
spin_lock(&CIFS_I(inode)->deferred_lock);
cifs_del_deferred_close(cfile);
spin_unlock(&CIFS_I(inode)->deferred_lock);
goto use_cache;
- } else {
- _cifsFileInfo_put(cfile, true, false);
}
+ _cifsFileInfo_put(cfile, true, false);
} else {
		/* hard link on the deferred close file */
rc = cifs_get_hardlink_path(tcon, inode, file);
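The cifs_open() hunk above relaxes the deferred-close reuse check: instead of demanding identical f_flags, it masks out the creation-time flags (O_CREAT, O_EXCL, O_TRUNC) and requires only that the resulting access mode and the O_SYNC/O_DIRECT behaviour match. A hedged userspace illustration of that comparison; cifs_convert_flags() is approximated here by extracting O_ACCMODE, and the helper name is made up:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdbool.h>
#include <fcntl.h>

/* Flags that only matter at creation time and are ignored when reusing. */
#define CREATE_TIME_FLAGS (O_CREAT | O_EXCL | O_TRUNC)

/*
 * Rough stand-in for the check in cifs_open(): the real code compares
 * cifs_convert_flags() results; here only the access mode plus the two
 * I/O-behaviour flags that must still match are compared.
 */
static bool can_reuse_handle(unsigned int new_flags, unsigned int cached_flags)
{
	unsigned int o = new_flags & ~CREATE_TIME_FLAGS;
	unsigned int c = cached_flags & ~CREATE_TIME_FLAGS;

	return (o & O_ACCMODE) == (c & O_ACCMODE) &&
	       (o & (O_SYNC | O_DIRECT)) == (c & (O_SYNC | O_DIRECT));
}

int main(void)
{
	/* O_CREAT on the new open no longer blocks reuse of the cached handle. */
	printf("%d\n", can_reuse_handle(O_RDWR | O_CREAT, O_RDWR));
	/* ...but a mismatched O_DIRECT still does. */
	printf("%d\n", can_reuse_handle(O_RDWR | O_DIRECT, O_RDWR));
	return 0;
}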
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index f9f11cbf89be..ba0193cf9033 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -851,9 +851,9 @@ static bool emit_cached_dirents(struct cached_dirents *cde,
}
static void update_cached_dirents_count(struct cached_dirents *cde,
- struct dir_context *ctx)
+ struct file *file)
{
- if (cde->ctx != ctx)
+ if (cde->file != file)
return;
if (cde->is_valid || cde->is_failed)
return;
@@ -862,9 +862,9 @@ static void update_cached_dirents_count(struct cached_dirents *cde,
}
static void finished_cached_dirents_count(struct cached_dirents *cde,
- struct dir_context *ctx)
+ struct dir_context *ctx, struct file *file)
{
- if (cde->ctx != ctx)
+ if (cde->file != file)
return;
if (cde->is_valid || cde->is_failed)
return;
@@ -877,11 +877,12 @@ static void finished_cached_dirents_count(struct cached_dirents *cde,
static void add_cached_dirent(struct cached_dirents *cde,
struct dir_context *ctx,
const char *name, int namelen,
- struct cifs_fattr *fattr)
+ struct cifs_fattr *fattr,
+ struct file *file)
{
struct cached_dirent *de;
- if (cde->ctx != ctx)
+ if (cde->file != file)
return;
if (cde->is_valid || cde->is_failed)
return;
@@ -911,7 +912,8 @@ static void add_cached_dirent(struct cached_dirents *cde,
static bool cifs_dir_emit(struct dir_context *ctx,
const char *name, int namelen,
struct cifs_fattr *fattr,
- struct cached_fid *cfid)
+ struct cached_fid *cfid,
+ struct file *file)
{
bool rc;
ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
@@ -923,7 +925,7 @@ static bool cifs_dir_emit(struct dir_context *ctx,
if (cfid) {
mutex_lock(&cfid->dirents.de_mutex);
add_cached_dirent(&cfid->dirents, ctx, name, namelen,
- fattr);
+ fattr, file);
mutex_unlock(&cfid->dirents.de_mutex);
}
@@ -1023,7 +1025,7 @@ static int cifs_filldir(char *find_entry, struct file *file,
cifs_prime_dcache(file_dentry(file), &name, &fattr);
return !cifs_dir_emit(ctx, name.name, name.len,
- &fattr, cfid);
+ &fattr, cfid, file);
}
@@ -1074,8 +1076,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
* we need to initialize scanning and storing the
* directory content.
*/
- if (ctx->pos == 0 && cfid->dirents.ctx == NULL) {
- cfid->dirents.ctx = ctx;
+ if (ctx->pos == 0 && cfid->dirents.file == NULL) {
+ cfid->dirents.file = file;
cfid->dirents.pos = 2;
}
/*
@@ -1143,7 +1145,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
} else {
if (cfid) {
mutex_lock(&cfid->dirents.de_mutex);
- finished_cached_dirents_count(&cfid->dirents, ctx);
+ finished_cached_dirents_count(&cfid->dirents, ctx, file);
mutex_unlock(&cfid->dirents.de_mutex);
}
cifs_dbg(FYI, "Could not find entry\n");
@@ -1184,7 +1186,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
ctx->pos++;
if (cfid) {
mutex_lock(&cfid->dirents.de_mutex);
- update_cached_dirents_count(&cfid->dirents, ctx);
+ update_cached_dirents_count(&cfid->dirents, file);
mutex_unlock(&cfid->dirents.de_mutex);
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index bab9f567d9b7..1468c16ea9b8 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -504,6 +504,9 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
+ struct smbdirect_socket_parameters *sp =
+ &server->smbd_conn->socket.parameters;
+
if (server->sign)
/*
* Account for SMB2 data transfer packet header and
@@ -511,12 +514,12 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
*/
wsize = min_t(unsigned int,
wsize,
- server->smbd_conn->max_fragmented_send_size -
+ sp->max_fragmented_send_size -
SMB2_READWRITE_PDU_HEADER_SIZE -
sizeof(struct smb2_transform_hdr));
else
wsize = min_t(unsigned int,
- wsize, server->smbd_conn->max_readwrite_size);
+ wsize, sp->max_read_write_size);
}
#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
@@ -552,6 +555,9 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
+ struct smbdirect_socket_parameters *sp =
+ &server->smbd_conn->socket.parameters;
+
if (server->sign)
/*
* Account for SMB2 data transfer packet header and
@@ -559,12 +565,12 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
*/
rsize = min_t(unsigned int,
rsize,
- server->smbd_conn->max_fragmented_recv_size -
+ sp->max_fragmented_recv_size -
SMB2_READWRITE_PDU_HEADER_SIZE -
sizeof(struct smb2_transform_hdr));
else
rsize = min_t(unsigned int,
- rsize, server->smbd_conn->max_readwrite_size);
+ rsize, sp->max_read_write_size);
}
#endif
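Both hunks clamp the negotiated wsize/rsize against the SMB Direct socket parameters: with signing, the fragmented send/receive limit minus the SMB2 read/write PDU header and the transform header; without signing, max_read_write_size directly. A sketch of the arithmetic with placeholder sizes (the real constants come from the SMB2 header definitions and negotiation, not the values shown here):

#include <stdio.h>

/* Placeholder sizes; the kernel uses its own SMB2 header definitions. */
#define READWRITE_PDU_HEADER_SIZE	49
#define TRANSFORM_HDR_SIZE		52

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int clamp_wsize(unsigned int wsize, int signing,
				unsigned int max_fragmented_send_size,
				unsigned int max_read_write_size)
{
	if (signing)
		/* Signed traffic is sent fragmented, so leave room for the
		 * read/write PDU header and the transform header. */
		return min_u(wsize, max_fragmented_send_size -
				    READWRITE_PDU_HEADER_SIZE -
				    TRANSFORM_HDR_SIZE);
	/* Unsigned traffic can go through RDMA read/write directly. */
	return min_u(wsize, max_read_write_size);
}

int main(void)
{
	printf("signed:   %u\n", clamp_wsize(4u << 20, 1, 131072, 1u << 20));
	printf("unsigned: %u\n", clamp_wsize(4u << 20, 0, 131072, 1u << 20));
	return 0;
}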
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 0c320d06809c..a717be1626a3 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -36,6 +36,7 @@
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
+#include "../common/smbdirect/smbdirect.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
@@ -411,14 +412,23 @@ skip_sess_setup:
if (!rc &&
(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
server->ops->query_server_interfaces) {
- mutex_unlock(&ses->session_mutex);
-
/*
- * query server network interfaces, in case they change
+ * query server network interfaces, in case they change.
+ * Also mark the session as pending this update while the query
+ * is in progress. This will be used to avoid calling
+ * smb2_reconnect recursively.
*/
+ ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
xid = get_xid();
rc = server->ops->query_server_interfaces(xid, tcon, false);
free_xid(xid);
+ ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
+
+		/* regardless of rc value, set up polling */
+ queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+ (SMB_INTERFACE_POLL_INTERVAL * HZ));
+
+ mutex_unlock(&ses->session_mutex);
if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
/*
@@ -438,11 +448,8 @@ skip_sess_setup:
if (ses->chan_max > ses->chan_count &&
ses->iface_count &&
!SERVER_IS_CHAN(server)) {
- if (ses->chan_count == 1) {
+ if (ses->chan_count == 1)
cifs_server_dbg(VFS, "supports multichannel now\n");
- queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
- (SMB_INTERFACE_POLL_INTERVAL * HZ));
- }
cifs_try_adding_channels(ses);
}
@@ -560,11 +567,18 @@ static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
struct TCP_Server_Info *server,
void **request_buf, unsigned int *total_len)
{
- /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
- if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
+ /*
+ * Skip reconnect in one of the following cases:
+ * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs
+ * 2. For FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from
+	 * smb2_reconnect (indicated by the CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES ses flag)
+ */
+ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO ||
+ (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO &&
+ (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES)))
return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
request_buf, total_len);
- }
+
return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
request_buf, total_len);
}
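The flag-guarded request init above exists to break a recursion: smb2_reconnect() issues the interface-query ioctl, and the ioctl setup path would otherwise call back into smb2_reconnect(). A stripped-down sketch of that guard with hypothetical names; only the flag-around-the-call shape mirrors the patch:

#include <stdio.h>

/* Illustrative flag bit; mirrors CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES. */
#define SES_PENDING_QUERY_INTERFACES 0x2

struct session {
	unsigned int flags;
};

static int smb_reconnect(struct session *ses);

/* Request setup normally runs the reconnect check first; while the
 * interface query is pending that step is skipped to avoid recursion. */
static int ioctl_req_init(struct session *ses)
{
	if (ses->flags & SES_PENDING_QUERY_INTERFACES)
		return 0;		/* plain init, no reconnect */
	return smb_reconnect(ses);	/* normal path */
}

static int query_interfaces(struct session *ses)
{
	return ioctl_req_init(ses);	/* issues the ioctl */
}

static int smb_reconnect(struct session *ses)
{
	int rc;

	ses->flags |= SES_PENDING_QUERY_INTERFACES;
	rc = query_interfaces(ses);	/* would recurse without the flag */
	ses->flags &= ~SES_PENDING_QUERY_INTERFACES;
	return rc;
}

int main(void)
{
	struct session ses = { .flags = 0 };

	printf("rc=%d\n", smb_reconnect(&ses));
	return 0;
}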
@@ -4449,10 +4463,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a RDMA write, fill in and append
- * smbd_buffer_descriptor_v1 to the end of read request
+ * smbdirect_buffer_descriptor_v1 to the end of read request
*/
if (rdata && smb3_use_rdma_offload(io_parms)) {
- struct smbd_buffer_descriptor_v1 *v1;
+ struct smbdirect_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter,
@@ -4466,8 +4480,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
req->ReadChannelInfoOffset =
cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
req->ReadChannelInfoLength =
- cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
- v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+ cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
+ v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
v1->offset = cpu_to_le64(rdata->mr->mr->iova);
v1->token = cpu_to_le32(rdata->mr->mr->rkey);
v1->length = cpu_to_le32(rdata->mr->mr->length);
@@ -4975,10 +4989,10 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a server RDMA read, fill in and append
- * smbd_buffer_descriptor_v1 to the end of write request
+ * smbdirect_buffer_descriptor_v1 to the end of write request
*/
if (smb3_use_rdma_offload(io_parms)) {
- struct smbd_buffer_descriptor_v1 *v1;
+ struct smbdirect_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
@@ -4997,8 +5011,8 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
req->WriteChannelInfoOffset =
cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
req->WriteChannelInfoLength =
- cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
- v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+ cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
+ v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
v1->offset = cpu_to_le64(wdata->mr->mr->iova);
v1->token = cpu_to_le32(wdata->mr->mr->rkey);
v1->length = cpu_to_le32(wdata->mr->mr->length);
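In both the read and write paths the request is extended with a buffer descriptor that tells the peer where to perform the RDMA transfer: remote address (the MR iova), steering token (rkey) and length, as described for Buffer Descriptor V1 in [MS-SMBD]. A hedged sketch of that 16-byte layout; the userspace struct below stands in for the kernel's __le64/__le32 definition and all values are made up:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/*
 * Userspace stand-in for smbdirect_buffer_descriptor_v1.  On the wire the
 * fields are little-endian; this sketch assumes a little-endian host and
 * skips the cpu_to_le*() conversions used in the kernel.
 */
struct buffer_descriptor_v1 {
	uint64_t offset;	/* remote virtual address (mr->iova) */
	uint32_t token;		/* remote key (mr->rkey) */
	uint32_t length;	/* length of the registered region */
} __attribute__((packed));

int main(void)
{
	unsigned char request_tail[sizeof(struct buffer_descriptor_v1)];
	struct buffer_descriptor_v1 v1 = {
		.offset = 0x7f00deadbeef0000ULL,	/* made-up iova */
		.token  = 0x1234,			/* made-up rkey */
		.length = 1u << 20,
	};

	/* Appended at the end of the read/write request, as in the hunks above. */
	memcpy(request_tail, &v1, sizeof(v1));
	printf("descriptor is %zu bytes\n", sizeof(request_tail));
	return 0;
}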
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index b0b7254661e9..5ae847919da5 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/folio_queue.h>
+#include "../common/smbdirect/smbdirect_pdu.h"
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
@@ -50,9 +51,6 @@ struct smb_extract_to_rdma {
static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
struct smb_extract_to_rdma *rdma);
-/* SMBD version number */
-#define SMBD_V1 0x0100
-
/* Port numbers for SMBD transport */
#define SMB_PORT 445
#define SMBD_PORT 5445
@@ -165,10 +163,11 @@ static void smbd_disconnect_rdma_work(struct work_struct *work)
{
struct smbd_connection *info =
container_of(work, struct smbd_connection, disconnect_work);
+ struct smbdirect_socket *sc = &info->socket;
- if (info->transport_status == SMBD_CONNECTED) {
- info->transport_status = SMBD_DISCONNECTING;
- rdma_disconnect(info->id);
+ if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
+ rdma_disconnect(sc->rdma.cm_id);
}
}
@@ -182,6 +181,7 @@ static int smbd_conn_upcall(
struct rdma_cm_id *id, struct rdma_cm_event *event)
{
struct smbd_connection *info = id->context;
+ struct smbdirect_socket *sc = &info->socket;
log_rdma_event(INFO, "event=%d status=%d\n",
event->event, event->status);
@@ -205,7 +205,7 @@ static int smbd_conn_upcall(
case RDMA_CM_EVENT_ESTABLISHED:
log_rdma_event(INFO, "connected event=%d\n", event->event);
- info->transport_status = SMBD_CONNECTED;
+ sc->status = SMBDIRECT_SOCKET_CONNECTED;
wake_up_interruptible(&info->conn_wait);
break;
@@ -213,20 +213,20 @@ static int smbd_conn_upcall(
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
- info->transport_status = SMBD_DISCONNECTED;
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
wake_up_interruptible(&info->conn_wait);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_DISCONNECTED:
/* This happens when we fail the negotiation */
- if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
- info->transport_status = SMBD_DISCONNECTED;
+ if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
wake_up(&info->conn_wait);
break;
}
- info->transport_status = SMBD_DISCONNECTED;
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
wake_up_interruptible(&info->disconn_wait);
wake_up_interruptible(&info->wait_reassembly_queue);
wake_up_interruptible_all(&info->wait_send_queue);
@@ -275,6 +275,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
int i;
struct smbd_request *request =
container_of(wc->wr_cqe, struct smbd_request, cqe);
+ struct smbd_connection *info = request->info;
+ struct smbdirect_socket *sc = &info->socket;
log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
request, wc->status);
@@ -286,7 +288,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
}
for (i = 0; i < request->num_sge; i++)
- ib_dma_unmap_single(request->info->id->device,
+ ib_dma_unmap_single(sc->ib.dev,
request->sge[i].addr,
request->sge[i].length,
DMA_TO_DEVICE);
@@ -299,7 +301,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
mempool_free(request, request->info->request_mempool);
}
-static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
+static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
{
log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
resp->min_version, resp->max_version,
@@ -318,15 +320,17 @@ static bool process_negotiation_response(
struct smbd_response *response, int packet_length)
{
struct smbd_connection *info = response->info;
- struct smbd_negotiate_resp *packet = smbd_response_payload(response);
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
+ struct smbdirect_negotiate_resp *packet = smbd_response_payload(response);
- if (packet_length < sizeof(struct smbd_negotiate_resp)) {
+ if (packet_length < sizeof(struct smbdirect_negotiate_resp)) {
log_rdma_event(ERR,
"error: packet_length=%d\n", packet_length);
return false;
}
- if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
+ if (le16_to_cpu(packet->negotiated_version) != SMBDIRECT_V1) {
log_rdma_event(ERR, "error: negotiated_version=%x\n",
le16_to_cpu(packet->negotiated_version));
return false;
@@ -347,20 +351,20 @@ static bool process_negotiation_response(
atomic_set(&info->receive_credits, 0);
- if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
+ if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) {
log_rdma_event(ERR, "error: preferred_send_size=%d\n",
le32_to_cpu(packet->preferred_send_size));
return false;
}
- info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
+ sp->max_recv_size = le32_to_cpu(packet->preferred_send_size);
if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
log_rdma_event(ERR, "error: max_receive_size=%d\n",
le32_to_cpu(packet->max_receive_size));
return false;
}
- info->max_send_size = min_t(int, info->max_send_size,
- le32_to_cpu(packet->max_receive_size));
+ sp->max_send_size = min_t(u32, sp->max_send_size,
+ le32_to_cpu(packet->max_receive_size));
if (le32_to_cpu(packet->max_fragmented_size) <
SMBD_MIN_FRAGMENTED_SIZE) {
@@ -368,18 +372,18 @@ static bool process_negotiation_response(
le32_to_cpu(packet->max_fragmented_size));
return false;
}
- info->max_fragmented_send_size =
+ sp->max_fragmented_send_size =
le32_to_cpu(packet->max_fragmented_size);
info->rdma_readwrite_threshold =
- rdma_readwrite_threshold > info->max_fragmented_send_size ?
- info->max_fragmented_send_size :
+ rdma_readwrite_threshold > sp->max_fragmented_send_size ?
+ sp->max_fragmented_send_size :
rdma_readwrite_threshold;
- info->max_readwrite_size = min_t(u32,
+ sp->max_read_write_size = min_t(u32,
le32_to_cpu(packet->max_readwrite_size),
info->max_frmr_depth * PAGE_SIZE);
- info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
+ info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
return true;
}
@@ -393,8 +397,9 @@ static void smbd_post_send_credits(struct work_struct *work)
struct smbd_connection *info =
container_of(work, struct smbd_connection,
post_send_credits_work);
+ struct smbdirect_socket *sc = &info->socket;
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
wake_up(&info->wait_receive_queues);
return;
}
@@ -448,7 +453,7 @@ static void smbd_post_send_credits(struct work_struct *work)
/* Called from softirq, when recv is done */
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct smbd_data_transfer *data_transfer;
+ struct smbdirect_data_transfer *data_transfer;
struct smbd_response *response =
container_of(wc->wr_cqe, struct smbd_response, cqe);
struct smbd_connection *info = response->info;
@@ -474,7 +479,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
switch (response->type) {
/* SMBD negotiation response */
case SMBD_NEGOTIATE_RESP:
- dump_smbd_negotiate_resp(smbd_response_payload(response));
+ dump_smbdirect_negotiate_resp(smbd_response_payload(response));
info->full_packet_received = true;
info->negotiate_done =
process_negotiation_response(response, wc->byte_len);
@@ -531,7 +536,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
/* Send a KEEP_ALIVE response right away if requested */
info->keep_alive_requested = KEEP_ALIVE_NONE;
if (le16_to_cpu(data_transfer->flags) &
- SMB_DIRECT_RESPONSE_REQUESTED) {
+ SMBDIRECT_FLAG_RESPONSE_REQUESTED) {
info->keep_alive_requested = KEEP_ALIVE_PENDING;
}
@@ -635,32 +640,34 @@ static int smbd_ia_open(
struct smbd_connection *info,
struct sockaddr *dstaddr, int port)
{
+ struct smbdirect_socket *sc = &info->socket;
int rc;
- info->id = smbd_create_id(info, dstaddr, port);
- if (IS_ERR(info->id)) {
- rc = PTR_ERR(info->id);
+ sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
+ if (IS_ERR(sc->rdma.cm_id)) {
+ rc = PTR_ERR(sc->rdma.cm_id);
goto out1;
}
+ sc->ib.dev = sc->rdma.cm_id->device;
- if (!frwr_is_supported(&info->id->device->attrs)) {
+ if (!frwr_is_supported(&sc->ib.dev->attrs)) {
log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
- info->id->device->attrs.device_cap_flags,
- info->id->device->attrs.max_fast_reg_page_list_len);
+ sc->ib.dev->attrs.device_cap_flags,
+ sc->ib.dev->attrs.max_fast_reg_page_list_len);
rc = -EPROTONOSUPPORT;
goto out2;
}
info->max_frmr_depth = min_t(int,
smbd_max_frmr_depth,
- info->id->device->attrs.max_fast_reg_page_list_len);
+ sc->ib.dev->attrs.max_fast_reg_page_list_len);
info->mr_type = IB_MR_TYPE_MEM_REG;
- if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
info->mr_type = IB_MR_TYPE_SG_GAPS;
- info->pd = ib_alloc_pd(info->id->device, 0);
- if (IS_ERR(info->pd)) {
- rc = PTR_ERR(info->pd);
+ sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
+ if (IS_ERR(sc->ib.pd)) {
+ rc = PTR_ERR(sc->ib.pd);
log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
goto out2;
}
@@ -668,8 +675,8 @@ static int smbd_ia_open(
return 0;
out2:
- rdma_destroy_id(info->id);
- info->id = NULL;
+ rdma_destroy_id(sc->rdma.cm_id);
+ sc->rdma.cm_id = NULL;
out1:
return rc;
@@ -683,10 +690,12 @@ out1:
*/
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_send_wr send_wr;
int rc = -ENOMEM;
struct smbd_request *request;
- struct smbd_negotiate_req *packet;
+ struct smbdirect_negotiate_req *packet;
request = mempool_alloc(info->request_mempool, GFP_KERNEL);
if (!request)
@@ -695,29 +704,29 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
request->info = info;
packet = smbd_request_payload(request);
- packet->min_version = cpu_to_le16(SMBD_V1);
- packet->max_version = cpu_to_le16(SMBD_V1);
+ packet->min_version = cpu_to_le16(SMBDIRECT_V1);
+ packet->max_version = cpu_to_le16(SMBDIRECT_V1);
packet->reserved = 0;
- packet->credits_requested = cpu_to_le16(info->send_credit_target);
- packet->preferred_send_size = cpu_to_le32(info->max_send_size);
- packet->max_receive_size = cpu_to_le32(info->max_receive_size);
+ packet->credits_requested = cpu_to_le16(sp->send_credit_target);
+ packet->preferred_send_size = cpu_to_le32(sp->max_send_size);
+ packet->max_receive_size = cpu_to_le32(sp->max_recv_size);
packet->max_fragmented_size =
- cpu_to_le32(info->max_fragmented_recv_size);
+ cpu_to_le32(sp->max_fragmented_recv_size);
request->num_sge = 1;
request->sge[0].addr = ib_dma_map_single(
- info->id->device, (void *)packet,
+ sc->ib.dev, (void *)packet,
sizeof(*packet), DMA_TO_DEVICE);
- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
rc = -EIO;
goto dma_mapping_failed;
}
request->sge[0].length = sizeof(*packet);
- request->sge[0].lkey = info->pd->local_dma_lkey;
+ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
ib_dma_sync_single_for_device(
- info->id->device, request->sge[0].addr,
+ sc->ib.dev, request->sge[0].addr,
request->sge[0].length, DMA_TO_DEVICE);
request->cqe.done = send_done;
@@ -734,14 +743,14 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
request->sge[0].length, request->sge[0].lkey);
atomic_inc(&info->send_pending);
- rc = ib_post_send(info->id->qp, &send_wr, NULL);
+ rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
if (!rc)
return 0;
/* if we reach here, post send failed */
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
atomic_dec(&info->send_pending);
- ib_dma_unmap_single(info->id->device, request->sge[0].addr,
+ ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
request->sge[0].length, DMA_TO_DEVICE);
smbd_disconnect_rdma_connection(info);
@@ -774,10 +783,10 @@ static int manage_credits_prior_sending(struct smbd_connection *info)
/*
* Check if we need to send a KEEP_ALIVE message
* The idle connection timer triggers a KEEP_ALIVE message when expires
- * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
+ * SMBDIRECT_FLAG_RESPONSE_REQUESTED is set in the message flag to have peer send
* back a response.
* return value:
- * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
+ * 1 if SMBDIRECT_FLAG_RESPONSE_REQUESTED needs to be set
* 0: otherwise
*/
static int manage_keep_alive_before_sending(struct smbd_connection *info)
@@ -793,6 +802,8 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
static int smbd_post_send(struct smbd_connection *info,
struct smbd_request *request)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_send_wr send_wr;
int rc, i;
@@ -801,7 +812,7 @@ static int smbd_post_send(struct smbd_connection *info,
"rdma_request sge[%d] addr=0x%llx length=%u\n",
i, request->sge[i].addr, request->sge[i].length);
ib_dma_sync_single_for_device(
- info->id->device,
+ sc->ib.dev,
request->sge[i].addr,
request->sge[i].length,
DMA_TO_DEVICE);
@@ -816,7 +827,7 @@ static int smbd_post_send(struct smbd_connection *info,
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- rc = ib_post_send(info->id->qp, &send_wr, NULL);
+ rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
if (rc) {
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
smbd_disconnect_rdma_connection(info);
@@ -824,7 +835,7 @@ static int smbd_post_send(struct smbd_connection *info,
} else
/* Reset timer for idle connection after packet is sent */
mod_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
+ msecs_to_jiffies(sp->keepalive_interval_msec));
return rc;
}
@@ -833,22 +844,24 @@ static int smbd_post_send_iter(struct smbd_connection *info,
struct iov_iter *iter,
int *_remaining_data_length)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
int i, rc;
int header_length;
int data_length;
struct smbd_request *request;
- struct smbd_data_transfer *packet;
+ struct smbdirect_data_transfer *packet;
int new_credits = 0;
wait_credit:
/* Wait for send credits. A SMBD packet needs one credit */
rc = wait_event_interruptible(info->wait_send_queue,
atomic_read(&info->send_credits) > 0 ||
- info->transport_status != SMBD_CONNECTED);
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (rc)
goto err_wait_credit;
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
log_outgoing(ERR, "disconnected not sending on wait_credit\n");
rc = -EAGAIN;
goto err_wait_credit;
@@ -860,17 +873,17 @@ wait_credit:
wait_send_queue:
wait_event(info->wait_post_send,
- atomic_read(&info->send_pending) < info->send_credit_target ||
- info->transport_status != SMBD_CONNECTED);
+ atomic_read(&info->send_pending) < sp->send_credit_target ||
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
rc = -EAGAIN;
goto err_wait_send_queue;
}
if (unlikely(atomic_inc_return(&info->send_pending) >
- info->send_credit_target)) {
+ sp->send_credit_target)) {
atomic_dec(&info->send_pending);
goto wait_send_queue;
}
@@ -890,8 +903,8 @@ wait_send_queue:
.nr_sge = 1,
.max_sge = SMBDIRECT_MAX_SEND_SGE,
.sge = request->sge,
- .device = info->id->device,
- .local_dma_lkey = info->pd->local_dma_lkey,
+ .device = sc->ib.dev,
+ .local_dma_lkey = sc->ib.pd->local_dma_lkey,
.direction = DMA_TO_DEVICE,
};
@@ -909,7 +922,7 @@ wait_send_queue:
/* Fill in the packet header */
packet = smbd_request_payload(request);
- packet->credits_requested = cpu_to_le16(info->send_credit_target);
+ packet->credits_requested = cpu_to_le16(sp->send_credit_target);
new_credits = manage_credits_prior_sending(info);
atomic_add(new_credits, &info->receive_credits);
@@ -919,7 +932,7 @@ wait_send_queue:
packet->flags = 0;
if (manage_keep_alive_before_sending(info))
- packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
+ packet->flags |= cpu_to_le16(SMBDIRECT_FLAG_RESPONSE_REQUESTED);
packet->reserved = 0;
if (!data_length)
@@ -938,23 +951,23 @@ wait_send_queue:
le32_to_cpu(packet->remaining_data_length));
/* Map the packet to DMA */
- header_length = sizeof(struct smbd_data_transfer);
+ header_length = sizeof(struct smbdirect_data_transfer);
/* If this is a packet without payload, don't send padding */
if (!data_length)
- header_length = offsetof(struct smbd_data_transfer, padding);
+ header_length = offsetof(struct smbdirect_data_transfer, padding);
- request->sge[0].addr = ib_dma_map_single(info->id->device,
+ request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
(void *)packet,
header_length,
DMA_TO_DEVICE);
- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
rc = -EIO;
request->sge[0].addr = 0;
goto err_dma;
}
request->sge[0].length = header_length;
- request->sge[0].lkey = info->pd->local_dma_lkey;
+ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
rc = smbd_post_send(info, request);
if (!rc)
@@ -963,7 +976,7 @@ wait_send_queue:
err_dma:
for (i = 0; i < request->num_sge; i++)
if (request->sge[i].addr)
- ib_dma_unmap_single(info->id->device,
+ ib_dma_unmap_single(sc->ib.dev,
request->sge[i].addr,
request->sge[i].length,
DMA_TO_DEVICE);
@@ -1008,17 +1021,19 @@ static int smbd_post_send_empty(struct smbd_connection *info)
static int smbd_post_recv(
struct smbd_connection *info, struct smbd_response *response)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_recv_wr recv_wr;
int rc = -EIO;
response->sge.addr = ib_dma_map_single(
- info->id->device, response->packet,
- info->max_receive_size, DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(info->id->device, response->sge.addr))
+ sc->ib.dev, response->packet,
+ sp->max_recv_size, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
return rc;
- response->sge.length = info->max_receive_size;
- response->sge.lkey = info->pd->local_dma_lkey;
+ response->sge.length = sp->max_recv_size;
+ response->sge.lkey = sc->ib.pd->local_dma_lkey;
response->cqe.done = recv_done;
@@ -1027,9 +1042,9 @@ static int smbd_post_recv(
recv_wr.sg_list = &response->sge;
recv_wr.num_sge = 1;
- rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
+ rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL);
if (rc) {
- ib_dma_unmap_single(info->id->device, response->sge.addr,
+ ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
response->sge.length, DMA_FROM_DEVICE);
smbd_disconnect_rdma_connection(info);
log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
@@ -1187,9 +1202,10 @@ static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
static void put_receive_buffer(
struct smbd_connection *info, struct smbd_response *response)
{
+ struct smbdirect_socket *sc = &info->socket;
unsigned long flags;
- ib_dma_unmap_single(info->id->device, response->sge.addr,
+ ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
response->sge.length, DMA_FROM_DEVICE);
spin_lock_irqsave(&info->receive_queue_lock, flags);
@@ -1264,6 +1280,8 @@ static void idle_connection_timer(struct work_struct *work)
struct smbd_connection *info = container_of(
work, struct smbd_connection,
idle_timer_work.work);
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
log_keep_alive(ERR,
@@ -1278,7 +1296,7 @@ static void idle_connection_timer(struct work_struct *work)
/* Setup the next idle timeout work */
queue_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
+ msecs_to_jiffies(sp->keepalive_interval_msec));
}
/*
@@ -1289,6 +1307,8 @@ static void idle_connection_timer(struct work_struct *work)
void smbd_destroy(struct TCP_Server_Info *server)
{
struct smbd_connection *info = server->smbd_conn;
+ struct smbdirect_socket *sc;
+ struct smbdirect_socket_parameters *sp;
struct smbd_response *response;
unsigned long flags;
@@ -1296,19 +1316,22 @@ void smbd_destroy(struct TCP_Server_Info *server)
log_rdma_event(INFO, "rdma session already destroyed\n");
return;
}
+ sc = &info->socket;
+ sp = &sc->parameters;
log_rdma_event(INFO, "destroying rdma session\n");
- if (info->transport_status != SMBD_DISCONNECTED) {
- rdma_disconnect(server->smbd_conn->id);
+ if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
+ rdma_disconnect(sc->rdma.cm_id);
log_rdma_event(INFO, "wait for transport being disconnected\n");
wait_event_interruptible(
info->disconn_wait,
- info->transport_status == SMBD_DISCONNECTED);
+ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
}
log_rdma_event(INFO, "destroying qp\n");
- ib_drain_qp(info->id->qp);
- rdma_destroy_qp(info->id);
+ ib_drain_qp(sc->ib.qp);
+ rdma_destroy_qp(sc->rdma.cm_id);
+ sc->ib.qp = NULL;
log_rdma_event(INFO, "cancelling idle timer\n");
cancel_delayed_work_sync(&info->idle_timer_work);
@@ -1336,7 +1359,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
log_rdma_event(INFO, "free receive buffers\n");
wait_event(info->wait_receive_queues,
info->count_receive_queue + info->count_empty_packet_queue
- == info->receive_credit_max);
+ == sp->recv_credit_max);
destroy_receive_buffers(info);
/*
@@ -1355,10 +1378,10 @@ void smbd_destroy(struct TCP_Server_Info *server)
}
destroy_mr_list(info);
- ib_free_cq(info->send_cq);
- ib_free_cq(info->recv_cq);
- ib_dealloc_pd(info->pd);
- rdma_destroy_id(info->id);
+ ib_free_cq(sc->ib.send_cq);
+ ib_free_cq(sc->ib.recv_cq);
+ ib_dealloc_pd(sc->ib.pd);
+ rdma_destroy_id(sc->rdma.cm_id);
/* free mempools */
mempool_destroy(info->request_mempool);
@@ -1367,7 +1390,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
mempool_destroy(info->response_mempool);
kmem_cache_destroy(info->response_cache);
- info->transport_status = SMBD_DESTROYED;
+ sc->status = SMBDIRECT_SOCKET_DESTROYED;
destroy_workqueue(info->workqueue);
log_rdma_event(INFO, "rdma session destroyed\n");
@@ -1392,7 +1415,7 @@ int smbd_reconnect(struct TCP_Server_Info *server)
* This is possible if transport is disconnected and we haven't received
* notification from RDMA, but upper layer has detected timeout
*/
- if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
+ if (server->smbd_conn->socket.status == SMBDIRECT_SOCKET_CONNECTED) {
log_rdma_event(INFO, "disconnecting transport\n");
smbd_destroy(server);
}
@@ -1424,6 +1447,8 @@ static void destroy_caches_and_workqueue(struct smbd_connection *info)
#define MAX_NAME_LEN 80
static int allocate_caches_and_workqueue(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
char name[MAX_NAME_LEN];
int rc;
@@ -1432,13 +1457,13 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
kmem_cache_create(
name,
sizeof(struct smbd_request) +
- sizeof(struct smbd_data_transfer),
+ sizeof(struct smbdirect_data_transfer),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!info->request_cache)
return -ENOMEM;
info->request_mempool =
- mempool_create(info->send_credit_target, mempool_alloc_slab,
+ mempool_create(sp->send_credit_target, mempool_alloc_slab,
mempool_free_slab, info->request_cache);
if (!info->request_mempool)
goto out1;
@@ -1448,13 +1473,13 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
kmem_cache_create(
name,
sizeof(struct smbd_response) +
- info->max_receive_size,
+ sp->max_recv_size,
0, SLAB_HWCACHE_ALIGN, NULL);
if (!info->response_cache)
goto out2;
info->response_mempool =
- mempool_create(info->receive_credit_max, mempool_alloc_slab,
+ mempool_create(sp->recv_credit_max, mempool_alloc_slab,
mempool_free_slab, info->response_cache);
if (!info->response_mempool)
goto out3;
@@ -1464,7 +1489,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
if (!info->workqueue)
goto out4;
- rc = allocate_receive_buffers(info, info->receive_credit_max);
+ rc = allocate_receive_buffers(info, sp->recv_credit_max);
if (rc) {
log_rdma_event(ERR, "failed to allocate receive buffers\n");
goto out5;
@@ -1491,6 +1516,8 @@ static struct smbd_connection *_smbd_get_connection(
{
int rc;
struct smbd_connection *info;
+ struct smbdirect_socket *sc;
+ struct smbdirect_socket_parameters *sp;
struct rdma_conn_param conn_param;
struct ib_qp_init_attr qp_attr;
struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
@@ -1500,101 +1527,102 @@ static struct smbd_connection *_smbd_get_connection(
info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
if (!info)
return NULL;
+ sc = &info->socket;
+ sp = &sc->parameters;
- info->transport_status = SMBD_CONNECTING;
+ sc->status = SMBDIRECT_SOCKET_CONNECTING;
rc = smbd_ia_open(info, dstaddr, port);
if (rc) {
log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
goto create_id_failed;
}
- if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
- smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
+ if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
+ smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
smbd_send_credit_target,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
goto config_failed;
}
- if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
- smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
+ if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
+ smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
smbd_receive_credit_max,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
goto config_failed;
}
- info->receive_credit_max = smbd_receive_credit_max;
- info->send_credit_target = smbd_send_credit_target;
- info->max_send_size = smbd_max_send_size;
- info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
- info->max_receive_size = smbd_max_receive_size;
- info->keep_alive_interval = smbd_keep_alive_interval;
+ sp->recv_credit_max = smbd_receive_credit_max;
+ sp->send_credit_target = smbd_send_credit_target;
+ sp->max_send_size = smbd_max_send_size;
+ sp->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+ sp->max_recv_size = smbd_max_receive_size;
+ sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;
- if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
- info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+ if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
+ sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
log_rdma_event(ERR,
"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
IB_DEVICE_NAME_MAX,
- info->id->device->name,
- info->id->device->attrs.max_send_sge,
- info->id->device->attrs.max_recv_sge);
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_send_sge,
+ sc->ib.dev->attrs.max_recv_sge);
goto config_failed;
}
- info->send_cq = NULL;
- info->recv_cq = NULL;
- info->send_cq =
- ib_alloc_cq_any(info->id->device, info,
- info->send_credit_target, IB_POLL_SOFTIRQ);
- if (IS_ERR(info->send_cq)) {
- info->send_cq = NULL;
+ sc->ib.send_cq =
+ ib_alloc_cq_any(sc->ib.dev, info,
+ sp->send_credit_target, IB_POLL_SOFTIRQ);
+ if (IS_ERR(sc->ib.send_cq)) {
+ sc->ib.send_cq = NULL;
goto alloc_cq_failed;
}
- info->recv_cq =
- ib_alloc_cq_any(info->id->device, info,
- info->receive_credit_max, IB_POLL_SOFTIRQ);
- if (IS_ERR(info->recv_cq)) {
- info->recv_cq = NULL;
+ sc->ib.recv_cq =
+ ib_alloc_cq_any(sc->ib.dev, info,
+ sp->recv_credit_max, IB_POLL_SOFTIRQ);
+ if (IS_ERR(sc->ib.recv_cq)) {
+ sc->ib.recv_cq = NULL;
goto alloc_cq_failed;
}
memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.event_handler = smbd_qp_async_error_upcall;
qp_attr.qp_context = info;
- qp_attr.cap.max_send_wr = info->send_credit_target;
- qp_attr.cap.max_recv_wr = info->receive_credit_max;
+ qp_attr.cap.max_send_wr = sp->send_credit_target;
+ qp_attr.cap.max_recv_wr = sp->recv_credit_max;
qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE;
qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE;
qp_attr.cap.max_inline_data = 0;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
- qp_attr.send_cq = info->send_cq;
- qp_attr.recv_cq = info->recv_cq;
+ qp_attr.send_cq = sc->ib.send_cq;
+ qp_attr.recv_cq = sc->ib.recv_cq;
qp_attr.port_num = ~0;
- rc = rdma_create_qp(info->id, info->pd, &qp_attr);
+ rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
if (rc) {
log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
goto create_qp_failed;
}
+ sc->ib.qp = sc->rdma.cm_id->qp;
memset(&conn_param, 0, sizeof(conn_param));
conn_param.initiator_depth = 0;
conn_param.responder_resources =
- min(info->id->device->attrs.max_qp_rd_atom,
+ min(sc->ib.dev->attrs.max_qp_rd_atom,
SMBD_CM_RESPONDER_RESOURCES);
info->responder_resources = conn_param.responder_resources;
log_rdma_mr(INFO, "responder_resources=%d\n",
info->responder_resources);
/* Need to send IRD/ORD in private data for iWARP */
- info->id->device->ops.get_port_immutable(
- info->id->device, info->id->port_num, &port_immutable);
+ sc->ib.dev->ops.get_port_immutable(
+ sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
ird_ord_hdr[0] = info->responder_resources;
ird_ord_hdr[1] = 1;
@@ -1615,16 +1643,16 @@ static struct smbd_connection *_smbd_get_connection(
init_waitqueue_head(&info->conn_wait);
init_waitqueue_head(&info->disconn_wait);
init_waitqueue_head(&info->wait_reassembly_queue);
- rc = rdma_connect(info->id, &conn_param);
+ rc = rdma_connect(sc->rdma.cm_id, &conn_param);
if (rc) {
log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
goto rdma_connect_failed;
}
wait_event_interruptible(
- info->conn_wait, info->transport_status != SMBD_CONNECTING);
+ info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
goto rdma_connect_failed;
}
@@ -1640,7 +1668,7 @@ static struct smbd_connection *_smbd_get_connection(
init_waitqueue_head(&info->wait_send_queue);
INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
queue_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
+ msecs_to_jiffies(sp->keepalive_interval_msec));
init_waitqueue_head(&info->wait_send_pending);
atomic_set(&info->send_pending, 0);
@@ -1675,26 +1703,26 @@ allocate_mr_failed:
negotiation_failed:
cancel_delayed_work_sync(&info->idle_timer_work);
destroy_caches_and_workqueue(info);
- info->transport_status = SMBD_NEGOTIATE_FAILED;
+ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
init_waitqueue_head(&info->conn_wait);
- rdma_disconnect(info->id);
+ rdma_disconnect(sc->rdma.cm_id);
wait_event(info->conn_wait,
- info->transport_status == SMBD_DISCONNECTED);
+ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
allocate_cache_failed:
rdma_connect_failed:
- rdma_destroy_qp(info->id);
+ rdma_destroy_qp(sc->rdma.cm_id);
create_qp_failed:
alloc_cq_failed:
- if (info->send_cq)
- ib_free_cq(info->send_cq);
- if (info->recv_cq)
- ib_free_cq(info->recv_cq);
+ if (sc->ib.send_cq)
+ ib_free_cq(sc->ib.send_cq);
+ if (sc->ib.recv_cq)
+ ib_free_cq(sc->ib.recv_cq);
config_failed:
- ib_dealloc_pd(info->pd);
- rdma_destroy_id(info->id);
+ ib_dealloc_pd(sc->ib.pd);
+ rdma_destroy_id(sc->rdma.cm_id);
create_id_failed:
kfree(info);
@@ -1734,8 +1762,9 @@ try_again:
static int smbd_recv_buf(struct smbd_connection *info, char *buf,
unsigned int size)
{
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_response *response;
- struct smbd_data_transfer *data_transfer;
+ struct smbdirect_data_transfer *data_transfer;
int to_copy, to_read, data_read, offset;
u32 data_length, remaining_data_length, data_offset;
int rc;
@@ -1848,12 +1877,12 @@ read_rfc1002_done:
rc = wait_event_interruptible(
info->wait_reassembly_queue,
info->reassembly_data_length >= size ||
- info->transport_status != SMBD_CONNECTED);
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
/* Don't return any data if interrupted */
if (rc)
return rc;
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
log_read(ERR, "disconnected\n");
return -ECONNABORTED;
}
@@ -1871,6 +1900,7 @@ static int smbd_recv_page(struct smbd_connection *info,
struct page *page, unsigned int page_offset,
unsigned int to_read)
{
+ struct smbdirect_socket *sc = &info->socket;
int ret;
char *to_address;
void *page_address;
@@ -1879,7 +1909,7 @@ static int smbd_recv_page(struct smbd_connection *info,
ret = wait_event_interruptible(
info->wait_reassembly_queue,
info->reassembly_data_length >= to_read ||
- info->transport_status != SMBD_CONNECTED);
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (ret)
return ret;
@@ -1954,12 +1984,14 @@ int smbd_send(struct TCP_Server_Info *server,
int num_rqst, struct smb_rqst *rqst_array)
{
struct smbd_connection *info = server->smbd_conn;
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_rqst *rqst;
struct iov_iter iter;
unsigned int remaining_data_length, klen;
int rc, i, rqst_idx;
- if (info->transport_status != SMBD_CONNECTED)
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
return -EAGAIN;
/*
@@ -1971,10 +2003,10 @@ int smbd_send(struct TCP_Server_Info *server,
for (i = 0; i < num_rqst; i++)
remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
- if (unlikely(remaining_data_length > info->max_fragmented_send_size)) {
+ if (unlikely(remaining_data_length > sp->max_fragmented_send_size)) {
/* assertion: payload never exceeds negotiated maximum */
log_write(ERR, "payload size %d > max size %d\n",
- remaining_data_length, info->max_fragmented_send_size);
+ remaining_data_length, sp->max_fragmented_send_size);
return -EINVAL;
}
@@ -2053,6 +2085,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
{
struct smbd_connection *info =
container_of(work, struct smbd_connection, mr_recovery_work);
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_mr *smbdirect_mr;
int rc;
@@ -2070,7 +2103,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
}
smbdirect_mr->mr = ib_alloc_mr(
- info->pd, info->mr_type,
+ sc->ib.pd, info->mr_type,
info->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
@@ -2099,12 +2132,13 @@ static void smbd_mr_recovery_work(struct work_struct *work)
static void destroy_mr_list(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_mr *mr, *tmp;
cancel_work_sync(&info->mr_recovery_work);
list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
if (mr->state == MR_INVALIDATED)
- ib_dma_unmap_sg(info->id->device, mr->sgt.sgl,
+ ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
mr->sgt.nents, mr->dir);
ib_dereg_mr(mr->mr);
kfree(mr->sgt.sgl);
@@ -2121,6 +2155,7 @@ static void destroy_mr_list(struct smbd_connection *info)
*/
static int allocate_mr_list(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
int i;
struct smbd_mr *smbdirect_mr, *tmp;
@@ -2136,7 +2171,7 @@ static int allocate_mr_list(struct smbd_connection *info)
smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
if (!smbdirect_mr)
goto cleanup_entries;
- smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
+ smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
info->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
@@ -2181,20 +2216,20 @@ cleanup_entries:
*/
static struct smbd_mr *get_mr(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_mr *ret;
int rc;
again:
rc = wait_event_interruptible(info->wait_mr,
atomic_read(&info->mr_ready_count) ||
- info->transport_status != SMBD_CONNECTED);
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (rc) {
log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
return NULL;
}
- if (info->transport_status != SMBD_CONNECTED) {
- log_rdma_mr(ERR, "info->transport_status=%x\n",
- info->transport_status);
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_rdma_mr(ERR, "sc->status=%x\n", sc->status);
return NULL;
}
@@ -2247,6 +2282,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
struct iov_iter *iter,
bool writing, bool need_invalidate)
{
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_mr *smbdirect_mr;
int rc, num_pages;
enum dma_data_direction dir;
@@ -2276,7 +2312,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
num_pages, iov_iter_count(iter), info->max_frmr_depth);
smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
- rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl,
+ rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents, dir);
if (!rc) {
log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
@@ -2312,7 +2348,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
* on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
* on the next ib_post_send when we actually send I/O to remote peer
*/
- rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
+ rc = ib_post_send(sc->ib.qp, &reg_wr->wr, NULL);
if (!rc)
return smbdirect_mr;
@@ -2321,7 +2357,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
/* If all failed, attempt to recover this MR by setting it MR_ERROR*/
map_mr_error:
- ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl,
+ ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents, smbdirect_mr->dir);
dma_map_error:
@@ -2359,6 +2395,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
{
struct ib_send_wr *wr;
struct smbd_connection *info = smbdirect_mr->conn;
+ struct smbdirect_socket *sc = &info->socket;
int rc = 0;
if (smbdirect_mr->need_invalidate) {
@@ -2372,7 +2409,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
wr->send_flags = IB_SEND_SIGNALED;
init_completion(&smbdirect_mr->invalidate_done);
- rc = ib_post_send(info->id->qp, wr, NULL);
+ rc = ib_post_send(sc->ib.qp, wr, NULL);
if (rc) {
log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
smbd_disconnect_rdma_connection(info);
@@ -2389,7 +2426,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
if (smbdirect_mr->state == MR_INVALIDATED) {
ib_dma_unmap_sg(
- info->id->device, smbdirect_mr->sgt.sgl,
+ sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents,
smbdirect_mr->dir);
smbdirect_mr->state = MR_READY;
diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
index c08e3665150d..75b3f491c3ad 100644
--- a/fs/smb/client/smbdirect.h
+++ b/fs/smb/client/smbdirect.h
@@ -15,6 +15,9 @@
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>
+#include "../common/smbdirect/smbdirect.h"
+#include "../common/smbdirect/smbdirect_socket.h"
+
extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
@@ -50,14 +53,8 @@ enum smbd_connection_status {
* 5. mempools for allocating packets
*/
struct smbd_connection {
- enum smbd_connection_status transport_status;
-
- /* RDMA related */
- struct rdma_cm_id *id;
- struct ib_qp_init_attr qp_attr;
- struct ib_pd *pd;
- struct ib_cq *send_cq, *recv_cq;
- struct ib_device_attr dev_attr;
+ struct smbdirect_socket socket;
+
int ri_rc;
struct completion ri_done;
wait_queue_head_t conn_wait;
@@ -72,15 +69,7 @@ struct smbd_connection {
spinlock_t lock_new_credits_offered;
int new_credits_offered;
- /* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
- int receive_credit_max;
- int send_credit_target;
- int max_send_size;
- int max_fragmented_recv_size;
- int max_fragmented_send_size;
- int max_receive_size;
- int keep_alive_interval;
- int max_readwrite_size;
+ /* dynamic connection parameters defined in [MS-SMBD] 3.1.1.1 */
enum keep_alive_status keep_alive_requested;
int protocol;
atomic_t send_credits;
@@ -177,54 +166,6 @@ enum smbd_message_type {
SMBD_TRANSFER_DATA,
};
-#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
-
-/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
-struct smbd_negotiate_req {
- __le16 min_version;
- __le16 max_version;
- __le16 reserved;
- __le16 credits_requested;
- __le32 preferred_send_size;
- __le32 max_receive_size;
- __le32 max_fragmented_size;
-} __packed;
-
-/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
-struct smbd_negotiate_resp {
- __le16 min_version;
- __le16 max_version;
- __le16 negotiated_version;
- __le16 reserved;
- __le16 credits_requested;
- __le16 credits_granted;
- __le32 status;
- __le32 max_readwrite_size;
- __le32 preferred_send_size;
- __le32 max_receive_size;
- __le32 max_fragmented_size;
-} __packed;
-
-/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
-struct smbd_data_transfer {
- __le16 credits_requested;
- __le16 credits_granted;
- __le16 flags;
- __le16 reserved;
- __le32 remaining_data_length;
- __le32 data_offset;
- __le32 data_length;
- __le32 padding;
- __u8 buffer[];
-} __packed;
-
-/* The packet fields for a registered RDMA buffer */
-struct smbd_buffer_descriptor_v1 {
- __le64 offset;
- __le32 token;
- __le32 length;
-} __packed;
-
/* Maximum number of SGEs used by smbdirect.c in any send work request */
#define SMBDIRECT_MAX_SEND_SGE 6
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 266af17aa7d9..191783f553ce 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -1018,14 +1018,16 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
uint index = 0;
unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
struct TCP_Server_Info *server = NULL;
- int i;
+ int i, start, cur;
if (!ses)
return NULL;
spin_lock(&ses->chan_lock);
+ start = atomic_inc_return(&ses->chan_seq);
for (i = 0; i < ses->chan_count; i++) {
- server = ses->chans[i].server;
+ cur = (start + i) % ses->chan_count;
+ server = ses->chans[cur].server;
if (!server || server->terminate)
continue;
@@ -1042,17 +1044,15 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
*/
if (server->in_flight < min_in_flight) {
min_in_flight = server->in_flight;
- index = i;
+ index = cur;
}
if (server->in_flight > max_in_flight)
max_in_flight = server->in_flight;
}
/* if all channels are equally loaded, fall back to round-robin */
- if (min_in_flight == max_in_flight) {
- index = (uint)atomic_inc_return(&ses->chan_seq);
- index %= ses->chan_count;
- }
+ if (min_in_flight == max_in_flight)
+ index = (uint)start % ses->chan_count;
server = ses->chans[index].server;
spin_unlock(&ses->chan_lock);
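The reworked cifs_pick_channel() keeps the least-loaded preference but rotates the scan start using ses->chan_seq, so ties between equally loaded channels no longer always resolve to the lowest index. A small userspace sketch of that policy (hypothetical helper, simplified types, not the kernel implementation):

#include <stdio.h>
#include <limits.h>

/* pick the least-loaded channel, scanning from a rotating start;
 * if every channel is equally loaded, fall back to round-robin */
static unsigned int pick_channel(const unsigned int *in_flight,
				 unsigned int chan_count,
				 unsigned int *chan_seq)
{
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	unsigned int start = ++(*chan_seq);
	unsigned int index = 0;

	for (unsigned int i = 0; i < chan_count; i++) {
		unsigned int cur = (start + i) % chan_count;

		if (in_flight[cur] < min_in_flight) {
			min_in_flight = in_flight[cur];
			index = cur;
		}
		if (in_flight[cur] > max_in_flight)
			max_in_flight = in_flight[cur];
	}

	/* all channels equally loaded: plain round-robin */
	if (min_in_flight == max_in_flight)
		index = start % chan_count;

	return index;
}

int main(void)
{
	unsigned int in_flight[] = { 3, 1, 3 };
	unsigned int seq = 0;

	/* expect channel 1, the least loaded of the three */
	printf("picked %u\n", pick_channel(in_flight, 3, &seq));
	return 0;
}

With the in-flight counts {3, 1, 3} the sketch picks channel 1; when all counts are equal, successive calls cycle through the channels instead of always returning index 0.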
diff --git a/fs/smb/common/smbdirect/smbdirect.h b/fs/smb/common/smbdirect/smbdirect.h
new file mode 100644
index 000000000000..b9a385344ff3
--- /dev/null
+++ b/fs/smb/common/smbdirect/smbdirect.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017, Microsoft Corporation.
+ * Copyright (C) 2018, LG Electronics.
+ */
+
+#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
+#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
+
+/* SMB-DIRECT buffer descriptor V1 structure [MS-SMBD] 2.2.3.1 */
+struct smbdirect_buffer_descriptor_v1 {
+ __le64 offset;
+ __le32 token;
+ __le32 length;
+} __packed;
+
+/*
+ * Connection parameters mostly from [MS-SMBD] 3.1.1.1
+ *
+ * These are set up and negotiated at the beginning of a

+ * connection and remain constant unless explicitly changed.
+ *
+ * Some values are important for the upper layer.
+ */
+struct smbdirect_socket_parameters {
+ __u16 recv_credit_max;
+ __u16 send_credit_target;
+ __u32 max_send_size;
+ __u32 max_fragmented_send_size;
+ __u32 max_recv_size;
+ __u32 max_fragmented_recv_size;
+ __u32 max_read_write_size;
+ __u32 keepalive_interval_msec;
+ __u32 keepalive_timeout_msec;
+} __packed;
+
+#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ */
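struct smbdirect_buffer_descriptor_v1 mirrors the [MS-SMBD] 2.2.3.1 wire layout: a 64-bit offset, a 32-bit token (typically the registered MR's rkey) and a 32-bit length, all little-endian. A hedged userspace sketch of packing one descriptor by hand (illustrative helpers, not kernel API):

#include <stdint.h>
#include <stdio.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static void put_le64(uint8_t *p, uint64_t v)
{
	put_le32(p, (uint32_t)v);
	put_le32(p + 4, (uint32_t)(v >> 32));
}

int main(void)
{
	uint8_t desc[16];	/* 8 (offset) + 4 (token) + 4 (length) */

	put_le64(desc + 0, 0x1000);	/* offset inside the registered buffer */
	put_le32(desc + 8, 0xdeadbeef);	/* token the peer uses for RDMA access */
	put_le32(desc + 12, 4096);	/* length the peer may read/write */

	printf("descriptor is %zu bytes\n", sizeof(desc));
	return 0;
}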
diff --git a/fs/smb/common/smbdirect/smbdirect_pdu.h b/fs/smb/common/smbdirect/smbdirect_pdu.h
new file mode 100644
index 000000000000..ae9fdb05ce23
--- /dev/null
+++ b/fs/smb/common/smbdirect/smbdirect_pdu.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2017 Stefan Metzmacher
+ */
+
+#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
+#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
+
+#define SMBDIRECT_V1 0x0100
+
+/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
+struct smbdirect_negotiate_req {
+ __le16 min_version;
+ __le16 max_version;
+ __le16 reserved;
+ __le16 credits_requested;
+ __le32 preferred_send_size;
+ __le32 max_receive_size;
+ __le32 max_fragmented_size;
+} __packed;
+
+/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
+struct smbdirect_negotiate_resp {
+ __le16 min_version;
+ __le16 max_version;
+ __le16 negotiated_version;
+ __le16 reserved;
+ __le16 credits_requested;
+ __le16 credits_granted;
+ __le32 status;
+ __le32 max_readwrite_size;
+ __le32 preferred_send_size;
+ __le32 max_receive_size;
+ __le32 max_fragmented_size;
+} __packed;
+
+#define SMBDIRECT_DATA_MIN_HDR_SIZE 0x14
+#define SMBDIRECT_DATA_OFFSET 0x18
+
+#define SMBDIRECT_FLAG_RESPONSE_REQUESTED 0x0001
+
+/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
+struct smbdirect_data_transfer {
+ __le16 credits_requested;
+ __le16 credits_granted;
+ __le16 flags;
+ __le16 reserved;
+ __le32 remaining_data_length;
+ __le32 data_offset;
+ __le32 data_length;
+ __le32 padding;
+ __u8 buffer[];
+} __packed;
+
+#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__ */
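SMBDIRECT_DATA_MIN_HDR_SIZE (0x14) and SMBDIRECT_DATA_OFFSET (0x18) follow directly from the packed layout of struct smbdirect_data_transfer: the header up to and including data_length is 20 bytes, and the payload begins after the 4-byte padding field at offset 24. A quick standalone sanity check of that arithmetic (local re-declaration, illustration only):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* simplified re-declaration of struct smbdirect_data_transfer */
struct data_transfer {
	uint16_t credits_requested;
	uint16_t credits_granted;
	uint16_t flags;
	uint16_t reserved;
	uint32_t remaining_data_length;
	uint32_t data_offset;
	uint32_t data_length;
	uint32_t padding;
} __attribute__((packed));

int main(void)
{
	/* 0x14: everything before the padding field */
	static_assert(offsetof(struct data_transfer, padding) == 0x14,
		      "min header size");
	/* 0x18: the payload (buffer[]) begins after padding */
	static_assert(sizeof(struct data_transfer) == 0x18,
		      "data offset");
	return 0;
}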
diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
new file mode 100644
index 000000000000..e5b15cc44a7b
--- /dev/null
+++ b/fs/smb/common/smbdirect/smbdirect_socket.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Stefan Metzmacher
+ */
+
+#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
+#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
+
+enum smbdirect_socket_status {
+ SMBDIRECT_SOCKET_CREATED,
+ SMBDIRECT_SOCKET_CONNECTING,
+ SMBDIRECT_SOCKET_CONNECTED,
+ SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
+ SMBDIRECT_SOCKET_DISCONNECTING,
+ SMBDIRECT_SOCKET_DISCONNECTED,
+ SMBDIRECT_SOCKET_DESTROYED
+};
+
+struct smbdirect_socket {
+ enum smbdirect_socket_status status;
+
+ /* RDMA related */
+ struct {
+ struct rdma_cm_id *cm_id;
+ } rdma;
+
+ /* IB verbs related */
+ struct {
+ struct ib_pd *pd;
+ struct ib_cq *send_cq;
+ struct ib_cq *recv_cq;
+
+ /*
+ * shortcuts for rdma.cm_id->{qp,device};
+ */
+ struct ib_qp *qp;
+ struct ib_device *dev;
+ } ib;
+
+ struct smbdirect_socket_parameters parameters;
+};
+
+#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
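Nearly every conversion in smbdirect.c above reduces to the same test: proceed only while sc->status is SMBDIRECT_SOCKET_CONNECTED, otherwise bail out. A trivial userspace sketch of that predicate plus a logging helper, with the enum re-declared locally for illustration:

#include <stdbool.h>
#include <stdio.h>

enum smbdirect_socket_status {
	SMBDIRECT_SOCKET_CREATED,
	SMBDIRECT_SOCKET_CONNECTING,
	SMBDIRECT_SOCKET_CONNECTED,
	SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
	SMBDIRECT_SOCKET_DISCONNECTING,
	SMBDIRECT_SOCKET_DISCONNECTED,
	SMBDIRECT_SOCKET_DESTROYED
};

/* callers may only issue I/O while the socket is fully connected */
static bool smbdirect_socket_connected(enum smbdirect_socket_status s)
{
	return s == SMBDIRECT_SOCKET_CONNECTED;
}

static const char *smbdirect_socket_status_string(enum smbdirect_socket_status s)
{
	switch (s) {
	case SMBDIRECT_SOCKET_CREATED:		return "created";
	case SMBDIRECT_SOCKET_CONNECTING:	return "connecting";
	case SMBDIRECT_SOCKET_CONNECTED:	return "connected";
	case SMBDIRECT_SOCKET_NEGOTIATE_FAILED:	return "negotiate failed";
	case SMBDIRECT_SOCKET_DISCONNECTING:	return "disconnecting";
	case SMBDIRECT_SOCKET_DISCONNECTED:	return "disconnected";
	case SMBDIRECT_SOCKET_DESTROYED:	return "destroyed";
	}
	return "unknown";
}

int main(void)
{
	enum smbdirect_socket_status s = SMBDIRECT_SOCKET_DISCONNECTED;

	printf("%s: connected=%d\n", smbdirect_socket_status_string(s),
	       smbdirect_socket_connected(s));
	return 0;
}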
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index ee954e64ce7f..e28ab4395e5c 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -985,7 +985,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);
if (kill_xattrs && ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
- ubifs_err(c, "Cannot delete inode, it has too much xattrs!");
+ ubifs_err(c, "Cannot delete inode, it has too many xattrs!");
err = -EPERM;
ubifs_ro_mode(c, err);
return err;