Diffstat (limited to 'fs/smb/client')
-rw-r--r--  fs/smb/client/Kconfig          |    1
-rw-r--r--  fs/smb/client/cached_dir.c     |   50
-rw-r--r--  fs/smb/client/cached_dir.h     |   16
-rw-r--r--  fs/smb/client/cifs_debug.c     |  153
-rw-r--r--  fs/smb/client/cifs_spnego.c    |   47
-rw-r--r--  fs/smb/client/cifs_unicode.c   |    3
-rw-r--r--  fs/smb/client/cifsencrypt.c    |    8
-rw-r--r--  fs/smb/client/cifsfs.c         |   72
-rw-r--r--  fs/smb/client/cifsfs.h         |    4
-rw-r--r--  fs/smb/client/cifsglob.h       |   43
-rw-r--r--  fs/smb/client/cifsproto.h      |    4
-rw-r--r--  fs/smb/client/cifstransport.c  |   19
-rw-r--r--  fs/smb/client/compress.c       |   71
-rw-r--r--  fs/smb/client/connect.c        |    9
-rw-r--r--  fs/smb/client/dir.c            |   54
-rw-r--r--  fs/smb/client/file.c           |   34
-rw-r--r--  fs/smb/client/fs_context.c     |   11
-rw-r--r--  fs/smb/client/inode.c          |  139
-rw-r--r--  fs/smb/client/misc.c           |   38
-rw-r--r--  fs/smb/client/readdir.c        |   40
-rw-r--r--  fs/smb/client/reparse.c        |    2
-rw-r--r--  fs/smb/client/smb1ops.c        |    4
-rw-r--r--  fs/smb/client/smb2glob.h       |    3
-rw-r--r--  fs/smb/client/smb2inode.c      |  296
-rw-r--r--  fs/smb/client/smb2misc.c       |   19
-rw-r--r--  fs/smb/client/smb2ops.c        |   79
-rw-r--r--  fs/smb/client/smb2pdu.c        |   24
-rw-r--r--  fs/smb/client/smb2proto.h      |    3
-rw-r--r--  fs/smb/client/smb2transport.c  |    1
-rw-r--r--  fs/smb/client/smbdirect.c      | 1230
-rw-r--r--  fs/smb/client/smbdirect.h      |  102
-rw-r--r--  fs/smb/client/trace.h          |   61
-rw-r--r--  fs/smb/client/transport.c      |   20
33 files changed, 1719 insertions(+), 941 deletions(-)
diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig
index 9f05f94e265a..a4c02199fef4 100644
--- a/fs/smb/client/Kconfig
+++ b/fs/smb/client/Kconfig
@@ -15,6 +15,7 @@ config CIFS
select CRYPTO_GCM
select CRYPTO_ECB
select CRYPTO_AES
+ select CRYPTO_LIB_ARC4
select KEYS
select DNS_RESOLVER
select ASN1
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index b69daeb1301b..b36f9f9340f0 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -36,9 +36,8 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
* fully cached or it may be in the process of
* being deleted due to a lease break.
*/
- if (!cfid->time || !cfid->has_lease) {
+ if (!is_valid_cached_dir(cfid))
return NULL;
- }
kref_get(&cfid->refcount);
return cfid;
}
@@ -194,7 +193,7 @@ replay_again:
* Otherwise, it is either a new entry or laundromat worker removed it
* from @cfids->entries. Caller will put last reference if the latter.
*/
- if (cfid->has_lease && cfid->time) {
+ if (is_valid_cached_dir(cfid)) {
cfid->last_access_time = jiffies;
spin_unlock(&cfids->cfid_list_lock);
*ret_cfid = cfid;
@@ -233,7 +232,7 @@ replay_again:
list_for_each_entry(parent_cfid, &cfids->entries, entry) {
if (parent_cfid->dentry == dentry->d_parent) {
cifs_dbg(FYI, "found a parent cached file handle\n");
- if (parent_cfid->has_lease && parent_cfid->time) {
+ if (is_valid_cached_dir(parent_cfid)) {
lease_flags
|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
memcpy(pfid->parent_lease_key,
@@ -417,12 +416,18 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
if (cfids == NULL)
return -EOPNOTSUPP;
+ if (!dentry)
+ return -ENOENT;
+
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry(cfid, &cfids->entries, entry) {
- if (dentry && cfid->dentry == dentry) {
+ if (cfid->dentry == dentry) {
+ if (!is_valid_cached_dir(cfid))
+ break;
cifs_dbg(FYI, "found a cached file handle by dentry\n");
kref_get(&cfid->refcount);
*ret_cfid = cfid;
+ cfid->last_access_time = jiffies;
spin_unlock(&cfids->cfid_list_lock);
return 0;
}
@@ -522,10 +527,9 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
spin_unlock(&cifs_sb->tlink_tree_lock);
goto done;
}
- spin_lock(&cfid->fid_lock);
+
tmp_list->dentry = cfid->dentry;
cfid->dentry = NULL;
- spin_unlock(&cfid->fid_lock);
list_add_tail(&tmp_list->entry, &entry);
}
@@ -608,14 +612,9 @@ static void cached_dir_put_work(struct work_struct *work)
{
struct cached_fid *cfid = container_of(work, struct cached_fid,
put_work);
- struct dentry *dentry;
-
- spin_lock(&cfid->fid_lock);
- dentry = cfid->dentry;
+ dput(cfid->dentry);
cfid->dentry = NULL;
- spin_unlock(&cfid->fid_lock);
- dput(dentry);
queue_work(serverclose_wq, &cfid->close_work);
}
@@ -673,7 +672,6 @@ static struct cached_fid *init_cached_dir(const char *path)
INIT_LIST_HEAD(&cfid->entry);
INIT_LIST_HEAD(&cfid->dirents.entries);
mutex_init(&cfid->dirents.de_mutex);
- spin_lock_init(&cfid->fid_lock);
kref_init(&cfid->refcount);
return cfid;
}
@@ -697,6 +695,21 @@ static void free_cached_dir(struct cached_fid *cfid)
kfree(dirent);
}
+ /* adjust tcon-level counters and reset per-dir accounting */
+ if (cfid->cfids) {
+ if (cfid->dirents.entries_count)
+ atomic_long_sub((long)cfid->dirents.entries_count,
+ &cfid->cfids->total_dirents_entries);
+ if (cfid->dirents.bytes_used) {
+ atomic64_sub((long long)cfid->dirents.bytes_used,
+ &cfid->cfids->total_dirents_bytes);
+ atomic64_sub((long long)cfid->dirents.bytes_used,
+ &cifs_dircache_bytes_used);
+ }
+ }
+ cfid->dirents.entries_count = 0;
+ cfid->dirents.bytes_used = 0;
+
kfree(cfid->path);
cfid->path = NULL;
kfree(cfid);
@@ -725,7 +738,6 @@ static void cfids_laundromat_worker(struct work_struct *work)
{
struct cached_fids *cfids;
struct cached_fid *cfid, *q;
- struct dentry *dentry;
LIST_HEAD(entry);
cfids = container_of(work, struct cached_fids, laundromat_work.work);
@@ -752,12 +764,9 @@ static void cfids_laundromat_worker(struct work_struct *work)
list_for_each_entry_safe(cfid, q, &entry, entry) {
list_del(&cfid->entry);
- spin_lock(&cfid->fid_lock);
- dentry = cfid->dentry;
+ dput(cfid->dentry);
cfid->dentry = NULL;
- spin_unlock(&cfid->fid_lock);
- dput(dentry);
if (cfid->is_open) {
spin_lock(&cifs_tcp_ses_lock);
++cfid->tcon->tc_count;
@@ -792,6 +801,9 @@ struct cached_fids *init_cached_dirs(void)
queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
dir_cache_timeout * HZ);
+ atomic_long_set(&cfids->total_dirents_entries, 0);
+ atomic64_set(&cfids->total_dirents_bytes, 0);
+
return cfids;
}
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index 46b5a2fdf15b..31339dc32719 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -27,6 +27,9 @@ struct cached_dirents {
struct mutex de_mutex;
loff_t pos; /* Expected ctx->pos */
struct list_head entries;
+ /* accounting for cached entries in this directory */
+ unsigned long entries_count;
+ unsigned long bytes_used;
};
struct cached_fid {
@@ -41,7 +44,6 @@ struct cached_fid {
unsigned long last_access_time; /* jiffies of when last accessed */
struct kref refcount;
struct cifs_fid fid;
- spinlock_t fid_lock;
struct cifs_tcon *tcon;
struct dentry *dentry;
struct work_struct put_work;
@@ -62,8 +64,20 @@ struct cached_fids {
struct list_head dying;
struct work_struct invalidation_work;
struct delayed_work laundromat_work;
+ /* aggregate accounting for all cached dirents under this tcon */
+ atomic_long_t total_dirents_entries;
+ atomic64_t total_dirents_bytes;
};
+/* Module-wide directory cache accounting (defined in cifsfs.c) */
+extern atomic64_t cifs_dircache_bytes_used; /* bytes across all mounts */
+
+static inline bool
+is_valid_cached_dir(struct cached_fid *cfid)
+{
+ return cfid->time && cfid->has_lease;
+}
+
extern struct cached_fids *init_cached_dirs(void);
extern void free_cached_dirs(struct cached_fids *cfids);
extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
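
Note: the new accounting runs at three levels: plain counters inside struct cached_dirents (serialized by de_mutex), per-tcon atomics in struct cached_fids, and the module-wide cifs_dircache_bytes_used. A minimal self-contained model of how the increments and decrements pair up; illustrative only, with generic struct names rather than the patch's own:

#include <linux/atomic.h>

/* Per-directory counters are protected by the directory's mutex; the
 * per-tcon and global totals are atomics so they can be read lock-free
 * from /proc. Sketch only.
 */
struct dir_acct {
	unsigned long entries_count;
	unsigned long bytes_used;
};

struct tcon_acct {
	atomic_long_t total_entries;
	atomic64_t total_bytes;
};

static atomic64_t global_bytes = ATOMIC64_INIT(0);

static void acct_add(struct dir_acct *d, struct tcon_acct *t, size_t cost)
{
	d->entries_count++;
	d->bytes_used += cost;
	atomic_long_inc(&t->total_entries);
	atomic64_add(cost, &t->total_bytes);
	atomic64_add(cost, &global_bytes);
}

/* Called once when the cached dir is torn down (the role free_cached_dir()
 * plays above): the per-dir counters are the single source of truth for
 * how much to subtract at the upper levels.
 */
static void acct_drop_all(struct dir_acct *d, struct tcon_acct *t)
{
	atomic_long_sub(d->entries_count, &t->total_entries);
	atomic64_sub(d->bytes_used, &t->total_bytes);
	atomic64_sub(d->bytes_used, &global_bytes);
	d->entries_count = 0;
	d->bytes_used = 0;
}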
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index beb4f18f05ef..1fb71d2d31b5 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -24,6 +24,7 @@
#endif
#ifdef CONFIG_CIFS_SMB_DIRECT
#include "smbdirect.h"
+#include "../common/smbdirect/smbdirect_pdu.h"
#endif
#include "cifs_swn.h"
#include "cached_dir.h"
@@ -239,14 +240,18 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct cifsFileInfo *cfile;
+ struct inode *inode;
+ struct cifsInodeInfo *cinode;
+ char lease[4];
+ int n;
seq_puts(m, "# Version:1\n");
seq_puts(m, "# Format:\n");
seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
#ifdef CONFIG_CIFS_DEBUG2
- seq_printf(m, " <filename> <mid>\n");
+ seq_puts(m, " <filename> <lease> <mid>\n");
#else
- seq_printf(m, " <filename>\n");
+ seq_puts(m, " <filename> <lease>\n");
#endif /* CIFS_DEBUG2 */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
@@ -266,11 +271,30 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
cfile->pid,
from_kuid(&init_user_ns, cfile->uid),
cfile->dentry);
+
+ /* Append lease/oplock caching state as RHW letters */
+ inode = d_inode(cfile->dentry);
+ n = 0;
+ if (inode) {
+ cinode = CIFS_I(inode);
+ if (CIFS_CACHE_READ(cinode))
+ lease[n++] = 'R';
+ if (CIFS_CACHE_HANDLE(cinode))
+ lease[n++] = 'H';
+ if (CIFS_CACHE_WRITE(cinode))
+ lease[n++] = 'W';
+ }
+ lease[n] = '\0';
+ seq_puts(m, " ");
+ if (n)
+ seq_printf(m, "%s", lease);
+ else
+ seq_puts(m, "NONE");
+
#ifdef CONFIG_CIFS_DEBUG2
- seq_printf(m, " %llu\n", cfile->fid.mid);
-#else
+ seq_printf(m, " %llu", cfile->fid.mid);
+#endif /* CONFIG_CIFS_DEBUG2 */
seq_printf(m, "\n");
-#endif /* CIFS_DEBUG2 */
}
spin_unlock(&tcon->open_file_lock);
}
@@ -304,8 +328,13 @@ static int cifs_debug_dirs_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &ses->tcon_list) {
tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
cfids = tcon->cfids;
+ if (!cfids)
+ continue;
spin_lock(&cfids->cfid_list_lock); /* check lock ordering */
- seq_printf(m, "Num entries: %d\n", cfids->num_entries);
+ seq_printf(m, "Num entries: %d, cached_dirents: %lu entries, %llu bytes\n",
+ cfids->num_entries,
+ (unsigned long)atomic_long_read(&cfids->total_dirents_entries),
+ (unsigned long long)atomic64_read(&cfids->total_dirents_bytes));
list_for_each_entry(cfid, &cfids->entries, entry) {
seq_printf(m, "0x%x 0x%llx 0x%llx %s",
tcon->tid,
@@ -316,11 +345,12 @@ static int cifs_debug_dirs_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\tvalid file info");
if (cfid->dirents.is_valid)
seq_printf(m, ", valid dirents");
+ if (!list_empty(&cfid->dirents.entries))
+ seq_printf(m, ", dirents: %lu entries, %lu bytes",
+ cfid->dirents.entries_count, cfid->dirents.bytes_used);
seq_printf(m, "\n");
}
spin_unlock(&cfids->cfid_list_lock);
-
-
}
}
}
@@ -347,6 +377,22 @@ static __always_inline const char *compression_alg_str(__le16 alg)
}
}
+static __always_inline const char *cipher_alg_str(__le16 cipher)
+{
+ switch (cipher) {
+ case SMB2_ENCRYPTION_AES128_CCM:
+ return "AES128-CCM";
+ case SMB2_ENCRYPTION_AES128_GCM:
+ return "AES128-GCM";
+ case SMB2_ENCRYPTION_AES256_CCM:
+ return "AES256-CCM";
+ case SMB2_ENCRYPTION_AES256_GCM:
+ return "AES256-GCM";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
struct mid_q_entry *mid_entry;
@@ -440,57 +486,55 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
sc = &server->smbd_conn->socket;
sp = &sc->parameters;
- seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
- "transport status: %x",
- server->smbd_conn->protocol,
- server->smbd_conn->socket.status);
- seq_printf(m, "\nConn receive_credit_max: %x "
- "send_credit_target: %x max_send_size: %x",
+ seq_printf(m, "\nSMBDirect protocol version: 0x%x "
+ "transport status: %s (%u)",
+ SMBDIRECT_V1,
+ smbdirect_socket_status_string(sc->status),
+ sc->status);
+ seq_printf(m, "\nConn receive_credit_max: %u "
+ "send_credit_target: %u max_send_size: %u",
sp->recv_credit_max,
sp->send_credit_target,
sp->max_send_size);
- seq_printf(m, "\nConn max_fragmented_recv_size: %x "
- "max_fragmented_send_size: %x max_receive_size:%x",
+ seq_printf(m, "\nConn max_fragmented_recv_size: %u "
+ "max_fragmented_send_size: %u max_receive_size:%u",
sp->max_fragmented_recv_size,
sp->max_fragmented_send_size,
sp->max_recv_size);
- seq_printf(m, "\nConn keep_alive_interval: %x "
- "max_readwrite_size: %x rdma_readwrite_threshold: %x",
+ seq_printf(m, "\nConn keep_alive_interval: %u "
+ "max_readwrite_size: %u rdma_readwrite_threshold: %u",
sp->keepalive_interval_msec * 1000,
sp->max_read_write_size,
- server->smbd_conn->rdma_readwrite_threshold);
- seq_printf(m, "\nDebug count_get_receive_buffer: %x "
- "count_put_receive_buffer: %x count_send_empty: %x",
- server->smbd_conn->count_get_receive_buffer,
- server->smbd_conn->count_put_receive_buffer,
- server->smbd_conn->count_send_empty);
- seq_printf(m, "\nRead Queue count_reassembly_queue: %x "
- "count_enqueue_reassembly_queue: %x "
- "count_dequeue_reassembly_queue: %x "
- "reassembly_data_length: %x "
- "reassembly_queue_length: %x",
- server->smbd_conn->count_reassembly_queue,
- server->smbd_conn->count_enqueue_reassembly_queue,
- server->smbd_conn->count_dequeue_reassembly_queue,
+ server->rdma_readwrite_threshold);
+ seq_printf(m, "\nDebug count_get_receive_buffer: %llu "
+ "count_put_receive_buffer: %llu count_send_empty: %llu",
+ sc->statistics.get_receive_buffer,
+ sc->statistics.put_receive_buffer,
+ sc->statistics.send_empty);
+ seq_printf(m, "\nRead Queue "
+ "count_enqueue_reassembly_queue: %llu "
+ "count_dequeue_reassembly_queue: %llu "
+ "reassembly_data_length: %u "
+ "reassembly_queue_length: %u",
+ sc->statistics.enqueue_reassembly_queue,
+ sc->statistics.dequeue_reassembly_queue,
sc->recv_io.reassembly.data_length,
sc->recv_io.reassembly.queue_length);
- seq_printf(m, "\nCurrent Credits send_credits: %x "
- "receive_credits: %x receive_credit_target: %x",
- atomic_read(&server->smbd_conn->send_credits),
- atomic_read(&server->smbd_conn->receive_credits),
- server->smbd_conn->receive_credit_target);
- seq_printf(m, "\nPending send_pending: %x ",
- atomic_read(&server->smbd_conn->send_pending));
- seq_printf(m, "\nReceive buffers count_receive_queue: %x ",
- server->smbd_conn->count_receive_queue);
- seq_printf(m, "\nMR responder_resources: %x "
- "max_frmr_depth: %x mr_type: %x",
- server->smbd_conn->responder_resources,
- server->smbd_conn->max_frmr_depth,
- server->smbd_conn->mr_type);
- seq_printf(m, "\nMR mr_ready_count: %x mr_used_count: %x",
- atomic_read(&server->smbd_conn->mr_ready_count),
- atomic_read(&server->smbd_conn->mr_used_count));
+ seq_printf(m, "\nCurrent Credits send_credits: %u "
+ "receive_credits: %u receive_credit_target: %u",
+ atomic_read(&sc->send_io.credits.count),
+ atomic_read(&sc->recv_io.credits.count),
+ sc->recv_io.credits.target);
+ seq_printf(m, "\nPending send_pending: %u ",
+ atomic_read(&sc->send_io.pending.count));
+ seq_printf(m, "\nMR responder_resources: %u "
+ "max_frmr_depth: %u mr_type: 0x%x",
+ sp->responder_resources,
+ sp->max_frmr_depth,
+ sc->mr_io.type);
+ seq_printf(m, "\nMR mr_ready_count: %u mr_used_count: %u",
+ atomic_read(&sc->mr_io.ready.count),
+ atomic_read(&sc->mr_io.used.count));
skip_rdma:
#endif
seq_printf(m, "\nNumber of credits: %d,%d,%d Dialect 0x%x",
@@ -539,6 +583,11 @@ skip_rdma:
else
seq_puts(m, "disabled (not supported by this server)");
+ /* Show negotiated encryption cipher, even if not required */
+ seq_puts(m, "\nEncryption: ");
+ if (server->cipher_type)
+ seq_printf(m, "Negotiated cipher (%s)", cipher_alg_str(server->cipher_type));
+
seq_printf(m, "\n\n\tSessions: ");
i = 0;
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
@@ -576,12 +625,8 @@ skip_rdma:
/* dump session id helpful for use with network trace */
seq_printf(m, " SessionId: 0x%llx", ses->Suid);
- if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) {
+ if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
seq_puts(m, " encrypted");
- /* can help in debugging to show encryption type */
- if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
- seq_puts(m, "(gcm256)");
- }
if (ses->sign)
seq_puts(m, " signed");
diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
index bc1c1e9b288a..43b86fa4d695 100644
--- a/fs/smb/client/cifs_spnego.c
+++ b/fs/smb/client/cifs_spnego.c
@@ -124,55 +124,44 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo,
dp = description;
/* start with version and hostname portion of UNC string */
spnego_key = ERR_PTR(-EINVAL);
- sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION,
- hostname);
- dp = description + strlen(description);
+ dp += sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION,
+ hostname);
/* add the server address */
if (server->dstaddr.ss_family == AF_INET)
- sprintf(dp, "ip4=%pI4", &sa->sin_addr);
+ dp += sprintf(dp, "ip4=%pI4", &sa->sin_addr);
else if (server->dstaddr.ss_family == AF_INET6)
- sprintf(dp, "ip6=%pI6", &sa6->sin6_addr);
+ dp += sprintf(dp, "ip6=%pI6", &sa6->sin6_addr);
else
goto out;
- dp = description + strlen(description);
-
/* for now, only sec=krb5 and sec=mskrb5 and iakerb are valid */
if (server->sec_kerberos)
- sprintf(dp, ";sec=krb5");
+ dp += sprintf(dp, ";sec=krb5");
else if (server->sec_mskerberos)
- sprintf(dp, ";sec=mskrb5");
+ dp += sprintf(dp, ";sec=mskrb5");
else if (server->sec_iakerb)
- sprintf(dp, ";sec=iakerb");
+ dp += sprintf(dp, ";sec=iakerb");
else {
cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
- sprintf(dp, ";sec=krb5");
+ dp += sprintf(dp, ";sec=krb5");
}
- dp = description + strlen(description);
- sprintf(dp, ";uid=0x%x",
- from_kuid_munged(&init_user_ns, sesInfo->linux_uid));
+ dp += sprintf(dp, ";uid=0x%x",
+ from_kuid_munged(&init_user_ns, sesInfo->linux_uid));
- dp = description + strlen(description);
- sprintf(dp, ";creduid=0x%x",
+ dp += sprintf(dp, ";creduid=0x%x",
from_kuid_munged(&init_user_ns, sesInfo->cred_uid));
- if (sesInfo->user_name) {
- dp = description + strlen(description);
- sprintf(dp, ";user=%s", sesInfo->user_name);
- }
+ if (sesInfo->user_name)
+ dp += sprintf(dp, ";user=%s", sesInfo->user_name);
- dp = description + strlen(description);
- sprintf(dp, ";pid=0x%x", current->pid);
+ dp += sprintf(dp, ";pid=0x%x", current->pid);
- if (sesInfo->upcall_target == UPTARGET_MOUNT) {
- dp = description + strlen(description);
- sprintf(dp, ";upcall_target=mount");
- } else {
- dp = description + strlen(description);
- sprintf(dp, ";upcall_target=app");
- }
+ if (sesInfo->upcall_target == UPTARGET_MOUNT)
+ dp += sprintf(dp, ";upcall_target=mount");
+ else
+ dp += sprintf(dp, ";upcall_target=app");
cifs_dbg(FYI, "key description = %s\n", description);
saved_cred = override_creds(spnego_cred);
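
Note: the rewrite leans on sprintf() returning the number of characters written (excluding the NUL), so the cursor advances in one step instead of recomputing strlen(description) after every append. A standalone userspace sketch of the pattern, with hypothetical values (the kernel code writes into a buffer sized by its caller):

#include <stdio.h>

int main(void)
{
	char description[128];
	char *dp = description;

	/* each sprintf() returns the bytes it wrote, advancing dp */
	dp += sprintf(dp, "ver=0x%x;host=%s;", 2, "srv01");
	dp += sprintf(dp, "ip4=%s", "192.0.2.1");
	dp += sprintf(dp, ";sec=krb5");

	printf("%s (len=%td)\n", description, dp - description);
	return 0;
}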
diff --git a/fs/smb/client/cifs_unicode.c b/fs/smb/client/cifs_unicode.c
index 4cc6e0896fad..f8659d36793f 100644
--- a/fs/smb/client/cifs_unicode.c
+++ b/fs/smb/client/cifs_unicode.c
@@ -629,6 +629,9 @@ cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len,
int len;
__le16 *dst;
+ if (!src)
+ return NULL;
+
len = cifs_local_to_utf16_bytes(src, maxlen, cp);
len += 2; /* NULL */
dst = kmalloc(len, GFP_KERNEL);
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 3cc686246908..7b7c8c38fdd0 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -22,8 +22,8 @@
#include <linux/highmem.h>
#include <linux/fips.h>
#include <linux/iov_iter.h>
-#include "../common/arc4.h"
#include <crypto/aead.h>
+#include <crypto/arc4.h>
static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
void *priv, void *priv2)
@@ -725,9 +725,9 @@ calc_seckey(struct cifs_ses *ses)
return -ENOMEM;
}
- cifs_arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE);
- cifs_arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key,
- CIFS_CPHTXT_SIZE);
+ arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE);
+ arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key,
+ CIFS_CPHTXT_SIZE);
/* make secondary_key/nonce as session key */
memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE);
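
Note: with CRYPTO_LIB_ARC4 now selected in Kconfig (see the first hunk of this series), the private cifs_arc4_* copy can be dropped in favor of the shared <crypto/arc4.h> helpers. A hedged sketch of the call sequence; the wrapper below is hypothetical, not part of the patch:

#include <crypto/arc4.h>
#include <linux/string.h>
#include <linux/types.h>

/* Seal a buffer with the ARC4 library API (illustrative only). */
static int seal_buffer(const u8 *key, unsigned int keylen,
		       u8 *out, const u8 *in, unsigned int len)
{
	struct arc4_ctx ctx;
	int rc;

	rc = arc4_setkey(&ctx, key, keylen);
	if (rc)
		return rc;
	arc4_crypt(&ctx, out, in, len);
	memzero_explicit(&ctx, sizeof(ctx));	/* don't leak key state */
	return 0;
}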
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 3bd85ab2deb1..1775c2b7528f 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -121,6 +121,46 @@ unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
"Range: 1 to 65000 seconds, 0 to disable caching dir contents");
+/* Module-wide total cached dirents (in bytes) across all tcons */
+atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);
+
+/*
+ * Write-only module parameter to drop all cached directory entries across
+ * all CIFS mounts. Echo a non-zero value to trigger.
+ */
+static void cifs_drop_all_dir_caches(void)
+{
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
+ invalidate_all_cached_dirs(tcon);
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+}
+
+static int cifs_param_set_drop_dir_cache(const char *val, const struct kernel_param *kp)
+{
+ bool bv;
+ int rc = kstrtobool(val, &bv);
+
+ if (rc)
+ return rc;
+ if (bv)
+ cifs_drop_all_dir_caches();
+ return 0;
+}
+
+module_param_call(drop_dir_cache, cifs_param_set_drop_dir_cache, NULL, NULL, 0200);
+MODULE_PARM_DESC(drop_dir_cache, "Write 1 to drop all cached directory entries across all CIFS mounts");
+
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
@@ -857,7 +897,7 @@ static int cifs_drop_inode(struct inode *inode)
/* no serverino => unconditional eviction */
return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
- generic_drop_inode(inode);
+ inode_generic_drop(inode);
}
static const struct super_operations cifs_super_ops = {
@@ -1358,6 +1398,20 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
truncate_setsize(target_inode, new_size);
fscache_resize_cookie(cifs_inode_cookie(target_inode),
new_size);
+ } else if (rc == -EOPNOTSUPP) {
+ /*
+ * copy_file_range syscall man page indicates EINVAL
+ * is returned e.g. when "fd_in and fd_out refer to the
+ * same file and the source and target ranges overlap."
+ * Test generic/157 was what showed these cases where
+ * we need to remap EOPNOTSUPP to EINVAL
+ */
+ if (off >= src_inode->i_size) {
+ rc = -EINVAL;
+ } else if (src_inode == target_inode) {
+ if (off + len > destoff)
+ rc = -EINVAL;
+ }
}
if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
target_cifsi->netfs.zero_point = new_size;
@@ -1881,7 +1935,9 @@ init_cifs(void)
cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
}
- cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ cifsiod_wq = alloc_workqueue("cifsiod",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!cifsiod_wq) {
rc = -ENOMEM;
goto out_clean_proc;
@@ -1909,28 +1965,32 @@ init_cifs(void)
}
cifsoplockd_wq = alloc_workqueue("cifsoplockd",
- WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!cifsoplockd_wq) {
rc = -ENOMEM;
goto out_destroy_fileinfo_put_wq;
}
deferredclose_wq = alloc_workqueue("deferredclose",
- WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!deferredclose_wq) {
rc = -ENOMEM;
goto out_destroy_cifsoplockd_wq;
}
serverclose_wq = alloc_workqueue("serverclose",
- WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!serverclose_wq) {
rc = -ENOMEM;
goto out_destroy_deferredclose_wq;
}
cfid_put_wq = alloc_workqueue("cfid_put_wq",
- WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!cfid_put_wq) {
rc = -ENOMEM;
goto out_destroy_serverclose_wq;
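
Note: module_param_call() binds arbitrary set/get handlers to a module parameter; a NULL getter plus mode 0200 makes the knob write-only, which is how drop_dir_cache above is wired: `echo 1 > /sys/module/cifs/parameters/drop_dir_cache` triggers the flush, and reads are disallowed. A minimal sketch of the same shape; the module and parameter names here are hypothetical:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static void do_flush_action(void)
{
	pr_info("flush action triggered\n");
}

/* set handler: parse the written value, act on truthy input */
static int action_set(const char *val, const struct kernel_param *kp)
{
	bool bv;
	int rc = kstrtobool(val, &bv);

	if (rc)
		return rc;
	if (bv)
		do_flush_action();
	return 0;
}

/* NULL getter + 0200: the parameter can be written but never read */
module_param_call(flush_action, action_set, NULL, NULL, 0200);
MODULE_PARM_DESC(flush_action, "Write 1 to trigger the flush action");
MODULE_LICENSE("GPL");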
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 487f39cff77e..3ce7c614ccc0 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -145,6 +145,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 55
-#define CIFS_VERSION "2.55"
+#define SMB3_PRODUCT_BUILD 56
+#define CIFS_VERSION "2.56"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index e6830ab3a546..3ac254e123dc 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -87,7 +87,7 @@
#define SMB_INTERFACE_POLL_INTERVAL 600
/* maximum number of PDUs in one compound */
-#define MAX_COMPOUND 7
+#define MAX_COMPOUND 10
/*
* Default number of credits to keep available for SMB3.
@@ -814,6 +814,13 @@ struct TCP_Server_Info {
unsigned int max_read;
unsigned int max_write;
unsigned int min_offload;
+ /*
+ * If payload is less than or equal to the threshold,
+ * use RDMA send/recv to send upper layer I/O.
+ * If payload is more than the threshold,
+ * use RDMA read/write through memory registration for I/O.
+ */
+ unsigned int rdma_readwrite_threshold;
unsigned int retrans;
struct {
bool requested; /* "compress" mount option set*/
@@ -1540,7 +1547,7 @@ struct cifs_io_subrequest {
struct kvec iov[2];
struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT
- struct smbd_mr *mr;
+ struct smbdirect_mr_io *mr;
#endif
struct cifs_credits credits;
};
@@ -1732,6 +1739,7 @@ struct mid_q_entry {
int mid_rc; /* rc for MID_RC */
__le16 command; /* smb command code */
unsigned int optype; /* operation type */
+ spinlock_t mid_lock;
bool wait_cancelled:1; /* Cancelled while waiting for response */
bool deleted_from_q:1; /* Whether Mid has been dequeued from pending_mid_q */
bool large_buf:1; /* if valid response, is pointer to large buf */
@@ -1881,9 +1889,12 @@ static inline bool is_replayable_error(int error)
/* cifs_get_writable_file() flags */
-#define FIND_WR_ANY 0
-#define FIND_WR_FSUID_ONLY 1
-#define FIND_WR_WITH_DELETE 2
+enum cifs_writable_file_flags {
+ FIND_WR_ANY = 0U,
+ FIND_WR_FSUID_ONLY = (1U << 0),
+ FIND_WR_WITH_DELETE = (1U << 1),
+ FIND_WR_NO_PENDING_DELETE = (1U << 2),
+};
#define MID_FREE 0
#define MID_REQUEST_ALLOCATED 1
@@ -2036,6 +2047,9 @@ require use of the stronger protocol */
* cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
* ->invalidHandle initiate_cifs_search
* ->oplock_break_cancelled
+ * mid_q_entry->mid_lock mid_q_entry->callback alloc_mid
+ * smb2_mid_entry_alloc
+ * (Any fields of mid_q_entry that will need protection)
****************************************************************************/
#ifdef DECLARE_GLOBALS_HERE
@@ -2339,6 +2353,8 @@ struct smb2_compound_vars {
struct kvec qi_iov;
struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+ struct kvec unlink_iov[SMB2_SET_INFO_IOV_SIZE];
+ struct kvec rename_iov[SMB2_SET_INFO_IOV_SIZE];
struct kvec close_iov;
struct smb2_file_rename_info_hdr rename_info;
struct smb2_file_link_info_hdr link_info;
@@ -2375,6 +2391,23 @@ static inline bool cifs_netbios_name(const char *name, size_t namelen)
return ret;
}
+/*
+ * Execute mid callback atomically - ensures callback runs exactly once
+ * and prevents sleeping in atomic context.
+ */
+static inline void mid_execute_callback(struct mid_q_entry *mid)
+{
+ void (*callback)(struct mid_q_entry *mid);
+
+ spin_lock(&mid->mid_lock);
+ callback = mid->callback;
+ mid->callback = NULL; /* Mark as executed */
+ spin_unlock(&mid->mid_lock);
+
+ if (callback)
+ callback(mid);
+}
+
#define CIFS_REPARSE_SUPPORT(tcon) \
((tcon)->posix_extensions || \
(le32_to_cpu((tcon)->fsAttrInfo.Attributes) & \
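
Note: mid_execute_callback() makes the callback pointer itself the "still in flight" marker: whichever path NULLs it under mid_lock owns the single invocation, and the cancel paths in cifstransport.c reuse the same test (if (midQ->callback)) before retargeting the mid to release_mid. A compact sketch of the idiom with generic names (lock initialization elided; illustrative only):

#include <linux/spinlock.h>

struct job {
	spinlock_t lock;
	void (*cb)(struct job *j);
};

static void job_run_once(struct job *j)
{
	void (*cb)(struct job *j);

	spin_lock(&j->lock);
	cb = j->cb;
	j->cb = NULL;		/* claim the single invocation */
	spin_unlock(&j->lock);

	if (cb)
		cb(j);		/* runs outside the lock: may sleep */
}

/* Cancel path: retarget only if nobody has dispatched the job yet. */
static bool job_try_redirect(struct job *j, void (*newcb)(struct job *j))
{
	bool redirected = false;

	spin_lock(&j->lock);
	if (j->cb) {
		j->cb = newcb;
		redirected = true;
	}
	spin_unlock(&j->lock);
	return redirected;
}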
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index c34c533b2efa..e8fba98690ce 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -312,8 +312,8 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
-extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
- const char *path);
+void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+ struct dentry *dentry);
extern void cifs_mark_open_handles_for_deleted_file(struct inode *inode,
const char *path);
diff --git a/fs/smb/client/cifstransport.c b/fs/smb/client/cifstransport.c
index 352dafb888dd..e98b95eff8c9 100644
--- a/fs/smb/client/cifstransport.c
+++ b/fs/smb/client/cifstransport.c
@@ -46,6 +46,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
memset(temp, 0, sizeof(struct mid_q_entry));
kref_init(&temp->refcount);
+ spin_lock_init(&temp->mid_lock);
temp->mid = get_mid(smb_buffer);
temp->pid = current->pid;
temp->command = cpu_to_le16(smb_buffer->Command);
@@ -345,16 +346,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(server, midQ);
if (rc != 0) {
send_cancel(server, &rqst, midQ);
- spin_lock(&server->mid_queue_lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
- midQ->mid_state == MID_RESPONSE_RECEIVED) {
+ spin_lock(&midQ->mid_lock);
+ if (midQ->callback) {
/* no longer considered to be "in-flight" */
midQ->callback = release_mid;
- spin_unlock(&server->mid_queue_lock);
+ spin_unlock(&midQ->mid_lock);
add_credits(server, &credits, 0);
return rc;
}
- spin_unlock(&server->mid_queue_lock);
+ spin_unlock(&midQ->mid_lock);
}
rc = cifs_sync_mid_result(midQ, server);
@@ -527,15 +527,14 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rc = wait_for_response(server, midQ);
if (rc) {
send_cancel(server, &rqst, midQ);
- spin_lock(&server->mid_queue_lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
- midQ->mid_state == MID_RESPONSE_RECEIVED) {
+ spin_lock(&midQ->mid_lock);
+ if (midQ->callback) {
/* no longer considered to be "in-flight" */
midQ->callback = release_mid;
- spin_unlock(&server->mid_queue_lock);
+ spin_unlock(&midQ->mid_lock);
return rc;
}
- spin_unlock(&server->mid_queue_lock);
+ spin_unlock(&midQ->mid_lock);
}
/* We got the response - restart system call. */
diff --git a/fs/smb/client/compress.c b/fs/smb/client/compress.c
index 766b4de13da7..db709f5cd2e1 100644
--- a/fs/smb/client/compress.c
+++ b/fs/smb/client/compress.c
@@ -155,58 +155,29 @@ static int cmp_bkt(const void *_a, const void *_b)
}
/*
- * TODO:
- * Support other iter types, if required.
- * Only ITER_XARRAY is supported for now.
+ * Collect some 2K samples with 2K gaps between them.
*/
-static int collect_sample(const struct iov_iter *iter, ssize_t max, u8 *sample)
+static int collect_sample(const struct iov_iter *source, ssize_t max, u8 *sample)
{
- struct folio *folios[16], *folio;
- unsigned int nr, i, j, npages;
- loff_t start = iter->xarray_start + iter->iov_offset;
- pgoff_t last, index = start / PAGE_SIZE;
- size_t len, off, foff;
- void *p;
- int s = 0;
-
- last = (start + max - 1) / PAGE_SIZE;
- do {
- nr = xa_extract(iter->xarray, (void **)folios, index, last, ARRAY_SIZE(folios),
- XA_PRESENT);
- if (nr == 0)
- return -EIO;
-
- for (i = 0; i < nr; i++) {
- folio = folios[i];
- npages = folio_nr_pages(folio);
- foff = start - folio_pos(folio);
- off = foff % PAGE_SIZE;
-
- for (j = foff / PAGE_SIZE; j < npages; j++) {
- size_t len2;
-
- len = min_t(size_t, max, PAGE_SIZE - off);
- len2 = min_t(size_t, len, SZ_2K);
-
- p = kmap_local_page(folio_page(folio, j));
- memcpy(&sample[s], p, len2);
- kunmap_local(p);
-
- s += len2;
-
- if (len2 < SZ_2K || s >= max - SZ_2K)
- return s;
-
- max -= len;
- if (max <= 0)
- return s;
-
- start += len;
- off = 0;
- index++;
- }
- }
- } while (nr == ARRAY_SIZE(folios));
+ struct iov_iter iter = *source;
+ size_t s = 0;
+
+ while (iov_iter_count(&iter) >= SZ_2K) {
+ size_t part = umin(umin(iov_iter_count(&iter), SZ_2K), max);
+ size_t n;
+
+ n = copy_from_iter(sample + s, part, &iter);
+ if (n != part)
+ return -EFAULT;
+
+ s += n;
+ max -= n;
+
+ if (iov_iter_count(&iter) < PAGE_SIZE - SZ_2K)
+ break;
+
+ iov_iter_advance(&iter, SZ_2K);
+ }
return s;
}
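
Note: the rewritten collect_sample() copies a 2K chunk and then skips the following 2K, so roughly half of the first max bytes land in the sample buffer no matter what backs the iterator (the removed code could only walk ITER_XARRAY). A userspace analogue of the stride, with SZ_2K open-coded and a simplified stop condition; illustrative only:

#include <stddef.h>
#include <string.h>

/* Copy 2 KiB samples separated by 2 KiB gaps from buf into sample,
 * producing at most max output bytes.
 */
static size_t sample_with_gaps(const unsigned char *buf, size_t len,
			       unsigned char *sample, size_t max)
{
	size_t in = 0, out = 0;

	while (len - in >= 2048 && out < max) {
		size_t part = max - out < 2048 ? max - out : 2048;

		memcpy(sample + out, buf + in, part);
		out += part;
		in += part + 2048;	/* skip the gap */
	}
	return out;
}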
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 587845a2452d..dd12f3eb61dc 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -335,7 +335,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
list_del_init(&mid->qhead);
- mid->callback(mid);
+ mid_execute_callback(mid);
release_mid(mid);
}
@@ -919,7 +919,7 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
list_del_init(&mid->qhead);
mid->mid_rc = mid_rc;
mid->mid_state = MID_RC;
- mid->callback(mid);
+ mid_execute_callback(mid);
release_mid(mid);
}
@@ -1117,7 +1117,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
list_del_init(&mid_entry->qhead);
- mid_entry->callback(mid_entry);
+ mid_execute_callback(mid_entry);
release_mid(mid_entry);
}
/* 1/8th of sec is more than enough time for them to exit */
@@ -1394,7 +1394,7 @@ next_pdu:
}
if (!mids[i]->multiRsp || mids[i]->multiEnd)
- mids[i]->callback(mids[i]);
+ mid_execute_callback(mids[i]);
release_mid(mids[i]);
} else if (server->ops->is_oplock_break &&
@@ -4205,7 +4205,6 @@ retry:
return 0;
}
- server->lstrp = jiffies;
server->tcpStatus = CifsInNegotiate;
server->neg_start = jiffies;
spin_unlock(&server->srv_lock);
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index 5223edf6d11a..fc67a6441c96 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -322,13 +322,14 @@ retry_open:
list_for_each_entry(parent_cfid, &tcon->cfids->entries, entry) {
if (parent_cfid->dentry == direntry->d_parent) {
cifs_dbg(FYI, "found a parent cached file handle\n");
- if (parent_cfid->has_lease && parent_cfid->time) {
+ if (is_valid_cached_dir(parent_cfid)) {
lease_flags
|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
memcpy(fid->parent_lease_key,
parent_cfid->fid.lease_key,
SMB2_LEASE_KEY_SIZE);
parent_cfid->dirents.is_valid = false;
+ parent_cfid->dirents.is_failed = true;
}
break;
}
@@ -484,8 +485,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
* in network traffic in the other paths.
*/
if (!(oflags & O_CREAT)) {
- struct dentry *res;
-
/*
* Check for hashed negative dentry. We have already revalidated
* the dentry and it is fine. No need to perform another lookup.
@@ -493,11 +492,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
if (!d_in_lookup(direntry))
return -ENOENT;
- res = cifs_lookup(inode, direntry, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- return finish_no_open(file, res);
+ return finish_no_open(file, cifs_lookup(inode, direntry, 0));
}
xid = get_xid();
@@ -683,6 +678,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
const char *full_path;
void *page;
int retry_count = 0;
+ struct cached_fid *cfid = NULL;
xid = get_xid();
@@ -722,6 +718,28 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
cifs_dbg(FYI, "non-NULL inode in lookup\n");
} else {
cifs_dbg(FYI, "NULL inode in lookup\n");
+
+ /*
+ * We can only rely on negative dentries having the same
+ * spelling as the cached dirent if case insensitivity is
+ * forced on mount.
+ *
+ * XXX: if servers correctly announce Case Sensitivity Search
+ * on GetInfo of FileFSAttributeInformation, then we can take
+ * correct action even if case insensitive is not forced on
+ * mount.
+ */
+ if (pTcon->nocase && !open_cached_dir_by_dentry(pTcon, direntry->d_parent, &cfid)) {
+ /*
+ * dentry is negative and parent is fully cached:
+ * we can assume file does not exist
+ */
+ if (cfid->dirents.is_valid) {
+ close_cached_dir(cfid);
+ goto out;
+ }
+ close_cached_dir(cfid);
+ }
}
cifs_dbg(FYI, "Full path: %s inode = 0x%p\n",
full_path, d_inode(direntry));
@@ -755,6 +773,8 @@ again:
}
newInode = ERR_PTR(rc);
}
+
+out:
free_dentry_path(page);
cifs_put_tlink(tlink);
free_xid(xid);
@@ -765,7 +785,8 @@ static int
cifs_d_revalidate(struct inode *dir, const struct qstr *name,
struct dentry *direntry, unsigned int flags)
{
- struct inode *inode;
+ struct inode *inode = NULL;
+ struct cached_fid *cfid;
int rc;
if (flags & LOOKUP_RCU)
@@ -812,6 +833,21 @@ cifs_d_revalidate(struct inode *dir, const struct qstr *name,
return 1;
}
+ } else {
+ struct cifs_sb_info *cifs_sb = CIFS_SB(dir->i_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+
+ if (!open_cached_dir_by_dentry(tcon, direntry->d_parent, &cfid)) {
+ /*
+ * dentry is negative and parent is fully cached:
+ * we can assume file does not exist
+ */
+ if (cfid->dirents.is_valid) {
+ close_cached_dir(cfid);
+ return 1;
+ }
+ close_cached_dir(cfid);
+ }
}
/*
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 186e061068be..a5ed742afa00 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -97,8 +97,12 @@ retry:
cifs_trace_rw_credits_write_prepare);
#ifdef CONFIG_CIFS_SMB_DIRECT
- if (server->smbd_conn)
- stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
+ if (server->smbd_conn) {
+ const struct smbdirect_socket_parameters *sp =
+ smbd_get_parameters(server->smbd_conn);
+
+ stream->sreq_max_segs = sp->max_frmr_depth;
+ }
#endif
}
@@ -187,8 +191,12 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
cifs_trace_rw_credits_read_submit);
#ifdef CONFIG_CIFS_SMB_DIRECT
- if (server->smbd_conn)
- rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
+ if (server->smbd_conn) {
+ const struct smbdirect_socket_parameters *sp =
+ smbd_get_parameters(server->smbd_conn);
+
+ rreq->io_streams[0].sreq_max_segs = sp->max_frmr_depth;
+ }
#endif
return 0;
}
@@ -998,7 +1006,10 @@ int cifs_open(struct inode *inode, struct file *file)
/* Get the cached handle as SMB2 close is deferred */
if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
- rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
+ rc = cifs_get_writable_path(tcon, full_path,
+ FIND_WR_FSUID_ONLY |
+ FIND_WR_NO_PENDING_DELETE,
+ &cfile);
} else {
rc = cifs_get_readable_path(tcon, full_path, &cfile);
}
@@ -2530,6 +2541,9 @@ refind_writable:
continue;
if (with_delete && !(open_file->fid.access & DELETE))
continue;
+ if ((flags & FIND_WR_NO_PENDING_DELETE) &&
+ open_file->status_file_deleted)
+ continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
@@ -2647,6 +2661,16 @@ cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
*ret_file = find_readable_file(cinode, 0);
+ if (*ret_file) {
+ spin_lock(&cinode->open_file_lock);
+ if ((*ret_file)->status_file_deleted) {
+ spin_unlock(&cinode->open_file_lock);
+ cifsFileInfo_put(*ret_file);
+ *ret_file = NULL;
+ } else {
+ spin_unlock(&cinode->open_file_lock);
+ }
+ }
return *ret_file ? 0 : -ENOENT;
}
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 072383899e81..e60927b2a7c8 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -773,16 +773,14 @@ static int smb3_fs_context_parse_monolithic(struct fs_context *fc,
}
- len = 0;
value = strchr(key, '=');
if (value) {
if (value == key)
continue;
*value++ = 0;
- len = strlen(value);
}
- ret = vfs_parse_fs_string(fc, key, value, len);
+ ret = vfs_parse_fs_string(fc, key, value);
if (ret < 0)
break;
}
@@ -1820,6 +1818,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
goto cifs_parse_mount_err;
}
+ /*
+ * Multichannel is not meaningful if max_channels is 1.
+ * Force multichannel to false to ensure consistent configuration.
+ */
+ if (ctx->multichannel && ctx->max_channels == 1)
+ ctx->multichannel = false;
+
return 0;
cifs_parse_mount_err:
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 75be4b46bc6f..8bb544be401e 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -1931,7 +1931,7 @@ cifs_drop_nlink(struct inode *inode)
* but will return the EACCES to the caller. Note that the VFS does not call
* unlink on negative dentries currently.
*/
-int cifs_unlink(struct inode *dir, struct dentry *dentry)
+static int __cifs_unlink(struct inode *dir, struct dentry *dentry, bool sillyrename)
{
int rc = 0;
unsigned int xid;
@@ -1943,15 +1943,24 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct tcon_link *tlink;
struct cifs_tcon *tcon;
+ __u32 dosattr = 0, origattr = 0;
struct TCP_Server_Info *server;
struct iattr *attrs = NULL;
- __u32 dosattr = 0, origattr = 0;
+ bool rehash = false;
cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry);
if (unlikely(cifs_forced_shutdown(cifs_sb)))
return -EIO;
+ /* Unhash dentry in advance to prevent any concurrent opens */
+ spin_lock(&dentry->d_lock);
+ if (!d_unhashed(dentry)) {
+ __d_drop(dentry);
+ rehash = true;
+ }
+ spin_unlock(&dentry->d_lock);
+
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
@@ -1975,7 +1984,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
}
netfs_wait_for_outstanding_io(inode);
- cifs_close_deferred_file_under_dentry(tcon, full_path);
+ cifs_close_deferred_file_under_dentry(tcon, dentry);
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
@@ -1994,7 +2003,24 @@ retry_std_delete:
goto psx_del_no_retry;
}
- rc = server->ops->unlink(xid, tcon, full_path, cifs_sb, dentry);
+ /* For SMB2+, if the file is open, we always perform a silly rename.
+ *
+ * We check for d_count() right after calling
+ * cifs_close_deferred_file_under_dentry() to make sure that the
+ * dentry's refcount gets dropped in case the file had any deferred
+ * close.
+ */
+ if (!sillyrename && server->vals->protocol_id > SMB10_PROT_ID) {
+ spin_lock(&dentry->d_lock);
+ if (d_count(dentry) > 1)
+ sillyrename = true;
+ spin_unlock(&dentry->d_lock);
+ }
+
+ if (sillyrename)
+ rc = -EBUSY;
+ else
+ rc = server->ops->unlink(xid, tcon, full_path, cifs_sb, dentry);
psx_del_no_retry:
if (!rc) {
@@ -2003,7 +2029,8 @@ psx_del_no_retry:
cifs_drop_nlink(inode);
}
} else if (rc == -ENOENT) {
- d_drop(dentry);
+ if (simple_positive(dentry))
+ d_delete(dentry);
} else if (rc == -EBUSY) {
if (server->ops->rename_pending_delete) {
rc = server->ops->rename_pending_delete(full_path,
@@ -2056,9 +2083,16 @@ unlink_out:
kfree(attrs);
free_xid(xid);
cifs_put_tlink(tlink);
+ if (rehash)
+ d_rehash(dentry);
return rc;
}
+int cifs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ return __cifs_unlink(dir, dentry, false);
+}
+
static int
cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
const char *full_path, struct cifs_sb_info *cifs_sb,
@@ -2346,14 +2380,16 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb);
cifs_put_tlink(tlink);
+ cifsInode = CIFS_I(d_inode(direntry));
+
if (!rc) {
+ set_bit(CIFS_INO_DELETE_PENDING, &cifsInode->flags);
spin_lock(&d_inode(direntry)->i_lock);
i_size_write(d_inode(direntry), 0);
clear_nlink(d_inode(direntry));
spin_unlock(&d_inode(direntry)->i_lock);
}
- cifsInode = CIFS_I(d_inode(direntry));
/* force revalidate to go get info when needed */
cifsInode->time = 0;
@@ -2446,8 +2482,11 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
do_rename_exit:
- if (rc == 0)
+ if (rc == 0) {
d_move(from_dentry, to_dentry);
+ /* Force a new lookup */
+ d_drop(from_dentry);
+ }
cifs_put_tlink(tlink);
return rc;
}
@@ -2458,10 +2497,12 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
struct dentry *target_dentry, unsigned int flags)
{
const char *from_name, *to_name;
+ struct TCP_Server_Info *server;
void *page1, *page2;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *tcon;
+ bool rehash = false;
unsigned int xid;
int rc, tmprc;
int retry_count = 0;
@@ -2477,10 +2518,22 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
if (unlikely(cifs_forced_shutdown(cifs_sb)))
return -EIO;
+ /*
+ * Prevent any concurrent opens on the target by unhashing the dentry.
+ * VFS already unhashes the target when renaming directories.
+ */
+ if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) {
+ if (!d_unhashed(target_dentry)) {
+ d_drop(target_dentry);
+ rehash = true;
+ }
+ }
+
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
+ server = tcon->ses->server;
page1 = alloc_dentry_path();
page2 = alloc_dentry_path();
@@ -2498,10 +2551,10 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
goto cifs_rename_exit;
}
- cifs_close_deferred_file_under_dentry(tcon, from_name);
+ cifs_close_deferred_file_under_dentry(tcon, source_dentry);
if (d_inode(target_dentry) != NULL) {
netfs_wait_for_outstanding_io(d_inode(target_dentry));
- cifs_close_deferred_file_under_dentry(tcon, to_name);
+ cifs_close_deferred_file_under_dentry(tcon, target_dentry);
}
rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
@@ -2518,6 +2571,8 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
}
}
+ if (!rc)
+ rehash = false;
/*
* No-replace is the natural behavior for CIFS, so skip unlink hacks.
*/
@@ -2565,23 +2620,61 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
unlink_target:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
-
- /* Try unlinking the target dentry if it's not negative */
- if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
- if (d_is_dir(target_dentry))
- tmprc = cifs_rmdir(target_dir, target_dentry);
- else
- tmprc = cifs_unlink(target_dir, target_dentry);
- if (tmprc)
- goto cifs_rename_exit;
- rc = cifs_do_rename(xid, source_dentry, from_name,
- target_dentry, to_name);
+ if (d_really_is_positive(target_dentry)) {
+ if (!rc) {
+ struct inode *inode = d_inode(target_dentry);
+ /*
+ * Samba and ksmbd servers allow renaming a target
+ * directory that is open, so make sure to update
+ * ->i_nlink and then mark it as delete pending.
+ */
+ if (S_ISDIR(inode->i_mode)) {
+ drop_cached_dir_by_name(xid, tcon, to_name, cifs_sb);
+ spin_lock(&inode->i_lock);
+ i_size_write(inode, 0);
+ clear_nlink(inode);
+ spin_unlock(&inode->i_lock);
+ set_bit(CIFS_INO_DELETE_PENDING, &CIFS_I(inode)->flags);
+ CIFS_I(inode)->time = 0; /* force reval */
+ inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ }
+ } else if (rc == -EACCES || rc == -EEXIST) {
+ /*
+ * Rename failed, possibly due to a busy target.
+ * Retry it by unlinking the target first.
+ */
+ if (d_is_dir(target_dentry)) {
+ tmprc = cifs_rmdir(target_dir, target_dentry);
+ } else {
+ tmprc = __cifs_unlink(target_dir, target_dentry,
+ server->vals->protocol_id > SMB10_PROT_ID);
+ }
+ if (tmprc) {
+ /*
+ * Some servers will return STATUS_ACCESS_DENIED
+ * or STATUS_DIRECTORY_NOT_EMPTY when failing to
+ * rename a non-empty directory. Make sure to
+ * propagate the appropriate error back to
+ * userspace.
+ */
+ if (tmprc == -EEXIST || tmprc == -ENOTEMPTY)
+ rc = tmprc;
+ goto cifs_rename_exit;
+ }
+ rc = cifs_do_rename(xid, source_dentry, from_name,
+ target_dentry, to_name);
+ if (!rc)
+ rehash = false;
+ }
}
/* force revalidate to go get info when needed */
CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
cifs_rename_exit:
+ if (rehash)
+ d_rehash(target_dentry);
kfree(info_buf_source);
free_dentry_path(page2);
free_dentry_path(page1);
@@ -2599,6 +2692,8 @@ cifs_dentry_needs_reval(struct dentry *dentry)
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct cached_fid *cfid = NULL;
+ if (test_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags))
+ return false;
if (cifs_i->time == 0)
return true;
@@ -2609,7 +2704,7 @@ cifs_dentry_needs_reval(struct dentry *dentry)
return true;
if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) {
- if (cfid->time && cifs_i->time > cfid->time) {
+ if (cifs_i->time > cfid->time) {
close_cached_dir(cfid);
return false;
}
@@ -2749,7 +2844,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
}
cifs_dbg(FYI, "Update attributes: %s inode 0x%p count %d dentry: 0x%p d_time %ld jiffies %ld\n",
- full_path, inode, inode->i_count.counter,
+ full_path, inode, icount_read(inode),
dentry, cifs_get_time(dentry), jiffies);
again:
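
Note: both __cifs_unlink() and cifs_rename2() above now share one shape: unhash the victim dentry before talking to the server so no concurrent open can race the operation, then rehash once the outcome is known (the rename path clears the flag on success). Distilled into a hedged sketch; do_server_side_remove() is a hypothetical stand-in for the actual SMB call:

#include <linux/dcache.h>
#include <linux/spinlock.h>

static int remove_with_unhash(struct inode *dir, struct dentry *dentry)
{
	bool rehash = false;
	int rc;

	spin_lock(&dentry->d_lock);
	if (!d_unhashed(dentry)) {
		__d_drop(dentry);	/* block concurrent lookups/opens */
		rehash = true;
	}
	spin_unlock(&dentry->d_lock);

	rc = do_server_side_remove(dir, dentry);	/* hypothetical */

	if (rc && rehash)
		d_rehash(dentry);	/* failed: make it visible again */
	return rc;
}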
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index da23cc12a52c..dda6dece802a 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -832,33 +832,28 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
kfree(tmp_list);
}
}
-void
-cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+
+void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon,
+ struct dentry *dentry)
{
- struct cifsFileInfo *cfile;
struct file_list *tmp_list, *tmp_next_list;
- void *page;
- const char *full_path;
+ struct cifsFileInfo *cfile;
LIST_HEAD(file_head);
- page = alloc_dentry_path();
spin_lock(&tcon->open_file_lock);
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
- full_path = build_path_from_dentry(cfile->dentry, page);
- if (strstr(full_path, path)) {
- if (delayed_work_pending(&cfile->deferred)) {
- if (cancel_delayed_work(&cfile->deferred)) {
- spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
- cifs_del_deferred_close(cfile);
- spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
-
- tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
- if (tmp_list == NULL)
- break;
- tmp_list->cfile = cfile;
- list_add_tail(&tmp_list->list, &file_head);
- }
- }
+ if ((cfile->dentry == dentry) &&
+ delayed_work_pending(&cfile->deferred) &&
+ cancel_delayed_work(&cfile->deferred)) {
+ spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ cifs_del_deferred_close(cfile);
+ spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
}
}
spin_unlock(&tcon->open_file_lock);
@@ -868,7 +863,6 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
list_del(&tmp_list->list);
kfree(tmp_list);
}
- free_dentry_path(page);
}
/*
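
Note: the dentry comparison replaces a strstr() match on the freshly built path, which treated any path prefix as a hit and cost a path allocation per open file. A tiny userspace demo of the false positive the old code allowed:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *open_file_path = "/share/report-2024.tmp";
	const char *unlinked_path  = "/share/report-2024";

	/* substring match: an unrelated file wrongly qualifies */
	if (strstr(open_file_path, unlinked_path))
		printf("would wrongly close %s\n", open_file_path);
	return 0;
}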
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 4e5460206397..f0ce26622a14 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -874,39 +874,42 @@ static void finished_cached_dirents_count(struct cached_dirents *cde,
cde->is_valid = 1;
}
-static void add_cached_dirent(struct cached_dirents *cde,
- struct dir_context *ctx,
- const char *name, int namelen,
- struct cifs_fattr *fattr,
- struct file *file)
+static bool add_cached_dirent(struct cached_dirents *cde,
+ struct dir_context *ctx, const char *name,
+ int namelen, struct cifs_fattr *fattr,
+ struct file *file)
{
struct cached_dirent *de;
if (cde->file != file)
- return;
+ return false;
if (cde->is_valid || cde->is_failed)
- return;
+ return false;
if (ctx->pos != cde->pos) {
cde->is_failed = 1;
- return;
+ return false;
}
de = kzalloc(sizeof(*de), GFP_ATOMIC);
if (de == NULL) {
cde->is_failed = 1;
- return;
+ return false;
}
de->namelen = namelen;
de->name = kstrndup(name, namelen, GFP_ATOMIC);
if (de->name == NULL) {
kfree(de);
cde->is_failed = 1;
- return;
+ return false;
}
de->pos = ctx->pos;
memcpy(&de->fattr, fattr, sizeof(struct cifs_fattr));
list_add_tail(&de->entry, &cde->entries);
+ /* update accounting */
+ cde->entries_count++;
+ cde->bytes_used += sizeof(*de) + (size_t)namelen + 1;
+ return true;
}
static bool cifs_dir_emit(struct dir_context *ctx,
@@ -915,7 +918,8 @@ static bool cifs_dir_emit(struct dir_context *ctx,
struct cached_fid *cfid,
struct file *file)
{
- bool rc;
+ size_t delta_bytes = 0;
+ bool rc, added = false;
ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
rc = dir_emit(ctx, name, namelen, ino, fattr->cf_dtype);
@@ -923,10 +927,20 @@ static bool cifs_dir_emit(struct dir_context *ctx,
return rc;
if (cfid) {
+ /* Cost of this entry */
+ delta_bytes = sizeof(struct cached_dirent) + (size_t)namelen + 1;
+
mutex_lock(&cfid->dirents.de_mutex);
- add_cached_dirent(&cfid->dirents, ctx, name, namelen,
- fattr, file);
+ added = add_cached_dirent(&cfid->dirents, ctx, name, namelen,
+ fattr, file);
mutex_unlock(&cfid->dirents.de_mutex);
+
+ if (added) {
+ /* per-tcon then global for consistency with free path */
+ atomic64_add((long long)delta_bytes, &cfid->cfids->total_dirents_bytes);
+ atomic_long_inc(&cfid->cfids->total_dirents_entries);
+ atomic64_add((long long)delta_bytes, &cifs_dircache_bytes_used);
+ }
}
return rc;
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 7869cec58f52..10c84c095fe7 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -278,7 +278,7 @@ static int detect_directory_symlink_target(struct cifs_sb_info *cifs_sb,
}
/*
- * For absolute symlinks it is not possible to determinate
+ * For absolute symlinks it is not possible to determine
* if it should point to directory or file.
*/
if (symname[0] == '/') {
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index 893a1ea8c000..a02d41d1ce4a 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -1005,7 +1005,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
rc = -EOPNOTSUPP;
}
- /* Fallback to SMB_COM_SETATTR command when absolutelty needed. */
+ /* Fallback to SMB_COM_SETATTR command when absolutely needed. */
if (rc == -EOPNOTSUPP) {
cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n");
rc = SMBSetInformation(xid, tcon, full_path,
@@ -1039,7 +1039,7 @@ set_via_filehandle:
cifsFileInfo_put(open_file);
/*
- * Setting the read-only bit is not honered on non-NT servers when done
+ * Setting the read-only bit is not honored on non-NT servers when done
* via open-semantics. So for setting it, use SMB_COM_SETATTR command.
* This command works only after the file is closed, so use it only when
* operation was called without the filehandle.
diff --git a/fs/smb/client/smb2glob.h b/fs/smb/client/smb2glob.h
index 224495322a05..e56e4d402f13 100644
--- a/fs/smb/client/smb2glob.h
+++ b/fs/smb/client/smb2glob.h
@@ -30,10 +30,9 @@ enum smb2_compound_ops {
SMB2_OP_QUERY_DIR,
SMB2_OP_MKDIR,
SMB2_OP_RENAME,
- SMB2_OP_DELETE,
SMB2_OP_HARDLINK,
SMB2_OP_SET_EOF,
- SMB2_OP_RMDIR,
+ SMB2_OP_UNLINK,
SMB2_OP_POSIX_QUERY_INFO,
SMB2_OP_SET_REPARSE,
SMB2_OP_GET_REPARSE,
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 2a0316c514e4..0985db9f86e5 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -207,8 +207,10 @@ replay_again:
server = cifs_pick_channel(ses);
vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
- if (vars == NULL)
- return -ENOMEM;
+ if (vars == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
rqst = &vars->rqst[0];
rsp_iov = &vars->rsp_iov[0];
@@ -344,9 +346,6 @@ replay_again:
trace_smb3_posix_query_info_compound_enter(xid, tcon->tid,
ses->Suid, full_path);
break;
- case SMB2_OP_DELETE:
- trace_smb3_delete_enter(xid, tcon->tid, ses->Suid, full_path);
- break;
case SMB2_OP_MKDIR:
/*
* Directories are created through parameters in the
@@ -354,23 +353,40 @@ replay_again:
*/
trace_smb3_mkdir_enter(xid, tcon->tid, ses->Suid, full_path);
break;
- case SMB2_OP_RMDIR:
- rqst[num_rqst].rq_iov = &vars->si_iov[0];
+ case SMB2_OP_UNLINK:
+ rqst[num_rqst].rq_iov = vars->unlink_iov;
rqst[num_rqst].rq_nvec = 1;
size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
data[0] = &delete_pending[0];
- rc = SMB2_set_info_init(tcon, server,
- &rqst[num_rqst], COMPOUND_FID,
- COMPOUND_FID, current->tgid,
- FILE_DISPOSITION_INFORMATION,
- SMB2_O_INFO_FILE, 0, data, size);
- if (rc)
+ if (cfile) {
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
+ cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid,
+ current->tgid,
+ FILE_DISPOSITION_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ data, size);
+ } else {
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
+ COMPOUND_FID,
+ COMPOUND_FID,
+ current->tgid,
+ FILE_DISPOSITION_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ data, size);
+ }
+ if (!rc && (!cfile || num_rqst > 1)) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst]);
+ } else if (rc) {
goto finished;
- smb2_set_next_command(tcon, &rqst[num_rqst]);
- smb2_set_related(&rqst[num_rqst++]);
- trace_smb3_rmdir_enter(xid, tcon->tid, ses->Suid, full_path);
+ }
+ num_rqst++;
+ trace_smb3_unlink_enter(xid, tcon->tid, ses->Suid, full_path);
break;
case SMB2_OP_SET_EOF:
rqst[num_rqst].rq_iov = &vars->si_iov[0];
@@ -440,7 +456,7 @@ replay_again:
ses->Suid, full_path);
break;
case SMB2_OP_RENAME:
- rqst[num_rqst].rq_iov = &vars->si_iov[0];
+ rqst[num_rqst].rq_iov = vars->rename_iov;
rqst[num_rqst].rq_nvec = 2;
len = in_iov[i].iov_len;
@@ -671,7 +687,7 @@ finished:
}
for (i = 0; i < num_cmds; i++) {
- char *buf = rsp_iov[i + i].iov_base;
+ char *buf = rsp_iov[i + 1].iov_base;
if (buf && resp_buftype[i + 1] != CIFS_NO_BUFFER)
rc = server->ops->map_error(buf, false);
@@ -730,19 +746,6 @@ finished:
trace_smb3_posix_query_info_compound_done(xid, tcon->tid,
ses->Suid);
break;
- case SMB2_OP_DELETE:
- if (rc)
- trace_smb3_delete_err(xid, tcon->tid, ses->Suid, rc);
- else {
- /*
- * If dentry (hence, inode) is NULL, lease break is going to
- * take care of degrading leases on handles for deleted files.
- */
- if (inode)
- cifs_mark_open_handles_for_deleted_file(inode, full_path);
- trace_smb3_delete_done(xid, tcon->tid, ses->Suid);
- }
- break;
case SMB2_OP_MKDIR:
if (rc)
trace_smb3_mkdir_err(xid, tcon->tid, ses->Suid, rc);
@@ -763,11 +766,11 @@ finished:
trace_smb3_rename_done(xid, tcon->tid, ses->Suid);
SMB2_set_info_free(&rqst[num_rqst++]);
break;
- case SMB2_OP_RMDIR:
- if (rc)
- trace_smb3_rmdir_err(xid, tcon->tid, ses->Suid, rc);
+ case SMB2_OP_UNLINK:
+ if (!rc)
+ trace_smb3_unlink_done(xid, tcon->tid, ses->Suid);
else
- trace_smb3_rmdir_done(xid, tcon->tid, ses->Suid);
+ trace_smb3_unlink_err(xid, tcon->tid, ses->Suid, rc);
SMB2_set_info_free(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_EOF:
@@ -864,6 +867,7 @@ finished:
smb2_should_replay(tcon, &retries, &cur_sleep))
goto replay_again;
+out:
if (cfile)
cifsFileInfo_put(cfile);
@@ -1163,7 +1167,7 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
FILE_OPEN, CREATE_NOT_FILE, ACL_NO_MODE);
return smb2_compound_op(xid, tcon, cifs_sb,
name, &oparms, NULL,
- &(int){SMB2_OP_RMDIR}, 1,
+ &(int){SMB2_OP_UNLINK}, 1,
NULL, NULL, NULL, NULL);
}
@@ -1171,21 +1175,107 @@ int
smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb, struct dentry *dentry)
{
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ __le16 *utf16_path __free(kfree) = NULL;
+ int retries = 0, cur_sleep = 1;
+ struct TCP_Server_Info *server;
struct cifs_open_parms oparms;
+ struct smb2_create_req *creq;
+ struct inode *inode = NULL;
+ struct smb_rqst rqst[2];
+ struct kvec rsp_iov[2];
+ struct kvec close_iov;
+ int resp_buftype[2];
+ struct cifs_fid fid;
+ int flags = 0;
+ __u8 oplock;
+ int rc;
- oparms = CIFS_OPARMS(cifs_sb, tcon, name,
- DELETE, FILE_OPEN,
- CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
- ACL_NO_MODE);
- int rc = smb2_compound_op(xid, tcon, cifs_sb, name, &oparms,
- NULL, &(int){SMB2_OP_DELETE}, 1,
- NULL, NULL, NULL, dentry);
- if (rc == -EINVAL) {
- cifs_dbg(FYI, "invalid lease key, resending request without lease");
- rc = smb2_compound_op(xid, tcon, cifs_sb, name, &oparms,
- NULL, &(int){SMB2_OP_DELETE}, 1,
- NULL, NULL, NULL, NULL);
+ utf16_path = cifs_convert_path_to_utf16(name, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+again:
+ oplock = SMB2_OPLOCK_LEVEL_NONE;
+ server = cifs_pick_channel(tcon->ses);
+
+ memset(rqst, 0, sizeof(rqst));
+ memset(resp_buftype, 0, sizeof(resp_buftype));
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ rqst[0].rq_iov = open_iov;
+ rqst[0].rq_nvec = ARRAY_SIZE(open_iov);
+
+ oparms = CIFS_OPARMS(cifs_sb, tcon, name, DELETE | FILE_READ_ATTRIBUTES,
+ FILE_OPEN, CREATE_DELETE_ON_CLOSE |
+ OPEN_REPARSE_POINT, ACL_NO_MODE);
+ oparms.fid = &fid;
+
+ if (dentry) {
+ inode = d_inode(dentry);
+ if (CIFS_I(inode)->lease_granted && server->ops->get_lease_key) {
+ oplock = SMB2_OPLOCK_LEVEL_LEASE;
+ server->ops->get_lease_key(inode, &fid);
+ }
+ }
+
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
+ if (rc)
+ goto err_free;
+ smb2_set_next_command(tcon, &rqst[0]);
+ creq = rqst[0].rq_iov[0].iov_base;
+ creq->ShareAccess = FILE_SHARE_DELETE_LE;
+
+ rqst[1].rq_iov = &close_iov;
+ rqst[1].rq_nvec = 1;
+
+ rc = SMB2_close_init(tcon, server, &rqst[1],
+ COMPOUND_FID, COMPOUND_FID, false);
+ smb2_set_related(&rqst[1]);
+ if (rc)
+ goto err_free;
+
+ if (retries) {
+ for (int i = 0; i < ARRAY_SIZE(rqst); i++)
+ smb2_set_replay(server, &rqst[i]);
}
+
+ rc = compound_send_recv(xid, tcon->ses, server, flags,
+ ARRAY_SIZE(rqst), rqst,
+ resp_buftype, rsp_iov);
+ SMB2_open_free(&rqst[0]);
+ SMB2_close_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto again;
+
+ /* Retry compound request without lease */
+ if (rc == -EINVAL && dentry) {
+ dentry = NULL;
+ retries = 0;
+ cur_sleep = 1;
+ goto again;
+ }
+ /*
+ * If dentry (hence, inode) is NULL, lease break is going to
+ * take care of degrading leases on handles for deleted files.
+ */
+ if (!rc && inode)
+ cifs_mark_open_handles_for_deleted_file(inode, name);
+
+ return rc;
+
+err_free:
+ SMB2_open_free(&rqst[0]);
+ SMB2_close_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
return rc;
}
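The rewritten smb2_unlink() is now a self-contained two-request compound: an SMB2 CREATE with CREATE_DELETE_ON_CLOSE chained to an immediate CLOSE, so the server performs the unlink when the handle drops. Condensed to the pattern (declarations and error handling elided, as in the function above):

/* Sketch: open-with-delete-on-close chained to close */
rqst[0].rq_iov = open_iov;
rqst[0].rq_nvec = ARRAY_SIZE(open_iov);
rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, utf16_path);
smb2_set_next_command(tcon, &rqst[0]);		/* chain next PDU */

rqst[1].rq_iov = &close_iov;
rqst[1].rq_nvec = 1;
rc = SMB2_close_init(tcon, server, &rqst[1],
		     COMPOUND_FID, COMPOUND_FID, false);
smb2_set_related(&rqst[1]);			/* reuse fid from rqst[0] */

rc = compound_send_recv(xid, tcon->ses, server, flags,
			ARRAY_SIZE(rqst), rqst, resp_buftype, rsp_iov);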
@@ -1438,3 +1528,113 @@ out:
cifs_free_open_info(&data);
return rc;
}
+
+static inline __le16 *utf16_smb2_path(struct cifs_sb_info *cifs_sb,
+ const char *name, size_t namelen)
+{
+ int len;
+
+ if (*name == '\\' ||
+ (cifs_sb_master_tlink(cifs_sb) &&
+ cifs_sb_master_tcon(cifs_sb)->posix_extensions && *name == '/'))
+ name++;
+ return cifs_strndup_to_utf16(name, namelen, &len,
+ cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+}
+
+int smb2_rename_pending_delete(const char *full_path,
+ struct dentry *dentry,
+ const unsigned int xid)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(d_inode(dentry)->i_sb);
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
+ __le16 *utf16_path __free(kfree) = NULL;
+ __u32 co = file_create_options(dentry);
+ int cmds[] = {
+ SMB2_OP_SET_INFO,
+ SMB2_OP_RENAME,
+ SMB2_OP_UNLINK,
+ };
+ const int num_cmds = ARRAY_SIZE(cmds);
+ char *to_name __free(kfree) = NULL;
+ __u32 attrs = cinode->cifsAttrs;
+ struct cifs_open_parms oparms;
+ static atomic_t sillycounter;
+ struct cifsFileInfo *cfile;
+ struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
+ struct kvec iov[2];
+ const char *ppath;
+ void *page;
+ size_t len;
+ int rc;
+
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
+ page = alloc_dentry_path();
+
+ ppath = build_path_from_dentry(dentry->d_parent, page);
+ if (IS_ERR(ppath)) {
+ rc = PTR_ERR(ppath);
+ goto out;
+ }
+
+ len = strlen(ppath) + strlen("/.__smb1234") + 1;
+ to_name = kmalloc(len, GFP_KERNEL);
+ if (!to_name) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ scnprintf(to_name, len, "%s%c.__smb%04X", ppath, CIFS_DIR_SEP(cifs_sb),
+ atomic_inc_return(&sillycounter) & 0xffff);
+
+ utf16_path = utf16_smb2_path(cifs_sb, to_name, len);
+ if (!utf16_path) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ drop_cached_dir_by_name(xid, tcon, full_path, cifs_sb);
+ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
+ DELETE | FILE_WRITE_ATTRIBUTES,
+ FILE_OPEN, co, ACL_NO_MODE);
+
+ attrs &= ~ATTR_READONLY;
+ if (!attrs)
+ attrs = ATTR_NORMAL;
+ if (d_inode(dentry)->i_nlink <= 1)
+ attrs |= ATTR_HIDDEN;
+ iov[0].iov_base = &(FILE_BASIC_INFO) {
+ .Attributes = cpu_to_le32(attrs),
+ };
+ iov[0].iov_len = sizeof(FILE_BASIC_INFO);
+ iov[1].iov_base = utf16_path;
+ iov[1].iov_len = sizeof(*utf16_path) * UniStrlen((wchar_t *)utf16_path);
+
+ cifs_get_writable_path(tcon, full_path, FIND_WR_WITH_DELETE, &cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov,
+ cmds, num_cmds, cfile, NULL, NULL, dentry);
+ if (rc == -EINVAL) {
+ cifs_dbg(FYI, "invalid lease key, resending request without lease\n");
+ cifs_get_writable_path(tcon, full_path,
+ FIND_WR_WITH_DELETE, &cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov,
+ cmds, num_cmds, cfile, NULL, NULL, NULL);
+ }
+ if (!rc) {
+ set_bit(CIFS_INO_DELETE_PENDING, &cinode->flags);
+ } else {
+ cifs_tcon_dbg(FYI, "%s: failed to rename '%s' to '%s': %d\n",
+ __func__, full_path, to_name, rc);
+ rc = -EIO;
+ }
+out:
+ cifs_put_tlink(tlink);
+ free_dentry_path(page);
+ return rc;
+}
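The silly-rename target built above always has the form <parent>/.__smbXXXX, where XXXX is the low 16 bits of a global counter, so for a parent path of /dir and a counter value of 0x1A2B3 the result is /dir/.__smbA2B3. The name construction in isolation:

/* Sketch: build "<parent><sep>.__smbXXXX", 4 hex digits of suffix */
static atomic_t sillycounter;

len = strlen(ppath) + strlen("/.__smb1234") + 1;	/* worst case */
to_name = kmalloc(len, GFP_KERNEL);
if (to_name)
	scnprintf(to_name, len, "%s%c.__smb%04X",
		  ppath, CIFS_DIR_SEP(cifs_sb),
		  atomic_inc_return(&sillycounter) & 0xffff);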
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index cddf273c14ae..89d933b4a8bc 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -614,6 +614,15 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
struct cifs_tcon *tcon;
struct cifs_pending_open *open;
+ /* Trace receipt of lease break request from server */
+ trace_smb3_lease_break_enter(le32_to_cpu(rsp->CurrentLeaseState),
+ le32_to_cpu(rsp->Flags),
+ le16_to_cpu(rsp->Epoch),
+ le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
+ le64_to_cpu(rsp->hdr.SessionId),
+ *((u64 *)rsp->LeaseKey),
+ *((u64 *)&rsp->LeaseKey[8]));
+
cifs_dbg(FYI, "Checking for lease break\n");
/* If server is a channel, select the primary channel */
@@ -660,10 +669,12 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
trace_smb3_lease_not_found(le32_to_cpu(rsp->CurrentLeaseState),
- le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
- le64_to_cpu(rsp->hdr.SessionId),
- *((u64 *)rsp->LeaseKey),
- *((u64 *)&rsp->LeaseKey[8]));
+ le32_to_cpu(rsp->Flags),
+ le16_to_cpu(rsp->Epoch),
+ le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
+ le64_to_cpu(rsp->hdr.SessionId),
+ *((u64 *)rsp->LeaseKey),
+ *((u64 *)&rsp->LeaseKey[8]));
return false;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index ad8947434b71..058050f744c0 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -504,8 +504,8 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
- struct smbdirect_socket_parameters *sp =
- &server->smbd_conn->socket.parameters;
+ const struct smbdirect_socket_parameters *sp =
+ smbd_get_parameters(server->smbd_conn);
if (server->sign)
/*
@@ -555,8 +555,8 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
- struct smbdirect_socket_parameters *sp =
- &server->smbd_conn->socket.parameters;
+ const struct smbdirect_socket_parameters *sp =
+ smbd_get_parameters(server->smbd_conn);
if (server->sign)
/*
@@ -772,6 +772,13 @@ next_iface:
bytes_left -= sizeof(*p);
break;
}
+ /* Validate that Next doesn't point beyond the buffer */
+ if (next > bytes_left) {
+ cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n",
+ __func__, next, bytes_left);
+ rc = -EINVAL;
+ goto out;
+ }
p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
bytes_left -= next;
}
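The added check closes a parsing hole: a buggy or hostile server could report a Next offset larger than the bytes remaining, walking p past the end of the interface-info response. The invariant the loop now enforces is 0 < next <= bytes_left before every advance; a condensed sketch of the safe iteration (types simplified):

/* Sketch: bounded walk over network_interface_info records */
while (bytes_left >= (ssize_t)sizeof(*p)) {
	size_t next = le32_to_cpu(p->Next);

	if (next == 0)
		break;				/* last record */
	if (next > (size_t)bytes_left)
		return -EINVAL;			/* claims bytes past buffer */
	p = (struct network_interface_info_ioctl_rsp *)((u8 *)p + next);
	bytes_left -= next;
}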
@@ -783,7 +790,9 @@ next_iface:
}
	/* Azure rounds the buffer size up by 8, to a 16-byte boundary */
- if ((bytes_left > 8) || p->Next)
+ if ((bytes_left > 8) ||
+ (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)
+ + sizeof(p->Next) && p->Next))
cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
ses->iface_last_update = jiffies;
@@ -945,11 +954,8 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
if (!rc) {
- if (cfid->has_lease) {
- close_cached_dir(cfid);
- return 0;
- }
close_cached_dir(cfid);
+ return 0;
}
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
@@ -2631,13 +2637,35 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
}
/* SMB headers in a compound are 8 byte aligned. */
- if (!IS_ALIGNED(len, 8)) {
- num_padding = 8 - (len & 7);
+ if (IS_ALIGNED(len, 8))
+ goto out;
+
+ num_padding = 8 - (len & 7);
+ if (smb3_encryption_required(tcon)) {
+ int i;
+
+ /*
+ * Flatten request into a single buffer with required padding as
+ * the encryption layer can't handle the padding iovs.
+ */
+ for (i = 1; i < rqst->rq_nvec; i++) {
+ memcpy(rqst->rq_iov[0].iov_base +
+ rqst->rq_iov[0].iov_len,
+ rqst->rq_iov[i].iov_base,
+ rqst->rq_iov[i].iov_len);
+ rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
+ }
+ memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
+ 0, num_padding);
+ rqst->rq_iov[0].iov_len += num_padding;
+ rqst->rq_nvec = 1;
+ } else {
rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
rqst->rq_nvec++;
- len += num_padding;
}
+ len += num_padding;
+out:
shdr->NextCommand = cpu_to_le32(len);
}
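Two cases fall out of the alignment change: unencrypted compounds can append the shared 8-byte smb2_padding buffer as an extra iov, while encrypted ones must be flattened into iov[0] first, because the transform layer cannot cope with a trailing padding iov. The arithmetic itself is an ordinary round-up, e.g. len = 77 gives num_padding = 8 - (77 & 7) = 3 and NextCommand = 80:

/* Sketch: pad a chained PDU length to the next 8-byte boundary */
if (!IS_ALIGNED(len, 8)) {
	num_padding = 8 - (len & 7);	/* 1..7 padding bytes */
	len += num_padding;
}
shdr->NextCommand = cpu_to_le32(len);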
@@ -4188,7 +4216,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
int num_rqst, const u8 *sig, u8 **iv,
struct aead_request **req, struct sg_table *sgt,
- unsigned int *num_sgs, size_t *sensitive_size)
+ unsigned int *num_sgs)
{
unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
unsigned int iv_size = crypto_aead_ivsize(tfm);
@@ -4205,9 +4233,8 @@ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst
len += req_size;
len = ALIGN(len, __alignof__(struct scatterlist));
len += array_size(*num_sgs, sizeof(struct scatterlist));
- *sensitive_size = len;
- p = kvzalloc(len, GFP_NOFS);
+ p = kzalloc(len, GFP_NOFS);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -4221,16 +4248,14 @@ static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst
static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst,
int num_rqst, const u8 *sig, u8 **iv,
- struct aead_request **req, struct scatterlist **sgl,
- size_t *sensitive_size)
+ struct aead_request **req, struct scatterlist **sgl)
{
struct sg_table sgtable = {};
unsigned int skip, num_sgs, i, j;
ssize_t rc;
void *p;
- p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable,
- &num_sgs, sensitive_size);
+ p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable, &num_sgs);
if (IS_ERR(p))
return ERR_CAST(p);
@@ -4319,7 +4344,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
DECLARE_CRYPTO_WAIT(wait);
unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
void *creq;
- size_t sensitive_size;
rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
if (rc) {
@@ -4345,8 +4369,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
return rc;
}
- creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg,
- &sensitive_size);
+ creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
if (IS_ERR(creq))
return PTR_ERR(creq);
@@ -4376,7 +4399,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
if (!rc && enc)
memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
- kvfree_sensitive(creq, sensitive_size);
+ kfree_sensitive(creq);
return rc;
}
@@ -4487,7 +4510,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
for (int i = 1; i < num_rqst; i++) {
struct smb_rqst *old = &old_rq[i - 1];
struct smb_rqst *new = &new_rq[i];
- struct folio_queue *buffer;
+ struct folio_queue *buffer = NULL;
size_t size = iov_iter_count(&old->rq_iter);
orig_len += smb_rqst_len(server, old);
@@ -4805,7 +4828,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
dw->server->ops->is_network_name_deleted(dw->buf,
dw->server);
- mid->callback(mid);
+ mid_execute_callback(mid);
} else {
spin_lock(&dw->server->srv_lock);
if (dw->server->tcpStatus == CifsNeedReconnect) {
@@ -4813,7 +4836,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
mid->mid_state = MID_RETRY_NEEDED;
spin_unlock(&dw->server->mid_queue_lock);
spin_unlock(&dw->server->srv_lock);
- mid->callback(mid);
+ mid_execute_callback(mid);
} else {
spin_lock(&dw->server->mid_queue_lock);
mid->mid_state = MID_REQUEST_SUBMITTED;
@@ -5367,6 +5390,7 @@ struct smb_version_operations smb20_operations = {
.llseek = smb3_llseek,
.is_status_io_timeout = smb2_is_status_io_timeout,
.is_network_name_deleted = smb2_is_network_name_deleted,
+ .rename_pending_delete = smb2_rename_pending_delete,
};
#endif /* CIFS_ALLOW_INSECURE_LEGACY */
@@ -5472,6 +5496,7 @@ struct smb_version_operations smb21_operations = {
.llseek = smb3_llseek,
.is_status_io_timeout = smb2_is_status_io_timeout,
.is_network_name_deleted = smb2_is_network_name_deleted,
+ .rename_pending_delete = smb2_rename_pending_delete,
};
struct smb_version_operations smb30_operations = {
@@ -5588,6 +5613,7 @@ struct smb_version_operations smb30_operations = {
.llseek = smb3_llseek,
.is_status_io_timeout = smb2_is_status_io_timeout,
.is_network_name_deleted = smb2_is_network_name_deleted,
+ .rename_pending_delete = smb2_rename_pending_delete,
};
struct smb_version_operations smb311_operations = {
@@ -5704,6 +5730,7 @@ struct smb_version_operations smb311_operations = {
.llseek = smb3_llseek,
.is_status_io_timeout = smb2_is_status_io_timeout,
.is_network_name_deleted = smb2_is_network_name_deleted,
+ .rename_pending_delete = smb2_rename_pending_delete,
};
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 2df93a75e3b8..42e2d4ea344d 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -240,8 +240,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
*/
if (smb2_command != SMB2_TREE_DISCONNECT) {
spin_unlock(&tcon->tc_lock);
- cifs_dbg(FYI, "can not send cmd %d while umounting\n",
- smb2_command);
+ cifs_tcon_dbg(FYI, "can not send cmd %d while umounting\n",
+ smb2_command);
return -ENODEV;
}
}
@@ -296,9 +296,9 @@ again:
return 0;
}
spin_unlock(&ses->chan_lock);
- cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
- tcon->ses->chans_need_reconnect,
- tcon->need_reconnect);
+ cifs_tcon_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d\n",
+ tcon->ses->chans_need_reconnect,
+ tcon->need_reconnect);
mutex_lock(&ses->session_mutex);
/*
@@ -392,11 +392,11 @@ skip_sess_setup:
rc = cifs_tree_connect(0, tcon);
- cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
+ cifs_tcon_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) {
/* If sess reconnected but tcon didn't, something strange ... */
mutex_unlock(&ses->session_mutex);
- cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
+ cifs_tcon_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
goto out;
}
@@ -442,8 +442,8 @@ skip_sess_setup:
from_reconnect);
goto skip_add_channels;
} else if (rc)
- cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
- __func__, rc);
+ cifs_tcon_dbg(FYI, "%s: failed to query server interfaces: %d\n",
+ __func__, rc);
if (ses->chan_max > ses->chan_count &&
ses->iface_count &&
@@ -4411,7 +4411,7 @@ static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
return false;
/* offload also has its overhead, so only do it if desired */
- if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold)
+ if (io_parms->length < server->rdma_readwrite_threshold)
return false;
return true;
@@ -6192,11 +6192,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
please_key_high = (__u64 *)(lease_key+8);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
- trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
+ trace_smb3_lease_ack_err(le32_to_cpu(lease_state), tcon->tid,
ses->Suid, *please_key_low, *please_key_high, rc);
cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
} else
- trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
+ trace_smb3_lease_ack_done(le32_to_cpu(lease_state), tcon->tid,
ses->Suid, *please_key_low, *please_key_high);
return rc;
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 6e805ece6a7b..b3f1398c9f79 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -317,5 +317,8 @@ int posix_info_sid_size(const void *beg, const void *end);
int smb2_make_nfs_node(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev);
+int smb2_rename_pending_delete(const char *full_path,
+ struct dentry *dentry,
+ const unsigned int xid);
#endif /* _SMB2PROTO_H */
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index ff9ef7fcd010..bc0e92eb2b64 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -771,6 +771,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *shdr,
temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
memset(temp, 0, sizeof(struct mid_q_entry));
kref_init(&temp->refcount);
+ spin_lock_init(&temp->mid_lock);
temp->mid = le64_to_cpu(shdr->MessageId);
temp->credits = credits > 0 ? credits : 1;
temp->pid = current->pid;
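The new per-mid spinlock pairs with the mid_execute_callback() calls that replace the raw mid->callback(mid) invocations in smb2ops.c above; the point is to serialize callback dispatch against concurrent state changes on the same mid. A sketch of the shape such a wrapper could take (this is an assumption about the helper introduced elsewhere in the series, not a quote of it):

/* Sketch (assumed shape): dispatch the mid callback under mid_lock */
static void mid_execute_callback(struct mid_q_entry *mid)
{
	void (*callback)(struct mid_q_entry *mid);

	spin_lock(&mid->mid_lock);
	callback = mid->callback;	/* snapshot under the lock */
	spin_unlock(&mid->mid_lock);

	callback(mid);
}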
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index c628e91c328b..316f398c70f4 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -13,28 +13,35 @@
#include "cifsproto.h"
#include "smb2proto.h"
+const struct smbdirect_socket_parameters *smbd_get_parameters(struct smbd_connection *conn)
+{
+ struct smbdirect_socket *sc = &conn->socket;
+
+ return &sc->parameters;
+}
+
static struct smbdirect_recv_io *get_receive_buffer(
- struct smbd_connection *info);
+ struct smbdirect_socket *sc);
static void put_receive_buffer(
- struct smbd_connection *info,
+ struct smbdirect_socket *sc,
struct smbdirect_recv_io *response);
-static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
-static void destroy_receive_buffers(struct smbd_connection *info);
+static int allocate_receive_buffers(struct smbdirect_socket *sc, int num_buf);
+static void destroy_receive_buffers(struct smbdirect_socket *sc);
static void enqueue_reassembly(
- struct smbd_connection *info,
+ struct smbdirect_socket *sc,
struct smbdirect_recv_io *response, int data_length);
static struct smbdirect_recv_io *_get_first_reassembly(
- struct smbd_connection *info);
+ struct smbdirect_socket *sc);
static int smbd_post_recv(
- struct smbd_connection *info,
+ struct smbdirect_socket *sc,
struct smbdirect_recv_io *response);
-static int smbd_post_send_empty(struct smbd_connection *info);
+static int smbd_post_send_empty(struct smbdirect_socket *sc);
-static void destroy_mr_list(struct smbd_connection *info);
-static int allocate_mr_list(struct smbd_connection *info);
+static void destroy_mr_list(struct smbdirect_socket *sc);
+static int allocate_mr_list(struct smbdirect_socket *sc);
struct smb_extract_to_rdma {
struct ib_sge *sge;
@@ -57,6 +64,9 @@ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
/* SMBD negotiation timeout in seconds */
#define SMBD_NEGOTIATE_TIMEOUT 120
+/* The timeout to wait for a keepalive message from the peer, in seconds */
+#define KEEPALIVE_RECV_TIMEOUT 5
+
/* SMBD minimum receive size and fragmented sized defined in [MS-SMBD] */
#define SMBD_MIN_RECEIVE_SIZE 128
#define SMBD_MIN_FRAGMENTED_SIZE 131072
@@ -155,65 +165,277 @@ do { \
#define log_rdma_mr(level, fmt, args...) \
log_rdma(level, LOG_RDMA_MR, fmt, ##args)
+static void smbd_disconnect_wake_up_all(struct smbdirect_socket *sc)
+{
+ /*
+ * Wake up all waiters in all wait queues
+ * in order to notice the broken connection.
+ */
+ wake_up_all(&sc->status_wait);
+ wake_up_all(&sc->send_io.credits.wait_queue);
+ wake_up_all(&sc->send_io.pending.dec_wait_queue);
+ wake_up_all(&sc->send_io.pending.zero_wait_queue);
+ wake_up_all(&sc->recv_io.reassembly.wait_queue);
+ wake_up_all(&sc->mr_io.ready.wait_queue);
+ wake_up_all(&sc->mr_io.cleanup.wait_queue);
+}
+
static void smbd_disconnect_rdma_work(struct work_struct *work)
{
- struct smbd_connection *info =
- container_of(work, struct smbd_connection, disconnect_work);
- struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket *sc =
+ container_of(work, struct smbdirect_socket, disconnect_work);
- if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
+ /*
+ * make sure this and other work is not queued again
+ * but here we don't block and avoid
+ * disable[_delayed]_work_sync()
+ */
+ disable_work(&sc->disconnect_work);
+ disable_work(&sc->recv_io.posted.refill_work);
+ disable_work(&sc->mr_io.recovery_work);
+ disable_work(&sc->idle.immediate_work);
+ disable_delayed_work(&sc->idle.timer_work);
+
+ if (sc->first_error == 0)
+ sc->first_error = -ECONNABORTED;
+
+ switch (sc->status) {
+ case SMBDIRECT_SOCKET_NEGOTIATE_NEEDED:
+ case SMBDIRECT_SOCKET_NEGOTIATE_RUNNING:
+ case SMBDIRECT_SOCKET_NEGOTIATE_FAILED:
+ case SMBDIRECT_SOCKET_CONNECTED:
+ case SMBDIRECT_SOCKET_ERROR:
sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
rdma_disconnect(sc->rdma.cm_id);
+ break;
+
+ case SMBDIRECT_SOCKET_CREATED:
+ case SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED:
+ case SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING:
+ case SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED:
+ case SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED:
+ case SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING:
+ case SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED:
+ case SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED:
+ case SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING:
+ case SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED:
+ /*
+ * rdma_connect() never reached
+ * RDMA_CM_EVENT_ESTABLISHED
+ */
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ break;
+
+ case SMBDIRECT_SOCKET_DISCONNECTING:
+ case SMBDIRECT_SOCKET_DISCONNECTED:
+ case SMBDIRECT_SOCKET_DESTROYED:
+ break;
}
+
+ /*
+ * Wake up all waiters in all wait queues
+ * in order to notice the broken connection.
+ */
+ smbd_disconnect_wake_up_all(sc);
}
-static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
+static void smbd_disconnect_rdma_connection(struct smbdirect_socket *sc)
{
- queue_work(info->workqueue, &info->disconnect_work);
+ /*
+ * make sure other work (than disconnect_work) is
+ * not queued again but here we don't block and avoid
+ * disable[_delayed]_work_sync()
+ */
+ disable_work(&sc->recv_io.posted.refill_work);
+ disable_work(&sc->mr_io.recovery_work);
+ disable_work(&sc->idle.immediate_work);
+ disable_delayed_work(&sc->idle.timer_work);
+
+ if (sc->first_error == 0)
+ sc->first_error = -ECONNABORTED;
+
+ switch (sc->status) {
+ case SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED:
+ case SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED:
+ case SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED:
+ case SMBDIRECT_SOCKET_NEGOTIATE_FAILED:
+ case SMBDIRECT_SOCKET_ERROR:
+ case SMBDIRECT_SOCKET_DISCONNECTING:
+ case SMBDIRECT_SOCKET_DISCONNECTED:
+ case SMBDIRECT_SOCKET_DESTROYED:
+ /*
+ * Keep the current error status
+ */
+ break;
+
+ case SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED:
+ case SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING:
+ sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED;
+ break;
+
+ case SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED:
+ case SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING:
+ sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED;
+ break;
+
+ case SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED:
+ case SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING:
+ sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED;
+ break;
+
+ case SMBDIRECT_SOCKET_NEGOTIATE_NEEDED:
+ case SMBDIRECT_SOCKET_NEGOTIATE_RUNNING:
+ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
+ break;
+
+ case SMBDIRECT_SOCKET_CREATED:
+ case SMBDIRECT_SOCKET_CONNECTED:
+ sc->status = SMBDIRECT_SOCKET_ERROR;
+ break;
+ }
+
+ /*
+ * Wake up all waiters in all wait queues
+ * in order to notice the broken connection.
+ */
+ smbd_disconnect_wake_up_all(sc);
+
+ queue_work(sc->workqueue, &sc->disconnect_work);
}
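Taken together, the two helpers split disconnect into a status demotion done inline and an rdma_disconnect() deferred to disconnect_work, which only issues it for states where a CM connection can actually exist. The demotion applied by smbd_disconnect_rdma_connection() before queueing the work, summarized:

/*
 * Sketch of the status mapping above:
 *
 *   RESOLVE_ADDR_{NEEDED,RUNNING}   -> RESOLVE_ADDR_FAILED
 *   RESOLVE_ROUTE_{NEEDED,RUNNING}  -> RESOLVE_ROUTE_FAILED
 *   RDMA_CONNECT_{NEEDED,RUNNING}   -> RDMA_CONNECT_FAILED
 *   NEGOTIATE_{NEEDED,RUNNING}      -> NEGOTIATE_FAILED
 *   CREATED, CONNECTED              -> ERROR
 *   *_FAILED, ERROR, DISCONNECTING,
 *   DISCONNECTED, DESTROYED         -> left unchanged
 */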
/* Upcall from RDMA CM */
static int smbd_conn_upcall(
struct rdma_cm_id *id, struct rdma_cm_event *event)
{
- struct smbd_connection *info = id->context;
- struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket *sc = id->context;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
const char *event_name = rdma_event_msg(event->event);
+ u8 peer_initiator_depth;
+ u8 peer_responder_resources;
log_rdma_event(INFO, "event=%s status=%d\n",
event_name, event->status);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING);
+ sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED;
+ wake_up(&sc->status_wait);
+ break;
+
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- info->ri_rc = 0;
- complete(&info->ri_done);
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING);
+ sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED;
+ wake_up(&sc->status_wait);
break;
case RDMA_CM_EVENT_ADDR_ERROR:
log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
- info->ri_rc = -EHOSTUNREACH;
- complete(&info->ri_done);
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING);
+ sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED;
+ smbd_disconnect_rdma_work(&sc->disconnect_work);
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
- info->ri_rc = -ENETUNREACH;
- complete(&info->ri_done);
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING);
+ sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED;
+ smbd_disconnect_rdma_work(&sc->disconnect_work);
break;
case RDMA_CM_EVENT_ESTABLISHED:
log_rdma_event(INFO, "connected event=%s\n", event_name);
- sc->status = SMBDIRECT_SOCKET_CONNECTED;
- wake_up_interruptible(&info->status_wait);
+
+ /*
+ * Here we work around an inconsistency between
+ * iWarp and other devices (at least rxe and irdma using RoCEv2)
+ */
+ if (rdma_protocol_iwarp(id->device, id->port_num)) {
+ /*
+ * iWarp devices report the peer's values
+ * with the perspective of the peer here.
+ * Tested with siw and irdma (in iwarp mode)
+ * We need to change to our perspective here,
+ * so we need to switch the values.
+ */
+ peer_initiator_depth = event->param.conn.responder_resources;
+ peer_responder_resources = event->param.conn.initiator_depth;
+ } else {
+ /*
+ * Non iWarp devices report the peer's values
+ * already changed to our perspective here.
+ * Tested with rxe and irdma (in roce mode).
+ */
+ peer_initiator_depth = event->param.conn.initiator_depth;
+ peer_responder_resources = event->param.conn.responder_resources;
+ }
+ if (rdma_protocol_iwarp(id->device, id->port_num) &&
+ event->param.conn.private_data_len == 8) {
+ /*
+ * Legacy clients with only iWarp MPA v1 support
+ * need a private blob in order to negotiate
+ * the IRD/ORD values.
+ */
+ const __be32 *ird_ord_hdr = event->param.conn.private_data;
+ u32 ird32 = be32_to_cpu(ird_ord_hdr[0]);
+ u32 ord32 = be32_to_cpu(ird_ord_hdr[1]);
+
+ /*
+ * cifs.ko sends the legacy IRD/ORD negotiation
+ * event if iWarp MPA v2 was used.
+ *
+ * Here we check that the values match and only
+ * mark the client as legacy if they don't match.
+ */
+ if ((u32)event->param.conn.initiator_depth != ird32 ||
+ (u32)event->param.conn.responder_resources != ord32) {
+ /*
+ * There are broken clients (old cifs.ko)
+ * using little endian and also
+ * struct rdma_conn_param only uses u8
+ * for initiator_depth and responder_resources,
+ * so we truncate the value to U8_MAX.
+ *
+ * smb_direct_accept_client() will then
+ * do the real negotiation in order to
+ * select the minimum between client and
+ * server.
+ */
+ ird32 = min_t(u32, ird32, U8_MAX);
+ ord32 = min_t(u32, ord32, U8_MAX);
+
+ sc->rdma.legacy_iwarp = true;
+ peer_initiator_depth = (u8)ird32;
+ peer_responder_resources = (u8)ord32;
+ }
+ }
+
+ /*
+	 * negotiate the values by using the minimum
+	 * between client and server if the client provided
+	 * non-zero values.
+ */
+ if (peer_initiator_depth != 0)
+ sp->initiator_depth =
+ min_t(u8, sp->initiator_depth,
+ peer_initiator_depth);
+ if (peer_responder_resources != 0)
+ sp->responder_resources =
+ min_t(u8, sp->responder_resources,
+ peer_responder_resources);
+
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING);
+ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED;
+ wake_up(&sc->status_wait);
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
- sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
- wake_up_interruptible(&info->status_wait);
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING);
+ sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED;
+ smbd_disconnect_rdma_work(&sc->disconnect_work);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
@@ -221,15 +443,10 @@ static int smbd_conn_upcall(
/* This happens when we fail the negotiation */
if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
log_rdma_event(ERR, "event=%s during negotiation\n", event_name);
- sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
- wake_up(&info->status_wait);
- break;
}
sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
- wake_up_interruptible(&info->status_wait);
- wake_up_interruptible(&sc->recv_io.reassembly.wait_queue);
- wake_up_interruptible_all(&info->wait_send_queue);
+ smbd_disconnect_rdma_work(&sc->disconnect_work);
break;
default:
@@ -245,15 +462,15 @@ static int smbd_conn_upcall(
static void
smbd_qp_async_error_upcall(struct ib_event *event, void *context)
{
- struct smbd_connection *info = context;
+ struct smbdirect_socket *sc = context;
- log_rdma_event(ERR, "%s on device %s info %p\n",
- ib_event_msg(event->event), event->device->name, info);
+ log_rdma_event(ERR, "%s on device %s socket %p\n",
+ ib_event_msg(event->event), event->device->name, sc);
switch (event->event) {
case IB_EVENT_CQ_ERR:
case IB_EVENT_QP_FATAL:
- smbd_disconnect_rdma_connection(info);
+ smbd_disconnect_rdma_connection(sc);
break;
default:
@@ -278,11 +495,9 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
struct smbdirect_send_io *request =
container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
struct smbdirect_socket *sc = request->socket;
- struct smbd_connection *info =
- container_of(sc, struct smbd_connection, socket);
- log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%d\n",
- request, wc->status);
+ log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%s\n",
+ request, ib_wc_status_msg(wc->status));
for (i = 0; i < request->num_sge; i++)
ib_dma_unmap_single(sc->ib.dev,
@@ -291,17 +506,18 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
DMA_TO_DEVICE);
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
- log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
- wc->status, wc->opcode);
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ log_rdma_send(ERR, "wc->status=%s wc->opcode=%d\n",
+ ib_wc_status_msg(wc->status), wc->opcode);
mempool_free(request, sc->send_io.mem.pool);
- smbd_disconnect_rdma_connection(info);
+ smbd_disconnect_rdma_connection(sc);
return;
}
- if (atomic_dec_and_test(&info->send_pending))
- wake_up(&info->wait_send_pending);
+ if (atomic_dec_and_test(&sc->send_io.pending.count))
+ wake_up(&sc->send_io.pending.zero_wait_queue);
- wake_up(&info->wait_post_send);
+ wake_up(&sc->send_io.pending.dec_wait_queue);
mempool_free(request, sc->send_io.mem.pool);
}
@@ -325,8 +541,6 @@ static bool process_negotiation_response(
struct smbdirect_recv_io *response, int packet_length)
{
struct smbdirect_socket *sc = response->socket;
- struct smbd_connection *info =
- container_of(sc, struct smbd_connection, socket);
struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smbdirect_negotiate_resp *packet = smbdirect_recv_io_payload(response);
@@ -341,21 +555,19 @@ static bool process_negotiation_response(
le16_to_cpu(packet->negotiated_version));
return false;
}
- info->protocol = le16_to_cpu(packet->negotiated_version);
if (packet->credits_requested == 0) {
log_rdma_event(ERR, "error: credits_requested==0\n");
return false;
}
- info->receive_credit_target = le16_to_cpu(packet->credits_requested);
+ sc->recv_io.credits.target = le16_to_cpu(packet->credits_requested);
+ sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max);
if (packet->credits_granted == 0) {
log_rdma_event(ERR, "error: credits_granted==0\n");
return false;
}
- atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
-
- atomic_set(&info->receive_credits, 0);
+ atomic_set(&sc->send_io.credits.count, le16_to_cpu(packet->credits_granted));
if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) {
log_rdma_event(ERR, "error: preferred_send_size=%d\n",
@@ -380,16 +592,12 @@ static bool process_negotiation_response(
}
sp->max_fragmented_send_size =
le32_to_cpu(packet->max_fragmented_size);
- info->rdma_readwrite_threshold =
- rdma_readwrite_threshold > sp->max_fragmented_send_size ?
- sp->max_fragmented_send_size :
- rdma_readwrite_threshold;
sp->max_read_write_size = min_t(u32,
le32_to_cpu(packet->max_readwrite_size),
- info->max_frmr_depth * PAGE_SIZE);
- info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
+ sp->max_frmr_depth * PAGE_SIZE);
+ sp->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER;
return true;
@@ -397,52 +605,40 @@ static bool process_negotiation_response(
static void smbd_post_send_credits(struct work_struct *work)
{
- int ret = 0;
int rc;
struct smbdirect_recv_io *response;
- struct smbd_connection *info =
- container_of(work, struct smbd_connection,
- post_send_credits_work);
- struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket *sc =
+ container_of(work, struct smbdirect_socket, recv_io.posted.refill_work);
if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
- wake_up(&info->wait_receive_queues);
return;
}
- if (info->receive_credit_target >
- atomic_read(&info->receive_credits)) {
+ if (sc->recv_io.credits.target >
+ atomic_read(&sc->recv_io.credits.count)) {
while (true) {
- response = get_receive_buffer(info);
+ response = get_receive_buffer(sc);
if (!response)
break;
response->first_segment = false;
- rc = smbd_post_recv(info, response);
+ rc = smbd_post_recv(sc, response);
if (rc) {
log_rdma_recv(ERR,
"post_recv failed rc=%d\n", rc);
- put_receive_buffer(info, response);
+ put_receive_buffer(sc, response);
break;
}
- ret++;
+ atomic_inc(&sc->recv_io.posted.count);
}
}
- spin_lock(&info->lock_new_credits_offered);
- info->new_credits_offered += ret;
- spin_unlock(&info->lock_new_credits_offered);
-
/* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */
- info->send_immediate = true;
- if (atomic_read(&info->receive_credits) <
- info->receive_credit_target - 1) {
- if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
- info->send_immediate) {
- log_keep_alive(INFO, "send an empty message\n");
- smbd_post_send_empty(info);
- }
+ if (atomic_read(&sc->recv_io.credits.count) <
+ sc->recv_io.credits.target - 1) {
+ log_keep_alive(INFO, "schedule send of an empty message\n");
+ queue_work(sc->workqueue, &sc->idle.immediate_work);
}
}
@@ -453,17 +649,23 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct smbdirect_recv_io *response =
container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
struct smbdirect_socket *sc = response->socket;
- struct smbd_connection *info =
- container_of(sc, struct smbd_connection, socket);
- int data_length = 0;
-
- log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n",
- response, sc->recv_io.expected, wc->status, wc->opcode,
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
+ u16 old_recv_credit_target;
+ u32 data_offset = 0;
+ u32 data_length = 0;
+ u32 remaining_data_length = 0;
+ bool negotiate_done = false;
+
+ log_rdma_recv(INFO,
+ "response=0x%p type=%d wc status=%s wc opcode %d byte_len=%d pkey_index=%u\n",
+ response, sc->recv_io.expected,
+ ib_wc_status_msg(wc->status), wc->opcode,
wc->byte_len, wc->pkey_index);
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
- log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
- wc->status, wc->opcode);
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ log_rdma_recv(ERR, "wc->status=%s opcode=%d\n",
+ ib_wc_status_msg(wc->status), wc->opcode);
goto error;
}
@@ -473,21 +675,52 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
response->sge.length,
DMA_FROM_DEVICE);
+ /*
+ * Reset timer to the keepalive interval in
+ * order to trigger our next keepalive message.
+ */
+ sc->idle.keepalive = SMBDIRECT_KEEPALIVE_NONE;
+ mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
+ msecs_to_jiffies(sp->keepalive_interval_msec));
+
switch (sc->recv_io.expected) {
/* SMBD negotiation response */
case SMBDIRECT_EXPECT_NEGOTIATE_REP:
dump_smbdirect_negotiate_resp(smbdirect_recv_io_payload(response));
sc->recv_io.reassembly.full_packet_received = true;
- info->negotiate_done =
+ negotiate_done =
process_negotiation_response(response, wc->byte_len);
- put_receive_buffer(info, response);
- complete(&info->negotiate_completion);
+ put_receive_buffer(sc, response);
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_NEGOTIATE_RUNNING);
+ if (!negotiate_done) {
+ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
+ smbd_disconnect_rdma_connection(sc);
+ } else {
+ sc->status = SMBDIRECT_SOCKET_CONNECTED;
+ wake_up(&sc->status_wait);
+ }
+
return;
/* SMBD data transfer packet */
case SMBDIRECT_EXPECT_DATA_TRANSFER:
data_transfer = smbdirect_recv_io_payload(response);
+
+ if (wc->byte_len <
+ offsetof(struct smbdirect_data_transfer, padding))
+ goto error;
+
+ remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length);
+ data_offset = le32_to_cpu(data_transfer->data_offset);
data_length = le32_to_cpu(data_transfer->data_length);
+ if (wc->byte_len < data_offset ||
+ (u64)wc->byte_len < (u64)data_offset + data_length)
+ goto error;
+
+ if (remaining_data_length > sp->max_fragmented_recv_size ||
+ data_length > sp->max_fragmented_recv_size ||
+ (u64)remaining_data_length + (u64)data_length > (u64)sp->max_fragmented_recv_size)
+ goto error;
if (data_length) {
if (sc->recv_io.reassembly.full_packet_received)
@@ -499,17 +732,23 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
sc->recv_io.reassembly.full_packet_received = true;
}
- atomic_dec(&info->receive_credits);
- info->receive_credit_target =
+ atomic_dec(&sc->recv_io.posted.count);
+ atomic_dec(&sc->recv_io.credits.count);
+ old_recv_credit_target = sc->recv_io.credits.target;
+ sc->recv_io.credits.target =
le16_to_cpu(data_transfer->credits_requested);
+ sc->recv_io.credits.target =
+ min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max);
+ sc->recv_io.credits.target =
+ max_t(u16, sc->recv_io.credits.target, 1);
if (le16_to_cpu(data_transfer->credits_granted)) {
atomic_add(le16_to_cpu(data_transfer->credits_granted),
- &info->send_credits);
+ &sc->send_io.credits.count);
/*
* We have new send credits granted from remote peer
* If any sender is waiting for credits, unblock it
*/
- wake_up_interruptible(&info->wait_send_queue);
+ wake_up(&sc->send_io.credits.wait_queue);
}
log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
@@ -518,11 +757,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
le32_to_cpu(data_transfer->data_length),
le32_to_cpu(data_transfer->remaining_data_length));
- /* Send a KEEP_ALIVE response right away if requested */
- info->keep_alive_requested = KEEP_ALIVE_NONE;
+ /* Send an immediate response right away if requested */
if (le16_to_cpu(data_transfer->flags) &
SMBDIRECT_FLAG_RESPONSE_REQUESTED) {
- info->keep_alive_requested = KEEP_ALIVE_PENDING;
+ log_keep_alive(INFO, "schedule send of immediate response\n");
+ queue_work(sc->workqueue, &sc->idle.immediate_work);
}
/*
@@ -530,10 +769,13 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
* reassembly queue and wake up the reading thread
*/
if (data_length) {
- enqueue_reassembly(info, response, data_length);
- wake_up_interruptible(&sc->recv_io.reassembly.wait_queue);
+ if (sc->recv_io.credits.target > old_recv_credit_target)
+ queue_work(sc->workqueue, &sc->recv_io.posted.refill_work);
+
+ enqueue_reassembly(sc, response, data_length);
+ wake_up(&sc->recv_io.reassembly.wait_queue);
} else
- put_receive_buffer(info, response);
+ put_receive_buffer(sc, response);
return;
@@ -548,19 +790,20 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
log_rdma_recv(ERR, "unexpected response type=%d\n", sc->recv_io.expected);
WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_DATA_TRANSFER);
error:
- put_receive_buffer(info, response);
- smbd_disconnect_rdma_connection(info);
+ put_receive_buffer(sc, response);
+ smbd_disconnect_rdma_connection(sc);
}
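The new validation in the data-transfer arm rejects a malformed header in three steps: the completion must cover at least the fixed header up to the padding field, the advertised payload window (data_offset plus data_length) must lie inside wc->byte_len, and the advertised lengths must respect the negotiated max_fragmented_recv_size; the u64 casts keep the 32-bit additions from wrapping. Condensed from the code above:

/* Sketch: validate a smbdirect_data_transfer header (u32 fields) */
if (wc->byte_len < offsetof(struct smbdirect_data_transfer, padding))
	goto error;			/* truncated header */
if (wc->byte_len < data_offset ||
    (u64)wc->byte_len < (u64)data_offset + data_length)
	goto error;			/* payload past end of receive */
if (remaining_data_length > sp->max_fragmented_recv_size ||
    data_length > sp->max_fragmented_recv_size ||
    (u64)remaining_data_length + (u64)data_length >
    (u64)sp->max_fragmented_recv_size)
	goto error;			/* exceeds negotiated limit */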
static struct rdma_cm_id *smbd_create_id(
- struct smbd_connection *info,
+ struct smbdirect_socket *sc,
struct sockaddr *dstaddr, int port)
{
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct rdma_cm_id *id;
int rc;
__be16 *sport;
- id = rdma_create_id(&init_net, smbd_conn_upcall, info,
+ id = rdma_create_id(&init_net, smbd_conn_upcall, sc,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(id)) {
rc = PTR_ERR(id);
@@ -575,43 +818,57 @@ static struct rdma_cm_id *smbd_create_id(
*sport = htons(port);
- init_completion(&info->ri_done);
- info->ri_rc = -ETIMEDOUT;
-
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED);
+ sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING;
rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
- RDMA_RESOLVE_TIMEOUT);
+ sp->resolve_addr_timeout_msec);
if (rc) {
log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
goto out;
}
- rc = wait_for_completion_interruptible_timeout(
- &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ rc = wait_event_interruptible_timeout(
+ sc->status_wait,
+ sc->status != SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING,
+ msecs_to_jiffies(sp->resolve_addr_timeout_msec));
/* e.g. if interrupted returns -ERESTARTSYS */
if (rc < 0) {
log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
goto out;
}
- rc = info->ri_rc;
- if (rc) {
+ if (sc->status == SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING) {
+ rc = -ETIMEDOUT;
+ log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
+ goto out;
+ }
+ if (sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED) {
+ rc = -EHOSTUNREACH;
log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
goto out;
}
- info->ri_rc = -ETIMEDOUT;
- rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED);
+ sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING;
+ rc = rdma_resolve_route(id, sp->resolve_route_timeout_msec);
if (rc) {
log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
goto out;
}
- rc = wait_for_completion_interruptible_timeout(
- &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ rc = wait_event_interruptible_timeout(
+ sc->status_wait,
+ sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING,
+ msecs_to_jiffies(sp->resolve_route_timeout_msec));
/* e.g. if interrupted returns -ERESTARTSYS */
if (rc < 0) {
		log_rdma_event(ERR, "rdma_resolve_route timeout rc: %i\n", rc);
goto out;
}
- rc = info->ri_rc;
- if (rc) {
+ if (sc->status == SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING) {
+ rc = -ETIMEDOUT;
+ log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
+ goto out;
+ }
+ if (sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED) {
+ rc = -ENETUNREACH;
log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
goto out;
}
@@ -638,13 +895,16 @@ static bool frwr_is_supported(struct ib_device_attr *attrs)
}
static int smbd_ia_open(
- struct smbd_connection *info,
+ struct smbdirect_socket *sc,
struct sockaddr *dstaddr, int port)
{
- struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
int rc;
- sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_CREATED);
+ sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED;
+
+ sc->rdma.cm_id = smbd_create_id(sc, dstaddr, port);
if (IS_ERR(sc->rdma.cm_id)) {
rc = PTR_ERR(sc->rdma.cm_id);
goto out1;
@@ -659,19 +919,12 @@ static int smbd_ia_open(
rc = -EPROTONOSUPPORT;
goto out2;
}
- info->max_frmr_depth = min_t(int,
- smbd_max_frmr_depth,
+ sp->max_frmr_depth = min_t(u32,
+ sp->max_frmr_depth,
sc->ib.dev->attrs.max_fast_reg_page_list_len);
- info->mr_type = IB_MR_TYPE_MEM_REG;
+ sc->mr_io.type = IB_MR_TYPE_MEM_REG;
if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
- info->mr_type = IB_MR_TYPE_SG_GAPS;
-
- sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
- if (IS_ERR(sc->ib.pd)) {
- rc = PTR_ERR(sc->ib.pd);
- log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
- goto out2;
- }
+ sc->mr_io.type = IB_MR_TYPE_SG_GAPS;
return 0;
@@ -689,9 +942,8 @@ out1:
* After negotiation, the transport is connected and ready for
* carrying upper layer SMB payload
*/
-static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+static int smbd_post_send_negotiate_req(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_send_wr send_wr;
int rc = -ENOMEM;
@@ -743,18 +995,18 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
request->sge[0].addr,
request->sge[0].length, request->sge[0].lkey);
- atomic_inc(&info->send_pending);
+ atomic_inc(&sc->send_io.pending.count);
rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
if (!rc)
return 0;
/* if we reach here, post send failed */
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
- atomic_dec(&info->send_pending);
+ atomic_dec(&sc->send_io.pending.count);
ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
request->sge[0].length, DMA_TO_DEVICE);
- smbd_disconnect_rdma_connection(info);
+ smbd_disconnect_rdma_connection(sc);
dma_mapping_failed:
mempool_free(request, sc->send_io.mem.pool);
@@ -769,14 +1021,20 @@ dma_mapping_failed:
 * buffer as possible, and extend the receive credits to the remote peer.
 * return value: the new credits being granted.
*/
-static int manage_credits_prior_sending(struct smbd_connection *info)
+static int manage_credits_prior_sending(struct smbdirect_socket *sc)
{
int new_credits;
- spin_lock(&info->lock_new_credits_offered);
- new_credits = info->new_credits_offered;
- info->new_credits_offered = 0;
- spin_unlock(&info->lock_new_credits_offered);
+ if (atomic_read(&sc->recv_io.credits.count) >= sc->recv_io.credits.target)
+ return 0;
+
+ new_credits = atomic_read(&sc->recv_io.posted.count);
+ if (new_credits == 0)
+ return 0;
+
+ new_credits -= atomic_read(&sc->recv_io.credits.count);
+ if (new_credits <= 0)
+ return 0;
return new_credits;
}
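The rewritten helper derives the grant from two live counters instead of the old new_credits_offered ledger: nothing is granted while the peer already holds at least the target, and otherwise the grant is the number of posted receives not yet backed by an outstanding credit. With a target of 255, 10 credits outstanding and 200 receives posted, it returns 190. As a standalone function (credits_to_grant is an illustrative name):

/* Sketch: grant = receives posted minus credits already outstanding */
static int credits_to_grant(struct smbdirect_socket *sc)
{
	int posted  = atomic_read(&sc->recv_io.posted.count);
	int granted = atomic_read(&sc->recv_io.credits.count);

	if (granted >= sc->recv_io.credits.target)
		return 0;	/* peer already has enough credits */
	return max(posted - granted, 0);
}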
@@ -790,21 +1048,27 @@ static int manage_credits_prior_sending(struct smbd_connection *info)
* 1 if SMBDIRECT_FLAG_RESPONSE_REQUESTED needs to be set
* 0: otherwise
*/
-static int manage_keep_alive_before_sending(struct smbd_connection *info)
+static int manage_keep_alive_before_sending(struct smbdirect_socket *sc)
{
- if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
- info->keep_alive_requested = KEEP_ALIVE_SENT;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
+
+ if (sc->idle.keepalive == SMBDIRECT_KEEPALIVE_PENDING) {
+ sc->idle.keepalive = SMBDIRECT_KEEPALIVE_SENT;
+ /*
+ * Now use the keepalive timeout (instead of keepalive interval)
+ * in order to wait for a response
+ */
+ mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
+ msecs_to_jiffies(sp->keepalive_timeout_msec));
return 1;
}
return 0;
}
/* Post the send request */
-static int smbd_post_send(struct smbd_connection *info,
+static int smbd_post_send(struct smbdirect_socket *sc,
struct smbdirect_send_io *request)
{
- struct smbdirect_socket *sc = &info->socket;
- struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_send_wr send_wr;
int rc, i;
@@ -831,21 +1095,17 @@ static int smbd_post_send(struct smbd_connection *info,
rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
if (rc) {
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
- smbd_disconnect_rdma_connection(info);
+ smbd_disconnect_rdma_connection(sc);
rc = -EAGAIN;
- } else
- /* Reset timer for idle connection after packet is sent */
- mod_delayed_work(info->workqueue, &info->idle_timer_work,
- msecs_to_jiffies(sp->keepalive_interval_msec));
+ }
return rc;
}
-static int smbd_post_send_iter(struct smbd_connection *info,
+static int smbd_post_send_iter(struct smbdirect_socket *sc,
struct iov_iter *iter,
int *_remaining_data_length)
{
- struct smbdirect_socket *sc = &info->socket;
struct smbdirect_socket_parameters *sp = &sc->parameters;
int i, rc;
int header_length;
@@ -856,8 +1116,8 @@ static int smbd_post_send_iter(struct smbd_connection *info,
wait_credit:
/* Wait for send credits. A SMBD packet needs one credit */
- rc = wait_event_interruptible(info->wait_send_queue,
- atomic_read(&info->send_credits) > 0 ||
+ rc = wait_event_interruptible(sc->send_io.credits.wait_queue,
+ atomic_read(&sc->send_io.credits.count) > 0 ||
sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (rc)
goto err_wait_credit;
@@ -867,14 +1127,14 @@ wait_credit:
rc = -EAGAIN;
goto err_wait_credit;
}
- if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
- atomic_inc(&info->send_credits);
+ if (unlikely(atomic_dec_return(&sc->send_io.credits.count) < 0)) {
+ atomic_inc(&sc->send_io.credits.count);
goto wait_credit;
}
wait_send_queue:
- wait_event(info->wait_post_send,
- atomic_read(&info->send_pending) < sp->send_credit_target ||
+ wait_event(sc->send_io.pending.dec_wait_queue,
+ atomic_read(&sc->send_io.pending.count) < sp->send_credit_target ||
sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
@@ -883,9 +1143,9 @@ wait_send_queue:
goto err_wait_send_queue;
}
- if (unlikely(atomic_inc_return(&info->send_pending) >
+ if (unlikely(atomic_inc_return(&sc->send_io.pending.count) >
sp->send_credit_target)) {
- atomic_dec(&info->send_pending);
+ atomic_dec(&sc->send_io.pending.count);
goto wait_send_queue;
}
@@ -898,10 +1158,30 @@ wait_send_queue:
request->socket = sc;
memset(request->sge, 0, sizeof(request->sge));
+ /* Map the packet to DMA */
+ header_length = sizeof(struct smbdirect_data_transfer);
+ /* If this is a packet without payload, don't send padding */
+ if (!iter)
+ header_length = offsetof(struct smbdirect_data_transfer, padding);
+
+ packet = smbdirect_send_io_payload(request);
+ request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
+ (void *)packet,
+ header_length,
+ DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
+ rc = -EIO;
+ goto err_dma;
+ }
+
+ request->sge[0].length = header_length;
+ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
+ request->num_sge = 1;
+
/* Fill in the data payload to find out how much data we can add */
if (iter) {
struct smb_extract_to_rdma extract = {
- .nr_sge = 1,
+ .nr_sge = request->num_sge,
.max_sge = SMBDIRECT_SEND_IO_MAX_SGE,
.sge = request->sge,
.device = sc->ib.dev,
@@ -920,21 +1200,17 @@ wait_send_queue:
*_remaining_data_length -= data_length;
} else {
data_length = 0;
- request->num_sge = 1;
}
/* Fill in the packet header */
- packet = smbdirect_send_io_payload(request);
packet->credits_requested = cpu_to_le16(sp->send_credit_target);
- new_credits = manage_credits_prior_sending(info);
- atomic_add(new_credits, &info->receive_credits);
+ new_credits = manage_credits_prior_sending(sc);
+ atomic_add(new_credits, &sc->recv_io.credits.count);
packet->credits_granted = cpu_to_le16(new_credits);
- info->send_immediate = false;
-
packet->flags = 0;
- if (manage_keep_alive_before_sending(info))
+ if (manage_keep_alive_before_sending(sc))
packet->flags |= cpu_to_le16(SMBDIRECT_FLAG_RESPONSE_REQUESTED);
packet->reserved = 0;
@@ -953,26 +1229,7 @@ wait_send_queue:
le32_to_cpu(packet->data_length),
le32_to_cpu(packet->remaining_data_length));
- /* Map the packet to DMA */
- header_length = sizeof(struct smbdirect_data_transfer);
- /* If this is a packet without payload, don't send padding */
- if (!data_length)
- header_length = offsetof(struct smbdirect_data_transfer, padding);
-
- request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
- (void *)packet,
- header_length,
- DMA_TO_DEVICE);
- if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
- rc = -EIO;
- request->sge[0].addr = 0;
- goto err_dma;
- }
-
- request->sge[0].length = header_length;
- request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
-
- rc = smbd_post_send(info, request);
+ rc = smbd_post_send(sc, request);
if (!rc)
return 0;
@@ -985,19 +1242,16 @@ err_dma:
DMA_TO_DEVICE);
mempool_free(request, sc->send_io.mem.pool);
- /* roll back receive credits and credits to be offered */
- spin_lock(&info->lock_new_credits_offered);
- info->new_credits_offered += new_credits;
- spin_unlock(&info->lock_new_credits_offered);
- atomic_sub(new_credits, &info->receive_credits);
+ /* roll back the granted receive credits */
+ atomic_sub(new_credits, &sc->recv_io.credits.count);
err_alloc:
- if (atomic_dec_and_test(&info->send_pending))
- wake_up(&info->wait_send_pending);
+ if (atomic_dec_and_test(&sc->send_io.pending.count))
+ wake_up(&sc->send_io.pending.zero_wait_queue);
err_wait_send_queue:
/* roll back send credits and pending */
- atomic_inc(&info->send_credits);
+ atomic_inc(&sc->send_io.credits.count);
err_wait_credit:
return rc;
@@ -1008,15 +1262,15 @@ err_wait_credit:
 * Empty message is used to extend credits to the peer for keepalive
* while there is no upper layer payload to send at the time
*/
-static int smbd_post_send_empty(struct smbd_connection *info)
+static int smbd_post_send_empty(struct smbdirect_socket *sc)
{
int remaining_data_length = 0;
- info->count_send_empty++;
- return smbd_post_send_iter(info, NULL, &remaining_data_length);
+ sc->statistics.send_empty++;
+ return smbd_post_send_iter(sc, NULL, &remaining_data_length);
}
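The empty message above exists purely to deliver a credit grant when no payload is queued. A hedged illustration of the credit arithmetic, as a standalone userspace model rather than the driver's code (all names here are invented):

#include <stdio.h>

/* Toy model: every message consumes one of our send credits and
 * advertises how many new receives we posted since the last grant. */
struct credit_state {
	int send_credits;	/* messages we may still send */
	int unadvertised;	/* posted receives not yet granted */
};

static int take_grant(struct credit_state *s)
{
	int grant = s->unadvertised;

	s->unadvertised = 0;
	return grant;
}

int main(void)
{
	struct credit_state us = { .send_credits = 1, .unadvertised = 16 };

	us.send_credits--;	/* the empty keepalive message itself */
	printf("empty message carries credits_granted=%d\n", take_grant(&us));
	return 0;
}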
-static int smbd_post_send_full_iter(struct smbd_connection *info,
+static int smbd_post_send_full_iter(struct smbdirect_socket *sc,
struct iov_iter *iter,
int *_remaining_data_length)
{
@@ -1029,7 +1283,7 @@ static int smbd_post_send_full_iter(struct smbd_connection *info,
*/
while (iov_iter_count(iter) > 0) {
- rc = smbd_post_send_iter(info, iter, _remaining_data_length);
+ rc = smbd_post_send_iter(sc, iter, _remaining_data_length);
if (rc < 0)
break;
}
@@ -1043,9 +1297,8 @@ static int smbd_post_send_full_iter(struct smbd_connection *info,
* The interaction is controlled by the send/receive credit system
*/
static int smbd_post_recv(
- struct smbd_connection *info, struct smbdirect_recv_io *response)
+ struct smbdirect_socket *sc, struct smbdirect_recv_io *response)
{
- struct smbdirect_socket *sc = &info->socket;
struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_recv_wr recv_wr;
int rc = -EIO;
@@ -1071,7 +1324,7 @@ static int smbd_post_recv(
ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
response->sge.length, DMA_FROM_DEVICE);
response->sge.length = 0;
- smbd_disconnect_rdma_connection(info);
+ smbd_disconnect_rdma_connection(sc);
log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
}
@@ -1079,31 +1332,36 @@ static int smbd_post_recv(
}
/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
-static int smbd_negotiate(struct smbd_connection *info)
+static int smbd_negotiate(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
int rc;
- struct smbdirect_recv_io *response = get_receive_buffer(info);
+ struct smbdirect_recv_io *response = get_receive_buffer(sc);
+
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_NEGOTIATE_NEEDED);
+ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING;
sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REP;
- rc = smbd_post_recv(info, response);
+ rc = smbd_post_recv(sc, response);
log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
rc, response->sge.addr,
response->sge.length, response->sge.lkey);
- if (rc)
+ if (rc) {
+ put_receive_buffer(sc, response);
return rc;
+ }
- init_completion(&info->negotiate_completion);
- info->negotiate_done = false;
- rc = smbd_post_send_negotiate_req(info);
+ rc = smbd_post_send_negotiate_req(sc);
if (rc)
return rc;
- rc = wait_for_completion_interruptible_timeout(
- &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
- log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
+ rc = wait_event_interruptible_timeout(
+ sc->status_wait,
+ sc->status != SMBDIRECT_SOCKET_NEGOTIATE_RUNNING,
+ msecs_to_jiffies(sp->negotiate_timeout_msec));
+ log_rdma_event(INFO, "wait_event_interruptible_timeout rc=%d\n", rc);
- if (info->negotiate_done)
+ if (sc->status == SMBDIRECT_SOCKET_CONNECTED)
return 0;
if (rc == 0)
@@ -1127,13 +1385,13 @@ static int smbd_negotiate(struct smbd_connection *info)
* data_length: the size of payload in this packet
*/
static void enqueue_reassembly(
- struct smbd_connection *info,
+ struct smbdirect_socket *sc,
struct smbdirect_recv_io *response,
int data_length)
{
- struct smbdirect_socket *sc = &info->socket;
+ unsigned long flags;
- spin_lock(&sc->recv_io.reassembly.lock);
+ spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
list_add_tail(&response->list, &sc->recv_io.reassembly.list);
sc->recv_io.reassembly.queue_length++;
/*
@@ -1144,9 +1402,8 @@ static void enqueue_reassembly(
*/
virt_wmb();
sc->recv_io.reassembly.data_length += data_length;
- spin_unlock(&sc->recv_io.reassembly.lock);
- info->count_reassembly_queue++;
- info->count_enqueue_reassembly_queue++;
+ spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
+ sc->statistics.enqueue_reassembly_queue++;
}
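The virt_wmb() above pairs with a read barrier on the consumer side in smbd_recv(): the reader must observe the list insertion before it trusts the enlarged data_length. The same publish/consume discipline expressed in portable C11 atomics, as a model under that assumption rather than the kernel code:

#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* stands in for the list entry */
static _Atomic int data_length;		/* stands in for reassembly.data_length */

static void producer(void)
{
	payload = 42;					/* list_add_tail() */
	/* release: the entry is visible before the length update */
	atomic_store_explicit(&data_length, 4, memory_order_release);
}

static void consumer(void)
{
	/* acquire: seeing the new length implies seeing the entry */
	if (atomic_load_explicit(&data_length, memory_order_acquire) >= 4)
		printf("payload=%d\n", payload);	/* never stale */
}

int main(void)
{
	producer();
	consumer();
	return 0;
}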
/*
@@ -1154,9 +1411,8 @@ static void enqueue_reassembly(
* Caller is responsible for locking
* return value: the first entry if any, NULL if queue is empty
*/
-static struct smbdirect_recv_io *_get_first_reassembly(struct smbd_connection *info)
+static struct smbdirect_recv_io *_get_first_reassembly(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
struct smbdirect_recv_io *ret = NULL;
if (!list_empty(&sc->recv_io.reassembly.list)) {
@@ -1173,9 +1429,8 @@ static struct smbdirect_recv_io *_get_first_reassembly(struct smbd_connection *i
* pre-allocated.
* return value: the receive buffer, NULL if none is available
*/
-static struct smbdirect_recv_io *get_receive_buffer(struct smbd_connection *info)
+static struct smbdirect_recv_io *get_receive_buffer(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
struct smbdirect_recv_io *ret = NULL;
unsigned long flags;
@@ -1185,8 +1440,7 @@ static struct smbdirect_recv_io *get_receive_buffer(struct smbd_connection *info
&sc->recv_io.free.list,
struct smbdirect_recv_io, list);
list_del(&ret->list);
- info->count_receive_queue--;
- info->count_get_receive_buffer++;
+ sc->statistics.get_receive_buffer++;
}
spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);
@@ -1200,9 +1454,8 @@ static struct smbdirect_recv_io *get_receive_buffer(struct smbd_connection *info
* receive buffer is returned.
*/
static void put_receive_buffer(
- struct smbd_connection *info, struct smbdirect_recv_io *response)
+ struct smbdirect_socket *sc, struct smbdirect_recv_io *response)
{
- struct smbdirect_socket *sc = &info->socket;
unsigned long flags;
if (likely(response->sge.length != 0)) {
@@ -1215,31 +1468,18 @@ static void put_receive_buffer(
spin_lock_irqsave(&sc->recv_io.free.lock, flags);
list_add_tail(&response->list, &sc->recv_io.free.list);
- info->count_receive_queue++;
- info->count_put_receive_buffer++;
+ sc->statistics.put_receive_buffer++;
spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);
- queue_work(info->workqueue, &info->post_send_credits_work);
+ queue_work(sc->workqueue, &sc->recv_io.posted.refill_work);
}
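Returning a buffer to the free list is what ultimately funds the next credit grant: refill_work re-posts free buffers, and only re-posted receives may be advertised to the peer. A minimal standalone sketch of that lifecycle (names invented):

#include <stdio.h>

struct pool {
	int free_bufs;		/* returned, not yet re-posted */
	int posted;		/* receives the hardware can fill */
	int unadvertised;	/* creditable on the next send */
};

static void put_buffer(struct pool *p)
{
	p->free_bufs++;			/* put_receive_buffer() */
}

static void refill(struct pool *p)
{
	while (p->free_bufs > 0) {	/* refill_work re-posting */
		p->free_bufs--;
		p->posted++;
		p->unadvertised++;
	}
}

int main(void)
{
	struct pool p = { .free_bufs = 0, .posted = 15, .unadvertised = 0 };

	put_buffer(&p);			/* one message consumed and recycled */
	refill(&p);
	printf("posted=%d creditable=%d\n", p.posted, p.unadvertised);
	return 0;
}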
/* Preallocate all receive buffers on transport establishment */
-static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+static int allocate_receive_buffers(struct smbdirect_socket *sc, int num_buf)
{
- struct smbdirect_socket *sc = &info->socket;
struct smbdirect_recv_io *response;
int i;
- INIT_LIST_HEAD(&sc->recv_io.reassembly.list);
- spin_lock_init(&sc->recv_io.reassembly.lock);
- sc->recv_io.reassembly.data_length = 0;
- sc->recv_io.reassembly.queue_length = 0;
-
- INIT_LIST_HEAD(&sc->recv_io.free.list);
- spin_lock_init(&sc->recv_io.free.lock);
- info->count_receive_queue = 0;
-
- init_waitqueue_head(&info->wait_receive_queues);
-
for (i = 0; i < num_buf; i++) {
response = mempool_alloc(sc->recv_io.mem.pool, GFP_KERNEL);
if (!response)
@@ -1248,7 +1488,6 @@ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
response->socket = sc;
response->sge.length = 0;
list_add_tail(&response->list, &sc->recv_io.free.list);
- info->count_receive_queue++;
}
return 0;
@@ -1259,45 +1498,59 @@ allocate_failed:
&sc->recv_io.free.list,
struct smbdirect_recv_io, list);
list_del(&response->list);
- info->count_receive_queue--;
mempool_free(response, sc->recv_io.mem.pool);
}
return -ENOMEM;
}
-static void destroy_receive_buffers(struct smbd_connection *info)
+static void destroy_receive_buffers(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
struct smbdirect_recv_io *response;
- while ((response = get_receive_buffer(info)))
+ while ((response = get_receive_buffer(sc)))
mempool_free(response, sc->recv_io.mem.pool);
}
+static void send_immediate_empty_message(struct work_struct *work)
+{
+ struct smbdirect_socket *sc =
+ container_of(work, struct smbdirect_socket, idle.immediate_work);
+
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
+ return;
+
+ log_keep_alive(INFO, "send an empty message\n");
+ smbd_post_send_empty(sc);
+}
+
/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
static void idle_connection_timer(struct work_struct *work)
{
- struct smbd_connection *info = container_of(
- work, struct smbd_connection,
- idle_timer_work.work);
- struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket *sc =
+ container_of(work, struct smbdirect_socket, idle.timer_work.work);
struct smbdirect_socket_parameters *sp = &sc->parameters;
- if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
+ if (sc->idle.keepalive != SMBDIRECT_KEEPALIVE_NONE) {
log_keep_alive(ERR,
- "error status info->keep_alive_requested=%d\n",
- info->keep_alive_requested);
- smbd_disconnect_rdma_connection(info);
+ "error status sc->idle.keepalive=%d\n",
+ sc->idle.keepalive);
+ smbd_disconnect_rdma_connection(sc);
return;
}
- log_keep_alive(INFO, "about to send an empty idle message\n");
- smbd_post_send_empty(info);
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
+ return;
- /* Setup the next idle timeout work */
- queue_delayed_work(info->workqueue, &info->idle_timer_work,
- msecs_to_jiffies(sp->keepalive_interval_msec));
+ /*
+ * Now use the keepalive timeout (instead of keepalive interval)
+ * in order to wait for a response
+ */
+ sc->idle.keepalive = SMBDIRECT_KEEPALIVE_PENDING;
+ mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
+ msecs_to_jiffies(sp->keepalive_timeout_msec));
+ log_keep_alive(INFO, "schedule send of empty idle message\n");
+ queue_work(sc->workqueue, &sc->idle.immediate_work);
}
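The rework turns the idle timer into a two-phase state machine: from SMBDIRECT_KEEPALIVE_NONE it schedules a probe and rearms with the shorter response timeout, and a second expiry while still pending means the peer is dead. A standalone model of those transitions (the timeout value is invented):

#include <stdio.h>

enum keepalive { KEEPALIVE_NONE, KEEPALIVE_PENDING };

/* One timer expiry: returns the rearm delay in seconds, or -1 to
 * signal that the connection should be torn down. */
static int timer_fired(enum keepalive *state)
{
	if (*state == KEEPALIVE_PENDING)
		return -1;		/* probe never answered */

	*state = KEEPALIVE_PENDING;	/* empty probe goes out now */
	return 5;			/* rearm with the response timeout */
}

static void response_received(enum keepalive *state)
{
	*state = KEEPALIVE_NONE;	/* peer is alive */
}

int main(void)
{
	enum keepalive s = KEEPALIVE_NONE;

	printf("rearm=%d\n", timer_fired(&s));	/* probe sent, rearm=5 */
	response_received(&s);
	printf("rearm=%d\n", timer_fired(&s));	/* healthy: next cycle */
	return 0;
}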
/*
@@ -1309,7 +1562,6 @@ void smbd_destroy(struct TCP_Server_Info *server)
{
struct smbd_connection *info = server->smbd_conn;
struct smbdirect_socket *sc;
- struct smbdirect_socket_parameters *sp;
struct smbdirect_recv_io *response;
unsigned long flags;
@@ -1318,39 +1570,51 @@ void smbd_destroy(struct TCP_Server_Info *server)
return;
}
sc = &info->socket;
- sp = &sc->parameters;
+
+ log_rdma_event(INFO, "cancelling and disabling disconnect_work\n");
+ disable_work_sync(&sc->disconnect_work);
log_rdma_event(INFO, "destroying rdma session\n");
- if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
- rdma_disconnect(sc->rdma.cm_id);
+ if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) {
+ smbd_disconnect_rdma_work(&sc->disconnect_work);
log_rdma_event(INFO, "wait for transport being disconnected\n");
wait_event_interruptible(
- info->status_wait,
+ sc->status_wait,
sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
}
+ /*
+ * Wake up all waiters in all wait queues
+ * so that they notice the broken connection.
+ *
+ * Most likely this was already called via
+ * smbd_disconnect_rdma_work(), but call it again...
+ */
+ smbd_disconnect_wake_up_all(sc);
+
+ log_rdma_event(INFO, "cancelling recv_io.posted.refill_work\n");
+ disable_work_sync(&sc->recv_io.posted.refill_work);
+
log_rdma_event(INFO, "destroying qp\n");
ib_drain_qp(sc->ib.qp);
rdma_destroy_qp(sc->rdma.cm_id);
sc->ib.qp = NULL;
log_rdma_event(INFO, "cancelling idle timer\n");
- cancel_delayed_work_sync(&info->idle_timer_work);
-
- log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
- wait_event(info->wait_send_pending,
- atomic_read(&info->send_pending) == 0);
+ disable_delayed_work_sync(&sc->idle.timer_work);
+ log_rdma_event(INFO, "cancelling send immediate work\n");
+ disable_work_sync(&sc->idle.immediate_work);
/* It's not possible for the upper layer to reach the reassembly queue */
log_rdma_event(INFO, "drain the reassembly queue\n");
do {
spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
- response = _get_first_reassembly(info);
+ response = _get_first_reassembly(sc);
if (response) {
list_del(&response->list);
spin_unlock_irqrestore(
&sc->recv_io.reassembly.lock, flags);
- put_receive_buffer(info, response);
+ put_receive_buffer(sc, response);
} else
spin_unlock_irqrestore(
&sc->recv_io.reassembly.lock, flags);
@@ -1358,9 +1622,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
sc->recv_io.reassembly.data_length = 0;
log_rdma_event(INFO, "free receive buffers\n");
- wait_event(info->wait_receive_queues,
- info->count_receive_queue == sp->recv_credit_max);
- destroy_receive_buffers(info);
+ destroy_receive_buffers(sc);
/*
* For performance reasons, memory registration and deregistration
@@ -1370,13 +1632,12 @@ void smbd_destroy(struct TCP_Server_Info *server)
* path when sending data, and then release memory registrations.
*/
log_rdma_event(INFO, "freeing mr list\n");
- wake_up_interruptible_all(&info->wait_mr);
- while (atomic_read(&info->mr_used_count)) {
+ while (atomic_read(&sc->mr_io.used.count)) {
cifs_server_unlock(server);
msleep(1000);
cifs_server_lock(server);
}
- destroy_mr_list(info);
+ destroy_mr_list(sc);
ib_free_cq(sc->ib.send_cq);
ib_free_cq(sc->ib.recv_cq);
@@ -1392,7 +1653,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
sc->status = SMBDIRECT_SOCKET_DESTROYED;
- destroy_workqueue(info->workqueue);
+ destroy_workqueue(sc->workqueue);
log_rdma_event(INFO, "rdma session destroyed\n");
kfree(info);
server->smbd_conn = NULL;
@@ -1434,12 +1695,9 @@ create_conn:
return -ENOENT;
}
-static void destroy_caches_and_workqueue(struct smbd_connection *info)
+static void destroy_caches(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
-
- destroy_receive_buffers(info);
- destroy_workqueue(info->workqueue);
+ destroy_receive_buffers(sc);
mempool_destroy(sc->recv_io.mem.pool);
kmem_cache_destroy(sc->recv_io.mem.cache);
mempool_destroy(sc->send_io.mem.pool);
@@ -1447,9 +1705,8 @@ static void destroy_caches_and_workqueue(struct smbd_connection *info)
}
#define MAX_NAME_LEN 80
-static int allocate_caches_and_workqueue(struct smbd_connection *info)
+static int allocate_caches(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
struct smbdirect_socket_parameters *sp = &sc->parameters;
char name[MAX_NAME_LEN];
int rc;
@@ -1457,7 +1714,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
if (WARN_ON_ONCE(sp->max_recv_size < sizeof(struct smbdirect_data_transfer)))
return -ENOMEM;
- scnprintf(name, MAX_NAME_LEN, "smbdirect_send_io_%p", info);
+ scnprintf(name, MAX_NAME_LEN, "smbdirect_send_io_%p", sc);
sc->send_io.mem.cache =
kmem_cache_create(
name,
@@ -1473,7 +1730,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
if (!sc->send_io.mem.pool)
goto out1;
- scnprintf(name, MAX_NAME_LEN, "smbdirect_recv_io_%p", info);
+ scnprintf(name, MAX_NAME_LEN, "smbdirect_recv_io_%p", sc);
struct kmem_cache_args response_args = {
.align = __alignof__(struct smbdirect_recv_io),
@@ -1494,21 +1751,14 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
if (!sc->recv_io.mem.pool)
goto out3;
- scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
- info->workqueue = create_workqueue(name);
- if (!info->workqueue)
- goto out4;
-
- rc = allocate_receive_buffers(info, sp->recv_credit_max);
+ rc = allocate_receive_buffers(sc, sp->recv_credit_max);
if (rc) {
log_rdma_event(ERR, "failed to allocate receive buffers\n");
- goto out5;
+ goto out4;
}
return 0;
-out5:
- destroy_workqueue(info->workqueue);
out4:
mempool_destroy(sc->recv_io.mem.pool);
out3:
@@ -1532,46 +1782,63 @@ static struct smbd_connection *_smbd_get_connection(
struct ib_qp_init_attr qp_attr;
struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
struct ib_port_immutable port_immutable;
- u32 ird_ord_hdr[2];
+ __be32 ird_ord_hdr[2];
+ char wq_name[80];
+ struct workqueue_struct *workqueue;
info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
if (!info)
return NULL;
sc = &info->socket;
+ scnprintf(wq_name, ARRAY_SIZE(wq_name), "smbd_%p", sc);
+ workqueue = create_workqueue(wq_name);
+ if (!workqueue)
+ goto create_wq_failed;
+ smbdirect_socket_init(sc);
+ sc->workqueue = workqueue;
sp = &sc->parameters;
- sc->status = SMBDIRECT_SOCKET_CONNECTING;
- rc = smbd_ia_open(info, dstaddr, port);
+ INIT_WORK(&sc->disconnect_work, smbd_disconnect_rdma_work);
+
+ sp->resolve_addr_timeout_msec = RDMA_RESOLVE_TIMEOUT;
+ sp->resolve_route_timeout_msec = RDMA_RESOLVE_TIMEOUT;
+ sp->rdma_connect_timeout_msec = RDMA_RESOLVE_TIMEOUT;
+ sp->negotiate_timeout_msec = SMBD_NEGOTIATE_TIMEOUT * 1000;
+ sp->initiator_depth = 1;
+ sp->responder_resources = SMBD_CM_RESPONDER_RESOURCES;
+ sp->recv_credit_max = smbd_receive_credit_max;
+ sp->send_credit_target = smbd_send_credit_target;
+ sp->max_send_size = smbd_max_send_size;
+ sp->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+ sp->max_recv_size = smbd_max_receive_size;
+ sp->max_frmr_depth = smbd_max_frmr_depth;
+ sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;
+ sp->keepalive_timeout_msec = KEEPALIVE_RECV_TIMEOUT * 1000;
+
+ rc = smbd_ia_open(sc, dstaddr, port);
if (rc) {
log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
goto create_id_failed;
}
- if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
- smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
+ if (sp->send_credit_target > sc->ib.dev->attrs.max_cqe ||
+ sp->send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
- smbd_send_credit_target,
+ sp->send_credit_target,
sc->ib.dev->attrs.max_cqe,
sc->ib.dev->attrs.max_qp_wr);
goto config_failed;
}
- if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
- smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
+ if (sp->recv_credit_max > sc->ib.dev->attrs.max_cqe ||
+ sp->recv_credit_max > sc->ib.dev->attrs.max_qp_wr) {
log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
- smbd_receive_credit_max,
+ sp->recv_credit_max,
sc->ib.dev->attrs.max_cqe,
sc->ib.dev->attrs.max_qp_wr);
goto config_failed;
}
- sp->recv_credit_max = smbd_receive_credit_max;
- sp->send_credit_target = smbd_send_credit_target;
- sp->max_send_size = smbd_max_send_size;
- sp->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
- sp->max_recv_size = smbd_max_receive_size;
- sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;
-
if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE ||
sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) {
log_rdma_event(ERR,
@@ -1583,8 +1850,16 @@ static struct smbd_connection *_smbd_get_connection(
goto config_failed;
}
+ sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
+ if (IS_ERR(sc->ib.pd)) {
+ rc = PTR_ERR(sc->ib.pd);
+ sc->ib.pd = NULL;
+ log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
+ goto alloc_pd_failed;
+ }
+
sc->ib.send_cq =
- ib_alloc_cq_any(sc->ib.dev, info,
+ ib_alloc_cq_any(sc->ib.dev, sc,
sp->send_credit_target, IB_POLL_SOFTIRQ);
if (IS_ERR(sc->ib.send_cq)) {
sc->ib.send_cq = NULL;
@@ -1592,7 +1867,7 @@ static struct smbd_connection *_smbd_get_connection(
}
sc->ib.recv_cq =
- ib_alloc_cq_any(sc->ib.dev, info,
+ ib_alloc_cq_any(sc->ib.dev, sc,
sp->recv_credit_max, IB_POLL_SOFTIRQ);
if (IS_ERR(sc->ib.recv_cq)) {
sc->ib.recv_cq = NULL;
@@ -1601,7 +1876,7 @@ static struct smbd_connection *_smbd_get_connection(
memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.event_handler = smbd_qp_async_error_upcall;
- qp_attr.qp_context = info;
+ qp_attr.qp_context = sc;
qp_attr.cap.max_send_wr = sp->send_credit_target;
qp_attr.cap.max_recv_wr = sp->recv_credit_max;
qp_attr.cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
@@ -1620,22 +1895,22 @@ static struct smbd_connection *_smbd_get_connection(
}
sc->ib.qp = sc->rdma.cm_id->qp;
- memset(&conn_param, 0, sizeof(conn_param));
- conn_param.initiator_depth = 0;
-
- conn_param.responder_resources =
- min(sc->ib.dev->attrs.max_qp_rd_atom,
- SMBD_CM_RESPONDER_RESOURCES);
- info->responder_resources = conn_param.responder_resources;
+ sp->responder_resources =
+ min_t(u8, sp->responder_resources,
+ sc->ib.dev->attrs.max_qp_rd_atom);
log_rdma_mr(INFO, "responder_resources=%d\n",
- info->responder_resources);
+ sp->responder_resources);
+
+ memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.initiator_depth = sp->initiator_depth;
+ conn_param.responder_resources = sp->responder_resources;
/* Need to send IRD/ORD in private data for iWARP */
sc->ib.dev->ops.get_port_immutable(
sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
- ird_ord_hdr[0] = info->responder_resources;
- ird_ord_hdr[1] = 1;
+ ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources);
+ ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth);
conn_param.private_data = ird_ord_hdr;
conn_param.private_data_len = sizeof(ird_ord_hdr);
} else {
@@ -1650,8 +1925,8 @@ static struct smbd_connection *_smbd_get_connection(
log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
&addr_in->sin_addr, port);
- init_waitqueue_head(&info->status_wait);
- init_waitqueue_head(&sc->recv_io.reassembly.wait_queue);
+ WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED);
+ sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING;
rc = rdma_connect(sc->rdma.cm_id, &conn_param);
if (rc) {
log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
@@ -1659,45 +1934,42 @@ static struct smbd_connection *_smbd_get_connection(
}
wait_event_interruptible_timeout(
- info->status_wait,
- sc->status != SMBDIRECT_SOCKET_CONNECTING,
- msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ sc->status_wait,
+ sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING,
+ msecs_to_jiffies(sp->rdma_connect_timeout_msec));
- if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_NEGOTIATE_NEEDED) {
log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
goto rdma_connect_failed;
}
log_rdma_event(INFO, "rdma_connect connected\n");
- rc = allocate_caches_and_workqueue(info);
+ rc = allocate_caches(sc);
if (rc) {
log_rdma_event(ERR, "cache allocation failed\n");
goto allocate_cache_failed;
}
- init_waitqueue_head(&info->wait_send_queue);
- INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
- queue_delayed_work(info->workqueue, &info->idle_timer_work,
- msecs_to_jiffies(sp->keepalive_interval_msec));
-
- init_waitqueue_head(&info->wait_send_pending);
- atomic_set(&info->send_pending, 0);
-
- init_waitqueue_head(&info->wait_post_send);
+ INIT_WORK(&sc->idle.immediate_work, send_immediate_empty_message);
+ INIT_DELAYED_WORK(&sc->idle.timer_work, idle_connection_timer);
+ /*
+ * Start with the negotiate timeout and SMBDIRECT_KEEPALIVE_PENDING
+ * so that the timer will cause a disconnect if negotiation stalls.
+ */
+ sc->idle.keepalive = SMBDIRECT_KEEPALIVE_PENDING;
+ mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
+ msecs_to_jiffies(sp->negotiate_timeout_msec));
- INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
- INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
- info->new_credits_offered = 0;
- spin_lock_init(&info->lock_new_credits_offered);
+ INIT_WORK(&sc->recv_io.posted.refill_work, smbd_post_send_credits);
- rc = smbd_negotiate(info);
+ rc = smbd_negotiate(sc);
if (rc) {
log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
goto negotiation_failed;
}
- rc = allocate_mr_list(info);
+ rc = allocate_mr_list(sc);
if (rc) {
log_rdma_mr(ERR, "memory registration allocation failed\n");
goto allocate_mr_failed;
@@ -1712,11 +1984,11 @@ allocate_mr_failed:
return NULL;
negotiation_failed:
- cancel_delayed_work_sync(&info->idle_timer_work);
- destroy_caches_and_workqueue(info);
+ disable_delayed_work_sync(&sc->idle.timer_work);
+ destroy_caches(sc);
sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
rdma_disconnect(sc->rdma.cm_id);
- wait_event(info->status_wait,
+ wait_event(sc->status_wait,
sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
allocate_cache_failed:
@@ -1730,11 +2002,15 @@ alloc_cq_failed:
if (sc->ib.recv_cq)
ib_free_cq(sc->ib.recv_cq);
-config_failed:
ib_dealloc_pd(sc->ib.pd);
+
+alloc_pd_failed:
+config_failed:
rdma_destroy_id(sc->rdma.cm_id);
create_id_failed:
+ destroy_workqueue(sc->workqueue);
+create_wq_failed:
kfree(info);
return NULL;
}
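The u32 to __be32 change above is not cosmetic: the iWARP connect private data carries the IRD/ORD values as 32-bit big-endian fields, so building them host-endian silently breaks on little-endian machines. A userspace illustration with htonl() standing in for cpu_to_be32():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ird_ord_hdr[2];
	uint8_t responder_resources = 32, initiator_depth = 1;

	/* htonl() plays the role of cpu_to_be32() in userspace */
	ird_ord_hdr[0] = htonl(responder_resources);
	ird_ord_hdr[1] = htonl(initiator_depth);

	/* big-endian layout: the most significant byte goes first */
	printf("first wire byte: %u\n",
	       ((uint8_t *)ird_ord_hdr)[0]);	/* 0 on any host */
	return 0;
}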
@@ -1743,6 +2019,7 @@ struct smbd_connection *smbd_get_connection(
struct TCP_Server_Info *server, struct sockaddr *dstaddr)
{
struct smbd_connection *ret;
+ const struct smbdirect_socket_parameters *sp;
int port = SMBD_PORT;
try_again:
@@ -1753,6 +2030,16 @@ try_again:
port = SMB_PORT;
goto try_again;
}
+ if (!ret)
+ return NULL;
+
+ sp = &ret->socket.parameters;
+
+ server->rdma_readwrite_threshold =
+ rdma_readwrite_threshold > sp->max_fragmented_send_size ?
+ sp->max_fragmented_send_size :
+ rdma_readwrite_threshold;
+
return ret;
}
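server->rdma_readwrite_threshold, clamped above to the negotiated max_fragmented_send_size, picks the transfer strategy: payloads at or below it travel inline via RDMA send/recv, while larger ones get a memory registration and move via RDMA read/write. A small standalone illustration (the threshold value is invented):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Payloads up to the threshold travel inline via RDMA send/recv;
 * larger ones are registered as MRs and moved by RDMA read/write. */
static bool use_rdma_readwrite(size_t payload, size_t threshold)
{
	return payload > threshold;
}

int main(void)
{
	size_t threshold = 524288;	/* after the clamp above */

	printf("4KiB -> %s\n", use_rdma_readwrite(4096, threshold) ?
	       "rdma read/write" : "send/recv");
	printf("1MiB -> %s\n", use_rdma_readwrite(1 << 20, threshold) ?
	       "rdma read/write" : "send/recv");
	return 0;
}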
@@ -1794,6 +2081,7 @@ again:
if (sc->recv_io.reassembly.data_length >= size) {
int queue_length;
int queue_removed = 0;
+ unsigned long flags;
/*
* Need to make sure reassembly_data_length is read before
@@ -1808,7 +2096,7 @@ again:
to_read = size;
offset = sc->recv_io.reassembly.first_entry_offset;
while (data_read < size) {
- response = _get_first_reassembly(info);
+ response = _get_first_reassembly(sc);
data_transfer = smbdirect_recv_io_payload(response);
data_length = le32_to_cpu(data_transfer->data_length);
remaining_data_length =
@@ -1853,16 +2141,15 @@ again:
if (queue_length)
list_del(&response->list);
else {
- spin_lock_irq(
- &sc->recv_io.reassembly.lock);
+ spin_lock_irqsave(
+ &sc->recv_io.reassembly.lock, flags);
list_del(&response->list);
- spin_unlock_irq(
- &sc->recv_io.reassembly.lock);
+ spin_unlock_irqrestore(
+ &sc->recv_io.reassembly.lock, flags);
}
queue_removed++;
- info->count_reassembly_queue--;
- info->count_dequeue_reassembly_queue++;
- put_receive_buffer(info, response);
+ sc->statistics.dequeue_reassembly_queue++;
+ put_receive_buffer(sc, response);
offset = 0;
log_read(INFO, "put_receive_buffer offset=0\n");
} else
@@ -1876,10 +2163,10 @@ again:
to_read, data_read, offset);
}
- spin_lock_irq(&sc->recv_io.reassembly.lock);
+ spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
sc->recv_io.reassembly.data_length -= data_read;
sc->recv_io.reassembly.queue_length -= queue_removed;
- spin_unlock_irq(&sc->recv_io.reassembly.lock);
+ spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
sc->recv_io.reassembly.first_entry_offset = offset;
log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
@@ -1964,13 +2251,13 @@ int smbd_send(struct TCP_Server_Info *server,
klen += rqst->rq_iov[i].iov_len;
iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen);
- rc = smbd_post_send_full_iter(info, &iter, &remaining_data_length);
+ rc = smbd_post_send_full_iter(sc, &iter, &remaining_data_length);
if (rc < 0)
break;
if (iov_iter_count(&rqst->rq_iter) > 0) {
/* And then the data pages if there are any */
- rc = smbd_post_send_full_iter(info, &rqst->rq_iter,
+ rc = smbd_post_send_full_iter(sc, &rqst->rq_iter,
&remaining_data_length);
if (rc < 0)
break;
@@ -1985,22 +2272,25 @@ int smbd_send(struct TCP_Server_Info *server,
* that means all the I/Os have gone out and we are good to return
*/
- wait_event(info->wait_send_pending,
- atomic_read(&info->send_pending) == 0);
+ wait_event(sc->send_io.pending.zero_wait_queue,
+ atomic_read(&sc->send_io.pending.count) == 0 ||
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
+
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED && rc == 0)
+ rc = -EAGAIN;
return rc;
}
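Each smbd_post_send_iter() call consumes part of the iterator and decrements remaining_data_length before the header is built, so every fragment advertises how much of the upper-layer message is still outstanding. A standalone model of that loop (the sizes are invented):

#include <stdio.h>

int main(void)
{
	int total = 2500, max_fragment = 1024;
	int remaining = total;

	while (remaining > 0) {
		int this_len = remaining < max_fragment ? remaining
							: max_fragment;

		remaining -= this_len;	/* decremented before the header */
		printf("fragment len=%d remaining_data_length=%d\n",
		       this_len, remaining);
	}
	return 0;
}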
static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct smbd_mr *mr;
- struct ib_cqe *cqe;
+ struct smbdirect_mr_io *mr =
+ container_of(wc->wr_cqe, struct smbdirect_mr_io, cqe);
+ struct smbdirect_socket *sc = mr->socket;
if (wc->status) {
log_rdma_mr(ERR, "status=%d\n", wc->status);
- cqe = wc->wr_cqe;
- mr = container_of(cqe, struct smbd_mr, cqe);
- smbd_disconnect_rdma_connection(mr->conn);
+ smbd_disconnect_rdma_connection(sc);
}
}
@@ -2015,14 +2305,14 @@ static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
*/
static void smbd_mr_recovery_work(struct work_struct *work)
{
- struct smbd_connection *info =
- container_of(work, struct smbd_connection, mr_recovery_work);
- struct smbdirect_socket *sc = &info->socket;
- struct smbd_mr *smbdirect_mr;
+ struct smbdirect_socket *sc =
+ container_of(work, struct smbdirect_socket, mr_io.recovery_work);
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
+ struct smbdirect_mr_io *smbdirect_mr;
int rc;
- list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
- if (smbdirect_mr->state == MR_ERROR) {
+ list_for_each_entry(smbdirect_mr, &sc->mr_io.all.list, list) {
+ if (smbdirect_mr->state == SMBDIRECT_MR_ERROR) {
/* recover this MR entry */
rc = ib_dereg_mr(smbdirect_mr->mr);
@@ -2030,25 +2320,25 @@ static void smbd_mr_recovery_work(struct work_struct *work)
log_rdma_mr(ERR,
"ib_dereg_mr failed rc=%x\n",
rc);
- smbd_disconnect_rdma_connection(info);
+ smbd_disconnect_rdma_connection(sc);
continue;
}
smbdirect_mr->mr = ib_alloc_mr(
- sc->ib.pd, info->mr_type,
- info->max_frmr_depth);
+ sc->ib.pd, sc->mr_io.type,
+ sp->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
- info->mr_type,
- info->max_frmr_depth);
- smbd_disconnect_rdma_connection(info);
+ sc->mr_io.type,
+ sp->max_frmr_depth);
+ smbd_disconnect_rdma_connection(sc);
continue;
}
} else
/* This MR is being used, don't recover it */
continue;
- smbdirect_mr->state = MR_READY;
+ smbdirect_mr->state = SMBDIRECT_MR_READY;
/* smbdirect_mr->state is updated by this function
* and is read and updated by I/O issuing CPUs trying
@@ -2057,19 +2347,18 @@ static void smbd_mr_recovery_work(struct work_struct *work)
* value is updated before waking up any calls to
* get_mr() from the I/O issuing CPUs
*/
- if (atomic_inc_return(&info->mr_ready_count) == 1)
- wake_up_interruptible(&info->wait_mr);
+ if (atomic_inc_return(&sc->mr_io.ready.count) == 1)
+ wake_up(&sc->mr_io.ready.wait_queue);
}
}
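The recovery pass only touches SMBDIRECT_MR_ERROR entries, re-allocating the ib_mr while leaving in-flight registrations alone; the list links themselves never change. A standalone model of one pass (names invented):

#include <stdio.h>

enum mr_state { MR_READY, MR_REGISTERED, MR_INVALIDATED, MR_ERROR };

static const char *names[] = {
	"ready", "registered", "invalidated", "error"
};

/* One recovery pass: only error entries are re-allocated; MRs that
 * are in flight (or already usable) are left untouched. */
static void recover(enum mr_state *mrs, int n)
{
	for (int i = 0; i < n; i++) {
		if (mrs[i] != MR_ERROR)
			continue;
		/* ib_dereg_mr() + ib_alloc_mr() would happen here */
		mrs[i] = MR_READY;
	}
}

int main(void)
{
	enum mr_state mrs[] = { MR_READY, MR_REGISTERED, MR_ERROR };

	recover(mrs, 3);
	for (int i = 0; i < 3; i++)
		printf("mr[%d]=%s\n", i, names[mrs[i]]);
	return 0;
}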
-static void destroy_mr_list(struct smbd_connection *info)
+static void destroy_mr_list(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
- struct smbd_mr *mr, *tmp;
+ struct smbdirect_mr_io *mr, *tmp;
- cancel_work_sync(&info->mr_recovery_work);
- list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
- if (mr->state == MR_INVALIDATED)
+ disable_work_sync(&sc->mr_io.recovery_work);
+ list_for_each_entry_safe(mr, tmp, &sc->mr_io.all.list, list) {
+ if (mr->state == SMBDIRECT_MR_INVALIDATED)
ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
mr->sgt.nents, mr->dir);
ib_dereg_mr(mr->mr);
@@ -2085,32 +2374,32 @@ static void destroy_mr_list(struct smbd_connection *info)
* Recovery is done in smbd_mr_recovery_work. The content of a list entry changes
* as MRs are used and recovered for I/O, but the list links will not change
*/
-static int allocate_mr_list(struct smbd_connection *info)
+static int allocate_mr_list(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
int i;
- struct smbd_mr *smbdirect_mr, *tmp;
-
- INIT_LIST_HEAD(&info->mr_list);
- init_waitqueue_head(&info->wait_mr);
- spin_lock_init(&info->mr_list_lock);
- atomic_set(&info->mr_ready_count, 0);
- atomic_set(&info->mr_used_count, 0);
- init_waitqueue_head(&info->wait_for_mr_cleanup);
- INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+ struct smbdirect_mr_io *smbdirect_mr, *tmp;
+
+ INIT_WORK(&sc->mr_io.recovery_work, smbd_mr_recovery_work);
+
+ if (sp->responder_resources == 0) {
+ log_rdma_mr(ERR, "responder_resources negotiated as 0\n");
+ return -EINVAL;
+ }
+
/* Allocate more MRs (2x) than hardware responder_resources */
- for (i = 0; i < info->responder_resources * 2; i++) {
+ for (i = 0; i < sp->responder_resources * 2; i++) {
smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
if (!smbdirect_mr)
goto cleanup_entries;
- smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
- info->max_frmr_depth);
+ smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, sc->mr_io.type,
+ sp->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
- info->mr_type, info->max_frmr_depth);
+ sc->mr_io.type, sp->max_frmr_depth);
goto out;
}
- smbdirect_mr->sgt.sgl = kcalloc(info->max_frmr_depth,
+ smbdirect_mr->sgt.sgl = kcalloc(sp->max_frmr_depth,
sizeof(struct scatterlist),
GFP_KERNEL);
if (!smbdirect_mr->sgt.sgl) {
@@ -2118,18 +2407,18 @@ static int allocate_mr_list(struct smbd_connection *info)
ib_dereg_mr(smbdirect_mr->mr);
goto out;
}
- smbdirect_mr->state = MR_READY;
- smbdirect_mr->conn = info;
+ smbdirect_mr->state = SMBDIRECT_MR_READY;
+ smbdirect_mr->socket = sc;
- list_add_tail(&smbdirect_mr->list, &info->mr_list);
- atomic_inc(&info->mr_ready_count);
+ list_add_tail(&smbdirect_mr->list, &sc->mr_io.all.list);
+ atomic_inc(&sc->mr_io.ready.count);
}
return 0;
out:
kfree(smbdirect_mr);
cleanup_entries:
- list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
+ list_for_each_entry_safe(smbdirect_mr, tmp, &sc->mr_io.all.list, list) {
list_del(&smbdirect_mr->list);
ib_dereg_mr(smbdirect_mr->mr);
kfree(smbdirect_mr->sgt.sgl);
@@ -2146,14 +2435,14 @@ cleanup_entries:
* issuing I/O trying to get an MR at the same time, mr_list_lock is used to
* protect this situation.
*/
-static struct smbd_mr *get_mr(struct smbd_connection *info)
+static struct smbdirect_mr_io *get_mr(struct smbdirect_socket *sc)
{
- struct smbdirect_socket *sc = &info->socket;
- struct smbd_mr *ret;
+ struct smbdirect_mr_io *ret;
+ unsigned long flags;
int rc;
again:
- rc = wait_event_interruptible(info->wait_mr,
- atomic_read(&info->mr_ready_count) ||
+ rc = wait_event_interruptible(sc->mr_io.ready.wait_queue,
+ atomic_read(&sc->mr_io.ready.count) ||
sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (rc) {
log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
@@ -2165,18 +2454,18 @@ again:
return NULL;
}
- spin_lock(&info->mr_list_lock);
- list_for_each_entry(ret, &info->mr_list, list) {
- if (ret->state == MR_READY) {
- ret->state = MR_REGISTERED;
- spin_unlock(&info->mr_list_lock);
- atomic_dec(&info->mr_ready_count);
- atomic_inc(&info->mr_used_count);
+ spin_lock_irqsave(&sc->mr_io.all.lock, flags);
+ list_for_each_entry(ret, &sc->mr_io.all.list, list) {
+ if (ret->state == SMBDIRECT_MR_READY) {
+ ret->state = SMBDIRECT_MR_REGISTERED;
+ spin_unlock_irqrestore(&sc->mr_io.all.lock, flags);
+ atomic_dec(&sc->mr_io.ready.count);
+ atomic_inc(&sc->mr_io.used.count);
return ret;
}
}
- spin_unlock(&info->mr_list_lock);
+ spin_unlock_irqrestore(&sc->mr_io.all.lock, flags);
/*
* It is possible that we could fail to get an MR because other processes may
* try to acquire an MR at the same time. If this is the case, retry it.
@@ -2187,8 +2476,7 @@ again:
/*
* Transcribe the pages from an iterator into an MR scatterlist.
*/
-static int smbd_iter_to_mr(struct smbd_connection *info,
- struct iov_iter *iter,
+static int smbd_iter_to_mr(struct iov_iter *iter,
struct sg_table *sgt,
unsigned int max_sg)
{
@@ -2210,25 +2498,26 @@ static int smbd_iter_to_mr(struct smbd_connection *info,
* need_invalidate: true if this MR needs to be locally invalidated after I/O
* return value: the MR registered, NULL if failed.
*/
-struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+struct smbdirect_mr_io *smbd_register_mr(struct smbd_connection *info,
struct iov_iter *iter,
bool writing, bool need_invalidate)
{
struct smbdirect_socket *sc = &info->socket;
- struct smbd_mr *smbdirect_mr;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
+ struct smbdirect_mr_io *smbdirect_mr;
int rc, num_pages;
enum dma_data_direction dir;
struct ib_reg_wr *reg_wr;
- num_pages = iov_iter_npages(iter, info->max_frmr_depth + 1);
- if (num_pages > info->max_frmr_depth) {
+ num_pages = iov_iter_npages(iter, sp->max_frmr_depth + 1);
+ if (num_pages > sp->max_frmr_depth) {
log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
- num_pages, info->max_frmr_depth);
+ num_pages, sp->max_frmr_depth);
WARN_ON_ONCE(1);
return NULL;
}
- smbdirect_mr = get_mr(info);
+ smbdirect_mr = get_mr(sc);
if (!smbdirect_mr) {
log_rdma_mr(ERR, "get_mr returning NULL\n");
return NULL;
@@ -2241,8 +2530,8 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
smbdirect_mr->sgt.orig_nents = 0;
log_rdma_mr(INFO, "num_pages=0x%x count=0x%zx depth=%u\n",
- num_pages, iov_iter_count(iter), info->max_frmr_depth);
- smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
+ num_pages, iov_iter_count(iter), sp->max_frmr_depth);
+ smbd_iter_to_mr(iter, &smbdirect_mr->sgt, sp->max_frmr_depth);
rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents, dir);
@@ -2287,32 +2576,32 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
rc, reg_wr->key);
- /* If all failed, attempt to recover this MR by setting it MR_ERROR*/
+ /* If all failed, attempt to recover this MR by setting it SMBDIRECT_MR_ERROR*/
map_mr_error:
ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents, smbdirect_mr->dir);
dma_map_error:
- smbdirect_mr->state = MR_ERROR;
- if (atomic_dec_and_test(&info->mr_used_count))
- wake_up(&info->wait_for_mr_cleanup);
+ smbdirect_mr->state = SMBDIRECT_MR_ERROR;
+ if (atomic_dec_and_test(&sc->mr_io.used.count))
+ wake_up(&sc->mr_io.cleanup.wait_queue);
- smbd_disconnect_rdma_connection(info);
+ smbd_disconnect_rdma_connection(sc);
return NULL;
}
static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct smbd_mr *smbdirect_mr;
+ struct smbdirect_mr_io *smbdirect_mr;
struct ib_cqe *cqe;
cqe = wc->wr_cqe;
- smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
- smbdirect_mr->state = MR_INVALIDATED;
+ smbdirect_mr = container_of(cqe, struct smbdirect_mr_io, cqe);
+ smbdirect_mr->state = SMBDIRECT_MR_INVALIDATED;
if (wc->status != IB_WC_SUCCESS) {
log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
- smbdirect_mr->state = MR_ERROR;
+ smbdirect_mr->state = SMBDIRECT_MR_ERROR;
}
complete(&smbdirect_mr->invalidate_done);
}
@@ -2323,11 +2612,10 @@ static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
* and we have to locally invalidate the buffer to prevent the data from being
* modified by the remote peer after the upper layer consumes it
*/
-int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+int smbd_deregister_mr(struct smbdirect_mr_io *smbdirect_mr)
{
struct ib_send_wr *wr;
- struct smbd_connection *info = smbdirect_mr->conn;
- struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket *sc = smbdirect_mr->socket;
int rc = 0;
if (smbdirect_mr->need_invalidate) {
@@ -2344,36 +2632,36 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
rc = ib_post_send(sc->ib.qp, wr, NULL);
if (rc) {
log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
- smbd_disconnect_rdma_connection(info);
+ smbd_disconnect_rdma_connection(sc);
goto done;
}
wait_for_completion(&smbdirect_mr->invalidate_done);
smbdirect_mr->need_invalidate = false;
} else
/*
- * For remote invalidation, just set it to MR_INVALIDATED
+ * For remote invalidation, just set it to SMBDIRECT_MR_INVALIDATED
* and defer to mr_recovery_work to recover the MR for next use
*/
- smbdirect_mr->state = MR_INVALIDATED;
+ smbdirect_mr->state = SMBDIRECT_MR_INVALIDATED;
- if (smbdirect_mr->state == MR_INVALIDATED) {
+ if (smbdirect_mr->state == SMBDIRECT_MR_INVALIDATED) {
ib_dma_unmap_sg(
sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents,
smbdirect_mr->dir);
- smbdirect_mr->state = MR_READY;
- if (atomic_inc_return(&info->mr_ready_count) == 1)
- wake_up_interruptible(&info->wait_mr);
+ smbdirect_mr->state = SMBDIRECT_MR_READY;
+ if (atomic_inc_return(&sc->mr_io.ready.count) == 1)
+ wake_up(&sc->mr_io.ready.wait_queue);
} else
/*
* Schedule the work to do MR recovery for future I/Os MR
* recovery is slow and don't want it to block current I/O
*/
- queue_work(info->workqueue, &info->mr_recovery_work);
+ queue_work(sc->workqueue, &sc->mr_io.recovery_work);
done:
- if (atomic_dec_and_test(&info->mr_used_count))
- wake_up(&info->wait_for_mr_cleanup);
+ if (atomic_dec_and_test(&sc->mr_io.used.count))
+ wake_up(&sc->mr_io.cleanup.wait_queue);
return rc;
}
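The tail of smbd_deregister_mr() forks on the MR state: an invalidated MR is DMA-unmapped and recycled inline, while anything else is handed to the recovery worker so the I/O path is never blocked on slow re-allocation. A standalone model of that decision (names invented):

#include <stdio.h>

enum mr_state { MR_REGISTERED, MR_INVALIDATED, MR_ERROR };

/* Toy fork at the end of deregistration: invalidated MRs are
 * unmapped and recycled inline; anything else goes to the slow
 * recovery worker so the I/O path never waits on ib_alloc_mr. */
static const char *deregister_tail(enum mr_state state)
{
	if (state == MR_INVALIDATED) {
		/* ib_dma_unmap_sg(), then back on the ready list */
		return "recycled inline";
	}
	return "queued to recovery worker";
}

int main(void)
{
	printf("invalidated: %s\n", deregister_tail(MR_INVALIDATED));
	printf("error:       %s\n", deregister_tail(MR_ERROR));
	return 0;
}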
diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
index e45aa9ddd71d..d67ac5ddaff4 100644
--- a/fs/smb/client/smbdirect.h
+++ b/fs/smb/client/smbdirect.h
@@ -27,12 +27,6 @@ extern int smbd_max_send_size;
extern int smbd_send_credit_target;
extern int smbd_receive_credit_max;
-enum keep_alive_status {
- KEEP_ALIVE_NONE,
- KEEP_ALIVE_PENDING,
- KEEP_ALIVE_SENT,
-};
-
/*
* The context for the SMBDirect transport
* Everything related to the transport is here. It has several logical parts
@@ -44,79 +38,14 @@ enum keep_alive_status {
*/
struct smbd_connection {
struct smbdirect_socket socket;
-
- int ri_rc;
- struct completion ri_done;
- wait_queue_head_t status_wait;
-
- struct completion negotiate_completion;
- bool negotiate_done;
-
- struct work_struct disconnect_work;
- struct work_struct post_send_credits_work;
-
- spinlock_t lock_new_credits_offered;
- int new_credits_offered;
-
- /* dynamic connection parameters defined in [MS-SMBD] 3.1.1.1 */
- enum keep_alive_status keep_alive_requested;
- int protocol;
- atomic_t send_credits;
- atomic_t receive_credits;
- int receive_credit_target;
-
- /* Memory registrations */
- /* Maximum number of RDMA read/write outstanding on this connection */
- int responder_resources;
- /* Maximum number of pages in a single RDMA write/read on this connection */
- int max_frmr_depth;
- /*
- * If payload is less than or equal to the threshold,
- * use RDMA send/recv to send upper layer I/O.
- * If payload is more than the threshold,
- * use RDMA read/write through memory registration for I/O.
- */
- int rdma_readwrite_threshold;
- enum ib_mr_type mr_type;
- struct list_head mr_list;
- spinlock_t mr_list_lock;
- /* The number of available MRs ready for memory registration */
- atomic_t mr_ready_count;
- atomic_t mr_used_count;
- wait_queue_head_t wait_mr;
- struct work_struct mr_recovery_work;
- /* Used by transport to wait until all MRs are returned */
- wait_queue_head_t wait_for_mr_cleanup;
-
- /* Activity accounting */
- atomic_t send_pending;
- wait_queue_head_t wait_send_pending;
- wait_queue_head_t wait_post_send;
-
- /* Receive queue */
- int count_receive_queue;
- wait_queue_head_t wait_receive_queues;
-
- bool send_immediate;
-
- wait_queue_head_t wait_send_queue;
-
- struct workqueue_struct *workqueue;
- struct delayed_work idle_timer_work;
-
- /* for debug purposes */
- unsigned int count_get_receive_buffer;
- unsigned int count_put_receive_buffer;
- unsigned int count_reassembly_queue;
- unsigned int count_enqueue_reassembly_queue;
- unsigned int count_dequeue_reassembly_queue;
- unsigned int count_send_empty;
};
/* Create a SMBDirect session */
struct smbd_connection *smbd_get_connection(
struct TCP_Server_Info *server, struct sockaddr *dstaddr);
+const struct smbdirect_socket_parameters *smbd_get_parameters(struct smbd_connection *conn);
+
/* Reconnect SMBDirect session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy SMBDirect session */
@@ -127,34 +56,11 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct TCP_Server_Info *server,
int num_rqst, struct smb_rqst *rqst);
-enum mr_state {
- MR_READY,
- MR_REGISTERED,
- MR_INVALIDATED,
- MR_ERROR
-};
-
-struct smbd_mr {
- struct smbd_connection *conn;
- struct list_head list;
- enum mr_state state;
- struct ib_mr *mr;
- struct sg_table sgt;
- enum dma_data_direction dir;
- union {
- struct ib_reg_wr wr;
- struct ib_send_wr inv_wr;
- };
- struct ib_cqe cqe;
- bool need_invalidate;
- struct completion invalidate_done;
-};
-
/* Interfaces to register and deregister MR for RDMA read/write */
-struct smbd_mr *smbd_register_mr(
+struct smbdirect_mr_io *smbd_register_mr(
struct smbd_connection *info, struct iov_iter *iter,
bool writing, bool need_invalidate);
-int smbd_deregister_mr(struct smbd_mr *mr);
+int smbd_deregister_mr(struct smbdirect_mr_io *mr);
#else
#define cifs_rdma_enabled(server) 0
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 93e5b2bb9f28..fd650e2afc76 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -669,13 +669,12 @@ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_info_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(posix_query_info_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(hardlink_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rename_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(rmdir_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(unlink_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_reparse_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(get_reparse_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_wsl_ea_compound_enter);
-DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mknod_enter);
@@ -710,13 +709,12 @@ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_info_compound_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(posix_query_info_compound_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(hardlink_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rename_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(rmdir_done);
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(unlink_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_eof_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_info_compound_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(set_reparse_compound_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(get_reparse_compound_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_wsl_ea_compound_done);
-DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(tdis_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mknod_done);
@@ -756,14 +754,13 @@ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_info_compound_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(posix_query_info_compound_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(hardlink_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rename_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(rmdir_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(unlink_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_eof_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_info_compound_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(set_reparse_compound_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(get_reparse_compound_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_wsl_ea_compound_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
-DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(tdis_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mknod_err);
@@ -1171,8 +1168,54 @@ DEFINE_EVENT(smb3_lease_done_class, smb3_##name, \
__u64 lease_key_high), \
TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
-DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
-DEFINE_SMB3_LEASE_DONE_EVENT(lease_not_found);
+DEFINE_SMB3_LEASE_DONE_EVENT(lease_ack_done);
+/* Tracepoint when a lease break request is received/entered (includes epoch and flags) */
+DECLARE_EVENT_CLASS(smb3_lease_enter_class,
+ TP_PROTO(__u32 lease_state,
+ __u32 flags,
+ __u16 epoch,
+ __u32 tid,
+ __u64 sesid,
+ __u64 lease_key_low,
+ __u64 lease_key_high),
+ TP_ARGS(lease_state, flags, epoch, tid, sesid, lease_key_low, lease_key_high),
+ TP_STRUCT__entry(
+ __field(__u32, lease_state)
+ __field(__u32, flags)
+ __field(__u16, epoch)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u64, lease_key_low)
+ __field(__u64, lease_key_high)
+ ),
+ TP_fast_assign(
+ __entry->lease_state = lease_state;
+ __entry->flags = flags;
+ __entry->epoch = epoch;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->lease_key_low = lease_key_low;
+ __entry->lease_key_high = lease_key_high;
+ ),
+ TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x flags=0x%x epoch=%u",
+ __entry->sesid, __entry->tid, __entry->lease_key_high,
+ __entry->lease_key_low, __entry->lease_state, __entry->flags, __entry->epoch)
+)
+
+#define DEFINE_SMB3_LEASE_ENTER_EVENT(name) \
+DEFINE_EVENT(smb3_lease_enter_class, smb3_##name, \
+ TP_PROTO(__u32 lease_state, \
+ __u32 flags, \
+ __u16 epoch, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u64 lease_key_low, \
+ __u64 lease_key_high), \
+ TP_ARGS(lease_state, flags, epoch, tid, sesid, lease_key_low, lease_key_high))
+
+DEFINE_SMB3_LEASE_ENTER_EVENT(lease_break_enter);
+/* Lease not found: reuse lease_enter payload (includes epoch and flags) */
+DEFINE_SMB3_LEASE_ENTER_EVENT(lease_not_found);
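Each DEFINE_SMB3_LEASE_ENTER_EVENT(name) above expands into a trace_smb3_<name>() call site. A hedged sketch of how the lease-break handler might emit it; the request field names and local variables are placeholders, not taken from this patch:

/* Illustrative only: req fields and locals here are assumptions */
trace_smb3_lease_break_enter(le32_to_cpu(req->NewLeaseState),
			     le32_to_cpu(req->Flags),
			     le16_to_cpu(req->Epoch),
			     tcon->tid, ses->Suid,
			     lease_key_low, lease_key_high);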
DECLARE_EVENT_CLASS(smb3_lease_err_class,
TP_PROTO(__u32 lease_state,
@@ -1213,7 +1256,7 @@ DEFINE_EVENT(smb3_lease_err_class, smb3_##name, \
int rc), \
TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc))
-DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
+DEFINE_SMB3_LEASE_ERR_EVENT(lease_ack_err);
DECLARE_EVENT_CLASS(smb3_connect_class,
TP_PROTO(char *hostname,
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 32d528b4dd83..051cd9dbba13 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -22,6 +22,7 @@
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
+#include <linux/task_work.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
@@ -173,9 +174,16 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
* send a packet. In most cases if we fail to send
* after the retries we will kill the socket and
* reconnect which may clear the network problem.
+ *
+ * Even if regular signals are masked, EINTR might be
+ * propagated from sk_stream_wait_memory() to here when
+ * TIF_NOTIFY_SIGNAL is used for task work. For example,
+ * certain io_uring completions will use that. Treat
+ * having EINTR with pending task work the same as EAGAIN
+ * to avoid unnecessary reconnects.
*/
rc = sock_sendmsg(ssocket, smb_msg);
- if (rc == -EAGAIN) {
+ if (rc == -EAGAIN || unlikely(rc == -EINTR && task_work_pending(current))) {
retries++;
if (retries >= 14 ||
(!server->noblocksnd && (retries > 2))) {
@@ -323,8 +331,7 @@ int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
break;
total_len += sent;
}
-
-}
+ }
unmask:
sigprocmask(SIG_SETMASK, &oldmask, NULL);
@@ -1005,15 +1012,14 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]);
- spin_lock(&server->mid_queue_lock);
+ spin_lock(&midQ[i]->mid_lock);
midQ[i]->wait_cancelled = true;
- if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
- midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
+ if (midQ[i]->callback) {
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
credits[i].value = 0;
}
- spin_unlock(&server->mid_queue_lock);
+ spin_unlock(&midQ[i]->mid_lock);
}
}