Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/v9fs.c  16
-rw-r--r--  fs/9p/vfs_dentry.c  33
-rw-r--r--  fs/9p/vfs_inode.c  8
-rw-r--r--  fs/9p/vfs_inode_dotl.c  8
-rw-r--r--  fs/btrfs/extent_io.c  2
-rw-r--r--  fs/btrfs/free-space-tree.c  15
-rw-r--r--  fs/btrfs/ioctl.c  2
-rw-r--r--  fs/btrfs/relocation.c  13
-rw-r--r--  fs/btrfs/scrub.c  4
-rw-r--r--  fs/btrfs/send.c  4
-rw-r--r--  fs/btrfs/super.c  3
-rw-r--r--  fs/btrfs/tree-checker.c  2
-rw-r--r--  fs/btrfs/zoned.c  2
-rw-r--r--  fs/ceph/dir.c  3
-rw-r--r--  fs/ceph/file.c  30
-rw-r--r--  fs/ceph/inode.c  11
-rw-r--r--  fs/ceph/io.c  100
-rw-r--r--  fs/ceph/io.h  8
-rw-r--r--  fs/ceph/ioctl.c  17
-rw-r--r--  fs/ceph/locks.c  5
-rw-r--r--  fs/ceph/mds_client.c  22
-rw-r--r--  fs/ceph/mdsmap.c  14
-rw-r--r--  fs/ceph/super.c  14
-rw-r--r--  fs/ceph/super.h  17
-rw-r--r--  fs/coredump.c  2
-rw-r--r--  fs/dax.c  2
-rw-r--r--  fs/dcache.c  2
-rw-r--r--  fs/exec.c  2
-rw-r--r--  fs/exfat/exfat_fs.h  1
-rw-r--r--  fs/exfat/file.c  7
-rw-r--r--  fs/exfat/namei.c  8
-rw-r--r--  fs/exfat/nls.c  3
-rw-r--r--  fs/ext4/ext4_jbd2.c  11
-rw-r--r--  fs/ext4/inode.c  8
-rw-r--r--  fs/ext4/orphan.c  4
-rw-r--r--  fs/f2fs/data.c  2
-rw-r--r--  fs/f2fs/super.c  2
-rw-r--r--  fs/file_attr.c  16
-rw-r--r--  fs/file_table.c  2
-rw-r--r--  fs/fuse/ioctl.c  4
-rw-r--r--  fs/hpfs/anode.c  43
-rw-r--r--  fs/hpfs/ea.c  2
-rw-r--r--  fs/hpfs/file.c  4
-rw-r--r--  fs/hpfs/hpfs.h  44
-rw-r--r--  fs/hpfs/map.c  8
-rw-r--r--  fs/hpfs/namei.c  18
-rw-r--r--  fs/hpfs/super.c  8
-rw-r--r--  fs/iomap/direct-io.c  3
-rw-r--r--  fs/jbd2/transaction.c  13
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c  35
-rw-r--r--  fs/nfs/nfs4client.c  1
-rw-r--r--  fs/nfs/nfs4proc.c  13
-rw-r--r--  fs/nfs/write.c  3
-rw-r--r--  fs/nfsd/flexfilelayout.c  8
-rw-r--r--  fs/nsfs.c  4
-rw-r--r--  fs/overlayfs/copy_up.c  2
-rw-r--r--  fs/overlayfs/file.c  5
-rw-r--r--  fs/overlayfs/inode.c  5
-rw-r--r--  fs/smb/client/Kconfig  7
-rw-r--r--  fs/smb/client/cached_dir.c  37
-rw-r--r--  fs/smb/client/cached_dir.h  1
-rw-r--r--  fs/smb/client/cifs_spnego.c  12
-rw-r--r--  fs/smb/client/cifs_swn.c  20
-rw-r--r--  fs/smb/client/cifsacl.c  5
-rw-r--r--  fs/smb/client/cifsencrypt.c  201
-rw-r--r--  fs/smb/client/cifsfs.c  26
-rw-r--r--  fs/smb/client/cifsfs.h  4
-rw-r--r--  fs/smb/client/cifsglob.h  27
-rw-r--r--  fs/smb/client/cifsproto.h  10
-rw-r--r--  fs/smb/client/cifssmb.c  4
-rw-r--r--  fs/smb/client/dir.c  38
-rw-r--r--  fs/smb/client/file.c  105
-rw-r--r--  fs/smb/client/inode.c  157
-rw-r--r--  fs/smb/client/link.c  31
-rw-r--r--  fs/smb/client/misc.c  17
-rw-r--r--  fs/smb/client/sess.c  2
-rw-r--r--  fs/smb/client/smb1ops.c  69
-rw-r--r--  fs/smb/client/smb2inode.c  24
-rw-r--r--  fs/smb/client/smb2misc.c  53
-rw-r--r--  fs/smb/client/smb2ops.c  342
-rw-r--r--  fs/smb/client/smb2pdu.c  2
-rw-r--r--  fs/smb/client/smb2pdu.h  16
-rw-r--r--  fs/smb/client/smb2proto.h  8
-rw-r--r--  fs/smb/client/smb2transport.c  169
-rw-r--r--  fs/smb/client/smbdirect.c  321
-rw-r--r--  fs/smb/client/smbdirect.h  2
-rw-r--r--  fs/smb/client/trace.h  2
-rw-r--r--  fs/smb/client/xattr.c  1
-rw-r--r--  fs/smb/common/cifsglob.h  30
-rw-r--r--  fs/smb/common/smbdirect/smbdirect_socket.h  11
-rw-r--r--  fs/smb/server/mgmt/user_session.c  7
-rw-r--r--  fs/smb/server/smb2pdu.c  11
-rw-r--r--  fs/smb/server/smb_common.h  14
-rw-r--r--  fs/smb/server/transport_ipc.c  12
-rw-r--r--  fs/smb/server/transport_rdma.c  20
95 files changed, 1404 insertions, 1062 deletions
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 77e9c4387c1d..a020a8f00a1a 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -438,8 +438,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags &= ~V9FS_ACCESS_MASK;
v9ses->flags |= V9FS_ACCESS_USER;
}
- /*FIXME !! */
- /* for legacy mode, fall back to V9FS_ACCESS_ANY */
+ /* FIXME: for legacy mode, fall back to V9FS_ACCESS_ANY */
if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) &&
((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
@@ -450,7 +449,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
if (!v9fs_proto_dotl(v9ses) ||
!((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
/*
- * We support ACL checks on clinet only if the protocol is
+ * We support ACL checks on client only if the protocol is
* 9P2000.L and access is V9FS_ACCESS_CLIENT.
*/
v9ses->flags &= ~V9FS_ACL_MASK;
@@ -561,7 +560,7 @@ static ssize_t caches_show(struct kobject *kobj,
spin_lock(&v9fs_sessionlist_lock);
list_for_each_entry(v9ses, &v9fs_sessionlist, slist) {
if (v9ses->cachetag) {
- n = snprintf(buf, limit, "%s\n", v9ses->cachetag);
+ n = snprintf(buf + count, limit, "%s\n", v9ses->cachetag);
if (n < 0) {
count = n;
break;
@@ -597,13 +596,16 @@ static const struct attribute_group v9fs_attr_group = {
static int __init v9fs_sysfs_init(void)
{
+ int ret;
+
v9fs_kobj = kobject_create_and_add("9p", fs_kobj);
if (!v9fs_kobj)
return -ENOMEM;
- if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) {
+ ret = sysfs_create_group(v9fs_kobj, &v9fs_attr_group);
+ if (ret) {
kobject_put(v9fs_kobj);
- return -ENOMEM;
+ return ret;
}
return 0;
@@ -669,7 +671,7 @@ static int __init init_v9fs(void)
int err;
pr_info("Installing v9fs 9p2000 file system support\n");
- /* TODO: Setup list of registered trasnport modules */
+ /* TODO: Setup list of registered transport modules */
err = v9fs_init_inode_cache();
if (err < 0) {
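Aside: the caches_show() hunk above switches the destination to buf + count so each session's cache tag is appended rather than written over the previous one. A minimal userspace sketch of that append-at-offset pattern (names invented; the sketch also subtracts count from the remaining space, an assumption of the sketch rather than a copy of the kernel code):

#include <stdio.h>

static int show_tags(char *buf, size_t limit, const char *const *tags, int ntags)
{
	size_t count = 0;

	for (int i = 0; i < ntags; i++) {
		/* write at the current offset, never past the buffer */
		int n = snprintf(buf + count, limit - count, "%s\n", tags[i]);

		if (n < 0)
			return n;               /* formatting error */
		if ((size_t)n >= limit - count)
			break;                  /* buffer full, stop cleanly */
		count += n;
	}
	return (int)count;
}

int main(void)
{
	const char *tags[] = { "alpha", "beta", "gamma" };
	char buf[64];
	int len = show_tags(buf, sizeof(buf), tags, 3);

	if (len >= 0)
		fwrite(buf, 1, (size_t)len, stdout);
	return 0;
}

Without the offset, every iteration would start writing at buf again and only the last tag would survive.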
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 04795508a795..f3248a3e5402 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -66,6 +66,7 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
struct p9_fid *fid;
struct inode *inode;
struct v9fs_inode *v9inode;
+ unsigned int cached;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -75,13 +76,22 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
goto out_valid;
v9inode = V9FS_I(inode);
- if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
+ struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
+
+ cached = v9ses->cache & (CACHE_META | CACHE_LOOSE);
+
+ if (!cached || v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
int retval;
struct v9fs_session_info *v9ses;
fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
+ if (IS_ERR(fid)) {
+ p9_debug(
+ P9_DEBUG_VFS,
+ "v9fs_fid_lookup: dentry = %pd (%p), got error %pe\n",
+ dentry, dentry, fid);
return PTR_ERR(fid);
+ }
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
@@ -90,12 +100,25 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
retval = v9fs_refresh_inode(fid, inode);
p9_fid_put(fid);
- if (retval == -ENOENT)
+ if (retval == -ENOENT) {
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) invalidated due to ENOENT\n",
+ dentry, dentry);
+ return 0;
+ }
+ if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) invalidated due to type change\n",
+ dentry, dentry);
return 0;
- if (retval < 0)
+ }
+ if (retval < 0) {
+ p9_debug(P9_DEBUG_VFS,
+ "refresh inode: dentry = %pd (%p), got error %pe\n",
+ dentry, dentry, ERR_PTR(retval));
return retval;
+ }
}
out_valid:
+ p9_debug(P9_DEBUG_VFS, "dentry: %pd (%p) is valid\n", dentry, dentry);
return 1;
}
@@ -127,6 +150,8 @@ const struct dentry_operations v9fs_cached_dentry_operations = {
};
const struct dentry_operations v9fs_dentry_operations = {
+ .d_revalidate = v9fs_lookup_revalidate,
+ .d_weak_revalidate = __v9fs_lookup_revalidate,
.d_release = v9fs_dentry_release,
.d_unalias_trylock = v9fs_dentry_unalias_trylock,
.d_unalias_unlock = v9fs_dentry_unalias_unlock,
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index d0c77ec31b1d..69f378a83775 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1339,8 +1339,14 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
* Don't update inode if the file type is different
*/
umode = p9mode2unixmode(v9ses, st, &rdev);
- if (inode_wrong_type(inode, umode))
+ if (inode_wrong_type(inode, umode)) {
+ /*
+ * Do this as a way of letting the caller know the inode should not
+ * be reused
+ */
+ v9fs_invalidate_inode_attr(inode);
goto out;
+ }
/*
* We don't want to refresh inode->i_size,
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index be297e335468..0b404e8484d2 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -897,8 +897,14 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
/*
* Don't update inode if the file type is different
*/
- if (inode_wrong_type(inode, st->st_mode))
+ if (inode_wrong_type(inode, st->st_mode)) {
+ /*
+ * Do this as a way of letting the caller know the inode should not
+ * be reused
+ */
+ v9fs_invalidate_inode_attr(inode);
goto out;
+ }
/*
* We don't want to refresh inode->i_size,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c123a3ef154a..755ec6dfd51c 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -973,7 +973,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
{
const u64 ra_pos = readahead_pos(ractl);
const u64 ra_end = ra_pos + readahead_length(ractl);
- const u64 em_end = em->start + em->ram_bytes;
+ const u64 em_end = em->start + em->len;
/* No expansion for holes and inline extents. */
if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE)
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index dad0b492a663..d86541073d42 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1106,14 +1106,15 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
* If ret is 1 (no key found), it means this is an empty block group,
* without any extents allocated from it and there's no block group
* item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
- * because we are using the block group tree feature, so block group
- * items are stored in the block group tree. It also means there are no
- * extents allocated for block groups with a start offset beyond this
- * block group's end offset (this is the last, highest, block group).
+ * because we are using the block group tree feature (so block group
+ * items are stored in the block group tree) or this is a new block
+ * group created in the current transaction and its block group item
+ * was not yet inserted in the extent tree (that happens in
+ * btrfs_create_pending_block_groups() -> insert_block_group_item()).
+ * It also means there are no extents allocated for block groups with a
+ * start offset beyond this block group's end offset (this is the last,
+ * highest, block group).
*/
- if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE))
- ASSERT(ret == 0);
-
start = block_group->start;
end = block_group->start + block_group->length;
while (ret == 0) {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 185bef0df1c2..8cb7d5a462ef 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3740,7 +3740,7 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
if (!prealloc) {
ret = -ENOMEM;
- goto drop_write;
+ goto out;
}
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8dd8de6b9fb8..0765e06d00b8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3780,6 +3780,7 @@ out:
/*
* Mark start of chunk relocation that is cancellable. Check if the cancellation
* has been requested meanwhile and don't start in that case.
+ * NOTE: if this returns an error, reloc_chunk_end() must not be called.
*
* Return:
* 0 success
@@ -3796,10 +3797,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
btrfs_info(fs_info, "chunk relocation canceled on start");
- /*
- * On cancel, clear all requests but let the caller mark
- * the end after cleanup operations.
- */
+ /* On cancel, clear all requests. */
+ clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
atomic_set(&fs_info->reloc_cancel_req, 0);
return -ECANCELED;
}
@@ -3808,9 +3807,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
/*
* Mark end of chunk relocation that is cancellable and wake any waiters.
+ * NOTE: call only if a previous call to reloc_chunk_start() succeeded.
*/
static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
{
+ ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
/* Requested after start, clear bit first so any waiters can continue */
if (atomic_read(&fs_info->reloc_cancel_req) > 0)
btrfs_info(fs_info, "chunk relocation canceled during operation");
@@ -4023,9 +4024,9 @@ out:
if (err && rw)
btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
+ reloc_chunk_end(fs_info);
out_put_bg:
btrfs_put_block_group(bg);
- reloc_chunk_end(fs_info);
free_reloc_control(rc);
return err;
}
@@ -4208,8 +4209,8 @@ out_clean:
ret = ret2;
out_unset:
unset_reloc_control(rc);
-out_end:
reloc_chunk_end(fs_info);
+out_end:
free_reloc_control(rc);
out:
free_reloc_roots(&reloc_roots);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 4691d0bdb2e8..651b11884f82 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -694,7 +694,7 @@ static void *scrub_stripe_get_kaddr(struct scrub_stripe *stripe, int sector_nr)
/* stripe->folios[] is allocated by us and no highmem is allowed. */
ASSERT(folio);
- ASSERT(!folio_test_partial_kmap(folio));
+ ASSERT(!folio_test_highmem(folio));
return folio_address(folio) + offset_in_folio(folio, offset);
}
@@ -707,7 +707,7 @@ static phys_addr_t scrub_stripe_get_paddr(struct scrub_stripe *stripe, int secto
/* stripe->folios[] is allocated by us and no highmem is allowed. */
ASSERT(folio);
- ASSERT(!folio_test_partial_kmap(folio));
+ ASSERT(!folio_test_highmem(folio));
/* And the range must be contained inside the folio. */
ASSERT(offset_in_folio(folio, offset) + fs_info->sectorsize <= folio_size(folio));
return page_to_phys(folio_page(folio, 0)) + offset_in_folio(folio, offset);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9230e5066fc6..6144e66661f5 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -178,7 +178,6 @@ struct send_ctx {
u64 cur_inode_rdev;
u64 cur_inode_last_extent;
u64 cur_inode_next_write_offset;
- struct fs_path cur_inode_path;
bool cur_inode_new;
bool cur_inode_new_gen;
bool cur_inode_deleted;
@@ -305,6 +304,9 @@ struct send_ctx {
struct btrfs_lru_cache dir_created_cache;
struct btrfs_lru_cache dir_utimes_cache;
+
+ /* Must be last as it ends in a flexible-array member. */
+ struct fs_path cur_inode_path;
};
struct pending_dir_move {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d6e496436539..aadc02374b2a 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1900,8 +1900,6 @@ static int btrfs_get_tree_super(struct fs_context *fc)
return PTR_ERR(sb);
}
- set_device_specific_options(fs_info);
-
if (sb->s_root) {
/*
* Not the first mount of the fs thus got an existing super block.
@@ -1946,6 +1944,7 @@ static int btrfs_get_tree_super(struct fs_context *fc)
deactivate_locked_super(sb);
return -EACCES;
}
+ set_device_specific_options(fs_info);
bdev = fs_devices->latest_dev->bdev;
snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index ca30b15ea452..c10b4c242acf 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1797,7 +1797,7 @@ static int check_inode_extref(struct extent_buffer *leaf,
struct btrfs_inode_extref *extref = (struct btrfs_inode_extref *)ptr;
u16 namelen;
- if (unlikely(ptr + sizeof(*extref)) > end) {
+ if (unlikely(ptr + sizeof(*extref) > end)) {
inode_ref_err(leaf, slot,
"inode extref overflow, ptr %lu end %lu inode_extref size %zu",
ptr, end, sizeof(*extref));
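The one-character move in this hunk matters because unlikely() reduces its argument to 0 or 1; with the closing parenthesis in the old place, the bounds check compared that 0/1 against end and could never fire for realistic values. A small stand-alone illustration (GCC/Clang builtin assumed, numbers invented):

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
	unsigned long ptr = 100, end = 90, size = 16;

	/* old form: (ptr + size) collapses to 1, and "1 > end" is false */
	int broken = unlikely(ptr + size) > end;

	/* fixed form: the whole comparison sits inside the macro */
	int fixed = unlikely(ptr + size > end);

	printf("broken=%d fixed=%d\n", broken, fixed);   /* prints broken=0 fixed=1 */
	return 0;
}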
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index e00036672f33..0ea0df18a8e4 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1753,7 +1753,7 @@ out:
!fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
btrfs_bg_type_to_raid_name(map->type));
- return -EINVAL;
+ ret = -EINVAL;
}
if (unlikely(cache->alloc_offset > cache->zone_capacity)) {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 32973c62c1a2..d18c0eaef9b7 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1260,8 +1260,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
spin_unlock(&fsc->async_unlink_conflict_lock);
spin_lock(&dentry->d_lock);
- di->flags &= ~CEPH_DENTRY_ASYNC_UNLINK;
- wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT);
+ clear_and_wake_up_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags);
spin_unlock(&dentry->d_lock);
synchronize_rcu();
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 978acd3d4b32..99b30f784ee2 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -579,8 +579,7 @@ static void wake_async_create_waiters(struct inode *inode,
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
- ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
- wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
+ clear_and_wake_up_bit(CEPH_ASYNC_CREATE_BIT, &ci->i_ceph_flags);
if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
@@ -762,8 +761,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
}
spin_lock(&dentry->d_lock);
- di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
- wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
+ clear_and_wake_up_bit(CEPH_DENTRY_ASYNC_CREATE_BIT, &di->flags);
spin_unlock(&dentry->d_lock);
return ret;
@@ -2121,10 +2119,10 @@ again:
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
- if (direct_lock)
- ceph_start_io_direct(inode);
- else
- ceph_start_io_read(inode);
+ ret = direct_lock ? ceph_start_io_direct(inode) :
+ ceph_start_io_read(inode);
+ if (ret)
+ return ret;
if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
want |= CEPH_CAP_FILE_CACHE;
@@ -2277,7 +2275,9 @@ static ssize_t ceph_splice_read(struct file *in, loff_t *ppos,
(fi->flags & CEPH_F_SYNC))
return copy_splice_read(in, ppos, pipe, len, flags);
- ceph_start_io_read(inode);
+ ret = ceph_start_io_read(inode);
+ if (ret)
+ return ret;
want = CEPH_CAP_FILE_CACHE;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
@@ -2356,10 +2356,10 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
direct_lock = true;
retry_snap:
- if (direct_lock)
- ceph_start_io_direct(inode);
- else
- ceph_start_io_write(inode);
+ err = direct_lock ? ceph_start_io_direct(inode) :
+ ceph_start_io_write(inode);
+ if (err)
+ goto out_unlocked;
if (iocb->ki_flags & IOCB_APPEND) {
err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
@@ -2878,7 +2878,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
struct ceph_object_id src_oid, dst_oid;
struct ceph_osd_client *osdc;
struct ceph_osd_request *req;
- size_t bytes = 0;
+ ssize_t bytes = 0;
u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
u32 src_objlen, dst_objlen;
u32 object_size = src_ci->i_layout.object_size;
@@ -2928,7 +2928,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
"OSDs don't support copy-from2; disabling copy offload\n");
}
doutc(cl, "returned %d\n", ret);
- if (!bytes)
+ if (bytes <= 0)
bytes = ret;
goto out;
}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 949f0badc944..a6e260d9e420 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1794,6 +1794,11 @@ retry_lookup:
goto done;
}
+ if (unlikely(!in)) {
+ err = -EINVAL;
+ goto done;
+ }
+
/* attach proper inode */
if (d_really_is_negative(dn)) {
ceph_dir_clear_ordered(dir);
@@ -1829,6 +1834,12 @@ retry_lookup:
doutc(cl, " linking snapped dir %p to dn %p\n", in,
req->r_dentry);
ceph_dir_clear_ordered(dir);
+
+ if (unlikely(!in)) {
+ err = -EINVAL;
+ goto done;
+ }
+
ihold(in);
err = splice_dentry(&req->r_dentry, in);
if (err < 0)
diff --git a/fs/ceph/io.c b/fs/ceph/io.c
index c456509b31c3..2d10f49c93a9 100644
--- a/fs/ceph/io.c
+++ b/fs/ceph/io.c
@@ -21,14 +21,23 @@
/* Call with exclusively locked inode->i_rwsem */
static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
{
+ bool is_odirect;
+
lockdep_assert_held_write(&inode->i_rwsem);
- if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT) {
- spin_lock(&ci->i_ceph_lock);
- ci->i_ceph_flags &= ~CEPH_I_ODIRECT;
- spin_unlock(&ci->i_ceph_lock);
- inode_dio_wait(inode);
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ if (is_odirect) {
+ clear_bit(CEPH_I_ODIRECT_BIT, &ci->i_ceph_flags);
+ /* ensure modified bit is visible */
+ smp_mb__after_atomic();
}
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (is_odirect)
+ inode_dio_wait(inode);
}
/**
@@ -47,20 +56,35 @@ static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
* Note that buffered writes and truncates both take a write lock on
* inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
*/
-void
-ceph_start_io_read(struct inode *inode)
+int ceph_start_io_read(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_odirect;
+ int err;
/* Be an optimist! */
- down_read(&inode->i_rwsem);
- if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT))
- return;
+ err = down_read_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ if (!is_odirect)
+ return 0;
up_read(&inode->i_rwsem);
+
/* Slow path.... */
- down_write(&inode->i_rwsem);
+ err = down_write_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
ceph_block_o_direct(ci, inode);
downgrade_write(&inode->i_rwsem);
+
+ return 0;
}
/**
@@ -83,11 +107,12 @@ ceph_end_io_read(struct inode *inode)
* Declare that a buffered write operation is about to start, and ensure
* that we block all direct I/O.
*/
-void
-ceph_start_io_write(struct inode *inode)
+int ceph_start_io_write(struct inode *inode)
{
- down_write(&inode->i_rwsem);
- ceph_block_o_direct(ceph_inode(inode), inode);
+ int err = down_write_killable(&inode->i_rwsem);
+ if (!err)
+ ceph_block_o_direct(ceph_inode(inode), inode);
+ return err;
}
/**
@@ -106,12 +131,22 @@ ceph_end_io_write(struct inode *inode)
/* Call with exclusively locked inode->i_rwsem */
static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
{
+ bool is_odirect;
+
lockdep_assert_held_write(&inode->i_rwsem);
- if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)) {
- spin_lock(&ci->i_ceph_lock);
- ci->i_ceph_flags |= CEPH_I_ODIRECT;
- spin_unlock(&ci->i_ceph_lock);
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ if (!is_odirect) {
+ set_bit(CEPH_I_ODIRECT_BIT, &ci->i_ceph_flags);
+ /* ensure modified bit is visible */
+ smp_mb__after_atomic();
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (!is_odirect) {
/* FIXME: unmap_mapping_range? */
filemap_write_and_wait(inode->i_mapping);
}
@@ -133,20 +168,35 @@ static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
* Note that buffered writes and truncates both take a write lock on
* inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
*/
-void
-ceph_start_io_direct(struct inode *inode)
+int ceph_start_io_direct(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_odirect;
+ int err;
/* Be an optimist! */
- down_read(&inode->i_rwsem);
- if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)
- return;
+ err = down_read_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
+ spin_lock(&ci->i_ceph_lock);
+ /* ensure that bit state is consistent */
+ smp_mb__before_atomic();
+ is_odirect = READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT;
+ spin_unlock(&ci->i_ceph_lock);
+ if (is_odirect)
+ return 0;
up_read(&inode->i_rwsem);
+
/* Slow path.... */
- down_write(&inode->i_rwsem);
+ err = down_write_killable(&inode->i_rwsem);
+ if (err)
+ return err;
+
ceph_block_buffered(ci, inode);
downgrade_write(&inode->i_rwsem);
+
+ return 0;
}
/**
diff --git a/fs/ceph/io.h b/fs/ceph/io.h
index fa594cd77348..79029825e8b8 100644
--- a/fs/ceph/io.h
+++ b/fs/ceph/io.h
@@ -2,11 +2,13 @@
#ifndef _FS_CEPH_IO_H
#define _FS_CEPH_IO_H
-void ceph_start_io_read(struct inode *inode);
+#include <linux/compiler_attributes.h>
+
+int __must_check ceph_start_io_read(struct inode *inode);
void ceph_end_io_read(struct inode *inode);
-void ceph_start_io_write(struct inode *inode);
+int __must_check ceph_start_io_write(struct inode *inode);
void ceph_end_io_write(struct inode *inode);
-void ceph_start_io_direct(struct inode *inode);
+int __must_check ceph_start_io_direct(struct inode *inode);
void ceph_end_io_direct(struct inode *inode);
#endif /* FS_CEPH_IO_H */
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index e861de3c79b9..15cde055f3da 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -246,21 +246,28 @@ static long ceph_ioctl_lazyio(struct file *file)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
+ bool is_file_already_lazy = false;
+ spin_lock(&ci->i_ceph_lock);
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
- spin_lock(&ci->i_ceph_lock);
fi->fmode |= CEPH_FILE_MODE_LAZY;
ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++;
__ceph_touch_fmode(ci, mdsc, fi->fmode);
- spin_unlock(&ci->i_ceph_lock);
+ } else {
+ is_file_already_lazy = true;
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (is_file_already_lazy) {
+ doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode,
+ ceph_vinop(inode));
+ } else {
doutc(cl, "file %p %p %llx.%llx marked lazy\n", file, inode,
ceph_vinop(inode));
ceph_check_caps(ci, 0);
- } else {
- doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode,
- ceph_vinop(inode));
}
+
return 0;
}
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ebf4ac0055dd..dd764f9c64b9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -221,7 +221,10 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
if (err && err != -ERESTARTSYS)
return err;
- wait_for_completion_killable(&req->r_safe_completion);
+ err = wait_for_completion_killable(&req->r_safe_completion);
+ if (err)
+ return err;
+
return 0;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 73da2648fa0f..1740047aef0f 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -979,14 +979,15 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
if (mds >= mdsc->max_sessions) {
int newmax = 1 << get_count_order(mds + 1);
struct ceph_mds_session **sa;
+ size_t ptr_size = sizeof(struct ceph_mds_session *);
doutc(cl, "realloc to %d\n", newmax);
- sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
+ sa = kcalloc(newmax, ptr_size, GFP_NOFS);
if (!sa)
goto fail_realloc;
if (mdsc->sessions) {
memcpy(sa, mdsc->sessions,
- mdsc->max_sessions * sizeof(void *));
+ mdsc->max_sessions * ptr_size);
kfree(mdsc->sessions);
}
mdsc->sessions = sa;
@@ -2532,6 +2533,7 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
size_t size = sizeof(struct ceph_mds_reply_dir_entry);
unsigned int num_entries;
+ u64 bytes_count;
int order;
spin_lock(&ci->i_ceph_lock);
@@ -2540,7 +2542,11 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
num_entries = max(num_entries, 1U);
num_entries = min(num_entries, opt->max_readdir);
- order = get_order(size * num_entries);
+ bytes_count = (u64)size * num_entries;
+ if (unlikely(bytes_count > ULONG_MAX))
+ bytes_count = ULONG_MAX;
+
+ order = get_order((unsigned long)bytes_count);
while (order >= 0) {
rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
__GFP_NOWARN |
@@ -2550,7 +2556,7 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
break;
order--;
}
- if (!rinfo->dir_entries)
+ if (!rinfo->dir_entries || unlikely(order < 0))
return -ENOMEM;
num_entries = (PAGE_SIZE << order) / size;
@@ -5649,11 +5655,19 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
struct ceph_client *cl = mdsc->fsc->client;
+ const char *fs_name = mdsc->fsc->mount_options->mds_namespace;
const char *spath = mdsc->fsc->mount_options->server_path;
bool gid_matched = false;
u32 gid, tlen, len;
int i, j;
+ doutc(cl, "fsname check fs_name=%s match.fs_name=%s\n",
+ fs_name, auth->match.fs_name ? auth->match.fs_name : "");
+ if (auth->match.fs_name && strcmp(auth->match.fs_name, fs_name)) {
+ /* fsname mismatch, try next one */
+ return 0;
+ }
+
doutc(cl, "match.uid %lld\n", auth->match.uid);
if (auth->match.uid != MDS_AUTH_UID_ANY) {
if (auth->match.uid != caller_uid)
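In the readdir buffer hunk above, the size calculation is widened to u64 and clamped before being handed to get_order(), which takes an unsigned long; the clamp only bites where unsigned long is 32 bits. A userspace sketch of the clamp-then-narrow shape (helper and page size are assumptions of the sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define SKETCH_PAGE_SHIFT 12UL   /* assume 4 KiB pages */

static int sketch_get_order(unsigned long bytes)
{
	int order = 0;
	unsigned long pages = (bytes + (1UL << SKETCH_PAGE_SHIFT) - 1) >> SKETCH_PAGE_SHIFT;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	uint64_t entry_size = 300;         /* bytes per directory entry */
	uint64_t num_entries = 1u << 20;   /* requested entries */
	uint64_t bytes = entry_size * num_entries;   /* 64-bit product, no wrap */

	/* clamp before narrowing, mirroring the ULONG_MAX check in the hunk */
	if (bytes > ULONG_MAX)
		bytes = ULONG_MAX;

	printf("order = %d\n", sketch_get_order((unsigned long)bytes));
	return 0;
}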
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 8109aba66e02..2c7b151a7c95 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -353,10 +353,22 @@ struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
__decode_and_drop_type(p, end, u8, bad_ext);
}
if (mdsmap_ev >= 8) {
+ u32 fsname_len;
/* enabled */
ceph_decode_8_safe(p, end, m->m_enabled, bad_ext);
/* fs_name */
- ceph_decode_skip_string(p, end, bad_ext);
+ ceph_decode_32_safe(p, end, fsname_len, bad_ext);
+
+ /* validate fsname against mds_namespace */
+ if (!namespace_equals(mdsc->fsc->mount_options, *p,
+ fsname_len)) {
+ pr_warn_client(cl, "fsname %*pE doesn't match mds_namespace %s\n",
+ (int)fsname_len, (char *)*p,
+ mdsc->fsc->mount_options->mds_namespace);
+ goto bad;
+ }
+ /* skip fsname after validation */
+ ceph_decode_skip_n(p, end, fsname_len, bad);
}
/* damaged */
if (mdsmap_ev >= 9) {
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index db6c2db68f96..ad0cf177e75a 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -246,20 +246,6 @@ static void canonicalize_path(char *path)
path[j] = '\0';
}
-/*
- * Check if the mds namespace in ceph_mount_options matches
- * the passed in namespace string. First time match (when
- * ->mds_namespace is NULL) is treated specially, since
- * ->mds_namespace needs to be initialized by the caller.
- */
-static int namespace_equals(struct ceph_mount_options *fsopt,
- const char *namespace, size_t len)
-{
- return !(fsopt->mds_namespace &&
- (strlen(fsopt->mds_namespace) != len ||
- strncmp(fsopt->mds_namespace, namespace, len)));
-}
-
static int ceph_parse_old_source(const char *dev_name, const char *dev_name_end,
struct fs_context *fc)
{
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 25d8bacbcf44..a1f781c46b41 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -104,6 +104,20 @@ struct ceph_mount_options {
struct fscrypt_dummy_policy dummy_enc_policy;
};
+/*
+ * Check if the mds namespace in ceph_mount_options matches
+ * the passed in namespace string. First time match (when
+ * ->mds_namespace is NULL) is treated specially, since
+ * ->mds_namespace needs to be initialized by the caller.
+ */
+static inline int namespace_equals(struct ceph_mount_options *fsopt,
+ const char *namespace, size_t len)
+{
+ return !(fsopt->mds_namespace &&
+ (strlen(fsopt->mds_namespace) != len ||
+ strncmp(fsopt->mds_namespace, namespace, len)));
+}
+
/* mount state */
enum {
CEPH_MOUNT_MOUNTING,
@@ -639,7 +653,8 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_FLUSH_SNAPS (1 << 8) /* need flush snapss */
#define CEPH_I_ERROR_WRITE (1 << 9) /* have seen write errors */
#define CEPH_I_ERROR_FILELOCK (1 << 10) /* have seen file lock errors */
-#define CEPH_I_ODIRECT (1 << 11) /* inode in direct I/O mode */
+#define CEPH_I_ODIRECT_BIT (11) /* inode in direct I/O mode */
+#define CEPH_I_ODIRECT (1 << CEPH_I_ODIRECT_BIT)
#define CEPH_ASYNC_CREATE_BIT (12) /* async create in flight for this */
#define CEPH_I_ASYNC_CREATE (1 << CEPH_ASYNC_CREATE_BIT)
#define CEPH_I_SHUTDOWN (1 << 13) /* inode is no longer usable */
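The super.h hunk splits the flag into a bit number plus a mask derived from it because bit-wait helpers such as clear_and_wake_up_bit() take a bit number, while the existing flag tests use a mask. A userspace illustration with invented flag names:

#include <stdio.h>

#define SKETCH_ODIRECT_BIT  11
#define SKETCH_ODIRECT      (1UL << SKETCH_ODIRECT_BIT)

int main(void)
{
	unsigned long flags = 0;

	flags |= SKETCH_ODIRECT;                    /* mask style, as in the flag tests */
	printf("set:     %d\n", !!(flags & SKETCH_ODIRECT));

	/* built from the bit number, the form that set_bit()-style helpers take */
	flags &= ~(1UL << SKETCH_ODIRECT_BIT);
	printf("cleared: %d\n", !!(flags & SKETCH_ODIRECT));
	return 0;
}

Deriving the mask from the bit number keeps the two spellings of the same flag from drifting apart.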
diff --git a/fs/coredump.c b/fs/coredump.c
index b5fc06a092a4..5c1c381ee380 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -1468,7 +1468,7 @@ static int proc_dostring_coredump(const struct ctl_table *table, int write,
ssize_t retval;
char old_core_pattern[CORENAME_MAX_SIZE];
- if (write)
+ if (!write)
return proc_dostring(table, write, buffer, lenp, ppos);
retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE);
diff --git a/fs/dax.c b/fs/dax.c
index 89f071ba7b10..516f995a988c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1725,7 +1725,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iov_iter_rw(iter) == WRITE) {
lockdep_assert_held_write(&iomi.inode->i_rwsem);
iomi.flags |= IOMAP_WRITE;
- } else {
+ } else if (!sb_rdonly(iomi.inode->i_sb)) {
lockdep_assert_held(&iomi.inode->i_rwsem);
}
diff --git a/fs/dcache.c b/fs/dcache.c
index a067fa0a965a..035cccbc9276 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2557,6 +2557,8 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
spin_lock(&parent->d_lock);
new->d_parent = dget_dlock(parent);
hlist_add_head(&new->d_sib, &parent->d_children);
+ if (parent->d_flags & DCACHE_DISCONNECTED)
+ new->d_flags |= DCACHE_DISCONNECTED;
spin_unlock(&parent->d_lock);
retry:
diff --git a/fs/exec.c b/fs/exec.c
index 6b70c6726d31..4298e7e08d5d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -2048,7 +2048,7 @@ static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int writ
{
int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (!error && !write)
+ if (!error && write)
validate_coredump_safety();
return error;
}
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 329697c89d09..38210fb6901c 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -29,7 +29,6 @@ enum exfat_error_mode {
enum {
NLS_NAME_NO_LOSSY = 0, /* no lossy */
NLS_NAME_LOSSY = 1 << 0, /* just detected incorrect filename(s) */
- NLS_NAME_OVERLEN = 1 << 1, /* the length is over than its limit */
};
#define EXFAT_HASH_BITS 8
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index f246cf439588..adc37b4d7fc2 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -509,8 +509,8 @@ static int exfat_ioctl_get_volume_label(struct super_block *sb, unsigned long ar
static int exfat_ioctl_set_volume_label(struct super_block *sb,
unsigned long arg)
{
- int ret = 0, lossy;
- char label[FSLABEL_MAX];
+ int ret = 0, lossy, label_len;
+ char label[FSLABEL_MAX] = {0};
struct exfat_uni_name uniname;
if (!capable(CAP_SYS_ADMIN))
@@ -520,8 +520,9 @@ static int exfat_ioctl_set_volume_label(struct super_block *sb,
return -EFAULT;
memset(&uniname, 0, sizeof(uniname));
+ label_len = strnlen(label, FSLABEL_MAX - 1);
if (label[0]) {
- ret = exfat_nls_to_utf16(sb, label, FSLABEL_MAX,
+ ret = exfat_nls_to_utf16(sb, label, label_len,
&uniname, &lossy);
if (ret < 0)
return ret;
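The ioctl fix above zero-fills the label buffer and measures it with strnlen() bounded to the buffer size, so a label copied in without a NUL terminator cannot make later string handling run off the end. A tiny userspace sketch (names and sizes invented):

#include <stdio.h>
#include <string.h>

#define SKETCH_LABEL_MAX 12

int main(void)
{
	char label[SKETCH_LABEL_MAX] = {0};            /* always NUL-padded */

	/* simulate an untrusted copy that fills the buffer without a terminator */
	memcpy(label, "VOLUMELABEL", 11);

	size_t len = strnlen(label, SKETCH_LABEL_MAX - 1);   /* never reads past the array */
	printf("label_len = %zu\n", len);
	return 0;
}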
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 7eb9c67fd35f..745dce29ddb5 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -442,7 +442,7 @@ static int __exfat_resolve_path(struct inode *inode, const unsigned char *path,
return namelen; /* return error value */
if ((lossy && !lookup) || !namelen)
- return (lossy & NLS_NAME_OVERLEN) ? -ENAMETOOLONG : -EINVAL;
+ return -EINVAL;
return 0;
}
@@ -642,10 +642,14 @@ static int exfat_find(struct inode *dir, const struct qstr *qname,
info->type = exfat_get_entry_type(ep);
info->attr = le16_to_cpu(ep->dentry.file.attr);
- info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size);
info->size = le64_to_cpu(ep2->dentry.stream.size);
+ if (info->valid_size < 0) {
+ exfat_fs_error(sb, "data valid size is invalid(%lld)", info->valid_size);
+ return -EIO;
+ }
+
if (unlikely(EXFAT_B_TO_CLU_ROUND_UP(info->size, sbi) > sbi->used_clusters)) {
exfat_fs_error(sb, "data size is invalid(%lld)", info->size);
return -EIO;
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index 8243d94ceaf4..57db08a5271c 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -616,9 +616,6 @@ static int exfat_nls_to_ucs2(struct super_block *sb,
unilen++;
}
- if (p_cstring[i] != '\0')
- lossy |= NLS_NAME_OVERLEN;
-
*uniname = '\0';
p_uniname->name_len = unilen;
p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0,
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index b3e9b7bd7978..a0e66bc10093 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -280,9 +280,16 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
bh, is_metadata, inode->i_mode,
test_opt(inode->i_sb, DATA_FLAGS));
- /* In the no journal case, we can just do a bforget and return */
+ /*
+ * In the no journal case, we should wait for the ongoing buffer
+ * to complete and do a forget.
+ */
if (!ext4_handle_valid(handle)) {
- bforget(bh);
+ if (bh) {
+ clear_buffer_dirty(bh);
+ wait_on_buffer(bh);
+ __bforget(bh);
+ }
return 0;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f9e4ac87211e..e99306a8f47c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5319,6 +5319,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
ext4_set_inode_flags(inode, true);
+ /* Detect invalid flag combination - can't have both inline data and extents */
+ if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ ext4_error_inode(inode, function, line, 0,
+ "inode has both inline data and extents flags");
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (ext4_has_feature_64bit(sb))
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index 33c3a89396b1..82d5e7501455 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -513,7 +513,7 @@ void ext4_release_orphan_info(struct super_block *sb)
return;
for (i = 0; i < oi->of_blocks; i++)
brelse(oi->of_binfo[i].ob_bh);
- kfree(oi->of_binfo);
+ kvfree(oi->of_binfo);
}
static struct ext4_orphan_block_tail *ext4_orphan_block_tail(
@@ -637,7 +637,7 @@ int ext4_init_orphan_info(struct super_block *sb)
out_free:
for (i--; i >= 0; i--)
brelse(oi->of_binfo[i].ob_bh);
- kfree(oi->of_binfo);
+ kvfree(oi->of_binfo);
out_put:
iput(inode);
return ret;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ef38e62cda8f..775aa4f63aa3 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1497,8 +1497,8 @@ static bool f2fs_map_blocks_cached(struct inode *inode,
struct f2fs_dev_info *dev = &sbi->devs[bidx];
map->m_bdev = dev->bdev;
- map->m_pblk -= dev->start_blk;
map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
+ map->m_pblk -= dev->start_blk;
} else {
map->m_bdev = inode->i_sb->s_bdev;
}
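A sketch with invented numbers of why this hunk swaps the two lines: the length must be clamped while m_pblk is still a global block number; rebasing first would clamp against the wrong base and allow a mapping to spill past the device. With a device covering blocks [1000, 1999] and a mapping starting at 1990:

#include <stdio.h>

int main(void)
{
	unsigned long long dev_start = 1000, dev_end = 1999;
	unsigned long long pblk = 1990, len = 50;

	/* fixed order: clamp using the global block number... */
	unsigned long long clamped = len < dev_end + 1 - pblk ? len : dev_end + 1 - pblk;
	/* ...then rebase to the device-local range */
	unsigned long long local_pblk = pblk - dev_start;

	printf("len=%llu (10 expected), device-local pblk=%llu\n", clamped, local_pblk);
	return 0;
}

Rebasing first would compare 2000 - 990 = 1010 against the requested 50 and leave the length unclamped.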
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index fd8e7b0b2166..db7afb806411 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1820,7 +1820,7 @@ static int f2fs_drop_inode(struct inode *inode)
sb_end_intwrite(inode->i_sb);
spin_lock(&inode->i_lock);
- iput(inode);
+ atomic_dec(&inode->i_count);
}
trace_f2fs_drop_inode(inode, 0);
return 0;
diff --git a/fs/file_attr.c b/fs/file_attr.c
index 12424d4945d0..1dcec88c0680 100644
--- a/fs/file_attr.c
+++ b/fs/file_attr.c
@@ -84,7 +84,7 @@ int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
int error;
if (!inode->i_op->fileattr_get)
- return -EOPNOTSUPP;
+ return -ENOIOCTLCMD;
error = security_inode_file_getattr(dentry, fa);
if (error)
@@ -270,7 +270,7 @@ int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
int err;
if (!inode->i_op->fileattr_set)
- return -EOPNOTSUPP;
+ return -ENOIOCTLCMD;
if (!inode_owner_or_capable(idmap, inode))
return -EPERM;
@@ -312,8 +312,6 @@ int ioctl_getflags(struct file *file, unsigned int __user *argp)
int err;
err = vfs_fileattr_get(file->f_path.dentry, &fa);
- if (err == -EOPNOTSUPP)
- err = -ENOIOCTLCMD;
if (!err)
err = put_user(fa.flags, argp);
return err;
@@ -335,8 +333,6 @@ int ioctl_setflags(struct file *file, unsigned int __user *argp)
fileattr_fill_flags(&fa, flags);
err = vfs_fileattr_set(idmap, dentry, &fa);
mnt_drop_write_file(file);
- if (err == -EOPNOTSUPP)
- err = -ENOIOCTLCMD;
}
}
return err;
@@ -349,8 +345,6 @@ int ioctl_fsgetxattr(struct file *file, void __user *argp)
int err;
err = vfs_fileattr_get(file->f_path.dentry, &fa);
- if (err == -EOPNOTSUPP)
- err = -ENOIOCTLCMD;
if (!err)
err = copy_fsxattr_to_user(&fa, argp);
@@ -371,8 +365,6 @@ int ioctl_fssetxattr(struct file *file, void __user *argp)
if (!err) {
err = vfs_fileattr_set(idmap, dentry, &fa);
mnt_drop_write_file(file);
- if (err == -EOPNOTSUPP)
- err = -ENOIOCTLCMD;
}
}
return err;
@@ -424,6 +416,8 @@ SYSCALL_DEFINE5(file_getattr, int, dfd, const char __user *, filename,
}
error = vfs_fileattr_get(filepath.dentry, &fa);
+ if (error == -ENOIOCTLCMD || error == -ENOTTY)
+ error = -EOPNOTSUPP;
if (error)
return error;
@@ -491,6 +485,8 @@ SYSCALL_DEFINE5(file_setattr, int, dfd, const char __user *, filename,
if (!error) {
error = vfs_fileattr_set(mnt_idmap(filepath.mnt),
filepath.dentry, &fa);
+ if (error == -ENOIOCTLCMD || error == -ENOTTY)
+ error = -EOPNOTSUPP;
mnt_drop_write(filepath.mnt);
}
diff --git a/fs/file_table.c b/fs/file_table.c
index b223d873e48b..cd4a3db4659a 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -192,7 +192,7 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
f->f_sb_err = 0;
/*
- * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
+ * We're SLAB_TYPESAFE_BY_RCU so initialize f_ref last. While
* fget-rcu pattern users need to be able to handle spurious
* refcount bumps we should reinitialize the reused file first.
*/
diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
index 57032eadca6c..fdc175e93f74 100644
--- a/fs/fuse/ioctl.c
+++ b/fs/fuse/ioctl.c
@@ -536,8 +536,6 @@ int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
cleanup:
fuse_priv_ioctl_cleanup(inode, ff);
- if (err == -ENOTTY)
- err = -EOPNOTSUPP;
return err;
}
@@ -574,7 +572,5 @@ int fuse_fileattr_set(struct mnt_idmap *idmap,
cleanup:
fuse_priv_ioctl_cleanup(inode, ff);
- if (err == -ENOTTY)
- err = -EOPNOTSUPP;
return err;
}
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index c14c9a035ee0..a4f5321eafae 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -27,7 +27,7 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
a = le32_to_cpu(btree->u.internal[i].down);
brelse(bh);
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
goto go_down;
}
hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
@@ -69,12 +69,13 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
int n;
unsigned fs;
int c1, c2 = 0;
+
if (fnod) {
if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
} else {
if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
a = node;
go_down:
@@ -91,7 +92,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
goto go_down;
}
if (n >= 0) {
@@ -151,7 +152,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
}
brelse(bh);
bh = bh1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
btree->n_free_nodes--; n = btree->n_used_nodes++;
le16_add_cpu(&btree->first_free, 12);
@@ -168,10 +169,10 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
if (up != node || !fnod) {
if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
} else {
if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
}
if (btree->n_free_nodes) {
btree->n_free_nodes--; n = btree->n_used_nodes++;
@@ -206,8 +207,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
anode->btree.n_used_nodes = 1;
anode->btree.n_free_nodes = 59;
anode->btree.first_free = cpu_to_le16(16);
- anode->btree.u.internal[0].down = cpu_to_le32(a);
- anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
+ GET_BTREE_PTR(&anode->btree)->u.internal[0].down = cpu_to_le32(a);
+ GET_BTREE_PTR(&anode->btree)->u.internal[0].file_secno = cpu_to_le32(-1);
mark_buffer_dirty(bh);
brelse(bh);
if ((anode = hpfs_map_anode(s, a, &bh))) {
@@ -229,20 +230,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
brelse(bh2);
return -1;
}
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
} else {
if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
brelse(bh2);
return -1;
}
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
}
ranode->up = cpu_to_le32(node);
memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
if (fnod)
ranode->btree.flags |= BP_fnode_parent;
- ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
- if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
+ GET_BTREE_PTR(&ranode->btree)->n_free_nodes = (bp_internal(GET_BTREE_PTR(&ranode->btree)) ? 60 : 40) - GET_BTREE_PTR(&ranode->btree)->n_used_nodes;
+ if (bp_internal(GET_BTREE_PTR(&ranode->btree))) for (n = 0; n < GET_BTREE_PTR(&ranode->btree)->n_used_nodes; n++) {
struct anode *unode;
if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
unode->up = cpu_to_le32(ra);
@@ -291,7 +292,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
return;
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
- btree1 = &anode->btree;
+ btree1 = GET_BTREE_PTR(&anode->btree);
level++;
pos = 0;
}
@@ -307,7 +308,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
ano = le32_to_cpu(anode->up);
if (--level) {
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
- btree1 = &anode->btree;
+ btree1 = GET_BTREE_PTR(&anode->btree);
} else btree1 = btree;
for (i = 0; i < btree1->n_used_nodes; i++) {
if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
@@ -332,7 +333,7 @@ static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
struct anode *anode;
struct buffer_head *bh;
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
- return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
+ return hpfs_bplus_lookup(s, NULL, GET_BTREE_PTR(&anode->btree), sec, bh);
}
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
@@ -388,7 +389,7 @@ void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
struct buffer_head *bh;
if (ano) {
if (!(anode = hpfs_map_anode(s, a, &bh))) return;
- hpfs_remove_btree(s, &anode->btree);
+ hpfs_remove_btree(s, GET_BTREE_PTR(&anode->btree));
brelse(bh);
hpfs_free_sectors(s, a, 1);
} else hpfs_free_sectors(s, a, (len + 511) >> 9);
@@ -407,10 +408,10 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
int c1, c2 = 0;
if (fno) {
if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
- btree = &fnode->btree;
+ btree = GET_BTREE_PTR(&fnode->btree);
} else {
if (!(anode = hpfs_map_anode(s, f, &bh))) return;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
if (!secs) {
hpfs_remove_btree(s, btree);
@@ -448,7 +449,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
return;
if (!(anode = hpfs_map_anode(s, node, &bh))) return;
- btree = &anode->btree;
+ btree = GET_BTREE_PTR(&anode->btree);
}
nodes = btree->n_used_nodes + btree->n_free_nodes;
for (i = 0; i < btree->n_used_nodes; i++)
@@ -485,7 +486,7 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
struct extended_attribute *ea;
struct extended_attribute *ea_end;
if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
- if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
+ if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, GET_BTREE_PTR(&fnode->btree));
else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
ea_end = fnode_end_ea(fnode);
for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 102ba18e561f..2149d3ca530b 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -41,7 +41,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
struct buffer_head *bh;
struct anode *anode;
if ((anode = hpfs_map_anode(s, a, &bh))) {
- hpfs_remove_btree(s, &anode->btree);
+ hpfs_remove_btree(s, GET_BTREE_PTR(&anode->btree));
brelse(bh);
hpfs_free_sectors(s, a, 1);
}
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 263b5bbe1849..29e876705369 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -51,7 +51,9 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_sec
return hpfs_inode->i_disk_sec + n;
}
if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
- disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh);
+ disk_secno = hpfs_bplus_lookup(inode->i_sb, inode,
+ GET_BTREE_PTR(&fnode->btree),
+ file_secno, bh);
if (disk_secno == -1) return 0;
if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
n = file_secno - hpfs_inode->i_file_sec;
diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h
index 281dec8f636b..353f73c914d9 100644
--- a/fs/hpfs/hpfs.h
+++ b/fs/hpfs/hpfs.h
@@ -394,27 +394,45 @@ enum {
BP_binary_search = 0x40,
BP_internal = 0x80
};
+
+/**
+ * GET_BTREE_PTR() - Get a pointer to struct bplus_header
+ *
+ * Wrapper around container_of() to retrieve a pointer to struct
+ * bplus_header from a pointer to struct bplus_header_fixed.
+ *
+ * @ptr: Pointer to struct bplus_header_fixed.
+ *
+ */
+#define GET_BTREE_PTR(ptr) \
+ container_of(ptr, struct bplus_header, __hdr)
+
struct bplus_header
{
- u8 flags; /* bit 0 - high bit of first free entry offset
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(bplus_header_fixed, __hdr,
+ u8 flags; /* bit 0 - high bit of first free entry offset
bit 5 - we're pointed to by an fnode,
the data btree or some ea or the
main ea bootage pointer ea_secno
bit 6 - suggest binary search (unused)
bit 7 - 1 -> (internal) tree of anodes
0 -> (leaf) list of extents */
- u8 fill[3];
- u8 n_free_nodes; /* free nodes in following array */
- u8 n_used_nodes; /* used nodes in following array */
- __le16 first_free; /* offset from start of header to
+ u8 fill[3];
+ u8 n_free_nodes; /* free nodes in following array */
+ u8 n_used_nodes; /* used nodes in following array */
+ __le16 first_free; /* offset from start of header to
first free node in array */
- union {
- /* (internal) 2-word entries giving subtree pointers */
- DECLARE_FLEX_ARRAY(struct bplus_internal_node, internal);
- /* (external) 3-word entries giving sector runs */
- DECLARE_FLEX_ARRAY(struct bplus_leaf_node, external);
- } u;
+ );
+ union {
+ /* (internal) 2-word entries giving subtree pointers */
+ DECLARE_FLEX_ARRAY(struct bplus_internal_node, internal);
+ /* (external) 3-word entries giving sector runs */
+ DECLARE_FLEX_ARRAY(struct bplus_leaf_node, external);
+ } u;
};
+static_assert(offsetof(struct bplus_header, u.internal) == sizeof(struct bplus_header_fixed),
+ "struct member likely outside of struct_group_tagged()");
static inline bool bp_internal(struct bplus_header *bp)
{
@@ -453,7 +471,7 @@ struct fnode
__le16 flags; /* bit 1 set -> ea_secno is an anode */
/* bit 8 set -> directory. first & only extent
points to dnode. */
- struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */
+ struct bplus_header_fixed btree; /* b+ tree, 8 extents or 12 subtrees */
union {
struct bplus_leaf_node external[8];
struct bplus_internal_node internal[12];
@@ -495,7 +513,7 @@ struct anode
__le32 self; /* pointer to this anode */
__le32 up; /* parent anode or fnode */
- struct bplus_header btree; /* b+tree, 40 extents or 60 subtrees */
+ struct bplus_header_fixed btree; /* b+tree, 40 extents or 60 subtrees */
union {
struct bplus_leaf_node external[40];
struct bplus_internal_node internal[60];
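A simplified userspace analogue (invented names, heap-allocated instead of embedded) of what GET_BTREE_PTR() does above: only the fixed-size head of the header is embedded elsewhere, and container_of()-style pointer arithmetic recovers the full structure, including its flexible array, when a caller needs it:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define sketch_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hdr_fixed {
	unsigned char flags;
	unsigned short first_free;
};

struct hdr_full {
	struct hdr_fixed fixed;   /* the tagged fixed-size head, as in struct_group_tagged() */
	int entries[];            /* flexible array, only reachable via hdr_full */
};

int main(void)
{
	struct hdr_full *node = malloc(sizeof(*node) + 4 * sizeof(int));

	if (!node)
		return 1;
	node->fixed.flags = 0x80;

	/* a caller holding only the embedded fixed part... */
	struct hdr_fixed *fixed = &node->fixed;

	/* ...recovers the enclosing header, which is what GET_BTREE_PTR() expresses */
	struct hdr_full *full = sketch_container_of(fixed, struct hdr_full, fixed);

	full->entries[0] = 42;
	printf("flags=0x%x entries[0]=%d\n", (unsigned)full->fixed.flags, full->entries[0]);
	free(node);
	return 0;
}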
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index ecd9fccd1663..be73233502f8 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -178,14 +178,14 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
}
if (!fnode_is_dir(fnode)) {
if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
- (bp_internal(&fnode->btree) ? 12 : 8)) {
+ (bp_internal(GET_BTREE_PTR(&fnode->btree)) ? 12 : 8)) {
hpfs_error(s,
"bad number of nodes in fnode %08lx",
(unsigned long)ino);
goto bail;
}
if (le16_to_cpu(fnode->btree.first_free) !=
- 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
+ 8 + fnode->btree.n_used_nodes * (bp_internal(GET_BTREE_PTR(&fnode->btree)) ? 8 : 12)) {
hpfs_error(s,
"bad first_free pointer in fnode %08lx",
(unsigned long)ino);
@@ -233,12 +233,12 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff
goto bail;
}
if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
- (bp_internal(&anode->btree) ? 60 : 40)) {
+ (bp_internal(GET_BTREE_PTR(&anode->btree)) ? 60 : 40)) {
hpfs_error(s, "bad number of nodes in anode %08x", ano);
goto bail;
}
if (le16_to_cpu(anode->btree.first_free) !=
- 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
+ 8 + anode->btree.n_used_nodes * (bp_internal(GET_BTREE_PTR(&anode->btree)) ? 8 : 12)) {
hpfs_error(s, "bad first_free pointer in anode %08x", ano);
goto bail;
}
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index e3cdc421dfba..353e13a615f5 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -52,8 +52,10 @@ static struct dentry *hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
dee.fnode = cpu_to_le32(fno);
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail2;
+ }
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
@@ -153,9 +155,10 @@ static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
-
+ }
hpfs_init_inode(result);
result->i_ino = fno;
result->i_mode |= S_IFREG;
@@ -239,9 +242,10 @@ static int hpfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
-
+ }
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
@@ -314,8 +318,10 @@ static int hpfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
- if (!result)
+ if (!result) {
+ err = -ENOMEM;
goto bail1;
+ }
result->i_ino = fno;
hpfs_init_inode(result);
hpfs_i(result)->i_parent_dir = dir->i_ino;
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 42b779b4d87f..8ab85e7ac91e 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -404,15 +404,11 @@ static int hpfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_timeshift:
{
- int m = 1;
char *rhs = param->string;
int timeshift;
- if (*rhs == '-') m = -1;
- if (*rhs == '+' || *rhs == '-') rhs++;
- timeshift = simple_strtoul(rhs, &rhs, 0) * m;
- if (*rhs)
- return -EINVAL;
+ if (kstrtoint(rhs, 0, &timeshift))
+ return -EINVAL;
ctx->timeshift = timeshift;
break;
}
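The timeshift hunk replaces hand-rolled sign handling plus simple_strtoul() with kstrtoint(), which parses an optional sign, rejects trailing garbage, and reports overflow. Roughly the same contract written against strtol() in userspace (sketch only, error codes borrowed from errno.h):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>

static int parse_int_strict(const char *s, int *out)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(s, &end, 0);
	if (errno || end == s || *end != '\0')   /* overflow, empty, or trailing junk */
		return -EINVAL;
	if (val < INT_MIN || val > INT_MAX)
		return -ERANGE;
	*out = (int)val;
	return 0;
}

int main(void)
{
	int shift;

	printf("\"-120\" -> %s\n", parse_int_strict("-120", &shift) ? "rejected" : "ok");
	printf("\"+60x\" -> %s\n", parse_int_strict("+60x", &shift) ? "rejected" : "ok");
	return 0;
}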
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 9802b2cc29bb..5d5d63efbd57 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -433,7 +433,8 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- ret = bio_iov_iter_get_bdev_pages(bio, dio->submit.iter, iomap->bdev);
+ ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
+ bdev_logical_block_size(iomap->bdev) - 1);
if (unlikely(ret)) {
/*
* We have to stop part way through an IO. We must fall
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index c7867139af69..3e510564de6e 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1659,6 +1659,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
int drop_reserve = 0;
int err = 0;
int was_modified = 0;
+ int wait_for_writeback = 0;
if (is_handle_aborted(handle))
return -EROFS;
@@ -1782,18 +1783,22 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
}
/*
- * The buffer is still not written to disk, we should
- * attach this buffer to current transaction so that the
- * buffer can be checkpointed only after the current
- * transaction commits.
+ * The buffer has not yet been written to disk. We should
+ * either clear the buffer or ensure that the ongoing I/O
+	 * is completed, and attach this buffer to the current
+	 * transaction so that the buffer can be checkpointed only
+ * after the current transaction commits.
*/
clear_buffer_dirty(bh);
+ wait_for_writeback = 1;
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
spin_unlock(&journal->j_list_lock);
}
drop:
__brelse(bh);
spin_unlock(&jh->b_state_lock);
+ if (wait_for_writeback)
+ wait_on_buffer(bh);
jbd2_journal_put_journal_head(jh);
if (drop_reserve) {
/* no need to reserve log space for this block -bzzz */
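The new flag exists so the decision is taken while jh->b_state_lock and j_list_lock are held, but the potentially sleeping wait_on_buffer() only runs once both are dropped. A condensed sketch of that "decide under the lock, sleep outside it" ordering (the surrounding forget logic is elided):

#include <linux/buffer_head.h>
#include <linux/jbd2.h>

static void forget_buffer_sketch(struct journal_head *jh, struct buffer_head *bh)
{
	int wait_for_writeback = 0;

	spin_lock(&jh->b_state_lock);
	if (buffer_dirty(bh)) {
		clear_buffer_dirty(bh);
		wait_for_writeback = 1;		/* only record the decision here */
	}
	spin_unlock(&jh->b_state_lock);

	if (wait_for_writeback)
		wait_on_buffer(bh);		/* may sleep, so no spinlock held */
}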
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index df01d2876b68..9056f05a67dc 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -270,19 +270,31 @@ ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
mirror->layout = NULL;
}
-static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
+static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(u32 dss_count,
+ gfp_t gfp_flags)
{
struct nfs4_ff_layout_mirror *mirror;
- u32 dss_id;
mirror = kzalloc(sizeof(*mirror), gfp_flags);
- if (mirror != NULL) {
- spin_lock_init(&mirror->lock);
- refcount_set(&mirror->ref, 1);
- INIT_LIST_HEAD(&mirror->mirrors);
- for (dss_id = 0; dss_id < mirror->dss_count; dss_id++)
- nfs_localio_file_init(&mirror->dss[dss_id].nfl);
+ if (mirror == NULL)
+ return NULL;
+
+ spin_lock_init(&mirror->lock);
+ refcount_set(&mirror->ref, 1);
+ INIT_LIST_HEAD(&mirror->mirrors);
+
+ mirror->dss_count = dss_count;
+ mirror->dss =
+ kcalloc(dss_count, sizeof(struct nfs4_ff_layout_ds_stripe),
+ gfp_flags);
+ if (mirror->dss == NULL) {
+ kfree(mirror);
+ return NULL;
}
+
+ for (u32 dss_id = 0; dss_id < mirror->dss_count; dss_id++)
+ nfs_localio_file_init(&mirror->dss[dss_id].nfl);
+
return mirror;
}
@@ -507,17 +519,12 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
if (dss_count > 1 && stripe_unit == 0)
goto out_err_free;
- fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
+ fls->mirror_array[i] = ff_layout_alloc_mirror(dss_count, gfp_flags);
if (fls->mirror_array[i] == NULL) {
rc = -ENOMEM;
goto out_err_free;
}
- fls->mirror_array[i]->dss_count = dss_count;
- fls->mirror_array[i]->dss =
- kcalloc(dss_count, sizeof(struct nfs4_ff_layout_ds_stripe),
- gfp_flags);
-
for (dss_id = 0; dss_id < dss_count; dss_id++) {
dss_info = &fls->mirror_array[i]->dss[dss_id];
dss_info->mirror = fls->mirror_array[i];
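Pulling the dss[] allocation into ff_layout_alloc_mirror() keeps the two allocations and their failure unwinding in one place, so a caller can no longer end up with dss_count set but dss left NULL. A stripped-down sketch of that shape, with stand-in types instead of the real mirror/stripe structures:

#include <linux/slab.h>

/* Stand-ins for nfs4_ff_layout_mirror / nfs4_ff_layout_ds_stripe. */
struct stripe_sketch {
	int unused;
};

struct mirror_sketch {
	u32 dss_count;
	struct stripe_sketch *dss;
};

/* Allocate the mirror together with its per-stripe array, or nothing at all. */
static struct mirror_sketch *alloc_mirror_sketch(u32 dss_count, gfp_t gfp_flags)
{
	struct mirror_sketch *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (!mirror)
		return NULL;

	mirror->dss_count = dss_count;
	mirror->dss = kcalloc(dss_count, sizeof(*mirror->dss), gfp_flags);
	if (!mirror->dss) {
		kfree(mirror);			/* unwind the first allocation */
		return NULL;
	}
	return mirror;
}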
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 6fddf43d729c..5998d6bd8a4f 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -222,6 +222,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
clp->cl_mig_gen = 1;
+ clp->cl_last_renewal = jiffies;
#if IS_ENABLED(CONFIG_NFS_V4_1)
init_waitqueue_head(&clp->cl_lock_waitq);
#endif
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f58098417142..411776718494 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3636,6 +3636,7 @@ struct nfs4_closedata {
} lr;
struct nfs_fattr fattr;
unsigned long timestamp;
+ unsigned short retrans;
};
static void nfs4_free_closedata(void *data)
@@ -3664,6 +3665,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
.state = state,
.inode = calldata->inode,
.stateid = &calldata->arg.stateid,
+ .retrans = calldata->retrans,
};
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -3711,6 +3713,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
default:
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
+ calldata->retrans = exception.retrans;
if (exception.retry)
goto out_restart;
}
@@ -5593,9 +5596,11 @@ static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
.inode = hdr->inode,
.state = hdr->args.context->state,
.stateid = &hdr->args.stateid,
+ .retrans = hdr->retrans,
};
task->tk_status = nfs4_async_handle_exception(task,
server, task->tk_status, &exception);
+ hdr->retrans = exception.retrans;
if (exception.retry) {
rpc_restart_call_prepare(task);
return -EAGAIN;
@@ -5709,10 +5714,12 @@ static int nfs4_write_done_cb(struct rpc_task *task,
.inode = hdr->inode,
.state = hdr->args.context->state,
.stateid = &hdr->args.stateid,
+ .retrans = hdr->retrans,
};
task->tk_status = nfs4_async_handle_exception(task,
NFS_SERVER(inode), task->tk_status,
&exception);
+ hdr->retrans = exception.retrans;
if (exception.retry) {
rpc_restart_call_prepare(task);
return -EAGAIN;
@@ -6726,6 +6733,7 @@ struct nfs4_delegreturndata {
struct nfs_fh fh;
nfs4_stateid stateid;
unsigned long timestamp;
+ unsigned short retrans;
struct {
struct nfs4_layoutreturn_args arg;
struct nfs4_layoutreturn_res res;
@@ -6746,6 +6754,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
.inode = data->inode,
.stateid = &data->stateid,
.task_is_privileged = data->args.seq_args.sa_privileged,
+ .retrans = data->retrans,
};
if (!nfs4_sequence_done(task, &data->res.seq_res))
@@ -6817,6 +6826,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
task->tk_status = nfs4_async_handle_exception(task,
data->res.server, task->tk_status,
&exception);
+ data->retrans = exception.retrans;
if (exception.retry)
goto out_restart;
}
@@ -7093,6 +7103,7 @@ struct nfs4_unlockdata {
struct file_lock fl;
struct nfs_server *server;
unsigned long timestamp;
+ unsigned short retrans;
};
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
@@ -7147,6 +7158,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
struct nfs4_exception exception = {
.inode = calldata->lsp->ls_state->inode,
.stateid = &calldata->arg.stateid,
+ .retrans = calldata->retrans,
};
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -7180,6 +7192,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
task->tk_status = nfs4_async_handle_exception(task,
calldata->server, task->tk_status,
&exception);
+ calldata->retrans = exception.retrans;
if (exception.retry)
rpc_restart_call_prepare(task);
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 0fb6905736d5..336c510f3750 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1535,7 +1535,8 @@ static int nfs_writeback_done(struct rpc_task *task,
/* Deal with the suid/sgid bit corner case */
if (nfs_should_remove_suid(inode)) {
spin_lock(&inode->i_lock);
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE
+ | NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
}
return 0;
diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
index c318cf74e388..0f1a35400cd5 100644
--- a/fs/nfsd/flexfilelayout.c
+++ b/fs/nfsd/flexfilelayout.c
@@ -125,6 +125,13 @@ nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
return 0;
}
+static __be32
+nfsd4_ff_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
+ struct nfsd4_layoutcommit *lcp)
+{
+ return nfs_ok;
+}
+
const struct nfsd4_layout_ops ff_layout_ops = {
.notify_types =
NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
@@ -133,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_ops = {
.encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
.proc_layoutget = nfsd4_ff_proc_layoutget,
.encode_layoutget = nfsd4_ff_encode_layoutget,
+ .proc_layoutcommit = nfsd4_ff_proc_layoutcommit,
};
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 648dc59bef7f..79b026a36fb6 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -490,7 +490,9 @@ static struct dentry *nsfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
VFS_WARN_ON_ONCE(ns->ns_id != fid->ns_id);
VFS_WARN_ON_ONCE(ns->ns_type != fid->ns_type);
- VFS_WARN_ON_ONCE(ns->inum != fid->ns_inum);
+
+ if (ns->inum != fid->ns_inum)
+ return NULL;
if (!__ns_ref_get(ns))
return NULL;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index aac7e34f56c1..604a82acd164 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -178,7 +178,7 @@ static int ovl_copy_fileattr(struct inode *inode, const struct path *old,
err = ovl_real_fileattr_get(old, &oldfa);
if (err) {
/* Ntfs-3g returns -EINVAL for "no fileattr support" */
- if (err == -EOPNOTSUPP || err == -EINVAL)
+ if (err == -ENOTTY || err == -EINVAL)
return 0;
pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
old->dentry, err);
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index fc52c796061d..7ab2c9daffd0 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -369,11 +369,6 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!ovl_should_sync(OVL_FS(inode->i_sb)))
ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
- /*
- * Overlayfs doesn't support deferred completions, don't copy
- * this property in case it is set by the issuer.
- */
- ifl &= ~IOCB_DIO_CALLER_COMP;
ret = backing_file_write_iter(realfile, iter, iocb, ifl, &ctx);
out_unlock:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index aaa4cf579561..e11f310ce092 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -720,7 +720,10 @@ int ovl_real_fileattr_get(const struct path *realpath, struct file_kattr *fa)
if (err)
return err;
- return vfs_fileattr_get(realpath->dentry, fa);
+ err = vfs_fileattr_get(realpath->dentry, fa);
+ if (err == -ENOIOCTLCMD)
+ err = -ENOTTY;
+ return err;
}
int ovl_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
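vfs_fileattr_get() reports a missing ioctl handler as -ENOIOCTLCMD, which is a kernel-internal value and must not reach userspace; overlayfs now folds it into -ENOTTY, which is also what the copy-up hunk above treats as "no fileattr support". A tiny sketch of that normalization, with a made-up wrapper name:

#include <linux/errno.h>

/* Map the kernel-internal "no ioctl" code to the userspace-visible errno. */
static int normalize_fileattr_err_sketch(int err)
{
	if (err == -ENOIOCTLCMD)
		return -ENOTTY;
	return err;
}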
diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig
index a4c02199fef4..17bd368574e9 100644
--- a/fs/smb/client/Kconfig
+++ b/fs/smb/client/Kconfig
@@ -5,17 +5,16 @@ config CIFS
select NLS
select NLS_UCS2_UTILS
select CRYPTO
- select CRYPTO_MD5
- select CRYPTO_SHA256
- select CRYPTO_SHA512
select CRYPTO_CMAC
- select CRYPTO_HMAC
select CRYPTO_AEAD2
select CRYPTO_CCM
select CRYPTO_GCM
select CRYPTO_ECB
select CRYPTO_AES
select CRYPTO_LIB_ARC4
+ select CRYPTO_LIB_MD5
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select KEYS
select DNS_RESOLVER
select ASN1
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index b36f9f9340f0..b8ac7b7faf61 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -562,8 +562,8 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
/*
* Mark all the cfids as closed, and move them to the cfids->dying list.
- * They'll be cleaned up later by cfids_invalidation_worker. Take
- * a reference to each cfid during this process.
+ * They'll be cleaned up by the laundromat worker. Take a reference to
+ * each cfid during this process.
*/
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
@@ -580,12 +580,11 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
} else
kref_get(&cfid->refcount);
}
- /*
- * Queue dropping of the dentries once locks have been dropped
- */
- if (!list_empty(&cfids->dying))
- queue_work(cfid_put_wq, &cfids->invalidation_work);
spin_unlock(&cfids->cfid_list_lock);
+
+ /* run the laundromat unconditionally now so that any previously queued work is handled too */
+ mod_delayed_work(cfid_put_wq, &cfids->laundromat_work, 0);
+ flush_delayed_work(&cfids->laundromat_work);
}
static void
@@ -715,25 +714,6 @@ static void free_cached_dir(struct cached_fid *cfid)
kfree(cfid);
}
-static void cfids_invalidation_worker(struct work_struct *work)
-{
- struct cached_fids *cfids = container_of(work, struct cached_fids,
- invalidation_work);
- struct cached_fid *cfid, *q;
- LIST_HEAD(entry);
-
- spin_lock(&cfids->cfid_list_lock);
- /* move cfids->dying to the local list */
- list_cut_before(&entry, &cfids->dying, &cfids->dying);
- spin_unlock(&cfids->cfid_list_lock);
-
- list_for_each_entry_safe(cfid, q, &entry, entry) {
- list_del(&cfid->entry);
- /* Drop the ref-count acquired in invalidate_all_cached_dirs */
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- }
-}
-
static void cfids_laundromat_worker(struct work_struct *work)
{
struct cached_fids *cfids;
@@ -743,6 +723,9 @@ static void cfids_laundromat_worker(struct work_struct *work)
cfids = container_of(work, struct cached_fids, laundromat_work.work);
spin_lock(&cfids->cfid_list_lock);
+ /* move cfids->dying to the local list */
+ list_cut_before(&entry, &cfids->dying, &cfids->dying);
+
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
if (cfid->last_access_time &&
time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) {
@@ -796,7 +779,6 @@ struct cached_fids *init_cached_dirs(void)
INIT_LIST_HEAD(&cfids->entries);
INIT_LIST_HEAD(&cfids->dying);
- INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
dir_cache_timeout * HZ);
@@ -820,7 +802,6 @@ void free_cached_dirs(struct cached_fids *cfids)
return;
cancel_delayed_work_sync(&cfids->laundromat_work);
- cancel_work_sync(&cfids->invalidation_work);
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
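With the separate invalidation worker gone, draining the dying list is folded into the existing laundromat delayed work, and invalidate_all_cached_dirs() simply forces an immediate run and waits for it to finish. A sketch of that "kick and drain a delayed work" idiom, using the same workqueue calls as the hunk above:

#include <linux/workqueue.h>

/*
 * Run a periodic cleaner right now and wait for this run to complete:
 * mod_delayed_work() with a zero delay also pulls in any already-queued
 * timer, and flush_delayed_work() blocks until the callback has finished.
 */
static void kick_and_drain_sketch(struct workqueue_struct *wq,
				  struct delayed_work *dwork)
{
	mod_delayed_work(wq, dwork, 0);
	flush_delayed_work(dwork);
}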
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index 31339dc32719..1e383db7c337 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -62,7 +62,6 @@ struct cached_fids {
int num_entries;
struct list_head entries;
struct list_head dying;
- struct work_struct invalidation_work;
struct delayed_work laundromat_work;
/* aggregate accounting for all cached dirents under this tcon */
atomic_long_t total_dirents_entries;
diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c
index 43b86fa4d695..9891f55bac1e 100644
--- a/fs/smb/client/cifs_spnego.c
+++ b/fs/smb/client/cifs_spnego.c
@@ -24,20 +24,14 @@ static const struct cred *spnego_cred;
static int
cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
- char *payload;
- int ret;
+ char *payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
- ret = -ENOMEM;
- payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
if (!payload)
- goto error;
+ return -ENOMEM;
/* attach the data */
key->payload.data[0] = payload;
- ret = 0;
-
-error:
- return ret;
+ return 0;
}
static void
diff --git a/fs/smb/client/cifs_swn.c b/fs/smb/client/cifs_swn.c
index 7233c6a7e6d7..68a1f87c446d 100644
--- a/fs/smb/client/cifs_swn.c
+++ b/fs/smb/client/cifs_swn.c
@@ -82,10 +82,8 @@ static int cifs_swn_send_register_message(struct cifs_swn_reg *swnreg)
int ret;
skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (skb == NULL) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!skb)
+ return -ENOMEM;
hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0, CIFS_GENL_CMD_SWN_REGISTER);
if (hdr == NULL) {
@@ -172,7 +170,6 @@ static int cifs_swn_send_register_message(struct cifs_swn_reg *swnreg)
nlmsg_fail:
genlmsg_cancel(skb, hdr);
nlmsg_free(skb);
-fail:
return ret;
}
@@ -313,17 +310,15 @@ static struct cifs_swn_reg *cifs_get_swn_reg(struct cifs_tcon *tcon)
reg = cifs_find_swn_reg(tcon);
if (!IS_ERR(reg)) {
kref_get(&reg->ref_count);
- mutex_unlock(&cifs_swnreg_idr_mutex);
- return reg;
+ goto unlock;
} else if (PTR_ERR(reg) != -EEXIST) {
- mutex_unlock(&cifs_swnreg_idr_mutex);
- return reg;
+ goto unlock;
}
reg = kmalloc(sizeof(struct cifs_swn_reg), GFP_ATOMIC);
if (reg == NULL) {
- mutex_unlock(&cifs_swnreg_idr_mutex);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto fail_unlock;
}
kref_init(&reg->ref_count);
@@ -354,7 +349,7 @@ static struct cifs_swn_reg *cifs_get_swn_reg(struct cifs_tcon *tcon)
reg->ip_notify = (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT);
reg->tcon = tcon;
-
+unlock:
mutex_unlock(&cifs_swnreg_idr_mutex);
return reg;
@@ -365,6 +360,7 @@ fail_idr:
idr_remove(&cifs_swnreg_idr, reg->id);
fail:
kfree(reg);
+fail_unlock:
mutex_unlock(&cifs_swnreg_idr_mutex);
return ERR_PTR(ret);
}
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
index 63b3b1290bed..ce2ebc213a1d 100644
--- a/fs/smb/client/cifsacl.c
+++ b/fs/smb/client/cifsacl.c
@@ -339,7 +339,6 @@ int
sid_to_id(struct cifs_sb_info *cifs_sb, struct smb_sid *psid,
struct cifs_fattr *fattr, uint sidtype)
{
- int rc = 0;
struct key *sidkey;
char *sidstr;
const struct cred *saved_cred;
@@ -446,12 +445,12 @@ out_revert_creds:
* fails then we just fall back to using the ctx->linux_uid/linux_gid.
*/
got_valid_id:
- rc = 0;
if (sidtype == SIDOWNER)
fattr->cf_uid = fuid;
else
fattr->cf_gid = fgid;
- return rc;
+
+ return 0;
}
int
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 7b7c8c38fdd0..801824825ecf 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -24,14 +24,43 @@
#include <linux/iov_iter.h>
#include <crypto/aead.h>
#include <crypto/arc4.h>
+#include <crypto/md5.h>
+#include <crypto/sha2.h>
-static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
- void *priv, void *priv2)
+static int cifs_sig_update(struct cifs_calc_sig_ctx *ctx,
+ const u8 *data, size_t len)
{
- struct shash_desc *shash = priv;
+ if (ctx->md5) {
+ md5_update(ctx->md5, data, len);
+ return 0;
+ }
+ if (ctx->hmac) {
+ hmac_sha256_update(ctx->hmac, data, len);
+ return 0;
+ }
+ return crypto_shash_update(ctx->shash, data, len);
+}
+
+static int cifs_sig_final(struct cifs_calc_sig_ctx *ctx, u8 *out)
+{
+ if (ctx->md5) {
+ md5_final(ctx->md5, out);
+ return 0;
+ }
+ if (ctx->hmac) {
+ hmac_sha256_final(ctx->hmac, out);
+ return 0;
+ }
+ return crypto_shash_final(ctx->shash, out);
+}
+
+static size_t cifs_sig_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
+{
+ struct cifs_calc_sig_ctx *ctx = priv;
int ret, *pret = priv2;
- ret = crypto_shash_update(shash, iter_base, len);
+ ret = cifs_sig_update(ctx, iter_base, len);
if (ret < 0) {
*pret = ret;
return len;
@@ -42,21 +71,20 @@ static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
/*
* Pass the data from an iterator into a hash.
*/
-static int cifs_shash_iter(const struct iov_iter *iter, size_t maxsize,
- struct shash_desc *shash)
+static int cifs_sig_iter(const struct iov_iter *iter, size_t maxsize,
+ struct cifs_calc_sig_ctx *ctx)
{
struct iov_iter tmp_iter = *iter;
int err = -EIO;
- if (iterate_and_advance_kernel(&tmp_iter, maxsize, shash, &err,
- cifs_shash_step) != maxsize)
+ if (iterate_and_advance_kernel(&tmp_iter, maxsize, ctx, &err,
+ cifs_sig_step) != maxsize)
return err;
return 0;
}
-int __cifs_calc_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server, char *signature,
- struct shash_desc *shash)
+int __cifs_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ char *signature, struct cifs_calc_sig_ctx *ctx)
{
int i;
ssize_t rc;
@@ -82,8 +110,7 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
return -EIO;
}
- rc = crypto_shash_update(shash,
- iov[i].iov_base, iov[i].iov_len);
+ rc = cifs_sig_update(ctx, iov[i].iov_base, iov[i].iov_len);
if (rc) {
cifs_dbg(VFS, "%s: Could not update with payload\n",
__func__);
@@ -91,11 +118,11 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
}
}
- rc = cifs_shash_iter(&rqst->rq_iter, iov_iter_count(&rqst->rq_iter), shash);
+ rc = cifs_sig_iter(&rqst->rq_iter, iov_iter_count(&rqst->rq_iter), ctx);
if (rc < 0)
return rc;
- rc = crypto_shash_final(shash, signature);
+ rc = cifs_sig_final(ctx, signature);
if (rc)
cifs_dbg(VFS, "%s: Could not generate hash\n", __func__);
@@ -112,29 +139,22 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
static int cifs_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server, char *signature)
{
- int rc;
+ struct md5_ctx ctx;
if (!rqst->rq_iov || !signature || !server)
return -EINVAL;
-
- rc = cifs_alloc_hash("md5", &server->secmech.md5);
- if (rc)
- return -1;
-
- rc = crypto_shash_init(server->secmech.md5);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init md5\n", __func__);
- return rc;
+ if (fips_enabled) {
+ cifs_dbg(VFS,
+ "MD5 signature support is disabled due to FIPS\n");
+ return -EOPNOTSUPP;
}
- rc = crypto_shash_update(server->secmech.md5,
- server->session_key.response, server->session_key.len);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
- return rc;
- }
+ md5_init(&ctx);
+ md5_update(&ctx, server->session_key.response, server->session_key.len);
- return __cifs_calc_signature(rqst, server, signature, server->secmech.md5);
+ return __cifs_calc_signature(
+ rqst, server, signature,
+ &(struct cifs_calc_sig_ctx){ .md5 = &ctx });
}
/* must be called with server->srv_mutex held */
@@ -405,11 +425,11 @@ static __le64 find_timestamp(struct cifs_ses *ses)
}
static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
- const struct nls_table *nls_cp, struct shash_desc *hmacmd5)
+ const struct nls_table *nls_cp)
{
- int rc = 0;
int len;
char nt_hash[CIFS_NTHASH_SIZE];
+ struct hmac_md5_ctx hmac_ctx;
__le16 *user;
wchar_t *domain;
wchar_t *server;
@@ -417,17 +437,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
/* calculate md4 hash of password */
E_md4hash(ses->password, nt_hash, nls_cp);
- rc = crypto_shash_setkey(hmacmd5->tfm, nt_hash, CIFS_NTHASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not set NT hash as a key, rc=%d\n", __func__, rc);
- return rc;
- }
-
- rc = crypto_shash_init(hmacmd5);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
- return rc;
- }
+ hmac_md5_init_usingrawkey(&hmac_ctx, nt_hash, CIFS_NTHASH_SIZE);
/* convert ses->user_name to unicode */
len = ses->user_name ? strlen(ses->user_name) : 0;
@@ -442,12 +452,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
*(u16 *)user = 0;
}
- rc = crypto_shash_update(hmacmd5, (char *)user, 2 * len);
+ hmac_md5_update(&hmac_ctx, (const u8 *)user, 2 * len);
kfree(user);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with user, rc=%d\n", __func__, rc);
- return rc;
- }
/* convert ses->domainName to unicode and uppercase */
if (ses->domainName) {
@@ -459,12 +465,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
nls_cp);
- rc = crypto_shash_update(hmacmd5, (char *)domain, 2 * len);
+ hmac_md5_update(&hmac_ctx, (const u8 *)domain, 2 * len);
kfree(domain);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with domain, rc=%d\n", __func__, rc);
- return rc;
- }
} else {
/* We use ses->ip_addr if no domain name available */
len = strlen(ses->ip_addr);
@@ -474,25 +476,16 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
return -ENOMEM;
len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len, nls_cp);
- rc = crypto_shash_update(hmacmd5, (char *)server, 2 * len);
+ hmac_md5_update(&hmac_ctx, (const u8 *)server, 2 * len);
kfree(server);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with server, rc=%d\n", __func__, rc);
- return rc;
- }
}
- rc = crypto_shash_final(hmacmd5, ntlmv2_hash);
- if (rc)
- cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
-
- return rc;
+ hmac_md5_final(&hmac_ctx, ntlmv2_hash);
+ return 0;
}
-static int
-CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash, struct shash_desc *hmacmd5)
+static void CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
{
- int rc;
struct ntlmv2_resp *ntlmv2 = (struct ntlmv2_resp *)
(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
unsigned int hash_len;
@@ -501,35 +494,15 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash, struct shash_
hash_len = ses->auth_key.len - (CIFS_SESS_KEY_SIZE +
offsetof(struct ntlmv2_resp, challenge.key[0]));
- rc = crypto_shash_setkey(hmacmd5->tfm, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not set NTLMv2 hash as a key, rc=%d\n", __func__, rc);
- return rc;
- }
-
- rc = crypto_shash_init(hmacmd5);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
- return rc;
- }
-
if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED)
memcpy(ntlmv2->challenge.key, ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
else
memcpy(ntlmv2->challenge.key, ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
- rc = crypto_shash_update(hmacmd5, ntlmv2->challenge.key, hash_len);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response, rc=%d\n", __func__, rc);
- return rc;
- }
-
- /* Note that the MD5 digest over writes anon.challenge_key.key */
- rc = crypto_shash_final(hmacmd5, ntlmv2->ntlmv2_hash);
- if (rc)
- cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
-
- return rc;
+ /* Note that the HMAC-MD5 value overwrites ntlmv2->challenge.key */
+ hmac_md5_usingrawkey(ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE,
+ ntlmv2->challenge.key, hash_len,
+ ntlmv2->ntlmv2_hash);
}
/*
@@ -586,7 +559,6 @@ out:
int
setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
- struct shash_desc *hmacmd5 = NULL;
unsigned char *tiblob = NULL; /* target info blob */
struct ntlmv2_resp *ntlmv2;
char ntlmv2_hash[16];
@@ -657,51 +629,29 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
ntlmv2->client_chal = cc;
ntlmv2->reserved2 = 0;
- rc = cifs_alloc_hash("hmac(md5)", &hmacmd5);
- if (rc) {
- cifs_dbg(VFS, "Could not allocate HMAC-MD5, rc=%d\n", rc);
+ if (fips_enabled) {
+ cifs_dbg(VFS, "NTLMv2 support is disabled due to FIPS\n");
+ rc = -EOPNOTSUPP;
goto unlock;
}
/* calculate ntlmv2_hash */
- rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp, hmacmd5);
+ rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
if (rc) {
cifs_dbg(VFS, "Could not get NTLMv2 hash, rc=%d\n", rc);
goto unlock;
}
/* calculate first part of the client response (CR1) */
- rc = CalcNTLMv2_response(ses, ntlmv2_hash, hmacmd5);
- if (rc) {
- cifs_dbg(VFS, "Could not calculate CR1, rc=%d\n", rc);
- goto unlock;
- }
+ CalcNTLMv2_response(ses, ntlmv2_hash);
/* now calculate the session key for NTLMv2 */
- rc = crypto_shash_setkey(hmacmd5->tfm, ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not set NTLMv2 hash as a key, rc=%d\n", __func__, rc);
- goto unlock;
- }
-
- rc = crypto_shash_init(hmacmd5);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init HMAC-MD5, rc=%d\n", __func__, rc);
- goto unlock;
- }
-
- rc = crypto_shash_update(hmacmd5, ntlmv2->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update with response, rc=%d\n", __func__, rc);
- goto unlock;
- }
-
- rc = crypto_shash_final(hmacmd5, ses->auth_key.response);
- if (rc)
- cifs_dbg(VFS, "%s: Could not generate MD5 hash, rc=%d\n", __func__, rc);
+ hmac_md5_usingrawkey(ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE,
+ ntlmv2->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE,
+ ses->auth_key.response);
+ rc = 0;
unlock:
cifs_server_unlock(ses->server);
- cifs_free_hash(&hmacmd5);
setup_ntlmv2_rsp_ret:
kfree_sensitive(tiblob);
@@ -743,9 +693,6 @@ void
cifs_crypto_secmech_release(struct TCP_Server_Info *server)
{
cifs_free_hash(&server->secmech.aes_cmac);
- cifs_free_hash(&server->secmech.hmacsha256);
- cifs_free_hash(&server->secmech.md5);
- cifs_free_hash(&server->secmech.sha512);
if (server->secmech.enc) {
crypto_free_aead(server->secmech.enc);
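The conversion replaces the allocated crypto_shash transforms with the library MD5/HMAC interfaces, which operate on stack contexts and cannot fail; only the AES-CMAC signing path keeps a shash. A rough sketch of the one-shot HMAC-MD5 call as used for the NTLMv2 session key above (the 16-byte sizes stand in for the CIFS_* constants):

#include <crypto/md5.h>

/*
 * Sketch: session key = HMAC-MD5(key = NTLMv2 hash, msg = NTLMv2 response),
 * mirroring the hmac_md5_usingrawkey() call in setup_ntlmv2_rsp().
 */
static void derive_session_key_sketch(const u8 ntlmv2_hash[16],
				      const u8 ntlmv2_response[16],
				      u8 session_key[16])
{
	hmac_md5_usingrawkey(ntlmv2_hash, 16,		/* raw key */
			     ntlmv2_response, 16,	/* message */
			     session_key);		/* MD5_DIGEST_SIZE output */
}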
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 1775c2b7528f..4f959f1e08d2 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -392,11 +392,27 @@ static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct TCP_Server_Info *server = tcon->ses->server;
+ struct inode *inode = file_inode(file);
+ int rc;
+
+ if (!server->ops->fallocate)
+ return -EOPNOTSUPP;
+
+ rc = inode_lock_killable(inode);
+ if (rc)
+ return rc;
+
+ netfs_wait_for_outstanding_io(inode);
- if (server->ops->fallocate)
- return server->ops->fallocate(file, tcon, mode, off, len);
+ rc = file_modified(file);
+ if (rc)
+ goto out_unlock;
+
+ rc = server->ops->fallocate(file, tcon, mode, off, len);
- return -EOPNOTSUPP;
+out_unlock:
+ inode_unlock(inode);
+ return rc;
}
static int cifs_permission(struct mnt_idmap *idmap,
@@ -2123,13 +2139,9 @@ MODULE_DESCRIPTION
"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("ecb");
-MODULE_SOFTDEP("hmac");
-MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
-MODULE_SOFTDEP("sha256");
-MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
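cifs_fallocate() now serializes against other inode users: it takes the inode lock killably, waits for outstanding netfs I/O, and runs file_modified() before handing off to the per-dialect handler. A compressed sketch of that ordering (the callback parameter is a stand-in for server->ops->fallocate):

#include <linux/fs.h>
#include <linux/netfs.h>

static long fallocate_locked_sketch(struct file *file, int mode,
				    loff_t off, loff_t len,
				    long (*do_fallocate)(struct file *, int,
							 loff_t, loff_t))
{
	struct inode *inode = file_inode(file);
	long rc;

	rc = inode_lock_killable(inode);	/* bail out on fatal signals */
	if (rc)
		return rc;

	netfs_wait_for_outstanding_io(inode);	/* let in-flight I/O drain first */

	rc = file_modified(file);		/* clear setuid/setgid, update times */
	if (!rc)
		rc = do_fallocate(file, mode, off, len);

	inode_unlock(inode);
	return rc;
}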
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 3ce7c614ccc0..e9534258d1ef 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -145,6 +145,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 56
-#define CIFS_VERSION "2.56"
+#define SMB3_PRODUCT_BUILD 57
+#define CIFS_VERSION "2.57"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 3ac254e123dc..16a00a61fd2c 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -24,6 +24,7 @@
#include "cifsacl.h"
#include <crypto/internal/hash.h>
#include <uapi/linux/cifs/cifs_mount.h>
+#include "../common/cifsglob.h"
#include "../common/smb2pdu.h"
#include "smb2pdu.h"
#include <linux/filelock.h>
@@ -221,9 +222,6 @@ struct session_key {
/* crypto hashing related structure/fields, not specific to a sec mech */
struct cifs_secmech {
- struct shash_desc *md5; /* md5 hash function, for CIFS/SMB1 signatures */
- struct shash_desc *hmacsha256; /* hmac-sha256 hash function, for SMB2 signatures */
- struct shash_desc *sha512; /* sha512 hash function, for SMB3.1.1 preauth hash */
struct shash_desc *aes_cmac; /* block-cipher based MAC function, for SMB3 signatures */
struct crypto_aead *enc; /* smb3 encryption AEAD TFM (AES-CCM and AES-GCM) */
@@ -702,12 +700,6 @@ get_rfc1002_length(void *buf)
return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
}
-static inline void
-inc_rfc1001_len(void *buf, int count)
-{
- be32_add_cpu((__be32 *)buf, count);
-}
-
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
@@ -1021,8 +1013,6 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
-#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
-
/*
* Windows only supports a max of 60kb reads and 65535 byte writes. Default to
* those values when posix extensions aren't in force. In actuality here, we
@@ -1566,6 +1556,11 @@ struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr,
bool offload);
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
+int cifs_file_flush(const unsigned int xid, struct inode *inode,
+ struct cifsFileInfo *cfile);
+int cifs_file_set_size(const unsigned int xid, struct dentry *dentry,
+ const char *full_path, struct cifsFileInfo *open_file,
+ loff_t size);
#define CIFS_CACHE_READ_FLG 1
#define CIFS_CACHE_HANDLE_FLG 2
@@ -2143,30 +2138,20 @@ extern mempool_t cifs_io_request_pool;
extern mempool_t cifs_io_subrequest_pool;
/* Operations for different SMB versions */
-#define SMB1_VERSION_STRING "1.0"
-#define SMB20_VERSION_STRING "2.0"
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
extern struct smb_version_operations smb1_operations;
extern struct smb_version_values smb1_values;
extern struct smb_version_operations smb20_operations;
extern struct smb_version_values smb20_values;
#endif /* CIFS_ALLOW_INSECURE_LEGACY */
-#define SMB21_VERSION_STRING "2.1"
extern struct smb_version_operations smb21_operations;
extern struct smb_version_values smb21_values;
-#define SMBDEFAULT_VERSION_STRING "default"
extern struct smb_version_values smbdefault_values;
-#define SMB3ANY_VERSION_STRING "3"
extern struct smb_version_values smb3any_values;
-#define SMB30_VERSION_STRING "3.0"
extern struct smb_version_operations smb30_operations;
extern struct smb_version_values smb30_values;
-#define SMB302_VERSION_STRING "3.02"
-#define ALT_SMB302_VERSION_STRING "3.0.2"
/*extern struct smb_version_operations smb302_operations;*/ /* not needed yet */
extern struct smb_version_values smb302_values;
-#define SMB311_VERSION_STRING "3.1.1"
-#define ALT_SMB311_VERSION_STRING "3.11"
extern struct smb_version_operations smb311_operations;
extern struct smb_version_values smb311_values;
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index e8fba98690ce..4976be2c47c1 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -632,9 +632,13 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server, char *signature,
- struct shash_desc *shash);
+struct cifs_calc_sig_ctx {
+ struct md5_ctx *md5;
+ struct hmac_sha256_ctx *hmac;
+ struct shash_desc *shash;
+};
+int __cifs_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ char *signature, struct cifs_calc_sig_ctx *ctx);
enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index d20766f664c4..2881efcbe09a 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1163,7 +1163,7 @@ OldOpenRetry:
cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile));
pfile_info->EndOfFile = pfile_info->AllocationSize;
pfile_info->NumberOfLinks = cpu_to_le32(1);
- pfile_info->DeletePending = 0;
+ pfile_info->DeletePending = 0; /* successful open = not delete pending */
}
}
@@ -1288,7 +1288,7 @@ openRetry:
buf->AllocationSize = rsp->AllocationSize;
buf->EndOfFile = rsp->EndOfFile;
buf->NumberOfLinks = cpu_to_le32(1);
- buf->DeletePending = 0;
+ buf->DeletePending = 0; /* successful open = not delete pending */
}
cifs_buf_release(req);
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index fc67a6441c96..da5597dbf5b9 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -200,8 +200,8 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
full_path = build_path_from_dentry(direntry, page);
if (IS_ERR(full_path)) {
- free_dentry_path(page);
- return PTR_ERR(full_path);
+ rc = PTR_ERR(full_path);
+ goto out;
}
/* If we're caching, we need to be able to fill in around partial writes. */
@@ -678,7 +678,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
const char *full_path;
void *page;
int retry_count = 0;
- struct cached_fid *cfid = NULL;
+ struct dentry *de;
xid = get_xid();
@@ -690,16 +690,15 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
- free_xid(xid);
- return ERR_CAST(tlink);
+ de = ERR_CAST(tlink);
+ goto free_xid;
}
pTcon = tlink_tcon(tlink);
rc = check_name(direntry, pTcon);
if (unlikely(rc)) {
- cifs_put_tlink(tlink);
- free_xid(xid);
- return ERR_PTR(rc);
+ de = ERR_PTR(rc);
+ goto put_tlink;
}
/* can not grab the rename sem here since it would
@@ -708,15 +707,15 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
page = alloc_dentry_path();
full_path = build_path_from_dentry(direntry, page);
if (IS_ERR(full_path)) {
- cifs_put_tlink(tlink);
- free_xid(xid);
- free_dentry_path(page);
- return ERR_CAST(full_path);
+ de = ERR_CAST(full_path);
+ goto free_dentry_path;
}
if (d_really_is_positive(direntry)) {
cifs_dbg(FYI, "non-NULL inode in lookup\n");
} else {
+ struct cached_fid *cfid = NULL;
+
cifs_dbg(FYI, "NULL inode in lookup\n");
/*
@@ -775,25 +774,27 @@ again:
}
out:
+ de = d_splice_alias(newInode, direntry);
+free_dentry_path:
free_dentry_path(page);
+put_tlink:
cifs_put_tlink(tlink);
+free_xid:
free_xid(xid);
- return d_splice_alias(newInode, direntry);
+ return de;
}
static int
cifs_d_revalidate(struct inode *dir, const struct qstr *name,
struct dentry *direntry, unsigned int flags)
{
- struct inode *inode = NULL;
- struct cached_fid *cfid;
- int rc;
-
if (flags & LOOKUP_RCU)
return -ECHILD;
if (d_really_is_positive(direntry)) {
- inode = d_inode(direntry);
+ int rc;
+ struct inode *inode = d_inode(direntry);
+
if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
CIFS_I(inode)->time = 0; /* force reval */
@@ -836,6 +837,7 @@ cifs_d_revalidate(struct inode *dir, const struct qstr *name,
} else {
struct cifs_sb_info *cifs_sb = CIFS_SB(dir->i_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cached_fid *cfid;
if (!open_cached_dir_by_dentry(tcon, direntry->d_parent, &cfid)) {
/*
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index a5ed742afa00..474dadeb1593 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -952,6 +952,66 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
}
}
+int cifs_file_flush(const unsigned int xid, struct inode *inode,
+ struct cifsFileInfo *cfile)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_tcon *tcon;
+ int rc;
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
+ return 0;
+
+ if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
+ tcon = tlink_tcon(cfile->tlink);
+ return tcon->ses->server->ops->flush(xid, tcon,
+ &cfile->fid);
+ }
+ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
+ if (!rc) {
+ tcon = tlink_tcon(cfile->tlink);
+ rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
+ cifsFileInfo_put(cfile);
+ } else if (rc == -EBADF) {
+ rc = 0;
+ }
+ return rc;
+}
+
+static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
+{
+ struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry));
+ struct inode *inode = d_inode(dentry);
+ struct cifsFileInfo *cfile = NULL;
+ struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon;
+ int rc;
+
+ rc = filemap_write_and_wait(inode->i_mapping);
+ if (is_interrupt_error(rc))
+ return -ERESTARTSYS;
+ mapping_set_error(inode->i_mapping, rc);
+
+ cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+ rc = cifs_file_flush(xid, inode, cfile);
+ if (!rc) {
+ if (cfile) {
+ tcon = tlink_tcon(cfile->tlink);
+ server = tcon->ses->server;
+ rc = server->ops->set_file_size(xid, tcon,
+ cfile, 0, false);
+ }
+ if (!rc) {
+ netfs_resize_file(&cinode->netfs, 0, true);
+ cifs_setsize(inode, 0);
+ inode->i_blocks = 0;
+ }
+ }
+ if (cfile)
+ cifsFileInfo_put(cfile);
+ return rc;
+}
+
int cifs_open(struct inode *inode, struct file *file)
{
@@ -1004,6 +1064,12 @@ int cifs_open(struct inode *inode, struct file *file)
file->f_op = &cifs_file_direct_ops;
}
+ if (file->f_flags & O_TRUNC) {
+ rc = cifs_do_truncate(xid, file_dentry(file));
+ if (rc)
+ goto out;
+ }
+
/* Get the cached handle as SMB2 close is deferred */
if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
rc = cifs_get_writable_path(tcon, full_path,
@@ -2685,13 +2751,10 @@ cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
- unsigned int xid;
- int rc = 0;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
struct cifsFileInfo *smbfile = file->private_data;
struct inode *inode = file_inode(file);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ unsigned int xid;
+ int rc;
rc = file_write_and_wait_range(file, start, end);
if (rc) {
@@ -2699,39 +2762,15 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
return rc;
}
- xid = get_xid();
-
- cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
- file, datasync);
+ cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync);
if (!CIFS_CACHE_READ(CIFS_I(inode))) {
rc = cifs_zap_mapping(inode);
- if (rc) {
- cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
- rc = 0; /* don't care about it in fsync */
- }
+ cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc);
}
- tcon = tlink_tcon(smbfile->tlink);
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
- server = tcon->ses->server;
- if (server->ops->flush == NULL) {
- rc = -ENOSYS;
- goto strict_fsync_exit;
- }
-
- if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
- smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
- if (smbfile) {
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
- cifsFileInfo_put(smbfile);
- } else
- cifs_dbg(FYI, "ignore fsync for file not open for write\n");
- } else
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
- }
-
-strict_fsync_exit:
+ xid = get_xid();
+ rc = cifs_file_flush(xid, inode, smbfile);
free_xid(xid);
return rc;
}
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 8bb544be401e..098a79b7a959 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -2431,8 +2431,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
tcon = tlink_tcon(tlink);
server = tcon->ses->server;
- if (!server->ops->rename)
- return -ENOSYS;
+ if (!server->ops->rename) {
+ rc = -ENOSYS;
+ goto do_rename_exit;
+ }
/* try path-based rename first */
rc = server->ops->rename(xid, tcon, from_dentry,
@@ -3007,28 +3009,25 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
void cifs_setsize(struct inode *inode, loff_t offset)
{
- struct cifsInodeInfo *cifs_i = CIFS_I(inode);
-
spin_lock(&inode->i_lock);
i_size_write(inode, offset);
spin_unlock(&inode->i_lock);
-
- /* Cached inode must be refreshed on truncate */
- cifs_i->time = 0;
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
truncate_pagecache(inode, offset);
+ netfs_wait_for_outstanding_io(inode);
}
-static int
-cifs_set_file_size(struct inode *inode, struct iattr *attrs,
- unsigned int xid, const char *full_path, struct dentry *dentry)
+int cifs_file_set_size(const unsigned int xid, struct dentry *dentry,
+ const char *full_path, struct cifsFileInfo *open_file,
+ loff_t size)
{
- int rc;
- struct cifsFileInfo *open_file;
- struct cifsInodeInfo *cifsInode = CIFS_I(inode);
+ struct inode *inode = d_inode(dentry);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct tcon_link *tlink = NULL;
struct cifs_tcon *tcon = NULL;
struct TCP_Server_Info *server;
+ int rc = -EINVAL;
/*
* To avoid spurious oplock breaks from server, in the case of
@@ -3039,19 +3038,25 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
* writebehind data than the SMB timeout for the SetPathInfo
* request would allow
*/
- open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
- if (open_file) {
+ if (open_file && (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE)) {
tcon = tlink_tcon(open_file->tlink);
server = tcon->ses->server;
- if (server->ops->set_file_size)
- rc = server->ops->set_file_size(xid, tcon, open_file,
- attrs->ia_size, false);
- else
- rc = -ENOSYS;
- cifsFileInfo_put(open_file);
- cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
- } else
- rc = -EINVAL;
+ rc = server->ops->set_file_size(xid, tcon,
+ open_file,
+ size, false);
+ cifs_dbg(FYI, "%s: set_file_size: rc = %d\n", __func__, rc);
+ } else {
+ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+ if (open_file) {
+ tcon = tlink_tcon(open_file->tlink);
+ server = tcon->ses->server;
+ rc = server->ops->set_file_size(xid, tcon,
+ open_file,
+ size, false);
+ cifs_dbg(FYI, "%s: set_file_size: rc = %d\n", __func__, rc);
+ cifsFileInfo_put(open_file);
+ }
+ }
if (!rc)
goto set_size_out;
@@ -3069,20 +3074,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
* valid, writeable file handle for it was found or because there was
* an error setting it by handle.
*/
- if (server->ops->set_path_size)
- rc = server->ops->set_path_size(xid, tcon, full_path,
- attrs->ia_size, cifs_sb, false, dentry);
- else
- rc = -ENOSYS;
- cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
-
- if (tlink)
- cifs_put_tlink(tlink);
+ rc = server->ops->set_path_size(xid, tcon, full_path, size,
+ cifs_sb, false, dentry);
+ cifs_dbg(FYI, "%s: SetEOF by path (setattrs) rc = %d\n", __func__, rc);
+ cifs_put_tlink(tlink);
set_size_out:
if (rc == 0) {
- netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
- cifs_setsize(inode, attrs->ia_size);
+ netfs_resize_file(&cifsInode->netfs, size, true);
+ cifs_setsize(inode, size);
/*
* i_blocks is not related to (i_size / i_blksize), but instead
* 512 byte (2**9) size is required for calculating num blocks.
@@ -3090,15 +3090,7 @@ set_size_out:
* this is best estimate we have for blocks allocated for a file
* Number of blocks must be rounded up so size 1 is not 0 blocks
*/
- inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;
-
- /*
- * The man page of truncate says if the size changed,
- * then the st_ctime and st_mtime fields for the file
- * are updated.
- */
- attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
- attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+ inode->i_blocks = (512 - 1 + size) >> 9;
}
return rc;
@@ -3118,7 +3110,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
struct cifs_unix_set_info_args *args = NULL;
- struct cifsFileInfo *open_file;
+ struct cifsFileInfo *open_file = NULL;
cifs_dbg(FYI, "setattr_unix on file %pd attrs->ia_valid=0x%x\n",
direntry, attrs->ia_valid);
@@ -3132,6 +3124,9 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
if (rc < 0)
goto out;
+ if (attrs->ia_valid & ATTR_FILE)
+ open_file = attrs->ia_file->private_data;
+
full_path = build_path_from_dentry(direntry, page);
if (IS_ERR(full_path)) {
rc = PTR_ERR(full_path);
@@ -3159,9 +3154,16 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
rc = 0;
if (attrs->ia_valid & ATTR_SIZE) {
- rc = cifs_set_file_size(inode, attrs, xid, full_path, direntry);
+ rc = cifs_file_set_size(xid, direntry, full_path,
+ open_file, attrs->ia_size);
if (rc != 0)
goto out;
+ /*
+ * Avoid setting timestamps on the server for ftruncate(2) to
+ * prevent it from disabling automatic timestamp updates as per
+ * MS-FSA 2.1.4.17.
+ */
+ attrs->ia_valid &= ~(ATTR_CTIME | ATTR_MTIME);
}
/* skip mode change if it's just for clearing setuid/setgid */
@@ -3206,14 +3208,24 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
args->ctime = NO_CHANGE_64;
args->device = 0;
- open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
- if (open_file) {
- u16 nfid = open_file->fid.netfid;
- u32 npid = open_file->pid;
+ rc = -EINVAL;
+ if (open_file && (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE)) {
pTcon = tlink_tcon(open_file->tlink);
- rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
- cifsFileInfo_put(open_file);
+ rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args,
+ open_file->fid.netfid,
+ open_file->pid);
} else {
+ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+ if (open_file) {
+ pTcon = tlink_tcon(open_file->tlink);
+ rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args,
+ open_file->fid.netfid,
+ open_file->pid);
+ cifsFileInfo_put(open_file);
+ }
+ }
+
+ if (rc) {
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
rc = PTR_ERR(tlink);
@@ -3221,8 +3233,8 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
}
pTcon = tlink_tcon(tlink);
rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
- cifs_sb->local_nls,
- cifs_remap(cifs_sb));
+ cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
cifs_put_tlink(tlink);
}
@@ -3264,8 +3276,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
struct inode *inode = d_inode(direntry);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
- struct cifsFileInfo *wfile;
- struct cifs_tcon *tcon;
+ struct cifsFileInfo *cfile = NULL;
const char *full_path;
void *page = alloc_dentry_path();
int rc = -EACCES;
@@ -3285,6 +3296,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
if (rc < 0)
goto cifs_setattr_exit;
+ if (attrs->ia_valid & ATTR_FILE)
+ cfile = attrs->ia_file->private_data;
+
full_path = build_path_from_dentry(direntry, page);
if (IS_ERR(full_path)) {
rc = PTR_ERR(full_path);
@@ -3311,25 +3325,23 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
rc = 0;
- if ((attrs->ia_valid & ATTR_MTIME) &&
- !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
- rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
- if (!rc) {
- tcon = tlink_tcon(wfile->tlink);
- rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
- cifsFileInfo_put(wfile);
- if (rc)
- goto cifs_setattr_exit;
- } else if (rc != -EBADF)
+ if (attrs->ia_valid & ATTR_MTIME) {
+ rc = cifs_file_flush(xid, inode, cfile);
+ if (rc)
goto cifs_setattr_exit;
- else
- rc = 0;
}
if (attrs->ia_valid & ATTR_SIZE) {
- rc = cifs_set_file_size(inode, attrs, xid, full_path, direntry);
+ rc = cifs_file_set_size(xid, direntry, full_path,
+ cfile, attrs->ia_size);
if (rc != 0)
goto cifs_setattr_exit;
+ /*
+ * Avoid setting timestamps on the server for ftruncate(2) to
+ * prevent it from disabling automatic timestamp updates as per
+ * MS-FSA 2.1.4.17.
+ */
+ attrs->ia_valid &= ~(ATTR_CTIME | ATTR_MTIME);
}
if (attrs->ia_valid & ATTR_UID)
@@ -3459,6 +3471,13 @@ cifs_setattr(struct mnt_idmap *idmap, struct dentry *direntry,
if (unlikely(cifs_forced_shutdown(cifs_sb)))
return -EIO;
+ /*
+ * Avoid setting [cm]time with O_TRUNC to prevent the server from
+ * disabling automatic timestamp updates as specified in
+ * MS-FSA 2.1.4.17.
+ */
+ if (attrs->ia_valid & ATTR_OPEN)
+ return 0;
do {
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
index fe80e711cd75..70f3c0c67eeb 100644
--- a/fs/smb/client/link.c
+++ b/fs/smb/client/link.c
@@ -5,6 +5,7 @@
* Author(s): Steve French (sfrench@us.ibm.com)
*
*/
+#include <crypto/md5.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
@@ -37,23 +38,6 @@
#define CIFS_MF_SYMLINK_MD5_ARGS(md5_hash) md5_hash
static int
-symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
-{
- int rc;
- struct shash_desc *md5 = NULL;
-
- rc = cifs_alloc_hash("md5", &md5);
- if (rc)
- return rc;
-
- rc = crypto_shash_digest(md5, link_str, link_len, md5_hash);
- if (rc)
- cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
- cifs_free_hash(&md5);
- return rc;
-}
-
-static int
parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
char **_link_str)
{
@@ -77,11 +61,7 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
return -EINVAL;
- rc = symlink_hash(link_len, link_str, md5_hash);
- if (rc) {
- cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
- return rc;
- }
+ md5(link_str, link_len, md5_hash);
scnprintf(md5_str2, sizeof(md5_str2),
CIFS_MF_SYMLINK_MD5_FORMAT,
@@ -103,7 +83,6 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
static int
format_mf_symlink(u8 *buf, unsigned int buf_len, const char *link_str)
{
- int rc;
unsigned int link_len;
unsigned int ofs;
u8 md5_hash[16];
@@ -116,11 +95,7 @@ format_mf_symlink(u8 *buf, unsigned int buf_len, const char *link_str)
if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
return -ENAMETOOLONG;
- rc = symlink_hash(link_len, link_str, md5_hash);
- if (rc) {
- cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
- return rc;
- }
+ md5(link_str, link_len, md5_hash);
scnprintf(buf, buf_len,
CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
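With the library interface, hashing an M-F symlink body is a single md5() call, so the allocation and error plumbing of symlink_hash() disappears entirely. A tiny sketch (assuming the one-shot md5() helper from <crypto/md5.h> used above):

#include <crypto/md5.h>

/* Hash a symlink target the way format_mf_symlink()/parse_mf_symlink() now do. */
static void hash_link_target_sketch(const char *link_str, unsigned int link_len,
				    u8 md5_hash[MD5_DIGEST_SIZE])
{
	md5(link_str, link_len, md5_hash);	/* one-shot, cannot fail */
}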
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index dda6dece802a..e10123d8cd7d 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -916,6 +916,14 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
char *data_end;
struct dfs_referral_level_3 *ref;
+ if (rsp_size < sizeof(*rsp)) {
+ cifs_dbg(VFS | ONCE,
+ "%s: header is malformed (size is %u, must be at least %zu)\n",
+ __func__, rsp_size, sizeof(*rsp));
+ rc = -EINVAL;
+ goto parse_DFS_referrals_exit;
+ }
+
*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
if (*num_of_nodes < 1) {
@@ -925,6 +933,15 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
goto parse_DFS_referrals_exit;
}
+ if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) {
+ cifs_dbg(VFS | ONCE,
+ "%s: malformed buffer (size is %u, must be at least %zu)\n",
+ __func__, rsp_size,
+ sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3));
+ rc = -EINVAL;
+ goto parse_DFS_referrals_exit;
+ }
+
ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
if (ref->VersionNumber != cpu_to_le16(3)) {
cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
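The two added checks bound the referral parsing before anything is dereferenced: the buffer must hold at least the fixed response header, and the advertised number of V3 referral records must also fit inside it. The same arithmetic in a self-contained form (the sizes are illustrative stand-ins for the real wire structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define RSP_HDR_SIZE   8u	/* stand-in for sizeof(struct get_dfs_referral_rsp) */
#define REFERRAL3_SIZE 34u	/* stand-in for sizeof(REFERRAL3) */

/* Mirrors the bounds checks added to parse_dfs_referrals(). */
static bool dfs_rsp_fits(uint32_t rsp_size, uint16_t num_of_nodes)
{
	if (rsp_size < RSP_HDR_SIZE)
		return false;	/* even the header is truncated */
	if (num_of_nodes < 1)
		return false;	/* nothing to parse */
	/* every advertised referral record must lie inside the buffer */
	return RSP_HDR_SIZE + (size_t)num_of_nodes * REFERRAL3_SIZE <= rsp_size;
}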
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 0a8c2fcc9ded..ef3b498b0a02 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -584,7 +584,7 @@ cifs_ses_add_channel(struct cifs_ses *ses,
* to sign packets before we generate the channel signing key
* (we sign with the session key)
*/
- rc = smb311_crypto_shash_allocate(chan->server);
+ rc = smb3_crypto_shash_allocate(chan->server);
if (rc) {
cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
mutex_unlock(&ses->session_mutex);
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index a02d41d1ce4a..ca8f3dd7ff63 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -652,13 +652,71 @@ static int cifs_query_path_info(const unsigned int xid,
#ifdef CONFIG_CIFS_XATTR
/*
+ * For non-symlink WSL reparse points it is required to fetch
+ * EA $LXMOD, which carries the mandatory file type in its S_DT part.
+ */
+ if (!rc && data->reparse_point) {
+ struct smb2_file_full_ea_info *ea;
+ u32 next = 0;
+
+ ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
+ do {
+ ea = (void *)((u8 *)ea + next);
+ next = le32_to_cpu(ea->next_entry_offset);
+ } while (next);
+ if (le16_to_cpu(ea->ea_value_length)) {
+ ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) +
+ ea->ea_name_length + 1 +
+ le16_to_cpu(ea->ea_value_length), 4));
+ ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset));
+ }
+
+ rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_MODE,
+ &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
+ SMB2_WSL_XATTR_MODE_SIZE, cifs_sb);
+ if (rc == SMB2_WSL_XATTR_MODE_SIZE) {
+ ea->next_entry_offset = cpu_to_le32(0);
+ ea->flags = 0;
+ ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
+ ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_MODE_SIZE);
+ memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_MODE, SMB2_WSL_XATTR_NAME_LEN + 1);
+ data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
+ SMB2_WSL_XATTR_MODE_SIZE, 4);
+ rc = 0;
+ } else if (rc >= 0) {
+ /* It is an error if EA $LXMOD has wrong size. */
+ rc = -EINVAL;
+ } else {
+ /*
+ * In all other cases ignore the error if fetching
+ * EA $LXMOD failed. It is needed only for
+ * non-symlink WSL reparse points, and wsl_to_fattr()
+ * handles the case when the EA is missing.
+ */
+ rc = 0;
+ }
+ }
+
+ /*
* For WSL CHR and BLK reparse points it is required to fetch
* EA $LXDEV which contains major and minor device numbers.
*/
if (!rc && data->reparse_point) {
struct smb2_file_full_ea_info *ea;
+ u32 next = 0;
ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
+ do {
+ ea = (void *)((u8 *)ea + next);
+ next = le32_to_cpu(ea->next_entry_offset);
+ } while (next);
+ if (le16_to_cpu(ea->ea_value_length)) {
+ ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) +
+ ea->ea_name_length + 1 +
+ le16_to_cpu(ea->ea_value_length), 4));
+ ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset));
+ }
+
rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV,
&ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
SMB2_WSL_XATTR_DEV_SIZE, cifs_sb);
@@ -668,8 +726,8 @@ static int cifs_query_path_info(const unsigned int xid,
ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE);
memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1);
- data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
- SMB2_WSL_XATTR_DEV_SIZE;
+ data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
+ SMB2_WSL_XATTR_DEV_SIZE, 4);
rc = 0;
} else if (rc >= 0) {
/* It is an error if EA $LXDEV has wrong size. */
@@ -818,6 +876,11 @@ cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
info.Attributes = cpu_to_le32(dosattrs);
rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls,
cifs_sb);
+ if (rc == -EOPNOTSUPP || rc == -EINVAL)
+ rc = SMBSetInformation(xid, tcon, full_path,
+ info.Attributes,
+ 0 /* do not change write time */,
+ cifs_sb->local_nls, cifs_sb);
if (rc == 0)
cifsInode->cifsAttrs = dosattrs;
}
@@ -974,7 +1037,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
.tcon = tcon,
.cifs_sb = cifs_sb,
.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
- .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+ .create_options = cifs_create_options(cifs_sb, 0),
.disposition = FILE_OPEN,
.path = full_path,
.fid = &fid,
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 0985db9f86e5..09e3fc81d7cb 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -676,7 +676,7 @@ finished:
idata->fi.EndOfFile = create_rsp->EndofFile;
if (le32_to_cpu(idata->fi.NumberOfLinks) == 0)
idata->fi.NumberOfLinks = cpu_to_le32(1); /* dummy value */
- idata->fi.DeletePending = 0;
+ idata->fi.DeletePending = 0; /* successful open = not delete pending */
idata->fi.Directory = !!(le32_to_cpu(create_rsp->FileAttributes) & ATTR_DIRECTORY);
/* smb2_parse_contexts() fills idata->fi.IndexNumber */
@@ -1382,31 +1382,33 @@ int
smb2_set_file_info(struct inode *inode, const char *full_path,
FILE_BASIC_INFO *buf, const unsigned int xid)
{
- struct cifs_open_parms oparms;
+ struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), };
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsFileInfo *cfile = NULL;
+ struct cifs_open_parms oparms;
struct tcon_link *tlink;
struct cifs_tcon *tcon;
- struct cifsFileInfo *cfile;
- struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), };
- int rc;
-
- if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
- (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
- (buf->Attributes == 0))
- return 0; /* would be a no op, no sense sending this */
+ int rc = 0;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
+ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) {
+ if (buf->Attributes == 0)
+ goto out; /* would be a no op, no sense sending this */
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ }
+
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES,
FILE_OPEN, 0, ACL_NO_MODE);
rc = smb2_compound_op(xid, tcon, cifs_sb,
full_path, &oparms, &in_iov,
&(int){SMB2_OP_SET_INFO}, 1,
cfile, NULL, NULL, NULL);
+out:
cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index 89d933b4a8bc..96bfe4c63ccf 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -7,6 +7,7 @@
* Pavel Shilovsky (pshilovsky@samba.org) 2012
*
*/
+#include <crypto/sha2.h>
#include <linux/ctype.h>
#include "cifsglob.h"
#include "cifsproto.h"
@@ -888,13 +889,13 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
* @iov: array containing the SMB request we will send to the server
* @nvec: number of array entries for the iov
*/
-int
+void
smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
struct kvec *iov, int nvec)
{
- int i, rc;
+ int i;
struct smb2_hdr *hdr;
- struct shash_desc *sha512 = NULL;
+ struct sha512_ctx sha_ctx;
hdr = (struct smb2_hdr *)iov[0].iov_base;
/* neg prot are always taken */
@@ -907,52 +908,22 @@ smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
* and we can test it. Preauth requires 3.1.1 for now.
*/
if (server->dialect != SMB311_PROT_ID)
- return 0;
+ return;
if (hdr->Command != SMB2_SESSION_SETUP)
- return 0;
+ return;
/* skip last sess setup response */
if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
&& (hdr->Status == NT_STATUS_OK
|| (hdr->Status !=
cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
- return 0;
+ return;
ok:
- rc = smb311_crypto_shash_allocate(server);
- if (rc)
- return rc;
-
- sha512 = server->secmech.sha512;
- rc = crypto_shash_init(sha512);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
- return rc;
- }
-
- rc = crypto_shash_update(sha512, ses->preauth_sha_hash,
- SMB2_PREAUTH_HASH_SIZE);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
- return rc;
- }
-
- for (i = 0; i < nvec; i++) {
- rc = crypto_shash_update(sha512, iov[i].iov_base, iov[i].iov_len);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
- __func__);
- return rc;
- }
- }
-
- rc = crypto_shash_final(sha512, ses->preauth_sha_hash);
- if (rc) {
- cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
- __func__);
- return rc;
- }
-
- return 0;
+ sha512_init(&sha_ctx);
+ sha512_update(&sha_ctx, ses->preauth_sha_hash, SMB2_PREAUTH_HASH_SIZE);
+ for (i = 0; i < nvec; i++)
+ sha512_update(&sha_ctx, iov[i].iov_base, iov[i].iov_len);
+ sha512_final(&sha_ctx, ses->preauth_sha_hash);
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 058050f744c0..95cd484cfbba 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -1803,140 +1803,226 @@ free_vars:
return rc;
}
+/**
+ * calc_chunk_count - calculates the number of chunks to be filled in the Chunks[]
+ * array of struct copychunk_ioctl
+ *
+ * @tcon: destination file tcon
+ * @bytes_left: how many bytes are left to copy
+ *
+ * Return: maximum number of chunks with which Chunks[] can be filled.
+ */
+static inline u32
+calc_chunk_count(struct cifs_tcon *tcon, u64 bytes_left)
+{
+ u32 max_chunks = READ_ONCE(tcon->max_chunks);
+ u32 max_bytes_copy = READ_ONCE(tcon->max_bytes_copy);
+ u32 max_bytes_chunk = READ_ONCE(tcon->max_bytes_chunk);
+ u64 need;
+ u32 allowed;
+
+ if (!max_bytes_chunk || !max_bytes_copy || !max_chunks)
+ return 0;
+
+ /* chunks needed for the remaining bytes */
+ need = DIV_ROUND_UP_ULL(bytes_left, max_bytes_chunk);
+ /* chunks allowed per cc request */
+ allowed = DIV_ROUND_UP(max_bytes_copy, max_bytes_chunk);
+
+ return (u32)umin(need, umin(max_chunks, allowed));
+}
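
Worked sketch of the chunk-count math above as a standalone userspace function; the limits passed in are hypothetical stand-ins for tcon->max_chunks, tcon->max_bytes_copy and tcon->max_bytes_chunk.

#include <stdint.h>
#include <stdio.h>

static uint32_t calc_chunks(uint64_t bytes_left, uint32_t max_chunks,
			    uint32_t max_bytes_copy, uint32_t max_bytes_chunk)
{
	uint64_t need;
	uint32_t allowed;

	if (!max_bytes_chunk || !max_bytes_copy || !max_chunks)
		return 0;

	/* chunks needed for the remaining bytes (round up) */
	need = (bytes_left + max_bytes_chunk - 1) / max_bytes_chunk;
	/* chunks allowed per copychunk request (round up) */
	allowed = (max_bytes_copy + max_bytes_chunk - 1) / max_bytes_chunk;

	if (allowed > max_chunks)
		allowed = max_chunks;
	return need < allowed ? (uint32_t)need : allowed;
}

int main(void)
{
	/* 10 MiB left; server allows 256 chunks, 16 MiB per request, 1 MiB per chunk */
	printf("%u\n", calc_chunks(10ULL << 20, 256, 16U << 20, 1U << 20)); /* -> 10 */
	/* 1 GiB left; the 16 MiB-per-request cap wins: 16 chunks */
	printf("%u\n", calc_chunks(1ULL << 30, 256, 16U << 20, 1U << 20)); /* -> 16 */
	return 0;
}
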
+
+/**
+ * smb2_copychunk_range - server-side copy of data range
+ *
+ * @xid: transaction id
+ * @src_file: source file
+ * @dst_file: destination file
+ * @src_off: source file byte offset
+ * @len: number of bytes to copy
+ * @dst_off: destination file byte offset
+ *
+ * Obtains a resume key for @src_file and issues FSCTL_SRV_COPYCHUNK_WRITE
+ * IOCTLs, splitting the request into chunks limited by tcon->max_*.
+ *
+ * Return: @len on success; negative errno on failure.
+ */
static ssize_t
smb2_copychunk_range(const unsigned int xid,
- struct cifsFileInfo *srcfile,
- struct cifsFileInfo *trgtfile, u64 src_off,
- u64 len, u64 dest_off)
+ struct cifsFileInfo *src_file,
+ struct cifsFileInfo *dst_file,
+ u64 src_off,
+ u64 len,
+ u64 dst_off)
{
- int rc;
- unsigned int ret_data_len;
- struct copychunk_ioctl *pcchunk;
- struct copychunk_ioctl_rsp *retbuf = NULL;
+ int rc = 0;
+ unsigned int ret_data_len = 0;
+ struct copychunk_ioctl *cc_req = NULL;
+ struct copychunk_ioctl_rsp *cc_rsp = NULL;
struct cifs_tcon *tcon;
- int chunks_copied = 0;
- bool chunk_sizes_updated = false;
- ssize_t bytes_written, total_bytes_written = 0;
+ struct copychunk *chunk;
+ u32 chunks, chunk_count, chunk_bytes;
+ u32 copy_bytes, copy_bytes_left;
+ u32 chunks_written, bytes_written;
+ u64 total_bytes_left = len;
+ u64 src_off_prev, dst_off_prev;
+ u32 retries = 0;
+
+ tcon = tlink_tcon(dst_file->tlink);
+
+ trace_smb3_copychunk_enter(xid, src_file->fid.volatile_fid,
+ dst_file->fid.volatile_fid, tcon->tid,
+ tcon->ses->Suid, src_off, dst_off, len);
+
+retry:
+ chunk_count = calc_chunk_count(tcon, total_bytes_left);
+ if (!chunk_count) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
- pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
- if (pcchunk == NULL)
- return -ENOMEM;
+ cc_req = kzalloc(struct_size(cc_req, Chunks, chunk_count), GFP_KERNEL);
+ if (!cc_req) {
+ rc = -ENOMEM;
+ goto out;
+ }
- cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
/* Request a key from the server to identify the source of the copy */
- rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
- srcfile->fid.persistent_fid,
- srcfile->fid.volatile_fid, pcchunk);
+ rc = SMB2_request_res_key(xid,
+ tlink_tcon(src_file->tlink),
+ src_file->fid.persistent_fid,
+ src_file->fid.volatile_fid,
+ cc_req);
- /* Note: request_res_key sets res_key null only if rc !=0 */
+ /* Note: request_res_key sets res_key null only if rc != 0 */
if (rc)
- goto cchunk_out;
+ goto out;
+
+ while (total_bytes_left > 0) {
+
+ /* Store previous offsets to allow rewind */
+ src_off_prev = src_off;
+ dst_off_prev = dst_off;
+
+ chunks = 0;
+ copy_bytes = 0;
+ copy_bytes_left = umin(total_bytes_left, tcon->max_bytes_copy);
+ while (copy_bytes_left > 0 && chunks < chunk_count) {
+ chunk = &cc_req->Chunks[chunks++];
- /* For now array only one chunk long, will make more flexible later */
- pcchunk->ChunkCount = cpu_to_le32(1);
- pcchunk->Reserved = 0;
- pcchunk->Reserved2 = 0;
+ chunk->SourceOffset = cpu_to_le64(src_off);
+ chunk->TargetOffset = cpu_to_le64(dst_off);
- tcon = tlink_tcon(trgtfile->tlink);
+ chunk_bytes = umin(copy_bytes_left, tcon->max_bytes_chunk);
- trace_smb3_copychunk_enter(xid, srcfile->fid.volatile_fid,
- trgtfile->fid.volatile_fid, tcon->tid,
- tcon->ses->Suid, src_off, dest_off, len);
+ chunk->Length = cpu_to_le32(chunk_bytes);
+ /* Buffer is zeroed, no need to set chunk->Reserved = 0 */
- while (len > 0) {
- pcchunk->SourceOffset = cpu_to_le64(src_off);
- pcchunk->TargetOffset = cpu_to_le64(dest_off);
- pcchunk->Length =
- cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
+ src_off += chunk_bytes;
+ dst_off += chunk_bytes;
+
+ copy_bytes_left -= chunk_bytes;
+ copy_bytes += chunk_bytes;
+ }
+
+ cc_req->ChunkCount = cpu_to_le32(chunks);
+ /* Buffer is zeroed, no need to set cc_req->Reserved = 0 */
/* Request server copy to target from src identified by key */
- kfree(retbuf);
- retbuf = NULL;
- rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
- trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
- (char *)pcchunk, sizeof(struct copychunk_ioctl),
- CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
+ kfree(cc_rsp);
+ cc_rsp = NULL;
+ rc = SMB2_ioctl(xid, tcon, dst_file->fid.persistent_fid,
+ dst_file->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
+ (char *)cc_req, struct_size(cc_req, Chunks, chunks),
+ CIFSMaxBufSize, (char **)&cc_rsp, &ret_data_len);
+
+ if (rc && rc != -EINVAL)
+ goto out;
+
+ if (unlikely(ret_data_len != sizeof(*cc_rsp))) {
+ cifs_tcon_dbg(VFS, "Copychunk invalid response: size %u/%zu\n",
+ ret_data_len, sizeof(*cc_rsp));
+ rc = -EIO;
+ goto out;
+ }
+
+ bytes_written = le32_to_cpu(cc_rsp->TotalBytesWritten);
+ chunks_written = le32_to_cpu(cc_rsp->ChunksWritten);
+ chunk_bytes = le32_to_cpu(cc_rsp->ChunkBytesWritten);
+
if (rc == 0) {
- if (ret_data_len !=
- sizeof(struct copychunk_ioctl_rsp)) {
- cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
+ /* Check if server claimed to write more than we asked */
+ if (unlikely(!bytes_written || bytes_written > copy_bytes ||
+ !chunks_written || chunks_written > chunks)) {
+ cifs_tcon_dbg(VFS, "Copychunk invalid response: bytes written %u/%u, chunks written %u/%u\n",
+ bytes_written, copy_bytes, chunks_written, chunks);
rc = -EIO;
- goto cchunk_out;
- }
- if (retbuf->TotalBytesWritten == 0) {
- cifs_dbg(FYI, "no bytes copied\n");
- rc = -EIO;
- goto cchunk_out;
- }
- /*
- * Check if server claimed to write more than we asked
- */
- if (le32_to_cpu(retbuf->TotalBytesWritten) >
- le32_to_cpu(pcchunk->Length)) {
- cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
- rc = -EIO;
- goto cchunk_out;
+ goto out;
}
- if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
- cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
- rc = -EIO;
- goto cchunk_out;
+
+ /* Partial write: rewind */
+ if (bytes_written < copy_bytes) {
+ u32 delta = copy_bytes - bytes_written;
+
+ src_off -= delta;
+ dst_off -= delta;
}
- chunks_copied++;
-
- bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
- src_off += bytes_written;
- dest_off += bytes_written;
- len -= bytes_written;
- total_bytes_written += bytes_written;
-
- cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
- le32_to_cpu(retbuf->ChunksWritten),
- le32_to_cpu(retbuf->ChunkBytesWritten),
- bytes_written);
- trace_smb3_copychunk_done(xid, srcfile->fid.volatile_fid,
- trgtfile->fid.volatile_fid, tcon->tid,
- tcon->ses->Suid, src_off, dest_off, len);
- } else if (rc == -EINVAL) {
- if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
- goto cchunk_out;
-
- cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
- le32_to_cpu(retbuf->ChunksWritten),
- le32_to_cpu(retbuf->ChunkBytesWritten),
- le32_to_cpu(retbuf->TotalBytesWritten));
- /*
- * Check if this is the first request using these sizes,
- * (ie check if copy succeed once with original sizes
- * and check if the server gave us different sizes after
- * we already updated max sizes on previous request).
- * if not then why is the server returning an error now
- */
- if ((chunks_copied != 0) || chunk_sizes_updated)
- goto cchunk_out;
-
- /* Check that server is not asking us to grow size */
- if (le32_to_cpu(retbuf->ChunkBytesWritten) <
- tcon->max_bytes_chunk)
- tcon->max_bytes_chunk =
- le32_to_cpu(retbuf->ChunkBytesWritten);
- else
- goto cchunk_out; /* server gave us bogus size */
+ total_bytes_left -= bytes_written;
+ continue;
+ }
- /* No need to change MaxChunks since already set to 1 */
- chunk_sizes_updated = true;
- } else
- goto cchunk_out;
+ /*
+ * Check whether the server is asking us to reduce our sizes.
+ *
+ * Note: per MS-SMB2 2.2.32.1, the values returned
+ * in cc_rsp are not guaranteed to be lower than the
+ * ones used before.
+ */
+ if (bytes_written < tcon->max_bytes_copy) {
+ cifs_tcon_dbg(FYI, "Copychunk MaxBytesCopy updated: %u -> %u\n",
+ tcon->max_bytes_copy, bytes_written);
+ tcon->max_bytes_copy = bytes_written;
+ }
+
+ if (chunks_written < tcon->max_chunks) {
+ cifs_tcon_dbg(FYI, "Copychunk MaxChunks updated: %u -> %u\n",
+ tcon->max_chunks, chunks_written);
+ tcon->max_chunks = chunks_written;
+ }
+
+ if (chunk_bytes < tcon->max_bytes_chunk) {
+ cifs_tcon_dbg(FYI, "Copychunk MaxBytesChunk updated: %u -> %u\n",
+ tcon->max_bytes_chunk, chunk_bytes);
+ tcon->max_bytes_chunk = chunk_bytes;
+ }
+
+ /* reset to last offsets */
+ if (retries++ < 2) {
+ src_off = src_off_prev;
+ dst_off = dst_off_prev;
+ kfree(cc_req);
+ cc_req = NULL;
+ goto retry;
+ }
+
+ break;
}
-cchunk_out:
- kfree(pcchunk);
- kfree(retbuf);
- if (rc)
+out:
+ kfree(cc_req);
+ kfree(cc_rsp);
+ if (rc) {
+ trace_smb3_copychunk_err(xid, src_file->fid.volatile_fid,
+ dst_file->fid.volatile_fid, tcon->tid,
+ tcon->ses->Suid, src_off, dst_off, len, rc);
return rc;
- else
- return total_bytes_written;
+ } else {
+ trace_smb3_copychunk_done(xid, src_file->fid.volatile_fid,
+ dst_file->fid.volatile_fid, tcon->tid,
+ tcon->ses->Suid, src_off, dst_off, len);
+ return len;
+ }
}
static int
@@ -3126,8 +3212,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path) {
rc = -ENOMEM;
- free_xid(xid);
- return ERR_PTR(rc);
+ goto put_tlink;
}
oparms = (struct cifs_open_parms) {
@@ -3159,6 +3244,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
+put_tlink:
cifs_put_tlink(tlink);
free_xid(xid);
@@ -3199,8 +3285,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path) {
rc = -ENOMEM;
- free_xid(xid);
- return rc;
+ goto put_tlink;
}
oparms = (struct cifs_open_parms) {
@@ -3221,6 +3306,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
+put_tlink:
cifs_put_tlink(tlink);
free_xid(xid);
return rc;
@@ -3281,7 +3367,6 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
ses->Suid, offset, len);
- inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
i_size = i_size_read(inode);
@@ -3299,6 +3384,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
* first, otherwise the data may be inconsistent with the server.
*/
truncate_pagecache_range(inode, offset, offset + len - 1);
+ netfs_wait_for_outstanding_io(inode);
/* if file not oplocked can't be sure whether asking to extend size */
rc = -EOPNOTSUPP;
@@ -3327,7 +3413,6 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
zero_range_exit:
filemap_invalidate_unlock(inode->i_mapping);
- inode_unlock(inode);
free_xid(xid);
if (rc)
trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
@@ -3351,7 +3436,6 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
- inode_lock(inode);
/* Need to make file sparse, if not already, before freeing range. */
/* Consider adding equivalent for compressed since it could also work */
if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
@@ -3365,6 +3449,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
* caches first, otherwise the data may be inconsistent with the server.
*/
truncate_pagecache_range(inode, offset, offset + len - 1);
+ netfs_wait_for_outstanding_io(inode);
cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
@@ -3399,7 +3484,6 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
unlock:
filemap_invalidate_unlock(inode->i_mapping);
out:
- inode_unlock(inode);
free_xid(xid);
return rc;
}
@@ -3663,8 +3747,6 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
- inode_lock(inode);
-
old_eof = i_size_read(inode);
if ((off >= old_eof) ||
off + len >= old_eof) {
@@ -3679,6 +3761,7 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
truncate_pagecache_range(inode, off, old_eof);
ictx->zero_point = old_eof;
+ netfs_wait_for_outstanding_io(inode);
rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
old_eof - off - len, off);
@@ -3699,8 +3782,7 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
fscache_resize_cookie(cifs_inode_cookie(inode), new_eof);
out_2:
filemap_invalidate_unlock(inode->i_mapping);
- out:
- inode_unlock(inode);
+out:
free_xid(xid);
return rc;
}
@@ -3717,8 +3799,6 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
xid = get_xid();
- inode_lock(inode);
-
old_eof = i_size_read(inode);
if (off >= old_eof) {
rc = -EINVAL;
@@ -3733,6 +3813,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
if (rc < 0)
goto out_2;
truncate_pagecache_range(inode, off, old_eof);
+ netfs_wait_for_outstanding_io(inode);
rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->pid, new_eof);
@@ -3755,8 +3836,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
rc = 0;
out_2:
filemap_invalidate_unlock(inode->i_mapping);
- out:
- inode_unlock(inode);
+out:
free_xid(xid);
return rc;
}
@@ -4650,7 +4730,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
unsigned int pad_len;
struct cifs_io_subrequest *rdata = mid->callback_data;
struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
- int length;
+ size_t copied;
bool use_rdma_mr = false;
if (shdr->Command != SMB2_READ) {
@@ -4763,10 +4843,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
} else if (buf_len >= data_offset + data_len) {
/* read response payload is in buf */
WARN_ONCE(buffer, "read data can be either in buf or in buffer");
- length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
- if (length < 0)
- return length;
- rdata->got_bytes = data_len;
+ copied = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
+ if (copied == 0)
+ return -EIO;
+ rdata->got_bytes = copied;
} else {
/* read response payload cannot be in both buf and pages */
WARN_ONCE(1, "buf can not contain only a part of read data");
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 42e2d4ea344d..b0739a2661bf 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -3277,7 +3277,7 @@ replay_again:
buf->EndOfFile = rsp->EndofFile;
buf->Attributes = rsp->FileAttributes;
buf->NumberOfLinks = cpu_to_le32(1);
- buf->DeletePending = 0;
+ buf->DeletePending = 0; /* successful open = not delete pending */
}
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index 3c09a58dfd07..101024f8f725 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -201,16 +201,20 @@ struct resume_key_req {
char Context[]; /* ignored, Windows sets to 4 bytes of zero */
} __packed;
+
+struct copychunk {
+ __le64 SourceOffset;
+ __le64 TargetOffset;
+ __le32 Length;
+ __le32 Reserved;
+} __packed;
+
/* this goes in the ioctl buffer when doing a copychunk request */
struct copychunk_ioctl {
char SourceKey[COPY_CHUNK_RES_KEY_SIZE];
- __le32 ChunkCount; /* we are only sending 1 */
+ __le32 ChunkCount;
__le32 Reserved;
- /* array will only be one chunk long for us */
- __le64 SourceOffset;
- __le64 TargetOffset;
- __le32 Length; /* how many bytes to copy */
- __u32 Reserved2;
+ struct copychunk Chunks[];
} __packed;
struct copychunk_ioctl_rsp {
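
The struct change above replaces the single hard-coded chunk with a flexible Chunks[] array sized at allocation time via struct_size(cc_req, Chunks, chunk_count). A rough userspace equivalent of that sizing is sketched below; the field names and widths are illustrative, not the SMB wire format.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk {
	uint64_t source_offset;
	uint64_t target_offset;
	uint32_t length;
	uint32_t reserved;
};

struct cc_req {
	uint8_t  source_key[24];	/* resume key identifying the source */
	uint32_t chunk_count;
	uint32_t reserved;
	struct chunk chunks[];		/* flexible array, sized at allocation */
};

int main(void)
{
	uint32_t n = 8;
	/* userspace equivalent of kzalloc(struct_size(req, chunks, n), GFP_KERNEL) */
	struct cc_req *req = calloc(1, sizeof(*req) + n * sizeof(req->chunks[0]));

	if (!req)
		return 1;
	req->chunk_count = n;
	printf("request size with %u chunks: %zu bytes\n",
	       n, sizeof(*req) + n * sizeof(req->chunks[0]));
	free(req);
	return 0;
}
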
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index b3f1398c9f79..6eb86d134abc 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -295,10 +295,10 @@ extern int smb2_validate_and_copy_iov(unsigned int offset,
extern void smb2_copy_fs_info_to_kstatfs(
struct smb2_fs_full_size_info *pfs_inf,
struct kstatfs *kst);
-extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
-extern int smb311_update_preauth_hash(struct cifs_ses *ses,
- struct TCP_Server_Info *server,
- struct kvec *iov, int nvec);
+extern int smb3_crypto_shash_allocate(struct TCP_Server_Info *server);
+extern void smb311_update_preauth_hash(struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
+ struct kvec *iov, int nvec);
extern int smb2_query_info_compound(const unsigned int xid,
struct cifs_tcon *tcon,
const char *path, u32 desired_access,
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index bc0e92eb2b64..ad6068e17a2a 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -19,6 +19,7 @@
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <crypto/aead.h>
+#include <crypto/sha2.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
@@ -26,53 +27,14 @@
#include "../common/smb2status.h"
#include "smb2glob.h"
-static int
-smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
-{
- struct cifs_secmech *p = &server->secmech;
- int rc;
-
- rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256);
- if (rc)
- goto err;
-
- rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
- if (rc)
- goto err;
-
- return 0;
-err:
- cifs_free_hash(&p->hmacsha256);
- return rc;
-}
-
int
-smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
+smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
{
struct cifs_secmech *p = &server->secmech;
- int rc = 0;
-
- rc = cifs_alloc_hash("hmac(sha256)", &p->hmacsha256);
- if (rc)
- return rc;
-
- rc = cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
- if (rc)
- goto err;
-
- rc = cifs_alloc_hash("sha512", &p->sha512);
- if (rc)
- goto err;
-
- return 0;
-err:
- cifs_free_hash(&p->aes_cmac);
- cifs_free_hash(&p->hmacsha256);
- return rc;
+ return cifs_alloc_hash("cmac(aes)", &p->aes_cmac);
}
-
static
int smb3_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
{
@@ -240,11 +202,6 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
return NULL;
}
tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
- if (!tcon) {
- spin_unlock(&cifs_tcp_ses_lock);
- cifs_put_smb_ses(ses);
- return NULL;
- }
spin_unlock(&cifs_tcp_ses_lock);
/* tcon already has a ref to ses, so we don't need ses anymore */
cifs_put_smb_ses(ses);
@@ -258,10 +215,9 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
{
int rc;
unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
- unsigned char *sigptr = smb2_signature;
struct kvec *iov = rqst->rq_iov;
struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
- struct shash_desc *shash = NULL;
+ struct hmac_sha256_ctx hmac_ctx;
struct smb_rqst drqst;
__u64 sid = le64_to_cpu(shdr->SessionId);
u8 key[SMB2_NTLMV2_SESSKEY_SIZE];
@@ -276,30 +232,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE);
memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
- if (allocate_crypto) {
- rc = cifs_alloc_hash("hmac(sha256)", &shash);
- if (rc) {
- cifs_server_dbg(VFS,
- "%s: sha256 alloc failed\n", __func__);
- goto out;
- }
- } else {
- shash = server->secmech.hmacsha256;
- }
-
- rc = crypto_shash_setkey(shash->tfm, key, sizeof(key));
- if (rc) {
- cifs_server_dbg(VFS,
- "%s: Could not update with response\n",
- __func__);
- goto out;
- }
-
- rc = crypto_shash_init(shash);
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not init sha256", __func__);
- goto out;
- }
+ hmac_sha256_init_usingrawkey(&hmac_ctx, key, sizeof(key));
/*
* For SMB2+, __cifs_calc_signature() expects to sign only the actual
@@ -310,25 +243,17 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
*/
drqst = *rqst;
if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
- rc = crypto_shash_update(shash, iov[0].iov_base,
- iov[0].iov_len);
- if (rc) {
- cifs_server_dbg(VFS,
- "%s: Could not update with payload\n",
- __func__);
- goto out;
- }
+ hmac_sha256_update(&hmac_ctx, iov[0].iov_base, iov[0].iov_len);
drqst.rq_iov++;
drqst.rq_nvec--;
}
- rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
+ rc = __cifs_calc_signature(
+ &drqst, server, smb2_signature,
+ &(struct cifs_calc_sig_ctx){ .hmac = &hmac_ctx });
if (!rc)
- memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
+ memcpy(shdr->Signature, smb2_signature, SMB2_SIGNATURE_SIZE);
-out:
- if (allocate_crypto)
- cifs_free_hash(&shash);
return rc;
}
@@ -341,8 +266,8 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
__u8 L256[4] = {0, 0, 1, 0};
int rc = 0;
unsigned char prfhash[SMB2_HMACSHA256_SIZE];
- unsigned char *hashptr = prfhash;
struct TCP_Server_Info *server = ses->server;
+ struct hmac_sha256_ctx hmac_ctx;
memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE);
memset(key, 0x0, key_size);
@@ -350,67 +275,26 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
rc = smb3_crypto_shash_allocate(server);
if (rc) {
cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
- goto smb3signkey_ret;
- }
-
- rc = crypto_shash_setkey(server->secmech.hmacsha256->tfm,
- ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not set with session key\n", __func__);
- goto smb3signkey_ret;
- }
-
- rc = crypto_shash_init(server->secmech.hmacsha256);
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not init sign hmac\n", __func__);
- goto smb3signkey_ret;
- }
-
- rc = crypto_shash_update(server->secmech.hmacsha256, i, 4);
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not update with n\n", __func__);
- goto smb3signkey_ret;
- }
-
- rc = crypto_shash_update(server->secmech.hmacsha256, label.iov_base, label.iov_len);
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not update with label\n", __func__);
- goto smb3signkey_ret;
- }
-
- rc = crypto_shash_update(server->secmech.hmacsha256, &zero, 1);
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not update with zero\n", __func__);
- goto smb3signkey_ret;
+ return rc;
}
- rc = crypto_shash_update(server->secmech.hmacsha256, context.iov_base, context.iov_len);
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not update with context\n", __func__);
- goto smb3signkey_ret;
- }
+ hmac_sha256_init_usingrawkey(&hmac_ctx, ses->auth_key.response,
+ SMB2_NTLMV2_SESSKEY_SIZE);
+ hmac_sha256_update(&hmac_ctx, i, 4);
+ hmac_sha256_update(&hmac_ctx, label.iov_base, label.iov_len);
+ hmac_sha256_update(&hmac_ctx, &zero, 1);
+ hmac_sha256_update(&hmac_ctx, context.iov_base, context.iov_len);
if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
- rc = crypto_shash_update(server->secmech.hmacsha256, L256, 4);
+ hmac_sha256_update(&hmac_ctx, L256, 4);
} else {
- rc = crypto_shash_update(server->secmech.hmacsha256, L128, 4);
- }
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
- goto smb3signkey_ret;
- }
-
- rc = crypto_shash_final(server->secmech.hmacsha256, hashptr);
- if (rc) {
- cifs_server_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__);
- goto smb3signkey_ret;
+ hmac_sha256_update(&hmac_ctx, L128, 4);
}
+ hmac_sha256_final(&hmac_ctx, prfhash);
- memcpy(key, hashptr, key_size);
-
-smb3signkey_ret:
- return rc;
+ memcpy(key, prfhash, key_size);
+ return 0;
}
struct derivation {
@@ -587,7 +471,6 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
{
int rc;
unsigned char smb3_signature[SMB2_CMACAES_SIZE];
- unsigned char *sigptr = smb3_signature;
struct kvec *iov = rqst->rq_iov;
struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
struct shash_desc *shash = NULL;
@@ -648,9 +531,11 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
drqst.rq_nvec--;
}
- rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
+ rc = __cifs_calc_signature(
+ &drqst, server, smb3_signature,
+ &(struct cifs_calc_sig_ctx){ .shash = shash });
if (!rc)
- memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
+ memcpy(shdr->Signature, smb3_signature, SMB2_SIGNATURE_SIZE);
out:
if (allocate_crypto)
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 316f398c70f4..49e2df3ad1f0 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -1575,12 +1575,12 @@ void smbd_destroy(struct TCP_Server_Info *server)
disable_work_sync(&sc->disconnect_work);
log_rdma_event(INFO, "destroying rdma session\n");
- if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) {
+ if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING)
smbd_disconnect_rdma_work(&sc->disconnect_work);
+ if (sc->status < SMBDIRECT_SOCKET_DISCONNECTED) {
log_rdma_event(INFO, "wait for transport being disconnected\n");
- wait_event_interruptible(
- sc->status_wait,
- sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+ wait_event(sc->status_wait, sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+ log_rdma_event(INFO, "waited for transport being disconnected\n");
}
/*
@@ -1624,19 +1624,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
log_rdma_event(INFO, "free receive buffers\n");
destroy_receive_buffers(sc);
- /*
- * For performance reasons, memory registration and deregistration
- * are not locked by srv_mutex. It is possible some processes are
- * blocked on transport srv_mutex while holding memory registration.
- * Release the transport srv_mutex to allow them to hit the failure
- * path when sending data, and then release memory registrations.
- */
log_rdma_event(INFO, "freeing mr list\n");
- while (atomic_read(&sc->mr_io.used.count)) {
- cifs_server_unlock(server);
- msleep(1000);
- cifs_server_lock(server);
- }
destroy_mr_list(sc);
ib_free_cq(sc->ib.send_cq);
@@ -2352,18 +2340,84 @@ static void smbd_mr_recovery_work(struct work_struct *work)
}
}
+static void smbd_mr_disable_locked(struct smbdirect_mr_io *mr)
+{
+ struct smbdirect_socket *sc = mr->socket;
+
+ lockdep_assert_held(&mr->mutex);
+
+ if (mr->state == SMBDIRECT_MR_DISABLED)
+ return;
+
+ if (mr->mr)
+ ib_dereg_mr(mr->mr);
+ if (mr->sgt.nents)
+ ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir);
+ kfree(mr->sgt.sgl);
+
+ mr->mr = NULL;
+ mr->sgt.sgl = NULL;
+ mr->sgt.nents = 0;
+
+ mr->state = SMBDIRECT_MR_DISABLED;
+}
+
+static void smbd_mr_free_locked(struct kref *kref)
+{
+ struct smbdirect_mr_io *mr =
+ container_of(kref, struct smbdirect_mr_io, kref);
+
+ lockdep_assert_held(&mr->mutex);
+
+ /*
+ * smbd_mr_disable_locked() should already have been called!
+ */
+ if (WARN_ON_ONCE(mr->state != SMBDIRECT_MR_DISABLED))
+ smbd_mr_disable_locked(mr);
+
+ mutex_unlock(&mr->mutex);
+ mutex_destroy(&mr->mutex);
+ kfree(mr);
+}
+
static void destroy_mr_list(struct smbdirect_socket *sc)
{
struct smbdirect_mr_io *mr, *tmp;
+ LIST_HEAD(all_list);
+ unsigned long flags;
disable_work_sync(&sc->mr_io.recovery_work);
- list_for_each_entry_safe(mr, tmp, &sc->mr_io.all.list, list) {
- if (mr->state == SMBDIRECT_MR_INVALIDATED)
- ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
- mr->sgt.nents, mr->dir);
- ib_dereg_mr(mr->mr);
- kfree(mr->sgt.sgl);
- kfree(mr);
+
+ spin_lock_irqsave(&sc->mr_io.all.lock, flags);
+ list_splice_tail_init(&sc->mr_io.all.list, &all_list);
+ spin_unlock_irqrestore(&sc->mr_io.all.lock, flags);
+
+ list_for_each_entry_safe(mr, tmp, &all_list, list) {
+ mutex_lock(&mr->mutex);
+
+ smbd_mr_disable_locked(mr);
+ list_del(&mr->list);
+ mr->socket = NULL;
+
+ /*
+ * No kref_put_mutex() as it's already locked.
+ *
+ * If kref_put() returned 1, smbd_mr_free_locked()
+ * ran, the mutex was unlocked and the mr is gone.
+ *
+ * If kref_put() returned 0 we know that
+ * smbd_mr_free_locked() didn't run, neither by us
+ * nor by anyone else, as we still hold the mutex,
+ * so we need to unlock it ourselves.
+ *
+ * If the mr is still registered it will be left
+ * dangling (detached from the connection), waiting
+ * for smbd_deregister_mr() to be called in order
+ * to free the memory.
+ */
+ if (!kref_put(&mr->kref, smbd_mr_free_locked))
+ mutex_unlock(&mr->mutex);
}
}
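
A rough userspace sketch of the "drop a reference while holding the object's own mutex" pattern documented in the comment above; pthreads and a plain counter stand in for the kernel's mutex and kref, and all names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int refs;
};

/*
 * Drop one reference with obj->lock held.  Returns 1 if this was the
 * last reference: the object was unlocked, destroyed and freed, so the
 * caller must not touch it (and must not unlock).  Returns 0 otherwise;
 * the caller still holds the lock and has to unlock it.
 */
static int obj_put_locked(struct obj *obj)
{
	if (--obj->refs == 0) {
		pthread_mutex_unlock(&obj->lock);
		pthread_mutex_destroy(&obj->lock);
		free(obj);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct obj *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;
	pthread_mutex_init(&obj->lock, NULL);
	obj->refs = 2;			/* e.g. connection ref + registration ref */

	pthread_mutex_lock(&obj->lock);
	if (!obj_put_locked(obj))	/* drops the "connection" ref */
		pthread_mutex_unlock(&obj->lock);

	pthread_mutex_lock(&obj->lock);
	if (!obj_put_locked(obj))	/* last ref: freed inside, no unlock */
		pthread_mutex_unlock(&obj->lock);

	puts("both references dropped");
	return 0;
}
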
@@ -2377,10 +2431,9 @@ static void destroy_mr_list(struct smbdirect_socket *sc)
static int allocate_mr_list(struct smbdirect_socket *sc)
{
struct smbdirect_socket_parameters *sp = &sc->parameters;
- int i;
- struct smbdirect_mr_io *smbdirect_mr, *tmp;
-
- INIT_WORK(&sc->mr_io.recovery_work, smbd_mr_recovery_work);
+ struct smbdirect_mr_io *mr;
+ int ret;
+ u32 i;
if (sp->responder_resources == 0) {
log_rdma_mr(ERR, "responder_resources negotiated as 0\n");
@@ -2389,42 +2442,52 @@ static int allocate_mr_list(struct smbdirect_socket *sc)
/* Allocate more MRs (2x) than hardware responder_resources */
for (i = 0; i < sp->responder_resources * 2; i++) {
- smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
- if (!smbdirect_mr)
- goto cleanup_entries;
- smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, sc->mr_io.type,
- sp->max_frmr_depth);
- if (IS_ERR(smbdirect_mr->mr)) {
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = -ENOMEM;
+ goto kzalloc_mr_failed;
+ }
+
+ kref_init(&mr->kref);
+ mutex_init(&mr->mutex);
+
+ mr->mr = ib_alloc_mr(sc->ib.pd,
+ sc->mr_io.type,
+ sp->max_frmr_depth);
+ if (IS_ERR(mr->mr)) {
+ ret = PTR_ERR(mr->mr);
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
sc->mr_io.type, sp->max_frmr_depth);
- goto out;
+ goto ib_alloc_mr_failed;
}
- smbdirect_mr->sgt.sgl = kcalloc(sp->max_frmr_depth,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!smbdirect_mr->sgt.sgl) {
+
+ mr->sgt.sgl = kcalloc(sp->max_frmr_depth,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!mr->sgt.sgl) {
+ ret = -ENOMEM;
log_rdma_mr(ERR, "failed to allocate sgl\n");
- ib_dereg_mr(smbdirect_mr->mr);
- goto out;
+ goto kcalloc_sgl_failed;
}
- smbdirect_mr->state = SMBDIRECT_MR_READY;
- smbdirect_mr->socket = sc;
+ mr->state = SMBDIRECT_MR_READY;
+ mr->socket = sc;
- list_add_tail(&smbdirect_mr->list, &sc->mr_io.all.list);
+ list_add_tail(&mr->list, &sc->mr_io.all.list);
atomic_inc(&sc->mr_io.ready.count);
}
+
+ INIT_WORK(&sc->mr_io.recovery_work, smbd_mr_recovery_work);
+
return 0;
-out:
- kfree(smbdirect_mr);
-cleanup_entries:
- list_for_each_entry_safe(smbdirect_mr, tmp, &sc->mr_io.all.list, list) {
- list_del(&smbdirect_mr->list);
- ib_dereg_mr(smbdirect_mr->mr);
- kfree(smbdirect_mr->sgt.sgl);
- kfree(smbdirect_mr);
- }
- return -ENOMEM;
+kcalloc_sgl_failed:
+ ib_dereg_mr(mr->mr);
+ib_alloc_mr_failed:
+ mutex_destroy(&mr->mutex);
+ kfree(mr);
+kzalloc_mr_failed:
+ destroy_mr_list(sc);
+ return ret;
}
/*
@@ -2458,6 +2521,7 @@ again:
list_for_each_entry(ret, &sc->mr_io.all.list, list) {
if (ret->state == SMBDIRECT_MR_READY) {
ret->state = SMBDIRECT_MR_REGISTERED;
+ kref_get(&ret->kref);
spin_unlock_irqrestore(&sc->mr_io.all.lock, flags);
atomic_dec(&sc->mr_io.ready.count);
atomic_inc(&sc->mr_io.used.count);
@@ -2504,9 +2568,8 @@ struct smbdirect_mr_io *smbd_register_mr(struct smbd_connection *info,
{
struct smbdirect_socket *sc = &info->socket;
struct smbdirect_socket_parameters *sp = &sc->parameters;
- struct smbdirect_mr_io *smbdirect_mr;
+ struct smbdirect_mr_io *mr;
int rc, num_pages;
- enum dma_data_direction dir;
struct ib_reg_wr *reg_wr;
num_pages = iov_iter_npages(iter, sp->max_frmr_depth + 1);
@@ -2517,49 +2580,47 @@ struct smbdirect_mr_io *smbd_register_mr(struct smbd_connection *info,
return NULL;
}
- smbdirect_mr = get_mr(sc);
- if (!smbdirect_mr) {
+ mr = get_mr(sc);
+ if (!mr) {
log_rdma_mr(ERR, "get_mr returning NULL\n");
return NULL;
}
- dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- smbdirect_mr->dir = dir;
- smbdirect_mr->need_invalidate = need_invalidate;
- smbdirect_mr->sgt.nents = 0;
- smbdirect_mr->sgt.orig_nents = 0;
+ mutex_lock(&mr->mutex);
+
+ mr->dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ mr->need_invalidate = need_invalidate;
+ mr->sgt.nents = 0;
+ mr->sgt.orig_nents = 0;
log_rdma_mr(INFO, "num_pages=0x%x count=0x%zx depth=%u\n",
num_pages, iov_iter_count(iter), sp->max_frmr_depth);
- smbd_iter_to_mr(iter, &smbdirect_mr->sgt, sp->max_frmr_depth);
+ smbd_iter_to_mr(iter, &mr->sgt, sp->max_frmr_depth);
- rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
- smbdirect_mr->sgt.nents, dir);
+ rc = ib_dma_map_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir);
if (!rc) {
log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
- num_pages, dir, rc);
+ num_pages, mr->dir, rc);
goto dma_map_error;
}
- rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgt.sgl,
- smbdirect_mr->sgt.nents, NULL, PAGE_SIZE);
- if (rc != smbdirect_mr->sgt.nents) {
+ rc = ib_map_mr_sg(mr->mr, mr->sgt.sgl, mr->sgt.nents, NULL, PAGE_SIZE);
+ if (rc != mr->sgt.nents) {
log_rdma_mr(ERR,
- "ib_map_mr_sg failed rc = %d nents = %x\n",
- rc, smbdirect_mr->sgt.nents);
+ "ib_map_mr_sg failed rc = %d nents = %x\n",
+ rc, mr->sgt.nents);
goto map_mr_error;
}
- ib_update_fast_reg_key(smbdirect_mr->mr,
- ib_inc_rkey(smbdirect_mr->mr->rkey));
- reg_wr = &smbdirect_mr->wr;
+ ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
+ reg_wr = &mr->wr;
reg_wr->wr.opcode = IB_WR_REG_MR;
- smbdirect_mr->cqe.done = register_mr_done;
- reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
+ mr->cqe.done = register_mr_done;
+ reg_wr->wr.wr_cqe = &mr->cqe;
reg_wr->wr.num_sge = 0;
reg_wr->wr.send_flags = IB_SEND_SIGNALED;
- reg_wr->mr = smbdirect_mr->mr;
- reg_wr->key = smbdirect_mr->mr->rkey;
+ reg_wr->mr = mr->mr;
+ reg_wr->key = mr->mr->rkey;
reg_wr->access = writing ?
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
IB_ACCESS_REMOTE_READ;
@@ -2570,24 +2631,51 @@ struct smbdirect_mr_io *smbd_register_mr(struct smbd_connection *info,
* on the next ib_post_send when we actually send I/O to remote peer
*/
rc = ib_post_send(sc->ib.qp, &reg_wr->wr, NULL);
- if (!rc)
- return smbdirect_mr;
+ if (!rc) {
+ /*
+ * get_mr() gave us a reference
+ * via kref_get(&mr->kref), we keep that and let
+ * the caller use smbd_deregister_mr()
+ * to remove it again.
+ */
+ mutex_unlock(&mr->mutex);
+ return mr;
+ }
log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
rc, reg_wr->key);
/* If all failed, attempt to recover this MR by setting it SMBDIRECT_MR_ERROR*/
map_mr_error:
- ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
- smbdirect_mr->sgt.nents, smbdirect_mr->dir);
+ ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir);
dma_map_error:
- smbdirect_mr->state = SMBDIRECT_MR_ERROR;
+ mr->sgt.nents = 0;
+ mr->state = SMBDIRECT_MR_ERROR;
if (atomic_dec_and_test(&sc->mr_io.used.count))
wake_up(&sc->mr_io.cleanup.wait_queue);
smbd_disconnect_rdma_connection(sc);
+ /*
+ * get_mr() gave us a reference via kref_get(&mr->kref);
+ * we need to drop it again on error.
+ *
+ * No kref_put_mutex() as it's already locked.
+ *
+ * If kref_put() returned 1, smbd_mr_free_locked()
+ * ran, the mutex was unlocked and the mr is gone.
+ *
+ * If kref_put() returned 0 we know that
+ * smbd_mr_free_locked() didn't run, neither by us
+ * nor by anyone else, as we still hold the mutex,
+ * so we need to unlock it ourselves.
+ */
+ if (!kref_put(&mr->kref, smbd_mr_free_locked))
+ mutex_unlock(&mr->mutex);
+
return NULL;
}
@@ -2612,44 +2700,55 @@ static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
* and we have to locally invalidate the buffer to prevent data is being
* modified by remote peer after upper layer consumes it
*/
-int smbd_deregister_mr(struct smbdirect_mr_io *smbdirect_mr)
+void smbd_deregister_mr(struct smbdirect_mr_io *mr)
{
- struct ib_send_wr *wr;
- struct smbdirect_socket *sc = smbdirect_mr->socket;
- int rc = 0;
+ struct smbdirect_socket *sc = mr->socket;
+
+ mutex_lock(&mr->mutex);
+ if (mr->state == SMBDIRECT_MR_DISABLED)
+ goto put_kref;
+
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ smbd_mr_disable_locked(mr);
+ goto put_kref;
+ }
+
+ if (mr->need_invalidate) {
+ struct ib_send_wr *wr = &mr->inv_wr;
+ int rc;
- if (smbdirect_mr->need_invalidate) {
/* Need to finish local invalidation before returning */
- wr = &smbdirect_mr->inv_wr;
wr->opcode = IB_WR_LOCAL_INV;
- smbdirect_mr->cqe.done = local_inv_done;
- wr->wr_cqe = &smbdirect_mr->cqe;
+ mr->cqe.done = local_inv_done;
+ wr->wr_cqe = &mr->cqe;
wr->num_sge = 0;
- wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
+ wr->ex.invalidate_rkey = mr->mr->rkey;
wr->send_flags = IB_SEND_SIGNALED;
- init_completion(&smbdirect_mr->invalidate_done);
+ init_completion(&mr->invalidate_done);
rc = ib_post_send(sc->ib.qp, wr, NULL);
if (rc) {
log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
+ smbd_mr_disable_locked(mr);
smbd_disconnect_rdma_connection(sc);
goto done;
}
- wait_for_completion(&smbdirect_mr->invalidate_done);
- smbdirect_mr->need_invalidate = false;
+ wait_for_completion(&mr->invalidate_done);
+ mr->need_invalidate = false;
} else
/*
* For remote invalidation, just set it to SMBDIRECT_MR_INVALIDATED
* and defer to mr_recovery_work to recover the MR for next use
*/
- smbdirect_mr->state = SMBDIRECT_MR_INVALIDATED;
+ mr->state = SMBDIRECT_MR_INVALIDATED;
- if (smbdirect_mr->state == SMBDIRECT_MR_INVALIDATED) {
- ib_dma_unmap_sg(
- sc->ib.dev, smbdirect_mr->sgt.sgl,
- smbdirect_mr->sgt.nents,
- smbdirect_mr->dir);
- smbdirect_mr->state = SMBDIRECT_MR_READY;
+ if (mr->sgt.nents) {
+ ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir);
+ mr->sgt.nents = 0;
+ }
+
+ if (mr->state == SMBDIRECT_MR_INVALIDATED) {
+ mr->state = SMBDIRECT_MR_READY;
if (atomic_inc_return(&sc->mr_io.ready.count) == 1)
wake_up(&sc->mr_io.ready.wait_queue);
} else
@@ -2663,7 +2762,23 @@ done:
if (atomic_dec_and_test(&sc->mr_io.used.count))
wake_up(&sc->mr_io.cleanup.wait_queue);
- return rc;
+put_kref:
+ /*
+ * No kref_put_mutex() as it's already locked.
+ *
+ * If kref_put() returned 1, smbd_mr_free_locked()
+ * ran, the mutex was unlocked and the mr is gone.
+ *
+ * If kref_put() returned 0 we know that
+ * smbd_mr_free_locked() didn't run, neither by us
+ * nor by anyone else, as we still hold the mutex,
+ * so we need to unlock it ourselves and keep the mr
+ * in SMBDIRECT_MR_READY or SMBDIRECT_MR_ERROR state.
+ */
+ if (!kref_put(&mr->kref, smbd_mr_free_locked))
+ mutex_unlock(&mr->mutex);
}
static bool smb_set_sge(struct smb_extract_to_rdma *rdma,
diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
index d67ac5ddaff4..577d37dbeb8a 100644
--- a/fs/smb/client/smbdirect.h
+++ b/fs/smb/client/smbdirect.h
@@ -60,7 +60,7 @@ int smbd_send(struct TCP_Server_Info *server,
struct smbdirect_mr_io *smbd_register_mr(
struct smbd_connection *info, struct iov_iter *iter,
bool writing, bool need_invalidate);
-int smbd_deregister_mr(struct smbdirect_mr_io *mr);
+void smbd_deregister_mr(struct smbdirect_mr_io *mr);
#else
#define cifs_rdma_enabled(server) 0
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index fd650e2afc76..28e00c34df1c 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -266,7 +266,7 @@ DEFINE_EVENT(smb3_copy_range_err_class, smb3_##name, \
TP_ARGS(xid, src_fid, target_fid, tid, sesid, src_offset, target_offset, len, rc))
DEFINE_SMB3_COPY_RANGE_ERR_EVENT(clone_err);
-/* TODO: Add SMB3_COPY_RANGE_ERR_EVENT(copychunk_err) */
+DEFINE_SMB3_COPY_RANGE_ERR_EVENT(copychunk_err);
DECLARE_EVENT_CLASS(smb3_copy_range_done_class,
TP_PROTO(unsigned int xid,
diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
index b88fa04f5792..029910d56c22 100644
--- a/fs/smb/client/xattr.c
+++ b/fs/smb/client/xattr.c
@@ -178,7 +178,6 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
memcpy(pacl, value, size);
if (pTcon->ses->server->ops->set_acl) {
int aclflags = 0;
- rc = 0;
switch (handler->flags) {
case XATTR_CIFS_NTSD_FULL:
diff --git a/fs/smb/common/cifsglob.h b/fs/smb/common/cifsglob.h
new file mode 100644
index 000000000000..00fd215e3eb5
--- /dev/null
+++ b/fs/smb/common/cifsglob.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
+/*
+ *
+ * Copyright (C) International Business Machines Corp., 2002,2008
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Jeremy Allison (jra@samba.org)
+ *
+ */
+#ifndef _COMMON_CIFS_GLOB_H
+#define _COMMON_CIFS_GLOB_H
+
+static inline void inc_rfc1001_len(void *buf, int count)
+{
+ be32_add_cpu((__be32 *)buf, count);
+}
+
+#define SMB1_VERSION_STRING "1.0"
+#define SMB20_VERSION_STRING "2.0"
+#define SMB21_VERSION_STRING "2.1"
+#define SMBDEFAULT_VERSION_STRING "default"
+#define SMB3ANY_VERSION_STRING "3"
+#define SMB30_VERSION_STRING "3.0"
+#define SMB302_VERSION_STRING "3.02"
+#define ALT_SMB302_VERSION_STRING "3.0.2"
+#define SMB311_VERSION_STRING "3.1.1"
+#define ALT_SMB311_VERSION_STRING "3.11"
+
+#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+
+#endif /* _COMMON_CIFS_GLOB_H */
diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
index db22a1d0546b..361db7f9f623 100644
--- a/fs/smb/common/smbdirect/smbdirect_socket.h
+++ b/fs/smb/common/smbdirect/smbdirect_socket.h
@@ -437,13 +437,22 @@ enum smbdirect_mr_state {
SMBDIRECT_MR_READY,
SMBDIRECT_MR_REGISTERED,
SMBDIRECT_MR_INVALIDATED,
- SMBDIRECT_MR_ERROR
+ SMBDIRECT_MR_ERROR,
+ SMBDIRECT_MR_DISABLED
};
struct smbdirect_mr_io {
struct smbdirect_socket *socket;
struct ib_cqe cqe;
+ /*
+ * We can have up to two references:
+ * 1. by the connection
+ * 2. by the registration
+ */
+ struct kref kref;
+ struct mutex mutex;
+
struct list_head list;
enum smbdirect_mr_state state;
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 6fa025374f2f..1c181ef99929 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -147,14 +147,11 @@ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id)
int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
{
struct ksmbd_session_rpc *entry;
- int method;
- down_read(&sess->rpc_lock);
+ lockdep_assert_held(&sess->rpc_lock);
entry = xa_load(&sess->rpc_handle_list, id);
- method = entry ? entry->method : 0;
- up_read(&sess->rpc_lock);
- return method;
+ return entry ? entry->method : 0;
}
void ksmbd_session_destroy(struct ksmbd_session *sess)
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index ab1d45fcebde..f901ae18e68a 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1806,6 +1806,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
if (ksmbd_conn_need_reconnect(conn)) {
rc = -EFAULT;
+ ksmbd_user_session_put(sess);
sess = NULL;
goto out_err;
}
@@ -4625,8 +4626,15 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
* pipe without opening it, checking error condition here
*/
id = req->VolatileFileId;
- if (!ksmbd_session_rpc_method(sess, id))
+
+ lockdep_assert_not_held(&sess->rpc_lock);
+
+ down_read(&sess->rpc_lock);
+ if (!ksmbd_session_rpc_method(sess, id)) {
+ up_read(&sess->rpc_lock);
return -ENOENT;
+ }
+ up_read(&sess->rpc_lock);
ksmbd_debug(SMB, "FileInfoClass %u, FileId 0x%llx\n",
req->FileInfoClass, req->VolatileFileId);
@@ -6824,6 +6832,7 @@ int smb2_read(struct ksmbd_work *work)
nbytes = ksmbd_vfs_read(work, fp, length, &offset, aux_payload_buf);
if (nbytes < 0) {
+ kvfree(aux_payload_buf);
err = nbytes;
goto out;
}
diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
index d742ba754348..863716207a0d 100644
--- a/fs/smb/server/smb_common.h
+++ b/fs/smb/server/smb_common.h
@@ -10,6 +10,7 @@
#include "glob.h"
#include "nterr.h"
+#include "../common/cifsglob.h"
#include "../common/smb2pdu.h"
#include "smb2pdu.h"
@@ -26,16 +27,8 @@
#define SMB311_PROT 6
#define BAD_PROT 0xFFFF
-#define SMB1_VERSION_STRING "1.0"
-#define SMB20_VERSION_STRING "2.0"
-#define SMB21_VERSION_STRING "2.1"
-#define SMB30_VERSION_STRING "3.0"
-#define SMB302_VERSION_STRING "3.02"
-#define SMB311_VERSION_STRING "3.1.1"
-
#define SMB_ECHO_INTERVAL (60 * HZ)
-#define CIFS_DEFAULT_IOSIZE (64 * 1024)
#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
#define MAX_STREAM_PROT_LEN 0x00FFFFFF
@@ -464,9 +457,4 @@ static inline unsigned int get_rfc1002_len(void *buf)
{
return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
}
-
-static inline void inc_rfc1001_len(void *buf, int count)
-{
- be32_add_cpu((__be32 *)buf, count);
-}
#endif /* __SMB_COMMON_H__ */
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index 2aa1b29bea08..46f87fd1ce1c 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -825,6 +825,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle
if (!msg)
return NULL;
+ lockdep_assert_not_held(&sess->rpc_lock);
+
+ down_read(&sess->rpc_lock);
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = handle;
@@ -833,6 +836,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle
req->flags |= KSMBD_RPC_WRITE_METHOD;
req->payload_sz = payload_sz;
memcpy(req->payload, payload, payload_sz);
+ up_read(&sess->rpc_lock);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_free(msg);
@@ -849,6 +853,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle)
if (!msg)
return NULL;
+ lockdep_assert_not_held(&sess->rpc_lock);
+
+ down_read(&sess->rpc_lock);
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = handle;
@@ -856,6 +863,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle)
req->flags |= rpc_context_flags(sess);
req->flags |= KSMBD_RPC_READ_METHOD;
req->payload_sz = 0;
+ up_read(&sess->rpc_lock);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_free(msg);
@@ -876,6 +884,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle
if (!msg)
return NULL;
+ lockdep_assert_not_held(&sess->rpc_lock);
+
+ down_read(&sess->rpc_lock);
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = handle;
@@ -884,6 +895,7 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle
req->flags |= KSMBD_RPC_IOCTL_METHOD;
req->payload_sz = payload_sz;
memcpy(req->payload, payload, payload_sz);
+ up_read(&sess->rpc_lock);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_free(msg);
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index b3077766d6ec..a201c5871a77 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -1574,18 +1574,14 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
get_buf_page_count(desc_buf, desc_buf_len),
msg->sg_list, SG_CHUNK_SIZE);
if (ret) {
- kfree(msg);
ret = -ENOMEM;
- goto out;
+ goto free_msg;
}
ret = get_sg_list(desc_buf, desc_buf_len,
msg->sgt.sgl, msg->sgt.orig_nents);
- if (ret < 0) {
- sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
- kfree(msg);
- goto out;
- }
+ if (ret < 0)
+ goto free_table;
ret = rdma_rw_ctx_init(&msg->rdma_ctx, sc->ib.qp, sc->ib.qp->port,
msg->sgt.sgl,
@@ -1596,9 +1592,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
if (ret < 0) {
pr_err("failed to init rdma_rw_ctx: %d\n", ret);
- sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
- kfree(msg);
- goto out;
+ goto free_table;
}
list_add_tail(&msg->list, &msg_list);
@@ -1630,6 +1624,12 @@ out:
atomic_add(credits_needed, &sc->rw_io.credits.count);
wake_up(&sc->rw_io.credits.wait_queue);
return ret;
+
+free_table:
+ sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE);
+free_msg:
+ kfree(msg);
+ goto out;
}
static int smb_direct_rdma_write(struct ksmbd_transport *t,