Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/backchannel_rqst.c             | 35
-rw-r--r--  net/sunrpc/rpc_pipe.c                     | 27
-rw-r--r--  net/sunrpc/svcsock.c                      | 62
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c         |  8
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  | 19
5 files changed, 106 insertions(+), 45 deletions(-)
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index caa94cf57123..68b1fcdea8f0 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -25,6 +25,22 @@ unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
}
/*
+ * Helper function to nullify the backchannel server pointer in the
+ * transport. Setting the pointer to NULL (done when the backchannel
+ * server is shut down) must be synchronized with its use by the
+ * backchannel request processing routines
+ * xprt_complete_bc_request() and rpcrdma_bc_receive_call().
+ */
+void xprt_svc_destroy_nullify_bc(struct rpc_xprt *xprt, struct svc_serv **serv)
+{
+	spin_lock(&xprt->bc_pa_lock);
+	xprt->bc_serv = NULL;
+	spin_unlock(&xprt->bc_pa_lock);
+	svc_destroy(serv);	/* may sleep, so it must run outside bc_pa_lock */
+}
+EXPORT_SYMBOL_GPL(xprt_svc_destroy_nullify_bc);
+
+/*
* Helper routines that track the number of preallocation elements
* on the transport.
*/
@@ -354,7 +370,6 @@ found:
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
struct rpc_xprt *xprt = req->rq_xprt;
- struct svc_serv *bc_serv = xprt->bc_serv;
spin_lock(&xprt->bc_pa_lock);
list_del(&req->rq_bc_pa_list);
@@ -365,7 +380,21 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
dprintk("RPC: add callback request to list\n");
+ xprt_enqueue_bc_request(req);
+}
+
+void xprt_enqueue_bc_request(struct rpc_rqst *req)
+{
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct svc_serv *bc_serv;
+
xprt_get(xprt);
- lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
- svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
+ spin_lock(&xprt->bc_pa_lock);
+ bc_serv = xprt->bc_serv;
+ if (bc_serv) {
+ lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
+ }
+ spin_unlock(&xprt->bc_pa_lock);
}
+EXPORT_SYMBOL_GPL(xprt_enqueue_bc_request);
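
The locking contract established by the two helpers above is easiest to see as a timeline. A minimal editorial sketch (not part of the patch; the interleaving shown is illustrative):

	/*
	 * data path                            shutdown path
	 * xprt_enqueue_bc_request()            xprt_svc_destroy_nullify_bc()
	 *
	 * spin_lock(&xprt->bc_pa_lock);
	 * bc_serv = xprt->bc_serv;             spin_lock(&xprt->bc_pa_lock); <- spins
	 * if (bc_serv)
	 *         enqueue and wake thread;
	 * spin_unlock(&xprt->bc_pa_lock);
	 *                                      xprt->bc_serv = NULL;
	 *                                      spin_unlock(&xprt->bc_pa_lock);
	 *                                      svc_destroy(serv);
	 *
	 * A reader either sees the live server pointer while holding the
	 * lock, or sees NULL and skips the enqueue; it can never
	 * dereference bc_serv after svc_destroy() has freed it.
	 */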
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0bd1df2ebb47..379daefc4847 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -536,17 +536,16 @@ static int rpc_new_file(struct dentry *parent,
inode = rpc_get_inode(dir->i_sb, S_IFREG | mode);
if (unlikely(!inode)) {
- dput(dentry);
- inode_unlock(dir);
+ simple_done_creating(dentry);
return -ENOMEM;
}
inode->i_ino = iunique(dir->i_sb, 100);
if (i_fop)
inode->i_fop = i_fop;
rpc_inode_setowner(inode, private);
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
fsnotify_create(dir, dentry);
- inode_unlock(dir);
+ simple_done_creating(dentry);
return 0;
}
@@ -563,18 +562,17 @@ static struct dentry *rpc_new_dir(struct dentry *parent,
inode = rpc_get_inode(dir->i_sb, S_IFDIR | mode);
if (unlikely(!inode)) {
- dput(dentry);
- inode_unlock(dir);
+ simple_done_creating(dentry);
return ERR_PTR(-ENOMEM);
}
inode->i_ino = iunique(dir->i_sb, 100);
inc_nlink(dir);
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
fsnotify_mkdir(dir, dentry);
- inode_unlock(dir);
+ simple_done_creating(dentry);
- return dentry;
+	return dentry;	/* borrowed */
}
static int rpc_populate(struct dentry *parent,
@@ -657,8 +655,7 @@ int rpc_mkpipe_dentry(struct dentry *parent, const char *name,
inode = rpc_get_inode(dir->i_sb, umode);
if (unlikely(!inode)) {
- dput(dentry);
- inode_unlock(dir);
+ simple_done_creating(dentry);
err = -ENOMEM;
goto failed;
}
@@ -668,10 +665,10 @@ int rpc_mkpipe_dentry(struct dentry *parent, const char *name,
rpci->private = private;
rpci->pipe = pipe;
rpc_inode_setowner(inode, private);
- d_instantiate(dentry, inode);
- pipe->dentry = dentry;
+	pipe->dentry = dentry;	/* borrowed */
+ d_make_persistent(dentry, inode);
fsnotify_create(dir, dentry);
- inode_unlock(dir);
+ simple_done_creating(dentry);
return 0;
failed:
@@ -1206,7 +1203,7 @@ static void rpc_kill_sb(struct super_block *sb)
sb);
mutex_unlock(&sn->pipefs_sb_lock);
out:
- kill_litter_super(sb);
+ kill_anon_super(sb);
put_net(net);
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 16ff6c100821..d61cd9b40491 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -68,6 +68,17 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
+/*
+ * For UDP:
+ * 1 for header page
+ * enough pages for RPCSVC_MAXPAYLOAD_UDP
+ * 1 in case payload is not aligned
+ * 1 for tail page
+ */
+enum {
+ SUNRPC_MAX_UDP_SENDPAGES = 1 + RPCSVC_MAXPAYLOAD_UDP / PAGE_SIZE + 1 + 1
+};
+
/* To-do: to avoid tying up an nfsd thread while waiting for a
* handshake request, the request could instead be deferred.
*/
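
On a typical build with 4 KiB pages and RPCSVC_MAXPAYLOAD_UDP = 32 KiB, the bound above works out to 1 + 32768/4096 + 1 + 1 = 11 bvec entries per UDP socket.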
@@ -740,14 +751,14 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
if (svc_xprt_is_dead(xprt))
goto out_notconn;
- count = xdr_buf_to_bvec(rqstp->rq_bvec, rqstp->rq_maxpages, xdr);
+ count = xdr_buf_to_bvec(svsk->sk_bvec, SUNRPC_MAX_UDP_SENDPAGES, xdr);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, svsk->sk_bvec,
count, rqstp->rq_res.len);
err = sock_sendmsg(svsk->sk_sock, &msg);
if (err == -ECONNREFUSED) {
/* ICMP error on earlier request. */
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, svsk->sk_bvec,
count, rqstp->rq_res.len);
err = sock_sendmsg(svsk->sk_sock, &msg);
}
@@ -1062,9 +1073,10 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
return svc_sock_reclen(svsk);
err_too_large:
- net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n",
- __func__, svsk->sk_xprt.xpt_server->sv_name,
- svc_sock_reclen(svsk));
+ net_notice_ratelimited("svc: %s oversized RPC fragment (%u octets) from %pISpc\n",
+ svsk->sk_xprt.xpt_server->sv_name,
+ svc_sock_reclen(svsk),
+ (struct sockaddr *)&svsk->sk_xprt.xpt_remote);
svc_xprt_deferred_close(&svsk->sk_xprt);
err_short:
return -EAGAIN;
@@ -1235,19 +1247,19 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
int ret;
/* The stream record marker is copied into a temporary page
- * fragment buffer so that it can be included in rq_bvec.
+ * fragment buffer so that it can be included in sk_bvec.
*/
buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, &marker, sizeof(marker));
- bvec_set_virt(rqstp->rq_bvec, buf, sizeof(marker));
+ bvec_set_virt(svsk->sk_bvec, buf, sizeof(marker));
- count = xdr_buf_to_bvec(rqstp->rq_bvec + 1, rqstp->rq_maxpages,
+ count = xdr_buf_to_bvec(svsk->sk_bvec + 1, rqstp->rq_maxpages,
&rqstp->rq_res);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, svsk->sk_bvec,
1 + count, sizeof(marker) + rqstp->rq_res.len);
ret = sock_sendmsg(svsk->sk_sock, &msg);
page_frag_free(buf);
@@ -1392,6 +1404,20 @@ void svc_sock_update_bufs(struct svc_serv *serv)
spin_unlock_bh(&serv->sv_lock);
}
+static int svc_sock_sendpages(struct svc_serv *serv, struct socket *sock, int flags)
+{
+ switch (sock->type) {
+ case SOCK_STREAM:
+ /* +1 for TCP record marker */
+ if (flags & SVC_SOCK_TEMPORARY)
+ return svc_serv_maxpages(serv) + 1;
+ return 0;
+ case SOCK_DGRAM:
+ return SUNRPC_MAX_UDP_SENDPAGES;
+ }
+ return -EINVAL;
+}
+
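
In other words: permanent (listener) stream sockets never send RPC replies, so they need no send bvec array; temporary (accepted) TCP sockets need one bvec per payload page plus one for the stream record marker; and UDP sockets use the fixed SUNRPC_MAX_UDP_SENDPAGES bound computed earlier.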
/*
* Initialize socket for RPC use and create svc_sock struct
*/
@@ -1402,12 +1428,26 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
struct svc_sock *svsk;
struct sock *inet;
int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
+ int sendpages;
unsigned long pages;
+ sendpages = svc_sock_sendpages(serv, sock, flags);
+ if (sendpages < 0)
+ return ERR_PTR(sendpages);
+
pages = svc_serv_maxpages(serv);
svsk = kzalloc(struct_size(svsk, sk_pages, pages), GFP_KERNEL);
if (!svsk)
return ERR_PTR(-ENOMEM);
+
+ if (sendpages) {
+ svsk->sk_bvec = kcalloc(sendpages, sizeof(*svsk->sk_bvec), GFP_KERNEL);
+ if (!svsk->sk_bvec) {
+ kfree(svsk);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
svsk->sk_maxpages = pages;
inet = sock->sk;
@@ -1419,6 +1459,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
inet->sk_protocol,
ntohs(inet_sk(inet)->inet_sport));
if (err < 0) {
+ kfree(svsk->sk_bvec);
kfree(svsk);
return ERR_PTR(err);
}
@@ -1636,5 +1677,6 @@ static void svc_sock_free(struct svc_xprt *xprt)
sock_release(sock);
page_frag_cache_drain(&svsk->sk_frag_cache);
+ kfree(svsk->sk_bvec);
kfree(svsk);
}
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 8c817e755262..2f0f9618dd05 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -9,6 +9,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>
+#include <linux/sunrpc/bc_xprt.h>
#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
@@ -220,7 +221,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_rep *rep)
{
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
- struct svc_serv *bc_serv;
struct rpcrdma_req *req;
struct rpc_rqst *rqst;
struct xdr_buf *buf;
@@ -261,11 +261,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
trace_xprtrdma_cb_call(r_xprt, rqst);
/* Queue rqst for ULP's callback service */
- bc_serv = xprt->bc_serv;
- xprt_get(xprt);
- lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
-
- svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
+ xprt_enqueue_bc_request(rqst);
r_xprt->rx_stats.bcall_count++;
return;
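
For context, the consumer side of sv_cb_list (an editorial simplification of the svc thread's backchannel branch; assumes the lwq API and svc_process_bc() as found in recent kernels, neither of which is part of this patch):

	/* server thread side, simplified: pull a request queued by
	 * xprt_enqueue_bc_request() off the lockless queue and run it
	 * through the callback service.
	 */
	struct rpc_rqst *req = lwq_dequeue(&serv->sv_cb_list,
					   struct rpc_rqst, rq_bc_list);
	if (req)
		svc_process_bc(req, rqstp);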
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 3d7f1413df02..b7b318ad25c4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -591,12 +591,21 @@ static void svc_rdma_detach(struct svc_xprt *xprt)
rdma_disconnect(rdma->sc_cm_id);
}
-static void __svc_rdma_free(struct work_struct *work)
+/**
+ * svc_rdma_free - Release class-specific transport resources
+ * @xprt: Generic svc transport object
+ *
+ * May sleep: ib_drain_qp() blocks until the completion queues are
+ * empty, hence the might_sleep() annotation below.
+ */
+static void svc_rdma_free(struct svc_xprt *xprt)
{
struct svcxprt_rdma *rdma =
- container_of(work, struct svcxprt_rdma, sc_work);
+ container_of(xprt, struct svcxprt_rdma, sc_xprt);
struct ib_device *device = rdma->sc_cm_id->device;
+ might_sleep();
+
/* This blocks until the Completion Queues are empty */
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_drain_qp(rdma->sc_qp);
@@ -629,15 +635,6 @@ static void __svc_rdma_free(struct work_struct *work)
kfree(rdma);
}
-static void svc_rdma_free(struct svc_xprt *xprt)
-{
- struct svcxprt_rdma *rdma =
- container_of(xprt, struct svcxprt_rdma, sc_xprt);
-
- INIT_WORK(&rdma->sc_work, __svc_rdma_free);
- schedule_work(&rdma->sc_work);
-}
-
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
struct svcxprt_rdma *rdma =