Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_fd.c                   17
-rw-r--r--  net/9p/trans_usbg.c                 16
-rw-r--r--  net/bridge/br_vlan.c                 2
-rw-r--r--  net/can/j1939/main.c                 2
-rw-r--r--  net/ceph/Kconfig                     3
-rw-r--r--  net/ceph/messenger.c                12
-rw-r--r--  net/ceph/messenger_v1.c             56
-rw-r--r--  net/ceph/messenger_v2.c            246
-rw-r--r--  net/core/dev.c                      40
-rw-r--r--  net/core/filter.c                    2
-rw-r--r--  net/core/gro_cells.c                10
-rw-r--r--  net/core/page_pool.c                76
-rw-r--r--  net/core/skbuff.c                    1
-rw-r--r--  net/ipv4/ip_output.c                 2
-rw-r--r--  net/ipv4/ip_tunnel.c                14
-rw-r--r--  net/ipv4/tcp.c                       5
-rw-r--r--  net/ipv4/tcp_input.c                 1
-rw-r--r--  net/ipv4/tcp_output.c               19
-rw-r--r--  net/ipv4/udp.c                       2
-rw-r--r--  net/ipv6/ip6_tunnel.c                3
-rw-r--r--  net/ipv6/tcp_ipv6.c                  2
-rw-r--r--  net/netfilter/nft_objref.c          39
-rw-r--r--  net/psp/psp_sock.c                   4
-rw-r--r--  net/sctp/sm_statefuns.c              3
-rw-r--r--  net/sunrpc/Kconfig                   3
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c    2
-rw-r--r--  net/sunrpc/svc.c                    17
-rw-r--r--  net/sunrpc/svc_xprt.c               13
-rw-r--r--  net/sunrpc/svcsock.c                25
-rw-r--r--  net/sunrpc/sysfs.c                   2
-rw-r--r--  net/tipc/crypto.c                    2
-rw-r--r--  net/tipc/topsrv.c                    4
-rw-r--r--  net/tls/tls_main.c                   7
-rw-r--r--  net/tls/tls_sw.c                    31
-rw-r--r--  net/vmw_vsock/Kconfig                2
-rw-r--r--  net/xdp/xsk_queue.h                 45
36 files changed, 439 insertions(+), 291 deletions(-)
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 339ec4e54778..a516745f732f 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -666,7 +666,6 @@ static void p9_poll_mux(struct p9_conn *m)
static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
- __poll_t n;
int err;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -686,13 +685,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
list_add_tail(&req->req_list, &m->unsent_req_list);
spin_unlock(&m->req_lock);
- if (test_and_clear_bit(Wpending, &m->wsched))
- n = EPOLLOUT;
- else
- n = p9_fd_poll(m->client, NULL, NULL);
-
- if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
- schedule_work(&m->wq);
+ p9_poll_mux(m);
return 0;
}
@@ -726,10 +719,10 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
spin_lock(&m->req_lock);
- /* Ignore cancelled request if message has been received
- * before lock.
- */
- if (req->status == REQ_STATUS_RCVD) {
+ /* Ignore cancelled request if status changed since the request was
+ * processed in p9_client_flush()
+ */
+ if (req->status != REQ_STATUS_SENT) {
spin_unlock(&m->req_lock);
return 0;
}
diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
index 6b694f117aef..468f7e8f0277 100644
--- a/net/9p/trans_usbg.c
+++ b/net/9p/trans_usbg.c
@@ -231,6 +231,8 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
struct f_usb9pfs *usb9pfs = ep->driver_data;
struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
struct p9_req_t *p9_rx_req;
+ unsigned int req_size = req->actual;
+ int status = REQ_STATUS_RCVD;
if (req->status) {
dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
@@ -242,11 +244,19 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
if (!p9_rx_req)
return;
- memcpy(p9_rx_req->rc.sdata, req->buf, req->actual);
+ if (req_size > p9_rx_req->rc.capacity) {
+ dev_err(&cdev->gadget->dev,
+ "%s received data size %u exceeds buffer capacity %zu\n",
+ ep->name, req_size, p9_rx_req->rc.capacity);
+ req_size = 0;
+ status = REQ_STATUS_ERROR;
+ }
+
+ memcpy(p9_rx_req->rc.sdata, req->buf, req_size);
- p9_rx_req->rc.size = req->actual;
+ p9_rx_req->rc.size = req_size;
- p9_client_cb(usb9pfs->client, p9_rx_req, REQ_STATUS_RCVD);
+ p9_client_cb(usb9pfs->client, p9_rx_req, status);
p9_req_put(usb9pfs->client, p9_rx_req);
complete(&usb9pfs->received);
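
A note on the trans_usbg.c fix above: the completion handler now validates the length reported by the UDC (req->actual) against the 9p receive buffer before copying. A minimal standalone sketch of the same validate-then-copy pattern; the rx_buf/rx_copy_checked names are illustrative, not the kernel API:

    #include <stdint.h>
    #include <string.h>

    struct rx_buf {
            uint8_t *data;
            size_t capacity;        /* allocated size, cf. rc.capacity */
            size_t size;            /* valid bytes, cf. rc.size */
    };

    /* Copy only after checking the hardware-reported length fits the buffer. */
    static int rx_copy_checked(struct rx_buf *dst, const void *src, size_t actual)
    {
            if (actual > dst->capacity)
                    return -1;      /* flag the request as errored, copy nothing */
            memcpy(dst->data, src, actual);
            dst->size = actual;
            return 0;
    }
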
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index ae911220cb3c..ce72b837ff8e 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1457,7 +1457,7 @@ void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
if (!br_opt_get(br, BROPT_VLAN_ENABLED))
return;
- vg = br_vlan_group(br);
+ vg = br_vlan_group_rcu(br);
if (idx >= 0 &&
ctx->vlan[idx].proto == br->vlan_proto) {
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index 3706a872ecaf..a93af55df5fd 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -378,6 +378,8 @@ static int j1939_netdev_notify(struct notifier_block *nb,
j1939_ecu_unmap_all(priv);
break;
case NETDEV_UNREGISTER:
+ j1939_cancel_active_session(priv, NULL);
+ j1939_sk_netdev_event_netdown(priv);
j1939_sk_netdev_event_unregister(priv);
break;
}
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index 0aa21fcbf6ec..ea60e3ef0834 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -6,8 +6,7 @@ config CEPH_LIB
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_GCM
- select CRYPTO_HMAC
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA256
select CRYPTO
select KEYS
default n
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1fbec4853f00..f8181acaf870 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1794,9 +1794,9 @@ void ceph_msg_revoke(struct ceph_msg *msg)
WARN_ON(con->state != CEPH_CON_S_OPEN);
dout("%s con %p msg %p was sending\n", __func__, con, msg);
if (ceph_msgr2(from_msgr(con->msgr)))
- ceph_con_v2_revoke(con);
+ ceph_con_v2_revoke(con, msg);
else
- ceph_con_v1_revoke(con);
+ ceph_con_v1_revoke(con, msg);
ceph_msg_put(con->out_msg);
con->out_msg = NULL;
} else {
@@ -2111,11 +2111,13 @@ int ceph_con_in_msg_alloc(struct ceph_connection *con,
return ret;
}
-void ceph_con_get_out_msg(struct ceph_connection *con)
+struct ceph_msg *ceph_con_get_out_msg(struct ceph_connection *con)
{
struct ceph_msg *msg;
- BUG_ON(list_empty(&con->out_queue));
+ if (list_empty(&con->out_queue))
+ return NULL;
+
msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
WARN_ON(msg->con != con);
@@ -2142,7 +2144,7 @@ void ceph_con_get_out_msg(struct ceph_connection *con)
* message or in case of a fault.
*/
WARN_ON(con->out_msg);
- con->out_msg = ceph_msg_get(msg);
+ return con->out_msg = ceph_msg_get(msg);
}
/*
diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index 0cb61c76b9b8..c9e002d96319 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -169,10 +169,9 @@ static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
* Prepare footer for currently outgoing message, and finish things
* off. Assumes out_kvec* are already valid.. we just add on to the end.
*/
-static void prepare_write_message_footer(struct ceph_connection *con)
+static void prepare_write_message_footer(struct ceph_connection *con,
+ struct ceph_msg *m)
{
- struct ceph_msg *m = con->out_msg;
-
m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
dout("prepare_write_message_footer %p\n", con);
@@ -192,9 +191,9 @@ static void prepare_write_message_footer(struct ceph_connection *con)
/*
* Prepare headers for the next outgoing message.
*/
-static void prepare_write_message(struct ceph_connection *con)
+static void prepare_write_message(struct ceph_connection *con,
+ struct ceph_msg *m)
{
- struct ceph_msg *m;
u32 crc;
con_out_kvec_reset(con);
@@ -210,9 +209,6 @@ static void prepare_write_message(struct ceph_connection *con)
&con->v1.out_temp_ack);
}
- ceph_con_get_out_msg(con);
- m = con->out_msg;
-
dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
m, con->out_seq, le16_to_cpu(m->hdr.type),
le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
@@ -231,31 +227,31 @@ static void prepare_write_message(struct ceph_connection *con)
/* fill in hdr crc and finalize hdr */
crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
- con->out_msg->hdr.crc = cpu_to_le32(crc);
- memcpy(&con->v1.out_hdr, &con->out_msg->hdr, sizeof(con->v1.out_hdr));
+ m->hdr.crc = cpu_to_le32(crc);
+ memcpy(&con->v1.out_hdr, &m->hdr, sizeof(con->v1.out_hdr));
/* fill in front and middle crc, footer */
crc = crc32c(0, m->front.iov_base, m->front.iov_len);
- con->out_msg->footer.front_crc = cpu_to_le32(crc);
+ m->footer.front_crc = cpu_to_le32(crc);
if (m->middle) {
crc = crc32c(0, m->middle->vec.iov_base,
m->middle->vec.iov_len);
- con->out_msg->footer.middle_crc = cpu_to_le32(crc);
+ m->footer.middle_crc = cpu_to_le32(crc);
} else
- con->out_msg->footer.middle_crc = 0;
+ m->footer.middle_crc = 0;
dout("%s front_crc %u middle_crc %u\n", __func__,
- le32_to_cpu(con->out_msg->footer.front_crc),
- le32_to_cpu(con->out_msg->footer.middle_crc));
- con->out_msg->footer.flags = 0;
+ le32_to_cpu(m->footer.front_crc),
+ le32_to_cpu(m->footer.middle_crc));
+ m->footer.flags = 0;
/* is there a data payload? */
- con->out_msg->footer.data_crc = 0;
+ m->footer.data_crc = 0;
if (m->data_length) {
- prepare_message_data(con->out_msg, m->data_length);
+ prepare_message_data(m, m->data_length);
con->v1.out_more = 1; /* data + footer will follow */
} else {
/* no, queue up footer too and be done */
- prepare_write_message_footer(con);
+ prepare_write_message_footer(con, m);
}
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
@@ -462,9 +458,9 @@ out:
* 0 -> socket full, but more to do
* <0 -> error
*/
-static int write_partial_message_data(struct ceph_connection *con)
+static int write_partial_message_data(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
- struct ceph_msg *msg = con->out_msg;
struct ceph_msg_data_cursor *cursor = &msg->cursor;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
u32 crc;
@@ -516,7 +512,7 @@ static int write_partial_message_data(struct ceph_connection *con)
else
msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
con_out_kvec_reset(con);
- prepare_write_message_footer(con);
+ prepare_write_message_footer(con, msg);
return 1; /* must return > 0 to indicate success */
}
@@ -1472,6 +1468,7 @@ bad_tag:
*/
int ceph_con_v1_try_write(struct ceph_connection *con)
{
+ struct ceph_msg *msg;
int ret = 1;
dout("try_write start %p state %d\n", con, con->state);
@@ -1518,14 +1515,15 @@ more:
}
/* msg pages? */
- if (con->out_msg) {
+ msg = con->out_msg;
+ if (msg) {
if (con->v1.out_msg_done) {
- ceph_msg_put(con->out_msg);
+ ceph_msg_put(msg);
con->out_msg = NULL; /* we're done with this one */
goto do_next;
}
- ret = write_partial_message_data(con);
+ ret = write_partial_message_data(con, msg);
if (ret == 1)
goto more; /* we need to send the footer, too! */
if (ret == 0)
@@ -1545,8 +1543,8 @@ do_next:
goto more;
}
/* is anything else pending? */
- if (!list_empty(&con->out_queue)) {
- prepare_write_message(con);
+ if ((msg = ceph_con_get_out_msg(con)) != NULL) {
+ prepare_write_message(con, msg);
goto more;
}
if (con->in_seq > con->in_seq_acked) {
@@ -1564,10 +1562,8 @@ out:
return ret;
}
-void ceph_con_v1_revoke(struct ceph_connection *con)
+void ceph_con_v1_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
- struct ceph_msg *msg = con->out_msg;
-
WARN_ON(con->v1.out_skip);
/* footer */
if (con->v1.out_msg_done) {
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index 5483b4eed94e..9e39378eda00 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -709,7 +709,7 @@ static int setup_crypto(struct ceph_connection *con,
dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
__func__, con, con->v2.con_mode, session_key_len, con_secret_len);
- WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);
+ WARN_ON(con->v2.hmac_key_set || con->v2.gcm_tfm || con->v2.gcm_req);
if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
con->v2.con_mode != CEPH_CON_MODE_SECURE) {
@@ -723,22 +723,8 @@ static int setup_crypto(struct ceph_connection *con,
return 0; /* auth_none */
}
- noio_flag = memalloc_noio_save();
- con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
- memalloc_noio_restore(noio_flag);
- if (IS_ERR(con->v2.hmac_tfm)) {
- ret = PTR_ERR(con->v2.hmac_tfm);
- con->v2.hmac_tfm = NULL;
- pr_err("failed to allocate hmac tfm context: %d\n", ret);
- return ret;
- }
-
- ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
- session_key_len);
- if (ret) {
- pr_err("failed to set hmac key: %d\n", ret);
- return ret;
- }
+ hmac_sha256_preparekey(&con->v2.hmac_key, session_key, session_key_len);
+ con->v2.hmac_key_set = true;
if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
WARN_ON(con_secret_len);
@@ -793,38 +779,26 @@ static int setup_crypto(struct ceph_connection *con,
return 0; /* auth_x, secure mode */
}
-static int ceph_hmac_sha256(struct ceph_connection *con,
- const struct kvec *kvecs, int kvec_cnt, u8 *hmac)
+static void ceph_hmac_sha256(struct ceph_connection *con,
+ const struct kvec *kvecs, int kvec_cnt,
+ u8 hmac[SHA256_DIGEST_SIZE])
{
- SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm); /* tfm arg is ignored */
- int ret;
+ struct hmac_sha256_ctx ctx;
int i;
- dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
- con->v2.hmac_tfm, kvec_cnt);
+ dout("%s con %p hmac_key_set %d kvec_cnt %d\n", __func__, con,
+ con->v2.hmac_key_set, kvec_cnt);
- if (!con->v2.hmac_tfm) {
+ if (!con->v2.hmac_key_set) {
memset(hmac, 0, SHA256_DIGEST_SIZE);
- return 0; /* auth_none */
+ return; /* auth_none */
}
- desc->tfm = con->v2.hmac_tfm;
- ret = crypto_shash_init(desc);
- if (ret)
- goto out;
-
- for (i = 0; i < kvec_cnt; i++) {
- ret = crypto_shash_update(desc, kvecs[i].iov_base,
- kvecs[i].iov_len);
- if (ret)
- goto out;
- }
-
- ret = crypto_shash_final(desc, hmac);
-
-out:
- shash_desc_zero(desc);
- return ret; /* auth_x, both plain and secure modes */
+ /* auth_x, both plain and secure modes */
+ hmac_sha256_init(&ctx, &con->v2.hmac_key);
+ for (i = 0; i < kvec_cnt; i++)
+ hmac_sha256_update(&ctx, kvecs[i].iov_base, kvecs[i].iov_len);
+ hmac_sha256_final(&ctx, hmac);
}
static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
@@ -1455,17 +1429,14 @@ static int prepare_auth_request_more(struct ceph_connection *con,
static int prepare_auth_signature(struct ceph_connection *con)
{
void *buf;
- int ret;
buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
con_secure(con)));
if (!buf)
return -ENOMEM;
- ret = ceph_hmac_sha256(con, con->v2.in_sign_kvecs,
- con->v2.in_sign_kvec_cnt, CTRL_BODY(buf));
- if (ret)
- return ret;
+ ceph_hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
+ CTRL_BODY(buf));
return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
SHA256_DIGEST_SIZE);
@@ -1589,10 +1560,11 @@ static int prepare_ack(struct ceph_connection *con)
return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
}
-static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
+static void prepare_epilogue_plain(struct ceph_connection *con,
+ struct ceph_msg *msg, bool aborted)
{
dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
- con->out_msg, aborted, con->v2.out_epil.front_crc,
+ msg, aborted, con->v2.out_epil.front_crc,
con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);
encode_epilogue_plain(con, aborted);
@@ -1603,10 +1575,9 @@ static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
* For "used" empty segments, crc is -1. For unused (trailing)
* segments, crc is 0.
*/
-static void prepare_message_plain(struct ceph_connection *con)
+static void prepare_message_plain(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
- struct ceph_msg *msg = con->out_msg;
-
prepare_head_plain(con, con->v2.out_buf,
sizeof(struct ceph_msg_header2), NULL, 0, false);
@@ -1647,7 +1618,7 @@ static void prepare_message_plain(struct ceph_connection *con)
con->v2.out_state = OUT_S_QUEUE_DATA;
} else {
con->v2.out_epil.data_crc = 0;
- prepare_epilogue_plain(con, false);
+ prepare_epilogue_plain(con, msg, false);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
}
@@ -1659,7 +1630,8 @@ static void prepare_message_plain(struct ceph_connection *con)
* allocate pages for the entire tail of the message (currently up
* to ~32M) and two sgs arrays (up to ~256K each)...
*/
-static int prepare_message_secure(struct ceph_connection *con)
+static int prepare_message_secure(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
void *zerop = page_address(ceph_zero_page);
struct sg_table enc_sgt = {};
@@ -1674,7 +1646,7 @@ static int prepare_message_secure(struct ceph_connection *con)
if (ret)
return ret;
- tail_len = tail_onwire_len(con->out_msg, true);
+ tail_len = tail_onwire_len(msg, true);
if (!tail_len) {
/*
* Empty message: once the head is written,
@@ -1685,7 +1657,7 @@ static int prepare_message_secure(struct ceph_connection *con)
}
encode_epilogue_secure(con, false);
- ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
+ ret = setup_message_sgs(&sgt, msg, zerop, zerop, zerop,
&con->v2.out_epil, NULL, 0, false);
if (ret)
goto out;
@@ -1714,7 +1686,7 @@ static int prepare_message_secure(struct ceph_connection *con)
goto out;
dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
- con->out_msg, sgt.orig_nents, enc_page_cnt);
+ msg, sgt.orig_nents, enc_page_cnt);
con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;
out:
@@ -1723,19 +1695,19 @@ out:
return ret;
}
-static int prepare_message(struct ceph_connection *con)
+static int prepare_message(struct ceph_connection *con, struct ceph_msg *msg)
{
int lens[] = {
sizeof(struct ceph_msg_header2),
- front_len(con->out_msg),
- middle_len(con->out_msg),
- data_len(con->out_msg)
+ front_len(msg),
+ middle_len(msg),
+ data_len(msg)
};
struct ceph_frame_desc desc;
int ret;
dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
- con->out_msg, lens[0], lens[1], lens[2], lens[3]);
+ msg, lens[0], lens[1], lens[2], lens[3]);
if (con->in_seq > con->in_seq_acked) {
dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
@@ -1746,15 +1718,15 @@ static int prepare_message(struct ceph_connection *con)
reset_out_kvecs(con);
init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
encode_preamble(&desc, con->v2.out_buf);
- fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
+ fill_header2(CTRL_BODY(con->v2.out_buf), &msg->hdr,
con->in_seq_acked);
if (con_secure(con)) {
- ret = prepare_message_secure(con);
+ ret = prepare_message_secure(con, msg);
if (ret)
return ret;
} else {
- prepare_message_plain(con);
+ prepare_message_plain(con, msg);
}
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
@@ -2460,10 +2432,8 @@ static int process_auth_signature(struct ceph_connection *con,
return -EINVAL;
}
- ret = ceph_hmac_sha256(con, con->v2.out_sign_kvecs,
- con->v2.out_sign_kvec_cnt, hmac);
- if (ret)
- return ret;
+ ceph_hmac_sha256(con, con->v2.out_sign_kvecs, con->v2.out_sign_kvec_cnt,
+ hmac);
ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
@@ -3184,20 +3154,20 @@ int ceph_con_v2_try_read(struct ceph_connection *con)
}
}
-static void queue_data(struct ceph_connection *con)
+static void queue_data(struct ceph_connection *con, struct ceph_msg *msg)
{
struct bio_vec bv;
con->v2.out_epil.data_crc = -1;
- ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
- data_len(con->out_msg));
+ ceph_msg_data_cursor_init(&con->v2.out_cursor, msg,
+ data_len(msg));
get_bvec_at(&con->v2.out_cursor, &bv);
set_out_bvec(con, &bv, true);
con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
}
-static void queue_data_cont(struct ceph_connection *con)
+static void queue_data_cont(struct ceph_connection *con, struct ceph_msg *msg)
{
struct bio_vec bv;
@@ -3218,7 +3188,7 @@ static void queue_data_cont(struct ceph_connection *con)
* we are done.
*/
reset_out_kvecs(con);
- prepare_epilogue_plain(con, false);
+ prepare_epilogue_plain(con, msg, false);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
@@ -3250,7 +3220,7 @@ static void queue_enc_page(struct ceph_connection *con)
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
-static void queue_zeros(struct ceph_connection *con)
+static void queue_zeros(struct ceph_connection *con, struct ceph_msg *msg)
{
dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
@@ -3267,7 +3237,7 @@ static void queue_zeros(struct ceph_connection *con)
* Once it's written, we are done patching up for the revoke.
*/
reset_out_kvecs(con);
- prepare_epilogue_plain(con, true);
+ prepare_epilogue_plain(con, msg, true);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
@@ -3294,6 +3264,7 @@ static void finish_message(struct ceph_connection *con)
static int populate_out_iter(struct ceph_connection *con)
{
+ struct ceph_msg *msg;
int ret;
dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
@@ -3309,18 +3280,18 @@ static int populate_out_iter(struct ceph_connection *con)
switch (con->v2.out_state) {
case OUT_S_QUEUE_DATA:
WARN_ON(!con->out_msg);
- queue_data(con);
+ queue_data(con, con->out_msg);
goto populated;
case OUT_S_QUEUE_DATA_CONT:
WARN_ON(!con->out_msg);
- queue_data_cont(con);
+ queue_data_cont(con, con->out_msg);
goto populated;
case OUT_S_QUEUE_ENC_PAGE:
queue_enc_page(con);
goto populated;
case OUT_S_QUEUE_ZEROS:
WARN_ON(con->out_msg); /* revoked */
- queue_zeros(con);
+ queue_zeros(con, con->out_msg);
goto populated;
case OUT_S_FINISH_MESSAGE:
finish_message(con);
@@ -3339,9 +3310,8 @@ static int populate_out_iter(struct ceph_connection *con)
pr_err("prepare_keepalive2 failed: %d\n", ret);
return ret;
}
- } else if (!list_empty(&con->out_queue)) {
- ceph_con_get_out_msg(con);
- ret = prepare_message(con);
+ } else if ((msg = ceph_con_get_out_msg(con)) != NULL) {
+ ret = prepare_message(con, msg);
if (ret) {
pr_err("prepare_message failed: %d\n", ret);
return ret;
@@ -3453,17 +3423,18 @@ static u32 crc32c_zeros(u32 crc, int zero_len)
return crc;
}
-static void prepare_zero_front(struct ceph_connection *con, int resid)
+static void prepare_zero_front(struct ceph_connection *con,
+ struct ceph_msg *msg, int resid)
{
int sent;
- WARN_ON(!resid || resid > front_len(con->out_msg));
- sent = front_len(con->out_msg) - resid;
+ WARN_ON(!resid || resid > front_len(msg));
+ sent = front_len(msg) - resid;
dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
if (sent) {
con->v2.out_epil.front_crc =
- crc32c(-1, con->out_msg->front.iov_base, sent);
+ crc32c(-1, msg->front.iov_base, sent);
con->v2.out_epil.front_crc =
crc32c_zeros(con->v2.out_epil.front_crc, resid);
} else {
@@ -3474,17 +3445,18 @@ static void prepare_zero_front(struct ceph_connection *con, int resid)
out_zero_add(con, resid);
}
-static void prepare_zero_middle(struct ceph_connection *con, int resid)
+static void prepare_zero_middle(struct ceph_connection *con,
+ struct ceph_msg *msg, int resid)
{
int sent;
- WARN_ON(!resid || resid > middle_len(con->out_msg));
- sent = middle_len(con->out_msg) - resid;
+ WARN_ON(!resid || resid > middle_len(msg));
+ sent = middle_len(msg) - resid;
dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
if (sent) {
con->v2.out_epil.middle_crc =
- crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
+ crc32c(-1, msg->middle->vec.iov_base, sent);
con->v2.out_epil.middle_crc =
crc32c_zeros(con->v2.out_epil.middle_crc, resid);
} else {
@@ -3495,61 +3467,64 @@ static void prepare_zero_middle(struct ceph_connection *con, int resid)
out_zero_add(con, resid);
}
-static void prepare_zero_data(struct ceph_connection *con)
+static void prepare_zero_data(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
dout("%s con %p\n", __func__, con);
- con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
- out_zero_add(con, data_len(con->out_msg));
+ con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(msg));
+ out_zero_add(con, data_len(msg));
}
-static void revoke_at_queue_data(struct ceph_connection *con)
+static void revoke_at_queue_data(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
int boundary;
int resid;
- WARN_ON(!data_len(con->out_msg));
+ WARN_ON(!data_len(msg));
WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
- boundary = front_len(con->out_msg) + middle_len(con->out_msg);
+ boundary = front_len(msg) + middle_len(msg);
if (resid > boundary) {
resid -= boundary;
WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head\n", __func__, con);
- if (front_len(con->out_msg))
- prepare_zero_front(con, front_len(con->out_msg));
- if (middle_len(con->out_msg))
- prepare_zero_middle(con, middle_len(con->out_msg));
- prepare_zero_data(con);
+ if (front_len(msg))
+ prepare_zero_front(con, msg, front_len(msg));
+ if (middle_len(msg))
+ prepare_zero_middle(con, msg, middle_len(msg));
+ prepare_zero_data(con, msg);
WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
con->v2.out_state = OUT_S_QUEUE_ZEROS;
return;
}
- boundary = middle_len(con->out_msg);
+ boundary = middle_len(msg);
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending front\n", __func__, con);
- prepare_zero_front(con, resid);
- if (middle_len(con->out_msg))
- prepare_zero_middle(con, middle_len(con->out_msg));
- prepare_zero_data(con);
- queue_zeros(con);
+ prepare_zero_front(con, msg, resid);
+ if (middle_len(msg))
+ prepare_zero_middle(con, msg, middle_len(msg));
+ prepare_zero_data(con, msg);
+ queue_zeros(con, msg);
return;
}
WARN_ON(!resid);
dout("%s con %p was sending middle\n", __func__, con);
- prepare_zero_middle(con, resid);
- prepare_zero_data(con);
- queue_zeros(con);
+ prepare_zero_middle(con, msg, resid);
+ prepare_zero_data(con, msg);
+ queue_zeros(con, msg);
}
-static void revoke_at_queue_data_cont(struct ceph_connection *con)
+static void revoke_at_queue_data_cont(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
int sent, resid; /* current piece of data */
- WARN_ON(!data_len(con->out_msg));
+ WARN_ON(!data_len(msg));
WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
@@ -3568,10 +3543,11 @@ static void revoke_at_queue_data_cont(struct ceph_connection *con)
con->v2.out_iter.count -= resid;
out_zero_add(con, con->v2.out_cursor.total_resid);
- queue_zeros(con);
+ queue_zeros(con, msg);
}
-static void revoke_at_finish_message(struct ceph_connection *con)
+static void revoke_at_finish_message(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
int boundary;
int resid;
@@ -3579,39 +3555,39 @@ static void revoke_at_finish_message(struct ceph_connection *con)
WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
- if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
- !data_len(con->out_msg)) {
+ if (!front_len(msg) && !middle_len(msg) &&
+ !data_len(msg)) {
WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head (empty message) - noop\n",
__func__, con);
return;
}
- boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
+ boundary = front_len(msg) + middle_len(msg) +
CEPH_EPILOGUE_PLAIN_LEN;
if (resid > boundary) {
resid -= boundary;
WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head\n", __func__, con);
- if (front_len(con->out_msg))
- prepare_zero_front(con, front_len(con->out_msg));
- if (middle_len(con->out_msg))
- prepare_zero_middle(con, middle_len(con->out_msg));
+ if (front_len(msg))
+ prepare_zero_front(con, msg, front_len(msg));
+ if (middle_len(msg))
+ prepare_zero_middle(con, msg, middle_len(msg));
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
con->v2.out_state = OUT_S_QUEUE_ZEROS;
return;
}
- boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
+ boundary = middle_len(msg) + CEPH_EPILOGUE_PLAIN_LEN;
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending front\n", __func__, con);
- prepare_zero_front(con, resid);
- if (middle_len(con->out_msg))
- prepare_zero_middle(con, middle_len(con->out_msg));
+ prepare_zero_front(con, msg, resid);
+ if (middle_len(msg))
+ prepare_zero_middle(con, msg, middle_len(msg));
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
- queue_zeros(con);
+ queue_zeros(con, msg);
return;
}
@@ -3619,9 +3595,9 @@ static void revoke_at_finish_message(struct ceph_connection *con)
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending middle\n", __func__, con);
- prepare_zero_middle(con, resid);
+ prepare_zero_middle(con, msg, resid);
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
- queue_zeros(con);
+ queue_zeros(con, msg);
return;
}
@@ -3629,7 +3605,7 @@ static void revoke_at_finish_message(struct ceph_connection *con)
dout("%s con %p was sending epilogue - noop\n", __func__, con);
}
-void ceph_con_v2_revoke(struct ceph_connection *con)
+void ceph_con_v2_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
WARN_ON(con->v2.out_zero);
@@ -3642,13 +3618,13 @@ void ceph_con_v2_revoke(struct ceph_connection *con)
switch (con->v2.out_state) {
case OUT_S_QUEUE_DATA:
- revoke_at_queue_data(con);
+ revoke_at_queue_data(con, msg);
break;
case OUT_S_QUEUE_DATA_CONT:
- revoke_at_queue_data_cont(con);
+ revoke_at_queue_data_cont(con, msg);
break;
case OUT_S_FINISH_MESSAGE:
- revoke_at_finish_message(con);
+ revoke_at_finish_message(con, msg);
break;
default:
WARN(1, "bad out_state %d", con->v2.out_state);
@@ -3814,10 +3790,8 @@ void ceph_con_v2_reset_protocol(struct ceph_connection *con)
memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
- if (con->v2.hmac_tfm) {
- crypto_free_shash(con->v2.hmac_tfm);
- con->v2.hmac_tfm = NULL;
- }
+ memzero_explicit(&con->v2.hmac_key, sizeof(con->v2.hmac_key));
+ con->v2.hmac_key_set = false;
if (con->v2.gcm_req) {
aead_request_free(con->v2.gcm_req);
con->v2.gcm_req = NULL;
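
On the messenger_v2.c conversion above: the crypto_shash HMAC transform is replaced by the lib/crypto HMAC-SHA256 interface, whose calls cannot fail, so the error plumbing disappears along with the tfm allocation. A minimal kernel-style sketch of the resulting key lifecycle, assuming only the hmac_sha256_* API from <crypto/sha2.h> that the patch itself uses (the struct and function names around it are illustrative):

    #include <crypto/sha2.h>
    #include <linux/string.h>

    struct sess {
            struct hmac_sha256_key key;     /* precomputed key schedule */
            bool key_set;
    };

    static void sess_set_key(struct sess *s, const u8 *raw, size_t len)
    {
            /* one-time key expansion; later per-message use cannot fail */
            hmac_sha256_preparekey(&s->key, raw, len);
            s->key_set = true;
    }

    static void sess_clear_key(struct sess *s)
    {
            /* no tfm to free any more; just wipe the key material */
            memzero_explicit(&s->key, sizeof(s->key));
            s->key_set = false;
    }
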
diff --git a/net/core/dev.c b/net/core/dev.c
index a64cef2c537e..2acfa44927da 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -12176,6 +12176,35 @@ static void dev_memory_provider_uninstall(struct net_device *dev)
}
}
+/* devices must be UP and netdev_lock()'d */
+static void netif_close_many_and_unlock(struct list_head *close_head)
+{
+ struct net_device *dev, *tmp;
+
+ netif_close_many(close_head, false);
+
+ /* ... now unlock them */
+ list_for_each_entry_safe(dev, tmp, close_head, close_list) {
+ netdev_unlock(dev);
+ list_del_init(&dev->close_list);
+ }
+}
+
+static void netif_close_many_and_unlock_cond(struct list_head *close_head)
+{
+#ifdef CONFIG_LOCKDEP
+ /* We can only track up to MAX_LOCK_DEPTH locks per task.
+ *
+ * Reserve half the available slots for additional locks possibly
+ * taken by notifiers and (soft)irqs.
+ */
+ unsigned int limit = MAX_LOCK_DEPTH / 2;
+
+ if (lockdep_depth(current) > limit)
+ netif_close_many_and_unlock(close_head);
+#endif
+}
+
void unregister_netdevice_many_notify(struct list_head *head,
u32 portid, const struct nlmsghdr *nlh)
{
@@ -12208,17 +12237,18 @@ void unregister_netdevice_many_notify(struct list_head *head,
/* If device is running, close it first. Start with ops locked... */
list_for_each_entry(dev, head, unreg_list) {
+ if (!(dev->flags & IFF_UP))
+ continue;
if (netdev_need_ops_lock(dev)) {
list_add_tail(&dev->close_list, &close_head);
netdev_lock(dev);
}
+ netif_close_many_and_unlock_cond(&close_head);
}
- netif_close_many(&close_head, true);
- /* ... now unlock them and go over the rest. */
+ netif_close_many_and_unlock(&close_head);
+ /* ... now go over the rest. */
list_for_each_entry(dev, head, unreg_list) {
- if (netdev_need_ops_lock(dev))
- netdev_unlock(dev);
- else
+ if (!netdev_need_ops_lock(dev))
list_add_tail(&dev->close_list, &close_head);
}
netif_close_many(&close_head, true);
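
The dev.c hunks above batch netdev_lock() acquisitions but flush the batch early under lockdep, since the per-task held-lock table holds at most MAX_LOCK_DEPTH entries. A condensed kernel-style sketch of that guard, assuming a hypothetical flush_batch() helper that closes and unlocks everything accumulated so far:

    #include <linux/list.h>
    #include <linux/lockdep.h>
    #include <linux/sched.h>

    static void flush_batch(struct list_head *batch);   /* hypothetical: close + unlock all */

    static void add_to_batch_bounded(struct list_head *batch, struct list_head *entry)
    {
            list_add_tail(entry, batch);
    #ifdef CONFIG_LOCKDEP
            /* keep half of MAX_LOCK_DEPTH free for notifiers and (soft)irqs */
            if (lockdep_depth(current) > MAX_LOCK_DEPTH / 2)
                    flush_batch(batch);
    #endif
    }
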
diff --git a/net/core/filter.c b/net/core/filter.c
index 5d1838ff1ab9..76628df1fc82 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2281,6 +2281,7 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
goto out_drop;
+ skb_dst_drop(skb);
skb_dst_set(skb, dst);
} else if (nh->nh_family != AF_INET6) {
goto out_drop;
@@ -2389,6 +2390,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
goto out_drop;
}
+ skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
}
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index ff8e5b64bf6b..b43911562f4d 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -8,11 +8,13 @@
struct gro_cell {
struct sk_buff_head napi_skbs;
struct napi_struct napi;
+ local_lock_t bh_lock;
};
int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
+ bool have_bh_lock = false;
struct gro_cell *cell;
int res;
@@ -25,6 +27,8 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
goto unlock;
}
+ local_lock_nested_bh(&gcells->cells->bh_lock);
+ have_bh_lock = true;
cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
@@ -39,6 +43,9 @@ drop:
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
+ if (have_bh_lock)
+ local_unlock_nested_bh(&gcells->cells->bh_lock);
+
res = NET_RX_SUCCESS;
unlock:
@@ -54,6 +61,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
struct sk_buff *skb;
int work_done = 0;
+ __local_lock_nested_bh(&cell->bh_lock);
while (work_done < budget) {
skb = __skb_dequeue(&cell->napi_skbs);
if (!skb)
@@ -64,6 +72,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
if (work_done < budget)
napi_complete_done(napi, work_done);
+ __local_unlock_nested_bh(&cell->bh_lock);
return work_done;
}
@@ -79,6 +88,7 @@ int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
__skb_queue_head_init(&cell->napi_skbs);
+ local_lock_init(&cell->bh_lock);
set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
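
On the gro_cells.c change above: on PREEMPT_RT, BH-disabled sections are preemptible, so per-CPU state reached from softirq context needs an explicit local lock. A reduced sketch of the pattern, mirroring the calls the patch adds; the queue payload and function names are illustrative:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>
    #include <linux/skbuff.h>

    struct pcpu_cell {
            struct sk_buff_head q;
            local_lock_t bh_lock;
    };

    static DEFINE_PER_CPU(struct pcpu_cell, cells);

    static void cells_init(void)
    {
            int i;

            for_each_possible_cpu(i) {
                    struct pcpu_cell *cell = per_cpu_ptr(&cells, i);

                    __skb_queue_head_init(&cell->q);
                    local_lock_init(&cell->bh_lock);
            }
    }

    static void cell_enqueue(struct sk_buff *skb)
    {
            struct pcpu_cell *cell;

            /* serializes against preemption on RT; a no-op lock otherwise */
            local_lock_nested_bh(&cells.bh_lock);
            cell = this_cpu_ptr(&cells);
            __skb_queue_tail(&cell->q, skb);
            local_unlock_nested_bh(&cells.bh_lock);
    }
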
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 492728f9e021..1a5edec485f1 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -468,11 +468,60 @@ page_pool_dma_sync_for_device(const struct page_pool *pool,
}
}
+static int page_pool_register_dma_index(struct page_pool *pool,
+ netmem_ref netmem, gfp_t gfp)
+{
+ int err = 0;
+ u32 id;
+
+ if (unlikely(!PP_DMA_INDEX_BITS))
+ goto out;
+
+ if (in_softirq())
+ err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
+ PP_DMA_INDEX_LIMIT, gfp);
+ else
+ err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
+ PP_DMA_INDEX_LIMIT, gfp);
+ if (err) {
+ WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
+ goto out;
+ }
+
+ netmem_set_dma_index(netmem, id);
+out:
+ return err;
+}
+
+static int page_pool_release_dma_index(struct page_pool *pool,
+ netmem_ref netmem)
+{
+ struct page *old, *page = netmem_to_page(netmem);
+ unsigned long id;
+
+ if (unlikely(!PP_DMA_INDEX_BITS))
+ return 0;
+
+ id = netmem_get_dma_index(netmem);
+ if (!id)
+ return -1;
+
+ if (in_softirq())
+ old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
+ else
+ old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
+ if (old != page)
+ return -1;
+
+ netmem_set_dma_index(netmem, 0);
+
+ return 0;
+}
+
static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
{
dma_addr_t dma;
int err;
- u32 id;
/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
* since dma_addr_t can be either 32 or 64 bits and does not always fit
@@ -491,18 +540,10 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t g
goto unmap_failed;
}
- if (in_softirq())
- err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
- PP_DMA_INDEX_LIMIT, gfp);
- else
- err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
- PP_DMA_INDEX_LIMIT, gfp);
- if (err) {
- WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
+ err = page_pool_register_dma_index(pool, netmem, gfp);
+ if (err)
goto unset_failed;
- }
- netmem_set_dma_index(netmem, id);
page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
return true;
@@ -680,8 +721,6 @@ void page_pool_clear_pp_info(netmem_ref netmem)
static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
netmem_ref netmem)
{
- struct page *old, *page = netmem_to_page(netmem);
- unsigned long id;
dma_addr_t dma;
if (!pool->dma_map)
@@ -690,15 +729,7 @@ static __always_inline void __page_pool_release_netmem_dma(struct page_pool *poo
*/
return;
- id = netmem_get_dma_index(netmem);
- if (!id)
- return;
-
- if (in_softirq())
- old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
- else
- old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
- if (old != page)
+ if (page_pool_release_dma_index(pool, netmem))
return;
dma = page_pool_get_dma_addr_netmem(netmem);
@@ -708,7 +739,6 @@ static __always_inline void __page_pool_release_netmem_dma(struct page_pool *poo
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
page_pool_set_dma_addr_netmem(netmem, 0);
- netmem_set_dma_index(netmem, 0);
}
/* Disconnects a page (from a page_pool). API users can have a need
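
The page_pool.c refactor above centralizes the xarray bookkeeping in register/release helpers. The release side relies on xa_cmpxchg() returning the previous entry, which makes a stale id or a concurrent release detectable. A one-function kernel-style sketch of that idea (names illustrative):

    #include <linux/xarray.h>

    /* Clear slot @id only if it still holds @page; false means someone
     * else already released it, or the id was stale.
     */
    static bool release_slot(struct xarray *xa, unsigned long id, struct page *page)
    {
            return xa_cmpxchg(xa, id, page, NULL, 0) == page;
    }
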
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bc12790017b0..6be01454f262 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -7200,6 +7200,7 @@ nodefer: kfree_skb_napi_cache(skb);
DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
DEBUG_NET_WARN_ON_ONCE(skb->destructor);
+ DEBUG_NET_WARN_ON_ONCE(skb_nfct(skb));
sdn = per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + numa_node_id();
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 5ca97ede979c..ff11d3a85a36 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1668,7 +1668,7 @@ void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
nskb->ip_summed = CHECKSUM_NONE;
if (orig_sk) {
skb_set_owner_edemux(nskb, (struct sock *)orig_sk);
- psp_reply_set_decrypted(nskb);
+ psp_reply_set_decrypted(orig_sk, nskb);
}
if (transmit_time)
nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index aaeb5d16f0c9..158a30ae7c5f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -568,20 +568,6 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
-{
- /* we must cap headroom to some upperlimit, else pskb_expand_head
- * will overflow header offsets in skb_headers_offset_update().
- */
- static const unsigned int max_allowed = 512;
-
- if (headroom > max_allowed)
- headroom = max_allowed;
-
- if (headroom > READ_ONCE(dev->needed_headroom))
- WRITE_ONCE(dev->needed_headroom, headroom);
-}
-
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
u8 proto, int tunnel_hlen)
{
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7949d16506a4..8a18aeca7ab0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1791,6 +1791,7 @@ EXPORT_IPV6_MOD(tcp_peek_len);
/* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
int tcp_set_rcvlowat(struct sock *sk, int val)
{
+ struct tcp_sock *tp = tcp_sk(sk);
int space, cap;
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
@@ -1809,7 +1810,9 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
space = tcp_space_from_win(sk, val);
if (space > sk->sk_rcvbuf) {
WRITE_ONCE(sk->sk_rcvbuf, space);
- WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
+
+ if (tp->window_clamp && tp->window_clamp < val)
+ WRITE_ONCE(tp->window_clamp, val);
}
return 0;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b44fdc309633..31ea5af49f2d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -7509,7 +7509,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
&foc, TCP_SYNACK_FASTOPEN, skb);
/* Add the child socket directly into the accept queue */
if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
- reqsk_fastopen_remove(fastopen_sk, req, false);
bh_unlock_sock(fastopen_sk);
sock_put(fastopen_sk);
goto drop_and_free;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bb3576ac0ad7..b94efb3050d2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2369,7 +2369,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
u32 max_segs)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 send_win, cong_win, limit, in_flight;
+ u32 send_win, cong_win, limit, in_flight, threshold;
+ u64 srtt_in_ns, expected_ack, how_far_is_the_ack;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *head;
int win_divisor;
@@ -2431,9 +2432,19 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
head = tcp_rtx_queue_head(sk);
if (!head)
goto send_now;
- delta = tp->tcp_clock_cache - head->tstamp;
- /* If next ACK is likely to come too late (half srtt), do not defer */
- if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
+
+ srtt_in_ns = (u64)(NSEC_PER_USEC >> 3) * tp->srtt_us;
+ /* When is the ACK expected ? */
+ expected_ack = head->tstamp + srtt_in_ns;
+ /* How far from now is the ACK expected ? */
+ how_far_is_the_ack = expected_ack - tp->tcp_clock_cache;
+
+ /* If next ACK is likely to come too late,
+ * ie in more than min(1ms, half srtt), do not defer.
+ */
+ threshold = min(srtt_in_ns >> 1, NSEC_PER_MSEC);
+
+ if ((s64)(how_far_is_the_ack - threshold) > 0)
goto send_now;
/* Ok, it looks like it is advisable to defer.
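
To make the new tcp_tso_should_defer() cutoff concrete: tp->srtt_us stores 8x the smoothed RTT in microseconds, so (NSEC_PER_USEC >> 3) * srtt_us is simply srtt in nanoseconds. A standalone worked example of the min(srtt/2, 1 ms) threshold, with assumed sample values:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_USEC 1000ULL
    #define NSEC_PER_MSEC 1000000ULL

    int main(void)
    {
            uint64_t srtt_us_field = 8 * 4000;      /* srtt = 4 ms, stored <<3 */
            uint64_t srtt_ns = (NSEC_PER_USEC >> 3) * srtt_us_field;  /* 4,000,000 ns */
            uint64_t sent_ago_ns = 3500000;         /* head transmitted 3.5 ms ago */
            int64_t ack_eta = (int64_t)(srtt_ns - sent_ago_ns);       /* ACK due in 0.5 ms */
            uint64_t thresh = srtt_ns / 2 < NSEC_PER_MSEC ? srtt_ns / 2 : NSEC_PER_MSEC;

            /* defer only when the ACK should arrive within min(srtt/2, 1 ms) */
            printf("%s\n", ack_eta > (int64_t)thresh ? "send now" : "defer");  /* "defer" */
            return 0;
    }
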
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 95241093b7f0..30dfbf73729d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1851,8 +1851,6 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
sk_peek_offset_bwd(sk, len);
if (!skb_shared(skb)) {
- if (unlikely(udp_skb_has_head_state(skb)))
- skb_release_head_state(skb);
skb_attempt_defer_free(skb);
return;
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3262e81223df..6405072050e0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1257,8 +1257,7 @@ route_lookup:
*/
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr)
+ dst->header_len + t->hlen;
- if (max_headroom > READ_ONCE(dev->needed_headroom))
- WRITE_ONCE(dev->needed_headroom, max_headroom);
+ ip_tunnel_adj_headroom(dev, max_headroom);
err = ip6_tnl_encap(skb, t, &proto, fl6);
if (err)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9622c2776ade..59c4977a811a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -974,7 +974,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
if (sk) {
/* unconstify the socket only to attach it to buff with care. */
skb_set_owner_edemux(buff, (struct sock *)sk);
- psp_reply_set_decrypted(buff);
+ psp_reply_set_decrypted(sk, buff);
if (sk->sk_state == TCP_TIME_WAIT)
mark = inet_twsk(sk)->tw_mark;
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 8ee66a86c3bc..1a62e384766a 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -22,6 +22,35 @@ void nft_objref_eval(const struct nft_expr *expr,
obj->ops->eval(obj, regs, pkt);
}
+static int nft_objref_validate_obj_type(const struct nft_ctx *ctx, u32 type)
+{
+ unsigned int hooks;
+
+ switch (type) {
+ case NFT_OBJECT_SYNPROXY:
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
+ hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD);
+
+ return nft_chain_validate_hooks(ctx->chain, hooks);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int nft_objref_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_object *obj = nft_objref_priv(expr);
+
+ return nft_objref_validate_obj_type(ctx, obj->ops->type->type);
+}
+
static int nft_objref_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -93,6 +122,7 @@ static const struct nft_expr_ops nft_objref_ops = {
.activate = nft_objref_activate,
.deactivate = nft_objref_deactivate,
.dump = nft_objref_dump,
+ .validate = nft_objref_validate,
.reduce = NFT_REDUCE_READONLY,
};
@@ -197,6 +227,14 @@ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
nf_tables_destroy_set(ctx, priv->set);
}
+static int nft_objref_map_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ const struct nft_objref_map *priv = nft_expr_priv(expr);
+
+ return nft_objref_validate_obj_type(ctx, priv->set->objtype);
+}
+
static const struct nft_expr_ops nft_objref_map_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
@@ -206,6 +244,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
.deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
+ .validate = nft_objref_map_validate,
.reduce = NFT_REDUCE_READONLY,
};
diff --git a/net/psp/psp_sock.c b/net/psp/psp_sock.c
index 5324a7603bed..a931d825d1cc 100644
--- a/net/psp/psp_sock.c
+++ b/net/psp/psp_sock.c
@@ -279,12 +279,12 @@ void psp_twsk_assoc_free(struct inet_timewait_sock *tw)
psp_assoc_put(pas);
}
-void psp_reply_set_decrypted(struct sk_buff *skb)
+void psp_reply_set_decrypted(const struct sock *sk, struct sk_buff *skb)
{
struct psp_assoc *pas;
rcu_read_lock();
- pas = psp_sk_get_assoc_rcu(skb->sk);
+ pas = psp_sk_get_assoc_rcu(sk);
if (pas && pas->tx.spi)
skb->decrypted = 1;
rcu_read_unlock();
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 4cb8f393434d..3755ba079d07 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -886,7 +886,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
return SCTP_DISPOSITION_CONSUME;
nomem_authev:
- sctp_ulpevent_free(ai_ev);
+ if (ai_ev)
+ sctp_ulpevent_free(ai_ev);
nomem_aiev:
sctp_ulpevent_free(ev);
nomem_ev:
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index a570e7adf270..984e0cf9bf8a 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -18,9 +18,10 @@ config SUNRPC_SWAP
config RPCSEC_GSS_KRB5
tristate "Secure RPC: Kerberos V mechanism"
- depends on SUNRPC && CRYPTO
+ depends on SUNRPC
default y
select SUNRPC_GSS
+ select CRYPTO
select CRYPTO_SKCIPHER
select CRYPTO_HASH
help
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e82212f6b562..a8ec30759a18 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -724,7 +724,7 @@ svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
- if (flavor != RPC_AUTH_GSS) {
+ if (flavor != RPC_AUTH_GSS || checksum.len < XDR_UNIT) {
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index de05ef637bdc..4704dce7284e 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1425,8 +1425,6 @@ svc_process_common(struct svc_rqst *rqstp)
/* Call the function that processes the request. */
rc = process.dispatch(rqstp);
- if (procp->pc_release)
- procp->pc_release(rqstp);
xdr_finish_decode(xdr);
if (!rc)
@@ -1525,6 +1523,14 @@ static void svc_drop(struct svc_rqst *rqstp)
trace_svc_drop(rqstp);
}
+static void svc_release_rqst(struct svc_rqst *rqstp)
+{
+ const struct svc_procedure *procp = rqstp->rq_procinfo;
+
+ if (procp && procp->pc_release)
+ procp->pc_release(rqstp);
+}
+
/**
* svc_process - Execute one RPC transaction
* @rqstp: RPC transaction context
@@ -1564,9 +1570,12 @@ void svc_process(struct svc_rqst *rqstp)
if (unlikely(*p != rpc_call))
goto out_baddir;
- if (!svc_process_common(rqstp))
+ if (!svc_process_common(rqstp)) {
+ svc_release_rqst(rqstp);
goto out_drop;
+ }
svc_send(rqstp);
+ svc_release_rqst(rqstp);
return;
out_baddir:
@@ -1634,6 +1643,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
if (!proc_error) {
/* Processing error: drop the request */
xprt_free_bc_request(req);
+ svc_release_rqst(rqstp);
return;
}
/* Finally, send the reply synchronously */
@@ -1647,6 +1657,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
timeout.to_maxval = timeout.to_initval;
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req, &timeout);
+ svc_release_rqst(rqstp);
if (IS_ERR(task))
return;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 049ab53088e9..6973184ff667 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1014,6 +1014,19 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
struct svc_serv *serv = xprt->xpt_server;
struct svc_deferred_req *dr;
+ /* unregister with rpcbind for when transport type is TCP or UDP.
+ */
+ if (test_bit(XPT_RPCB_UNREG, &xprt->xpt_flags)) {
+ struct svc_sock *svsk = container_of(xprt, struct svc_sock,
+ sk_xprt);
+ struct socket *sock = svsk->sk_sock;
+
+ if (svc_register(serv, xprt->xpt_net, sock->sk->sk_family,
+ sock->sk->sk_protocol, 0) < 0)
+ pr_warn("failed to unregister %s with rpcbind\n",
+ xprt->xpt_class->xcl_name);
+ }
+
if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
return;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index e2c5e0e626f9..7b90abc5cf0e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -836,6 +836,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
/* data might have come in before data_ready set up */
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
+ set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
/* make sure we get destination address info */
switch (svsk->sk_sk->sk_family) {
@@ -1224,7 +1225,7 @@ err_noclose:
* that the pages backing @xdr are unchanging.
*/
static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
- rpc_fraghdr marker, int *sentp)
+ rpc_fraghdr marker)
{
struct msghdr msg = {
.msg_flags = MSG_SPLICE_PAGES,
@@ -1233,8 +1234,6 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
void *buf;
int ret;
- *sentp = 0;
-
/* The stream record marker is copied into a temporary page
* fragment buffer so that it can be included in rq_bvec.
*/
@@ -1252,10 +1251,7 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
1 + count, sizeof(marker) + rqstp->rq_res.len);
ret = sock_sendmsg(svsk->sk_sock, &msg);
page_frag_free(buf);
- if (ret < 0)
- return ret;
- *sentp += ret;
- return 0;
+ return ret;
}
/**
@@ -1274,7 +1270,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
struct xdr_buf *xdr = &rqstp->rq_res;
rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
(u32)xdr->len);
- int sent, err;
+ int sent;
svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
rqstp->rq_xprt_ctxt = NULL;
@@ -1282,9 +1278,9 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
mutex_lock(&xprt->xpt_mutex);
if (svc_xprt_is_dead(xprt))
goto out_notconn;
- err = svc_tcp_sendmsg(svsk, rqstp, marker, &sent);
- trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
- if (err < 0 || sent != (xdr->len + sizeof(marker)))
+ sent = svc_tcp_sendmsg(svsk, rqstp, marker);
+ trace_svcsock_tcp_send(xprt, sent);
+ if (sent < 0 || sent != (xdr->len + sizeof(marker)))
goto out_close;
mutex_unlock(&xprt->xpt_mutex);
return sent;
@@ -1293,10 +1289,10 @@ out_notconn:
mutex_unlock(&xprt->xpt_mutex);
return -ENOTCONN;
out_close:
- pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
+ pr_notice("rpc-srv/tcp: %s: %s %d when sending %zu bytes - shutting down socket\n",
xprt->xpt_server->sv_name,
- (err < 0) ? "got error" : "sent",
- (err < 0) ? err : sent, xdr->len);
+ (sent < 0) ? "got error" : "sent",
+ sent, xdr->len + sizeof(marker));
svc_xprt_deferred_close(xprt);
mutex_unlock(&xprt->xpt_mutex);
return -EAGAIN;
@@ -1355,6 +1351,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
if (sk->sk_state == TCP_LISTEN) {
strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
+ set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
sk->sk_data_ready = svc_tcp_listen_data_ready;
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
} else {
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index 09434e1143c5..8b01b7ae2690 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -389,7 +389,7 @@ static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
saddr = (struct sockaddr *)&xprt->addr;
port = rpc_get_port(saddr);
- /* buf_len is the len until the first occurence of either
+ /* buf_len is the len until the first occurrence of either
* '\n' or '\0'
*/
buf_len = strcspn(buf, "\n");
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index ea5bb131ebd0..751904f10aab 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1797,7 +1797,7 @@ exit:
* @b: bearer where the message has been received
*
* If the decryption is successful, the decrypted skb is returned directly or
- * as the callback, the encryption header and auth tag will be trimed out
+ * as the callback, the encryption header and auth tag will be trimmed out
* before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete().
* Otherwise, the skb will be freed!
* Note: RX key(s) can be re-aligned, or in case of no key suitable, TX
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index ffe577bf6b51..aad7f96b6009 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -57,7 +57,7 @@
* @conn_idr: identifier set of connection
* @idr_lock: protect the connection identifier set
* @idr_in_use: amount of allocated identifier entry
- * @net: network namspace instance
+ * @net: network namespace instance
* @awork: accept work item
* @rcv_wq: receive workqueue
* @send_wq: send workqueue
@@ -83,7 +83,7 @@ struct tipc_topsrv {
* @sock: socket handler associated with connection
* @flags: indicates connection state
* @server: pointer to connected server
- * @sub_list: lsit to all pertaing subscriptions
+ * @sub_list: list to all pertaining subscriptions
* @sub_lock: lock protecting the subscription list
* @rwork: receive work item
* @outqueue: pointer to first outbound message in queue
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index a3ccb3135e51..39a2ab47fe72 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -255,12 +255,9 @@ int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
if (msg->msg_flags & MSG_MORE)
return -EINVAL;
- rc = tls_handle_open_record(sk, msg->msg_flags);
- if (rc)
- return rc;
-
*record_type = *(unsigned char *)CMSG_DATA(cmsg);
- rc = 0;
+
+ rc = tls_handle_open_record(sk, msg->msg_flags);
break;
default:
return -EINVAL;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index daac9fd4be7e..d17135369980 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1054,7 +1054,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
if (ret == -EINPROGRESS)
num_async++;
else if (ret != -EAGAIN)
- goto send_end;
+ goto end;
}
}
@@ -1112,8 +1112,11 @@ alloc_encrypted:
goto send_end;
tls_ctx->pending_open_record_frags = true;
- if (sk_msg_full(msg_pl))
+ if (sk_msg_full(msg_pl)) {
full_record = true;
+ sk_msg_trim(sk, msg_en,
+ msg_pl->sg.size + prot->overhead_size);
+ }
if (full_record || eor)
goto copied;
@@ -1149,6 +1152,13 @@ alloc_encrypted:
} else if (ret != -EAGAIN)
goto send_end;
}
+
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, msg->msg_flags);
+ }
+
continue;
rollback_iter:
copied -= try_to_copy;
@@ -1204,6 +1214,12 @@ copied:
goto send_end;
}
}
+
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, msg->msg_flags);
+ }
}
continue;
@@ -1223,8 +1239,9 @@ trim_sgl:
goto alloc_encrypted;
}
+send_end:
if (!num_async) {
- goto send_end;
+ goto end;
} else if (num_zc || eor) {
int err;
@@ -1242,7 +1259,7 @@ trim_sgl:
tls_tx_records(sk, msg->msg_flags);
}
-send_end:
+end:
ret = sk_stream_error(sk, msg->msg_flags, ret);
return copied > 0 ? copied : ret;
}
@@ -1637,8 +1654,10 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
if (unlikely(darg->async)) {
err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
- if (err)
- __skb_queue_tail(&ctx->async_hold, darg->skb);
+ if (err) {
+ err = tls_decrypt_async_wait(ctx);
+ darg->async = false;
+ }
return err;
}
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
index 56356d2980c8..8e803c4828c4 100644
--- a/net/vmw_vsock/Kconfig
+++ b/net/vmw_vsock/Kconfig
@@ -72,7 +72,7 @@ config VIRTIO_VSOCKETS_COMMON
config HYPERV_VSOCKETS
tristate "Hyper-V transport for Virtual Sockets"
- depends on VSOCKETS && HYPERV
+ depends on VSOCKETS && HYPERV_VMBUS
help
This module implements a Hyper-V transport for Virtual Sockets.
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index f16f390370dc..1eb8d9f8b104 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -143,14 +143,24 @@ static inline bool xp_unused_options_set(u32 options)
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{
- u64 addr = desc->addr - pool->tx_metadata_len;
- u64 len = desc->len + pool->tx_metadata_len;
- u64 offset = addr & (pool->chunk_size - 1);
+ u64 len = desc->len;
+ u64 addr, offset;
- if (!desc->len)
+ if (!len)
return false;
- if (offset + len > pool->chunk_size)
+ /* Can overflow if desc->addr < pool->tx_metadata_len */
+ if (check_sub_overflow(desc->addr, pool->tx_metadata_len, &addr))
+ return false;
+
+ offset = addr & (pool->chunk_size - 1);
+
+ /*
+ * Can't overflow: @offset is guaranteed to be < ``U32_MAX``
+ * (pool->chunk_size is ``u32``), @len is guaranteed
+ * to be <= ``U32_MAX``.
+ */
+ if (offset + len + pool->tx_metadata_len > pool->chunk_size)
return false;
if (addr >= pool->addrs_cnt)
@@ -158,27 +168,42 @@ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
if (xp_unused_options_set(desc->options))
return false;
+
return true;
}
static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{
- u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len;
- u64 len = desc->len + pool->tx_metadata_len;
+ u64 len = desc->len;
+ u64 addr, end;
- if (!desc->len)
+ if (!len)
return false;
+ /* Can't overflow: @len is guaranteed to be <= ``U32_MAX`` */
+ len += pool->tx_metadata_len;
if (len > pool->chunk_size)
return false;
- if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt ||
- xp_desc_crosses_non_contig_pg(pool, addr, len))
+ /* Can overflow if desc->addr is close to 0 */
+ if (check_sub_overflow(xp_unaligned_add_offset_to_addr(desc->addr),
+ pool->tx_metadata_len, &addr))
+ return false;
+
+ if (addr >= pool->addrs_cnt)
+ return false;
+
+ /* Can overflow if pool->addrs_cnt is high enough */
+ if (check_add_overflow(addr, len, &end) || end > pool->addrs_cnt)
+ return false;
+
+ if (xp_desc_crosses_non_contig_pg(pool, addr, len))
return false;
if (xp_unused_options_set(desc->options))
return false;
+
return true;
}
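
Finally, on the xsk_queue.h validation rework above: the open-coded address arithmetic is replaced with the linux/overflow.h helpers, which return true when the operation wraps (the wrapped value is still written to the destination). A condensed kernel-style sketch of the address-window check, with illustrative parameter names:

    #include <linux/overflow.h>
    #include <linux/types.h>

    static bool desc_window_ok(u64 desc_addr, u64 meta_len, u64 len, u64 addrs_cnt)
    {
            u64 addr, end;

            /* desc_addr < meta_len would wrap below zero */
            if (check_sub_overflow(desc_addr, meta_len, &addr))
                    return false;
            if (addr >= addrs_cnt)
                    return false;
            /* addr + len must neither wrap nor run past the umem */
            if (check_add_overflow(addr, len, &end) || end > addrs_cnt)
                    return false;
            return true;
    }
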