author	Linus Torvalds <torvalds@linux-foundation.org>	2025-04-01 20:00:51 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-04-01 20:00:51 -0700
commit	acc4d5ff0b61eb1715c498b6536c38c1feb7f3c1 (patch)
tree	ac43eb64286548bc95a4da9431dceb8034a51812 /net
parent	3491aa04787f4d7e00da98d94b1b10001c398b5a (diff)
parent	f278b6d5bb465c7fd66f3d103812947e55b376ed (diff)
Merge tag 'net-6.15-rc0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "Rather tiny pull request, mostly so that we can get into our trees
  your fix to the x86 Makefile.

  Current release - regressions:

   - Revert "tcp: avoid atomic operations on sk->sk_rmem_alloc", error
     queue accounting was missed

  Current release - new code bugs:

   - 5 fixes for the netdevice instance locking work

  Previous releases - regressions:

   - usbnet: restore usb%d name exception for local mac addresses

  Previous releases - always broken:

   - rtnetlink: allocate vfinfo size for VF GUIDs when supported, avoid
     spurious GET_LINK failures

   - eth: mana: Switch to page pool for jumbo frames

   - phy: broadcom: Correct BCM5221 PHY model detection

  Misc:

   - selftests: drv-net: replace helpers for referring to other files"

* tag 'net-6.15-rc0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (22 commits)
  Revert "tcp: avoid atomic operations on sk->sk_rmem_alloc"
  bnxt_en: bring back rtnl lock in bnxt_shutdown
  eth: gve: add missing netdev locks on reset and shutdown paths
  selftests: mptcp: ignore mptcp_diag binary
  selftests: mptcp: close fd_in before returning in main_loop
  selftests: mptcp: fix incorrect fd checks in main_loop
  mptcp: fix NULL pointer in can_accept_new_subflow
  octeontx2-af: Free NIX_AF_INT_VEC_GEN irq
  octeontx2-af: Fix mbox INTR handler when num VFs > 64
  net: fix use-after-free in the netdev_nl_sock_priv_destroy()
  selftests: net: use Path helpers in ping
  selftests: net: use the dummy bpf from net/lib
  selftests: drv-net: replace the rpath helper with Path objects
  net: lapbether: use netdev_lockdep_set_classes() helper
  net: phy: broadcom: Correct BCM5221 PHY model detection
  net: usb: usbnet: restore usb%d name exception for local mac addresses
  net/mlx5e: SHAMPO, Make reserved size independent of page size
  net: mana: Switch to page pool for jumbo frames
  MAINTAINERS: Add dedicated entries for phy_link_topology
  net: move replay logic to tc_modify_qdisc
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/core/netdev-genl.c	6
-rw-r--r--	net/core/rtnetlink.c	3
-rw-r--r--	net/ipv4/tcp.c	18
-rw-r--r--	net/ipv4/tcp_fastopen.c	2
-rw-r--r--	net/ipv4/tcp_input.c	6
-rw-r--r--	net/mptcp/subflow.c	15
-rw-r--r--	net/sched/sch_api.c	73
7 files changed, 48 insertions, 75 deletions
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index fd1cfa9707dc..3afeaa8c5dc5 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -951,12 +951,14 @@ void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv)
{
struct net_devmem_dmabuf_binding *binding;
struct net_devmem_dmabuf_binding *temp;
+ struct net_device *dev;
mutex_lock(&priv->lock);
list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
- netdev_lock(binding->dev);
+ dev = binding->dev;
+ netdev_lock(dev);
net_devmem_unbind_dmabuf(binding);
- netdev_unlock(binding->dev);
+ netdev_unlock(dev);
}
mutex_unlock(&priv->lock);
}
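
[Editor's note] The hunk above is the use-after-free fix: net_devmem_unbind_dmabuf() can free the binding, so the device pointer needed for the later unlock is cached before the call. A minimal userspace sketch of that pattern follows; struct binding, unbind() and destroy_all() are illustrative names for the example, not the kernel's code.

#include <stdio.h>
#include <stdlib.h>

struct device { const char *name; };
struct binding { struct device *dev; struct binding *next; };

static void unbind(struct binding *b)
{
    free(b);    /* after this, b (and therefore b->dev) must not be touched */
}

static void destroy_all(struct binding *head)
{
    struct binding *b = head;

    while (b) {
        struct binding *next = b->next;  /* "safe" iteration: grab next first */
        struct device *dev = b->dev;     /* cache dev before unbind() frees b */

        printf("lock %s\n", dev->name);
        unbind(b);
        printf("unlock %s\n", dev->name); /* reading b->dev here would be a UAF */
        b = next;
    }
}

int main(void)
{
    struct device d = { "eth0" };
    struct binding *b = malloc(sizeof(*b));

    if (!b)
        return 1;
    b->dev = &d;
    b->next = NULL;
    destroy_all(b);
    return 0;
}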
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5a24a30dfc2d..334db17be37d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1177,6 +1177,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
/* IFLA_VF_STATS_TX_DROPPED */
nla_total_size_64bit(sizeof(__u64)));
}
+ if (dev->netdev_ops->ndo_get_vf_guid)
+ size += num_vfs * 2 *
+ nla_total_size(sizeof(struct ifla_vf_guid));
return size;
} else
return 0;
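
[Editor's note] The rtnetlink hunk above adds the two per-VF GUID attributes to the size estimate so the netlink message buffer is large enough when the driver implements ndo_get_vf_guid. As a loose userspace illustration of that bug class (an undersized estimate makes the later attribute fill fail), here is a hedged sketch; the TLV layout, sizes and names below are invented for the example, not rtnetlink's.

#include <stdio.h>
#include <string.h>

#define TLV_HDR     4u                      /* room for a type + length header */
#define ALIGN4(x)   (((x) + 3u) & ~3u)

struct guid { unsigned char b[8]; };

static unsigned int tlv_total_size(unsigned int payload)
{
    return TLV_HDR + ALIGN4(payload);
}

/* size estimate: two GUIDs (node + port) for every VF */
static unsigned int vf_guid_size(unsigned int num_vfs)
{
    return num_vfs * 2u * tlv_total_size(sizeof(struct guid));
}

static int put_tlv(unsigned char *buf, unsigned int len, unsigned int *off,
                   const void *payload, unsigned int plen)
{
    unsigned int need = tlv_total_size(plen);

    if (*off + need > len)
        return -1;                       /* estimate too small: the fill fails */
    memset(buf + *off, 0, TLV_HDR);      /* header left zeroed in this sketch */
    memcpy(buf + *off + TLV_HDR, payload, plen);
    *off += need;
    return 0;
}

int main(void)
{
    unsigned char msg[256];
    struct guid g = { { 0 } };
    unsigned int num_vfs = 2, off = 0;
    unsigned int budget = vf_guid_size(num_vfs);   /* 48 bytes here */

    for (unsigned int i = 0; i < num_vfs; i++) {
        if (put_tlv(msg, budget, &off, &g, sizeof(g)) ||   /* node GUID */
            put_tlv(msg, budget, &off, &g, sizeof(g)))     /* port GUID */
            return 1;
    }
    printf("used %u of %u bytes\n", off, budget);
    return 0;
}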
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ea8de00f669d..6edc441b3702 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1525,25 +1525,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
__tcp_cleanup_rbuf(sk, copied);
}
-/* private version of sock_rfree() avoiding one atomic_sub() */
-void tcp_sock_rfree(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
- unsigned int len = skb->truesize;
-
- sock_owned_by_me(sk);
- atomic_set(&sk->sk_rmem_alloc,
- atomic_read(&sk->sk_rmem_alloc) - len);
-
- sk_forward_alloc_add(sk, len);
- sk_mem_reclaim(sk);
-}
-
static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (likely(skb->destructor == tcp_sock_rfree)) {
- tcp_sock_rfree(skb);
+ if (likely(skb->destructor == sock_rfree)) {
+ sock_rfree(skb);
skb->destructor = NULL;
skb->sk = NULL;
return skb_attempt_defer_free(skb);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index ca40665145c6..1a6b1bc54245 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -189,7 +189,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
tcp_segs_in(tp, skb);
__skb_pull(skb, tcp_hdrlen(skb));
sk_forced_mem_schedule(sk, skb->truesize);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
TCP_SKB_CB(skb)->seq++;
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e1f952fbac48..a35018e2d0ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5171,7 +5171,7 @@ end:
if (tcp_is_sack(tp))
tcp_grow_window(sk, skb, false);
skb_condense(skb);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
}
}
@@ -5187,7 +5187,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
if (!eaten) {
tcp_add_receive_queue(sk, skb);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
}
return eaten;
}
@@ -5504,7 +5504,7 @@ skip_this:
__skb_queue_before(list, skb, nskb);
else
__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
- tcp_skb_set_owner_r(nskb, sk);
+ skb_set_owner_r(nskb, sk);
mptcp_skb_ext_move(nskb, skb);
/* Copy data, releasing collapsed skbs. */
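
[Editor's note] The tcp.c, tcp_fastopen.c and tcp_input.c hunks above all belong to the revert of the non-atomic sk_rmem_alloc optimization: the lock-free variant was only valid if every path that frees receive memory holds the socket lock, and error-queue accounting did not. A small userspace sketch of that distinction, assuming C11 atomics and purely illustrative names (this is not the kernel's sock_rfree()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int rmem_alloc;            /* stands in for sk->sk_rmem_alloc */

/* safe from any context: one atomic subtraction */
static void rfree_atomic(int truesize)
{
    atomic_fetch_sub_explicit(&rmem_alloc, truesize, memory_order_relaxed);
}

/*
 * "Optimized" variant: a plain read-modify-write, valid only while the
 * owner lock is held. If another path frees memory without that lock,
 * concurrent updates can be lost.
 */
static void rfree_locked(int truesize)
{
    int old = atomic_load_explicit(&rmem_alloc, memory_order_relaxed);

    atomic_store_explicit(&rmem_alloc, old - truesize, memory_order_relaxed);
}

int main(void)
{
    atomic_store(&rmem_alloc, 4096);
    rfree_locked(1024);    /* fine only if the caller holds the lock */
    rfree_atomic(1024);    /* always fine */
    printf("rmem_alloc = %d\n", atomic_load(&rmem_alloc));
    return 0;
}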
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index efe8d86496db..409bd415ef1d 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -754,8 +754,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
subflow_req = mptcp_subflow_rsk(req);
msk = subflow_req->msk;
- if (!msk)
- return false;
subflow_generate_hmac(READ_ONCE(msk->remote_key),
READ_ONCE(msk->local_key),
@@ -850,12 +848,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
} else if (subflow_req->mp_join) {
mptcp_get_options(skb, &mp_opt);
- if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
- !subflow_hmac_valid(req, &mp_opt) ||
- !mptcp_can_accept_new_subflow(subflow_req->msk)) {
- SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK))
fallback = true;
- }
}
create_child:
@@ -905,6 +899,13 @@ create_child:
goto dispose_child;
}
+ if (!subflow_hmac_valid(req, &mp_opt) ||
+ !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
+ goto dispose_child;
+ }
+
/* move the msk reference ownership to the subflow */
subflow_req->msk = NULL;
ctx->conn = (struct sock *)owner;
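
[Editor's note] The mptcp hunks above move the HMAC and can_accept_new_subflow() checks from request time to after the child socket is created, where the msk reference is stable; a failure now disposes of the child with MPTCP_RST_EPROHIBIT instead of falling back earlier. Below is a hedged userspace sketch of that general "create, then validate, then dispose on failure" shape; all names and the slot counter are invented for the example, not MPTCP's.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct msk { int slots; };
struct child { struct msk *owner; };

static bool can_accept(const struct msk *m)
{
    return m->slots > 0;        /* dereferences m, so m must be valid here */
}

static struct child *create_child(struct msk *m)
{
    struct child *c = malloc(sizeof(*c));

    if (c)
        c->owner = m;
    return c;
}

static struct child *accept_subflow(struct msk *m)
{
    struct child *c = create_child(m);   /* create first */

    if (!c)
        return NULL;

    /* validate late, once the owner reference is pinned by the child */
    if (!c->owner || !can_accept(c->owner)) {
        free(c);                          /* the "dispose_child" path */
        return NULL;
    }
    c->owner->slots--;
    return c;
}

int main(void)
{
    struct msk m = { .slots = 1 };
    struct child *c = accept_subflow(&m);

    printf("%s\n", c ? "accepted" : "rejected");
    free(c);
    return 0;
}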
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index defb05c1fba4..f74a097f54ae 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1267,38 +1267,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
struct qdisc_size_table *stab;
ops = qdisc_lookup_ops(kind);
-#ifdef CONFIG_MODULES
- if (ops == NULL && kind != NULL) {
- char name[IFNAMSIZ];
- if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
- /* We dropped the RTNL semaphore in order to
- * perform the module load. So, even if we
- * succeeded in loading the module we have to
- * tell the caller to replay the request. We
- * indicate this using -EAGAIN.
- * We replay the request because the device may
- * go away in the mean time.
- */
- netdev_unlock_ops(dev);
- rtnl_unlock();
- request_module(NET_SCH_ALIAS_PREFIX "%s", name);
- rtnl_lock();
- netdev_lock_ops(dev);
- ops = qdisc_lookup_ops(kind);
- if (ops != NULL) {
- /* We will try again qdisc_lookup_ops,
- * so don't keep a reference.
- */
- module_put(ops->owner);
- err = -EAGAIN;
- goto err_out;
- }
- }
- }
-#endif
-
- err = -ENOENT;
if (!ops) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
goto err_out;
}
@@ -1623,8 +1593,7 @@ static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack,
struct net_device *dev,
struct nlattr *tca[TCA_MAX + 1],
- struct tcmsg *tcm,
- bool *replay)
+ struct tcmsg *tcm)
{
struct Qdisc *q = NULL;
struct Qdisc *p = NULL;
@@ -1789,13 +1758,8 @@ create_n_graft2:
tcm->tcm_parent, tcm->tcm_handle,
tca, &err, extack);
}
- if (q == NULL) {
- if (err == -EAGAIN) {
- *replay = true;
- return 0;
- }
+ if (!q)
return err;
- }
graft:
err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
@@ -1808,6 +1772,27 @@ graft:
return 0;
}
+static void request_qdisc_module(struct nlattr *kind)
+{
+ struct Qdisc_ops *ops;
+ char name[IFNAMSIZ];
+
+ if (!kind)
+ return;
+
+ ops = qdisc_lookup_ops(kind);
+ if (ops) {
+ module_put(ops->owner);
+ return;
+ }
+
+ if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
+ rtnl_unlock();
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
+ rtnl_lock();
+ }
+}
+
/*
* Create/change qdisc.
*/
@@ -1818,27 +1803,23 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
struct tcmsg *tcm;
- bool replay;
int err;
-replay:
- /* Reinit, just in case something touches this. */
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
return err;
+ request_qdisc_module(tca[TCA_KIND]);
+
tcm = nlmsg_data(n);
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV;
- replay = false;
netdev_lock_ops(dev);
- err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm, &replay);
+ err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm);
netdev_unlock_ops(dev);
- if (replay)
- goto replay;
return err;
}
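
[Editor's note] The sch_api change above moves module loading for an unknown qdisc kind out of qdisc_create(), which had to drop the RTNL lock mid-operation and ask the caller to replay with -EAGAIN, into tc_modify_qdisc(), before the device is looked up and its ops lock is taken. A minimal userspace sketch of that "resolve the sleepable dependency before taking the lock" shape, using a pthread mutex and invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static bool module_loaded;

static bool ops_available(const char *kind)
{
    return module_loaded || strcmp(kind, "builtin") == 0;
}

/* may sleep, so it must not run under dev_lock (like request_module()) */
static void load_module(const char *kind)
{
    (void)kind;
    module_loaded = true;
}

static int modify_qdisc(const char *kind)
{
    int err;

    /* resolve the dependency up front, before any lock is held */
    if (!ops_available(kind))
        load_module(kind);

    pthread_mutex_lock(&dev_lock);
    err = ops_available(kind) ? 0 : -2;   /* -ENOENT-like failure */
    /* ... grafting work would happen here, with no replay needed ... */
    pthread_mutex_unlock(&dev_lock);
    return err;
}

int main(void)
{
    printf("modify: %d\n", modify_qdisc("fq_codel"));
    return 0;
}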