Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/can/kvaser_pciefd.c | 3
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 58
-rw-r--r-- drivers/net/ethernet/airoha/airoha_eth.c | 2
-rw-r--r-- drivers/net/ethernet/airoha/airoha_eth.h | 1
-rw-r--r-- drivers/net/ethernet/airoha/airoha_ppe.c | 54
-rw-r--r-- drivers/net/ethernet/airoha/airoha_regs.h | 10
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx_dqo.c | 3
-rw-r--r-- drivers/net/ethernet/huawei/hinic3/hinic3_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 29
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 289
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 47
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.c | 181
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lib.c | 18
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 9
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 45
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 8
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 2
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_virtchnl.h | 1
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_star_emac.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 1
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c | 21
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_est.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_stats.c | 8
-rw-r--r-- drivers/net/hyperv/netvsc_bpf.c | 2
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r-- drivers/net/ovpn/io.c | 8
-rw-r--r-- drivers/net/ovpn/netlink.c | 16
-rw-r--r-- drivers/net/ovpn/peer.c | 4
-rw-r--r-- drivers/net/ovpn/socket.c | 68
-rw-r--r-- drivers/net/ovpn/socket.h | 4
-rw-r--r-- drivers/net/ovpn/tcp.c | 73
-rw-r--r-- drivers/net/ovpn/tcp.h | 3
-rw-r--r-- drivers/net/ovpn/udp.c | 46
-rw-r--r-- drivers/net/ovpn/udp.h | 4
-rw-r--r-- drivers/net/usb/aqc111.c | 8
-rw-r--r-- drivers/net/usb/ch9200.c | 7
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 26
-rw-r--r-- drivers/net/wireguard/device.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/Kconfig | 2
-rw-r--r-- drivers/net/wireless/ath/ath12k/Kconfig | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/fw.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/mld.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 2
-rw-r--r-- drivers/net/wwan/mhi_wwan_mbim.c | 9
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_netdev.c | 11
55 files changed, 630 insertions(+), 512 deletions(-)
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 7d3066691d5d..52301511ed1b 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -966,7 +966,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
u32 status, tx_nr_packets_max;
netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
- KVASER_PCIEFD_CAN_TX_MAX_COUNT);
+ roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT));
if (!netdev)
return -ENOMEM;
@@ -995,7 +995,6 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq;
- can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 132683ed3abe..862bdccb7439 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -22,6 +22,7 @@
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
@@ -1322,41 +1323,17 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- u8 rgmii_ctrl = 0, off;
-
- if (port == dev->imp_port)
- off = B53_RGMII_CTRL_IMP;
- else
- off = B53_RGMII_CTRL_P(port);
+ u8 rgmii_ctrl = 0;
- b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
-
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC);
- rgmii_ctrl |= RGMII_CTRL_DLL_RXC;
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC);
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- default:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- break;
- }
+ b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- if (port != dev->imp_port) {
- if (is63268(dev))
- rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
+ if (is63268(dev))
+ rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
- rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- }
+ rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+ b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl);
dev_dbg(ds->dev, "Configured port %d for %s\n", port,
phy_modes(interface));
@@ -1377,8 +1354,7 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
* tx_clk aligned timing (restoring to reset defaults)
*/
b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
- RGMII_CTRL_TIMING_SEL);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
* sure that we enable the port TX clock internal delay to
@@ -1398,7 +1374,10 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
if (interface == PHY_INTERFACE_MODE_RGMII)
rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
- rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+
+ if (dev->chip_id != BCM53115_DEVICE_ID)
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+
b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
dev_info(ds->dev, "Configured port %d for %s\n", port,
@@ -1462,6 +1441,10 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
+ /* BCM63xx RGMII ports support RGMII */
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ phy_interface_set_rgmii(config->supported_interfaces);
+
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100;
@@ -1501,7 +1484,7 @@ static void b53_phylink_mac_config(struct phylink_config *config,
struct b53_device *dev = ds->priv;
int port = dp->index;
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
b53_adjust_63xx_rgmii(ds, port, interface);
if (mode == MLO_AN_FIXED) {
@@ -2353,6 +2336,9 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
+ if (!b53_support_eee(ds, port))
+ return 0;
+
ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2367,7 +2353,7 @@ bool b53_support_eee(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- return !is5325(dev) && !is5365(dev);
+ return !is5325(dev) && !is5365(dev) && !is63xx(dev);
}
EXPORT_SYMBOL(b53_support_eee);
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index d1d3b854361e..a7ec609d64de 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -84,6 +84,8 @@ static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+
+ airoha_ppe_init_upd_mem(port);
}
static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index b815697302bf..a970b789cf23 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -614,6 +614,7 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
int airoha_ppe_init(struct airoha_eth *eth);
void airoha_ppe_deinit(struct airoha_eth *eth);
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
u32 hash);
void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 12d32c92717a..9067d2fc7706 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -223,6 +223,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
int dsa_port = airoha_get_dsa_port(&dev);
struct airoha_foe_mac_info_common *l2;
u32 qdata, ports_pad, val;
+ u8 smac_id = 0xf;
memset(hwe, 0, sizeof(*hwe));
@@ -257,6 +258,8 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
*/
if (airhoa_is_lan_gdm_port(port))
val |= AIROHA_FOE_IB2_FAST_PATH;
+
+ smac_id = port->id;
}
if (is_multicast_ether_addr(data->eth.h_dest))
@@ -291,7 +294,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
hwe->ipv4.l2.src_mac_lo =
get_unaligned_be16(data->eth.h_source + 4);
} else {
- l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
}
if (data->vlan.num) {
@@ -636,7 +639,6 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
struct airoha_foe_entry *hwe_p, hwe;
struct airoha_flow_table_entry *f;
- struct airoha_foe_mac_info *l2;
int type;
hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
@@ -653,18 +655,25 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
memcpy(&hwe, hwe_p, sizeof(*hwe_p));
hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
- l2 = &hwe.bridge.l2;
- memcpy(l2, &e->data.bridge.l2, sizeof(*l2));
type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
- if (type == PPE_PKT_TYPE_IPV4_HNAPT)
- memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
- sizeof(hwe.ipv4.new_tuple));
- else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T &&
- l2->common.etype == ETH_P_IP)
- l2->common.etype = ETH_P_IPV6;
-
- hwe.bridge.ib2 = e->data.bridge.ib2;
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+ hwe.ipv6.ib2 = e->data.bridge.ib2;
+ /* Setting smac_id to 0xf instructs the hw to keep the
+ * original source MAC address.
+ */
+ hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
+ 0xf);
+ } else {
+ memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+ sizeof(hwe.bridge.l2));
+ hwe.bridge.ib2 = e->data.bridge.ib2;
+ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+ sizeof(hwe.ipv4.new_tuple));
+ }
+
hwe.bridge.data = e->data.bridge.data;
airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
@@ -1238,6 +1247,27 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
airoha_ppe_foe_insert_entry(ppe, skb, hash);
}
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct net_device *dev = port->dev;
+ const u8 *addr = dev->dev_addr;
+ u32 val;
+
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+
+ val = (addr[0] << 8) | addr[1];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+}
+
int airoha_ppe_init(struct airoha_eth *eth)
{
struct airoha_ppe *ppe;
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index d931530fc96f..04187eb40ec6 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -313,6 +313,16 @@
#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+#define REG_UPDMEM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
+#define PPE_UPDMEM_ACK_MASK BIT(31)
+#define PPE_UPDMEM_ADDR_MASK GENMASK(11, 8)
+#define PPE_UPDMEM_OFFSET_MASK GENMASK(7, 4)
+#define PPE_UPDMEM_SEL_MASK GENMASK(3, 2)
+#define PPE_UPDMEM_WR_MASK BIT(1)
+#define PPE_UPDMEM_REQ_MASK BIT(0)
+
+#define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
+
#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 636520bb4b8c..81a74e07464f 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -96,7 +96,6 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
depends on PCI && (IPV6 || IPV6=n)
- depends on MMU
select BNX2
select UIO
help
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e1ffbd561fac..7cd1eda0b449 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2153,7 +2153,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
};
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
- .value = cpu_to_be64(priv->rx[0].fill_cnt),
+ .value = cpu_to_be64(priv->rx[idx].fill_cnt),
.queue_id = cpu_to_be32(idx),
};
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index a27f1574a733..9d705d94b065 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -764,6 +764,9 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
s16 completion_tag;
pkt = gve_alloc_pending_packet(tx);
+ if (!pkt)
+ return -ENOMEM;
+
pkt->skb = skb;
completion_tag = pkt - tx->dqo.pending_packets;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index 093aa6d775ff..497f2a36f35d 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -324,8 +324,6 @@ static __init int hinic3_nic_lld_init(void)
{
int err;
- pr_info("%s: %s\n", HINIC3_NIC_DRV_NAME, HINIC3_NIC_DRV_DESC);
-
err = hinic3_lld_init();
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 9de3e0ba3731..f7a98ff43a57 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -268,7 +268,6 @@ struct iavf_adapter {
struct list_head vlan_filter_list;
int num_vlan_filters;
struct list_head mac_filter_list;
- struct mutex crit_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 288bb5b2e72e..2b2b315205b5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -4,6 +4,8 @@
#include <linux/bitfield.h>
#include <linux/uaccess.h>
+#include <net/netdev_lock.h>
+
/* ethtool support for iavf */
#include "iavf.h"
@@ -1256,9 +1258,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct iavf_fdir_fltr *fltr;
- int count = 50;
int err;
+ netdev_assert_locked(adapter->netdev);
+
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
@@ -1277,14 +1280,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (!fltr)
return -ENOMEM;
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(fltr);
- return -EINVAL;
- }
- udelay(1);
- }
-
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
if (!err)
err = iavf_fdir_add_fltr(adapter, fltr);
@@ -1292,7 +1287,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (err)
kfree(fltr);
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -1435,11 +1429,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
{
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
- int count = 50, err = 0;
bool symm = false;
u64 hash_flds;
+ int err = 0;
u32 hdrs;
+ netdev_assert_locked(adapter->netdev);
+
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
@@ -1463,15 +1459,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
return -EINVAL;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(rss_new);
- return -EINVAL;
- }
-
- udelay(1);
- }
-
spin_lock_bh(&adapter->adv_rss_lock);
rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
if (rss_old) {
@@ -1500,8 +1487,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!err)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
- mutex_unlock(&adapter->crit_lock);
-
if (!rss_new_add)
kfree(rss_new);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6d7ba4d67a19..2c0bb41809a4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1287,11 +1287,11 @@ static void iavf_configure(struct iavf_adapter *adapter)
/**
* iavf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
static void iavf_up_complete(struct iavf_adapter *adapter)
{
+ netdev_assert_locked(adapter->netdev);
+
iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -1410,13 +1410,13 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
/**
* iavf_down - Shutdown the connection processing
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ netdev_assert_locked(netdev);
+
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -2025,22 +2025,21 @@ err:
* iavf_finish_config - do all netdev work that needs RTNL
* @work: our work_struct
*
- * Do work that needs both RTNL and crit_lock.
- **/
+ * Do work that needs RTNL.
+ */
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
- bool locks_released = false;
+ bool netdev_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
/* Always take RTNL first to prevent circular lock dependency;
- * The dev->lock is needed to update the queue number
+ * the dev->lock (== netdev lock) is needed to update the queue number.
*/
rtnl_lock();
netdev_lock(adapter->netdev);
- mutex_lock(&adapter->crit_lock);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
adapter->netdev->reg_state == NETREG_REGISTERED &&
@@ -2059,22 +2058,21 @@ static void iavf_finish_config(struct work_struct *work)
netif_set_real_num_tx_queues(adapter->netdev, pairs);
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(adapter->netdev);
- locks_released = true;
+ netdev_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again.*/
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(adapter->netdev);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
goto out;
}
}
@@ -2090,10 +2088,8 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- if (!locks_released) {
- mutex_unlock(&adapter->crit_lock);
+ if (!netdev_released)
netdev_unlock(adapter->netdev);
- }
rtnl_unlock();
}
@@ -2911,28 +2907,15 @@ err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
-/**
- * iavf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
- **/
-static void iavf_watchdog_task(struct work_struct *work)
+static const int IAVF_NO_RESCHED = -1;
+
+/* return: msec delay for requeueing itself */
+static int iavf_watchdog_step(struct iavf_adapter *adapter)
{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- watchdog_task.work);
- struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- netdev_lock(netdev);
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE) {
- netdev_unlock(netdev);
- return;
- }
-
- goto restart_watchdog;
- }
+ netdev_assert_locked(adapter->netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
@@ -2940,39 +2923,19 @@ static void iavf_watchdog_task(struct work_struct *work)
switch (adapter->state) {
case __IAVF_STARTUP:
iavf_startup(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -2980,27 +2943,18 @@ static void iavf_watchdog_task(struct work_struct *work)
* watchdog task, iavf_remove should handle this state
* as it can loop forever
*/
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- return;
+ return IAVF_NO_RESCHED;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, (5 * HZ));
- return;
+ return 5000;
}
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
- return;
+ return 1000;
case __IAVF_COMM_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -3010,9 +2964,7 @@ static void iavf_watchdog_task(struct work_struct *work)
*/
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- return;
+ return IAVF_NO_RESCHED;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -3030,18 +2982,9 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task,
- msecs_to_jiffies(10));
- return;
+ return 10;
case __IAVF_RESETTING:
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
- return;
+ return 2000;
case __IAVF_DOWN:
case __IAVF_DOWN_PENDING:
case __IAVF_TESTING:
@@ -3068,9 +3011,7 @@ static void iavf_watchdog_task(struct work_struct *work)
break;
case __IAVF_REMOVE:
default:
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- return;
+ return IAVF_NO_RESCHED;
}
/* check for hw reset */
@@ -3080,24 +3021,29 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, HZ * 2);
- return;
}
- mutex_unlock(&adapter->crit_lock);
-restart_watchdog:
- netdev_unlock(netdev);
+ return adapter->aq_required ? 20 : 2000;
+}
+
+static void iavf_watchdog_task(struct work_struct *work)
+{
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
+ watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
+ int msec_delay;
+
+ netdev_lock(netdev);
+ msec_delay = iavf_watchdog_step(adapter);
+ /* note that we schedule a different task */
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
- if (adapter->aq_required)
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(20));
- else
+
+ if (msec_delay != IAVF_NO_RESCHED)
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
+ msecs_to_jiffies(msec_delay));
+ netdev_unlock(netdev);
}
/**
@@ -3105,14 +3051,15 @@ restart_watchdog:
* @adapter: board private structure
*
* Set communication failed flag and free all resources.
- * NOTE: This function is expected to be called with crit_lock being held.
- **/
+ */
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *fv, *fvtmp;
struct iavf_cloud_filter *cf, *cftmp;
+ netdev_assert_locked(adapter->netdev);
+
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
/* We don't use netif_running() because it may be true prior to
@@ -3212,17 +3159,7 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
- /* When device is being removed it doesn't make sense to run the reset
- * task, just return in such a case.
- */
netdev_lock(netdev);
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state != __IAVF_REMOVE)
- queue_work(adapter->wq, &adapter->reset_task);
-
- netdev_unlock(netdev);
- return;
- }
iavf_misc_irq_disable(adapter);
if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
@@ -3267,7 +3204,6 @@ static void iavf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -3411,7 +3347,6 @@ continue_reset:
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
wake_up(&adapter->reset_waitqueue);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
return;
@@ -3422,7 +3357,6 @@ reset_err:
}
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3435,6 +3369,7 @@ static void iavf_adminq_task(struct work_struct *work)
{
struct iavf_adapter *adapter =
container_of(work, struct iavf_adapter, adminq_task);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
enum virtchnl_ops v_op;
@@ -3442,13 +3377,7 @@ static void iavf_adminq_task(struct work_struct *work)
u32 val, oldval;
u16 pending;
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
- return;
-
- queue_work(adapter->wq, &adapter->adminq_task);
- goto out;
- }
+ netdev_lock(netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto unlock;
@@ -3515,8 +3444,7 @@ static void iavf_adminq_task(struct work_struct *work)
freedom:
kfree(event.msg_buf);
unlock:
- mutex_unlock(&adapter->crit_lock);
-out:
+ netdev_unlock(netdev);
/* re-enable Admin queue interrupt cause */
iavf_misc_irq_enable(adapter);
}
@@ -4209,8 +4137,8 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
- struct iavf_cloud_filter *filter = NULL;
- int err = -EINVAL, count = 50;
+ struct iavf_cloud_filter *filter;
+ int err;
if (tc < 0) {
dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
@@ -4220,17 +4148,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
-
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(filter);
- return err;
- }
- udelay(1);
- }
-
filter->cookie = cls_flower->cookie;
+ netdev_lock(adapter->netdev);
+
/* bail out here if filter already exists */
spin_lock_bh(&adapter->cloud_filter_list_lock);
if (iavf_find_cf(adapter, &cls_flower->cookie)) {
@@ -4264,7 +4185,7 @@ err:
if (err)
kfree(filter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
return err;
}
@@ -4568,28 +4489,13 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
- * is already taken and iavf_open is called from an upper
- * device's notifier reacting on NETDEV_REGISTER event.
- * We have to leave here to avoid dead lock.
- */
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
- return -EBUSY;
-
- usleep_range(500, 1000);
- }
-
- if (adapter->state != __IAVF_DOWN) {
- err = -EBUSY;
- goto err_unlock;
- }
+ if (adapter->state != __IAVF_DOWN)
+ return -EBUSY;
if (adapter->state == __IAVF_RUNNING &&
!test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
- err = 0;
- goto err_unlock;
+ return 0;
}
/* allocate transmit descriptors */
@@ -4608,9 +4514,7 @@ static int iavf_open(struct net_device *netdev)
goto err_req_irq;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
iavf_add_filter(adapter, adapter->hw.mac.addr);
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* Restore filters that were removed with IFF_DOWN */
@@ -4623,8 +4527,6 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
- mutex_unlock(&adapter->crit_lock);
-
return 0;
err_req_irq:
@@ -4634,8 +4536,6 @@ err_setup_rx:
iavf_free_all_rx_resources(adapter);
err_setup_tx:
iavf_free_all_tx_resources(adapter);
-err_unlock:
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -4659,12 +4559,8 @@ static int iavf_close(struct net_device *netdev)
netdev_assert_locked(netdev);
- mutex_lock(&adapter->crit_lock);
-
- if (adapter->state <= __IAVF_DOWN_PENDING) {
- mutex_unlock(&adapter->crit_lock);
+ if (adapter->state <= __IAVF_DOWN_PENDING)
return 0;
- }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
@@ -4695,7 +4591,6 @@ static int iavf_close(struct net_device *netdev)
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
/* We explicitly don't free resources here because the hardware is
@@ -4714,11 +4609,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
-
netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
+
adapter->aq_required |= aq_to_restore;
- mutex_unlock(&adapter->crit_lock);
+
return 0;
}
@@ -5227,15 +5121,16 @@ iavf_shaper_set(struct net_shaper_binding *binding,
struct iavf_adapter *adapter = netdev_priv(binding->netdev);
const struct net_shaper_handle *handle = &shaper->handle;
struct iavf_ring *tx_ring;
- int ret = 0;
+ int ret;
+
+ netdev_assert_locked(adapter->netdev);
- mutex_lock(&adapter->crit_lock);
if (handle->id >= adapter->num_active_queues)
- goto unlock;
+ return 0;
ret = iavf_verify_shaper(binding, shaper, extack);
if (ret)
- goto unlock;
+ return ret;
tx_ring = &adapter->tx_rings[handle->id];
@@ -5245,9 +5140,7 @@ iavf_shaper_set(struct net_shaper_binding *binding,
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
-unlock:
- mutex_unlock(&adapter->crit_lock);
- return ret;
+ return 0;
}
static int iavf_shaper_del(struct net_shaper_binding *binding,
@@ -5257,9 +5150,10 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
struct iavf_adapter *adapter = netdev_priv(binding->netdev);
struct iavf_ring *tx_ring;
- mutex_lock(&adapter->crit_lock);
+ netdev_assert_locked(adapter->netdev);
+
if (handle->id >= adapter->num_active_queues)
- goto unlock;
+ return 0;
tx_ring = &adapter->tx_rings[handle->id];
tx_ring->q_shaper.bw_min = 0;
@@ -5268,8 +5162,6 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
-unlock:
- mutex_unlock(&adapter->crit_lock);
return 0;
}
@@ -5530,10 +5422,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_alloc_qos_cap;
}
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&adapter->crit_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -5596,22 +5484,24 @@ static int iavf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
+ bool running;
netif_device_detach(netdev);
+ running = netif_running(netdev);
+ if (running)
+ rtnl_lock();
netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
- if (netif_running(netdev)) {
- rtnl_lock();
+ if (running)
iavf_down(adapter);
- rtnl_unlock();
- }
+
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
+ if (running)
+ rtnl_unlock();
return 0;
}
@@ -5688,20 +5578,20 @@ static void iavf_remove(struct pci_dev *pdev)
* There are flows where register/unregister netdev may race.
*/
while (1) {
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(netdev);
if (adapter->state == __IAVF_RUNNING ||
adapter->state == __IAVF_DOWN ||
adapter->state == __IAVF_INIT_FAILED) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
break;
}
/* Simply return if we already went through iavf_shutdown */
if (adapter->state == __IAVF_REMOVE) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
usleep_range(500, 1000);
}
cancel_delayed_work_sync(&adapter->watchdog_task);
@@ -5711,7 +5601,6 @@ static void iavf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5727,9 +5616,11 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
+ netdev_unlock(netdev);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->adminq_task);
+ netdev_lock(netdev);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -5747,8 +5638,6 @@ static void iavf_remove(struct pci_dev *pdev)
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
mutex_destroy(&hw->aq.asq_mutex);
- mutex_unlock(&adapter->crit_lock);
- mutex_destroy(&adapter->crit_lock);
netdev_unlock(netdev);
iounmap(hw->hw_addr);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 20d3baf955e3..d97d4b25b30d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2741,6 +2741,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
}
/**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.tx_ring = ring;
+ }
+}
+
+/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
@@ -2803,7 +2824,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
if (status) {
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
status);
- goto clear_xdp_rings;
+ goto unmap_xdp_rings;
}
/* assign the prog only when it's not already present on VSI;
@@ -2819,6 +2840,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
+unmap_xdp_rings:
+ ice_unmap_xdp_rings(vsi);
clear_xdp_rings:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
@@ -2835,6 +2858,8 @@ err_map_xdp:
mutex_unlock(&pf->avail_q_mutex);
devm_kfree(dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
return -ENOMEM;
}
@@ -2850,7 +2875,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
- int i, v_idx;
+ int i;
/* q_vectors are freed in reset path so there's no point in detaching
* rings
@@ -2858,17 +2883,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx)
- if (!ring->tx_buf || !ice_ring_is_xdp(ring))
- break;
-
- /* restore the value of last node prior to XDP setup */
- q_vector->tx.tx_ring = ring;
- }
+ ice_unmap_xdp_rings(vsi);
free_qmap:
mutex_lock(&pf->avail_q_mutex);
@@ -3013,11 +3028,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ goto resume_if;
} else {
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
ICE_XDP_CFG_FULL);
- if (xdp_ring_err)
+ if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ goto resume_if;
+ }
}
xdp_features_set_redirect_target(vsi->netdev, true);
/* reallocate Rx queues that are used for zero-copy */
@@ -3035,6 +3053,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
}
+resume_if:
if (if_running)
ret = ice_up(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 6ca13c5dcb14..d9d09296d1d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -85,6 +85,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
+ * ice_sched_find_next_vsi_node - find the next node for a given VSI
+ * @vsi_node: VSI support node to start search with
+ *
+ * Return: Next VSI support node, or NULL.
+ *
+ * The function returns a pointer to the next node from the VSI layer
+ * assigned to the given VSI, or NULL if there is no such node.
+ */
+static struct ice_sched_node *
+ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
+{
+ unsigned int vsi_handle = vsi_node->vsi_handle;
+
+ while ((vsi_node = vsi_node->sibling) != NULL)
+ if (vsi_node->vsi_handle == vsi_handle)
+ break;
+
+ return vsi_node;
+}
+
+/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
@@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children;
} else {
- /* This parent is full, try the next sibling */
- parent = parent->sibling;
+ /* This parent is full,
+ * try the next available sibling.
+ */
+ parent = ice_sched_find_next_vsi_node(parent);
/* Don't modify the first node TEID memory if the
* first node was added already in the above call.
* Instead send some temp memory for all other
@@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
+ struct ice_sched_node *next_vsi_node;
+
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < max_children &&
qgrp_node->owner == owner)
break;
qgrp_node = qgrp_node->sibling;
+ if (qgrp_node)
+ continue;
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+ if (!next_vsi_node)
+ break;
+
+ vsi_node = next_vsi_node;
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
}
/* Select the best queue group */
@@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the HW struct
- * @num_qs: number of queues
+ * @num_new_qs: number of new queues that will be added to the tree
* @num_nodes: num nodes array
*
* This function calculates the number of VSI child nodes based on the
* number of queues.
*/
static void
-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
{
- u16 num = num_qs;
+ u16 num = num_new_qs;
u8 i, qgl, vsil;
qgl = ice_sched_get_qgrp_layer(hw);
@@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!parent)
return -EIO;
- if (i == vsil)
+ /* Do not modify the VSI handle for already existing VSI nodes
+ * (if no new VSI node was added to the tree).
+ * Assign the VSI handle only to newly added VSI nodes.
+ */
+ if (i == vsil && num_added)
parent->vsi_handle = vsi_handle;
}
@@ -1813,6 +1851,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
}
/**
+ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
+ * @hw: pointer to the HW struct
+ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
+ * @new_numqs: new number of queues that have to be handled by the VSI
+ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
+ *
+ * This function recalculates the number of VSI support nodes that need to
+ * be added after adding more Tx queues for a given VSI.
+ * The number of new VSI support nodes that shall be added will be saved
+ * to the @new_num_nodes table for the VSI layer.
+ */
+static void
+ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
+ struct ice_sched_node *vsi_node,
+ unsigned int new_numqs, u16 *new_num_nodes)
+{
+ u32 vsi_nodes_cnt = 1;
+ u32 max_queue_cnt = 1;
+ u32 qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ for (u32 i = vsil; i <= qgl; i++)
+ max_queue_cnt *= hw->max_children[i];
+
+ while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
+ vsi_nodes_cnt++;
+
+ if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
+ new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
+ vsi_nodes_cnt;
+}
+
+/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
return status;
}
- if (new_numqs)
- ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
- /* Keep the max number of queue configuration all the time. Update the
- * tree only if number of queues > previous number of queues. This may
+ ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
+ new_numqs, new_num_nodes);
+ ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
+ new_num_nodes);
+
+ /* Never decrease the number of queues in the tree. Update the tree
+ * only if number of queues > previous number of queues. This may
* leave some extra nodes in the tree if number of queues < previous
* number but that wouldn't harm anything. Removing those extra nodes
* may complicate the code if those nodes are part of SRL or
* individually rate limited.
+ * Also, add the required VSI support nodes if the existing ones cannot
+ * handle the requested new number of queues.
*/
+ status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes);
+ if (status)
+ return status;
+
status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
@@ -2013,6 +2096,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
}
/**
+ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
+ * @pi: port information structure
+ * @vsi_node: pointer to the leftmost node of the VSI to be removed
+ * @owner: LAN or RDMA
+ * @tc: TC number
+ *
+ * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC.
+ *
+ * This function removes all the VSI support nodes associated with a given VSI
+ * and its LAN or RDMA children nodes from the scheduler tree.
+ */
+static int
+ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node, u8 owner, u8 tc)
+{
+ u16 vsi_handle = vsi_node->vsi_handle;
+ bool all_vsi_nodes_removed = true;
+ int j = 0;
+
+ while (vsi_node) {
+ struct ice_sched_node *next_vsi_node;
+
+ if (ice_sched_is_leaf_node_present(vsi_node)) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
+ return -EBUSY;
+ }
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner)
+ ice_free_sched_node(pi, vsi_node->children[j]);
+ else
+ j++;
+ }
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children)
+ ice_free_sched_node(pi, vsi_node);
+ else
+ all_vsi_nodes_removed = false;
+
+ vsi_node = next_vsi_node;
+ }
+
+ /* clean up aggregator related VSI info if any */
+ if (all_vsi_nodes_removed)
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+
+ return 0;
+}
+
+/**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
- u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_node)
continue;
- if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = -EBUSY;
+ status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
+ if (status)
goto exit_sched_rm_vsi_cfg;
- }
- while (j < vsi_node->num_children) {
- if (vsi_node->children[j]->owner == owner) {
- ice_free_sched_node(pi, vsi_node->children[j]);
- /* reset the counter again since the num
- * children will be updated after node removal
- */
- j = 0;
- } else {
- j++;
- }
- }
- /* remove the VSI if it has no children */
- if (!vsi_node->num_children) {
- ice_free_sched_node(pi, vsi_node);
- vsi_ctx->sched.vsi_node[i] = NULL;
+ vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up aggregator related VSI info if any */
- ice_sched_rm_agg_vsi_info(pi, vsi_handle);
- }
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
else
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index bab12ecb2df5..4eb20ec2accb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1801,11 +1801,19 @@ void idpf_vc_event_task(struct work_struct *work)
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
return;
- if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
- test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
- set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
- idpf_init_hard_reset(adapter);
- }
+ if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
+ goto func_reset;
+
+ if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
+ goto drv_load;
+
+ return;
+
+func_reset:
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+drv_load:
+ set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ idpf_init_hard_reset(adapter);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 2e356dd10812..993c354aa27a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
+ int csum, tso, needed;
unsigned int count;
__be16 protocol;
- int csum, tso;
count = idpf_tx_desc_count_required(tx_q, skb);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
- if (idpf_tx_maybe_stop_common(tx_q,
- count + IDPF_TX_DESCS_PER_CACHE_LINE +
- IDPF_TX_DESCS_FOR_CTX)) {
+ needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
+ if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ IDPF_DESC_UNUSED(tx_q),
+ needed, needed)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
u64_stats_update_begin(&tx_q->stats_sync);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 631679cdaa6f..5cf440e09d0a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2184,6 +2184,19 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
+/* Global conditions to tell whether the txq (and related resources)
+ * has room to allow the use of "size" descriptors.
+ */
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+{
+ if (IDPF_DESC_UNUSED(tx_q) < size ||
+ IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+ IDPF_TX_BUF_RSV_LOW(tx_q))
+ return 0;
+ return 1;
+}
+
/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
@@ -2194,29 +2207,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
- if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto out;
-
- /* If there are too many outstanding completions expected on the
- * completion queue, stop the TX queue to give the device some time to
- * catch up
- */
- if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
- IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
- goto splitq_stop;
-
- /* Also check for available book keeping buffers; if we are low, stop
- * the queue to wait for more completions
- */
- if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
- goto splitq_stop;
-
- return 0;
-
-splitq_stop:
- netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ idpf_txq_has_room(tx_q, descs_needed),
+ 1, 1))
+ return 0;
-out:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
@@ -2242,12 +2237,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
- }
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index c779fe71df99..36a0f828a6f8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -1049,12 +1049,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
-static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
- u32 needed)
-{
- return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
- IDPF_DESC_UNUSED(tx_q),
- needed, needed);
-}
-
#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 07a9f5ae34fd..24febaaa8fbb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -347,7 +347,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
* All waiting threads will be woken-up and their transaction aborted. Further
* operations on that object will fail.
*/
-static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
int i;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 3522c1238ea2..77578206bada 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -150,5 +150,6 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index b175119a6a7d..b83886a41121 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1463,6 +1463,8 @@ static __maybe_unused int mtk_star_suspend(struct device *dev)
if (netif_running(ndev))
mtk_star_disable(ndev);
+ netif_device_detach(ndev);
+
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
return 0;
@@ -1487,6 +1489,8 @@ static __maybe_unused int mtk_star_resume(struct device *dev)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+ netif_device_attach(ndev);
+
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index cd754cd76bde..d73a2044dc26 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -249,7 +249,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
- u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 max_val_cycles = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC;
u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 427bdc0e4908..7001584f1b7a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -879,6 +879,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
lan966x_vlan_port_set_vlan_aware(port, 0);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 1f9df67f0504..4f75f0688369 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -497,6 +497,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port);
bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
bool vlan_aware);
+void lan966x_vlan_port_rew_host(struct lan966x_port *port);
int lan966x_vlan_port_set_vid(struct lan966x_port *port,
u16 vid,
bool pvid,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index 1c88120eb291..bcb4db76b75c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -297,6 +297,7 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_set_vlan_aware(port, false);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
}
int lan966x_port_changeupper(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index fa34a739c748..7da22520724c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -149,6 +149,27 @@ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
port->vlan_aware = vlan_aware;
}
+/* When the interface is in host mode, the interface should not be VLAN
+ * aware, but it should still insert all the tags it gets from the network
+ * stack. The tags are not carried in the frame data but in the skb, and the
+ * IFH is already configured to pick this tag up. So the rewriter must be
+ * updated to insert the VLAN tag for all frames whose VLAN tag is
+ * different from 0.
+ */
+void lan966x_vlan_port_rew_host(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ /* Tag all frames except when VID=0 */
+ val = REW_TAG_CFG_TAG_CFG_SET(2);
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+}
+
void lan966x_vlan_port_apply(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
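
TAG_CFG selects the rewriter's per-port egress tagging policy; the value 2
written above reads as "tag all frames except VID 0" on this switch family.
A standalone model of the per-frame decision that policy encodes
(illustrative only, rewriter_inserts_tag() is not driver code):

#include <stdbool.h>
#include <stdint.h>

/* Frames whose IFH carries a non-zero VID get a tag inserted by the
 * rewriter; untagged/priority-tagged traffic (VID 0) passes unchanged.
 */
static bool rewriter_inserts_tag(uint16_t ifh_vid)
{
	return ifh_vid != 0;
}
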
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index c9693f77e1f6..ac6f2e3a3fcd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -32,6 +32,11 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
int i, ret = 0;
u32 ctrl;
+ if (!ptp_rate) {
+ netdev_warn(priv->dev, "Invalid PTP rate\n");
+ return -EINVAL;
+ }
+
ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
ret |= est_write(est_addr, EST_TER, cfg->ter, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 085c09039af4..1369fa70bc58 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -805,6 +805,11 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
+ if (!priv->plat->clk_ptp_rate) {
+ netdev_err(priv->dev, "Invalid PTP clock rate\n");
+ return -EINVAL;
+ }
+
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 43c869f64c39..b80c1efdb323 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -430,6 +430,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ static int bus_id = -ENODEV;
int phy_mode;
void *ret;
int rc;
@@ -465,8 +466,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
of_property_read_u32(np, "max-speed", &plat->max_speed);
plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = 0;
+ if (plat->bus_id < 0) {
+ if (bus_id < 0)
+ bus_id = of_alias_get_highest_id("ethernet");
+ /* No ethernet alias found, init at -1 so first bus_id is 0 */
+ if (bus_id < 0)
+ bus_id = -1;
+ plat->bus_id = ++bus_id;
+ }
/* Default to phy auto-detection */
plat->phy_addr = -1;
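
The static counter gives every alias-less probe a unique bus_id: it is
seeded once from of_alias_get_highest_id() so generated ids start above any
DT-provided ones, and falls back to -1 so the very first generated id is 0
when no "ethernet" alias exists at all. A standalone model of the scheme
(userspace C; next_bus_id() is a hypothetical condensation of the hunk
above):

#include <stdio.h>

static int next_bus_id(int alias_id, int highest_alias_id)
{
	static int bus_id = -2;	/* stands in for the -ENODEV seed */

	if (alias_id >= 0)
		return alias_id;		/* a DT alias always wins */
	if (bus_id < -1)
		bus_id = highest_alias_id;	/* seed once; -1 if none exist */
	if (bus_id < 0)
		bus_id = -1;			/* first generated id will be 0 */
	return ++bus_id;
}

int main(void)
{
	int a = next_bus_id(0, 1);	/* DT alias 0 */
	int b = next_bus_id(1, 1);	/* DT alias 1 */
	int c = next_bus_id(-1, 1);	/* no alias: gets 2 */
	int d = next_bus_id(-1, 1);	/* no alias: gets 3 */

	printf("%d %d %d %d\n", a, b, c, d);
	return 0;
}
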
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 429b2d357813..3767ba495e78 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -317,7 +317,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
/* Calculate the clock domain crossing (CDC) error if necessary */
priv->plat->cdc_error_adj = 0;
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ if (priv->plat->has_gmac4)
priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
/* Update the ptp clock parameters based on feature discovery, when
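
These three hunks are one change: with a zero rate rejected up front in
est_configure() and stmmac_init_tstamp_counter(), stmmac_ptp_register() may
divide by clk_ptp_rate unconditionally, which is why its own clk_ptp_rate
test is dropped. A kernel-style sketch of the resulting contract
(cdc_error_adj_ns() is a hypothetical name):

/* Sketch only: once the init paths return -EINVAL for a zero rate, every
 * later consumer may treat clk_ptp_rate as a valid divisor.
 */
static u32 cdc_error_adj_ns(u32 clk_ptp_rate)
{
	/* callers guarantee clk_ptp_rate != 0 (validated at init time) */
	return (2 * NSEC_PER_SEC) / clk_ptp_rate;
}
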
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index e8241e998aa9..7159baa0155c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -28,6 +28,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
spin_lock(&prueth->stats_lock);
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+ /* In MII mode the TX lines are swapped inside the ICSSG, so read TX
+ * stats from slice1 for port0 and from slice0 for port1 to get
+ * accurate TX stats for a given port.
+ */
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII &&
+ icssg_all_miig_stats[i].offset >= ICSSG_TX_PACKET_OFFSET &&
+ icssg_all_miig_stats[i].offset <= ICSSG_TX_BYTE_OFFSET)
+ base = stats_base[slice ^ 1];
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
&val);
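
Only the TX block of the MIIG stats range is redirected; all other counters
still come from the port's own slice. A kernel-style sketch of the base
selection (tx_stats_base() is a hypothetical helper, assuming the two-entry
stats_base[] the driver already uses):

/* slice ^ 1 flips 0 <-> 1, so in MII mode port0 reads its TX counters from
 * slice1's block and port1 from slice0's, matching the swapped TX lines
 * inside the ICSSG.
 */
static u32 tx_stats_base(const u32 stats_base[2], int slice, bool mii_mode)
{
	return stats_base[mii_mode ? slice ^ 1 : slice];
}
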
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index e01c5997a551..1dd3755d9e6d 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = dev_xdp_propagate(vf_netdev, &xdp);
+ ret = netif_xdp_propagate(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 14a0d04e21ae..c41a025c66f0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2462,8 +2462,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
- netvsc_vf_setxdp(vf_netdev, NULL);
-
reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
@@ -2631,7 +2629,9 @@ static int netvsc_probe(struct hv_device *dev,
continue;
netvsc_prepare_bonding(vf_netdev);
+ netdev_lock_ops(vf_netdev);
netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+ netdev_unlock_ops(vf_netdev);
__netvsc_vf_setup(net, vf_netdev);
break;
}
diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
index 10d8afecec55..ebf1e849506b 100644
--- a/drivers/net/ovpn/io.c
+++ b/drivers/net/ovpn/io.c
@@ -134,7 +134,7 @@ void ovpn_decrypt_post(void *data, int ret)
rcu_read_lock();
sock = rcu_dereference(peer->sock);
- if (sock && sock->sock->sk->sk_protocol == IPPROTO_UDP)
+ if (sock && sock->sk->sk_protocol == IPPROTO_UDP)
/* check if this peer changed local or remote endpoint */
ovpn_peer_endpoints_update(peer, skb);
rcu_read_unlock();
@@ -270,12 +270,12 @@ void ovpn_encrypt_post(void *data, int ret)
if (unlikely(!sock))
goto err_unlock;
- switch (sock->sock->sk->sk_protocol) {
+ switch (sock->sk->sk_protocol) {
case IPPROTO_UDP:
- ovpn_udp_send_skb(peer, sock->sock, skb);
+ ovpn_udp_send_skb(peer, sock->sk, skb);
break;
case IPPROTO_TCP:
- ovpn_tcp_send_skb(peer, sock->sock, skb);
+ ovpn_tcp_send_skb(peer, sock->sk, skb);
break;
default:
/* no transport configured yet */
diff --git a/drivers/net/ovpn/netlink.c b/drivers/net/ovpn/netlink.c
index bea03913bfb1..a4ec53def46e 100644
--- a/drivers/net/ovpn/netlink.c
+++ b/drivers/net/ovpn/netlink.c
@@ -501,7 +501,7 @@ int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info)
/* when using a TCP socket the remote IP is not expected */
rcu_read_lock();
sock = rcu_dereference(peer->sock);
- if (sock && sock->sock->sk->sk_protocol == IPPROTO_TCP &&
+ if (sock && sock->sk->sk_protocol == IPPROTO_TCP &&
(attrs[OVPN_A_PEER_REMOTE_IPV4] ||
attrs[OVPN_A_PEER_REMOTE_IPV6])) {
rcu_read_unlock();
@@ -559,14 +559,14 @@ static int ovpn_nl_send_peer(struct sk_buff *skb, const struct genl_info *info,
goto err_unlock;
}
- if (!net_eq(genl_info_net(info), sock_net(sock->sock->sk))) {
+ if (!net_eq(genl_info_net(info), sock_net(sock->sk))) {
id = peernet2id_alloc(genl_info_net(info),
- sock_net(sock->sock->sk),
+ sock_net(sock->sk),
GFP_ATOMIC);
if (nla_put_s32(skb, OVPN_A_PEER_SOCKET_NETNSID, id))
goto err_unlock;
}
- local_port = inet_sk(sock->sock->sk)->inet_sport;
+ local_port = inet_sk(sock->sk)->inet_sport;
rcu_read_unlock();
if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id))
@@ -1153,8 +1153,8 @@ int ovpn_nl_peer_del_notify(struct ovpn_peer *peer)
ret = -EINVAL;
goto err_unlock;
}
- genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
- msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0,
+ OVPN_NLGRP_PEERS, GFP_ATOMIC);
rcu_read_unlock();
return 0;
@@ -1218,8 +1218,8 @@ int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id)
ret = -EINVAL;
goto err_unlock;
}
- genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
- msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0,
+ OVPN_NLGRP_PEERS, GFP_ATOMIC);
rcu_read_unlock();
return 0;
diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
index a1fd27b9c038..4bfcab0c8652 100644
--- a/drivers/net/ovpn/peer.c
+++ b/drivers/net/ovpn/peer.c
@@ -1145,7 +1145,7 @@ static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,
if (sk) {
ovpn_sock = rcu_access_pointer(peer->sock);
- if (!ovpn_sock || ovpn_sock->sock->sk != sk) {
+ if (!ovpn_sock || ovpn_sock->sk != sk) {
spin_unlock_bh(&ovpn->lock);
ovpn_peer_put(peer);
return;
@@ -1175,7 +1175,7 @@ static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk,
if (sk) {
rcu_read_lock();
ovpn_sock = rcu_dereference(peer->sock);
- remove = ovpn_sock && ovpn_sock->sock->sk == sk;
+ remove = ovpn_sock && ovpn_sock->sk == sk;
rcu_read_unlock();
}
diff --git a/drivers/net/ovpn/socket.c b/drivers/net/ovpn/socket.c
index a83cbab72591..9750871ab65c 100644
--- a/drivers/net/ovpn/socket.c
+++ b/drivers/net/ovpn/socket.c
@@ -24,9 +24,9 @@ static void ovpn_socket_release_kref(struct kref *kref)
struct ovpn_socket *sock = container_of(kref, struct ovpn_socket,
refcount);
- if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
+ if (sock->sk->sk_protocol == IPPROTO_UDP)
ovpn_udp_socket_detach(sock);
- else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
+ else if (sock->sk->sk_protocol == IPPROTO_TCP)
ovpn_tcp_socket_detach(sock);
}
@@ -75,14 +75,6 @@ void ovpn_socket_release(struct ovpn_peer *peer)
if (!sock)
return;
- /* sanity check: we should not end up here if the socket
- * was already closed
- */
- if (!sock->sock->sk) {
- DEBUG_NET_WARN_ON_ONCE(1);
- return;
- }
-
/* Drop the reference while holding the sock lock to avoid
* a concurrent ovpn_socket_new() call messing with a partially
* detached socket.
@@ -90,22 +82,24 @@ void ovpn_socket_release(struct ovpn_peer *peer)
* Holding the lock ensures that a socket with refcnt 0 is fully
* detached before it can be picked by a concurrent reader.
*/
- lock_sock(sock->sock->sk);
+ lock_sock(sock->sk);
released = ovpn_socket_put(peer, sock);
- release_sock(sock->sock->sk);
+ release_sock(sock->sk);
/* align all readers with sk_user_data being NULL */
synchronize_rcu();
/* following cleanup should happen with lock released */
if (released) {
- if (sock->sock->sk->sk_protocol == IPPROTO_UDP) {
+ if (sock->sk->sk_protocol == IPPROTO_UDP) {
netdev_put(sock->ovpn->dev, &sock->dev_tracker);
- } else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) {
+ } else if (sock->sk->sk_protocol == IPPROTO_TCP) {
/* wait for TCP jobs to terminate */
ovpn_tcp_socket_wait_finish(sock);
ovpn_peer_put(sock->peer);
}
+ /* drop reference acquired in ovpn_socket_new() */
+ sock_put(sock->sk);
/* we can call plain kfree() because we already waited one RCU
* period due to synchronize_rcu()
*/
@@ -118,12 +112,14 @@ static bool ovpn_socket_hold(struct ovpn_socket *sock)
return kref_get_unless_zero(&sock->refcount);
}
-static int ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer)
+static int ovpn_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct socket *sock,
+ struct ovpn_peer *peer)
{
- if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
- return ovpn_udp_socket_attach(sock, peer->ovpn);
- else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
- return ovpn_tcp_socket_attach(sock, peer);
+ if (sock->sk->sk_protocol == IPPROTO_UDP)
+ return ovpn_udp_socket_attach(ovpn_sock, sock, peer->ovpn);
+ else if (sock->sk->sk_protocol == IPPROTO_TCP)
+ return ovpn_tcp_socket_attach(ovpn_sock, peer);
return -EOPNOTSUPP;
}
@@ -138,14 +134,15 @@ static int ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer)
struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
{
struct ovpn_socket *ovpn_sock;
+ struct sock *sk = sock->sk;
int ret;
- lock_sock(sock->sk);
+ lock_sock(sk);
/* a TCP socket can only be owned by a single peer, therefore there
* can't be any other user
*/
- if (sock->sk->sk_protocol == IPPROTO_TCP && sock->sk->sk_user_data) {
+ if (sk->sk_protocol == IPPROTO_TCP && sk->sk_user_data) {
ovpn_sock = ERR_PTR(-EBUSY);
goto sock_release;
}
@@ -153,8 +150,8 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
/* a UDP socket can be shared across multiple peers, but we must make
* sure it is not owned by something else
*/
- if (sock->sk->sk_protocol == IPPROTO_UDP) {
- u8 type = READ_ONCE(udp_sk(sock->sk)->encap_type);
+ if (sk->sk_protocol == IPPROTO_UDP) {
+ u8 type = READ_ONCE(udp_sk(sk)->encap_type);
/* socket owned by other encapsulation module */
if (type && type != UDP_ENCAP_OVPNINUDP) {
@@ -163,7 +160,7 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
}
rcu_read_lock();
- ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
+ ovpn_sock = rcu_dereference_sk_user_data(sk);
if (ovpn_sock) {
/* socket owned by another ovpn instance, we can't use it */
if (ovpn_sock->ovpn != peer->ovpn) {
@@ -200,11 +197,22 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
goto sock_release;
}
- ovpn_sock->sock = sock;
+ ovpn_sock->sk = sk;
kref_init(&ovpn_sock->refcount);
- ret = ovpn_socket_attach(ovpn_sock, peer);
+ /* the newly created ovpn_socket holds a reference to sk,
+ * therefore we increase its refcount.
+ *
+ * This ovpn_socket instance is referenced by all peers
+ * using the same socket.
+ *
+ * ovpn_socket_release() will take care of dropping the reference.
+ */
+ sock_hold(sk);
+
+ ret = ovpn_socket_attach(ovpn_sock, sock, peer);
if (ret < 0) {
+ sock_put(sk);
kfree(ovpn_sock);
ovpn_sock = ERR_PTR(ret);
goto sock_release;
@@ -213,11 +221,11 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
/* TCP sockets are per-peer, therefore they are linked to their unique
* peer
*/
- if (sock->sk->sk_protocol == IPPROTO_TCP) {
+ if (sk->sk_protocol == IPPROTO_TCP) {
INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work);
ovpn_sock->peer = peer;
ovpn_peer_hold(peer);
- } else if (sock->sk->sk_protocol == IPPROTO_UDP) {
+ } else if (sk->sk_protocol == IPPROTO_UDP) {
/* in UDP we only link the ovpn instance since the socket is
* shared among multiple peers
*/
@@ -226,8 +234,8 @@ struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
GFP_KERNEL);
}
- rcu_assign_sk_user_data(sock->sk, ovpn_sock);
+ rcu_assign_sk_user_data(sk, ovpn_sock);
sock_release:
- release_sock(sock->sk);
+ release_sock(sk);
return ovpn_sock;
}
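
The hold/put pair added above is the standard struct sock ownership rule:
the ovpn_socket takes one sk reference for its whole lifetime, drops it by
hand on the attach failure path (the object was never published), and
otherwise drops it in the release path once the last kref is gone. A
condensed, kernel-style sketch of that lifetime (ovpn_socket_bind() is a
hypothetical name, error handling trimmed):

static struct ovpn_socket *ovpn_socket_bind(struct socket *sock,
					    struct ovpn_peer *peer)
{
	struct sock *sk = sock->sk;
	struct ovpn_socket *os = kzalloc(sizeof(*os), GFP_KERNEL);
	int ret;

	if (!os)
		return ERR_PTR(-ENOMEM);

	os->sk = sk;
	kref_init(&os->refcount);
	sock_hold(sk);			/* os -> sk reference, held for life */

	ret = ovpn_socket_attach(os, sock, peer);
	if (ret < 0) {
		sock_put(sk);		/* never published: unwind by hand */
		kfree(os);
		return ERR_PTR(ret);
	}
	return os;			/* release path ends with sock_put(sk) */
}
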
diff --git a/drivers/net/ovpn/socket.h b/drivers/net/ovpn/socket.h
index 00d856b1a5d8..4afcec71040d 100644
--- a/drivers/net/ovpn/socket.h
+++ b/drivers/net/ovpn/socket.h
@@ -22,7 +22,7 @@ struct ovpn_peer;
* @ovpn: ovpn instance owning this socket (UDP only)
* @dev_tracker: reference tracker for associated dev (UDP only)
* @peer: unique peer transmitting over this socket (TCP only)
- * @sock: the low level sock object
+ * @sk: the low level sock object
* @refcount: number of contexts currently referencing this object
* @work: member used to schedule release routine (it may block)
* @tcp_tx_work: work for deferring outgoing packet processing (TCP only)
@@ -36,7 +36,7 @@ struct ovpn_socket {
struct ovpn_peer *peer;
};
- struct socket *sock;
+ struct sock *sk;
struct kref refcount;
struct work_struct work;
struct work_struct tcp_tx_work;
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
index 7c42d84987ad..289f62c5d2c7 100644
--- a/drivers/net/ovpn/tcp.c
+++ b/drivers/net/ovpn/tcp.c
@@ -124,14 +124,18 @@ static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb)
* this peer, therefore ovpn_peer_hold() is not expected to fail
*/
if (WARN_ON(!ovpn_peer_hold(peer)))
- goto err;
+ goto err_nopeer;
ovpn_recv(peer, skb);
return;
err:
+ /* take a reference for deferred peer deletion; it should never fail */
+ if (WARN_ON(!ovpn_peer_hold(peer)))
+ goto err_nopeer;
+ schedule_work(&peer->tcp.defer_del_work);
dev_dstats_rx_dropped(peer->ovpn->dev);
+err_nopeer:
kfree_skb(skb);
- ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
}
static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
@@ -186,18 +190,18 @@ out:
void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock)
{
struct ovpn_peer *peer = ovpn_sock->peer;
- struct socket *sock = ovpn_sock->sock;
+ struct sock *sk = ovpn_sock->sk;
strp_stop(&peer->tcp.strp);
skb_queue_purge(&peer->tcp.user_queue);
/* restore CBs that were saved in ovpn_sock_set_tcp_cb() */
- sock->sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready;
- sock->sk->sk_write_space = peer->tcp.sk_cb.sk_write_space;
- sock->sk->sk_prot = peer->tcp.sk_cb.prot;
- sock->sk->sk_socket->ops = peer->tcp.sk_cb.ops;
+ sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready;
+ sk->sk_write_space = peer->tcp.sk_cb.sk_write_space;
+ sk->sk_prot = peer->tcp.sk_cb.prot;
+ sk->sk_socket->ops = peer->tcp.sk_cb.ops;
- rcu_assign_sk_user_data(sock->sk, NULL);
+ rcu_assign_sk_user_data(sk, NULL);
}
void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock)
@@ -283,10 +287,10 @@ void ovpn_tcp_tx_work(struct work_struct *work)
sock = container_of(work, struct ovpn_socket, tcp_tx_work);
- lock_sock(sock->sock->sk);
+ lock_sock(sock->sk);
if (sock->peer)
- ovpn_tcp_send_sock(sock->peer, sock->sock->sk);
- release_sock(sock->sock->sk);
+ ovpn_tcp_send_sock(sock->peer, sock->sk);
+ release_sock(sock->sk);
}
static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
@@ -307,15 +311,15 @@ static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
ovpn_tcp_send_sock(peer, sk);
}
-void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
struct sk_buff *skb)
{
u16 len = skb->len;
*(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len);
- spin_lock_nested(&sock->sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
- if (sock_owned_by_user(sock->sk)) {
+ spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
+ if (sock_owned_by_user(sk)) {
if (skb_queue_len(&peer->tcp.out_queue) >=
READ_ONCE(net_hotdata.max_backlog)) {
dev_dstats_tx_dropped(peer->ovpn->dev);
@@ -324,10 +328,10 @@ void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock,
}
__skb_queue_tail(&peer->tcp.out_queue, skb);
} else {
- ovpn_tcp_send_sock_skb(peer, sock->sk, skb);
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
}
unlock:
- spin_unlock(&sock->sk->sk_lock.slock);
+ spin_unlock(&sk->sk_lock.slock);
}
static void ovpn_tcp_release(struct sock *sk)
@@ -474,7 +478,6 @@ static void ovpn_tcp_peer_del_work(struct work_struct *work)
int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
struct ovpn_peer *peer)
{
- struct socket *sock = ovpn_sock->sock;
struct strp_callbacks cb = {
.rcv_msg = ovpn_tcp_rcv,
.parse_msg = ovpn_tcp_parse,
@@ -482,20 +485,20 @@ int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
int ret;
/* make sure no pre-existing encapsulation handler exists */
- if (sock->sk->sk_user_data)
+ if (ovpn_sock->sk->sk_user_data)
return -EBUSY;
/* only a fully connected socket is expected. Connection should be
* handled in userspace
*/
- if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ if (ovpn_sock->sk->sk_state != TCP_ESTABLISHED) {
net_err_ratelimited("%s: provided TCP socket is not in ESTABLISHED state: %d\n",
netdev_name(peer->ovpn->dev),
- sock->sk->sk_state);
+ ovpn_sock->sk->sk_state);
return -EINVAL;
}
- ret = strp_init(&peer->tcp.strp, sock->sk, &cb);
+ ret = strp_init(&peer->tcp.strp, ovpn_sock->sk, &cb);
if (ret < 0) {
DEBUG_NET_WARN_ON_ONCE(1);
return ret;
@@ -503,31 +506,31 @@ int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
INIT_WORK(&peer->tcp.defer_del_work, ovpn_tcp_peer_del_work);
- __sk_dst_reset(sock->sk);
+ __sk_dst_reset(ovpn_sock->sk);
skb_queue_head_init(&peer->tcp.user_queue);
skb_queue_head_init(&peer->tcp.out_queue);
/* save current CBs so that they can be restored upon socket release */
- peer->tcp.sk_cb.sk_data_ready = sock->sk->sk_data_ready;
- peer->tcp.sk_cb.sk_write_space = sock->sk->sk_write_space;
- peer->tcp.sk_cb.prot = sock->sk->sk_prot;
- peer->tcp.sk_cb.ops = sock->sk->sk_socket->ops;
+ peer->tcp.sk_cb.sk_data_ready = ovpn_sock->sk->sk_data_ready;
+ peer->tcp.sk_cb.sk_write_space = ovpn_sock->sk->sk_write_space;
+ peer->tcp.sk_cb.prot = ovpn_sock->sk->sk_prot;
+ peer->tcp.sk_cb.ops = ovpn_sock->sk->sk_socket->ops;
/* assign our static CBs and prot/ops */
- sock->sk->sk_data_ready = ovpn_tcp_data_ready;
- sock->sk->sk_write_space = ovpn_tcp_write_space;
+ ovpn_sock->sk->sk_data_ready = ovpn_tcp_data_ready;
+ ovpn_sock->sk->sk_write_space = ovpn_tcp_write_space;
- if (sock->sk->sk_family == AF_INET) {
- sock->sk->sk_prot = &ovpn_tcp_prot;
- sock->sk->sk_socket->ops = &ovpn_tcp_ops;
+ if (ovpn_sock->sk->sk_family == AF_INET) {
+ ovpn_sock->sk->sk_prot = &ovpn_tcp_prot;
+ ovpn_sock->sk->sk_socket->ops = &ovpn_tcp_ops;
} else {
- sock->sk->sk_prot = &ovpn_tcp6_prot;
- sock->sk->sk_socket->ops = &ovpn_tcp6_ops;
+ ovpn_sock->sk->sk_prot = &ovpn_tcp6_prot;
+ ovpn_sock->sk->sk_socket->ops = &ovpn_tcp6_ops;
}
/* avoid using task_frag */
- sock->sk->sk_allocation = GFP_ATOMIC;
- sock->sk->sk_use_task_frag = false;
+ ovpn_sock->sk->sk_allocation = GFP_ATOMIC;
+ ovpn_sock->sk->sk_use_task_frag = false;
/* enqueue the RX worker */
strp_check_rcv(&peer->tcp.strp);
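
The error-path rework in ovpn_tcp_rcv() above follows from its locking
context: the strparser callback runs off the socket's receive path, so
tearing the peer down inline there is unsafe, and the peer is instead pinned
with a reference and deleted from process context via defer_del_work. A
condensed sketch of that bounce (ovpn_tcp_defer_peer_del() is a hypothetical
name; the fields are the ones used in the diff):

static void ovpn_tcp_defer_peer_del(struct ovpn_peer *peer)
{
	/* the reference taken here is consumed by the deferred worker */
	if (WARN_ON(!ovpn_peer_hold(peer)))
		return;
	schedule_work(&peer->tcp.defer_del_work);
}
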
diff --git a/drivers/net/ovpn/tcp.h b/drivers/net/ovpn/tcp.h
index 10aefa834cf3..a3aa3570ae5e 100644
--- a/drivers/net/ovpn/tcp.h
+++ b/drivers/net/ovpn/tcp.h
@@ -30,7 +30,8 @@ void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock);
* Required by the OpenVPN protocol in order to extract packets from
* the TCP stream on the receiver side.
*/
-void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock, struct sk_buff *skb);
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb);
void ovpn_tcp_tx_work(struct work_struct *work);
#endif /* _NET_OVPN_TCP_H_ */
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
index aef8c0406ec9..bff00946eae2 100644
--- a/drivers/net/ovpn/udp.c
+++ b/drivers/net/ovpn/udp.c
@@ -43,7 +43,7 @@ static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
return NULL;
/* make sure that sk matches our stored transport socket */
- if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk))
+ if (unlikely(!ovpn_sock->sk || sk != ovpn_sock->sk))
return NULL;
return ovpn_sock;
@@ -335,32 +335,22 @@ out:
/**
* ovpn_udp_send_skb - prepare skb and send it via UDP
* @peer: the destination peer
- * @sock: the RCU protected peer socket
+ * @sk: peer socket
* @skb: the packet to send
*/
-void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
struct sk_buff *skb)
{
- int ret = -1;
+ int ret;
skb->dev = peer->ovpn->dev;
/* no checksum performed at this layer */
skb->ip_summed = CHECKSUM_NONE;
- /* get socket info */
- if (unlikely(!sock)) {
- net_warn_ratelimited("%s: no sock for remote peer %u\n",
- netdev_name(peer->ovpn->dev), peer->id);
- goto out;
- }
-
/* crypto layer -> transport (UDP) */
- ret = ovpn_udp_output(peer, &peer->dst_cache, sock->sk, skb);
-out:
- if (unlikely(ret < 0)) {
+ ret = ovpn_udp_output(peer, &peer->dst_cache, sk, skb);
+ if (unlikely(ret < 0))
kfree_skb(skb);
- return;
- }
}
static void ovpn_udp_encap_destroy(struct sock *sk)
@@ -383,6 +373,7 @@ static void ovpn_udp_encap_destroy(struct sock *sk)
/**
* ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
* @ovpn_sock: socket to configure
+ * @sock: the socket container to be passed to setup_udp_tunnel_sock()
* @ovpn: the openvpn instance to link
*
* After invoking this function, the sock will be controlled by ovpn so that
@@ -390,7 +381,7 @@ static void ovpn_udp_encap_destroy(struct sock *sk)
*
* Return: 0 on success or a negative error code otherwise
*/
-int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
struct ovpn_priv *ovpn)
{
struct udp_tunnel_sock_cfg cfg = {
@@ -398,17 +389,16 @@ int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
.encap_rcv = ovpn_udp_encap_recv,
.encap_destroy = ovpn_udp_encap_destroy,
};
- struct socket *sock = ovpn_sock->sock;
struct ovpn_socket *old_data;
int ret;
/* make sure no pre-existing encapsulation handler exists */
rcu_read_lock();
- old_data = rcu_dereference_sk_user_data(sock->sk);
+ old_data = rcu_dereference_sk_user_data(ovpn_sock->sk);
if (!old_data) {
/* socket is currently unused - we can take it */
rcu_read_unlock();
- setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg);
+ setup_udp_tunnel_sock(sock_net(ovpn_sock->sk), sock, &cfg);
return 0;
}
@@ -421,7 +411,7 @@ int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
* Unlike TCP, a single UDP socket can be used to talk to many remote
* hosts and therefore openvpn instantiates one only for all its peers
*/
- if ((READ_ONCE(udp_sk(sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
+ if ((READ_ONCE(udp_sk(ovpn_sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
old_data->ovpn == ovpn) {
netdev_dbg(ovpn->dev,
"provided socket already owned by this interface\n");
@@ -442,8 +432,16 @@ int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
*/
void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
{
- struct udp_tunnel_sock_cfg cfg = { };
+ struct sock *sk = ovpn_sock->sk;
+
+ /* Re-enable multicast loopback */
+ inet_set_bit(MC_LOOP, sk);
+ /* Disable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+ inet_dec_convert_csum(sk);
+
+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ WRITE_ONCE(udp_sk(sk)->encap_rcv, NULL);
+ WRITE_ONCE(udp_sk(sk)->encap_destroy, NULL);
- setup_udp_tunnel_sock(sock_net(ovpn_sock->sock->sk), ovpn_sock->sock,
- &cfg);
+ rcu_assign_sk_user_data(sk, NULL);
}
diff --git a/drivers/net/ovpn/udp.h b/drivers/net/ovpn/udp.h
index 9994eb6e0428..fe26fbe25c5a 100644
--- a/drivers/net/ovpn/udp.h
+++ b/drivers/net/ovpn/udp.h
@@ -15,11 +15,11 @@ struct ovpn_peer;
struct ovpn_priv;
struct socket;
-int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
struct ovpn_priv *ovpn);
void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock);
-void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
struct sk_buff *skb);
#endif /* _NET_OVPN_UDP_H_ */
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 453a2cf82753..9201ee10a13f 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -31,11 +31,11 @@ static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
USB_RECIP_DEVICE, value, index, data, size);
if (unlikely(ret < size)) {
- ret = ret < 0 ? ret : -ENODATA;
-
netdev_warn(dev->net,
"Failed to read(0x%x) reg index 0x%04x: %d\n",
cmd, index, ret);
+
+ ret = ret < 0 ? ret : -ENODATA;
}
return ret;
@@ -50,11 +50,11 @@ static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
USB_RECIP_DEVICE, value, index, data, size);
if (unlikely(ret < size)) {
- ret = ret < 0 ? ret : -ENODATA;
-
netdev_warn(dev->net,
"Failed to read(0x%x) reg index 0x%04x: %d\n",
cmd, index, ret);
+
+ ret = ret < 0 ? ret : -ENODATA;
}
return ret;
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index f69d9b902da0..a206ffa76f1b 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -178,6 +178,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
+ int ret;
netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
__func__, phy_id, loc);
@@ -185,8 +186,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (phy_id != 0)
return -ENODEV;
- control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
- CONTROL_TIMEOUT_MS);
+ ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
+ CONTROL_TIMEOUT_MS);
+ if (ret < 0)
+ return ret;
return (buff[0] | buff[1] << 8);
}
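
Before this fix a failed control transfer fell through and the function
decoded whatever happened to be left in buff[]. The corrected pattern,
reduced to a standalone sketch (userspace C; xfer() is a hypothetical
stand-in for control_read()):

#include <stdint.h>

/* Check the transfer result before interpreting the buffer; on success the
 * PHY register value is assembled little-endian from the two bytes.
 */
static int read_phy_reg(int (*xfer)(uint8_t buf[2]), uint8_t buff[2])
{
	int ret = xfer(buff);

	if (ret < 0)
		return ret;	/* propagate the negative errno */
	return buff[0] | (buff[1] << 8);
}
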
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 2440e30c5bd1..0572f6a9bdb6 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1572,6 +1572,30 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
return (hlen + (hdr.tcp->doff << 2));
}
+static void
+vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
@@ -1885,6 +1909,8 @@ sop_done:
if (segCnt != 0 && mss != 0) {
skb_shinfo(skb)->gso_type = rcd->v4 ?
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ if (encap_lro)
+ vmxnet3_lro_tunnel(skb, skb->protocol);
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_segs = segCnt;
} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
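
The check/no-check split in vmxnet3_lro_tunnel() mirrors how aggregated
tunnel packets are later resegmented: a non-zero outer UDP checksum must be
recomputed per segment (SKB_GSO_UDP_TUNNEL_CSUM), while a zero checksum,
which VXLAN-style encapsulations may legitimately use, needs no per-segment
work (SKB_GSO_UDP_TUNNEL). The rule in isolation (tunnel_gso_flag() is an
illustrative helper, not part of the driver):

static unsigned int tunnel_gso_flag(const struct udphdr *uh)
{
	return uh->check ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
}
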
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 3ffeeba5dccf..4a529f1f9bea 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -366,6 +366,7 @@ static int wg_newlink(struct net_device *dev,
if (ret < 0)
goto err_free_handshake_queue;
+ dev_set_threaded(dev, true);
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 2e935d381b6b..659ef134ef16 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -24,7 +24,7 @@ config ATH11K_PCI
select MHI_BUS
select QRTR
select QRTR_MHI
- select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
help
This module adds support for PCIE bus
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index b3b15e1eb282..1ea1af1b8f6c 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -7,7 +7,7 @@ config ATH12K
select MHI_BUS
select QRTR
select QRTR_MHI
- select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
help
Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
802.11be) family of chipsets, for example WCN7850 and
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
index 73ed8d5cab43..9d2c087360e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
@@ -349,10 +349,6 @@ int iwl_mld_load_fw(struct iwl_mld *mld)
if (ret)
goto err;
- ret = iwl_mld_init_mcc(mld);
- if (ret)
- goto err;
-
mld->fw_status.running = true;
return 0;
@@ -546,6 +542,10 @@ int iwl_mld_start_fw(struct iwl_mld *mld)
if (ret)
goto error;
+ ret = iwl_mld_init_mcc(mld);
+ if (ret)
+ goto error;
+
return 0;
error:
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index 8cdd960c5245..e8820e7cf8fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -653,7 +653,8 @@ iwl_mld_nic_error(struct iwl_op_mode *op_mode,
* It might not actually be true that we'll restart, but the
* setting doesn't matter if we're going to be unbound either.
*/
- if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+ if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT &&
+ mld->fw_status.running)
mld->fw_status.in_hw_restart = true;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 0f056a6641bd..956b491ae5a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -6360,8 +6360,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
(struct iwl_mvm_internal_rxq_notif *)cmd->payload;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
- .data[0] = &cmd,
- .len[0] = sizeof(cmd),
+ .data[0] = cmd,
+ .len[0] = __struct_size(cmd),
.data[1] = data,
.len[1] = size,
.flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
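
This is the classic pointer-vs-buffer mix-up: cmd is already a pointer, so
.data[0] = &cmd queued the address of the local variable and .len[0] =
sizeof(cmd) described the pointer, not the command. The fix passes the
buffer itself and sizes it with __struct_size(). A standalone illustration
of the bug class (userspace C):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = malloc(64);

	if (!buf)
		return 1;
	/* for a pointer variable, & and sizeof describe the pointer ... */
	printf("sizeof(buf)=%zu &buf=%p\n", sizeof(buf), (void *)&buf);
	/* ... not the 64-byte object it points to */
	printf("buf=%p\n", (void *)buf);
	free(buf);
	return 0;
}
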
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 38ad719161e6..c8f4f3a1d2eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -125,7 +125,7 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
reset_done =
inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE;
} else {
- inta_hw = iwl_read32(trans, CSR_INT_MASK);
+ inta_hw = iwl_read32(trans, CSR_INT);
reset_done = inta_hw & CSR_INT_BIT_RESET_DONE;
}
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 8755c5e6a65b..c814fbd756a1 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -550,8 +550,8 @@ static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
struct mhi_mbim_context *mbim = ctxt;
- link->session = if_id;
link->mbim = mbim;
+ link->session = mhi_mbim_get_link_mux_id(link->mbim->mdev->mhi_cntrl) + if_id;
link->ndev = ndev;
u64_stats_init(&link->rx_syncp);
u64_stats_init(&link->tx_syncp);
@@ -607,7 +607,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
{
struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
struct mhi_mbim_context *mbim;
- int err, link_id;
+ int err;
mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
if (!mbim)
@@ -628,11 +628,8 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
/* Number of transfer descriptors determines size of the queue */
mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- /* Get the corresponding mux_id from mhi */
- link_id = mhi_mbim_get_link_mux_id(cntrl);
-
/* Register wwan link ops with MHI controller representing WWAN instance */
- return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, link_id);
+ return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
}
static void mhi_mbim_remove(struct mhi_device *mhi_dev)
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index 91fa082e9cab..fc0a7cb181df 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -302,7 +302,7 @@ static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id
ccmni->ctlb = ctlb;
ccmni->dev = dev;
atomic_set(&ccmni->usage, 0);
- ctlb->ccmni_inst[if_id] = ccmni;
+ WRITE_ONCE(ctlb->ccmni_inst[if_id], ccmni);
ret = register_netdevice(dev);
if (ret)
@@ -324,6 +324,7 @@ static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct l
if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
return;
+ WRITE_ONCE(ctlb->ccmni_inst[if_id], NULL);
unregister_netdevice(dev);
}
@@ -419,7 +420,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu
skb_cb = T7XX_SKB_CB(skb);
netif_id = skb_cb->netif_idx;
- ccmni = ccmni_ctlb->ccmni_inst[netif_id];
+ ccmni = READ_ONCE(ccmni_ctlb->ccmni_inst[netif_id]);
if (!ccmni) {
dev_kfree_skb(skb);
return;
@@ -441,7 +442,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu
static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
- struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
struct netdev_queue *net_queue;
if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
@@ -453,7 +454,7 @@ static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno
static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
- struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
struct netdev_queue *net_queue;
if (atomic_read(&ccmni->usage) > 0) {
@@ -471,7 +472,7 @@ static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
if (ctlb->md_sta != MD_STATE_READY)
return;
- if (!ctlb->ccmni_inst[0]) {
+ if (!READ_ONCE(ctlb->ccmni_inst[0])) {
dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
return;
}
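
The WRITE_ONCE()/READ_ONCE() annotations across these hunks are one
pattern: newlink/dellink publish and retire the ccmni_inst[] slot, and the
interrupt-side consumers sample the pointer exactly once and then work on
the local copy, so a concurrent dellink is never observed half-applied. A
kernel-style sketch of the pattern (publish()/consume() are hypothetical
names, not new driver code):

static void publish(struct t7xx_ccmni_ctrl *ctlb, u32 if_id,
		    struct t7xx_ccmni *ccmni)
{
	WRITE_ONCE(ctlb->ccmni_inst[if_id], ccmni);	/* or NULL to retire */
}

static struct t7xx_ccmni *consume(struct t7xx_ccmni_ctrl *ctlb, u32 if_id)
{
	/* sample once; callers must only use this local copy */
	return READ_ONCE(ctlb->ccmni_inst[if_id]);
}
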