Diffstat (limited to 'drivers/net/ethernet')
30 files changed, 318 insertions, 166 deletions
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 832a2ae01950..a8d79ee350f8 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1303,24 +1303,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb, * the user space for finding a flow. During this process, OVS computes * checksum on the first packet when CHECKSUM_PARTIAL flag is set. * - * So, re-compute TCP pseudo header checksum when configured for - * trunk mode. + * So, re-compute TCP pseudo header checksum. */ + if (iph_proto == IPPROTO_TCP) { struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen); + if (tcph->check == 0x0000) { /* Recompute TCP pseudo header checksum */ - if (adapter->is_active_trunk) { - tcphdrlen = skb->len - iphlen; - if (skb_proto == ETH_P_IP) - tcph->check = - ~csum_tcpudp_magic(iph->saddr, - iph->daddr, tcphdrlen, iph_proto, 0); - else if (skb_proto == ETH_P_IPV6) - tcph->check = - ~csum_ipv6_magic(&iph6->saddr, - &iph6->daddr, tcphdrlen, iph_proto, 0); - } + tcphdrlen = skb->len - iphlen; + if (skb_proto == ETH_P_IP) + tcph->check = + ~csum_tcpudp_magic(iph->saddr, + iph->daddr, tcphdrlen, iph_proto, 0); + else if (skb_proto == ETH_P_IPV6) + tcph->check = + ~csum_ipv6_magic(&iph6->saddr, + &iph6->daddr, tcphdrlen, iph_proto, 0); /* Setup SKB fields for checksum offload */ skb_partial_csum_set(skb, iphlen, offsetof(struct tcphdr, check)); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 4f39863b5537..7b1256992dcf 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -2093,3 +2093,35 @@ lag_rebuild_out: } mutex_unlock(&pf->lag_mutex); } + +/** + * ice_lag_is_switchdev_running + * @pf: pointer to PF structure + * + * Check if switchdev is running on any of the interfaces connected to lag. 
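The new ice_lag_is_switchdev_running() helper, whose body follows below, walks the bond's lower netdevs under the RCU read lock and bails out as soon as one port is in switchdev mode. A minimal sketch of that locking pattern, not driver code; is_switchdev() is a placeholder for the ice-specific per-port check:

/*
 * Sketch only: walk a bond's lower netdevs under RCU and report whether
 * any of them satisfies a predicate.  The read lock must be dropped on
 * every exit path, which the single unlock below guarantees.
 */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static bool any_lower_matches(struct net_device *bond,
			      bool (*is_switchdev)(struct net_device *nd))
{
	struct net_device *nd;
	bool found = false;

	rcu_read_lock();		/* lower-dev list may change under us */
	for_each_netdev_in_bond_rcu(bond, nd) {
		if (is_switchdev(nd)) {
			found = true;
			break;		/* unlock happens below */
		}
	}
	rcu_read_unlock();

	return found;
}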
+ */ +bool ice_lag_is_switchdev_running(struct ice_pf *pf) +{ + struct ice_lag *lag = pf->lag; + struct net_device *tmp_nd; + + if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag) + return false; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) { + struct ice_netdev_priv *priv = netdev_priv(tmp_nd); + + if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi || + !priv->vsi->back) + continue; + + if (ice_is_switchdev_running(priv->vsi->back)) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + + return false; +} diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 18075b82485a..facb6c894b6d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -62,4 +62,5 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf); int ice_init_lag(struct ice_pf *pf); void ice_deinit_lag(struct ice_pf *pf); void ice_lag_rebuild(struct ice_pf *pf); +bool ice_lag_is_switchdev_running(struct ice_pf *pf); #endif /* _ICE_LAG_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 201570cd2e0b..7bf9b7069754 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3575,6 +3575,12 @@ int ice_set_dflt_vsi(struct ice_vsi *vsi) dev = ice_pf_to_dev(vsi->back); + if (ice_lag_is_switchdev_running(vsi->back)) { + dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n", + vsi->vsi_num); + return 0; + } + /* the VSI passed in is already the default VSI */ if (ice_is_vsi_dflt_vsi(vsi)) { dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n", diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index b03426ac932b..db97353efd06 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -2617,12 +2617,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf) goto err; } - /* Read flexiflag registers to determine whether the - * corresponding RXDID is configured and supported or not. - * Since Legacy 16byte descriptor format is not supported, - * start from Legacy 32byte descriptor. + /* RXDIDs supported by DDP package can be read from the register + * to get the supported RXDID bitmap. But the legacy 32byte RXDID + * is not listed in DDP package, add it in the bitmap manually. + * Legacy 16byte descriptor is not supported. 
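The ice_vc_query_rxdid comment above describes the new bitmap construction, and the hunk resumes immediately after this sketch: the legacy 32-byte descriptor is added by hand because the DDP package does not advertise it, then every flex RXDID whose flag register reports support is ORed in. A standalone restatement of that logic; the constants and the read_flexiflag() callback are placeholders, not the driver's API:

#include <stdint.h>

#define RXDID_LEGACY_1	1	/* legacy 32-byte descriptor (assumed value) */
#define RXDID_FLEX_NIC	2	/* first flex descriptor ID (assumed value) */
#define RXDID_MAX	64

static uint64_t build_supported_rxdids(uint32_t (*read_flexiflag)(int id))
{
	uint64_t bitmap = 1ULL << RXDID_LEGACY_1;	/* not in DDP, always add */
	int id;

	for (id = RXDID_FLEX_NIC; id < RXDID_MAX; id++)
		if (read_flexiflag(id))			/* DDP lists this RXDID */
			bitmap |= 1ULL << id;

	return bitmap;
}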
*/ - for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { + rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1); + + for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 29cc60988071..ea88ac04ab9a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, struct vf_macvlans *mv_list; int num_vf_macvlans, i; + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + num_vf_macvlans = hw->mac.num_rar_entries - (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); if (!num_vf_macvlans) @@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL); if (mv_list) { - /* Initialize list of VF macvlans */ - INIT_LIST_HEAD(&adapter->vf_mvs.l); for (i = 0; i < num_vf_macvlans; i++) { mv_list[i].vf = -1; mv_list[i].free = true; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 59b138214af2..6cc7a78968fc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -1357,10 +1357,12 @@ static int cn10k_mdo_upd_txsa(struct macsec_context *ctx) if (netif_running(secy->netdev)) { /* Keys cannot be changed after creation */ - err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, - sw_tx_sa->next_pn); - if (err) - return err; + if (ctx->sa.update_pn) { + err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, + sw_tx_sa->next_pn); + if (err) + return err; + } err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, sw_tx_sa->active); @@ -1529,6 +1531,9 @@ static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx) if (err) return err; + if (!ctx->sa.update_pn) + return 0; + err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, rx_sa->next_pn); if (err) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 997fedac3a98..818ce76185b2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1403,6 +1403,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, return 0; } + pp_params.order = get_order(buf_size); pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP; pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); pp_params.nid = NUMA_NO_NODE; diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ddec1627f1a7..8d0bacf4e49c 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2195,7 +2195,7 @@ struct rx_ring_info { struct sk_buff *skb; dma_addr_t data_addr; DEFINE_DMA_UNMAP_LEN(data_size); - dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; + dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1]; }; enum flow_control { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 3cffd1bd3067..20afe79f380a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3171,8 +3171,8 @@ static irqreturn_t 
mtk_handle_irq_rx(int irq, void *_eth) eth->rx_events++; if (likely(napi_schedule_prep(ð->rx_napi))) { - __napi_schedule(ð->rx_napi); mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + __napi_schedule(ð->rx_napi); } return IRQ_HANDLED; @@ -3184,8 +3184,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) eth->tx_events++; if (likely(napi_schedule_prep(ð->tx_napi))) { - __napi_schedule(ð->tx_napi); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + __napi_schedule(ð->tx_napi); } return IRQ_HANDLED; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index c9c1db971652..d4ebd8743114 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -580,7 +580,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) goto out; } - if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) { + if (ctx->sa.update_pn) { netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n", assoc_num); err = -EINVAL; @@ -973,7 +973,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) goto out; } - if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) { + if (ctx->sa.update_pn) { netdev_err(ctx->netdev, "MACsec offload update RX sa %d PN isn't supported\n", assoc_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a2ae791538ed..acb40770cf0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3952,13 +3952,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable) struct mlx5e_channels *chs = &priv->channels; struct mlx5e_params new_params; int err; + bool rx_ts_over_crc = !enable; mutex_lock(&priv->state_lock); new_params = chs->params; new_params.scatter_fcs_en = enable; err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap, - &new_params.scatter_fcs_en, true); + &rx_ts_over_crc, true); mutex_unlock(&priv->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index bb8eeb86edf7..52c2fe3644d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -310,8 +310,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, }; -static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, - bool learning_en) +static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, + bool learning_en) { char tnpc_pl[MLXSW_REG_TNPC_LEN]; diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 329e374b9539..43ba71e82260 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -46,6 +46,7 @@ config LAN743X tristate "LAN743x support" depends on PCI depends on PTP_1588_CLOCK_OPTIONAL + select PHYLIB select FIXED_PHY select CRC16 select CRC32 diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 4a16ebff3d1d..48ea4aeeea5d 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -91,63 +91,137 @@ static unsigned int mana_checksum_info(struct sk_buff *skb) return 0; } +static void mana_add_sge(struct 
mana_tx_package *tp, struct mana_skb_head *ash, + int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey) +{ + ash->dma_handle[sg_i] = da; + ash->size[sg_i] = sge_len; + + tp->wqe_req.sgl[sg_i].address = da; + tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey; + tp->wqe_req.sgl[sg_i].size = sge_len; +} + static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, - struct mana_tx_package *tp) + struct mana_tx_package *tp, int gso_hs) { struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; + int hsg = 1; /* num of SGEs of linear part */ struct gdma_dev *gd = apc->ac->gdma_dev; + int skb_hlen = skb_headlen(skb); + int sge0_len, sge1_len = 0; struct gdma_context *gc; struct device *dev; skb_frag_t *frag; dma_addr_t da; + int sg_i; int i; gc = gd->gdma_context; dev = gc->dev; - da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (gso_hs && gso_hs < skb_hlen) { + sge0_len = gso_hs; + sge1_len = skb_hlen - gso_hs; + } else { + sge0_len = skb_hlen; + } + + da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE); if (dma_mapping_error(dev, da)) return -ENOMEM; - ash->dma_handle[0] = da; - ash->size[0] = skb_headlen(skb); + mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey); - tp->wqe_req.sgl[0].address = ash->dma_handle[0]; - tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey; - tp->wqe_req.sgl[0].size = ash->size[0]; + if (sge1_len) { + sg_i = 1; + da = dma_map_single(dev, skb->data + sge0_len, sge1_len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, da)) + goto frag_err; + + mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey); + hsg = 2; + } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + sg_i = hsg + i; + frag = &skb_shinfo(skb)->frags[i]; da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); - if (dma_mapping_error(dev, da)) goto frag_err; - ash->dma_handle[i + 1] = da; - ash->size[i + 1] = skb_frag_size(frag); - - tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1]; - tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey; - tp->wqe_req.sgl[i + 1].size = ash->size[i + 1]; + mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag), + gd->gpa_mkey); } return 0; frag_err: - for (i = i - 1; i >= 0; i--) - dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1], + for (i = sg_i - 1; i >= hsg; i--) + dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); - dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + for (i = hsg - 1; i >= 0; i--) + dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); return -ENOMEM; } +/* Handle the case when GSO SKB linear length is too large. + * MANA NIC requires GSO packets to put only the packet header to SGE0. + * So, we need 2 SGEs for the skb linear part which contains more than the + * header. + * Return a positive value for the number of SGEs, or a negative value + * for an error. 
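The mana_fix_skb_head() helper that follows implements the accounting described in the comment above: one SGE per page fragment plus one for the linear part, plus one more when the GSO header size is smaller than the linear length, because the hardware wants only the headers in SGE 0. A compact standalone restatement of that logic; the function and parameter names are illustrative only:

static int count_tx_sges(int nr_frags, int linear_len, int gso_hdr_len)
{
	int num_sge = 1 + nr_frags;	/* SGE 0 + one per page fragment */

	if (gso_hdr_len == 0)		/* non-GSO: linear part fits SGE 0 */
		return num_sge;

	if (gso_hdr_len < linear_len)	/* headers in SGE 0, rest of head in SGE 1 */
		return num_sge + 1;

	if (gso_hdr_len > linear_len)	/* headers not fully in the linear area */
		return -1;

	return num_sge;			/* gso_hdr_len == linear_len */
}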
+ */ +static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb, + int gso_hs) +{ + int num_sge = 1 + skb_shinfo(skb)->nr_frags; + int skb_hlen = skb_headlen(skb); + + if (gso_hs < skb_hlen) { + num_sge++; + } else if (gso_hs > skb_hlen) { + if (net_ratelimit()) + netdev_err(ndev, + "TX nonlinear head: hs:%d, skb_hlen:%d\n", + gso_hs, skb_hlen); + + return -EINVAL; + } + + return num_sge; +} + +/* Get the GSO packet's header size */ +static int mana_get_gso_hs(struct sk_buff *skb) +{ + int gso_hs; + + if (skb->encapsulation) { + gso_hs = skb_inner_tcp_all_headers(skb); + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + gso_hs = skb_transport_offset(skb) + + sizeof(struct udphdr); + } else { + gso_hs = skb_tcp_all_headers(skb); + } + } + + return gso_hs; +} + netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) { enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT; struct mana_port_context *apc = netdev_priv(ndev); + int gso_hs = 0; /* zero for non-GSO pkts */ u16 txq_idx = skb_get_queue_mapping(skb); struct gdma_dev *gd = apc->ac->gdma_dev; bool ipv4 = false, ipv6 = false; @@ -159,7 +233,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct mana_txq *txq; struct mana_cq *cq; int err, len; - u16 ihs; if (unlikely(!apc->port_is_up)) goto tx_drop; @@ -209,19 +282,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) pkg.wqe_req.client_data_unit = 0; pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; - WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); - - if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { - pkg.wqe_req.sgl = pkg.sgl_array; - } else { - pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, - sizeof(struct gdma_sge), - GFP_ATOMIC); - if (!pkg.sgl_ptr) - goto tx_drop_count; - - pkg.wqe_req.sgl = pkg.sgl_ptr; - } if (skb->protocol == htons(ETH_P_IP)) ipv4 = true; @@ -229,6 +289,26 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) ipv6 = true; if (skb_is_gso(skb)) { + int num_sge; + + gso_hs = mana_get_gso_hs(skb); + + num_sge = mana_fix_skb_head(ndev, skb, gso_hs); + if (num_sge > 0) + pkg.wqe_req.num_sge = num_sge; + else + goto tx_drop_count; + + u64_stats_update_begin(&tx_stats->syncp); + if (skb->encapsulation) { + tx_stats->tso_inner_packets++; + tx_stats->tso_inner_bytes += skb->len - gso_hs; + } else { + tx_stats->tso_packets++; + tx_stats->tso_bytes += skb->len - gso_hs; + } + u64_stats_update_end(&tx_stats->syncp); + pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4; pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6; @@ -252,28 +332,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } - - if (skb->encapsulation) { - ihs = skb_inner_tcp_all_headers(skb); - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->tso_inner_packets++; - tx_stats->tso_inner_bytes += skb->len - ihs; - u64_stats_update_end(&tx_stats->syncp); - } else { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { - ihs = skb_transport_offset(skb) + sizeof(struct udphdr); - } else { - ihs = skb_tcp_all_headers(skb); - if (ipv6_has_hopopt_jumbo(skb)) - ihs -= sizeof(struct hop_jumbo_hdr); - } - - u64_stats_update_begin(&tx_stats->syncp); - tx_stats->tso_packets++; - tx_stats->tso_bytes += skb->len - ihs; - u64_stats_update_end(&tx_stats->syncp); - } - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { csum_type = mana_checksum_info(skb); @@ -296,11 +354,25 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct 
net_device *ndev) } else { /* Can't do offload of this type of checksum */ if (skb_checksum_help(skb)) - goto free_sgl_ptr; + goto tx_drop_count; } } - if (mana_map_skb(skb, apc, &pkg)) { + WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); + + if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { + pkg.wqe_req.sgl = pkg.sgl_array; + } else { + pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, + sizeof(struct gdma_sge), + GFP_ATOMIC); + if (!pkg.sgl_ptr) + goto tx_drop_count; + + pkg.wqe_req.sgl = pkg.sgl_ptr; + } + + if (mana_map_skb(skb, apc, &pkg, gso_hs)) { u64_stats_update_begin(&tx_stats->syncp); tx_stats->mana_map_err++; u64_stats_update_end(&tx_stats->syncp); @@ -1258,11 +1330,16 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; struct device *dev = gc->dev; - int i; + int hsg, i; - dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); + /* Number of SGEs of linear part */ + hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; - for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) + for (i = 0; i < hsg; i++) + dma_unmap_single(dev, ash->dma_handle[i], ash->size[i], + DMA_TO_DEVICE); + + for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++) dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); } @@ -1317,19 +1394,23 @@ static void mana_poll_tx_cq(struct mana_cq *cq) case CQE_TX_VPORT_IDX_OUT_OF_RANGE: case CQE_TX_VPORT_DISABLED: case CQE_TX_VLAN_TAGGING_VIOLATION: - WARN_ONCE(1, "TX: CQE error %d: ignored.\n", - cqe_oob->cqe_hdr.cqe_type); + if (net_ratelimit()) + netdev_err(ndev, "TX: CQE error %d\n", + cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_err++; break; default: - /* If the CQE type is unexpected, log an error, assert, - * and go through the error path. + /* If the CQE type is unknown, log an error, + * and still free the SKB, update tail, etc. 
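The hunk continuing below replaces WARN_ONCE() with a rate-limited netdev_err() plus a statistics counter, so a misbehaving device cannot flood the log and the completion is still consumed. A minimal sketch of that pattern, assuming placeholder names for the netdev and the counter:

#include <linux/net.h>
#include <linux/netdevice.h>

static void report_bad_cqe(struct net_device *ndev, unsigned long *stat,
			   int cqe_type)
{
	if (net_ratelimit())	/* cap the log rate instead of warning once */
		netdev_err(ndev, "TX: unexpected CQE type %d\n", cqe_type);

	(*stat)++;		/* e.g. a tx_cqe_unknown_type ethtool counter */
	/* caller keeps going: free the SKB, advance the tail, etc. */
}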
*/ - WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n", - cqe_oob->cqe_hdr.cqe_type); + if (net_ratelimit()) + netdev_err(ndev, "TX: unknown CQE type %d\n", + cqe_oob->cqe_hdr.cqe_type); + apc->eth_stats.tx_cqe_unknown_type++; - return; + break; } if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index f21cf1f40f98..153533cd8f08 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); struct nfp_flower_cmsg_merge_hint *msg; struct nfp_fl_payload *sub_flows[2]; + struct nfp_flower_priv *priv; int err, i, flow_cnt; msg = nfp_flower_cmsg_get_data(skb); @@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) return; } - rtnl_lock(); + priv = app->priv; + mutex_lock(&priv->nfp_fl_lock); for (i = 0; i < flow_cnt; i++) { u32 ctx = be32_to_cpu(msg->flow[i].host_ctx); sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx); if (!sub_flows[i]) { nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n"); - goto err_rtnl_unlock; + goto err_mutex_unlock; } } @@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) if (err == -ENOMEM) nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n"); -err_rtnl_unlock: - rtnl_unlock(); +err_mutex_unlock: + mutex_unlock(&priv->nfp_fl_lock); } static void diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 2643c4b3ff1f..2967bab72505 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -2131,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl struct nfp_fl_ct_flow_entry *ct_entry; struct netlink_ext_ack *extack = NULL; - ASSERT_RTNL(); - extack = flow->common.extack; switch (flow->command) { case FLOW_CLS_REPLACE: @@ -2178,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb switch (type) { case TC_SETUP_CLSFLOWER: - rtnl_lock(); + while (!mutex_trylock(&zt->priv->nfp_fl_lock)) { + if (!zt->nft) /* avoid deadlock */ + return err; + msleep(20); + } err = nfp_fl_ct_offload_nft_flow(zt, flow); - rtnl_unlock(); + mutex_unlock(&zt->priv->nfp_fl_lock); break; default: return -EOPNOTSUPP; @@ -2208,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) struct nfp_fl_ct_flow_entry *ct_entry; struct nfp_fl_ct_zone_entry *zt; struct rhashtable *m_table; + struct nf_flowtable *nft; if (!ct_map_ent) return -ENOENT; @@ -2226,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) if (ct_map_ent->cookie > 0) kfree(ct_map_ent); - if (!zt->pre_ct_count) { - zt->nft = NULL; + if (!zt->pre_ct_count && zt->nft) { + nft = zt->nft; + zt->nft = NULL; /* avoid deadlock */ + nf_flow_table_offload_del_cb(nft, + nfp_fl_ct_handle_nft_flow, + zt); nfp_fl_ct_clean_nft_entries(zt); } break; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 40372545148e..2b7c947ff4f2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -297,6 +297,7 @@ struct nfp_fl_internal_ports { * @predt_list: List 
to keep track of decap pretun flows * @neigh_table: Table to keep track of neighbor entries * @predt_lock: Lock to serialise predt/neigh table updates + * @nfp_fl_lock: Lock to protect the flow offload operation */ struct nfp_flower_priv { struct nfp_app *app; @@ -339,6 +340,7 @@ struct nfp_flower_priv { struct list_head predt_list; struct rhashtable neigh_table; spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */ + struct mutex nfp_fl_lock; /* Protect the flow operation */ }; /** diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 0f06ef6e24bf..80e4675582bf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_stats_ctx_table; + mutex_init(&priv->nfp_fl_lock); + err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params); if (err) goto err_free_merge_table; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c153f0575b92..0aceef9fe582 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, u64 parent_ctx = 0; int err; - ASSERT_RTNL(); - if (sub_flow1 == sub_flow2 || nfp_flower_is_merge_flow(sub_flow1) || nfp_flower_is_merge_flow(sub_flow2)) @@ -1727,19 +1725,30 @@ static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, struct flow_cls_offload *flower) { + struct nfp_flower_priv *priv = app->priv; + int ret; + if (!eth_proto_is_802_3(flower->common.protocol)) return -EOPNOTSUPP; + mutex_lock(&priv->nfp_fl_lock); switch (flower->command) { case FLOW_CLS_REPLACE: - return nfp_flower_add_offload(app, netdev, flower); + ret = nfp_flower_add_offload(app, netdev, flower); + break; case FLOW_CLS_DESTROY: - return nfp_flower_del_offload(app, netdev, flower); + ret = nfp_flower_del_offload(app, netdev, flower); + break; case FLOW_CLS_STATS: - return nfp_flower_get_stats(app, netdev, flower); + ret = nfp_flower_get_stats(app, netdev, flower); + break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + break; } + mutex_unlock(&priv->nfp_fl_lock); + + return ret; } static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, @@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, repr_priv = repr->app_priv; repr_priv->block_shared = f->block_shared; f->driver_block_list = &nfp_block_cb_list; + f->unlocked_driver_cb = true; switch (f->command) { case FLOW_BLOCK_BIND: @@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; + f->unlocked_driver_cb = true; + switch (f->command) { case FLOW_BLOCK_BIND: cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index 99052a925d9e..e7180b4793c7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, { struct netlink_ext_ack *extack = flow->common.extack; struct nfp_flower_priv *fl_priv = 
app->priv; + int ret; if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload"); return -EOPNOTSUPP; } + mutex_lock(&fl_priv->nfp_fl_lock); switch (flow->command) { case TC_CLSMATCHALL_REPLACE: - return nfp_flower_install_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack); + break; case TC_CLSMATCHALL_DESTROY: - return nfp_flower_remove_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack); + break; case TC_CLSMATCHALL_STATS: - return nfp_flower_stats_rate_limiter(app, netdev, flow, - extack); + ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack); + break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + break; } + mutex_unlock(&fl_priv->nfp_fl_lock); + + return ret; } /* Offload tc action, currently only for tc police */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 0bfc375161ed..a174c6fc626a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -110,9 +110,9 @@ struct qed_ll2_info { enum core_tx_dest tx_dest; u8 tx_stats_en; bool main_func_queue; + struct qed_ll2_cbs cbs; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; - struct qed_ll2_cbs cbs; }; extern const struct qed_ll2_ops qed_ll2_ops_pass; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 7df9f9f8e134..0ef0b88b7145 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2167,6 +2167,8 @@ static int ravb_close(struct net_device *ndev) of_phy_deregister_fixed_link(np); } + cancel_work_sync(&priv->work); + if (info->multi_irqs) { free_irq(priv->tx_irqs[RAVB_NC], ndev); free_irq(priv->rx_irqs[RAVB_NC], ndev); @@ -2891,8 +2893,6 @@ static int ravb_remove(struct platform_device *pdev) clk_disable_unprepare(priv->gptp_clk); clk_disable_unprepare(priv->refclk); - dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, - priv->desc_bat_dma); /* Set reset mode */ ravb_write(ndev, CCC_OPC_RESET, CCC); unregister_netdev(ndev); @@ -2900,6 +2900,8 @@ static int ravb_remove(struct platform_device *pdev) netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); + dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, + priv->desc_bat_dma); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); reset_control_assert(priv->rstc); diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index ea9186178091..0fc0b6bea753 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -4,6 +4,7 @@ * Copyright (C) 2022 Renesas Electronics Corporation */ +#include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/etherdevice.h> @@ -1049,7 +1050,7 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) static void rswitch_etha_enable_mii(struct rswitch_etha *etha) { rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, - MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06)); + MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06)); rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45); } @@ -1253,7 +1254,7 @@ static void rswitch_adjust_link(struct net_device *ndev) phy_print_status(phydev); if 
(phydev->link) phy_power_on(rdev->serdes); - else + else if (rdev->serdes->power_count) phy_power_off(rdev->serdes); rdev->etha->link = phydev->link; @@ -1693,6 +1694,12 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index) etha->index = index; etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE; etha->coma_addr = priv->addr; + + /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1. + * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply + * both the numerator and the denominator by 10. + */ + etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1; } static int rswitch_device_alloc(struct rswitch_private *priv, int index) @@ -1900,6 +1907,10 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) return -ENOMEM; spin_lock_init(&priv->lock); + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + attr = soc_device_match(rswitch_soc_no_speed_change); if (attr) priv->etha_no_runtime_change = true; @@ -1953,15 +1964,17 @@ static void rswitch_deinit(struct rswitch_private *priv) rswitch_gwca_hw_deinit(priv); rcar_gen4_ptp_unregister(priv->ptp_priv); - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { struct rswitch_device *rdev = priv->rdev[i]; - phy_exit(priv->rdev[i]->serdes); - rswitch_ether_port_deinit_one(rdev); unregister_netdev(rdev->ndev); - rswitch_device_free(priv, i); + rswitch_ether_port_deinit_one(rdev); + phy_exit(priv->rdev[i]->serdes); } + for (i = 0; i < RSWITCH_NUM_PORTS; i++) + rswitch_device_free(priv, i); + rswitch_gwca_ts_queue_free(priv); rswitch_gwca_linkfix_free(priv); diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index f0c16a37ea55..04f49a7a5843 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -915,6 +915,7 @@ struct rswitch_etha { bool external_phy; struct mii_bus *mii; phy_interface_t phy_interface; + u32 psmcs; u8 mac_addr[MAX_ADDR_LEN]; int link; int speed; @@ -1012,6 +1013,7 @@ struct rswitch_private { struct rswitch_mfwd mfwd; spinlock_t lock; /* lock interrupt registers' control */ + struct clk *clk; bool etha_no_runtime_change; bool gwca_halt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 26ea8c687881..a0e276783e65 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -104,6 +104,7 @@ struct stm32_ops { int (*parse_data)(struct stm32_dwmac *dwmac, struct device *dev); u32 syscfg_eth_mask; + bool clk_rx_enable_in_suspend; }; static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) @@ -121,7 +122,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) if (ret) return ret; - if (!dwmac->dev->power.is_suspended) { + if (!dwmac->ops->clk_rx_enable_in_suspend || + !dwmac->dev->power.is_suspended) { ret = clk_prepare_enable(dwmac->clk_rx); if (ret) { clk_disable_unprepare(dwmac->clk_tx); @@ -513,7 +515,8 @@ static struct stm32_ops stm32mp1_dwmac_data = { .suspend = stm32mp1_suspend, .resume = stm32mp1_resume, .parse_data = stm32mp1_parse_data, - .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK + .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK, + .clk_rx_enable_in_suspend = true }; static const struct of_device_id stm32_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 
83c567a89a46..ed1a5a31a491 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -6002,33 +6002,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling receive - used by NETCONSOLE and other diagnostic tools - * to allow network I/O with interrupts disabled. - */ -static void stmmac_poll_controller(struct net_device *dev) -{ - struct stmmac_priv *priv = netdev_priv(dev); - int i; - - /* If adapter is down, do nothing */ - if (test_bit(STMMAC_DOWN, &priv->state)) - return; - - if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) { - for (i = 0; i < priv->plat->rx_queues_to_use; i++) - stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); - - for (i = 0; i < priv->plat->tx_queues_to_use; i++) - stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); - } else { - disable_irq(dev->irq); - stmmac_interrupt(dev->irq, dev); - enable_irq(dev->irq); - } -} -#endif - /** * stmmac_ioctl - Entry point for the Ioctl * @dev: Device pointer. @@ -6989,9 +6962,6 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_get_stats64 = stmmac_get_stats64, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = stmmac_poll_controller, -#endif .ndo_set_mac_address = stmmac_set_mac_address, .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 0f28795e581c..2f0678f15fb7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -901,7 +901,7 @@ static int __maybe_unused stmmac_pltfr_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); int ret; - ret = stmmac_pltfr_init(pdev, priv->plat->bsp_priv); + ret = stmmac_pltfr_init(pdev, priv->plat); if (ret) return ret; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index bea6fc0f324c..24120605502f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1747,9 +1747,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) } tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); - if (tx_chn->irq <= 0) { + if (tx_chn->irq < 0) { dev_err(dev, "Failed to get tx dma irq %d\n", tx_chn->irq); + ret = tx_chn->irq; goto err; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 410612f43cbd..4914d0ef58e9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -316,12 +316,12 @@ static int prueth_init_tx_chns(struct prueth_emac *emac) goto fail; } - tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); - if (tx_chn->irq <= 0) { - ret = -EINVAL; + ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); + if (ret < 0) { netdev_err(ndev, "failed to get tx irq\n"); goto fail; } + tx_chn->irq = ret; snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d", dev_name(dev), tx_chn->id); |
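Both TI hunks above (am65-cpsw-nuss and icssg_prueth) correct the handling of k3_udma_glue_tx_get_irq(), which returns either an IRQ number or a negative errno: check for a negative value and propagate it, rather than treating 0 as an error or substituting -EINVAL. A small standalone sketch of the intended pattern, with get_tx_irq() standing in for the glue-layer call:

static int setup_tx_irq(int (*get_tx_irq)(void), int *irq_out)
{
	int ret = get_tx_irq();

	if (ret < 0)		/* negative errno: propagate it unchanged */
		return ret;

	*irq_out = ret;		/* non-negative return is the IRQ number */
	return 0;
}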
