Diffstat (limited to 'drivers/net/ethernet/marvell'): 41 files changed, 539 insertions, 249 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 837295fecd17..50f7c59e8a04 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -34,7 +34,6 @@ config MV643XX_ETH config MVMDIO tristate "Marvell MDIO interface support" depends on HAS_IOMEM - select MDIO_DEVRES select PHYLIB help This driver supports the MDIO interface found in the network diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 67a6ff07c83d..8cc888bf6094 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2247,7 +2247,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) if (unlikely(mp->oom)) { mp->oom = 0; - del_timer(&mp->rx_oom); + timer_delete(&mp->rx_oom); } work_done = 0; @@ -2521,7 +2521,7 @@ static int mv643xx_eth_stop(struct net_device *dev) napi_disable(&mp->napi); - del_timer_sync(&mp->rx_oom); + timer_delete_sync(&mp->rx_oom); netif_carrier_off(dev); if (dev->phydev) @@ -2531,7 +2531,7 @@ static int mv643xx_eth_stop(struct net_device *dev) port_reset(mp); mv643xx_eth_get_stats(dev); mib_counters_update(mp); - del_timer_sync(&mp->mib_counters_timer); + timer_delete_sync(&mp->mib_counters_timer); for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 44fe9b68d1c2..061fcd444d50 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1113,6 +1113,9 @@ struct mvpp2 { /* Spinlocks for CM3 shared memory configuration */ spinlock_t mss_spinlock; + + /* Spinlock for shared PRS parser memory and shadow table */ + spinlock_t prs_spinlock; }; struct mvpp2_pcpu_stats { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 566c12c89520..a7872d14a49d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5173,38 +5173,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped = dev->stats.tx_dropped; } -static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +static int mvpp2_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; + struct mvpp2_port *port = netdev_priv(dev); void __iomem *ptp; u32 gcr, int_mask; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + if (!port->hwtstamp) + return -EOPNOTSUPP; - if (config.tx_type != HWTSTAMP_TX_OFF && - config.tx_type != HWTSTAMP_TX_ON) + if (config->tx_type != HWTSTAMP_TX_OFF && + config->tx_type != HWTSTAMP_TX_ON) return -ERANGE; ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); int_mask = gcr = 0; - if (config.tx_type != HWTSTAMP_TX_OFF) { + if (config->tx_type != HWTSTAMP_TX_OFF) { gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | MVPP22_PTP_INT_MASK_QUEUE0; } /* It seems we must also release the TX reset when enabling the TSU */ - if (config.rx_filter != HWTSTAMP_FILTER_NONE) + if (config->rx_filter != HWTSTAMP_FILTER_NONE) gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET; if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) mvpp22_tai_start(port->priv->tai); - if (config.rx_filter != HWTSTAMP_FILTER_NONE) { - 
config.rx_filter = HWTSTAMP_FILTER_ALL; + if (config->rx_filter != HWTSTAMP_FILTER_NONE) { + config->rx_filter = HWTSTAMP_FILTER_ALL; mvpp2_modify(ptp + MVPP22_PTP_GCR, MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET | @@ -5225,26 +5227,22 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) mvpp22_tai_stop(port->priv->tai); - port->tx_hwtstamp_type = config.tx_type; - - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + port->tx_hwtstamp_type = config->tx_type; return 0; } -static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +static int mvpp2_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; - - memset(&config, 0, sizeof(config)); + struct mvpp2_port *port = netdev_priv(dev); - config.tx_type = port->tx_hwtstamp_type; - config.rx_filter = port->rx_hwtstamp ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + if (!port->hwtstamp) + return -EOPNOTSUPP; - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + config->tx_type = port->tx_hwtstamp_type; + config->rx_filter = port->rx_hwtstamp ? HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; return 0; } @@ -5274,18 +5272,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mvpp2_port *port = netdev_priv(dev); - switch (cmd) { - case SIOCSHWTSTAMP: - if (port->hwtstamp) - return mvpp2_set_ts_config(port, ifr); - break; - - case SIOCGHWTSTAMP: - if (port->hwtstamp) - return mvpp2_get_ts_config(port, ifr); - break; - } - if (!port->phylink) return -ENOTSUPP; @@ -5799,6 +5785,8 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_set_features = mvpp2_set_features, .ndo_bpf = mvpp2_xdp, .ndo_xdp_xmit = mvpp2_xdp_xmit, + .ndo_hwtstamp_get = mvpp2_hwtstamp_get, + .ndo_hwtstamp_set = mvpp2_hwtstamp_set, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { @@ -7723,8 +7711,9 @@ static int mvpp2_probe(struct platform_device *pdev) if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) priv->hw_version = MVPP23; - /* Init mss lock */ + /* Init locks for shared packet processor resources */ spin_lock_init(&priv->mss_spinlock); + spin_lock_init(&priv->prs_spinlock); /* Initialize network controller */ err = mvpp2_init(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 9af22f497a40..93e978bdf303 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) } /* Initialize tcam entry from hw */ -int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, - int tid) +static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, return 0; } +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) +{ + int err; + + spin_lock_bh(&priv->prs_spinlock); + err = __mvpp2_prs_init_from_hw(priv, pe, tid); + 
spin_unlock_bh(&priv->prs_spinlock); + + return err; +} + /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) { @@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); bits = mvpp2_prs_sram_ai_get(&pe); /* Sram store classification lookup ID in AI bits [5:0] */ @@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) } /* Set port to unicast or multicast promiscuous mode */ -void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) +static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, + bool add) { struct mvpp2_prs_entry pe; unsigned char cast_match; unsigned int ri; int tid; + lockdep_assert_held(&priv->prs_spinlock); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { cast_match = MVPP2_PRS_UCAST_VAL; tid = MVPP2_PE_MAC_UC_PROMISCUOUS; @@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, /* promiscuous mode - Accept unknown unicast or multicast packets */ if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, mvpp2_prs_hw_write(priv, &pe); } +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + spin_lock_bh(&priv->prs_spinlock); + __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add); + spin_unlock_bh(&priv->prs_spinlock); +} + /* Set entry for dsa packets */ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) @@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = 
mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) @@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); @@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); @@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || @@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ @@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); } /* Set default entries for various types of dsa packets */ @@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) struct mvpp2_prs_entry pe; int err; - priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - /* Double VLAN: 0x88A8, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); @@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + /* Scan TCAM and see if entry with this <vid,port> already exist */ tid = mvpp2_prs_vid_range_find(port, vid, mask); @@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) MVPP2_PRS_VLAN_FILT_MAX_ENTRY); /* There isn't room for a new VID filter */ - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); pe.index = tid; @@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, 
tid); } /* Enable the current port */ @@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) struct mvpp2 *priv = port->priv; int tid; - /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + spin_lock_bh(&priv->prs_spinlock); - /* No such entry */ - if (tid < 0) - return; + /* Invalidate TCAM entry with this <vid,port>, if it exists */ + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + if (tid >= 0) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } - mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; + spin_unlock_bh(&priv->prs_spinlock); } /* Remove all existing VID filters on this port */ @@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) struct mvpp2 *priv = port->priv; int tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (priv->prs_shadow[tid].valid) { @@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) priv->prs_shadow[tid].valid = false; } } + + spin_unlock_bh(&priv->prs_spinlock); } /* Remove VID filering entry for this port */ @@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv; + spin_lock_bh(&priv->prs_spinlock); + /* Invalidate the guard entry */ mvpp2_prs_hw_inv(priv, tid); priv->prs_shadow[tid].valid = false; + + spin_unlock_bh(&priv->prs_spinlock); } /* Add guard entry that drops packets when no VID is matched on this port */ @@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + pe.index = tid; reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); @@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + + spin_unlock_bh(&priv->prs_spinlock); } /* Parser default initialization */ @@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) { int err, index, i; + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + spin_lock_bh(&priv->prs_spinlock); + /* Enable tcam table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); @@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index); - priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, @@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) 
mvpp2_prs_vid_init(priv); err = mvpp2_prs_etype_init(priv); - if (err) - return err; - - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; + err = err ? : mvpp2_prs_vlan_init(pdev, priv); + err = err ? : mvpp2_prs_pppoe_init(priv); + err = err ? : mvpp2_prs_ip6_init(priv); + err = err ? : mvpp2_prs_ip4_init(priv); - return 0; + spin_unlock_bh(&priv->prs_spinlock); + return err; } /* Compare MAC DA with tcam entry data */ @@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); if (mvpp2_prs_mac_range_equals(&pe, da, mask) && @@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, } /* Update parser's mac da entry */ -int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port, + const u8 *da, bool add) { unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct mvpp2 *priv = port->priv; @@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) return 0; } +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + int err; + + spin_lock_bh(&port->priv->prs_spinlock); + err = __mvpp2_prs_mac_da_accept(port, da, add); + spin_unlock_bh(&port->priv->prs_spinlock); + + return err; +} + int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) { struct mvpp2_port *port = netdev_priv(dev); @@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) unsigned long pmap; int index, tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; @@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); @@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) continue; /* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); + __mvpp2_prs_mac_da_accept(port, da, false); } + + spin_unlock_bh(&priv->prs_spinlock); } int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) { switch (type) { case MVPP2_TAG_TYPE_EDSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); @@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_DSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, 
MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_MH: case MVPP2_TAG_TYPE_NONE: + spin_lock_bh(&priv->prs_spinlock); /* Remove port form EDSA and DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; default: @@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&port->priv->prs_spinlock); + tid = mvpp2_prs_flow_find(port->priv, port->id); /* Such entry not exist */ @@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&port->priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, &pe); + spin_unlock_bh(&port->priv->prs_spinlock); return 0; } @@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index) if (index > MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL; + spin_lock_bh(&priv->prs_spinlock); + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + spin_unlock_bh(&priv->prs_spinlock); return val; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 0a679e95196f..24499bb36c00 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1223,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work) miss_cnt); rtnl_lock(); if (netif_running(oct->netdev)) - octep_stop(oct->netdev); + dev_close(oct->netdev); rtnl_unlock(); } diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index 18c922dd5fc6..420c3f4cf741 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -18,8 +18,6 @@ #include "octep_vf_config.h" #include "octep_vf_main.h" -struct workqueue_struct *octep_vf_wq; - /* 
Supported Devices */ static const struct pci_device_id octep_vf_pci_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)}, @@ -835,7 +833,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct octep_vf_device *oct = netdev_priv(netdev); netdev_hold(netdev, NULL, GFP_ATOMIC); - schedule_work(&oct->tx_timeout_task); + if (!schedule_work(&oct->tx_timeout_task)) + netdev_put(netdev, NULL); + } static int octep_vf_set_mac(struct net_device *netdev, void *p) diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h index 1a352f41f823..b9f13506f462 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h @@ -320,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct) #define octep_vf_read_csr64(octep_vf_dev, reg_off) \ readq((octep_vf_dev)->mmio.hw_addr + (reg_off)) -extern struct workqueue_struct *octep_vf_wq; - int octep_vf_device_setup(struct octep_vf_device *oct); int octep_vf_setup_iqs(struct octep_vf_device *oct); void octep_vf_free_iqs(struct octep_vf_device *oct); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 0b27a695008b..971993586fb4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -717,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; + + /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ + if (idx >= CGX_RX_STAT_GLOBAL_INDEX) + lmac_id = 0; + *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 1e5aa5397504..7d21905deed8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -188,14 +188,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) { unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; - struct device *sender = &mbox->pdev->dev; while (!time_after(jiffies, timeout)) { if (mdev->num_msgs == mdev->msgs_acked) return 0; usleep_range(800, 1000); } - dev_dbg(sender, "timed out while waiting for rsp\n"); + trace_otx2_msg_wait_rsp(mbox->pdev); return -EIO; } EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); @@ -219,6 +218,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; void *hw_mbase = mdev->hwbase; + struct mbox_msghdr *msg; u64 intr_val; tx_hdr = hw_mbase + mbox->tx_start; @@ -251,7 +251,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) tx_hdr->num_msgs = mdev->num_msgs; rx_hdr->num_msgs = 0; - trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size); + msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset); + + trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size, + msg->id, msg->pcifunc); spin_unlock(&mdev->mbox_lock); @@ -445,6 +448,14 @@ const char *otx2_mbox_id2name(u16 id) #define M(_name, _id, _1, _2, _3) case _id: return # _name; MBOX_MESSAGES #undef M + +#define M(_name, _id, _1, _2, _3) case _id: return # _name; + MBOX_UP_CGX_MESSAGES +#undef M + +#define M(_name, 
_id, _1, _2, _3) case _id: return # _name; + MBOX_UP_CPT_MESSAGES +#undef M default: return "INVALID ID"; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 005ca8a056c0..a213b2663583 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -524,6 +524,8 @@ struct get_hw_cap_rsp { u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ u8 nix_shaping; /* Is shaping and coloring supported */ u8 npc_hash_extract; /* Is hash extract supported */ +#define HW_CAP_MACSEC BIT_ULL(1) + u64 hw_caps; }; /* CGX mbox message formats */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index 655dd4726d36..0277d226293e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + mutex_unlock(&rvu->mbox_lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 6575c422635b..a8025f0486c9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1393,8 +1393,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (blkaddr < 0) return; - if (blktype == BLKTYPE_NIX) - rvu_nix_reset_mac(pfvf, pcifunc); block = &hw->block[blkaddr]; @@ -1407,6 +1405,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (lf < 0) /* This should never happen */ continue; + if (blktype == BLKTYPE_NIX) { + rvu_nix_reset_mac(pfvf, pcifunc); + rvu_npc_clear_ucast_entry(rvu, pcifunc, lf); + } /* Disable the LF */ rvu_write64(rvu, blkaddr, block->lfcfg_reg | (lf << block->lfshift), 0x00ULL); @@ -2031,6 +2033,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, rsp->nix_shaping = hw->cap.nix_shaping; rsp->npc_hash_extract = hw->cap.npc_hash_extract; + if (rvu->mcs_blk_cnt) + rsp->hw_caps = HW_CAP_MACSEC; + return 0; } @@ -2173,7 +2178,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, if (rsp && err) \ rsp->hdr.rc = err; \ \ - trace_otx2_msg_process(mbox->pdev, _id, err); \ + trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \ return rsp ? 
err : -ENOMEM; \ } MBOX_MESSAGES diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 60f085b00a8c..48f66292ad5c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -969,8 +969,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, - bool enable); void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, @@ -996,6 +994,8 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt); +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf); + bool is_npc_intf_tx(u8 intf); bool is_npc_intf_rx(u8 intf); bool is_npc_interface_valid(struct rvu *rvu, u8 intf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 992fa0b82e8d..d0331b0e0bfd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ - trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \ + trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \ return req; \ } @@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); + mutex_unlock(&rvu->mbox_lock); } while (pfmap); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7fa98aeb3663..4a3370a40dd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -13,19 +13,26 @@ /* RVU LMTST */ #define LMT_TBL_OP_READ 0 #define LMT_TBL_OP_WRITE 1 -#define LMT_MAP_TABLE_SIZE (128 * 1024) #define LMT_MAPTBL_ENTRY_SIZE 16 +#define LMT_MAX_VFS 256 + +#define LMT_MAP_ENTRY_ENA BIT_ULL(20) +#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) /* Function to perform operations (read/write) on lmtst map table */ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, int lmt_tbl_op) { void __iomem *lmt_map_base; - u64 tbl_base; + u64 tbl_base, cfg; + int pfs, vfs; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + vfs = 1 << (cfg & 0xF); + pfs = 1 << ((cfg >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); return -ENOMEM; @@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, *val = readq(lmt_map_base + index); } else { writeq((*val), (lmt_map_base + index)); + + cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); + /* 2048 LMTLINES */ + cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); + + writeq(cfg, (lmt_map_base + (index + 8))); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S * 
changes effective. Write 1 for flush and read is being used as a * barrier and sets up a data dependency. Write to 0 after a write @@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { - return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } @@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); - pf = rvu_get_pf(pcifunc) & 0x1F; + pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index a1f9ec03c2ce..c827da626471 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; + int apr_pfs, apr_vfs; int buf_size = 10240; size_t off = 0; int index = 0; @@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + apr_vfs = 1 << (val & 0xF); + apr_pfs = 1 << ((val >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * + LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); kfree(buf); @@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", pf); - index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); @@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, /* Reading num of VFs per PF */ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); for (vf = 0; vf < num_vfs; vf++) { - index = (pf * rvu->hw->total_vfs * 16) + + index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 821fe242f821..da15bb451178 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -820,24 +820,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, - bool enable) -{ - struct npc_mcam *mcam = &rvu->hw->mcam; - int blkaddr, index; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); - if (blkaddr < 0) - return; - - /* Get 'pcifunc' of PF device */ - pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; - - index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, - NIXLF_BCAST_ENTRY); - 
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); -} - void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { @@ -1125,6 +1107,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; int index, blkaddr; @@ -1133,9 +1116,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, return; /* Ucast MCAM match entry of this PF/VF */ - index = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), + pfvf->nix_rx_intf)) { + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + } /* Nothing to do for VFs, on platforms where pkt replication * is not supported @@ -3588,3 +3574,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, return 0; } + +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *rule; + int ucast_idx, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false); + + npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0); + + npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx); + + mutex_lock(&mcam->lock); + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (rule->entry == ucast_idx) { + list_del(&rule->list); + kfree(rule); + break; + } + } + mutex_unlock(&mcam->lock); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c index 052ae5923e3a..32953cca108c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c @@ -60,6 +60,8 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + mutex_unlock(&rvu->mbox_lock); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c index 775fd4c35794..19e0d16b12f6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -11,3 +11,5 @@ EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process); +EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status); +EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index 5704520f9b02..4cd0fc4b0d20 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -18,33 +18,42 @@ #include "mbox.h" TRACE_EVENT(otx2_msg_alloc, - TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size), - TP_ARGS(pdev, id, size), + TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc), + TP_ARGS(pdev, id, size, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, id) __field(u64, size) + __field(u16, pcifunc) ), 
TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->size = size; + __entry->pcifunc = pcifunc; ), - TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev), - otx2_mbox_id2name(__entry->id), __entry->size) + TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), __entry->size, + __entry->pcifunc) ); TRACE_EVENT(otx2_msg_send, - TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size), - TP_ARGS(pdev, num_msgs, msg_size), + TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size, + u16 id, u16 pcifunc), + TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, num_msgs) __field(u64, msg_size) + __field(u16, id) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->num_msgs = num_msgs; __entry->msg_size = msg_size; + __entry->id = id; + __entry->pcifunc = pcifunc; ), - TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev), - __entry->num_msgs, __entry->msg_size) + TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n", + __get_str(dev), __entry->num_msgs, __entry->msg_size, + otx2_mbox_id2name(__entry->id), __entry->pcifunc) ); TRACE_EVENT(otx2_msg_check, @@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt, ); TRACE_EVENT(otx2_msg_process, - TP_PROTO(const struct pci_dev *pdev, u16 id, int err), - TP_ARGS(pdev, id, err), + TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc), + TP_ARGS(pdev, id, err, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, id) __field(int, err) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->err = err; + __entry->pcifunc = pcifunc; + ), + TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), + __entry->err, __entry->pcifunc) +); + +TRACE_EVENT(otx2_msg_wait_rsp, + TP_PROTO(const struct pci_dev *pdev), + TP_ARGS(pdev), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + ), + TP_fast_assign(__assign_str(dev) + ), + TP_printk("[%s] timed out while waiting for response\n", + __get_str(dev)) +); + +TRACE_EVENT(otx2_msg_status, + TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs), + TP_ARGS(pdev, msg, num_msgs), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u16, num_msgs) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->num_msgs = num_msgs; + ), + TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev), + __get_str(str), __entry->num_msgs) +); + +TRACE_EVENT(otx2_parse_dump, + TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word), + TP_ARGS(pdev, msg, word), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u64, w0) + __field(u64, w1) + __field(u64, w2) + __field(u64, w3) + __field(u64, w4) + __field(u64, w5) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->w0 = *(word + 0); + __entry->w1 = *(word + 1); + __entry->w2 = *(word + 2); + __entry->w3 = *(word + 3); + __entry->w4 = *(word + 4); + __entry->w5 = *(word + 5); ), - TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev), - otx2_mbox_id2name(__entry->id), __entry->err) + TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n", + __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2, + __entry->w3, __entry->w4, __entry->w5) ); #endif /* __RVU_TRACE_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 
c3b6e0f60a79..7f6a435ac680 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -357,9 +357,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf) mutex_lock(&pfvf->mbox.lock); /* Remove RQ's policer mapping */ - for (qidx = 0; qidx < hw->rx_queues; qidx++) - cn10k_map_unmap_rq_policer(pfvf, qidx, - hw->matchall_ipolicer, false); + for (qidx = 0; qidx < hw->rx_queues; qidx++) { + rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false); + if (rc) + dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).", + qidx, rc); + } rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c index fc59e50bafce..a6500e3673f2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -663,10 +663,10 @@ static int cn10k_ipsec_inb_add_state(struct xfrm_state *x, return -EOPNOTSUPP; } -static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, +static int cn10k_ipsec_outb_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xso.dev; struct cn10k_tx_sa_s *sa_entry; struct qmem *sa_info; struct otx2_nic *pf; @@ -676,7 +676,7 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, if (err) return err; - pf = netdev_priv(netdev); + pf = netdev_priv(dev); err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); if (err) @@ -700,18 +700,18 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, return 0; } -static int cn10k_ipsec_add_state(struct xfrm_state *x, +static int cn10k_ipsec_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) return cn10k_ipsec_inb_add_state(x, extack); else - return cn10k_ipsec_outb_add_state(x, extack); + return cn10k_ipsec_outb_add_state(dev, x, extack); } -static void cn10k_ipsec_del_state(struct xfrm_state *x) +static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x) { - struct net_device *netdev = x->xso.dev; struct cn10k_tx_sa_s *sa_entry; struct qmem *sa_info; struct otx2_nic *pf; @@ -720,7 +720,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) return; - pf = netdev_priv(netdev); + pf = netdev_priv(dev); sa_info = (struct qmem *)x->xso.offload_handle; sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; @@ -732,7 +732,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) err = cn10k_outb_write_sa(pf, sa_info); if (err) - netdev_err(netdev, "Error (%d) deleting SA\n", err); + netdev_err(dev, "Error (%d) deleting SA\n", err); x->xso.offload_handle = 0; qmem_free(pf->dev, sa_info); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index f3b9daffaec3..4c7e0f345cb5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -531,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, if (sw_tx_sc->encrypt) sectag_tci |= (MCS_TCI_E | MCS_TCI_C); - policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); + policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, + pfvf->netdev->mtu + OTX2_ETH_HLEN); /* Write SecTag excluding AN bits(1..0) */ policy |= 
FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 84cd029a85aa..6f572589f1e5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -2055,6 +2055,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t } EXPORT_SYMBOL(otx2_handle_ntuple_tc_features); +int otx2_set_hw_capabilities(struct otx2_nic *pfvf) +{ + struct mbox *mbox = &pfvf->mbox; + struct otx2_hw *hw = &pfvf->hw; + struct get_hw_cap_rsp *rsp; + struct msg_req *req; + int ret = -ENOMEM; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_get_hw_cap(mbox); + if (!req) + goto fail; + + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = -EINVAL; + goto fail; + } + + if (rsp->hw_caps & HW_CAP_MACSEC) + __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); + + mutex_unlock(&mbox->lock); + + return 0; +fail: + dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n"); + mutex_unlock(&mbox->lock); + return ret; +} + #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ int __weak \ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 1e88422825be..ca0e6ab12ceb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -356,6 +356,7 @@ struct otx2_flow_config { struct list_head flow_list_tc; u8 ucast_flt_cnt; bool ntuple; + u16 ntuple_cnt; }; struct dev_hw_ops { @@ -631,9 +632,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag); __set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag); } - - if (is_dev_cn10kb(pfvf->pdev)) - __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); } /* Register read/write APIs */ @@ -871,6 +869,7 @@ static struct _req_type __maybe_unused \ *otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \ { \ struct _req_type *req; \ + u16 pcifunc = mbox->pfvf->pcifunc; \ \ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ &mbox->mbox, 0, sizeof(struct _req_type), \ @@ -879,7 +878,8 @@ static struct _req_type __maybe_unused \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ - trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \ + req->hdr.pcifunc = pcifunc; \ + trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \ return req; \ } @@ -1043,6 +1043,7 @@ void otx2_disable_napi(struct otx2_nic *pf); irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); +int otx2_set_hw_capabilities(struct otx2_nic *pfvf); /* RSS configuration APIs*/ int otx2_rss_init(struct otx2_nic *pfvf); @@ -1107,6 +1108,8 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf, u64 iova, int len, u16 qidx, u16 flags); +void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf, + u64 dma_addr, int len, int *offset, u16 flags); u16 otx2_get_max_mtu(struct 
otx2_nic *pfvf); int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 33ec9a7f7c03..e13ae5484c19 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, if (!pfvf->flow_cfg) return 0; + pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16; otx2_alloc_mcam_entries(pfvf, ctx->val.vu16); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 010385b29988..45b8c9230184 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -315,7 +315,7 @@ static void otx2_get_pauseparam(struct net_device *netdev, struct otx2_nic *pfvf = netdev_priv(netdev); struct cgx_pause_frm_cfg *req, *rsp; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return; mutex_lock(&pfvf->mbox.lock); @@ -347,7 +347,7 @@ static int otx2_set_pauseparam(struct net_device *netdev, if (pause->autoneg) return -EOPNOTSUPP; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return -EOPNOTSUPP; if (pause->rx_pause) @@ -941,8 +941,8 @@ static u32 otx2_get_link(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); - /* LBK link is internal and always UP */ - if (is_otx2_lbkvf(pfvf->pdev)) + /* LBK and SDP links are internal and always UP */ + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return 1; return pfvf->linfo.link_up; } @@ -1413,7 +1413,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, { struct otx2_nic *pfvf = netdev_priv(netdev); - if (is_otx2_lbkvf(pfvf->pdev)) { + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) { cmd->base.duplex = DUPLEX_FULL; cmd->base.speed = SPEED_100000; } else { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 47bfd1fb37d4..64c6d9162ef6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf) mutex_unlock(&pfvf->mbox.lock); /* Allocate entries for Ntuple filters */ - count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); + count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt); if (count <= 0) { otx2_clear_ntuple_flow_info(pfvf, flow_cfg); return 0; @@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS; + pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT; /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index cfed9ec5b157..db7c466fdc39 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -465,6 +465,9 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
 
 	offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
 
+	trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)",
+			      vf_mbox->num_msgs);
+
 	for (id = 0; id < vf_mbox->num_msgs; id++) {
 		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
 					     offset);
@@ -473,7 +476,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
 			goto inval_msg;
 
 		/* Set VF's number in each of the msg */
-		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+		msg->pcifunc &= ~RVU_PFVF_FUNC_MASK;
 		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
 		offset = msg->next_msgoff;
 	}
@@ -503,6 +506,9 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
 
 	offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
 
+	trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)",
+			      vf_mbox->up_num_msgs);
+
 	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
 		msg = mdev->mbase + offset;
 
@@ -819,6 +825,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
 	pf = af_mbox->pfvf;
 
+	trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)",
+			      num_msgs);
+
 	for (id = 0; id < num_msgs; id++) {
 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
 		otx2_process_pfaf_mbox_msg(pf, msg);
@@ -974,6 +983,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
 
 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
 
+	trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)",
+			      num_msgs);
+
 	for (id = 0; id < num_msgs; id++) {
 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
 
@@ -1023,6 +1035,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
 
 		trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
 					 BIT_ULL(0));
+
+		trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)",
+				      hdr->num_msgs);
 	}
 
 	if (mbox_data & MBOX_DOWN_MSG) {
@@ -1039,6 +1054,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
 
 		trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
 					 BIT_ULL(0));
+
+		trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)",
+				      hdr->num_msgs);
 	}
 
 	return IRQ_HANDLED;
@@ -3048,7 +3066,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return err;
 	}
 
-	err = pci_request_regions(pdev, DRV_NAME);
+	err = pcim_request_all_regions(pdev, DRV_NAME);
 	if (err) {
 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
 		return err;
@@ -3057,7 +3075,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 	if (err) {
 		dev_err(dev, "DMA mask config failed, abort\n");
-		goto err_release_regions;
+		return err;
 	}
 
 	pci_set_master(pdev);
@@ -3067,10 +3085,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
 
 	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
-	if (!netdev) {
-		err = -ENOMEM;
-		goto err_release_regions;
-	}
+	if (!netdev)
+		return -ENOMEM;
 
 	pci_set_drvdata(pdev, netdev);
 	SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -3128,6 +3144,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_ptp_destroy;
 
+	otx2_set_hw_capabilities(pf);
+
 	err = cn10k_mcs_init(pf);
 	if (err)
 		goto err_del_mcam_entries;
@@ -3246,8 +3264,6 @@ err_detach_rsrc:
 err_free_netdev:
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
-err_release_regions:
-	pci_release_regions(pdev);
 	return err;
 }
 
@@ -3289,6 +3305,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
 	req = (struct cgx_link_info_msg *)msghdr;
 	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
 	req->hdr.sig = OTX2_MBOX_REQ_SIG;
+	req->hdr.pcifunc = pf->pcifunc;
 	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
 
 	otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
@@ -3447,8 +3464,6 @@ static void otx2_remove(struct pci_dev *pdev)
 	pci_free_irq_vectors(pf->pdev);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
-
-	pci_release_regions(pdev);
 }
 
 static struct pci_driver otx2_pf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index af8cabe828d0..99ace381cc78 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -335,6 +335,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
 	struct nix_rx_parse_s *parse = &cqe->parse;
 	struct nix_rx_sg_s *sg = &cqe->sg;
 	struct sk_buff *skb = NULL;
+	u64 *word = (u64 *)parse;
 	void *end, *start;
 	u32 metasize = 0;
 	u64 *seg_addr;
@@ -342,9 +343,12 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
 	int seg;
 
 	if (unlikely(parse->errlev || parse->errcode)) {
-		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) {
+			trace_otx2_parse_dump(pfvf->pdev, "Err:", word);
 			return;
+		}
 	}
 
+	trace_otx2_parse_dump(pfvf->pdev, "", word);
 	if (pfvf->xdp_prog)
 		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
@@ -1410,9 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
 	}
 }
 
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq,
-				struct xdp_frame *xdpf,
-				u64 dma_addr, int len, int *offset, u16 flags)
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+			 u64 dma_addr, int len, int *offset, u16 flags)
 {
 	struct nix_sqe_sg_s *sg = NULL;
 	u64 *iova = NULL;
@@ -1559,12 +1562,11 @@ handle_xdp_verdict:
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
-		break;
+		fallthrough;
 	case XDP_ABORTED:
-		if (xsk_buff)
-			xsk_buff_free(xsk_buff);
-		trace_xdp_exception(pfvf->netdev, prog, act);
-		break;
+		if (act == XDP_ABORTED)
+			trace_xdp_exception(pfvf->netdev, prog, act);
+		fallthrough;
 	case XDP_DROP:
 		cq->pool_ptrs++;
 		if (xsk_buff) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 7ef3ba477d49..8a8b598bd389 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -136,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
 
 		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
 		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
-		rsp->hdr.pcifunc = 0;
+		rsp->hdr.pcifunc = req->pcifunc;
 		rsp->hdr.rc = 0;
 		err = otx2_mbox_up_handler_cgx_link_event(
 			vf, (struct cgx_link_info_msg *)req, rsp);
@@ -548,7 +548,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return err;
 	}
 
-	err = pci_request_regions(pdev, DRV_NAME);
+	err = pcim_request_all_regions(pdev, DRV_NAME);
 	if (err) {
 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
 		return err;
@@ -557,7 +557,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 	if (err) {
 		dev_err(dev, "DMA mask config failed, abort\n");
-		goto err_release_regions;
+		return err;
 	}
 
 	pci_set_master(pdev);
@@ -565,10 +565,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	qcount = num_online_cpus();
 	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
 	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
-	if (!netdev) {
-		err = -ENOMEM;
-		goto err_release_regions;
-	}
+	if (!netdev)
+		return -ENOMEM;
 
 	pci_set_drvdata(pdev, netdev);
 	SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -729,9 +727,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 #ifdef CONFIG_DCB
-	err = otx2_dcbnl_set_ops(netdev);
-	if (err)
-		goto err_free_zc_bmap;
+	/* Priority flow control is not supported for LBK and SDP vf(s) */
+	if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
+		err = otx2_dcbnl_set_ops(netdev);
+		if (err)
+			goto err_free_zc_bmap;
+	}
 #endif
 
 	otx2_qos_init(vf, qos_txqs);
@@ -765,8 +766,6 @@ err_free_irq_vectors:
 err_free_netdev:
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
-err_release_regions:
-	pci_release_regions(pdev);
 	return err;
 }
 
@@ -815,8 +814,6 @@ static void otx2vf_remove(struct pci_dev *pdev)
 	pci_free_irq_vectors(vf->pdev);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
-
-	pci_release_regions(pdev);
 }
 
 static struct pci_driver otx2vf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
index ce10caea8511..b328aae23d73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -11,6 +11,7 @@
 #include <net/xdp.h>
 
 #include "otx2_common.h"
+#include "otx2_struct.h"
 #include "otx2_xsk.h"
 
 int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
@@ -196,11 +197,39 @@ void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int
 	sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
 }
 
+static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+				   u16 qidx)
+{
+	struct nix_sqe_hdr_s *sqe_hdr;
+	struct otx2_snd_queue *sq;
+	int offset;
+
+	sq = &pfvf->qset.sq[qidx];
+	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+	if (!sqe_hdr->total) {
+		sqe_hdr->aura = sq->aura_id;
+		sqe_hdr->df = 1;
+		sqe_hdr->sq = qidx;
+		sqe_hdr->pnc = 1;
+	}
+	sqe_hdr->total = len;
+	sqe_hdr->sqe_id = sq->head;
+
+	offset = sizeof(*sqe_hdr);
+
+	otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME);
+	sqe_hdr->sizem1 = (offset / 16) - 1;
+	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+}
+
 void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
 			  int queue, int budget)
 {
 	struct xdp_desc *xdp_desc = pool->tx_descs;
-	int err, i, work_done = 0, batch;
+	int i, batch;
 
 	budget = min(budget, otx2_read_free_sqe(pfvf, queue));
 	batch = xsk_tx_peek_release_desc_batch(pool, budget);
@@ -211,15 +240,6 @@ void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
 		dma_addr_t dma_addr;
 
 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
-		err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len,
-					     queue, OTX2_AF_XDP_FRAME);
-		if (!err) {
-			netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet err%d\n", err);
-			break;
-		}
-		work_done++;
+		otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue);
 	}
-
-	if (work_done)
-		xsk_tx_release(pool);
 }
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 0f844c14485a..5765bac119f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
 
 		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
 	} else if (level == NIX_TXSCH_LVL_TL2) {
+		/* configure parent txschq */
+		cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
+		cfg->regval[num_regs] = (u64)hw->tx_link << 16;
+		num_regs++;
+
 		/* configure link cfg */
 		if (level == pfvf->qos.link_cfg_lvl) {
 			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
@@ -1633,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
 	if (!node->is_static)
 		dwrr_del_node = true;
 
+	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
 	/* destroy the leaf node */
 	otx2_qos_disable_sq(pfvf, qid);
 	otx2_qos_destroy_node(pfvf, node);
@@ -1677,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
 	}
 
 	kfree(new_cfg);
-	/* update tx_real_queues */
-	otx2_qos_update_tx_netdev_queues(pfvf);
-
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index c5dbae0e513b..58d572ce08ef 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -256,6 +256,26 @@ out:
 	return err;
 }
 
+static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
+{
+	struct ndc_sync_op *req;
+	int rc;
+
+	mutex_lock(&pfvf->mbox.lock);
+
+	req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
+	if (!req) {
+		mutex_unlock(&pfvf->mbox.lock);
+		return -ENOMEM;
+	}
+
+	req->nix_lf_tx_sync = true;
+	req->npa_lf_sync = true;
+	rc = otx2_sync_mbox_msg(&pfvf->mbox);
+	mutex_unlock(&pfvf->mbox.lock);
+	return rc;
+}
+
 void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
 {
 	struct otx2_qset *qset = &pfvf->qset;
@@ -285,6 +305,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
 
 	otx2_qos_sqb_flush(pfvf, sq_idx);
 	otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
+	/* NIX/NPA NDC sync */
+	otx2_qos_nix_npa_ndc_sync(pfvf);
 	otx2_cleanup_tx_cqes(pfvf, cq);
 
 	mutex_lock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 04e08e06f30f..2cd3da3b6843 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -67,6 +67,8 @@ static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
 
 		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
 			(&priv->mbox.mbox, 0, &req->hdr);
+		if (IS_ERR(rsp))
+			goto exit;
 
 		for (ent = 0; ent < rsp->count; ent++)
 			rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
@@ -763,7 +765,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return err;
 	}
 
-	err = pci_request_regions(pdev, DRV_NAME);
+	err = pcim_request_all_regions(pdev, DRV_NAME);
 	if (err) {
 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
 		return err;
@@ -772,7 +774,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 	if (err) {
 		dev_err(dev, "DMA mask config failed, abort\n");
-		goto err_release_regions;
+		goto err_set_drv_data;
 	}
 
 	pci_set_master(pdev);
@@ -780,7 +782,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		err = -ENOMEM;
-		goto err_release_regions;
+		goto err_set_drv_data;
 	}
 
 	pci_set_drvdata(pdev, priv);
@@ -797,7 +799,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	err = otx2_init_rsrc(pdev, priv);
 	if (err)
-		goto err_release_regions;
+		goto err_set_drv_data;
 
 	priv->iommu_domain = iommu_get_domain_for_dev(dev);
 
@@ -820,9 +822,8 @@ err_detach_rsrc:
 	otx2_disable_mbox_intr(priv);
 	otx2_pfaf_mbox_destroy(priv);
 	pci_free_irq_vectors(pdev);
-err_release_regions:
+err_set_drv_data:
 	pci_set_drvdata(pdev, NULL);
-	pci_release_regions(pdev);
 	return err;
 }
 
@@ -842,7 +843,6 @@ static void rvu_rep_remove(struct pci_dev *pdev)
 	otx2_pfaf_mbox_destroy(priv);
 	pci_free_irq_vectors(priv->pdev);
 	pci_set_drvdata(pdev, NULL);
-	pci_release_regions(pdev);
 }
 
 static struct pci_driver rvu_rep_driver = {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
index 4cd53a2dae46..634f4543c1d7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
 
 static void prestera_counter_stats_work(struct work_struct *work)
 {
-	struct delayed_work *dl_work =
-		container_of(work, struct delayed_work, work);
+	struct delayed_work *dl_work = to_delayed_work(work);
 	struct prestera_counter *counter =
 		container_of(dl_work, struct prestera_counter, stats_dw);
 	struct prestera_counter_block *block;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 35857dc19542..c45d108b2f6d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev,
 		goto err_pci_enable_device;
 	}
 
-	err = pci_request_regions(pdev, driver_name);
+	err = pcim_request_all_regions(pdev, driver_name);
 	if (err) {
-		dev_err(&pdev->dev, "pci_request_regions failed\n");
+		dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
 		goto err_pci_request_regions;
 	}
 
@@ -938,7 +938,6 @@ err_pci_dev_alloc:
 err_pp_ioremap:
 err_mem_ioremap:
 err_dma_mask:
-	pci_release_regions(pdev);
 err_pci_request_regions:
 err_pci_enable_device:
 	return err;
@@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev)
 	pci_free_irq_vectors(pdev);
 	destroy_workqueue(fw->wq);
 	prestera_fw_uninit(fw);
-	pci_release_regions(pdev);
 }
 
 static const struct pci_device_id prestera_pci_devices[] = {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 2bf426cea6dd..72c1967768f4 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1175,7 +1175,7 @@ static int pxa168_eth_stop(struct net_device *dev)
 	/* Write to ICR to clear interrupts. */
 	wrl(pep, INT_W_CLEAR, 0);
 	napi_disable(&pep->napi);
-	del_timer_sync(&pep->timeout);
+	timer_delete_sync(&pep->timeout);
 	netif_carrier_off(dev);
 	free_irq(dev->irq, dev);
 	rxq_deinit(dev);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index a1bada9eaaf6..b2081d6e34f0 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2662,7 +2662,7 @@ static int skge_down(struct net_device *dev)
 	netif_tx_disable(dev);
 
 	if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
-		del_timer_sync(&skge->link_timer);
+		timer_delete_sync(&skge->link_timer);
 
 	napi_disable(&skge->napi);
 	netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index d7121c836508..e2a9aae8bc9b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5052,7 +5052,7 @@ static int sky2_suspend(struct device *dev)
 	if (!hw)
 		return 0;
 
-	del_timer_sync(&hw->watchdog_timer);
+	timer_delete_sync(&hw->watchdog_timer);
 	cancel_work_sync(&hw->restart_work);
 
 	rtnl_lock();