Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf.h              |   1
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_ethtool.c      |  29
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_main.c         | 289
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_main.c           |  47
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_sched.c          | 181
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_lib.c          |  18
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c |   9
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_txrx.c         |  45
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_txrx.h         |   8
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_virtchnl.c     |   2
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_virtchnl.h     |   1
11 files changed, 314 insertions, 316 deletions
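
The iavf changes below drop the driver-private crit_lock, previously a mutex taken via bounded mutex_trylock()/udelay() retry loops, and rely on the netdev instance lock instead: entry points that the core already invokes under that lock only assert it, while deferred work items take the lock themselves. A minimal sketch of that pattern, using the same <net/netdev_lock.h> helpers the diff adopts (the adapter structure and function names here are illustrative, not driver code):

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>
    #include <net/netdev_lock.h>

    struct example_adapter {
            struct net_device *netdev;
            struct delayed_work watchdog;
    };

    /* Called from an ethtool/ndo entry point: the core already holds
     * the per-netdev instance lock, so the callee only asserts it.
     */
    static int example_add_filter(struct example_adapter *ad)
    {
            netdev_assert_locked(ad->netdev);
            /* ... mutate state that the instance lock protects ... */
            return 0;
    }

    /* Deferred work is not invoked under the lock, so it takes the
     * lock itself rather than spinning on a private mutex_trylock().
     */
    static void example_watchdog(struct work_struct *work)
    {
            struct example_adapter *ad =
                    container_of(work, struct example_adapter, watchdog.work);

            netdev_lock(ad->netdev);
            /* ... run one step of the state machine ... */
            netdev_unlock(ad->netdev);
    }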
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 9de3e0ba3731..f7a98ff43a57 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -268,7 +268,6 @@ struct iavf_adapter {
 	struct list_head vlan_filter_list;
 	int num_vlan_filters;
 	struct list_head mac_filter_list;
-	struct mutex crit_lock;
 	/* Lock to protect accesses to MAC and VLAN lists */
 	spinlock_t mac_vlan_list_lock;
 	char misc_vector_name[IFNAMSIZ + 9];
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 288bb5b2e72e..2b2b315205b5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -4,6 +4,8 @@
 #include <linux/bitfield.h>
 #include <linux/uaccess.h>
 
+#include <net/netdev_lock.h>
+
 /* ethtool support for iavf */
 #include "iavf.h"
 
@@ -1256,9 +1258,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 {
 	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
 	struct iavf_fdir_fltr *fltr;
-	int count = 50;
 	int err;
 
+	netdev_assert_locked(adapter->netdev);
+
 	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
 		return -EOPNOTSUPP;
 
@@ -1277,14 +1280,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 	if (!fltr)
 		return -ENOMEM;
 
-	while (!mutex_trylock(&adapter->crit_lock)) {
-		if (--count == 0) {
-			kfree(fltr);
-			return -EINVAL;
-		}
-		udelay(1);
-	}
-
 	err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
 	if (!err)
 		err = iavf_fdir_add_fltr(adapter, fltr);
@@ -1292,7 +1287,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 	if (err)
 		kfree(fltr);
 
-	mutex_unlock(&adapter->crit_lock);
 	return err;
 }
 
@@ -1435,11 +1429,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 {
 	struct iavf_adv_rss *rss_old, *rss_new;
 	bool rss_new_add = false;
-	int count = 50, err = 0;
 	bool symm = false;
 	u64 hash_flds;
+	int err = 0;
 	u32 hdrs;
 
+	netdev_assert_locked(adapter->netdev);
+
 	if (!ADV_RSS_SUPPORT(adapter))
 		return -EOPNOTSUPP;
 
@@ -1463,15 +1459,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 		return -EINVAL;
 	}
 
-	while (!mutex_trylock(&adapter->crit_lock)) {
-		if (--count == 0) {
-			kfree(rss_new);
-			return -EINVAL;
-		}
-
-		udelay(1);
-	}
-
 	spin_lock_bh(&adapter->adv_rss_lock);
 	rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
 	if (rss_old) {
@@ -1500,8 +1487,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 	if (!err)
 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
 
-	mutex_unlock(&adapter->crit_lock);
-
 	if (!rss_new_add)
 		kfree(rss_new);
 
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6d7ba4d67a19..2c0bb41809a4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1287,11 +1287,11 @@ static void iavf_configure(struct iavf_adapter *adapter)
 /**
  * iavf_up_complete - Finish the last steps of bringing up a connection
  * @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
 static void iavf_up_complete(struct iavf_adapter *adapter)
 {
+	netdev_assert_locked(adapter->netdev);
+
 	iavf_change_state(adapter, __IAVF_RUNNING);
 	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
 
@@ -1410,13 +1410,13 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
 /**
  * iavf_down - Shutdown the connection processing
  * @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
 void iavf_down(struct iavf_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 
+	netdev_assert_locked(netdev);
+
 	if (adapter->state <= __IAVF_DOWN_PENDING)
 		return;
 
@@ -2025,22 +2025,21 @@ err:
  * iavf_finish_config - do all netdev work that needs RTNL
  * @work: our work_struct
  *
- * Do work that needs both RTNL and crit_lock.
- **/
+ * Do work that needs RTNL.
+ */
 static void iavf_finish_config(struct work_struct *work)
 {
 	struct iavf_adapter *adapter;
-	bool locks_released = false;
+	bool netdev_released = false;
 	int pairs, err;
 
 	adapter = container_of(work, struct iavf_adapter, finish_config);
 
 	/* Always take RTNL first to prevent circular lock dependency;
-	 * The dev->lock is needed to update the queue number
+	 * the dev->lock (== netdev lock) is needed to update the queue number.
 	 */
 	rtnl_lock();
 	netdev_lock(adapter->netdev);
-	mutex_lock(&adapter->crit_lock);
 
 	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
 	    adapter->netdev->reg_state == NETREG_REGISTERED &&
@@ -2059,22 +2058,21 @@ static void iavf_finish_config(struct work_struct *work)
 	netif_set_real_num_tx_queues(adapter->netdev, pairs);
 
 	if (adapter->netdev->reg_state != NETREG_REGISTERED) {
-		mutex_unlock(&adapter->crit_lock);
 		netdev_unlock(adapter->netdev);
-		locks_released = true;
+		netdev_released = true;
 		err = register_netdevice(adapter->netdev);
 		if (err) {
 			dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
 				err);
 
 			/* go back and try again.*/
-			mutex_lock(&adapter->crit_lock);
+			netdev_lock(adapter->netdev);
 			iavf_free_rss(adapter);
 			iavf_free_misc_irq(adapter);
 			iavf_reset_interrupt_capability(adapter);
 			iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
-			mutex_unlock(&adapter->crit_lock);
+			netdev_unlock(adapter->netdev);
 			goto out;
 		}
 	}
@@ -2090,10 +2088,8 @@ static void iavf_finish_config(struct work_struct *work)
 	}
 
 out:
-	if (!locks_released) {
-		mutex_unlock(&adapter->crit_lock);
+	if (!netdev_released)
 		netdev_unlock(adapter->netdev);
-	}
 	rtnl_unlock();
 }
 
@@ -2911,28 +2907,15 @@ err:
 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
 }
 
-/**
- * iavf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
- **/
-static void iavf_watchdog_task(struct work_struct *work)
+static const int IAVF_NO_RESCHED = -1;
+
+/* return: msec delay for requeueing itself */
+static int iavf_watchdog_step(struct iavf_adapter *adapter)
 {
-	struct iavf_adapter *adapter = container_of(work,
-						    struct iavf_adapter,
-						    watchdog_task.work);
-	struct net_device *netdev = adapter->netdev;
 	struct iavf_hw *hw = &adapter->hw;
 	u32 reg_val;
 
-	netdev_lock(netdev);
-	if (!mutex_trylock(&adapter->crit_lock)) {
-		if (adapter->state == __IAVF_REMOVE) {
-			netdev_unlock(netdev);
-			return;
-		}
-
-		goto restart_watchdog;
-	}
+	netdev_assert_locked(adapter->netdev);
 
 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
 		iavf_change_state(adapter, __IAVF_COMM_FAILED);
 
@@ -2940,39 +2923,19 @@ static void iavf_watchdog_task(struct work_struct *work)
 	switch (adapter->state) {
 	case __IAVF_STARTUP:
 		iavf_startup(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(30));
-		return;
+		return 30;
 	case __IAVF_INIT_VERSION_CHECK:
 		iavf_init_version_check(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(30));
-		return;
+		return 30;
 	case __IAVF_INIT_GET_RESOURCES:
 		iavf_init_get_resources(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(1));
-		return;
+		return 1;
 	case __IAVF_INIT_EXTENDED_CAPS:
 		iavf_init_process_extended_caps(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(1));
-		return;
+		return 1;
 	case __IAVF_INIT_CONFIG_ADAPTER:
 		iavf_init_config_adapter(adapter);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(1));
-		return;
+		return 1;
 	case __IAVF_INIT_FAILED:
 		if (test_bit(__IAVF_IN_REMOVE_TASK,
 			     &adapter->crit_section)) {
@@ -2980,27 +2943,18 @@ static void iavf_watchdog_task(struct work_struct *work)
 			 * watchdog task, iavf_remove should handle this state
 			 * as it can loop forever
 			 */
-			mutex_unlock(&adapter->crit_lock);
-			netdev_unlock(netdev);
-			return;
+			return IAVF_NO_RESCHED;
 		}
 
 		if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
 			dev_err(&adapter->pdev->dev,
 				"Failed to communicate with PF; waiting before retry\n");
 			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
 			iavf_shutdown_adminq(hw);
-			mutex_unlock(&adapter->crit_lock);
-			netdev_unlock(netdev);
-			queue_delayed_work(adapter->wq,
-					   &adapter->watchdog_task, (5 * HZ));
-			return;
+			return 5000;
 		}
 
 		/* Try again from failed step*/
 		iavf_change_state(adapter, adapter->last_state);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
-		return;
+		return 1000;
 	case __IAVF_COMM_FAILED:
 		if (test_bit(__IAVF_IN_REMOVE_TASK,
 			     &adapter->crit_section)) {
@@ -3010,9 +2964,7 @@ static void iavf_watchdog_task(struct work_struct *work)
 			 */
 			iavf_change_state(adapter, __IAVF_INIT_FAILED);
 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
-			mutex_unlock(&adapter->crit_lock);
-			netdev_unlock(netdev);
-			return;
+			return IAVF_NO_RESCHED;
 		}
 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -3030,18 +2982,9 @@ static void iavf_watchdog_task(struct work_struct *work)
 		}
 		adapter->aq_required = 0;
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq,
-				   &adapter->watchdog_task,
-				   msecs_to_jiffies(10));
-		return;
+		return 10;
 	case __IAVF_RESETTING:
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   HZ * 2);
-		return;
+		return 2000;
 	case __IAVF_DOWN:
 	case __IAVF_DOWN_PENDING:
 	case __IAVF_TESTING:
@@ -3068,9 +3011,7 @@ static void iavf_watchdog_task(struct work_struct *work)
 		break;
 	case __IAVF_REMOVE:
 	default:
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		return;
+		return IAVF_NO_RESCHED;
 	}
 
 	/* check for hw reset */
@@ -3080,24 +3021,29 @@ static void iavf_watchdog_task(struct work_struct *work)
 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
 		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
-		mutex_unlock(&adapter->crit_lock);
-		netdev_unlock(netdev);
-		queue_delayed_work(adapter->wq,
-				   &adapter->watchdog_task, HZ * 2);
-		return;
 	}
 
-	mutex_unlock(&adapter->crit_lock);
-restart_watchdog:
-	netdev_unlock(netdev);
+	return adapter->aq_required ? 20 : 2000;
+}
+
+static void iavf_watchdog_task(struct work_struct *work)
+{
+	struct iavf_adapter *adapter = container_of(work,
+						    struct iavf_adapter,
+						    watchdog_task.work);
+	struct net_device *netdev = adapter->netdev;
+	int msec_delay;
+
+	netdev_lock(netdev);
+	msec_delay = iavf_watchdog_step(adapter);
+
+	/* note that we schedule a different task */
 	if (adapter->state >= __IAVF_DOWN)
 		queue_work(adapter->wq, &adapter->adminq_task);
-	if (adapter->aq_required)
-		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   msecs_to_jiffies(20));
-	else
+
+	if (msec_delay != IAVF_NO_RESCHED)
 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
-				   HZ * 2);
+				   msecs_to_jiffies(msec_delay));
+	netdev_unlock(netdev);
 }
 
 /**
@@ -3105,14 +3051,15 @@ restart_watchdog:
 * @adapter: board private structure
 *
 * Set communication failed flag and free all resources.
- * NOTE: This function is expected to be called with crit_lock being held.
- **/
+ */
 static void iavf_disable_vf(struct iavf_adapter *adapter)
 {
 	struct iavf_mac_filter *f, *ftmp;
 	struct iavf_vlan_filter *fv, *fvtmp;
 	struct iavf_cloud_filter *cf, *cftmp;
 
+	netdev_assert_locked(adapter->netdev);
+
 	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
 
 	/* We don't use netif_running() because it may be true prior to
@@ -3212,17 +3159,7 @@ static void iavf_reset_task(struct work_struct *work)
 	int i = 0, err;
 	bool running;
 
-	/* When device is being removed it doesn't make sense to run the reset
-	 * task, just return in such a case.
-	 */
 	netdev_lock(netdev);
-	if (!mutex_trylock(&adapter->crit_lock)) {
-		if (adapter->state != __IAVF_REMOVE)
-			queue_work(adapter->wq, &adapter->reset_task);
-
-		netdev_unlock(netdev);
-		return;
-	}
 
 	iavf_misc_irq_disable(adapter);
 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
@@ -3267,7 +3204,6 @@ static void iavf_reset_task(struct work_struct *work)
 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
 			reg_val);
 		iavf_disable_vf(adapter);
-		mutex_unlock(&adapter->crit_lock);
 		netdev_unlock(netdev);
 		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}
 
@@ -3411,7 +3347,6 @@ continue_reset:
 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
 
 	wake_up(&adapter->reset_waitqueue);
-	mutex_unlock(&adapter->crit_lock);
 	netdev_unlock(netdev);
 
 	return;
@@ -3422,7 +3357,6 @@ reset_err:
 	}
 	iavf_disable_vf(adapter);
-	mutex_unlock(&adapter->crit_lock);
 	netdev_unlock(netdev);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 }
@@ -3435,6 +3369,7 @@ static void iavf_adminq_task(struct work_struct *work)
 {
 	struct iavf_adapter *adapter =
 		container_of(work, struct iavf_adapter, adminq_task);
+	struct net_device *netdev = adapter->netdev;
 	struct iavf_hw *hw = &adapter->hw;
 	struct iavf_arq_event_info event;
 	enum virtchnl_ops v_op;
@@ -3442,13 +3377,7 @@ static void iavf_adminq_task(struct work_struct *work)
 	u32 val, oldval;
 	u16 pending;
 
-	if (!mutex_trylock(&adapter->crit_lock)) {
-		if (adapter->state == __IAVF_REMOVE)
-			return;
-
-		queue_work(adapter->wq, &adapter->adminq_task);
-		goto out;
-	}
+	netdev_lock(netdev);
 
 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
 		goto unlock;
@@ -3515,8 +3444,7 @@ static void iavf_adminq_task(struct work_struct *work)
 freedom:
 	kfree(event.msg_buf);
 unlock:
-	mutex_unlock(&adapter->crit_lock);
-out:
+	netdev_unlock(netdev);
 	/* re-enable Admin queue interrupt cause */
 	iavf_misc_irq_enable(adapter);
 }
@@ -4209,8 +4137,8 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
 				    struct flow_cls_offload *cls_flower)
 {
 	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
-	struct iavf_cloud_filter *filter = NULL;
-	int err = -EINVAL, count = 50;
+	struct iavf_cloud_filter *filter;
+	int err;
 
 	if (tc < 0) {
 		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
@@ -4220,17 +4148,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
 	if (!filter)
 		return -ENOMEM;
-
-	while (!mutex_trylock(&adapter->crit_lock)) {
-		if (--count == 0) {
-			kfree(filter);
-			return err;
-		}
-		udelay(1);
-	}
-
 	filter->cookie = cls_flower->cookie;
 
+	netdev_lock(adapter->netdev);
+
 	/* bail out here if filter already exists */
 	spin_lock_bh(&adapter->cloud_filter_list_lock);
 	if (iavf_find_cf(adapter, &cls_flower->cookie)) {
@@ -4264,7 +4185,7 @@ err:
 	if (err)
 		kfree(filter);
 
-	mutex_unlock(&adapter->crit_lock);
+	netdev_unlock(adapter->netdev);
 	return err;
 }
 
@@ -4568,28 +4489,13 @@ static int iavf_open(struct net_device *netdev)
 		return -EIO;
 	}
 
-	while (!mutex_trylock(&adapter->crit_lock)) {
-		/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
-		 * is already taken and iavf_open is called from an upper
-		 * device's notifier reacting on NETDEV_REGISTER event.
-		 * We have to leave here to avoid dead lock.
-		 */
-		if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
-			return -EBUSY;
-
-		usleep_range(500, 1000);
-	}
-
-	if (adapter->state != __IAVF_DOWN) {
-		err = -EBUSY;
-		goto err_unlock;
-	}
+	if (adapter->state != __IAVF_DOWN)
+		return -EBUSY;
 
 	if (adapter->state == __IAVF_RUNNING &&
 	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
 		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
-		err = 0;
-		goto err_unlock;
+		return 0;
 	}
 
 	/* allocate transmit descriptors */
@@ -4608,9 +4514,7 @@ static int iavf_open(struct net_device *netdev)
 		goto err_req_irq;
 
 	spin_lock_bh(&adapter->mac_vlan_list_lock);
-
 	iavf_add_filter(adapter, adapter->hw.mac.addr);
-
 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
 	/* Restore filters that were removed with IFF_DOWN */
@@ -4623,8 +4527,6 @@ static int iavf_open(struct net_device *netdev)
 
 	iavf_irq_enable(adapter, true);
 
-	mutex_unlock(&adapter->crit_lock);
-
 	return 0;
 
 err_req_irq:
@@ -4634,8 +4536,6 @@ err_setup_rx:
 	iavf_free_all_rx_resources(adapter);
 err_setup_tx:
 	iavf_free_all_tx_resources(adapter);
-err_unlock:
-	mutex_unlock(&adapter->crit_lock);
 
 	return err;
 }
 
@@ -4659,12 +4559,8 @@ static int iavf_close(struct net_device *netdev)
 
 	netdev_assert_locked(netdev);
 
-	mutex_lock(&adapter->crit_lock);
-
-	if (adapter->state <= __IAVF_DOWN_PENDING) {
-		mutex_unlock(&adapter->crit_lock);
+	if (adapter->state <= __IAVF_DOWN_PENDING)
 		return 0;
-	}
 
 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
 	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
@@ -4695,7 +4591,6 @@ static int iavf_close(struct net_device *netdev)
 	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
 	iavf_free_traffic_irqs(adapter);
 
-	mutex_unlock(&adapter->crit_lock);
 	netdev_unlock(netdev);
 
 	/* We explicitly don't free resources here because the hardware is
@@ -4714,11 +4609,10 @@ static int iavf_close(struct net_device *netdev)
 					    msecs_to_jiffies(500));
 	if (!status)
 		netdev_warn(netdev, "Device resources not yet released\n");
+
 	netdev_lock(netdev);
-	mutex_lock(&adapter->crit_lock);
 	adapter->aq_required |= aq_to_restore;
-	mutex_unlock(&adapter->crit_lock);
+
 	return 0;
 }
 
@@ -5227,15 +5121,16 @@ iavf_shaper_set(struct net_shaper_binding *binding,
 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
 	const struct net_shaper_handle *handle = &shaper->handle;
 	struct iavf_ring *tx_ring;
-	int ret = 0;
+	int ret;
+
+	netdev_assert_locked(adapter->netdev);
 
-	mutex_lock(&adapter->crit_lock);
 	if (handle->id >= adapter->num_active_queues)
-		goto unlock;
+		return 0;
 
 	ret = iavf_verify_shaper(binding, shaper, extack);
 	if (ret)
-		goto unlock;
+		return ret;
 
 	tx_ring = &adapter->tx_rings[handle->id];
 
@@ -5245,9 +5140,7 @@ iavf_shaper_set(struct net_shaper_binding *binding,
 
 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
 
-unlock:
-	mutex_unlock(&adapter->crit_lock);
-	return ret;
+	return 0;
 }
 
 static int iavf_shaper_del(struct net_shaper_binding *binding,
@@ -5257,9 +5150,10 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
 	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
 	struct iavf_ring *tx_ring;
 
-	mutex_lock(&adapter->crit_lock);
+	netdev_assert_locked(adapter->netdev);
+
 	if (handle->id >= adapter->num_active_queues)
-		goto unlock;
+		return 0;
 
 	tx_ring = &adapter->tx_rings[handle->id];
 	tx_ring->q_shaper.bw_min = 0;
@@ -5268,8 +5162,6 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
 
 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
 
-unlock:
-	mutex_unlock(&adapter->crit_lock);
 	return 0;
 }
 
@@ -5530,10 +5422,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_alloc_qos_cap;
 	}
 
-	/* set up the locks for the AQ, do this only once in probe
-	 * and destroy them only once in remove
-	 */
-	mutex_init(&adapter->crit_lock);
 	mutex_init(&hw->aq.asq_mutex);
 	mutex_init(&hw->aq.arq_mutex);
 
@@ -5596,22 +5484,24 @@ static int iavf_suspend(struct device *dev_d)
 {
 	struct net_device *netdev = dev_get_drvdata(dev_d);
 	struct iavf_adapter *adapter = netdev_priv(netdev);
+	bool running;
 
 	netif_device_detach(netdev);
 
+	running = netif_running(netdev);
+	if (running)
+		rtnl_lock();
 	netdev_lock(netdev);
-	mutex_lock(&adapter->crit_lock);
 
-	if (netif_running(netdev)) {
-		rtnl_lock();
+	if (running)
 		iavf_down(adapter);
-		rtnl_unlock();
-	}
+
 	iavf_free_misc_irq(adapter);
 	iavf_reset_interrupt_capability(adapter);
 
-	mutex_unlock(&adapter->crit_lock);
 	netdev_unlock(netdev);
+	if (running)
+		rtnl_unlock();
 
 	return 0;
 }
@@ -5688,20 +5578,20 @@ static void iavf_remove(struct pci_dev *pdev)
 	 * There are flows where register/unregister netdev may race.
 	 */
 	while (1) {
-		mutex_lock(&adapter->crit_lock);
+		netdev_lock(netdev);
 		if (adapter->state == __IAVF_RUNNING ||
 		    adapter->state == __IAVF_DOWN ||
 		    adapter->state == __IAVF_INIT_FAILED) {
-			mutex_unlock(&adapter->crit_lock);
+			netdev_unlock(netdev);
 			break;
 		}
 		/* Simply return if we already went through iavf_shutdown */
 		if (adapter->state == __IAVF_REMOVE) {
-			mutex_unlock(&adapter->crit_lock);
+			netdev_unlock(netdev);
 			return;
 		}
 
-		mutex_unlock(&adapter->crit_lock);
+		netdev_unlock(netdev);
 		usleep_range(500, 1000);
 	}
 	cancel_delayed_work_sync(&adapter->watchdog_task);
@@ -5711,7 +5601,6 @@ static void iavf_remove(struct pci_dev *pdev)
 	unregister_netdev(netdev);
 
 	netdev_lock(netdev);
-	mutex_lock(&adapter->crit_lock);
 	dev_info(&adapter->pdev->dev, "Removing device\n");
 	iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5727,9 +5616,11 @@ static void iavf_remove(struct pci_dev *pdev)
 	iavf_misc_irq_disable(adapter);
 	/* Shut down all the garbage mashers on the detention level */
+	netdev_unlock(netdev);
 	cancel_work_sync(&adapter->reset_task);
 	cancel_delayed_work_sync(&adapter->watchdog_task);
 	cancel_work_sync(&adapter->adminq_task);
+	netdev_lock(netdev);
 
 	adapter->aq_required = 0;
 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -5747,8 +5638,6 @@ static void iavf_remove(struct pci_dev *pdev)
 	/* destroy the locks only once, here */
 	mutex_destroy(&hw->aq.arq_mutex);
 	mutex_destroy(&hw->aq.asq_mutex);
-	mutex_unlock(&adapter->crit_lock);
-	mutex_destroy(&adapter->crit_lock);
 	netdev_unlock(netdev);
 
 	iounmap(hw->hw_addr);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 20d3baf955e3..d97d4b25b30d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2741,6 +2741,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+	int v_idx;
+
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+		struct ice_tx_ring *ring;
+
+		ice_for_each_tx_ring(ring, q_vector->tx)
+			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+				break;
+
+		/* restore the value of last node prior to XDP setup */
+		q_vector->tx.tx_ring = ring;
+	}
+}
+
+/**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
  * @prog: bpf program that will be assigned to VSI
@@ -2803,7 +2824,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 	if (status) {
 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
 			status);
-		goto clear_xdp_rings;
+		goto unmap_xdp_rings;
 	}
 
 	/* assign the prog only when it's not already present on VSI;
@@ -2819,6 +2840,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 	ice_vsi_assign_bpf_prog(vsi, prog);
 
 	return 0;
+unmap_xdp_rings:
+	ice_unmap_xdp_rings(vsi);
 clear_xdp_rings:
 	ice_for_each_xdp_txq(vsi, i)
 		if (vsi->xdp_rings[i]) {
@@ -2835,6 +2858,8 @@ err_map_xdp:
 	mutex_unlock(&pf->avail_q_mutex);
 
 	devm_kfree(dev, vsi->xdp_rings);
+	vsi->xdp_rings = NULL;
+
 	return -ENOMEM;
 }
 
@@ -2850,7 +2875,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_pf *pf = vsi->back;
-	int i, v_idx;
+	int i;
 
 	/* q_vectors are freed in reset path so there's no point in detaching
 	 * rings
@@ -2858,17 +2883,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 	if (cfg_type == ICE_XDP_CFG_PART)
 		goto free_qmap;
 
-	ice_for_each_q_vector(vsi, v_idx) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-		struct ice_tx_ring *ring;
-
-		ice_for_each_tx_ring(ring, q_vector->tx)
-			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
-				break;
-
-		/* restore the value of last node prior to XDP setup */
-		q_vector->tx.tx_ring = ring;
-	}
+	ice_unmap_xdp_rings(vsi);
 
 free_qmap:
 	mutex_lock(&pf->avail_q_mutex);
@@ -3013,11 +3028,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
 		if (xdp_ring_err) {
 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+			goto resume_if;
 		} else {
 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
 							     ICE_XDP_CFG_FULL);
-			if (xdp_ring_err)
+			if (xdp_ring_err) {
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+				goto resume_if;
+			}
 		}
 		xdp_features_set_redirect_target(vsi->netdev, true);
 		/* reallocate Rx queues that are used for zero-copy */
@@ -3035,6 +3053,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
 	}
 
+resume_if:
 	if (if_running)
 		ret = ice_up(vsi);
 
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 6ca13c5dcb14..d9d09296d1d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -85,6 +85,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
 }
 
 /**
+ * ice_sched_find_next_vsi_node - find the next node for a given VSI
+ * @vsi_node: VSI support node to start search with
+ *
+ * Return: Next VSI support node, or NULL.
+ *
+ * The function returns a pointer to the next node from the VSI layer
+ * assigned to the given VSI, or NULL if there is no such node.
+ */
+static struct ice_sched_node *
+ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
+{
+	unsigned int vsi_handle = vsi_node->vsi_handle;
+
+	while ((vsi_node = vsi_node->sibling) != NULL)
+		if (vsi_node->vsi_handle == vsi_handle)
+			break;
+
+	return vsi_node;
+}
+
+/**
  * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
  * @hw: pointer to the HW struct
  * @cmd_opc: cmd opcode
@@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
 	if (parent->num_children < max_child_nodes) {
 		new_num_nodes = max_child_nodes - parent->num_children;
 	} else {
-		/* This parent is full, try the next sibling */
-		parent = parent->sibling;
+		/* This parent is full,
+		 * try the next available sibling.
+		 */
+		parent = ice_sched_find_next_vsi_node(parent);
 		/* Don't modify the first node TEID memory if the
 		 * first node was added already in the above call.
 		 * Instead send some temp memory for all other
@@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 	/* get the first queue group node from VSI sub-tree */
 	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
 	while (qgrp_node) {
+		struct ice_sched_node *next_vsi_node;
+
 		/* make sure the qgroup node is part of the VSI subtree */
 		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
 			if (qgrp_node->num_children < max_children &&
 			    qgrp_node->owner == owner)
 				break;
 		qgrp_node = qgrp_node->sibling;
+		if (qgrp_node)
+			continue;
+
+		next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+		if (!next_vsi_node)
+			break;
+
+		vsi_node = next_vsi_node;
+		qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
 	}
 
 	/* Select the best queue group */
@@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
 /**
  * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
  * @hw: pointer to the HW struct
- * @num_qs: number of queues
+ * @num_new_qs: number of new queues that will be added to the tree
  * @num_nodes: num nodes array
  *
  * This function calculates the number of VSI child nodes based on the
  * number of queues.
  */
 static void
-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
 {
-	u16 num = num_qs;
+	u16 num = num_new_qs;
 	u8 i, qgl, vsil;
 
 	qgl = ice_sched_get_qgrp_layer(hw);
@@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
 		if (!parent)
 			return -EIO;
 
-		if (i == vsil)
+		/* Do not modify the VSI handle for already existing VSI nodes
+		 * (if no new VSI node was added to the tree).
+		 * Assign the VSI handle only to newly added VSI nodes.
+		 */
+		if (i == vsil && num_added)
 			parent->vsi_handle = vsi_handle;
 	}
 
@@ -1813,6 +1851,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
 }
 
 /**
+ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
+ * @hw: pointer to the HW struct
+ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
+ * @new_numqs: new number of queues that has to be handled by the VSI
+ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
+ *
+ * This function recalculates the number of VSI support nodes that need to
+ * be added after adding more Tx queues for a given VSI.
+ * The number of new VSI support nodes that shall be added will be saved
+ * to the @new_num_nodes table for the VSI layer.
+ */
+static void
+ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
+				   struct ice_sched_node *vsi_node,
+				   unsigned int new_numqs, u16 *new_num_nodes)
+{
+	u32 vsi_nodes_cnt = 1;
+	u32 max_queue_cnt = 1;
+	u32 qgl, vsil;
+
+	qgl = ice_sched_get_qgrp_layer(hw);
+	vsil = ice_sched_get_vsi_layer(hw);
+
+	for (u32 i = vsil; i <= qgl; i++)
+		max_queue_cnt *= hw->max_children[i];
+
+	while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
+		vsi_nodes_cnt++;
+
+	if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
+		new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
+				      vsi_nodes_cnt;
+}
+
+/**
  * ice_sched_update_vsi_child_nodes - update VSI child nodes
  * @pi: port information structure
  * @vsi_handle: software VSI handle
@@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
 			return status;
 	}
 
-	if (new_numqs)
-		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
-	/* Keep the max number of queue configuration all the time. Update the
-	 * tree only if number of queues > previous number of queues. This may
+	ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
+					   new_numqs, new_num_nodes);
+	ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
+				       new_num_nodes);
+
+	/* Never decrease the number of queues in the tree. Update the tree
+	 * only if number of queues > previous number of queues. This may
 	 * leave some extra nodes in the tree if number of queues < previous
 	 * number but that wouldn't harm anything. Removing those extra nodes
 	 * may complicate the code if those nodes are part of SRL or
 	 * individually rate limited.
+	 * Also, add the required VSI support nodes if the existing ones cannot
+	 * handle the requested new number of queues.
 	 */
+	status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+						 new_num_nodes);
+	if (status)
+		return status;
+
 	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
 					       new_num_nodes, owner);
 	if (status)
@@ -2013,6 +2096,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
 }
 
 /**
+ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
+ * @pi: port information structure
+ * @vsi_node: pointer to the leftmost node of the VSI to be removed
+ * @owner: LAN or RDMA
+ * @tc: TC number
+ *
+ * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC.
+ *
+ * This function removes all the VSI support nodes associated with a given VSI
+ * and its LAN or RDMA children nodes from the scheduler tree.
+ */
+static int
+ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
+			 struct ice_sched_node *vsi_node, u8 owner, u8 tc)
+{
+	u16 vsi_handle = vsi_node->vsi_handle;
+	bool all_vsi_nodes_removed = true;
+	int j = 0;
+
+	while (vsi_node) {
+		struct ice_sched_node *next_vsi_node;
+
+		if (ice_sched_is_leaf_node_present(vsi_node)) {
+			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
+			return -EBUSY;
+		}
+		while (j < vsi_node->num_children) {
+			if (vsi_node->children[j]->owner == owner)
+				ice_free_sched_node(pi, vsi_node->children[j]);
+			else
+				j++;
+		}
+
+		next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+
+		/* remove the VSI if it has no children */
+		if (!vsi_node->num_children)
+			ice_free_sched_node(pi, vsi_node);
+		else
+			all_vsi_nodes_removed = false;
+
+		vsi_node = next_vsi_node;
+	}
+
+	/* clean up aggregator related VSI info if any */
+	if (all_vsi_nodes_removed)
+		ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+
+	return 0;
+}
+
+/**
  * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
  * @pi: port information structure
  * @vsi_handle: software VSI handle
@@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
 
 	ice_for_each_traffic_class(i) {
 		struct ice_sched_node *vsi_node, *tc_node;
-		u8 j = 0;
 
 		tc_node = ice_sched_get_tc_node(pi, i);
 		if (!tc_node)
@@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
 		if (!vsi_node)
 			continue;
 
-		if (ice_sched_is_leaf_node_present(vsi_node)) {
-			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
-			status = -EBUSY;
+		status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
+		if (status)
 			goto exit_sched_rm_vsi_cfg;
-		}
-		while (j < vsi_node->num_children) {
-			if (vsi_node->children[j]->owner == owner) {
-				ice_free_sched_node(pi, vsi_node->children[j]);
-				/* reset the counter again since the num
-				 * children will be updated after node removal
-				 */
-				j = 0;
-			} else {
-				j++;
-			}
-		}
-		/* remove the VSI if it has no children */
-		if (!vsi_node->num_children) {
-			ice_free_sched_node(pi, vsi_node);
-			vsi_ctx->sched.vsi_node[i] = NULL;
+
+		vsi_ctx->sched.vsi_node[i] = NULL;
 
-			/* clean up aggregator related VSI info if any */
-			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
-		}
 		if (owner == ICE_SCHED_NODE_OWNER_LAN)
 			vsi_ctx->sched.max_lanq[i] = 0;
 		else
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index bab12ecb2df5..4eb20ec2accb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1801,11 +1801,19 @@ void idpf_vc_event_task(struct work_struct *work)
 	if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
 		return;
 
-	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
-	    test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
-		set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
-		idpf_init_hard_reset(adapter);
-	}
+	if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
+		goto func_reset;
+
+	if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
+		goto drv_load;
+
+	return;
+
+func_reset:
+	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+drv_load:
+	set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+	idpf_init_hard_reset(adapter);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 2e356dd10812..993c354aa27a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 {
 	struct idpf_tx_offload_params offload = { };
 	struct idpf_tx_buf *first;
+	int csum, tso, needed;
 	unsigned int count;
 	__be16 protocol;
-	int csum, tso;
 
 	count = idpf_tx_desc_count_required(tx_q, skb);
 	if (unlikely(!count))
 		return idpf_tx_drop_skb(tx_q, skb);
 
-	if (idpf_tx_maybe_stop_common(tx_q,
-				      count + IDPF_TX_DESCS_PER_CACHE_LINE +
-				      IDPF_TX_DESCS_FOR_CTX)) {
+	needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
+	if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+				       IDPF_DESC_UNUSED(tx_q),
+				       needed, needed)) {
 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
 
 		u64_stats_update_begin(&tx_q->stats_sync);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 631679cdaa6f..5cf440e09d0a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2184,6 +2184,19 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
 }
 
+/* Global conditions to tell whether the txq (and related resources)
+ * has room to allow the use of "size" descriptors.
+ */
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+{
+	if (IDPF_DESC_UNUSED(tx_q) < size ||
+	    IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+		IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+	    IDPF_TX_BUF_RSV_LOW(tx_q))
+		return 0;
+	return 1;
+}
+
 /**
  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
  * @tx_q: the queue to be checked
@@ -2194,29 +2207,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
 				     unsigned int descs_needed)
 {
-	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
-		goto out;
-
-	/* If there are too many outstanding completions expected on the
-	 * completion queue, stop the TX queue to give the device some time to
-	 * catch up
-	 */
-	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
-		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
-		goto splitq_stop;
-
-	/* Also check for available book keeping buffers; if we are low, stop
-	 * the queue to wait for more completions
-	 */
-	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
-		goto splitq_stop;
-
-	return 0;
-
-splitq_stop:
-	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+	if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+				      idpf_txq_has_room(tx_q, descs_needed),
+				      1, 1))
+		return 0;
 
-out:
 	u64_stats_update_begin(&tx_q->stats_sync);
 	u64_stats_inc(&tx_q->q_stats.q_busy);
 	u64_stats_update_end(&tx_q->stats_sync);
@@ -2242,12 +2237,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
 	tx_q->next_to_use = val;
 
-	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
-		u64_stats_update_begin(&tx_q->stats_sync);
-		u64_stats_inc(&tx_q->q_stats.q_busy);
-		u64_stats_update_end(&tx_q->stats_sync);
-	}
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
(Only * applicable for weak-ordered memory model archs, diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index c779fe71df99..36a0f828a6f8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -1049,12 +1049,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq, u16 cleaned_count); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); -static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, - u32 needed) -{ - return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, - IDPF_DESC_UNUSED(tx_q), - needed, needed); -} - #endif /* !_IDPF_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 07a9f5ae34fd..24febaaa8fbb 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -347,7 +347,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr) * All waiting threads will be woken-up and their transaction aborted. Further * operations on that object will fail. */ -static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) +void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) { int i; diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h index 3522c1238ea2..77578206bada 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h @@ -150,5 +150,6 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport); int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); +void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr); #endif /* _IDPF_VIRTCHNL_H_ */ |