Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c        40
-rw-r--r--  net/core/gro_cells.c  10
-rw-r--r--  net/core/skbuff.c      1
3 files changed, 46 insertions, 5 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index a64cef2c537e..2acfa44927da 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -12176,6 +12176,35 @@ static void dev_memory_provider_uninstall(struct net_device *dev)
 	}
 }
 
+/* devices must be UP and netdev_lock()'d */
+static void netif_close_many_and_unlock(struct list_head *close_head)
+{
+	struct net_device *dev, *tmp;
+
+	netif_close_many(close_head, false);
+
+	/* ... now unlock them */
+	list_for_each_entry_safe(dev, tmp, close_head, close_list) {
+		netdev_unlock(dev);
+		list_del_init(&dev->close_list);
+	}
+}
+
+static void netif_close_many_and_unlock_cond(struct list_head *close_head)
+{
+#ifdef CONFIG_LOCKDEP
+	/* We can only track up to MAX_LOCK_DEPTH locks per task.
+	 *
+	 * Reserve half the available slots for additional locks possibly
+	 * taken by notifiers and (soft)irqs.
+	 */
+	unsigned int limit = MAX_LOCK_DEPTH / 2;
+
+	if (lockdep_depth(current) > limit)
+		netif_close_many_and_unlock(close_head);
+#endif
+}
+
 void unregister_netdevice_many_notify(struct list_head *head,
 				      u32 portid, const struct nlmsghdr *nlh)
 {
@@ -12208,17 +12237,18 @@ void unregister_netdevice_many_notify(struct list_head *head,
 
 	/* If device is running, close it first. Start with ops locked... */
 	list_for_each_entry(dev, head, unreg_list) {
+		if (!(dev->flags & IFF_UP))
+			continue;
 		if (netdev_need_ops_lock(dev)) {
 			list_add_tail(&dev->close_list, &close_head);
 			netdev_lock(dev);
 		}
+		netif_close_many_and_unlock_cond(&close_head);
 	}
-	netif_close_many(&close_head, true);
-	/* ... now unlock them and go over the rest. */
+	netif_close_many_and_unlock(&close_head);
+	/* ... now go over the rest. */
 	list_for_each_entry(dev, head, unreg_list) {
-		if (netdev_need_ops_lock(dev))
-			netdev_unlock(dev);
-		else
+		if (!netdev_need_ops_lock(dev))
 			list_add_tail(&dev->close_list, &close_head);
 	}
 	netif_close_many(&close_head, true);
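
The dev.c hunks above bound how many device instance locks are held (and tracked by lockdep) at once during batched unregistration: running devices are still closed through a shared close_list, but netif_close_many_and_unlock_cond() flushes that list early whenever the task already holds more than MAX_LOCK_DEPTH / 2 locks, reserving the other half for notifiers and (soft)irqs. Below is a minimal, illustrative userspace sketch of the same bounded-batch pattern; MAX_TRACKED_LOCKS, FLUSH_LIMIT, struct item and flush_batch() are made-up names standing in for the kernel symbols, not actual kernel APIs.

/* Illustrative only: a userspace stand-in for the bounded-batch close in
 * unregister_netdevice_many_notify(). MAX_TRACKED_LOCKS plays the role of
 * MAX_LOCK_DEPTH; held_locks plays the role of lockdep_depth(current).
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_TRACKED_LOCKS	48			/* stand-in for MAX_LOCK_DEPTH */
#define FLUSH_LIMIT		(MAX_TRACKED_LOCKS / 2)	/* reserve half the slots */

struct item {
	int id;
	struct item *next;
};

static struct item *batch;
static int held_locks;

/* Close and "unlock" everything queued so far, emptying the batch. */
static void flush_batch(void)
{
	while (batch) {
		struct item *it = batch;

		batch = it->next;
		printf("closing item %d\n", it->id);
		held_locks--;			/* drop the per-item lock */
		free(it);
	}
}

int main(void)
{
	for (int i = 0; i < 100; i++) {
		struct item *it = malloc(sizeof(*it));

		if (!it)
			return 1;
		it->id = i;
		it->next = batch;
		batch = it;
		held_locks++;			/* take the per-item lock */

		/* Flush early so the tracked depth stays bounded. */
		if (held_locks > FLUSH_LIMIT)
			flush_batch();
	}
	flush_batch();				/* final partial batch */
	return 0;
}

As in the kernel code, the depth check runs after each new element is locked and queued, so the batch is flushed before the lock tracker can overflow instead of only once after the full pass.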
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index ff8e5b64bf6b..b43911562f4d 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -8,11 +8,13 @@
 struct gro_cell {
 	struct sk_buff_head napi_skbs;
 	struct napi_struct napi;
+	local_lock_t bh_lock;
 };
 
 int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
+	bool have_bh_lock = false;
 	struct gro_cell *cell;
 	int res;
 
@@ -25,6 +27,8 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 		goto unlock;
 	}
 
+	local_lock_nested_bh(&gcells->cells->bh_lock);
+	have_bh_lock = true;
 	cell = this_cpu_ptr(gcells->cells);
 
 	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
@@ -39,6 +43,9 @@ drop:
 	if (skb_queue_len(&cell->napi_skbs) == 1)
 		napi_schedule(&cell->napi);
 
+	if (have_bh_lock)
+		local_unlock_nested_bh(&gcells->cells->bh_lock);
+
 	res = NET_RX_SUCCESS;
 
 unlock:
@@ -54,6 +61,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
 	struct sk_buff *skb;
 	int work_done = 0;
 
+	__local_lock_nested_bh(&cell->bh_lock);
 	while (work_done < budget) {
 		skb = __skb_dequeue(&cell->napi_skbs);
 		if (!skb)
@@ -64,6 +72,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget)
 		napi_complete_done(napi, work_done);
+	__local_unlock_nested_bh(&cell->bh_lock);
 	return work_done;
 }
 
@@ -79,6 +88,7 @@ int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
 		__skb_queue_head_init(&cell->napi_skbs);
+		local_lock_init(&cell->bh_lock);
 
 		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
 		netif_napi_add(dev, &cell->napi, gro_cell_poll);
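
The gro_cells.c hunks above guard each per-CPU skb queue with a local_lock_t taken via local_lock_nested_bh() / __local_lock_nested_bh(): on non-PREEMPT_RT kernels the lock compiles away to at most lockdep annotations, while on PREEMPT_RT it is a real per-CPU lock, so the queue stays consistent even though BH sections can be preempted there. A hedged sketch of the same pattern applied to a hypothetical per-CPU queue follows; my_cell, my_cells, my_cells_init() and my_cells_enqueue() are invented names, while the local_lock_*, percpu and skb-queue helpers are the kernel APIs used by the patch itself.

#include <linux/errno.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

/* One queue per CPU, protected against BH preemption on PREEMPT_RT. */
struct my_cell {
	struct sk_buff_head skbs;
	local_lock_t bh_lock;
};

static struct my_cell __percpu *my_cells;

static int my_cells_init(void)
{
	int cpu;

	my_cells = alloc_percpu(struct my_cell);
	if (!my_cells)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct my_cell *cell = per_cpu_ptr(my_cells, cpu);

		__skb_queue_head_init(&cell->skbs);
		local_lock_init(&cell->bh_lock);
	}
	return 0;
}

/* Runs in BH context: take the nested-BH lock before touching the
 * current CPU's queue, mirroring gro_cells_receive() above. */
static void my_cells_enqueue(struct sk_buff *skb)
{
	struct my_cell *cell;

	local_lock_nested_bh(&my_cells->bh_lock);
	cell = this_cpu_ptr(my_cells);
	__skb_queue_tail(&cell->skbs, skb);
	local_unlock_nested_bh(&my_cells->bh_lock);
}

The per-CPU data is only touched between lock and unlock, which is what lets the queue manipulation stay lockless on !PREEMPT_RT while remaining correct once BH-disabled code becomes preemptible.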
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bc12790017b0..6be01454f262 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -7200,6 +7200,7 @@ nodefer: kfree_skb_napi_cache(skb);
 
 	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
 	DEBUG_NET_WARN_ON_ONCE(skb->destructor);
+	DEBUG_NET_WARN_ON_ONCE(skb_nfct(skb));
 
 	sdn = per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + numa_node_id();
 