author     David S. Miller <davem@davemloft.net>  2022-09-30 13:09:11 +0100
committer  David S. Miller <davem@davemloft.net>  2022-09-30 13:09:11 +0100
commit     0bafedc536499a533dd7a94c9c980d53f3ca2afc
tree       19f7b9aeb504fcfd27cb56b94cbf34c22722e3dd /net/xfrm/xfrm_input.c
parent     f4ce91ce12a7c6ead19b128ffa8cff6e3ded2a14
parent     4f4920669d21e1060b7243e5118dc3b71ced1276
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
Steffen Klassert says:

====================
pull request (net): ipsec 2022-09-29

1) Use the inner instead of the outer protocol for GSO on inter address
   family tunnels. This fixes the GSO case for address family tunnels.
   From Sabrina Dubroca.

2) Reset ipcomp_scratches with NULL when freed, otherwise it holds
   obsolete address. From Khalid Masum.

3) Reinject transport-mode packets through workqueue instead of a
   tasklet. The tasklet might take too long to finish. From Liu Jian.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
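Of the three items, only 3) touches net/xfrm/xfrm_input.c, so it is the only
one visible in the diff below; a consolidated sketch of its queue-plus-work
pattern follows the diff at the end of this page. For item 2) the idiom is
simply to clear the cached pointer once the per-CPU scratch buffers are
freed, so a later caller sees "not allocated" rather than an obsolete
address. A minimal sketch of that idiom, with invented names (scratch_bufs,
free_scratch_bufs) rather than the real ipcomp code:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/vmalloc.h>

/* Hypothetical per-CPU scratch table; a stand-in, not ipcomp_scratches. */
static void * __percpu *scratch_bufs;

static void free_scratch_bufs(void)
{
	int cpu;

	if (!scratch_bufs)
		return;

	for_each_possible_cpu(cpu)
		vfree(*per_cpu_ptr(scratch_bufs, cpu));

	free_percpu(scratch_bufs);
	scratch_bufs = NULL;	/* don't leave a freed address behind */
}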
Diffstat (limited to 'net/xfrm/xfrm_input.c')
 net/xfrm/xfrm_input.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b2f4ec9c537f..aa5220565763 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -24,7 +24,8 @@
 #include "xfrm_inout.h"
 
 struct xfrm_trans_tasklet {
-	struct tasklet_struct tasklet;
+	struct work_struct work;
+	spinlock_t queue_lock;
 	struct sk_buff_head queue;
 };
 
@@ -760,18 +761,22 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
 }
 EXPORT_SYMBOL(xfrm_input_resume);
 
-static void xfrm_trans_reinject(struct tasklet_struct *t)
+static void xfrm_trans_reinject(struct work_struct *work)
 {
-	struct xfrm_trans_tasklet *trans = from_tasklet(trans, t, tasklet);
+	struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
 	struct sk_buff_head queue;
 	struct sk_buff *skb;
 
 	__skb_queue_head_init(&queue);
+	spin_lock_bh(&trans->queue_lock);
 	skb_queue_splice_init(&trans->queue, &queue);
+	spin_unlock_bh(&trans->queue_lock);
 
+	local_bh_disable();
 	while ((skb = __skb_dequeue(&queue)))
 		XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
 					       NULL, skb);
+	local_bh_enable();
 }
 
 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
@@ -789,8 +794,10 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 	XFRM_TRANS_SKB_CB(skb)->finish = finish;
 	XFRM_TRANS_SKB_CB(skb)->net = net;
+	spin_lock_bh(&trans->queue_lock);
 	__skb_queue_tail(&trans->queue, skb);
-	tasklet_schedule(&trans->tasklet);
+	spin_unlock_bh(&trans->queue_lock);
+	schedule_work(&trans->work);
 	return 0;
 }
 EXPORT_SYMBOL(xfrm_trans_queue_net);
 
@@ -817,7 +824,8 @@ void __init xfrm_input_init(void)
 		struct xfrm_trans_tasklet *trans;
 
 		trans = &per_cpu(xfrm_trans_tasklet, i);
+		spin_lock_init(&trans->queue_lock);
 		__skb_queue_head_init(&trans->queue);
-		tasklet_setup(&trans->tasklet, xfrm_trans_reinject);
+		INIT_WORK(&trans->work, xfrm_trans_reinject);
 	}
 }
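
As promised above, here is how the pieces from the hunks fit together: a
per-CPU skb queue protected by a spinlock and drained from process context
by a work item, with BHs disabled around the hand-off so the callbacks still
run in a BH-off section much as they did under the tasklet. Everything below
is a hypothetical, self-contained sketch (pkt_defer, pkt_defer_queue_skb and
the consume_skb() stand-in for the real finish callback are invented names),
not the xfrm code itself:

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct pkt_defer {
	struct work_struct work;
	spinlock_t queue_lock;		/* producers vs. the draining worker */
	struct sk_buff_head queue;
};

static DEFINE_PER_CPU(struct pkt_defer, pkt_defer_queue);

/* Worker: splice out the pending skbs under the lock, then process them
 * with BHs disabled, roughly as the old tasklet context would have. */
static void pkt_defer_work(struct work_struct *work)
{
	struct pkt_defer *pd = container_of(work, struct pkt_defer, work);
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);

	spin_lock_bh(&pd->queue_lock);
	skb_queue_splice_init(&pd->queue, &queue);
	spin_unlock_bh(&pd->queue_lock);

	local_bh_disable();
	while ((skb = __skb_dequeue(&queue)))
		consume_skb(skb);	/* stand-in for the real finish callback */
	local_bh_enable();
}

/* Producer: queue an skb on the current CPU's list and kick the worker. */
static void pkt_defer_queue_skb(struct sk_buff *skb)
{
	struct pkt_defer *pd;

	pd = get_cpu_ptr(&pkt_defer_queue);	/* pin the CPU while choosing a queue */
	spin_lock_bh(&pd->queue_lock);
	__skb_queue_tail(&pd->queue, skb);
	spin_unlock_bh(&pd->queue_lock);
	schedule_work(&pd->work);
	put_cpu_ptr(&pkt_defer_queue);
}

static int __init pkt_defer_init(void)
{
	struct sk_buff *skb;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct pkt_defer *pd = &per_cpu(pkt_defer_queue, cpu);

		spin_lock_init(&pd->queue_lock);
		__skb_queue_head_init(&pd->queue);
		INIT_WORK(&pd->work, pkt_defer_work);
	}

	/* Exercise the path once with a dummy skb. */
	skb = alloc_skb(0, GFP_KERNEL);
	if (skb)
		pkt_defer_queue_skb(skb);
	return 0;
}

static void __exit pkt_defer_exit(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		flush_work(&per_cpu(pkt_defer_queue, cpu).work);
}

module_init(pkt_defer_init);
module_exit(pkt_defer_exit);
MODULE_LICENSE("GPL");

Splicing the whole queue out under the lock keeps the producer-side critical
section short; the worker then hands the packets on without holding the lock,
which is what lets it run for as long as it needs without blocking enqueues.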