author      Paolo Abeni <pabeni@redhat.com>  2024-07-02 15:26:59 +0200
committer   Paolo Abeni <pabeni@redhat.com>  2024-07-02 15:27:00 +0200
commit      2a01a8895015ad479df7cfb8f127501d1c8da7c9 (patch)
tree        a822126b33b57d6d3abde8196612806e49545f3f /net
parent      e27d7168f0c8c024344e9541513aa71d921402a5 (diff)
parent      e3d69f585d651aba877e18866de7e8cfa2476caa (diff)
Merge branch 'net-bpf_net_context-cleanups'
Sebastian Andrzej Siewior says:

====================
net: bpf_net_context cleanups.

A small series with bpf_net_context cleanups/improvements. Jakub asked
for #1 and #2 and while looking around I made #3.
====================

Link: https://patch.msgid.link/20240628103020.1766241-1-bigeasy@linutronix.de
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'net')
-rw-r--r--  net/core/filter.c  33
-rw-r--r--  net/xdp/xsk.c      19
2 files changed, 30 insertions, 22 deletions
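
Both entry points changed below now fetch every flush list that was used in the current bpf_net_context with a single call to bpf_net_ctx_get_all_used_flush_lists(), and skip the per-map flush work for any list that comes back NULL; the helper itself is introduced elsewhere in this series (outside net/, so it does not appear in this diff). A rough sketch of the contract the callers rely on, using illustrative context-field and flag names rather than the identifiers used in the tree:

/* Rough sketch of the contract xdp_do_flush() and xdp_do_check_flushed()
 * depend on: each pointer is set to the corresponding per-context flush
 * list if anything was queued on it in this bpf_net_context, and to NULL
 * otherwise.  The context fields and "used" flags here are illustrative
 * stand-ins, not the real identifiers.
 */
static inline void
bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
				     struct list_head **lh_dev,
				     struct list_head **lh_xsk)
{
	struct bpf_net_context *ctx = bpf_net_ctx_get();

	*lh_map = *lh_dev = *lh_xsk = NULL;	/* NULL means "nothing to flush" */

	if (ctx->example_used & EXAMPLE_DEV_FLUSH)	/* illustrative flag */
		*lh_dev = &ctx->dev_map_flush_list;
	if (ctx->example_used & EXAMPLE_CPU_MAP_FLUSH)
		*lh_map = &ctx->cpu_map_flush_list;
	if (ctx->example_used & EXAMPLE_XSK_MAP_FLUSH)
		*lh_xsk = &ctx->xskmap_map_flush_list;
}

With that contract, the debug path can simply treat any non-NULL list as a missed flush instead of calling the per-map *_check_flush() helpers it removes.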
diff --git a/net/core/filter.c b/net/core/filter.c
index eb1c4425c06f..403d23faf22e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4277,22 +4277,39 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
*/
void xdp_do_flush(void)
{
- __dev_flush();
- __cpu_map_flush();
- __xsk_map_flush();
+ struct list_head *lh_map, *lh_dev, *lh_xsk;
+
+ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
+ if (lh_dev)
+ __dev_flush(lh_dev);
+ if (lh_map)
+ __cpu_map_flush(lh_map);
+ if (lh_xsk)
+ __xsk_map_flush(lh_xsk);
}
EXPORT_SYMBOL_GPL(xdp_do_flush);

#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi)
{
- bool ret;
+ struct list_head *lh_map, *lh_dev, *lh_xsk;
+ bool missed = false;

- ret = dev_check_flush();
- ret |= cpu_map_check_flush();
- ret |= xsk_map_check_flush();
+ bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk);
+ if (lh_dev) {
+ __dev_flush(lh_dev);
+ missed = true;
+ }
+ if (lh_map) {
+ __cpu_map_flush(lh_map);
+ missed = true;
+ }
+ if (lh_xsk) {
+ __xsk_map_flush(lh_xsk);
+ missed = true;
+ }

- WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
+ WARN_ONCE(missed, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
napi->poll);
}
#endif
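
Nothing changes for drivers: xdp_do_flush() keeps its void signature and is still expected once at the end of a NAPI poll that handled XDP_REDIRECT, which is exactly the situation the WARN_ONCE above guards against. A minimal, hypothetical poll routine showing that call site (example_rx_ring and example_rx_one() are made-up names, not taken from any driver):

/* Hypothetical driver bits, for illustration only. */
struct example_rx_ring {
	struct napi_struct napi;
	/* ... ring state ... */
};

/* Processes one frame and runs the XDP program; returns the XDP verdict,
 * or a negative value once the ring is drained.  Illustrative only.
 */
static int example_rx_one(struct example_rx_ring *ring);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_rx_ring *ring = container_of(napi, struct example_rx_ring, napi);
	bool need_flush = false;
	int done = 0;

	while (done < budget) {
		int act = example_rx_one(ring);

		if (act < 0)
			break;			/* ring drained */
		if (act == XDP_REDIRECT)
			need_flush = true;	/* a target is now on a flush list */
		done++;
	}

	/* One flush per poll round, after all frames were processed. */
	if (need_flush)
		xdp_do_flush();

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}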
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index ed062e038389..7e16336044b2 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -370,22 +370,23 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
- struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
int err;

err = xsk_rcv(xs, xdp);
if (err)
return err;

- if (!xs->flush_node.prev)
+ if (!xs->flush_node.prev) {
+ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+
list_add(&xs->flush_node, flush_list);
+ }

return 0;
}

-void __xsk_map_flush(void)
+void __xsk_map_flush(struct list_head *flush_list)
{
- struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
struct xdp_sock *xs, *tmp;

list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -394,16 +395,6 @@ void __xsk_map_flush(void)
}
}

-#ifdef CONFIG_DEBUG_NET
-bool xsk_map_check_flush(void)
-{
- if (list_empty(bpf_net_ctx_get_xskmap_flush_list()))
- return false;
- __xsk_map_flush();
- return true;
-}
-#endif
-
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
xskq_prod_submit_n(pool->cq, nb_entries);