From 9a675ba55a96a45a9fb69e6a5c43f80c6682e541 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Mon, 16 Oct 2023 14:57:38 +0200
Subject: [PATCH] net, bpf: Add a warning if NAPI cb missed xdp_do_flush().
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A few drivers were missing an xdp_do_flush() invocation after
XDP_REDIRECT.

Add three helper functions, one for each of the per-CPU flush lists.
Each returns true if its per-CPU list is non-empty and flushes the list.

Add xdp_do_check_flushed(), which invokes each helper function and emits
a warning if one of them found a non-empty list.

Hide everything behind CONFIG_DEBUG_NET.

Suggested-by: Jesper Dangaard Brouer
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Daniel Borkmann
Reviewed-by: Toke Høiland-Jørgensen
Acked-by: Jakub Kicinski
Acked-by: John Fastabend
Link: https://lore.kernel.org/bpf/20231016125738.Yt79p1uF@linutronix.de
---
 include/linux/bpf.h    |  3 +++
 include/net/xdp_sock.h |  9 +++++++++
 kernel/bpf/cpumap.c    | 10 ++++++++++
 kernel/bpf/devmap.c    | 10 ++++++++++
 net/core/dev.c         |  2 ++
 net/core/dev.h         |  6 ++++++
 net/core/filter.c      | 16 ++++++++++++++++
 net/xdp/xsk.c          | 10 ++++++++++
 8 files changed, 66 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d3c51a5075086d..b4b40b45962b21 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2478,6 +2478,9 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
 		     enum bpf_dynptr_type type, u32 offset, u32 size);
 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+
+bool dev_check_flush(void);
+bool cpu_map_check_flush(void);
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 69b472604b86f5..7dd0df2f6f8e6c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -109,4 +109,13 @@ static inline void __xsk_map_flush(void)
 
 #endif /* CONFIG_XDP_SOCKETS */
 
+#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
+bool xsk_map_check_flush(void);
+#else
+static inline bool xsk_map_check_flush(void)
+{
+	return false;
+}
+#endif
+
 #endif /* _LINUX_XDP_SOCK_H */
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e42a1bdb7f5365..8a0bb80fe48a34 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -764,6 +764,16 @@ void __cpu_map_flush(void)
 	}
 }
 
+#ifdef CONFIG_DEBUG_NET
+bool cpu_map_check_flush(void)
+{
+	if (list_empty(this_cpu_ptr(&cpu_map_flush_list)))
+		return false;
+	__cpu_map_flush();
+	return true;
+}
+#endif
+
 static int __init cpu_map_init(void)
 {
 	int cpu;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 4d42f6ed6c11ae..a936c704d4e773 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -418,6 +418,16 @@ void __dev_flush(void)
 	}
 }
 
+#ifdef CONFIG_DEBUG_NET
+bool dev_check_flush(void)
+{
+	if (list_empty(this_cpu_ptr(&dev_flush_list)))
+		return false;
+	__dev_flush();
+	return true;
+}
+#endif
+
 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
  * by local_bh_disable() (from XDP calls inside NAPI). The
  * rcu_read_lock_bh_held() below makes lockdep accept both.
diff --git a/net/core/dev.c b/net/core/dev.c
index 97e7b9833db9ff..4420831180c653 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6535,6 +6535,8 @@ static int __napi_poll(struct napi_struct *n, bool *repoll)
 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
 		work = n->poll(n, weight);
 		trace_napi_poll(n, work, weight);
+
+		xdp_do_check_flushed(n);
 	}
 
 	if (unlikely(work > weight))
diff --git a/net/core/dev.h b/net/core/dev.h
index e075e198092cc1..f66125857af773 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -136,4 +136,10 @@ static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
 }
 
 int rps_cpumask_housekeeping(struct cpumask *mask);
+
+#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
+void xdp_do_check_flushed(struct napi_struct *napi);
+#else
+static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
+#endif
 #endif
diff --git a/net/core/filter.c b/net/core/filter.c
index cc2e4babc85fbe..21d75108c2e94b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -83,6 +83,8 @@
 #include
 #include
 
+#include "dev.h"
+
 static const struct bpf_func_proto *
 bpf_sk_base_func_proto(enum bpf_func_id func_id);
 
@@ -4208,6 +4210,20 @@ void xdp_do_flush(void)
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush);
 
+#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
+void xdp_do_check_flushed(struct napi_struct *napi)
+{
+	bool ret;
+
+	ret = dev_check_flush();
+	ret |= cpu_map_check_flush();
+	ret |= xsk_map_check_flush();
+
+	WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n",
+		  napi->poll);
+}
+#endif
+
 void bpf_clear_redirect_map(struct bpf_map *map)
 {
 	struct bpf_redirect_info *ri;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f5e96e0d6e01d4..ba070fd37d2445 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -391,6 +391,16 @@ void __xsk_map_flush(void)
 	}
 }
 
+#ifdef CONFIG_DEBUG_NET
+bool xsk_map_check_flush(void)
+{
+	if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
+		return false;
+	__xsk_map_flush();
+	return true;
+}
+#endif
+
 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
 {
 	xskq_prod_submit_n(pool->cq, nb_entries);
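
For illustration only, not part of the patch above: a minimal sketch of the driver-side contract that the new xdp_do_check_flushed() warning enforces. The mydrv_* names, struct mydrv_rx_ring and the two helpers are hypothetical; the point is that a NAPI poll callback which handled an XDP_REDIRECT verdict must call xdp_do_flush() before returning, in the same NAPI/softirq context, otherwise the per-CPU devmap/cpumap/xskmap flush lists stay non-empty and the CONFIG_DEBUG_NET check in __napi_poll() fires.

/* Hypothetical driver sketch; assumes the usual driver includes such as
 * <linux/netdevice.h>, <linux/filter.h> and <net/xdp.h>.
 */
static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	/* struct mydrv_rx_ring and the mydrv_* helpers are made up. */
	struct mydrv_rx_ring *ring = container_of(napi, struct mydrv_rx_ring, napi);
	bool needs_flush = false;
	int done = 0;

	while (done < budget) {
		struct xdp_buff xdp;

		if (!mydrv_next_rx_frame(ring, &xdp))
			break;

		/* Run the attached XDP program; on XDP_REDIRECT the frame is
		 * queued on one of the per-CPU flush lists and still needs a
		 * flush.
		 */
		if (mydrv_run_xdp(ring, &xdp) == XDP_REDIRECT)
			needs_flush = true;

		done++;
	}

	/* This is the call xdp_do_check_flushed() warns about when missing:
	 * it must happen before the poll callback returns, in the same
	 * NAPI/softirq context that performed the redirects.
	 */
	if (needs_flush)
		xdp_do_flush();

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}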