author    Tejun Heo <tj@kernel.org>	2025-10-24 13:33:50 -1000
committer Tejun Heo <tj@kernel.org>	2025-10-24 13:37:37 -1000
commit    dcb938c4532872b42f1615b12776b9e6caf8ed91 (patch)
tree      5a0745701941de4efee74f2516b00a4ce2909599 /tools
parent    71d7847cad4475f1f795c7737e08b604b448ca70 (diff)
sched_ext: Add ___compat suffix to scx_bpf_dsq_insert___v2 in compat.bpf.h
2dbbdeda77a6 ("sched_ext: Fix scx_bpf_dsq_insert() backward binary
compatibility") renamed the new bool-returning variant to
scx_bpf_dsq_insert___v2 in the kernel. However, libbpf currently only strips
___SUFFIX on the BPF side, not on kernel symbols, so the compat wrapper
couldn't match the kernel kfunc and would always fall back to the old variant
even when the new one was available.

Add an extra ___compat suffix as a workaround - libbpf strips one suffix on
the BPF side leaving ___v2, which then matches the kernel kfunc directly. In
the future, when libbpf strips all suffixes on both sides, all suffixes can be
dropped.

Fixes: 2dbbdeda77a6 ("sched_ext: Fix scx_bpf_dsq_insert() backward binary compatibility")
Cc: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
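For illustration, a minimal sketch of how a sched_ext BPF scheduler consumes
the wrapper. The callback name example_enqueue and the error handling are
hypothetical and not part of this patch; the sketch assumes the scheduler
includes scx/common.bpf.h, which pulls in compat.bpf.h:

/*
 * Hypothetical enqueue callback.  scx_bpf_dsq_insert() below resolves to the
 * static inline compat wrapper from compat.bpf.h, which picks the kernel
 * kfunc at program load time via bpf_ksym_exists().
 */
#include <scx/common.bpf.h>

void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
	/*
	 * On kernels with the bool-returning kfunc (v6.19+) the return value
	 * reflects whether the insertion took effect; on older kernels the
	 * wrapper calls the void variant and unconditionally reports true.
	 */
	if (!scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags))
		scx_bpf_error("dsq insert failed");
}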
Diffstat (limited to 'tools')
-rw-r--r--	tools/sched_ext/include/scx/compat.bpf.h | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index a023b71991a6..26bead92fa04 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -237,15 +237,17 @@ scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime
 /*
  * v6.19: scx_bpf_dsq_insert() now returns bool instead of void. Move
  * scx_bpf_dsq_insert() decl to common.bpf.h and drop compat helper after v6.22.
+ * The extra ___compat suffix is to work around libbpf not ignoring ___SUFFIX on
+ * kernel side. The entire suffix can be dropped later.
  */
-bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+bool scx_bpf_dsq_insert___v2___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
 void scx_bpf_dsq_insert___v1(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
 static inline bool
 scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
 {
-	if (bpf_ksym_exists(scx_bpf_dsq_insert___v2)) {
-		return scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags);
+	if (bpf_ksym_exists(scx_bpf_dsq_insert___v2___compat)) {
+		return scx_bpf_dsq_insert___v2___compat(p, dsq_id, slice, enq_flags);
 	} else {
 		scx_bpf_dsq_insert___v1(p, dsq_id, slice, enq_flags);
 		return true;