/* File: tools/sched_ext/include/scx/compat.bpf.h */
/* SPDX-License-Identifier: GPL-2.0 */1/*2* Copyright (c) 2024 Meta Platforms, Inc. and affiliates.3* Copyright (c) 2024 Tejun Heo <[email protected]>4* Copyright (c) 2024 David Vernet <[email protected]>5*/6#ifndef __SCX_COMPAT_BPF_H7#define __SCX_COMPAT_BPF_H89#define __COMPAT_ENUM_OR_ZERO(__type, __ent) \10({ \11__type __ret = 0; \12if (bpf_core_enum_value_exists(__type, __ent)) \13__ret = __ent; \14__ret; \15})1617/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */18#define __COMPAT_scx_bpf_task_cgroup(p) \19(bpf_ksym_exists(scx_bpf_task_cgroup) ? \20scx_bpf_task_cgroup((p)) : NULL)2122/*23* v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are24* renamed to unload the verb.25*26* Build error is triggered if old names are used. New binaries work with both27* new and old names. The compat macros will be removed on v6.15 release.28*29* scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by30* 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").31* Preserve __COMPAT macros until v6.15.32*/33void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;34void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;35bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;36void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;37void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;38bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;39bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;40int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;4142#define scx_bpf_dsq_insert(p, dsq_id, 
slice, enq_flags) \43(bpf_ksym_exists(scx_bpf_dsq_insert) ? \44scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) : \45scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags)))4647#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags) \48(bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ? \49scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \50scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))5152#define scx_bpf_dsq_move_to_local(dsq_id) \53(bpf_ksym_exists(scx_bpf_dsq_move_to_local) ? \54scx_bpf_dsq_move_to_local((dsq_id)) : \55scx_bpf_consume___compat((dsq_id)))5657#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice) \58(bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ? \59scx_bpf_dsq_move_set_slice((it__iter), (slice)) : \60(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ? \61scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) : \62(void)0))6364#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime) \65(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ? \66scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) : \67(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ? \68scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) : \69(void) 0))7071#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \72(bpf_ksym_exists(scx_bpf_dsq_move) ? \73scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) : \74(bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ? \75scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \76false))7778#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \79(bpf_ksym_exists(scx_bpf_dsq_move_vtime) ? \80scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) : \81(bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ? 
\82scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \83false))8485#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \86(bpf_ksym_exists(bpf_cpumask_populate) ? \87(bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)8889#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \90_Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")9192#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags) \93_Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")9495#define scx_bpf_consume(dsq_id) ({ \96_Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \97false; \98})99100#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \101_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")102103#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \104_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")105106#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \107_Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \108false; \109})110111#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \112_Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \113false; \114})115116#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \117_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")118119#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \120_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")121122#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \123_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to 
__COMPAT_scx_bpf_dsq_move()"); \124false; \125})126127#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \128_Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \129false; \130})131132/**133* __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on134* in a compatible way. We will preserve this __COMPAT helper until v6.16.135*136* @enq_flags: enqueue flags from ops.enqueue()137*138* Return: True if SCX_ENQ_CPU_SELECTED is turned on in @enq_flags139*/140static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)141{142#ifdef HAVE_SCX_ENQ_CPU_SELECTED143/*144* This is the case that a BPF code compiled against vmlinux.h145* where the enum SCX_ENQ_CPU_SELECTED exists.146*/147148/*149* We should temporarily suspend the macro expansion of150* 'SCX_ENQ_CPU_SELECTED'. This avoids 'SCX_ENQ_CPU_SELECTED' being151* rewritten to '__SCX_ENQ_CPU_SELECTED' when 'SCX_ENQ_CPU_SELECTED'152* is defined in 'scripts/gen_enums.py'.153*/154#pragma push_macro("SCX_ENQ_CPU_SELECTED")155#undef SCX_ENQ_CPU_SELECTED156u64 flag;157158/*159* When the kernel did not have SCX_ENQ_CPU_SELECTED,160* select_task_rq_scx() has never been skipped. Thus, this case161* should be considered that the CPU has already been selected.162*/163if (!bpf_core_enum_value_exists(enum scx_enq_flags,164SCX_ENQ_CPU_SELECTED))165return true;166167flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);168return enq_flags & flag;169170/*171* Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.172*/173#pragma pop_macro("SCX_ENQ_CPU_SELECTED")174#else175/*176* This is the case that a BPF code compiled against vmlinux.h177* where the enum SCX_ENQ_CPU_SELECTED does NOT exist.178*/179return true;180#endif /* HAVE_SCX_ENQ_CPU_SELECTED */181}182183184#define scx_bpf_now() \185(bpf_ksym_exists(scx_bpf_now) ? 
\186scx_bpf_now() : \187bpf_ktime_get_ns())188189/*190* v6.15: Introduce event counters.191*192* Preserve the following macro until v6.17.193*/194#define __COMPAT_scx_bpf_events(events, size) \195(bpf_ksym_exists(scx_bpf_events) ? \196scx_bpf_events(events, size) : ({}))197198/*199* v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle200* cpumasks.201*202* Preserve the following __COMPAT_scx_*_node macros until v6.17.203*/204#define __COMPAT_scx_bpf_nr_node_ids() \205(bpf_ksym_exists(scx_bpf_nr_node_ids) ? \206scx_bpf_nr_node_ids() : 1U)207208#define __COMPAT_scx_bpf_cpu_node(cpu) \209(bpf_ksym_exists(scx_bpf_cpu_node) ? \210scx_bpf_cpu_node(cpu) : 0)211212#define __COMPAT_scx_bpf_get_idle_cpumask_node(node) \213(bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ? \214scx_bpf_get_idle_cpumask_node(node) : \215scx_bpf_get_idle_cpumask()) \216217#define __COMPAT_scx_bpf_get_idle_smtmask_node(node) \218(bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ? \219scx_bpf_get_idle_smtmask_node(node) : \220scx_bpf_get_idle_smtmask())221222#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) \223(bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ? \224scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) : \225scx_bpf_pick_idle_cpu(cpus_allowed, flags))226227#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) \228(bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ? \229scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) : \230scx_bpf_pick_any_cpu(cpus_allowed, flags))231232/*233* v6.18: Add a helper to retrieve the current task running on a CPU.234*235* Keep this helper available until v6.20 for compatibility.236*/237static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)238{239struct rq *rq;240241if (bpf_ksym_exists(scx_bpf_cpu_curr))242return scx_bpf_cpu_curr(cpu);243244rq = scx_bpf_cpu_rq(cpu);245246return rq ? rq->curr : NULL;247}248249/*250* Define sched_ext_ops. 
This may be expanded to define multiple variants for251* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().252*/253#define SCX_OPS_DEFINE(__name, ...) \254SEC(".struct_ops.link") \255struct sched_ext_ops __name = { \256__VA_ARGS__, \257};258259#endif /* __SCX_COMPAT_BPF_H */260261262