Path: blob/master/tools/sched_ext/include/scx/bpf_arena_common.bpf.h
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#pragma once

#ifndef PAGE_SIZE
#define PAGE_SIZE __PAGE_SIZE
/*
 * for older kernels try sizeof(struct genradix_node)
 * or flexible:
 * static inline long __bpf_page_size(void) {
 *	return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node);
 * }
 * but generated code is not great.
 */
#endif

#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
#define __arena __attribute__((address_space(1)))
#define __arena_global __attribute__((address_space(1)))
#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
#else

/* emit instruction:
 * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
 *
 * This is a workaround for LLVM compiler versions without
 * __BPF_FEATURE_ADDR_SPACE_CAST that do not automatically cast between arena
 * pointers and native kernel/userspace ones. In this case we explicitly do so
 * with cast_kern() and cast_user(). E.g., in the Linux kernel tree,
 * tools/testing/selftests/bpf includes tests that use these macros to implement
 * linked lists and hashtables backed by arena memory. In sched_ext, we use
 * cast_kern() and cast_user() for compatibility with older LLVM toolchains.
 */
#ifndef bpf_addr_space_cast
#define bpf_addr_space_cast(var, dst_as, src_as)\
	asm volatile(".byte 0xBF;		\
		     .ifc %[reg], r0;		\
		     .byte 0x00;		\
		     .endif;			\
		     .ifc %[reg], r1;		\
		     .byte 0x11;		\
		     .endif;			\
		     .ifc %[reg], r2;		\
		     .byte 0x22;		\
		     .endif;			\
		     .ifc %[reg], r3;		\
		     .byte 0x33;		\
		     .endif;			\
		     .ifc %[reg], r4;		\
		     .byte 0x44;		\
		     .endif;			\
		     .ifc %[reg], r5;		\
		     .byte 0x55;		\
		     .endif;			\
		     .ifc %[reg], r6;		\
		     .byte 0x66;		\
		     .endif;			\
		     .ifc %[reg], r7;		\
		     .byte 0x77;		\
		     .endif;			\
		     .ifc %[reg], r8;		\
		     .byte 0x88;		\
		     .endif;			\
		     .ifc %[reg], r9;		\
		     .byte 0x99;		\
		     .endif;			\
		     .short %[off];		\
		     .long %[as]"		\
		     : [reg]"+r"(var)		\
		     : [off]"i"(BPF_ADDR_SPACE_CAST) \
		     , [as]"i"((dst_as << 16) | src_as));
#endif

#define __arena
#define __arena_global SEC(".addr_space.1")
#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
#endif

void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
				    int node_id, __u64 flags) __ksym __weak;
void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
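
/*
 * A minimal usage sketch of the kfuncs above, in the style of the
 * __bpf_page_size() sketch earlier in this header. It assumes the usual
 * libbpf map-definition conventions (bpf_helpers.h); the "arena" map and
 * example_alloc() are hypothetical names, and -1 as node_id means any NUMA
 * node. cast_kern() is required before dereferencing only when LLVM does
 * not emit address-space casts itself (it is a nop otherwise):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARENA);
 *		__uint(map_flags, BPF_F_MMAPABLE);
 *		__uint(max_entries, 10);
 *	} arena SEC(".maps");
 *
 *	static int example_alloc(void)
 *	{
 *		__u64 __arena *page;
 *
 *		page = bpf_arena_alloc_pages(&arena, NULL, 1, -1, 0);
 *		if (!page)
 *			return -ENOMEM;
 *		cast_kern(page);
 *		*page = 42;
 *		bpf_arena_free_pages(&arena, page, 1);
 *		return 0;
 *	}
 */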

/*
 * Note that cond_break can only be portably used in the body of a breakable
 * construct, whereas can_loop can be used anywhere.
 */
#ifdef TEST
#define can_loop true
#define __cond_break(expr) expr
#else
#ifdef __BPF_FEATURE_MAY_GOTO
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	   bool ret = true;				\
	   asm volatile goto("may_goto %l[l_break]"	\
			     :::: l_break);		\
	   goto l_continue;				\
	   l_break: ret = false;			\
	   l_continue:;					\
	   ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	   asm volatile goto("may_goto %l[l_break]"	\
			     :::: l_break);		\
	   goto l_continue;				\
	   l_break: expr;				\
	   l_continue:;					\
	})
#else
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	   bool ret = true;				\
	   asm volatile goto("1:.byte 0xe5;		\
			     .byte 0;			\
			     .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
			     .short 0"			\
			     :::: l_break);		\
	   goto l_continue;				\
	   l_break: ret = false;			\
	   l_continue:;					\
	   ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	   asm volatile goto("1:.byte 0xe5;		\
			     .byte 0;			\
			     .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
			     .short 0"			\
			     :::: l_break);		\
	   goto l_continue;				\
	   l_break: expr;				\
	   l_continue:;					\
	})
#else
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	   bool ret = true;				\
	   asm volatile goto("1:.byte 0xe5;		\
			     .byte 0;			\
			     .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
			     .short 0"			\
			     :::: l_break);		\
	   goto l_continue;				\
	   l_break: ret = false;			\
	   l_continue:;					\
	   ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	   asm volatile goto("1:.byte 0xe5;		\
			     .byte 0;			\
			     .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
			     .short 0"			\
			     :::: l_break);		\
	   goto l_continue;				\
	   l_break: expr;				\
	   l_continue:;					\
	})
#endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
#endif /* __BPF_FEATURE_MAY_GOTO */
#endif /* TEST */

#define cond_break __cond_break(break)
#define cond_break_label(label) __cond_break(goto label)

void bpf_preempt_disable(void) __weak __ksym;
void bpf_preempt_enable(void) __weak __ksym;
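
/*
 * A usage sketch for can_loop and cond_break above; example_sum() and its
 * arguments are hypothetical. Either form lets the verifier bound an
 * otherwise unbounded loop: can_loop can appear in any condition, while
 * cond_break (expanding to "break") is valid only inside a breakable
 * construct:
 *
 *	static __u64 example_sum(__u64 __arena *arr, __u64 n)
 *	{
 *		__u64 i, sum = 0;
 *
 *		cast_kern(arr);
 *		for (i = 0; i < n && can_loop; i++)
 *			sum += arr[i];
 *		return sum;
 *	}
 *
 * or, exiting from inside the loop body instead:
 *
 *	for (i = 0; i < n; i++) {
 *		sum += arr[i];
 *		cond_break;
 *	}
 */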
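
/*
 * A usage sketch for the preemption-control kfuncs above; the body of the
 * critical section is hypothetical. The calls must be balanced on every
 * program path, and the program will not be preempted or migrated between
 * them:
 *
 *	bpf_preempt_disable();
 *	... update per-CPU state that must stay on this CPU ...
 *	bpf_preempt_enable();
 */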