GitHub Repository: torvalds/linux
Path: blob/master/kernel/bpf/helpers.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>
#include <linux/bpf_verifier.h>
#include <linux/uaccess.h>
#include <linux/verification.h>
#include <linux/task_work.h>
#include <linux/irq_work.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, its
 * verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto, so that the verifier can properly check the
 * arguments.
 *
 * Different map implementations rely on rcu in the map methods
 * lookup/update/delete, therefore eBPF programs must run under an rcu lock
 * if the program is allowed to access maps, so check rcu_read_lock_held()
 * or rcu_read_lock_trace_held() in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func = bpf_map_lookup_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func = bpf_map_update_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_PTR_TO_MAP_VALUE,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func = bpf_map_delete_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};
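
/*
 * Illustrative sketch (not part of this file): how a BPF program reaches the
 * three helpers above through the uapi wrappers from <bpf/bpf_helpers.h>.
 * The map, section, and function names below are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counters SEC(".maps");
 *
 *	SEC("tracepoint/syscalls/sys_enter_write")
 *	int count_writes(void *ctx)
 *	{
 *		__u32 key = bpf_get_current_pid_tgid() >> 32;
 *		__u64 init = 1, *val;
 *
 *		val = bpf_map_lookup_elem(&counters, &key);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		else
 *			bpf_map_update_elem(&counters, &key, &init, BPF_NOEXIST);
 *		return 0;
 *	}
 */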

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func = bpf_map_push_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func = bpf_map_pop_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func = bpf_map_peek_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
};

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func = bpf_map_lookup_percpu_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func = bpf_user_rnd_u32,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func = bpf_get_smp_processor_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.allow_fastcall = true,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func = bpf_get_numa_node_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func = bpf_ktime_get_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func = bpf_ktime_get_boot_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func = bpf_ktime_get_coarse_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_tai_ns)
{
	/* NMI safe access to clock tai */
	return ktime_get_tai_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
	.func = bpf_ktime_get_tai_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func = bpf_get_current_pid_tgid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};
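
/*
 * Illustrative sketch (not part of this file): the return value above packs
 * the tgid into the upper 32 bits and the pid (the kernel's per-thread id)
 * into the lower 32 bits, so BPF programs typically split it like this:
 *
 *	__u64 id = bpf_get_current_pid_tgid();
 *	__u32 tgid = id >> 32;		// userspace "process id"
 *	__u32 pid = (__u32)id;		// userspace "thread id"
 */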

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func = bpf_get_current_uid_gid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func = bpf_get_current_comm,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	preempt_disable();
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
	preempt_enable();
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func = bpf_spin_lock,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id = BPF_PTR_POISON,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func = bpf_spin_unlock,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id = BPF_PTR_POISON,
};
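
/*
 * Illustrative sketch (not part of this file): bpf_spin_lock() protects
 * fields of a single map value. The struct, map, and field names are
 * hypothetical.
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		__u64 cnt;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&m, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */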

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func = bpf_jiffies64,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func = bpf_get_current_cgroup_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func = bpf_get_current_ancestor_cgroup_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   s64 *, res)
{
	long long _res;
	int err;

	*res = 0;
	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func = bpf_strtol,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size = sizeof(s64),
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   u64 *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	*res = 0;
	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func = bpf_strtoul,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
	.arg4_size = sizeof(u64),
};
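
/*
 * Illustrative sketch (not part of this file): parsing a decimal number from
 * a buffer with the bpf_strtol() uapi wrapper, e.g. in a cgroup sysctl
 * program. A base of 0 would auto-detect octal/decimal/hex, as implemented
 * in __bpf_strtoull() above.
 *
 *	long n;
 *	int consumed = bpf_strtol(buf, buf_len, 10, &n);
 *	if (consumed < 0)
 *		return consumed;	// -EINVAL or -ERANGE, as above
 */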

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

static const struct bpf_func_proto bpf_strncmp_proto = {
	.func = bpf_strncmp,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func = bpf_get_ns_current_pid_tgid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func = bpf_get_raw_cpu_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func = bpf_event_output_data,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func = bpf_copy_from_user,
	.gpl_only = false,
	.might_sleep = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
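
/*
 * Illustrative sketch (not part of this file): because of .might_sleep
 * above, this helper is only available to sleepable programs (e.g. an
 * "fentry.s" program); a faulting user read fails with -EFAULT and zeroes
 * the destination buffer:
 *
 *	char name[16];
 *	if (bpf_copy_from_user(name, sizeof(name), user_ptr) == 0)
 *		bpf_printk("read: %s", name);
 */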

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func = bpf_copy_from_user_task,
	.gpl_only = true,
	.might_sleep = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_BTF_ID,
	.arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type = ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func = bpf_per_cpu_ptr,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func = bpf_this_cpu_ptr,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
};

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL 3

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
	int nest_level;

	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		return -EBUSY;
	}
	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);

	return 0;
}

void bpf_put_buffers(void)
{
	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
		return;
	this_cpu_dec(bpf_bprintf_nest_level);
}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
	if (!data->bin_args && !data->buf)
		return;
	bpf_put_buffers();
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{
	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	struct bpf_bprintf_buffers *buffers = NULL;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (get_buffers && bpf_try_get_buffers(&buffers))
		return -EBUSY;

	if (data->get_bin_args) {
		if (num_args)
			tmp_buf = buffers->bin_args;
		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
		data->bin_args = (u32 *)tmp_buf;
	}

	if (data->get_buf)
		data->buf = buffers->buf;

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1])) {
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				goto nocopy_fmt;
			}

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings; ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup(data);
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
	};
	int err, num_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func = bpf_snprintf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_CONST_STR,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
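
/*
 * Illustrative sketch (not part of this file): BPF programs usually reach
 * this helper through the BPF_SNPRINTF() convenience macro from
 * <bpf/bpf_helpers.h>, which packs the varargs into the u64 array that
 * bpf_bprintf_prepare() consumes:
 *
 *	char out[64];
 *	long ret = BPF_SNPRINTF(out, sizeof(out), "comm=%s pid=%d", comm, pid);
 *	// On success ret counts the trailing NUL as well, matching the
 *	// 'return err + 1' above.
 */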

static void *map_key_from_value(struct bpf_map *map, void *value, u32 *arr_idx)
{
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		*arr_idx = ((char *)value - array->value) / array->elem_size;
		return arr_idx;
	}
	return (void *)value - round_up(map->key_size, 8);
}

struct bpf_async_cb {
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
	union {
		struct rcu_head rcu;
		struct work_struct delete_work;
	};
	u64 flags;
};

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of the map element allocation
 * and it is zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits the hrtimer, and
 * remembers the 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments the prog refcnt and assigns the bpf
 * callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point,
 * the ops->map_release_uref callback is responsible for cancelling the
 * timers, freeing their memory, and decrementing the progs' refcnts.
 * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
 * Inner maps can contain bpf timers as well; ops->map_release_uref
 * frees the timers when an inner map is replaced or deleted by user space.
 */
struct bpf_hrtimer {
	struct bpf_async_cb cb;
	struct hrtimer timer;
	atomic_t cancelling;
};

struct bpf_work {
	struct bpf_async_cb cb;
	struct work_struct work;
	struct work_struct delete_work;
};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {
	union {
		struct bpf_async_cb *cb;
		struct bpf_hrtimer *timer;
		struct bpf_work *work;
	};
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

enum bpf_async_type {
	BPF_ASYNC_TYPE_TIMER = 0,
	BPF_ASYNC_TYPE_WQ,
};

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->cb.map;
	void *value = t->cb.value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);

	key = map_key_from_value(map, value, &idx);

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

static void bpf_wq_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, work);
	struct bpf_async_cb *cb = &w->cb;
	struct bpf_map *map = cb->map;
	bpf_callback_t callback_fn;
	void *value = cb->value;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_wq);

	callback_fn = READ_ONCE(cb->callback_fn);
	if (!callback_fn)
		return;

	key = map_key_from_value(map, value, &idx);

	rcu_read_lock_trace();
	migrate_disable();

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);

	migrate_enable();
	rcu_read_unlock_trace();
}

static void bpf_wq_delete_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);

	cancel_work_sync(&w->work);

	kfree_rcu(w, cb.rcu);
}

static void bpf_timer_delete_work(struct work_struct *work)
{
	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);

	/* Cancel the timer and wait for the callback to complete if it was
	 * running. If hrtimer_cancel() can be safely called, it's safe to call
	 * kfree_rcu(t) right after, for both preallocated and non-preallocated
	 * maps. The async->cb = NULL was already done and no code path can see
	 * address 't' anymore. A timer armed on an existing bpf_hrtimer before
	 * bpf_timer_cancel_and_free() will have been cancelled.
	 */
	hrtimer_cancel(&t->timer);
	kfree_rcu(t, cb.rcu);
}

static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
			    enum bpf_async_type type)
{
	struct bpf_async_cb *cb;
	struct bpf_hrtimer *t;
	struct bpf_work *w;
	clockid_t clockid;
	size_t size;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		size = sizeof(struct bpf_hrtimer);
		break;
	case BPF_ASYNC_TYPE_WQ:
		size = sizeof(struct bpf_work);
		break;
	default:
		return -EINVAL;
	}

	__bpf_spin_lock_irqsave(&async->lock);
	t = async->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}

	/* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until
	 * kmalloc_nolock() is available, avoid locking issues by using
	 * __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM).
	 */
	cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		clockid = flags & (MAX_CLOCKS - 1);
		t = (struct bpf_hrtimer *)cb;

		atomic_set(&t->cancelling, 0);
		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
		hrtimer_setup(&t->timer, bpf_timer_cb, clockid, HRTIMER_MODE_REL_SOFT);
		cb->value = (void *)async - map->record->timer_off;
		break;
	case BPF_ASYNC_TYPE_WQ:
		w = (struct bpf_work *)cb;

		INIT_WORK(&w->work, bpf_wq_work);
		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
		cb->value = (void *)async - map->record->wq_off;
		break;
	}
	cb->map = map;
	cb->prog = NULL;
	cb->flags = flags;
	rcu_assign_pointer(cb->callback_fn, NULL);

	WRITE_ONCE(async->cb, cb);
	/* Guarantee the order between async->cb and map->usercnt. So
	 * when there are concurrent uref release and bpf timer init, either
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
	 * timer or atomic64_read() below returns a zero usercnt.
	 */
	smp_mb();
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		WRITE_ONCE(async->cb, NULL);
		kfree(cb);
		ret = -EPERM;
	}
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}

BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clock_t clockid = flags & (MAX_CLOCKS - 1);

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;

	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func = bpf_timer_init,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
				    struct bpf_prog_aux *aux, unsigned int flags,
				    enum bpf_async_type type)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_async_cb *cb;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&async->lock);
	cb = async->cb;
	if (!cb) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&cb->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = cb->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		cb->prog = prog;
	}
	rcu_assign_pointer(cb->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}

BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func = bpf_timer_set_callback,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;
	enum hrtimer_mode mode;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->cb.prog) {
		ret = -EINVAL;
		goto out;
	}

	if (flags & BPF_F_TIMER_ABS)
		mode = HRTIMER_MODE_ABS_SOFT;
	else
		mode = HRTIMER_MODE_REL_SOFT;

	if (flags & BPF_F_TIMER_CPU_PIN)
		mode |= HRTIMER_MODE_PINNED;

	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func = bpf_timer_start,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};
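
/*
 * Illustrative sketch (not part of this file): the usual init ->
 * set_callback -> start sequence from a BPF program. The map, struct, and
 * callback names are hypothetical.
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	// the verifier requires a zero return
 *	}
 *
 *	struct elem *val = bpf_map_lookup_elem(&array, &key);
 *	if (val &&
 *	    bpf_timer_init(&val->t, &array, CLOCK_MONOTONIC) == 0 &&
 *	    bpf_timer_set_callback(&val->t, timer_cb) == 0)
 *		bpf_timer_start(&val->t, 1000000, 0);	// fires in 1 ms
 */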

static void drop_prog_refcnt(struct bpf_async_cb *async)
{
	struct bpf_prog *prog = async->prog;

	if (prog) {
		bpf_prog_put(prog);
		async->prog = NULL;
		rcu_assign_pointer(async->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{
	struct bpf_hrtimer *t, *cur_t;
	bool inc = false;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	rcu_read_lock();
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}

	cur_t = this_cpu_read(hrtimer_running);
	if (cur_t == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}

	/* Only account in-flight cancellations when invoked from a timer
	 * callback, since we want to avoid waiting only if other _callbacks_
	 * are waiting on us, to avoid introducing lockups. Non-callback paths
	 * are ok, since nobody would synchronously wait for their completion.
	 */
	if (!cur_t)
		goto drop;
	atomic_inc(&t->cancelling);
	/* Need full barrier after relaxed atomic_inc */
	smp_mb__after_atomic();
	inc = true;
	if (atomic_read(&cur_t->cancelling)) {
		/* We're cancelling timer t, while some other timer callback is
		 * attempting to cancel us. In such a case, it might be possible
		 * that timer t belongs to the other callback, or some other
		 * callback waiting upon it (creating transitive dependencies
		 * upon us), and we will enter a deadlock if we continue
		 * cancelling and waiting for it synchronously, since it might
		 * do the same. Bail!
		 */
		ret = -EDEADLK;
		goto out;
	}
drop:
	drop_prog_refcnt(&t->cb);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	if (inc)
		atomic_dec(&t->cancelling);
	rcu_read_unlock();
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func = bpf_timer_cancel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
};

static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
{
	struct bpf_async_cb *cb;

	/* Performance optimization: read async->cb without lock first. */
	if (!READ_ONCE(async->cb))
		return NULL;

	__bpf_spin_lock_irqsave(&async->lock);
	/* re-read it under lock */
	cb = async->cb;
	if (!cb)
		goto out;
	drop_prog_refcnt(cb);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	WRITE_ONCE(async->cb, NULL);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return cb;
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_hrtimer *t;

	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);

	if (!t)
		return;
	/* We check that bpf_map_delete/update_elem() was called from timer
	 * callback_fn. In such a case we don't call hrtimer_cancel() (since it
	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
	 * just return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since async->cb = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 *
	 * However, it is possible the timer callback_fn calling us armed the
	 * timer _before_ calling us, such that failing to cancel it here will
	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
	 * Therefore, we _need_ to cancel any outstanding timers before we do
	 * kfree_rcu, even though no more timers can be armed.
	 *
	 * Moreover, we need to schedule work even if the timer does not belong
	 * to the calling callback_fn, as on two different CPUs we can end up in
	 * a situation where both sides run in parallel, try to cancel one
	 * another, and we end up waiting on both sides in hrtimer_cancel()
	 * without making forward progress, since timer1 depends on the timer2
	 * callback to finish, and vice versa.
	 *
	 * CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
	 * bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
	 *
	 * To avoid these issues, punt to workqueue context when we are in a
	 * timer callback.
	 */
	if (this_cpu_read(hrtimer_running)) {
		queue_work(system_dfl_wq, &t->cb.delete_work);
		return;
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* If the timer is running on other CPU, also use a kworker to
		 * wait for the completion of the timer instead of trying to
		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
		 * completion.
		 */
		if (hrtimer_try_to_cancel(&t->timer) >= 0)
			kfree_rcu(t, cb.rcu);
		else
			queue_work(system_dfl_wq, &t->cb.delete_work);
	} else {
		bpf_timer_delete_work(&t->cb.delete_work);
	}
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_wq_cancel_and_free(void *val)
{
	struct bpf_work *work;

	BTF_TYPE_EMIT(struct bpf_wq);

	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
	if (!work)
		return;
	/* Trigger cancel of the sleepable work, but *do not* wait for
	 * it to finish if it was running as we might not be in a
	 * sleepable context.
	 * kfree will be called once the work has finished.
	 */
	schedule_work(&work->delete_work);
}

BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
{
	unsigned long *kptr = dst;

	/* This helper may be inlined by verifier. */
	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func = bpf_kptr_xchg,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id = BPF_PTR_POISON,
	.arg1_type = ARG_KPTR_XCHG_DEST,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id = BPF_PTR_POISON,
};
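
/*
 * Illustrative sketch (not part of this file): atomically moving a
 * referenced kernel pointer into a map value field annotated with __kptr.
 * The struct layout and the bpf_task_release() kfunc pairing are
 * hypothetical usage, not mandated by this helper.
 *
 *	struct map_value {
 *		struct task_struct __kptr *task;
 *	};
 *
 *	old = bpf_kptr_xchg(&v->task, acquired);
 *	if (old)
 *		bpf_task_release(old);	// drop the reference handed back
 */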

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT 28
#define DYNPTR_SIZE_MASK 0xFFFFFF
#define DYNPTR_RDONLY_BIT BIT(31)
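
/*
 * Bit layout of dynptr->size implied by the masks above:
 *
 *	bit  31    : rdonly flag	(DYNPTR_RDONLY_BIT)
 *	bits 28-30 : dynptr type	(DYNPTR_TYPE_SHIFT)
 *	bits 24-27 : reserved
 *	bits  0-23 : size		(DYNPTR_SIZE_MASK)
 */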

bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
	ptr->size |= DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}

u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_SIZE_MASK;
}

static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
{
	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;

	ptr->size = new_size | metadata;
}

int bpf_dynptr_check_size(u32 size)
{
	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
}

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
		     enum bpf_dynptr_type type, u32 offset, u32 size)
{
	ptr->data = data;
	ptr->offset = offset;
	ptr->size = size;
	bpf_dynptr_set_type(ptr, type);
}

void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
	memset(ptr, 0, sizeof(*ptr));
}

BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{
	int err;

	BTF_TYPE_EMIT(struct bpf_dynptr);

	err = bpf_dynptr_check_size(size);
	if (err)
		goto error;

	/* flags is currently unsupported */
	if (flags) {
		err = -EINVAL;
		goto error;
	}

	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);

	return 0;

error:
	bpf_dynptr_set_null(ptr);
	return err;
}

static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
	.func = bpf_dynptr_from_mem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
};

static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src,
			     u32 offset, u64 flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!src->data || flags)
		return -EINVAL;

	err = bpf_dynptr_check_off_len(src, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(src);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst, src->data + src->offset + offset, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
	case BPF_DYNPTR_TYPE_XDP:
		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
	case BPF_DYNPTR_TYPE_SKB_META:
		memmove(dst, bpf_skb_meta_pointer(src->data, src->offset + offset), len);
		return 0;
	default:
		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
	   u32, offset, u64, flags)
{
	return __bpf_dynptr_read(dst, len, src, offset, flags);
}

static const struct bpf_func_proto bpf_dynptr_read_proto = {
	.func = bpf_dynptr_read,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg4_type = ARG_ANYTHING,
	.arg5_type = ARG_ANYTHING,
};

int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src,
		       u32 len, u64 flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
		return -EINVAL;

	err = bpf_dynptr_check_off_len(dst, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(dst);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		if (flags)
			return -EINVAL;
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst->data + dst->offset + offset, src, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
					     flags);
	case BPF_DYNPTR_TYPE_XDP:
		if (flags)
			return -EINVAL;
		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
	case BPF_DYNPTR_TYPE_SKB_META:
		if (flags)
			return -EINVAL;
		memmove(bpf_skb_meta_pointer(dst->data, dst->offset + offset), src, len);
		return 0;
	default:
		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
	   u32, len, u64, flags)
{
	return __bpf_dynptr_write(dst, offset, src, len, flags);
}

static const struct bpf_func_proto bpf_dynptr_write_proto = {
	.func = bpf_dynptr_write,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE_OR_ZERO,
	.arg5_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{
	enum bpf_dynptr_type type;
	int err;

	if (!ptr->data)
		return 0;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return 0;

	if (__bpf_dynptr_is_rdonly(ptr))
		return 0;

	type = bpf_dynptr_get_type(ptr);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		return (unsigned long)(ptr->data + ptr->offset + offset);
	case BPF_DYNPTR_TYPE_SKB:
	case BPF_DYNPTR_TYPE_XDP:
	case BPF_DYNPTR_TYPE_SKB_META:
		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
		return 0;
	default:
		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
		return 0;
	}
}

static const struct bpf_func_proto bpf_dynptr_data_proto = {
	.func = bpf_dynptr_data,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL,
	.arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
};
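
/*
 * Illustrative sketch (not part of this file): creating a local dynptr over
 * a stack buffer, then writing and reading through it with the uapi
 * wrappers for the helpers above.
 *
 *	char buf[16] = {};
 *	char src[2] = { 'h', 'i' };
 *	char out[8];
 *	struct bpf_dynptr dp;
 *
 *	if (bpf_dynptr_from_mem(buf, sizeof(buf), 0, &dp) == 0 &&
 *	    bpf_dynptr_write(&dp, 0, src, sizeof(src), 0) == 0)
 *		bpf_dynptr_read(out, sizeof(out), &dp, 0, 0);
 */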
1909
1910
const struct bpf_func_proto bpf_get_current_task_proto __weak;
1911
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1912
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1913
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1914
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1915
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1916
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1917
const struct bpf_func_proto bpf_perf_event_read_proto __weak;
1918
const struct bpf_func_proto bpf_send_signal_proto __weak;
1919
const struct bpf_func_proto bpf_send_signal_thread_proto __weak;
1920
const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak;
1921
const struct bpf_func_proto bpf_get_task_stack_proto __weak;
1922
const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak;
1923
1924
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_tai_ns:
		return &bpf_ktime_get_tai_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	default:
		break;
	}

	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	case BPF_FUNC_kptr_xchg:
		return &bpf_kptr_xchg_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_loop:
		return &bpf_loop_proto;
	case BPF_FUNC_user_ringbuf_drain:
		return &bpf_user_ringbuf_drain_proto;
	case BPF_FUNC_ringbuf_reserve_dynptr:
		return &bpf_ringbuf_reserve_dynptr_proto;
	case BPF_FUNC_ringbuf_submit_dynptr:
		return &bpf_ringbuf_submit_dynptr_proto;
	case BPF_FUNC_ringbuf_discard_dynptr:
		return &bpf_ringbuf_discard_dynptr_proto;
	case BPF_FUNC_dynptr_from_mem:
		return &bpf_dynptr_from_mem_proto;
	case BPF_FUNC_dynptr_read:
		return &bpf_dynptr_read_proto;
	case BPF_FUNC_dynptr_write:
		return &bpf_dynptr_write_proto;
	case BPF_FUNC_dynptr_data:
		return &bpf_dynptr_data_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_cgrp_storage_get:
		return &bpf_cgrp_storage_get_proto;
	case BPF_FUNC_cgrp_storage_delete:
		return &bpf_cgrp_storage_delete_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
#endif
#ifdef CONFIG_CGROUP_NET_CLASSID
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_curr_proto;
#endif
	case BPF_FUNC_task_storage_get:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_get_recur_proto;
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_delete_recur_proto;
		return &bpf_task_storage_delete_proto;
	default:
		break;
	}

	if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_copy_from_user:
		return &bpf_copy_from_user_proto;
	case BPF_FUNC_copy_from_user_task:
		return &bpf_copy_from_user_task_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	case BPF_FUNC_perf_event_read_value:
		return bpf_get_perf_event_read_value_proto();
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_get_task_stack:
		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
				       : &bpf_get_task_stack_proto;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	default:
		return NULL;
	}
}
EXPORT_SYMBOL_GPL(bpf_base_func_proto);

void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock)
{
	struct list_head *head = list_head, *orig_head = list_head;

	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));

	/* Do the actual list draining outside the lock to not hold the lock for
	 * too long, and also prevent deadlocks if tracing programs end up
	 * executing on entry/exit of functions called inside the critical
	 * section, and end up doing map ops that call bpf_list_head_free for
	 * the same map value again.
	 */
	__bpf_spin_lock_irqsave(spin_lock);
	if (!head->next || list_empty(head))
		goto unlock;
	head = head->next;
unlock:
	INIT_LIST_HEAD(orig_head);
	__bpf_spin_unlock_irqrestore(spin_lock);

	while (head != orig_head) {
		void *obj = head;

		obj -= field->graph_root.node_offset;
		head = head->next;
		/* The contained type can also have resources, including a
		 * bpf_list_head which needs to be freed.
		 */
		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
	}
}

/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
 * 'rb_node *', so field name of rb_node within containing struct is not
 * needed.
 *
 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
 * graph_root.node_offset, it's not necessary to know field name
 * or type of node struct
 */
#define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
	for (pos = rb_first_postorder(root); \
	     pos && ({ n = rb_next_postorder(pos); 1; }); \
	     pos = n)

void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock)
{
	struct rb_root_cached orig_root, *root = rb_root;
	struct rb_node *pos, *n;
	void *obj;

	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));

	__bpf_spin_lock_irqsave(spin_lock);
	orig_root = *root;
	*root = RB_ROOT_CACHED;
	__bpf_spin_unlock_irqrestore(spin_lock);

	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
		obj = pos;
		obj -= field->graph_root.node_offset;

		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
	}
}

__bpf_kfunc_start_defs();

__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
	struct btf_struct_meta *meta = meta__ign;
	u64 size = local_type_id__k;
	void *p;

	p = bpf_mem_alloc(&bpf_global_ma, size);
	if (!p)
		return NULL;
	if (meta)
		bpf_obj_init(meta->record, p);
	return p;
}

__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
	u64 size = local_type_id__k;

	/* The verifier has ensured that meta__ign must be NULL */
	return bpf_mem_alloc(&bpf_global_percpu_ma, size);
}

/* Must be called under migrate_disable(), as required by bpf_mem_free */
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
{
	struct bpf_mem_alloc *ma;

	if (rec && rec->refcount_off >= 0 &&
	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
		/* Object is refcounted and refcount_dec didn't result in 0
		 * refcount. Return without freeing the object
		 */
		return;
	}

	if (rec)
		bpf_obj_free_fields(rec, p);

	if (percpu)
		ma = &bpf_global_percpu_ma;
	else
		ma = &bpf_global_ma;
	bpf_mem_free_rcu(ma, p);
}

__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
{
	struct btf_struct_meta *meta = meta__ign;
	void *p = p__alloc;

	__bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
}

__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
{
	/* The verifier has ensured that meta__ign must be NULL */
	bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
}

__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
{
	struct btf_struct_meta *meta = meta__ign;
	struct bpf_refcount *ref;

	/* Could just cast directly to refcount_t *, but need some code using
	 * bpf_refcount type so that it is emitted in vmlinux BTF
	 */
	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
	if (!refcount_inc_not_zero((refcount_t *)ref))
		return NULL;

	/* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
	 * in verifier.c
	 */
	return (void *)p__refcounted_kptr;
}

static int __bpf_list_add(struct bpf_list_node_kern *node,
			  struct bpf_list_head *head,
			  bool tail, struct btf_record *rec, u64 off)
{
	struct list_head *n = &node->list_head, *h = (void *)head;

	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
	 * called on its fields, so init here
	 */
	if (unlikely(!h->next))
		INIT_LIST_HEAD(h);

	/* node->owner != NULL implies !list_empty(n), no need to separately
	 * check the latter
	 */
	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
		/* Only called from BPF prog, no need to migrate_disable */
		__bpf_obj_drop_impl((void *)n - off, rec, false);
		return -EINVAL;
	}

	tail ? list_add_tail(n, h) : list_add(n, h);
	WRITE_ONCE(node->owner, head);

	return 0;
}

__bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
					 struct bpf_list_node *node,
					 void *meta__ign, u64 off)
{
	struct bpf_list_node_kern *n = (void *)node;
	struct btf_struct_meta *meta = meta__ign;

	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
}

__bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
					struct bpf_list_node *node,
					void *meta__ign, u64 off)
{
	struct bpf_list_node_kern *n = (void *)node;
	struct btf_struct_meta *meta = meta__ign;

	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
}

static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
{
	struct list_head *n, *h = (void *)head;
	struct bpf_list_node_kern *node;

	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
	 * called on its fields, so init here
	 */
	if (unlikely(!h->next))
		INIT_LIST_HEAD(h);
	if (list_empty(h))
		return NULL;

	n = tail ? h->prev : h->next;
	node = container_of(n, struct bpf_list_node_kern, list_head);
	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
		return NULL;

	list_del_init(n);
	WRITE_ONCE(node->owner, NULL);
	return (struct bpf_list_node *)n;
}

__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{
	return __bpf_list_del(head, false);
}

__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{
	return __bpf_list_del(head, true);
}

__bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
{
	struct list_head *h = (struct list_head *)head;

	if (list_empty(h) || unlikely(!h->next))
		return NULL;

	return (struct bpf_list_node *)h->next;
}

__bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
{
	struct list_head *h = (struct list_head *)head;

	if (list_empty(h) || unlikely(!h->next))
		return NULL;

	return (struct bpf_list_node *)h->prev;
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
						  struct bpf_rb_node *node)
{
	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
	struct rb_root_cached *r = (struct rb_root_cached *)root;
	struct rb_node *n = &node_internal->rb_node;

	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
	 */
	if (READ_ONCE(node_internal->owner) != root)
		return NULL;

	rb_erase_cached(n, r);
	RB_CLEAR_NODE(n);
	WRITE_ONCE(node_internal->owner, NULL);
	return (struct bpf_rb_node *)n;
}

/* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
 * program
 */
static int __bpf_rbtree_add(struct bpf_rb_root *root,
			    struct bpf_rb_node_kern *node,
			    void *less, struct btf_record *rec, u64 off)
{
	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
	struct rb_node *parent = NULL, *n = &node->rb_node;
	bpf_callback_t cb = (bpf_callback_t)less;
	bool leftmost = true;

	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
	 * check the latter
	 */
	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
		/* Only called from BPF prog, no need to migrate_disable */
		__bpf_obj_drop_impl((void *)n - off, rec, false);
		return -EINVAL;
	}

	while (*link) {
		parent = *link;
		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(n, parent, link);
	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
	WRITE_ONCE(node->owner, root);
	return 0;
}

__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
				    void *meta__ign, u64 off)
{
	struct btf_struct_meta *meta = meta__ign;
	struct bpf_rb_node_kern *n = (void *)node;

	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
{
	struct rb_root_cached *r = (struct rb_root_cached *)root;

	return (struct bpf_rb_node *)rb_first_cached(r);
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
{
	struct rb_root_cached *r = (struct rb_root_cached *)root;

	return (struct bpf_rb_node *)r->rb_root.rb_node;
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
{
	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;

	if (READ_ONCE(node_internal->owner) != root)
		return NULL;

	return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
}

__bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
{
	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;

	if (READ_ONCE(node_internal->owner) != root)
		return NULL;

	return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
}

/**
 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
 * kfunc which is not stored in a map as a kptr must be released by calling
 * bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{
	if (refcount_inc_not_zero(&p->rcu_users))
		return p;
	return NULL;
}

/**
 * bpf_task_release - Release the reference acquired on a task.
 * @p: The task on which a reference is being released.
 */
__bpf_kfunc void bpf_task_release(struct task_struct *p)
{
	put_task_struct_rcu_user(p);
}
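
/* Usage sketch (illustrative only, not kernel code): from a BPF program with
 * these kfuncs declared via vmlinux.h, the acquire/release pair brackets any
 * use of the task pointer held beyond the current RCU section:
 *
 *	struct task_struct *t = bpf_get_current_task_btf();
 *
 *	t = bpf_task_acquire(t);
 *	if (t) {
 *		bpf_printk("pid %d", t->pid);
 *		bpf_task_release(t);
 *	}
 */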

__bpf_kfunc void bpf_task_release_dtor(void *p)
{
	put_task_struct_rcu_user(p);
}
CFI_NOSEAL(bpf_task_release_dtor);

#ifdef CONFIG_CGROUPS
/**
 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
 * this kfunc which is not stored in a map as a kptr must be released by
 * calling bpf_cgroup_release().
 * @cgrp: The cgroup on which a reference is being acquired.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{
	return cgroup_tryget(cgrp) ? cgrp : NULL;
}

/**
 * bpf_cgroup_release - Release the reference acquired on a cgroup.
 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
 * not be freed until the current grace period has ended, even if its refcount
 * drops to 0.
 * @cgrp: The cgroup on which a reference is being released.
 */
__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{
	cgroup_put(cgrp);
}

__bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
{
	cgroup_put(cgrp);
}
CFI_NOSEAL(bpf_cgroup_release_dtor);

/**
 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
 * array. A cgroup returned by this kfunc which is not subsequently stored in a
 * map must be released by calling bpf_cgroup_release().
 * @cgrp: The cgroup for which we're performing a lookup.
 * @level: The level of ancestor to look up.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
	struct cgroup *ancestor;

	if (level > cgrp->level || level < 0)
		return NULL;

	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
	ancestor = cgrp->ancestors[level];
	if (!cgroup_tryget(ancestor))
		return NULL;
	return ancestor;
}

/**
 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
 * kfunc which is not subsequently stored in a map must be released by calling
 * bpf_cgroup_release().
 * @cgid: cgroup id.
 */
__bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
{
	struct cgroup *cgrp;

	cgrp = __cgroup_get_from_id(cgid);
	if (IS_ERR(cgrp))
		return NULL;
	return cgrp;
}
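
/* Usage sketch (illustrative only, not kernel code): resolving a cgroup by ID
 * and dropping the acquired reference; cgid would come from the program's own
 * bookkeeping, e.g. a prior bpf_get_current_cgroup_id() call:
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *
 *	if (cgrp) {
 *		bpf_printk("cgroup level %d", cgrp->level);
 *		bpf_cgroup_release(cgrp);
 *	}
 */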

/**
 * bpf_task_under_cgroup - Wrap task_under_cgroup_hierarchy() as a kfunc to
 * test a task's membership of a cgroup ancestry.
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
				       struct cgroup *ancestor)
{
	long ret;

	rcu_read_lock();
	ret = task_under_cgroup_hierarchy(task, ancestor);
	rcu_read_unlock();
	return ret;
}

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

/**
 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
 * hierarchy ID.
 * @task: The target task
 * @hierarchy_id: The ID of a cgroup1 hierarchy
 *
 * On success, the cgroup is returned. On failure, NULL is returned.
 */
__bpf_kfunc struct cgroup *
bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
{
	struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);

	if (IS_ERR(cgrp))
		return NULL;
	return cgrp;
}
#endif /* CONFIG_CGROUPS */

/**
 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
 * in the root pid namespace idr. If a task is returned, it must either be
 * stored in a map, or released with bpf_task_release().
 * @pid: The pid of the task being looked up.
 */
__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_pid_ns(pid, &init_pid_ns);
	if (p)
		p = bpf_task_acquire(p);
	rcu_read_unlock();

	return p;
}
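
/* Usage sketch (illustrative only, not kernel code): looking up pid 1 in the
 * root pid namespace; the acquired task must be released (or stored as a
 * kptr) before the program exits:
 *
 *	struct task_struct *p = bpf_task_from_pid(1);
 *
 *	if (p) {
 *		bpf_printk("comm %s", p->comm);
 *		bpf_task_release(p);
 *	}
 */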

/**
 * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up
 * in the pid namespace of the current task. If a task is returned, it must
 * either be stored in a map, or released with bpf_task_release().
 * @vpid: The vpid of the task being looked up.
 */
__bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(vpid);
	if (p)
		p = bpf_task_acquire(p);
	rcu_read_unlock();

	return p;
}

/**
 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
 * @p: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *               length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If @buffer__opt is NULL, the call will fail if @buffer__opt was needed.
 *
 * If the intention is to write to the data slice, please use
 * bpf_dynptr_slice_rdwr.
 *
 * The user must check that the returned pointer is not null before using it.
 *
 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a read-only
 * data slice (can be either direct pointer to the data or a pointer to the user
 * provided buffer, with its contents containing the data, if unable to obtain
 * direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
				   void *buffer__opt, u32 buffer__szk)
{
	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
	enum bpf_dynptr_type type;
	u32 len = buffer__szk;
	int err;

	if (!ptr->data)
		return NULL;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return NULL;

	type = bpf_dynptr_get_type(ptr);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		return ptr->data + ptr->offset + offset;
	case BPF_DYNPTR_TYPE_SKB:
		if (buffer__opt)
			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
		else
			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
	case BPF_DYNPTR_TYPE_XDP:
	{
		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);

		if (!IS_ERR_OR_NULL(xdp_ptr))
			return xdp_ptr;

		if (!buffer__opt)
			return NULL;
		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
		return buffer__opt;
	}
	case BPF_DYNPTR_TYPE_SKB_META:
		return bpf_skb_meta_pointer(ptr->data, ptr->offset + offset);
	default:
		WARN_ONCE(true, "unknown dynptr type %d\n", type);
		return NULL;
	}
}
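
/* Usage sketch (illustrative only, not kernel code): reading an Ethernet
 * header through an skb dynptr (e.g. one created with bpf_dynptr_from_skb()),
 * where buf is only used if the requested bytes are not linear:
 *
 *	struct ethhdr buf;
 *	struct ethhdr *eth;
 *
 *	eth = bpf_dynptr_slice(&dynptr, 0, &buf, sizeof(buf));
 *	if (!eth)
 *		return TC_ACT_SHOT;
 *	// eth is read-only here: either direct packet data or &buf
 */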

/**
 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
 * @p: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *               length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If @buffer__opt is NULL, the call will fail if @buffer__opt was needed.
 *
 * The returned pointer is writable and may point either directly to the dynptr
 * data at the requested offset or to the buffer if a direct data pointer could
 * not be obtained (for example: the requested slice is in the paged area of an
 * skb packet). In the case where the returned pointer is to the buffer, the
 * user is responsible for persisting writes through calling bpf_dynptr_write().
 * This usually looks something like this pattern:
 *
 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
 * if (!eth)
 *	return TC_ACT_SHOT;
 *
 * // mutate eth header //
 *
 * if (eth == buffer)
 *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
 *
 * Please note that, as in the example above, the user must check that the
 * returned pointer is not null before using it.
 *
 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a
 * data slice (can be either direct pointer to the data or a pointer to the user
 * provided buffer, with its contents containing the data, if unable to obtain
 * direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
					void *buffer__opt, u32 buffer__szk)
{
	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
		return NULL;

	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
	 *
	 * For skb-type dynptrs, it is safe to write into the returned pointer
	 * if the bpf program allows skb data writes. There are two possibilities
	 * that may occur when calling bpf_dynptr_slice_rdwr:
	 *
	 * 1) The requested slice is in the head of the skb. In this case, the
	 * returned pointer is directly to skb data, and if the skb is cloned, the
	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
	 * The pointer can be directly written into.
	 *
	 * 2) Some portion of the requested slice is in the paged buffer area.
	 * In this case, the requested data will be copied out into the buffer
	 * and the returned pointer will be a pointer to the buffer. The skb
	 * will not be pulled. To persist the write, the user will need to call
	 * bpf_dynptr_write(), which will pull the skb and commit the write.
	 *
	 * Similarly for xdp programs, if the requested slice is not across xdp
	 * fragments, then a direct pointer will be returned, otherwise the data
	 * will be copied out into the buffer and the user will need to call
	 * bpf_dynptr_write() to commit changes.
	 */
	return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
}

__bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
	u32 size;

	if (!ptr->data || start > end)
		return -EINVAL;

	size = __bpf_dynptr_size(ptr);

	if (start > size || end > size)
		return -ERANGE;

	ptr->offset += start;
	bpf_dynptr_set_size(ptr, end - start);

	return 0;
}
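
/* Usage sketch (illustrative only, not kernel code): narrowing a dynptr view
 * to bytes [4, 12) of the current view; later reads into a local dst buffer
 * are then relative to the new start:
 *
 *	if (!bpf_dynptr_adjust(&dynptr, 4, 12))
 *		bpf_dynptr_read(dst, 8, &dynptr, 0, 0);
 */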

__bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	return !ptr->data;
}

__bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	if (!ptr->data)
		return false;

	return __bpf_dynptr_is_rdonly(ptr);
}

__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	if (!ptr->data)
		return -EINVAL;

	return __bpf_dynptr_size(ptr);
}

__bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
				 struct bpf_dynptr *clone__uninit)
{
	struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;

	if (!ptr->data) {
		bpf_dynptr_set_null(clone);
		return -EINVAL;
	}

	*clone = *ptr;

	return 0;
}

/**
 * bpf_dynptr_copy() - Copy data from one dynptr to another.
 * @dst_ptr: Destination dynptr - where data should be copied to
 * @dst_off: Offset into the destination dynptr
 * @src_ptr: Source dynptr - where data should be copied from
 * @src_off: Offset into the source dynptr
 * @size: Length of the data to copy from source to destination
 *
 * Copies data from source dynptr to destination dynptr.
 * Returns 0 on success; negative error, otherwise.
 */
__bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off,
				struct bpf_dynptr *src_ptr, u32 src_off, u32 size)
{
	struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr;
	struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr;
	void *src_slice, *dst_slice;
	char buf[256];
	u32 off;

	src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size);
	dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size);

	if (src_slice && dst_slice) {
		memmove(dst_slice, src_slice, size);
		return 0;
	}

	if (src_slice)
		return __bpf_dynptr_write(dst, dst_off, src_slice, size, 0);

	if (dst_slice)
		return __bpf_dynptr_read(dst_slice, size, src, src_off, 0);

	if (bpf_dynptr_check_off_len(dst, dst_off, size) ||
	    bpf_dynptr_check_off_len(src, src_off, size))
		return -E2BIG;

	off = 0;
	while (off < size) {
		u32 chunk_sz = min_t(u32, sizeof(buf), size - off);
		int err;

		err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0);
		if (err)
			return err;
		err = __bpf_dynptr_write(dst, dst_off + off, buf, chunk_sz, 0);
		if (err)
			return err;

		off += chunk_sz;
	}
	return 0;
}
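
/* Usage sketch (illustrative only, not kernel code): copying the first 64
 * bytes between two dynptrs; the kfunc picks the fastest path (direct slices,
 * one-sided copy, or the chunked 256-byte bounce buffer above):
 *
 *	if (bpf_dynptr_copy(&dst_ptr, 0, &src_ptr, 0, 64))
 *		return 0;	// e.g. -E2BIG for an out-of-bounds range
 */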

/**
 * bpf_dynptr_memset() - Fill dynptr memory with a constant byte.
 * @p: Destination dynptr - where data will be filled
 * @offset: Offset into the dynptr to start filling from
 * @size: Number of bytes to fill
 * @val: Constant byte to fill the memory with
 *
 * Fills the @size bytes of the memory area pointed to by @p
 * at @offset with the constant byte @val.
 * Returns 0 on success; negative error, otherwise.
 */
__bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u32 offset, u32 size, u8 val)
{
	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
	u32 chunk_sz, write_off;
	char buf[256];
	void *slice;
	int err;

	slice = bpf_dynptr_slice_rdwr(p, offset, NULL, size);
	if (likely(slice)) {
		memset(slice, val, size);
		return 0;
	}

	if (__bpf_dynptr_is_rdonly(ptr))
		return -EINVAL;

	err = bpf_dynptr_check_off_len(ptr, offset, size);
	if (err)
		return err;

	/* Non-linear data under the dynptr, write from a local buffer */
	chunk_sz = min_t(u32, sizeof(buf), size);
	memset(buf, val, chunk_sz);

	for (write_off = 0; write_off < size; write_off += chunk_sz) {
		chunk_sz = min_t(u32, sizeof(buf), size - write_off);
		err = __bpf_dynptr_write(ptr, offset + write_off, buf, chunk_sz, 0);
		if (err)
			return err;
	}

	return 0;
}

__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{
	return obj;
}

__bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
{
	return (void *)obj__ign;
}

__bpf_kfunc void bpf_rcu_read_lock(void)
{
	rcu_read_lock();
}

__bpf_kfunc void bpf_rcu_read_unlock(void)
{
	rcu_read_unlock();
}

struct bpf_throw_ctx {
	struct bpf_prog_aux *aux;
	u64 sp;
	u64 bp;
	int cnt;
};

static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
{
	struct bpf_throw_ctx *ctx = cookie;
	struct bpf_prog *prog;

	/*
	 * The RCU read lock is held to safely traverse the latch tree, but we
	 * don't need its protection when accessing the prog, since it has an
	 * active stack frame on the current stack trace, and won't disappear.
	 */
	rcu_read_lock();
	prog = bpf_prog_ksym_find(ip);
	rcu_read_unlock();
	if (!prog)
		return !ctx->cnt;
	ctx->cnt++;
	if (bpf_is_subprog(prog))
		return true;
	ctx->aux = prog->aux;
	ctx->sp = sp;
	ctx->bp = bp;
	return false;
}

__bpf_kfunc void bpf_throw(u64 cookie)
{
	struct bpf_throw_ctx ctx = {};

	arch_bpf_stack_walk(bpf_stack_walker, &ctx);
	WARN_ON_ONCE(!ctx.aux);
	if (ctx.aux)
		WARN_ON_ONCE(!ctx.aux->exception_boundary);
	WARN_ON_ONCE(!ctx.bp);
	WARN_ON_ONCE(!ctx.cnt);
	/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
	 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
	 * which skips compiler generated instrumentation to do the same.
	 */
	kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
	WARN(1, "A call to BPF exception callback should never return\n");
}

__bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
{
	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
	struct bpf_map *map = p__map;

	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));

	if (flags)
		return -EINVAL;

	return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
}

__bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
{
	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
	struct bpf_work *w;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags)
		return -EINVAL;
	w = READ_ONCE(async->work);
	if (!w || !READ_ONCE(w->cb.prog))
		return -EINVAL;

	schedule_work(&w->work);
	return 0;
}

__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
					 int (callback_fn)(void *map, int *key, void *value),
					 unsigned int flags,
					 void *aux__prog)
{
	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;

	if (flags)
		return -EINVAL;

	return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
}
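
/* Usage sketch (illustrative only, not kernel code): the expected BPF-side
 * sequence, with a struct bpf_wq embedded in a map value (val) belonging to
 * wq_map, and bpf_wq_set_callback() being the bpf_experimental.h wrapper
 * around the _impl kfunc:
 *
 *	static int wq_cb(void *map, int *key, void *value) { return 0; }
 *
 *	if (bpf_wq_init(&val->wq, &wq_map, 0))
 *		return 0;
 *	if (bpf_wq_set_callback(&val->wq, wq_cb, 0))
 *		return 0;
 *	bpf_wq_start(&val->wq, 0);
 */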

__bpf_kfunc void bpf_preempt_disable(void)
{
	preempt_disable();
}

__bpf_kfunc void bpf_preempt_enable(void)
{
	preempt_enable();
}

struct bpf_iter_bits {
	__u64 __opaque[2];
} __aligned(8);

#define BITS_ITER_NR_WORDS_MAX 511

struct bpf_iter_bits_kern {
	union {
		__u64 *bits;
		__u64 bits_copy;
	};
	int nr_bits;
	int bit;
} __aligned(8);

/* On 64-bit hosts, unsigned long and u64 have the same size, so passing
 * a u64 pointer and an unsigned long pointer to find_next_bit() will
 * return the same result, as both point to the same 8-byte area.
 *
 * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
 * pointer also makes no difference. This is because the first iterated
 * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
 * long is composed of bits 32-63 of the u64.
 *
 * However, for 32-bit big-endian hosts, this is not the case. The first
 * iterated unsigned long will be bits 32-63 of the u64, so swap these two
 * ulong values within the u64.
 */
static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
{
#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
	unsigned int i;

	for (i = 0; i < nr; i++)
		bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
#endif
}

/**
 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
 * @it: The new bpf_iter_bits to be created
 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
 * @nr_words: The size of the specified memory area, measured in 8-byte units.
 * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
 * further reduced by the BPF memory allocator implementation.
 *
 * This function initializes a new bpf_iter_bits structure for iterating over
 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
 * copies the data of the memory area to the newly created bpf_iter_bits @it for
 * subsequent iteration operations.
 *
 * On success, 0 is returned. On failure, a negative error code is returned.
 */
__bpf_kfunc int
bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
{
	struct bpf_iter_bits_kern *kit = (void *)it;
	u32 nr_bytes = nr_words * sizeof(u64);
	u32 nr_bits = BYTES_TO_BITS(nr_bytes);
	int err;

	BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
	BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
		     __alignof__(struct bpf_iter_bits));

	kit->nr_bits = 0;
	kit->bits_copy = 0;
	kit->bit = -1;

	if (!unsafe_ptr__ign || !nr_words)
		return -EINVAL;
	if (nr_words > BITS_ITER_NR_WORDS_MAX)
		return -E2BIG;

	/* Optimization for u64 mask */
	if (nr_bits == 64) {
		err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
		if (err)
			return -EFAULT;

		swap_ulong_in_u64(&kit->bits_copy, nr_words);

		kit->nr_bits = nr_bits;
		return 0;
	}

	if (bpf_mem_alloc_check_size(false, nr_bytes))
		return -E2BIG;

	/* Fallback to memalloc */
	kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
	if (!kit->bits)
		return -ENOMEM;

	err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
	if (err) {
		bpf_mem_free(&bpf_global_ma, kit->bits);
		return err;
	}

	swap_ulong_in_u64(kit->bits, nr_words);

	kit->nr_bits = nr_bits;
	return 0;
}

/**
 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
 * @it: The bpf_iter_bits to be checked
 *
 * This function returns a pointer to a number representing the value of the
 * next bit in the bits.
 *
 * If there are no further bits available, it returns NULL.
 */
__bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
{
	struct bpf_iter_bits_kern *kit = (void *)it;
	int bit = kit->bit, nr_bits = kit->nr_bits;
	const void *bits;

	if (!nr_bits || bit >= nr_bits)
		return NULL;

	bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
	bit = find_next_bit(bits, nr_bits, bit + 1);
	if (bit >= nr_bits) {
		kit->bit = bit;
		return NULL;
	}

	kit->bit = bit;
	return &kit->bit;
}

/**
 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
 * @it: The bpf_iter_bits to be destroyed
 *
 * Destroy the resource associated with the bpf_iter_bits.
 */
__bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
{
	struct bpf_iter_bits_kern *kit = (void *)it;

	if (kit->nr_bits <= 64)
		return;
	bpf_mem_free(&bpf_global_ma, kit->bits);
}
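
/* Usage sketch (illustrative only, not kernel code): BPF programs normally
 * drive this iterator through the bpf_for_each() macro from bpf_helpers.h,
 * here walking an 8-word (512-bit) kernel bitmap at address mask:
 *
 *	int *bit;
 *
 *	bpf_for_each(bits, bit, mask, 8) {
 *		bpf_printk("bit %d is set", *bit);
 *	}
 */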

/**
 * bpf_copy_from_user_str() - Copy a string from an unsafe user address
 * @dst: Destination address, in kernel space. This buffer must be
 *       at least @dst__sz bytes long.
 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
 * @unsafe_ptr__ign: Source address, in user space.
 * @flags: The only supported flag is BPF_F_PAD_ZEROS
 *
 * Copies a NUL-terminated string from userspace to BPF space. If user string is
 * too long this will still ensure zero termination in the dst buffer unless
 * buffer size is 0.
 *
 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
 * memset all of @dst on failure.
 */
__bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
{
	int ret;

	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
		return -EINVAL;

	if (unlikely(!dst__sz))
		return 0;

	ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
	if (ret < 0) {
		if (flags & BPF_F_PAD_ZEROS)
			memset((char *)dst, 0, dst__sz);

		return ret;
	}

	if (flags & BPF_F_PAD_ZEROS)
		memset((char *)dst + ret, 0, dst__sz - ret);
	else
		((char *)dst)[ret] = '\0';

	return ret + 1;
}
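
/* Usage sketch (illustrative only, not kernel code): copying a user string
 * from a sleepable BPF program; on success the return value counts the NUL
 * terminator, and BPF_F_PAD_ZEROS zero-fills the rest of the buffer:
 *
 *	char buf[64];
 *	int n;
 *
 *	n = bpf_copy_from_user_str(buf, sizeof(buf), user_ptr, BPF_F_PAD_ZEROS);
 *	if (n < 0)
 *		return 0;
 */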

/**
 * bpf_copy_from_user_task_str() - Copy a string from a task's address space
 * @dst: Destination address, in kernel space. This buffer must be
 *       at least @dst__sz bytes long.
 * @dst__sz: Maximum number of bytes to copy, includes the trailing NUL.
 * @unsafe_ptr__ign: Source address in the task's address space.
 * @tsk: The task whose address space will be used
 * @flags: The only supported flag is BPF_F_PAD_ZEROS
 *
 * Copies a NUL terminated string from a task's address space to the @dst
 * buffer. If user string is too long this will still ensure zero termination
 * in the @dst buffer unless buffer size is 0.
 *
 * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success
 * and memset all of @dst on failure.
 *
 * Return: The number of copied bytes on success including the NUL terminator.
 * A negative error code on failure.
 */
__bpf_kfunc int bpf_copy_from_user_task_str(void *dst, u32 dst__sz,
					    const void __user *unsafe_ptr__ign,
					    struct task_struct *tsk, u64 flags)
{
	int ret;

	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
		return -EINVAL;

	if (unlikely(dst__sz == 0))
		return 0;

	ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_ptr__ign, dst, dst__sz, 0);
	if (ret < 0) {
		if (flags & BPF_F_PAD_ZEROS)
			memset(dst, 0, dst__sz);
		return ret;
	}

	if (flags & BPF_F_PAD_ZEROS)
		memset(dst + ret, 0, dst__sz - ret);

	return ret + 1;
}

/* Keep unsigned long in prototype so that kfunc is usable when emitted to
 * vmlinux.h in BPF programs directly, but note that while in BPF prog, the
 * unsigned long always points to 8-byte region on stack, the kernel may only
 * read and write the 4-bytes on 32-bit.
 */
__bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
{
	local_irq_save(*flags__irq_flag);
}

__bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
{
	local_irq_restore(*flags__irq_flag);
}

__bpf_kfunc void __bpf_trap(void)
{
}

/*
 * Kfuncs for string operations.
 *
 * Since strings are not necessarily %NUL-terminated, we cannot directly call
 * in-kernel implementations. Instead, we open-code the implementations using
 * __get_kernel_nofault instead of plain dereference to make them safe.
 */

static int __bpf_strcasecmp(const char *s1, const char *s2, bool ignore_case)
{
	char c1, c2;
	int i;

	if (!copy_from_kernel_nofault_allowed(s1, 1) ||
	    !copy_from_kernel_nofault_allowed(s2, 1)) {
		return -ERANGE;
	}

	guard(pagefault)();
	for (i = 0; i < XATTR_SIZE_MAX; i++) {
		__get_kernel_nofault(&c1, s1, char, err_out);
		__get_kernel_nofault(&c2, s2, char, err_out);
		if (ignore_case) {
			c1 = tolower(c1);
			c2 = tolower(c2);
		}
		if (c1 != c2)
			return c1 < c2 ? -1 : 1;
		if (c1 == '\0')
			return 0;
		s1++;
		s2++;
	}
	return -E2BIG;
err_out:
	return -EFAULT;
}

/**
 * bpf_strcmp - Compare two strings
 * @s1__ign: One string
 * @s2__ign: Another string
 *
 * Return:
 * * %0 - Strings are equal
 * * %-1 - @s1__ign is smaller
 * * %1 - @s2__ign is smaller
 * * %-EFAULT - Cannot read one of the strings
 * * %-E2BIG - One of the strings is too large
 * * %-ERANGE - One of the strings is outside of kernel address space
 */
__bpf_kfunc int bpf_strcmp(const char *s1__ign, const char *s2__ign)
{
	return __bpf_strcasecmp(s1__ign, s2__ign, false);
}

/**
 * bpf_strcasecmp - Compare two strings, ignoring the case of the characters
 * @s1__ign: One string
 * @s2__ign: Another string
 *
 * Return:
 * * %0 - Strings are equal
 * * %-1 - @s1__ign is smaller
 * * %1 - @s2__ign is smaller
 * * %-EFAULT - Cannot read one of the strings
 * * %-E2BIG - One of the strings is too large
 * * %-ERANGE - One of the strings is outside of kernel address space
 */
__bpf_kfunc int bpf_strcasecmp(const char *s1__ign, const char *s2__ign)
{
	return __bpf_strcasecmp(s1__ign, s2__ign, true);
}

/**
 * bpf_strnchr - Find a character in a length limited string
 * @s__ign: The string to be searched
 * @count: The number of characters to be searched
 * @c: The character to search for
 *
 * Note that the %NUL-terminator is considered part of the string, and can
 * be searched for.
 *
 * Return:
 * * >=0 - Index of the first occurrence of @c within @s__ign
 * * %-ENOENT - @c not found in the first @count characters of @s__ign
 * * %-EFAULT - Cannot read @s__ign
 * * %-E2BIG - @s__ign is too large
 * * %-ERANGE - @s__ign is outside of kernel address space
 */
__bpf_kfunc int bpf_strnchr(const char *s__ign, size_t count, char c)
{
	char sc;
	int i;

	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
		return -ERANGE;

	guard(pagefault)();
	for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
		__get_kernel_nofault(&sc, s__ign, char, err_out);
		if (sc == c)
			return i;
		if (sc == '\0')
			return -ENOENT;
		s__ign++;
	}
	return i == XATTR_SIZE_MAX ? -E2BIG : -ENOENT;
err_out:
	return -EFAULT;
}

/**
 * bpf_strchr - Find the first occurrence of a character in a string
 * @s__ign: The string to be searched
 * @c: The character to search for
 *
 * Note that the %NUL-terminator is considered part of the string, and can
 * be searched for.
 *
 * Return:
 * * >=0 - The index of the first occurrence of @c within @s__ign
 * * %-ENOENT - @c not found in @s__ign
 * * %-EFAULT - Cannot read @s__ign
 * * %-E2BIG - @s__ign is too large
 * * %-ERANGE - @s__ign is outside of kernel address space
 */
__bpf_kfunc int bpf_strchr(const char *s__ign, char c)
{
	return bpf_strnchr(s__ign, XATTR_SIZE_MAX, c);
}

/**
 * bpf_strchrnul - Find and return a character in a string, or end of string
 * @s__ign: The string to be searched
 * @c: The character to search for
 *
 * Return:
 * * >=0 - Index of the first occurrence of @c within @s__ign or index of
 *   the null byte at the end of @s__ign when @c is not found
 * * %-EFAULT - Cannot read @s__ign
 * * %-E2BIG - @s__ign is too large
 * * %-ERANGE - @s__ign is outside of kernel address space
 */
__bpf_kfunc int bpf_strchrnul(const char *s__ign, char c)
{
	char sc;
	int i;

	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
		return -ERANGE;

	guard(pagefault)();
	for (i = 0; i < XATTR_SIZE_MAX; i++) {
		__get_kernel_nofault(&sc, s__ign, char, err_out);
		if (sc == '\0' || sc == c)
			return i;
		s__ign++;
	}
	return -E2BIG;
err_out:
	return -EFAULT;
}

/**
 * bpf_strrchr - Find the last occurrence of a character in a string
 * @s__ign: The string to be searched
 * @c: The character to search for
 *
 * Return:
 * * >=0 - Index of the last occurrence of @c within @s__ign
 * * %-ENOENT - @c not found in @s__ign
 * * %-EFAULT - Cannot read @s__ign
 * * %-E2BIG - @s__ign is too large
 * * %-ERANGE - @s__ign is outside of kernel address space
 */
__bpf_kfunc int bpf_strrchr(const char *s__ign, int c)
{
	char sc;
	int i, last = -ENOENT;

	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
		return -ERANGE;

	guard(pagefault)();
	for (i = 0; i < XATTR_SIZE_MAX; i++) {
		__get_kernel_nofault(&sc, s__ign, char, err_out);
		if (sc == c)
			last = i;
		if (sc == '\0')
			return last;
		s__ign++;
	}
	return -E2BIG;
err_out:
	return -EFAULT;
}

/**
 * bpf_strnlen - Calculate the length of a length-limited string
 * @s__ign: The string
 * @count: The maximum number of characters to count
 *
 * Return:
 * * >=0 - The length of @s__ign
 * * %-EFAULT - Cannot read @s__ign
 * * %-E2BIG - @s__ign is too large
 * * %-ERANGE - @s__ign is outside of kernel address space
 */
__bpf_kfunc int bpf_strnlen(const char *s__ign, size_t count)
{
	char c;
	int i;

	if (!copy_from_kernel_nofault_allowed(s__ign, 1))
		return -ERANGE;

	guard(pagefault)();
	for (i = 0; i < count && i < XATTR_SIZE_MAX; i++) {
		__get_kernel_nofault(&c, s__ign, char, err_out);
		if (c == '\0')
			return i;
		s__ign++;
	}
	return i == XATTR_SIZE_MAX ? -E2BIG : i;
err_out:
	return -EFAULT;
}

/**
 * bpf_strlen - Calculate the length of a string
 * @s__ign: The string
 *
 * Return:
 * * >=0 - The length of @s__ign
 * * %-EFAULT - Cannot read @s__ign
 * * %-E2BIG - @s__ign is too large
 * * %-ERANGE - @s__ign is outside of kernel address space
 */
__bpf_kfunc int bpf_strlen(const char *s__ign)
{
	return bpf_strnlen(s__ign, XATTR_SIZE_MAX);
}

/**
 * bpf_strspn - Calculate the length of the initial substring of @s__ign which
 * only contains letters in @accept__ign
 * @s__ign: The string to be searched
 * @accept__ign: The string to search for
 *
 * Return:
 * * >=0 - The length of the initial substring of @s__ign which only
 *   contains letters from @accept__ign
 * * %-EFAULT - Cannot read one of the strings
 * * %-E2BIG - One of the strings is too large
 * * %-ERANGE - One of the strings is outside of kernel address space
 */
__bpf_kfunc int bpf_strspn(const char *s__ign, const char *accept__ign)
{
	char cs, ca;
	int i, j;

	if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
	    !copy_from_kernel_nofault_allowed(accept__ign, 1)) {
		return -ERANGE;
	}

	guard(pagefault)();
	for (i = 0; i < XATTR_SIZE_MAX; i++) {
		__get_kernel_nofault(&cs, s__ign, char, err_out);
		if (cs == '\0')
			return i;
		for (j = 0; j < XATTR_SIZE_MAX; j++) {
			__get_kernel_nofault(&ca, accept__ign + j, char, err_out);
			if (cs == ca || ca == '\0')
				break;
		}
		if (j == XATTR_SIZE_MAX)
			return -E2BIG;
		if (ca == '\0')
			return i;
		s__ign++;
	}
	return -E2BIG;
err_out:
	return -EFAULT;
}

/**
3635
* bpf_strcspn - Calculate the length of the initial substring of @s__ign which
3636
* does not contain letters in @reject__ign
3637
* @s__ign: The string to be searched
3638
* @reject__ign: The string to search for
3639
*
3640
* Return:
3641
* * >=0 - The length of the initial substring of @s__ign which does not
3642
* contain letters from @reject__ign
3643
* * %-EFAULT - Cannot read one of the strings
3644
* * %-E2BIG - One of the strings is too large
3645
* * %-ERANGE - One of the strings is outside of kernel address space
3646
*/
3647
__bpf_kfunc int bpf_strcspn(const char *s__ign, const char *reject__ign)
3648
{
3649
char cs, cr;
3650
int i, j;
3651
3652
if (!copy_from_kernel_nofault_allowed(s__ign, 1) ||
3653
!copy_from_kernel_nofault_allowed(reject__ign, 1)) {
3654
return -ERANGE;
3655
}
3656
3657
guard(pagefault)();
3658
for (i = 0; i < XATTR_SIZE_MAX; i++) {
3659
__get_kernel_nofault(&cs, s__ign, char, err_out);
3660
if (cs == '\0')
3661
return i;
3662
for (j = 0; j < XATTR_SIZE_MAX; j++) {
3663
__get_kernel_nofault(&cr, reject__ign + j, char, err_out);
3664
if (cs == cr || cr == '\0')
3665
break;
3666
}
3667
if (j == XATTR_SIZE_MAX)
3668
return -E2BIG;
3669
if (cr != '\0')
3670
return i;
3671
s__ign++;
3672
}
3673
return -E2BIG;
3674
err_out:
3675
return -EFAULT;
3676
}

/**
 * bpf_strnstr - Find the first substring in a length-limited string
 * @s1__ign: The string to be searched
 * @s2__ign: The string to search for
 * @len: the maximum number of characters to search
 *
 * Return:
 * * >=0      - Index of the first character of the first occurrence of
 *              @s2__ign within the first @len characters of @s1__ign
 * * %-ENOENT - @s2__ign not found in the first @len characters of @s1__ign
 * * %-EFAULT - Cannot read one of the strings
 * * %-E2BIG  - One of the strings is too large
 * * %-ERANGE - One of the strings is outside of kernel address space
 */
__bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len)
{
	char c1, c2;
	int i, j;

	if (!copy_from_kernel_nofault_allowed(s1__ign, 1) ||
	    !copy_from_kernel_nofault_allowed(s2__ign, 1)) {
		return -ERANGE;
	}

	guard(pagefault)();
	for (i = 0; i < XATTR_SIZE_MAX; i++) {
		for (j = 0; i + j <= len && j < XATTR_SIZE_MAX; j++) {
			__get_kernel_nofault(&c2, s2__ign + j, char, err_out);
			if (c2 == '\0')
				return i;
			/*
			 * We allow reading an extra byte from s2 (note the
			 * `i + j <= len` above) to cover the case when s2 is
			 * a suffix of the first len chars of s1.
			 */
			if (i + j == len)
				break;
			__get_kernel_nofault(&c1, s1__ign + j, char, err_out);
			if (c1 == '\0')
				return -ENOENT;
			if (c1 != c2)
				break;
		}
		if (j == XATTR_SIZE_MAX)
			return -E2BIG;
		if (i + j == len)
			return -ENOENT;
		s1__ign++;
	}
	return -E2BIG;
err_out:
	return -EFAULT;
}

/**
 * bpf_strstr - Find the first substring in a string
 * @s1__ign: The string to be searched
 * @s2__ign: The string to search for
 *
 * Return:
 * * >=0      - Index of the first character of the first occurrence of
 *              @s2__ign within @s1__ign
 * * %-ENOENT - @s2__ign is not a substring of @s1__ign
 * * %-EFAULT - Cannot read one of the strings
 * * %-E2BIG  - One of the strings is too large
 * * %-ERANGE - One of the strings is outside of kernel address space
 */
__bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign)
{
	return bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX);
}
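
/*
 * Editorial usage sketch (assumption, not from the original file): substring
 * search from BPF, e.g. matching a needle in a task name. Negative return
 * values (-ENOENT, -EFAULT, -E2BIG, -ERANGE) must be treated as "no match".
 *
 *	extern int bpf_strstr(const char *s1__ign, const char *s2__ign) __ksym;
 *	extern int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len) __ksym;
 *
 *	char comm[16] = {};
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 *	if (bpf_strstr(comm, "worker") >= 0)
 *		bpf_printk("worker thread: %s", comm);	// "worker" occurs anywhere
 *	if (bpf_strnstr(comm, "kthread", 8) >= 0)
 *		bpf_printk("kthread: %s", comm);	// match within first 8 chars
 */
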
#ifdef CONFIG_KEYS
/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set KEY_LOOKUP_CREATE in *flags* to attempt creating a requested
 * special keyring (e.g. the session keyring), if it doesn't yet exist.
 * Set KEY_LOOKUP_PARTIAL in *flags* to look up a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 * NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. A key
 * pointer set in such a way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 * pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_p: data to verify
 * @sig_p: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_p* against the supplied *data_p*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
					   struct bpf_dynptr *sig_p,
					   struct bpf_key *trusted_keyring)
{
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
	const void *data, *sig;
	u32 data_len, sig_len;
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	data_len = __bpf_dynptr_size(data_ptr);
	data = __bpf_dynptr_data(data_ptr, data_len);
	sig_len = __bpf_dynptr_size(sig_ptr);
	sig = __bpf_dynptr_data(sig_ptr, sig_len);

	return verify_pkcs7_signature(data, data_len, sig, sig_len,
				      trusted_keyring->key,
				      VERIFYING_BPF_SIGNATURE, NULL,
				      NULL);
#else
	return -EOPNOTSUPP;
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
}
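
/*
 * Editorial usage sketch (assumption, not from the original file): the
 * typical lookup -> verify -> put sequence from a sleepable BPF program.
 * The dynptrs are assumed to have been initialized earlier, e.g. via
 * bpf_dynptr_from_mem().
 *
 *	struct bpf_key *trusted;
 *	int ret;
 *
 *	trusted = bpf_lookup_system_key(VERIFY_USE_SECONDARY_KEYRING);
 *	if (!trusted)
 *		return -ENOENT;
 *	ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted);
 *	bpf_key_put(trusted);	// always release the bpf_key wrapper
 *	return ret;
 */
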
#endif /* CONFIG_KEYS */

typedef int (*bpf_task_work_callback_t)(struct bpf_map *map, void *key, void *value);

enum bpf_task_work_state {
	/* bpf_task_work is ready to be used */
	BPF_TW_STANDBY = 0,
	/* irq work scheduling in progress */
	BPF_TW_PENDING,
	/* task work scheduling in progress */
	BPF_TW_SCHEDULING,
	/* task work is scheduled successfully */
	BPF_TW_SCHEDULED,
	/* callback is running */
	BPF_TW_RUNNING,
	/* associated BPF map value is deleted */
	BPF_TW_FREED,
};
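
/*
 * Editorial summary of the state machine implemented below:
 *
 *	STANDBY --(schedule kfunc)--> PENDING --(irq work)--> SCHEDULING
 *	SCHEDULING --(task_work_add() ok)--> SCHEDULED --(callback)--> RUNNING
 *	SCHEDULING --(task_work_add() fails)--> STANDBY
 *	SCHEDULING --(callback ran early)--> RUNNING
 *	RUNNING --(callback done)--> STANDBY
 *
 * bpf_task_work_cancel_and_free() can force any state to FREED, which makes
 * the other side clean up and drop its references.
 */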

struct bpf_task_work_ctx {
	enum bpf_task_work_state state;
	refcount_t refcnt;
	struct callback_head work;
	struct irq_work irq_work;
	/* bpf_prog that schedules task work */
	struct bpf_prog *prog;
	/* task for which callback is scheduled */
	struct task_struct *task;
	/* the map and map value associated with this context */
	struct bpf_map *map;
	void *map_val;
	enum task_work_notify_mode mode;
	bpf_task_work_callback_t callback_fn;
	struct rcu_head rcu;
} __aligned(8);

/* Actual type for struct bpf_task_work */
struct bpf_task_work_kern {
	struct bpf_task_work_ctx *ctx;
};

static void bpf_task_work_ctx_reset(struct bpf_task_work_ctx *ctx)
{
	if (ctx->prog) {
		bpf_prog_put(ctx->prog);
		ctx->prog = NULL;
	}
	if (ctx->task) {
		bpf_task_release(ctx->task);
		ctx->task = NULL;
	}
}

static bool bpf_task_work_ctx_tryget(struct bpf_task_work_ctx *ctx)
{
	return refcount_inc_not_zero(&ctx->refcnt);
}

static void bpf_task_work_ctx_put(struct bpf_task_work_ctx *ctx)
{
	if (!refcount_dec_and_test(&ctx->refcnt))
		return;

	bpf_task_work_ctx_reset(ctx);

	/* bpf_mem_free expects migration to be disabled */
	migrate_disable();
	bpf_mem_free(&bpf_global_ma, ctx);
	migrate_enable();
}

static void bpf_task_work_cancel(struct bpf_task_work_ctx *ctx)
{
	/*
	 * A scheduled task_work callback holds a ctx ref, so if we
	 * successfully cancelled it, we put that ref on the callback's
	 * behalf. If we couldn't cancel, the callback will inevitably run
	 * (or has already completed), and it takes care of its ctx ref
	 * itself.
	 */
	if (task_work_cancel(ctx->task, &ctx->work))
		bpf_task_work_ctx_put(ctx);
}

static void bpf_task_work_callback(struct callback_head *cb)
{
	struct bpf_task_work_ctx *ctx = container_of(cb, struct bpf_task_work_ctx, work);
	enum bpf_task_work_state state;
	u32 idx;
	void *key;

	/* Read lock is needed to protect ctx and map key/value access */
	guard(rcu_tasks_trace)();
	/*
	 * This callback may start running before bpf_task_work_irq() has
	 * switched the state to SCHEDULED, so handle both transition
	 * variants, SCHEDULING|SCHEDULED -> RUNNING.
	 */
	state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_RUNNING);
	if (state == BPF_TW_SCHEDULED)
		state = cmpxchg(&ctx->state, BPF_TW_SCHEDULED, BPF_TW_RUNNING);
	if (state == BPF_TW_FREED) {
		bpf_task_work_ctx_put(ctx);
		return;
	}

	key = (void *)map_key_from_value(ctx->map, ctx->map_val, &idx);

	migrate_disable();
	ctx->callback_fn(ctx->map, key, ctx->map_val);
	migrate_enable();

	bpf_task_work_ctx_reset(ctx);
	(void)cmpxchg(&ctx->state, BPF_TW_RUNNING, BPF_TW_STANDBY);

	bpf_task_work_ctx_put(ctx);
}

static void bpf_task_work_irq(struct irq_work *irq_work)
{
	struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
	enum bpf_task_work_state state;
	int err;

	guard(rcu_tasks_trace)();

	if (cmpxchg(&ctx->state, BPF_TW_PENDING, BPF_TW_SCHEDULING) != BPF_TW_PENDING) {
		bpf_task_work_ctx_put(ctx);
		return;
	}

	err = task_work_add(ctx->task, &ctx->work, ctx->mode);
	if (err) {
		bpf_task_work_ctx_reset(ctx);
		/*
		 * Try to switch back to STANDBY so the task_work can be
		 * reused. We might have gone to FREED already, which is fine
		 * as we have already cleaned up after ourselves.
		 */
		(void)cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_STANDBY);
		bpf_task_work_ctx_put(ctx);
		return;
	}

	/*
	 * It's technically possible for the just-scheduled task_work
	 * callback to have completed by now, going SCHEDULING -> RUNNING and
	 * then dropping its ctx refcount. Instead of taking an extra ref
	 * just to protect the ctx->state access below, we rely on RCU
	 * protection to perform the SCHEDULING -> SCHEDULED attempt below.
	 */
	state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_SCHEDULED);
	if (state == BPF_TW_FREED)
		bpf_task_work_cancel(ctx); /* clean up if we switched into FREED state */
}

static struct bpf_task_work_ctx *bpf_task_work_fetch_ctx(struct bpf_task_work *tw,
							 struct bpf_map *map)
{
	struct bpf_task_work_kern *twk = (void *)tw;
	struct bpf_task_work_ctx *ctx, *old_ctx;

	ctx = READ_ONCE(twk->ctx);
	if (ctx)
		return ctx;

	ctx = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_task_work_ctx));
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	memset(ctx, 0, sizeof(*ctx));
	refcount_set(&ctx->refcnt, 1); /* map's own ref */
	ctx->state = BPF_TW_STANDBY;

	old_ctx = cmpxchg(&twk->ctx, NULL, ctx);
	if (old_ctx) {
		/*
		 * tw->ctx was set by a concurrent BPF program; release the
		 * memory we allocated and reuse the already-set context.
		 */
		bpf_mem_free(&bpf_global_ma, ctx);
		return old_ctx;
	}

	return ctx; /* Success */
}

static struct bpf_task_work_ctx *bpf_task_work_acquire_ctx(struct bpf_task_work *tw,
							   struct bpf_map *map)
{
	struct bpf_task_work_ctx *ctx;

	ctx = bpf_task_work_fetch_ctx(tw, map);
	if (IS_ERR(ctx))
		return ctx;

	/* try to get ref for task_work callback to hold */
	if (!bpf_task_work_ctx_tryget(ctx))
		return ERR_PTR(-EBUSY);

	if (cmpxchg(&ctx->state, BPF_TW_STANDBY, BPF_TW_PENDING) != BPF_TW_STANDBY) {
		/* lost acquiring race or map_release_uref() stole it from us, put ref and bail */
		bpf_task_work_ctx_put(ctx);
		return ERR_PTR(-EBUSY);
	}

	/*
	 * If no process or bpffs is holding a reference to the map, no new callbacks should be
	 * scheduled. This does not address any race or correctness issue, but rather is a policy
	 * choice: dropping user references should stop everything.
	 */
	if (!atomic64_read(&map->usercnt)) {
		/* drop ref we just got for task_work callback itself */
		bpf_task_work_ctx_put(ctx);
		/* transfer map's ref into cancel_and_free() */
		bpf_task_work_cancel_and_free(tw);
		return ERR_PTR(-EBUSY);
	}

	return ctx;
}

static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work *tw,
				  struct bpf_map *map, bpf_task_work_callback_t callback_fn,
				  struct bpf_prog_aux *aux, enum task_work_notify_mode mode)
{
	struct bpf_prog *prog;
	struct bpf_task_work_ctx *ctx;
	int err;

	BTF_TYPE_EMIT(struct bpf_task_work);

	prog = bpf_prog_inc_not_zero(aux->prog);
	if (IS_ERR(prog))
		return -EBADF;
	task = bpf_task_acquire(task);
	if (!task) {
		err = -EBADF;
		goto release_prog;
	}

	ctx = bpf_task_work_acquire_ctx(tw, map);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto release_all;
	}

	ctx->task = task;
	ctx->callback_fn = callback_fn;
	ctx->prog = prog;
	ctx->mode = mode;
	ctx->map = map;
	ctx->map_val = (void *)tw - map->record->task_work_off;
	init_task_work(&ctx->work, bpf_task_work_callback);
	init_irq_work(&ctx->irq_work, bpf_task_work_irq);

	irq_work_queue(&ctx->irq_work);
	return 0;

release_all:
	bpf_task_release(task);
release_prog:
	bpf_prog_put(prog);
	return err;
}

/**
 * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode
 * @task: Task struct for which callback should be scheduled
 * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
 * @map__map: bpf_map that embeds struct bpf_task_work in the values
 * @callback: pointer to BPF subprogram to call
 * @aux__prog: user should pass NULL
 *
 * Return: 0 if task work has been scheduled successfully, negative error code otherwise
 */
__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
					      void *map__map, bpf_task_work_callback_t callback,
					      void *aux__prog)
{
	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
}

/**
 * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode
 * @task: Task struct for which callback should be scheduled
 * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
 * @map__map: bpf_map that embeds struct bpf_task_work in the values
 * @callback: pointer to BPF subprogram to call
 * @aux__prog: user should pass NULL
 *
 * Return: 0 if task work has been scheduled successfully, negative error code otherwise
 */
__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
					      void *map__map, bpf_task_work_callback_t callback,
					      void *aux__prog)
{
	return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
}
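
/*
 * Editorial usage sketch (assumption, not from the original file): deferring
 * work to a task's context from a BPF program. The map definition, element
 * layout, and attach point are illustrative placeholders.
 *
 *	struct elem { struct bpf_task_work tw; };
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__type(key, int);
 *		__type(value, struct elem);
 *	} tw_map SEC(".maps");
 *
 *	static int tw_cb(struct bpf_map *map, void *key, void *value)
 *	{
 *		// runs later, in the target task's context
 *		return 0;
 *	}
 *
 *	struct elem *v = bpf_map_lookup_elem(&tw_map, &key);
 *	if (v)
 *		bpf_task_work_schedule_resume(task, &v->tw, &tw_map, tw_cb, NULL);
 */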

__bpf_kfunc_end_defs();

static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
{
	struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);

	bpf_task_work_cancel(ctx); /* this might put task_work callback's ref */
	bpf_task_work_ctx_put(ctx); /* and here we put map's own ref that was transferred to us */
}

void bpf_task_work_cancel_and_free(void *val)
{
	struct bpf_task_work_kern *twk = val;
	struct bpf_task_work_ctx *ctx;
	enum bpf_task_work_state state;

	ctx = xchg(&twk->ctx, NULL);
	if (!ctx)
		return;

	state = xchg(&ctx->state, BPF_TW_FREED);
	if (state == BPF_TW_SCHEDULED) {
		/* run in irq_work to avoid locks in NMI */
		init_irq_work(&ctx->irq_work, bpf_task_work_cancel_scheduled);
		irq_work_queue(&ctx->irq_work);
		return;
	}

	bpf_task_work_ctx_put(ctx); /* put bpf map's ref */
}

BTF_KFUNCS_START(generic_btf_ids)
#ifdef CONFIG_CRASH_DUMP
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)

#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
#ifdef CONFIG_BPF_EVENTS
BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
#endif
#ifdef CONFIG_KEYS
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
#endif
BTF_KFUNCS_END(generic_btf_ids)

static const struct btf_kfunc_id_set generic_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &generic_btf_ids,
};

BTF_ID_LIST(generic_dtor_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, bpf_task_release_dtor)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
BTF_ID(func, bpf_cgroup_release_dtor)
#endif

BTF_KFUNCS_START(common_btf_ids)
BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL)
BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL)
BTF_ID_FLAGS(func, bpf_rcu_read_lock)
BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
#endif
BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_dynptr_adjust)
BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
BTF_ID_FLAGS(func, bpf_dynptr_copy)
BTF_ID_FLAGS(func, bpf_dynptr_memset)
#ifdef CONFIG_NET
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
#endif
BTF_ID_FLAGS(func, bpf_wq_init)
BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
BTF_ID_FLAGS(func, bpf_wq_start)
BTF_ID_FLAGS(func, bpf_preempt_disable)
BTF_ID_FLAGS(func, bpf_preempt_enable)
BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_copy_from_user_task_str, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_get_kmem_cache)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_local_irq_save)
BTF_ID_FLAGS(func, bpf_local_irq_restore)
BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)
BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr)
BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
#ifdef CONFIG_DMA_SHARED_BUFFER
BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
#endif
BTF_ID_FLAGS(func, __bpf_trap)
BTF_ID_FLAGS(func, bpf_strcmp)
BTF_ID_FLAGS(func, bpf_strcasecmp)
BTF_ID_FLAGS(func, bpf_strchr)
BTF_ID_FLAGS(func, bpf_strchrnul)
BTF_ID_FLAGS(func, bpf_strnchr)
BTF_ID_FLAGS(func, bpf_strrchr)
BTF_ID_FLAGS(func, bpf_strlen)
BTF_ID_FLAGS(func, bpf_strnlen)
BTF_ID_FLAGS(func, bpf_strspn)
BTF_ID_FLAGS(func, bpf_strcspn)
BTF_ID_FLAGS(func, bpf_strstr)
BTF_ID_FLAGS(func, bpf_strnstr)
#if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS)
BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(common_btf_ids)

static const struct btf_kfunc_id_set common_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &common_btf_ids,
};

static int __init kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc generic_dtors[] = {
		{
			.btf_id = generic_dtor_ids[0],
			.kfunc_btf_id = generic_dtor_ids[1]
		},
#ifdef CONFIG_CGROUPS
		{
			.btf_id = generic_dtor_ids[2],
			.kfunc_btf_id = generic_dtor_ids[3]
		},
#endif
	};

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
						 ARRAY_SIZE(generic_dtors),
						 THIS_MODULE);
	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
}

late_initcall(kfunc_init);
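
/*
 * Editorial note: subsystem or module code can expose its own kfuncs the
 * same way. A minimal, hypothetical module-side sketch using the APIs shown
 * above (my_kfunc/my_btf_ids are placeholders; only the registration pattern
 * mirrors kfunc_init()):
 *
 *	BTF_KFUNCS_START(my_btf_ids)
 *	BTF_ID_FLAGS(func, my_kfunc, KF_TRUSTED_ARGS)
 *	BTF_KFUNCS_END(my_btf_ids)
 *
 *	static const struct btf_kfunc_id_set my_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set = &my_btf_ids,
 *	};
 *
 *	// in the module init path:
 *	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set);
 */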

/* Get a pointer to dynptr data up to len bytes for read-only access. If
 * the dynptr doesn't have contiguous data up to len bytes, return NULL.
 */
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
{
	const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;

	return bpf_dynptr_slice(p, 0, NULL, len);
}

/* Get a pointer to dynptr data up to len bytes for read-write access. If
 * the dynptr doesn't have contiguous data up to len bytes, or the dynptr
 * is read-only, return NULL.
 */
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
{
	if (__bpf_dynptr_is_rdonly(ptr))
		return NULL;
	return (void *)__bpf_dynptr_data(ptr, len);
}
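
/*
 * Editorial note: an in-kernel caller of these helpers follows the same
 * size-then-data pattern as bpf_verify_pkcs7_signature() above:
 *
 *	u32 len = __bpf_dynptr_size(ptr);
 *	const void *buf = __bpf_dynptr_data(ptr, len);
 *
 *	if (!buf)
 *		return -EINVAL;	// data not contiguous for len bytes
 */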