GitHub Repository: torvalds/linux
Path: blob/master/kernel/bpf/btf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf.h>
#include <linux/bpf_lsm.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/overflow.h>

#include <net/netfilter/nf_bpf_link.h>

#include <net/sock.h>
#include <net/xdp.h>
#include "../tools/lib/bpf/relo_core.h"

/* BTF (BPF Type Format) is the metadata format that describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF programs
 * primarily use.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. E.g.
 * to describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id. The type_id
 * is implied by the location of the btf_type object in
 * the BTF type section. The first one has type_id 1, the second
 * one has type_id 2, etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * a type_id (i.e. the "type" in "struct btf_type").
 *
 * NOTE that we cannot assume any reference order.
 * A btf_type object can refer to an earlier btf_type object,
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type reference is done
 * by specifying the type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0',
 * which is used to mean 'anonymous'. Some btf_type may not
 * have a name.
 */
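
/*
 * Illustrative sketch (not part of the original file): the "const void *"
 * example above, laid out as raw 'struct btf_type' records from
 * uapi/linux/btf.h. The kind is stored in bits 24-28 of 'info'; the
 * array name is a hypothetical placeholder.
 *
 *	static const struct btf_type const_void_ptr_example[] = {
 *		{				// type_id 1: CONST
 *			.name_off = 0,				// anonymous
 *			.info	  = BTF_KIND_CONST << 24,	// vlen 0, no kflag
 *			.type	  = 2,				// refers to the PTR below
 *		},
 *		{				// type_id 2: PTR
 *			.name_off = 0,				// anonymous
 *			.info	  = BTF_KIND_PTR << 24,
 *			.type	  = 0,				// type_id 0 == void
 *		},
 *	};
 */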

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects into
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done. E.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referring type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve types implement the "resolve()" ops, which
 * essentially does a DFS and detects back edges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                         |
 *                        +-----------------------------------------+
 *
 */
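
/*
 * Simplified sketch (illustrative, not part of the original file) of the
 * back-edge detection described above: a type is marked VISITED when the
 * DFS enters it and RESOLVED when it leaves, so reaching a type that is
 * still VISITED means a loop. example_resolve() is a hypothetical name;
 * the real machinery is env_stack_push()/env_stack_pop_resolved() below.
 *
 *	static int example_resolve(u8 *states, u32 type_id)
 *	{
 *		if (states[type_id] == RESOLVED)
 *			return 0;
 *		if (states[type_id] == VISITED)
 *			return -ELOOP;	// back edge, e.g. struct A <-> struct B
 *		states[type_id] = VISITED;
 *		// ... recurse into every type_id referenced by this type ...
 *		states[type_id] = RESOLVED;
 *		return 0;
 *	}
 */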

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
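
/*
 * Worked example (illustrative): for a 12-bit quantity,
 * BITS_ROUNDDOWN_BYTES(12) == 1 and BITS_PER_BYTE_MASKED(12) == 4, so
 * BITS_ROUNDUP_BYTES(12) == 2, i.e. 12 bits need 2 bytes of storage.
 * For a whole number of bytes, e.g. 32 bits, BITS_PER_BYTE_MASKED(32) == 0
 * and BITS_ROUNDUP_BYTES(32) == 4.
 */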

#define BTF_INFO_MASK 0x9f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB is enough for 64k structs with 16 members each, plus
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)
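
/*
 * Usage sketch (illustrative, not from the original file): walk the
 * members of a BTF_KIND_STRUCT starting at member index 'resume_idx'.
 * 'btf', 'struct_type' and 'resume_idx' are hypothetical locals.
 *
 *	const struct btf_member *member;
 *	u16 i, resume_idx = 0;
 *
 *	for_each_member_from(i, resume_idx, struct_type, member)
 *		pr_debug("member %s: type_id=%u offset=%u\n",
 *			 btf_name_by_offset(btf, member->name_off),
 *			 member->type, member->offset);
 */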
205
206
DEFINE_IDR(btf_idr);
207
DEFINE_SPINLOCK(btf_idr_lock);
208
209
enum btf_kfunc_hook {
210
BTF_KFUNC_HOOK_COMMON,
211
BTF_KFUNC_HOOK_XDP,
212
BTF_KFUNC_HOOK_TC,
213
BTF_KFUNC_HOOK_STRUCT_OPS,
214
BTF_KFUNC_HOOK_TRACING,
215
BTF_KFUNC_HOOK_SYSCALL,
216
BTF_KFUNC_HOOK_FMODRET,
217
BTF_KFUNC_HOOK_CGROUP,
218
BTF_KFUNC_HOOK_SCHED_ACT,
219
BTF_KFUNC_HOOK_SK_SKB,
220
BTF_KFUNC_HOOK_SOCKET_FILTER,
221
BTF_KFUNC_HOOK_LWT,
222
BTF_KFUNC_HOOK_NETFILTER,
223
BTF_KFUNC_HOOK_KPROBE,
224
BTF_KFUNC_HOOK_MAX,
225
};
226
227
enum {
228
BTF_KFUNC_SET_MAX_CNT = 256,
229
BTF_DTOR_KFUNC_MAX_CNT = 256,
230
BTF_KFUNC_FILTER_MAX_CNT = 16,
231
};
232
233
struct btf_kfunc_hook_filter {
234
btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
235
u32 nr_filters;
236
};
237
238
struct btf_kfunc_set_tab {
239
struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
240
struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
241
};
242
243
struct btf_id_dtor_kfunc_tab {
244
u32 cnt;
245
struct btf_id_dtor_kfunc dtors[];
246
};
247
248
struct btf_struct_ops_tab {
249
u32 cnt;
250
u32 capacity;
251
struct bpf_struct_ops_desc ops[];
252
};
253
254
struct btf {
255
void *data;
256
struct btf_type **types;
257
u32 *resolved_ids;
258
u32 *resolved_sizes;
259
const char *strings;
260
void *nohdr_data;
261
struct btf_header hdr;
262
u32 nr_types; /* includes VOID for base BTF */
263
u32 named_start_id;
264
u32 types_size;
265
u32 data_size;
266
refcount_t refcnt;
267
u32 id;
268
struct rcu_head rcu;
269
struct btf_kfunc_set_tab *kfunc_set_tab;
270
struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
271
struct btf_struct_metas *struct_meta_tab;
272
struct btf_struct_ops_tab *struct_ops_tab;
273
274
/* split BTF support */
275
struct btf *base_btf;
276
u32 start_id; /* first type ID in this BTF (0 for base BTF) */
277
u32 start_str_off; /* first string offset (0 for base BTF) */
278
char name[MODULE_NAME_LEN];
279
bool kernel_btf;
280
__u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
281
};
282
283
enum verifier_phase {
284
CHECK_META,
285
CHECK_TYPE,
286
};
287
288
struct resolve_vertex {
289
const struct btf_type *t;
290
u32 type_id;
291
u16 next_member;
292
};
293
294
enum visit_state {
295
NOT_VISITED,
296
VISITED,
297
RESOLVED,
298
};
299
300
enum resolve_mode {
301
RESOLVE_TBD, /* To Be Determined */
302
RESOLVE_PTR, /* Resolving for Pointer */
303
RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union
304
* or array
305
*/
306
};
307
308
#define MAX_RESOLVE_DEPTH 32
309
310
struct btf_sec_info {
311
u32 off;
312
u32 len;
313
};
314
315
struct btf_verifier_env {
316
struct btf *btf;
317
u8 *visit_states;
318
struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
319
struct bpf_verifier_log log;
320
u32 log_type_id;
321
u32 top_stack;
322
enum verifier_phase phase;
323
enum resolve_mode resolve_mode;
324
};
325
326
static const char * const btf_kind_str[NR_BTF_KINDS] = {
327
[BTF_KIND_UNKN] = "UNKNOWN",
328
[BTF_KIND_INT] = "INT",
329
[BTF_KIND_PTR] = "PTR",
330
[BTF_KIND_ARRAY] = "ARRAY",
331
[BTF_KIND_STRUCT] = "STRUCT",
332
[BTF_KIND_UNION] = "UNION",
333
[BTF_KIND_ENUM] = "ENUM",
334
[BTF_KIND_FWD] = "FWD",
335
[BTF_KIND_TYPEDEF] = "TYPEDEF",
336
[BTF_KIND_VOLATILE] = "VOLATILE",
337
[BTF_KIND_CONST] = "CONST",
338
[BTF_KIND_RESTRICT] = "RESTRICT",
339
[BTF_KIND_FUNC] = "FUNC",
340
[BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
341
[BTF_KIND_VAR] = "VAR",
342
[BTF_KIND_DATASEC] = "DATASEC",
343
[BTF_KIND_FLOAT] = "FLOAT",
344
[BTF_KIND_DECL_TAG] = "DECL_TAG",
345
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
346
[BTF_KIND_ENUM64] = "ENUM64",
347
};
348
349
const char *btf_type_str(const struct btf_type *t)
350
{
351
return btf_kind_str[BTF_INFO_KIND(t->info)];
352
}
353
354
/* Chunk size we use in safe copy of data to be shown. */
355
#define BTF_SHOW_OBJ_SAFE_SIZE 32
356
357
/*
358
* This is the maximum size of a base type value (equivalent to a
359
* 128-bit int); if we are at the end of our safe buffer and have
360
 * less than 16 bytes of space, we can't be assured of being able
361
* to copy the next type safely, so in such cases we will initiate
362
* a new copy.
363
*/
364
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16
365
366
/* Type name size */
367
#define BTF_SHOW_NAME_SIZE 80
368
369
/*
370
* The suffix of a type that indicates it cannot alias another type when
371
* comparing BTF IDs for kfunc invocations.
372
*/
373
#define NOCAST_ALIAS_SUFFIX "___init"
374
375
/*
376
* Common data to all BTF show operations. Private show functions can add
377
* their own data to a structure containing a struct btf_show and consult it
378
* in the show callback. See btf_type_show() below.
379
*
380
* One challenge with showing nested data is we want to skip 0-valued
381
* data, but in order to figure out whether a nested object is all zeros
382
* we need to walk through it. As a result, we need to make two passes
383
 * when handling structs, unions and arrays; the first pass simply looks
384
* for nonzero data, while the second actually does the display. The first
385
* pass is signalled by show->state.depth_check being set, and if we
386
* encounter a non-zero value we set show->state.depth_to_show to
387
* the depth at which we encountered it. When we have completed the
388
* first pass, we will know if anything needs to be displayed if
389
* depth_to_show > depth. See btf_[struct,array]_show() for the
390
* implementation of this.
391
*
392
* Another problem is we want to ensure the data for display is safe to
393
* access. To support this, the anonymous "struct {} obj" tracks the data
394
* object and our safe copy of it. We copy portions of the data needed
395
* to the object "copy" buffer, but because its size is limited to
396
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
397
* traverse larger objects for display.
398
*
399
* The various data type show functions all start with a call to
400
* btf_show_start_type() which returns a pointer to the safe copy
401
* of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
402
* raw data itself). btf_show_obj_safe() is responsible for
403
* using copy_from_kernel_nofault() to update the safe data if necessary
404
* as we traverse the object's data. skbuff-like semantics are
405
* used:
406
*
407
* - obj.head points to the start of the toplevel object for display
408
* - obj.size is the size of the toplevel object
409
* - obj.data points to the current point in the original data at
410
* which our safe data starts. obj.data will advance as we copy
411
* portions of the data.
412
*
413
* In most cases a single copy will suffice, but larger data structures
414
* such as "struct task_struct" will require many copies. The logic in
415
* btf_show_obj_safe() handles the logic that determines if a new
416
* copy_from_kernel_nofault() is needed.
417
*/
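
/*
 * Illustrative sketch (not part of the original file) of the embedding
 * pattern described above: a private show implementation wraps
 * struct btf_show and recovers its own state in the callback via
 * container_of(). 'struct btf_show_example' and example_showfn() are
 * hypothetical names.
 *
 *	struct btf_show_example {
 *		struct btf_show show;
 *		size_t len;				// private state
 *	};
 *
 *	static void example_showfn(struct btf_show *show, const char *fmt,
 *				   va_list args)
 *	{
 *		struct btf_show_example *ex =
 *			container_of(show, struct btf_show_example, show);
 *
 *		ex->len += vsnprintf(NULL, 0, fmt, args);	// just count bytes
 *	}
 */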
418
struct btf_show {
419
u64 flags;
420
void *target; /* target of show operation (seq file, buffer) */
421
__printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
422
const struct btf *btf;
423
/* below are used during iteration */
424
struct {
425
u8 depth;
426
u8 depth_to_show;
427
u8 depth_check;
428
u8 array_member:1,
429
array_terminated:1;
430
u16 array_encoding;
431
u32 type_id;
432
int status; /* non-zero for error */
433
const struct btf_type *type;
434
const struct btf_member *member;
435
char name[BTF_SHOW_NAME_SIZE]; /* space for member name/type */
436
} state;
437
struct {
438
u32 size;
439
void *head;
440
void *data;
441
u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
442
} obj;
443
};
444
445
struct btf_kind_operations {
446
s32 (*check_meta)(struct btf_verifier_env *env,
447
const struct btf_type *t,
448
u32 meta_left);
449
int (*resolve)(struct btf_verifier_env *env,
450
const struct resolve_vertex *v);
451
int (*check_member)(struct btf_verifier_env *env,
452
const struct btf_type *struct_type,
453
const struct btf_member *member,
454
const struct btf_type *member_type);
455
int (*check_kflag_member)(struct btf_verifier_env *env,
456
const struct btf_type *struct_type,
457
const struct btf_member *member,
458
const struct btf_type *member_type);
459
void (*log_details)(struct btf_verifier_env *env,
460
const struct btf_type *t);
461
void (*show)(const struct btf *btf, const struct btf_type *t,
462
u32 type_id, void *data, u8 bits_offsets,
463
struct btf_show *show);
464
};
465
466
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
467
static struct btf_type btf_void;
468
469
static int btf_resolve(struct btf_verifier_env *env,
470
const struct btf_type *t, u32 type_id);
471
472
static int btf_func_check(struct btf_verifier_env *env,
473
const struct btf_type *t);
474
475
static bool btf_type_is_modifier(const struct btf_type *t)
476
{
477
/* Some of them are not strictly C modifiers,
 * but they are grouped into the same bucket
 * as far as BTF is concerned:
480
* A type (t) that refers to another
481
* type through t->type AND its size cannot
482
* be determined without following the t->type.
483
*
484
* ptr does not fall into this bucket
485
* because its size is always sizeof(void *).
486
*/
487
switch (BTF_INFO_KIND(t->info)) {
488
case BTF_KIND_TYPEDEF:
489
case BTF_KIND_VOLATILE:
490
case BTF_KIND_CONST:
491
case BTF_KIND_RESTRICT:
492
case BTF_KIND_TYPE_TAG:
493
return true;
494
}
495
496
return false;
497
}
498
499
static int btf_start_id(const struct btf *btf)
500
{
501
return btf->start_id + (btf->base_btf ? 0 : 1);
502
}
503
504
bool btf_type_is_void(const struct btf_type *t)
505
{
506
return t == &btf_void;
507
}
508
509
static bool btf_type_is_datasec(const struct btf_type *t)
510
{
511
return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
512
}
513
514
static bool btf_type_is_decl_tag(const struct btf_type *t)
515
{
516
return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
517
}
518
519
static bool btf_type_nosize(const struct btf_type *t)
520
{
521
return btf_type_is_void(t) || btf_type_is_fwd(t) ||
522
btf_type_is_func(t) || btf_type_is_func_proto(t) ||
523
btf_type_is_decl_tag(t);
524
}
525
526
static bool btf_type_nosize_or_null(const struct btf_type *t)
527
{
528
return !t || btf_type_nosize(t);
529
}
530
531
static bool btf_type_is_decl_tag_target(const struct btf_type *t)
532
{
533
return btf_type_is_func(t) || btf_type_is_struct(t) ||
534
btf_type_is_var(t) || btf_type_is_typedef(t);
535
}
536
537
bool btf_is_vmlinux(const struct btf *btf)
538
{
539
return btf->kernel_btf && !btf->base_btf;
540
}
541
542
u32 btf_nr_types(const struct btf *btf)
543
{
544
u32 total = 0;
545
546
while (btf) {
547
total += btf->nr_types;
548
btf = btf->base_btf;
549
}
550
551
return total;
552
}
553
554
/*
555
* Note that vmlinux and kernel module BTFs are always sorted
556
* during the building phase.
557
*/
558
static void btf_check_sorted(struct btf *btf)
559
{
560
u32 i, n, named_start_id = 0;
561
562
n = btf_nr_types(btf);
563
if (btf_is_vmlinux(btf)) {
564
for (i = btf_start_id(btf); i < n; i++) {
565
const struct btf_type *t = btf_type_by_id(btf, i);
566
const char *n = btf_name_by_offset(btf, t->name_off);
567
568
if (n[0] != '\0') {
569
btf->named_start_id = i;
570
return;
571
}
572
}
573
return;
574
}
575
576
for (i = btf_start_id(btf) + 1; i < n; i++) {
577
const struct btf_type *ta = btf_type_by_id(btf, i - 1);
578
const struct btf_type *tb = btf_type_by_id(btf, i);
579
const char *na = btf_name_by_offset(btf, ta->name_off);
580
const char *nb = btf_name_by_offset(btf, tb->name_off);
581
582
if (strcmp(na, nb) > 0)
583
return;
584
585
if (named_start_id == 0 && na[0] != '\0')
586
named_start_id = i - 1;
587
if (named_start_id == 0 && nb[0] != '\0')
588
named_start_id = i;
589
}
590
591
if (named_start_id)
592
btf->named_start_id = named_start_id;
593
}
594
595
/*
596
* btf_named_start_id - Get the named starting ID for the BTF
597
* @btf: Pointer to the target BTF object
598
* @own: Flag indicating whether to query only the current BTF (true = current BTF only,
599
* false = recursively traverse the base BTF chain)
600
*
601
* Return value rules:
602
* 1. For a sorted btf, return its named_start_id
603
* 2. Else for a split BTF, return its start_id
604
* 3. Else for a base BTF, return 1
605
*/
606
u32 btf_named_start_id(const struct btf *btf, bool own)
607
{
608
const struct btf *base_btf = btf;
609
610
while (!own && base_btf->base_btf)
611
base_btf = base_btf->base_btf;
612
613
return base_btf->named_start_id ?: (base_btf->start_id ?: 1);
614
}
615
616
static s32 btf_find_by_name_kind_bsearch(const struct btf *btf, const char *name)
617
{
618
const struct btf_type *t;
619
const char *tname;
620
s32 l, r, m;
621
622
l = btf_named_start_id(btf, true);
623
r = btf_nr_types(btf) - 1;
624
while (l <= r) {
625
m = l + (r - l) / 2;
626
t = btf_type_by_id(btf, m);
627
tname = btf_name_by_offset(btf, t->name_off);
628
if (strcmp(tname, name) >= 0) {
629
if (l == r)
630
return r;
631
r = m;
632
} else {
633
l = m + 1;
634
}
635
}
636
637
return btf_nr_types(btf);
638
}
639
640
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
641
{
642
const struct btf *base_btf = btf_base_btf(btf);
643
const struct btf_type *t;
644
const char *tname;
645
s32 id, total;
646
647
if (base_btf) {
648
id = btf_find_by_name_kind(base_btf, name, kind);
649
if (id > 0)
650
return id;
651
}
652
653
total = btf_nr_types(btf);
654
if (btf->named_start_id > 0 && name[0]) {
655
id = btf_find_by_name_kind_bsearch(btf, name);
656
for (; id < total; id++) {
657
t = btf_type_by_id(btf, id);
658
tname = btf_name_by_offset(btf, t->name_off);
659
if (strcmp(tname, name) != 0)
660
return -ENOENT;
661
if (BTF_INFO_KIND(t->info) == kind)
662
return id;
663
}
664
} else {
665
for (id = btf_start_id(btf); id < total; id++) {
666
t = btf_type_by_id(btf, id);
667
if (BTF_INFO_KIND(t->info) != kind)
668
continue;
669
tname = btf_name_by_offset(btf, t->name_off);
670
if (strcmp(tname, name) == 0)
671
return id;
672
}
673
}
674
675
return -ENOENT;
676
}
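
/*
 * Usage sketch (illustrative): look up the BTF type ID of "task_struct"
 * in vmlinux BTF. The local variable names are assumptions and the
 * IS_ERR()/NULL checks on the vmlinux BTF are elided.
 *
 *	const struct btf *vmlinux_btf = bpf_get_btf_vmlinux();
 *	s32 id;
 *
 *	id = btf_find_by_name_kind(vmlinux_btf, "task_struct",
 *				   BTF_KIND_STRUCT);
 *	if (id < 0)
 *		return id;		// -ENOENT if no such named type
 */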
677
678
s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
679
{
680
struct btf *btf;
681
s32 ret;
682
int id;
683
684
btf = bpf_get_btf_vmlinux();
685
if (IS_ERR(btf))
686
return PTR_ERR(btf);
687
if (!btf)
688
return -EINVAL;
689
690
ret = btf_find_by_name_kind(btf, name, kind);
691
/* ret is never zero, since btf_find_by_name_kind returns
692
* positive btf_id or negative error.
693
*/
694
if (ret > 0) {
695
btf_get(btf);
696
*btf_p = btf;
697
return ret;
698
}
699
700
/* If name is not found in vmlinux's BTF then search in module's BTFs */
701
spin_lock_bh(&btf_idr_lock);
702
idr_for_each_entry(&btf_idr, btf, id) {
703
if (!btf_is_module(btf))
704
continue;
705
/* linear search could be slow hence unlock/lock
706
 * the IDR to avoid holding it for too long
707
*/
708
btf_get(btf);
709
spin_unlock_bh(&btf_idr_lock);
710
ret = btf_find_by_name_kind(btf, name, kind);
711
if (ret > 0) {
712
*btf_p = btf;
713
return ret;
714
}
715
btf_put(btf);
716
spin_lock_bh(&btf_idr_lock);
717
}
718
spin_unlock_bh(&btf_idr_lock);
719
return ret;
720
}
721
EXPORT_SYMBOL_GPL(bpf_find_btf_id);
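
/*
 * Usage sketch (illustrative): a successful bpf_find_btf_id() call takes
 * a reference on the BTF object it returns through @btf_p, so the caller
 * must drop it with btf_put(). "nf_conn" and the locals are just
 * examples.
 *
 *	struct btf *btf;
 *	s32 id = bpf_find_btf_id("nf_conn", BTF_KIND_STRUCT, &btf);
 *
 *	if (id > 0) {
 *		// ... use btf and id ...
 *		btf_put(btf);
 *	}
 */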
722
723
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
724
u32 id, u32 *res_id)
725
{
726
const struct btf_type *t = btf_type_by_id(btf, id);
727
728
while (btf_type_is_modifier(t)) {
729
id = t->type;
730
t = btf_type_by_id(btf, t->type);
731
}
732
733
if (res_id)
734
*res_id = id;
735
736
return t;
737
}
738
739
const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
740
u32 id, u32 *res_id)
741
{
742
const struct btf_type *t;
743
744
t = btf_type_skip_modifiers(btf, id, NULL);
745
if (!btf_type_is_ptr(t))
746
return NULL;
747
748
return btf_type_skip_modifiers(btf, t->type, res_id);
749
}
750
751
const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
752
u32 id, u32 *res_id)
753
{
754
const struct btf_type *ptype;
755
756
ptype = btf_type_resolve_ptr(btf, id, res_id);
757
if (ptype && btf_type_is_func_proto(ptype))
758
return ptype;
759
760
return NULL;
761
}
762
763
/* Types that act only as a source, not sink or intermediate
764
* type when resolving.
765
*/
766
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
767
{
768
return btf_type_is_var(t) ||
769
btf_type_is_decl_tag(t) ||
770
btf_type_is_datasec(t);
771
}
772
773
/* What types need to be resolved?
774
*
775
* btf_type_is_modifier() is an obvious one.
776
*
777
* btf_type_is_struct() because its member refers to
778
* another type (through member->type).
779
*
780
* btf_type_is_var() because the variable refers to
781
* another type. btf_type_is_datasec() holds multiple
782
* btf_type_is_var() types that need resolving.
783
*
784
* btf_type_is_array() because its element (array->type)
785
 * refers to another type. An array can be thought of as a
 * special case of a struct where the same member type is
 * repeated array->nelems times.
788
*/
789
static bool btf_type_needs_resolve(const struct btf_type *t)
790
{
791
return btf_type_is_modifier(t) ||
792
btf_type_is_ptr(t) ||
793
btf_type_is_struct(t) ||
794
btf_type_is_array(t) ||
795
btf_type_is_var(t) ||
796
btf_type_is_func(t) ||
797
btf_type_is_decl_tag(t) ||
798
btf_type_is_datasec(t);
799
}
800
801
/* t->size can be used */
802
static bool btf_type_has_size(const struct btf_type *t)
803
{
804
switch (BTF_INFO_KIND(t->info)) {
805
case BTF_KIND_INT:
806
case BTF_KIND_STRUCT:
807
case BTF_KIND_UNION:
808
case BTF_KIND_ENUM:
809
case BTF_KIND_DATASEC:
810
case BTF_KIND_FLOAT:
811
case BTF_KIND_ENUM64:
812
return true;
813
}
814
815
return false;
816
}
817
818
static const char *btf_int_encoding_str(u8 encoding)
819
{
820
if (encoding == 0)
821
return "(none)";
822
else if (encoding == BTF_INT_SIGNED)
823
return "SIGNED";
824
else if (encoding == BTF_INT_CHAR)
825
return "CHAR";
826
else if (encoding == BTF_INT_BOOL)
827
return "BOOL";
828
else
829
return "UNKN";
830
}
831
832
static u32 btf_type_int(const struct btf_type *t)
833
{
834
return *(u32 *)(t + 1);
835
}
836
837
static const struct btf_array *btf_type_array(const struct btf_type *t)
838
{
839
return (const struct btf_array *)(t + 1);
840
}
841
842
static const struct btf_enum *btf_type_enum(const struct btf_type *t)
843
{
844
return (const struct btf_enum *)(t + 1);
845
}
846
847
static const struct btf_var *btf_type_var(const struct btf_type *t)
848
{
849
return (const struct btf_var *)(t + 1);
850
}
851
852
static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
853
{
854
return (const struct btf_decl_tag *)(t + 1);
855
}
856
857
static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
858
{
859
return (const struct btf_enum64 *)(t + 1);
860
}
861
862
static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
863
{
864
return kind_ops[BTF_INFO_KIND(t->info)];
865
}
866
867
static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
868
{
869
if (!BTF_STR_OFFSET_VALID(offset))
870
return false;
871
872
while (offset < btf->start_str_off)
873
btf = btf->base_btf;
874
875
offset -= btf->start_str_off;
876
return offset < btf->hdr.str_len;
877
}
878
879
static bool __btf_name_char_ok(char c, bool first)
880
{
881
if ((first ? !isalpha(c) :
882
!isalnum(c)) &&
883
c != '_' &&
884
c != '.')
885
return false;
886
return true;
887
}
888
889
const char *btf_str_by_offset(const struct btf *btf, u32 offset)
890
{
891
while (offset < btf->start_str_off)
892
btf = btf->base_btf;
893
894
offset -= btf->start_str_off;
895
if (offset < btf->hdr.str_len)
896
return &btf->strings[offset];
897
898
return NULL;
899
}
900
901
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
902
{
903
/* offset must be valid */
904
const char *src = btf_str_by_offset(btf, offset);
905
const char *src_limit;
906
907
if (!__btf_name_char_ok(*src, true))
908
return false;
909
910
/* set a limit on identifier length */
911
src_limit = src + KSYM_NAME_LEN;
912
src++;
913
while (*src && src < src_limit) {
914
if (!__btf_name_char_ok(*src, false))
915
return false;
916
src++;
917
}
918
919
return !*src;
920
}
921
922
/* Allow any printable character in DATASEC names */
923
static bool btf_name_valid_section(const struct btf *btf, u32 offset)
924
{
925
/* offset must be valid */
926
const char *src = btf_str_by_offset(btf, offset);
927
const char *src_limit;
928
929
if (!*src)
930
return false;
931
932
/* set a limit on identifier length */
933
src_limit = src + KSYM_NAME_LEN;
934
while (*src && src < src_limit) {
935
if (!isprint(*src))
936
return false;
937
src++;
938
}
939
940
return !*src;
941
}
942
943
static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
944
{
945
const char *name;
946
947
if (!offset)
948
return "(anon)";
949
950
name = btf_str_by_offset(btf, offset);
951
return name ?: "(invalid-name-offset)";
952
}
953
954
const char *btf_name_by_offset(const struct btf *btf, u32 offset)
955
{
956
return btf_str_by_offset(btf, offset);
957
}
958
959
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
960
{
961
while (type_id < btf->start_id)
962
btf = btf->base_btf;
963
964
type_id -= btf->start_id;
965
if (type_id >= btf->nr_types)
966
return NULL;
967
return btf->types[type_id];
968
}
969
EXPORT_SYMBOL_GPL(btf_type_by_id);
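
/*
 * Worked example (illustrative numbers): if vmlinux BTF holds type IDs
 * [0..N-1], a kernel module's split BTF is created with start_id == N,
 * so looking up type_id N - 1 against the module's btf walks down to
 * base_btf, while type_id N lands in the module's own types[0].
 */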
970
971
/*
972
* Check that the type @t is a regular int. This means that @t is not
973
 * a bit field and it has the same size as one of u8/u16/u32/u64
974
* or __int128. If @expected_size is not zero, then size of @t should
975
* be the same. A caller should already have checked that the type @t
976
* is an integer.
977
*/
978
static bool __btf_type_int_is_regular(const struct btf_type *t, size_t expected_size)
979
{
980
u32 int_data = btf_type_int(t);
981
u8 nr_bits = BTF_INT_BITS(int_data);
982
u8 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
983
984
return BITS_PER_BYTE_MASKED(nr_bits) == 0 &&
985
BTF_INT_OFFSET(int_data) == 0 &&
986
(nr_bytes <= 16 && is_power_of_2(nr_bytes)) &&
987
(expected_size == 0 || nr_bytes == expected_size);
988
}
989
990
static bool btf_type_int_is_regular(const struct btf_type *t)
991
{
992
return __btf_type_int_is_regular(t, 0);
993
}
994
995
bool btf_type_is_i32(const struct btf_type *t)
996
{
997
return btf_type_is_int(t) && __btf_type_int_is_regular(t, 4);
998
}
999
1000
bool btf_type_is_i64(const struct btf_type *t)
1001
{
1002
return btf_type_is_int(t) && __btf_type_int_is_regular(t, 8);
1003
}
1004
1005
bool btf_type_is_primitive(const struct btf_type *t)
1006
{
1007
return (btf_type_is_int(t) && btf_type_int_is_regular(t)) ||
1008
btf_is_any_enum(t);
1009
}
1010
1011
/*
1012
* Check that given struct member is a regular int with expected
1013
* offset and size.
1014
*/
1015
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
1016
const struct btf_member *m,
1017
u32 expected_offset, u32 expected_size)
1018
{
1019
const struct btf_type *t;
1020
u32 id, int_data;
1021
u8 nr_bits;
1022
1023
id = m->type;
1024
t = btf_type_id_size(btf, &id, NULL);
1025
if (!t || !btf_type_is_int(t))
1026
return false;
1027
1028
int_data = btf_type_int(t);
1029
nr_bits = BTF_INT_BITS(int_data);
1030
if (btf_type_kflag(s)) {
1031
u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
1032
u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
1033
1034
/* if kflag set, int should be a regular int and
1035
* bit offset should be at byte boundary.
1036
*/
1037
return !bitfield_size &&
1038
BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
1039
BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
1040
}
1041
1042
if (BTF_INT_OFFSET(int_data) ||
1043
BITS_PER_BYTE_MASKED(m->offset) ||
1044
BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
1045
BITS_PER_BYTE_MASKED(nr_bits) ||
1046
BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
1047
return false;
1048
1049
return true;
1050
}
1051
1052
/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
1053
static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
1054
u32 id)
1055
{
1056
const struct btf_type *t = btf_type_by_id(btf, id);
1057
1058
while (btf_type_is_modifier(t) &&
1059
BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
1060
t = btf_type_by_id(btf, t->type);
1061
}
1062
1063
return t;
1064
}
1065
1066
#define BTF_SHOW_MAX_ITER 10
1067
1068
#define BTF_KIND_BIT(kind) (1ULL << kind)
1069
1070
/*
1071
* Populate show->state.name with type name information.
1072
* Format of type name is
1073
*
1074
* [.member_name = ] (type_name)
1075
*/
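
/*
 * Example (illustrative): for a pointer member "mm" of struct
 * task_struct, the generated name would look like
 * ".mm = (struct mm_struct *)"; with BTF_SHOW_NONAME, only the
 * opening '{' or '[' (if any) is emitted.
 */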
1076
static const char *btf_show_name(struct btf_show *show)
1077
{
1078
/* BTF_SHOW_MAX_ITER array suffixes "[]" */
1079
const char *array_suffixes = "[][][][][][][][][][]";
1080
const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
1081
/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
1082
const char *ptr_suffixes = "**********";
1083
const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
1084
const char *name = NULL, *prefix = "", *parens = "";
1085
const struct btf_member *m = show->state.member;
1086
const struct btf_type *t;
1087
const struct btf_array *array;
1088
u32 id = show->state.type_id;
1089
const char *member = NULL;
1090
bool show_member = false;
1091
u64 kinds = 0;
1092
int i;
1093
1094
show->state.name[0] = '\0';
1095
1096
/*
1097
* Don't show type name if we're showing an array member;
1098
 * in that case we show the array type, so we don't need to repeat
1099
* ourselves for each member.
1100
*/
1101
if (show->state.array_member)
1102
return "";
1103
1104
/* Retrieve member name, if any. */
1105
if (m) {
1106
member = btf_name_by_offset(show->btf, m->name_off);
1107
show_member = strlen(member) > 0;
1108
id = m->type;
1109
}
1110
1111
/*
1112
* Start with type_id, as we have resolved the struct btf_type *
1113
* via btf_modifier_show() past the parent typedef to the child
1114
 * struct, int, etc. it is defined as. In such cases, the type_id
1115
* still represents the starting type while the struct btf_type *
1116
* in our show->state points at the resolved type of the typedef.
1117
*/
1118
t = btf_type_by_id(show->btf, id);
1119
if (!t)
1120
return "";
1121
1122
/*
1123
* The goal here is to build up the right number of pointer and
1124
* array suffixes while ensuring the type name for a typedef
1125
* is represented. Along the way we accumulate a list of
1126
* BTF kinds we have encountered, since these will inform later
1127
* display; for example, pointer types will not require an
1128
* opening "{" for struct, we will just display the pointer value.
1129
*
1130
* We also want to accumulate the right number of pointer or array
1131
* indices in the format string while iterating until we get to
1132
* the typedef/pointee/array member target type.
1133
*
1134
* We start by pointing at the end of pointer and array suffix
1135
* strings; as we accumulate pointers and arrays we move the pointer
1136
* or array string backwards so it will show the expected number of
1137
* '*' or '[]' for the type. BTF_SHOW_MAX_ITER of nesting of pointers
1138
* and/or arrays and typedefs are supported as a precaution.
1139
*
1140
 * We also want to get the typedef name while proceeding to resolve
 * the type it points to, so that we can add parentheses if it is a
1142
* "typedef struct" etc.
1143
*/
1144
for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
1145
1146
switch (BTF_INFO_KIND(t->info)) {
1147
case BTF_KIND_TYPEDEF:
1148
if (!name)
1149
name = btf_name_by_offset(show->btf,
1150
t->name_off);
1151
kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
1152
id = t->type;
1153
break;
1154
case BTF_KIND_ARRAY:
1155
kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
1156
parens = "[";
1157
if (!t)
1158
return "";
1159
array = btf_type_array(t);
1160
if (array_suffix > array_suffixes)
1161
array_suffix -= 2;
1162
id = array->type;
1163
break;
1164
case BTF_KIND_PTR:
1165
kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
1166
if (ptr_suffix > ptr_suffixes)
1167
ptr_suffix -= 1;
1168
id = t->type;
1169
break;
1170
default:
1171
id = 0;
1172
break;
1173
}
1174
if (!id)
1175
break;
1176
t = btf_type_skip_qualifiers(show->btf, id);
1177
}
1178
/* We may not be able to represent this type; bail to be safe */
1179
if (i == BTF_SHOW_MAX_ITER)
1180
return "";
1181
1182
if (!name)
1183
name = btf_name_by_offset(show->btf, t->name_off);
1184
1185
switch (BTF_INFO_KIND(t->info)) {
1186
case BTF_KIND_STRUCT:
1187
case BTF_KIND_UNION:
1188
prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
1189
"struct" : "union";
1190
/* if it's an array of struct/union, parens is already set */
1191
if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
1192
parens = "{";
1193
break;
1194
case BTF_KIND_ENUM:
1195
case BTF_KIND_ENUM64:
1196
prefix = "enum";
1197
break;
1198
default:
1199
break;
1200
}
1201
1202
/* pointer does not require parens */
1203
if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
1204
parens = "";
1205
/* typedef does not require struct/union/enum prefix */
1206
if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
1207
prefix = "";
1208
1209
if (!name)
1210
name = "";
1211
1212
/* Even if we don't want type name info, we want parentheses etc */
1213
if (show->flags & BTF_SHOW_NONAME)
1214
snprintf(show->state.name, sizeof(show->state.name), "%s",
1215
parens);
1216
else
1217
snprintf(show->state.name, sizeof(show->state.name),
1218
"%s%s%s(%s%s%s%s%s%s)%s",
1219
/* first 3 strings comprise ".member = " */
1220
show_member ? "." : "",
1221
show_member ? member : "",
1222
show_member ? " = " : "",
1223
/* ...next is our prefix (struct, enum, etc) */
1224
prefix,
1225
strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
1226
/* ...this is the type name itself */
1227
name,
1228
/* ...suffixed by the appropriate '*', '[]' suffixes */
1229
strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
1230
array_suffix, parens);
1231
1232
return show->state.name;
1233
}
1234
1235
static const char *__btf_show_indent(struct btf_show *show)
1236
{
1237
const char *indents = " ";
1238
const char *indent = &indents[strlen(indents)];
1239
1240
if ((indent - show->state.depth) >= indents)
1241
return indent - show->state.depth;
1242
return indents;
1243
}
1244
1245
static const char *btf_show_indent(struct btf_show *show)
1246
{
1247
return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
1248
}
1249
1250
static const char *btf_show_newline(struct btf_show *show)
1251
{
1252
return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1253
}
1254
1255
static const char *btf_show_delim(struct btf_show *show)
1256
{
1257
if (show->state.depth == 0)
1258
return "";
1259
1260
if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
1261
BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
1262
return "|";
1263
1264
return ",";
1265
}
1266
1267
__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
1268
{
1269
va_list args;
1270
1271
if (!show->state.depth_check) {
1272
va_start(args, fmt);
1273
show->showfn(show, fmt, args);
1274
va_end(args);
1275
}
1276
}
1277
1278
/* Macros are used here as btf_show_type_value[s]() prepends and appends
1279
* format specifiers to the format specifier passed in; these do the work of
1280
* adding indentation, delimiters etc while the caller simply has to specify
1281
* the type value(s) in the format specifier + value(s).
1282
*/
1283
#define btf_show_type_value(show, fmt, value) \
1284
do { \
1285
if ((value) != (__typeof__(value))0 || \
1286
(show->flags & BTF_SHOW_ZERO) || \
1287
show->state.depth == 0) { \
1288
btf_show(show, "%s%s" fmt "%s%s", \
1289
btf_show_indent(show), \
1290
btf_show_name(show), \
1291
value, btf_show_delim(show), \
1292
btf_show_newline(show)); \
1293
if (show->state.depth > show->state.depth_to_show) \
1294
show->state.depth_to_show = show->state.depth; \
1295
} \
1296
} while (0)
1297
1298
#define btf_show_type_values(show, fmt, ...) \
1299
do { \
1300
btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show), \
1301
btf_show_name(show), \
1302
__VA_ARGS__, btf_show_delim(show), \
1303
btf_show_newline(show)); \
1304
if (show->state.depth > show->state.depth_to_show) \
1305
show->state.depth_to_show = show->state.depth; \
1306
} while (0)
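
/*
 * Usage sketch (illustrative, not from this section): a show handler for
 * a scalar emits its value through the macro so that indentation, the
 * member name and the trailing delimiter/newline are added for it, e.g.:
 *
 *	btf_show_type_value(show, "%d", *(int *)safe_data);
 *
 * where 'safe_data' is the pointer returned by btf_show_start_type().
 */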
1307
1308
/* How much is left to copy to safe buffer after @data? */
1309
static int btf_show_obj_size_left(struct btf_show *show, void *data)
1310
{
1311
return show->obj.head + show->obj.size - data;
1312
}
1313
1314
/* Is object pointed to by @data of @size already copied to our safe buffer? */
1315
static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
1316
{
1317
return data >= show->obj.data &&
1318
(data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
1319
}
1320
1321
/*
1322
* If object pointed to by @data of @size falls within our safe buffer, return
1323
* the equivalent pointer to the same safe data. Assumes
1324
* copy_from_kernel_nofault() has already happened and our safe buffer is
1325
* populated.
1326
*/
1327
static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
1328
{
1329
if (btf_show_obj_is_safe(show, data, size))
1330
return show->obj.safe + (data - show->obj.data);
1331
return NULL;
1332
}
1333
1334
/*
1335
* Return a safe-to-access version of data pointed to by @data.
1336
* We do this by copying the relevant amount of information
1337
* to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1338
*
1339
* If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1340
* safe copy is needed.
1341
*
1342
* Otherwise we need to determine if we have the required amount
1343
* of data (determined by the @data pointer and the size of the
1344
* largest base type we can encounter (represented by
1345
* BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
1346
* that we will be able to print some of the current object,
1347
* and if more is needed a copy will be triggered.
1348
* Some objects such as structs will not fit into the buffer;
1349
* in such cases additional copies when we iterate over their
1350
* members may be needed.
1351
*
1352
* btf_show_obj_safe() is used to return a safe buffer for
1353
* btf_show_start_type(); this ensures that as we recurse into
1354
* nested types we always have safe data for the given type.
1355
* This approach is somewhat wasteful; it's possible for example
1356
* that when iterating over a large union we'll end up copying the
1357
* same data repeatedly, but the goal is safety not performance.
1358
* We use stack data as opposed to per-CPU buffers because the
1359
* iteration over a type can take some time, and preemption handling
1360
* would greatly complicate use of the safe buffer.
1361
*/
1362
static void *btf_show_obj_safe(struct btf_show *show,
1363
const struct btf_type *t,
1364
void *data)
1365
{
1366
const struct btf_type *rt;
1367
int size_left, size;
1368
void *safe = NULL;
1369
1370
if (show->flags & BTF_SHOW_UNSAFE)
1371
return data;
1372
1373
rt = btf_resolve_size(show->btf, t, &size);
1374
if (IS_ERR(rt)) {
1375
show->state.status = PTR_ERR(rt);
1376
return NULL;
1377
}
1378
1379
/*
1380
 * Is this the toplevel object? If so, set the total object size and
1381
* initialize pointers. Otherwise check if we still fall within
1382
* our safe object data.
1383
*/
1384
if (show->state.depth == 0) {
1385
show->obj.size = size;
1386
show->obj.head = data;
1387
} else {
1388
/*
1389
* If the size of the current object is > our remaining
1390
* safe buffer we _may_ need to do a new copy. However
1391
 * consider the case of a nested struct; its size pushes
1392
* us over the safe buffer limit, but showing any individual
1393
* struct members does not. In such cases, we don't need
1394
* to initiate a fresh copy yet; however we definitely need
1395
* at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1396
* in our buffer, regardless of the current object size.
1397
* The logic here is that as we resolve types we will
1398
* hit a base type at some point, and we need to be sure
1399
* the next chunk of data is safely available to display
1400
* that type info safely. We cannot rely on the size of
1401
* the current object here because it may be much larger
1402
* than our current buffer (e.g. task_struct is 8k).
1403
* All we want to do here is ensure that we can print the
1404
* next basic type, which we can if either
1405
* - the current type size is within the safe buffer; or
1406
* - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1407
* the safe buffer.
1408
*/
1409
safe = __btf_show_obj_safe(show, data,
1410
min(size,
1411
BTF_SHOW_OBJ_BASE_TYPE_SIZE));
1412
}
1413
1414
/*
1415
* We need a new copy to our safe object, either because we haven't
1416
* yet copied and are initializing safe data, or because the data
1417
* we want falls outside the boundaries of the safe object.
1418
*/
1419
if (!safe) {
1420
size_left = btf_show_obj_size_left(show, data);
1421
if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
1422
size_left = BTF_SHOW_OBJ_SAFE_SIZE;
1423
show->state.status = copy_from_kernel_nofault(show->obj.safe,
1424
data, size_left);
1425
if (!show->state.status) {
1426
show->obj.data = data;
1427
safe = show->obj.safe;
1428
}
1429
}
1430
1431
return safe;
1432
}
1433
1434
/*
1435
* Set the type we are starting to show and return a safe data pointer
1436
* to be used for showing the associated data.
1437
*/
1438
static void *btf_show_start_type(struct btf_show *show,
1439
const struct btf_type *t,
1440
u32 type_id, void *data)
1441
{
1442
show->state.type = t;
1443
show->state.type_id = type_id;
1444
show->state.name[0] = '\0';
1445
1446
return btf_show_obj_safe(show, t, data);
1447
}
1448
1449
static void btf_show_end_type(struct btf_show *show)
1450
{
1451
show->state.type = NULL;
1452
show->state.type_id = 0;
1453
show->state.name[0] = '\0';
1454
}
1455
1456
static void *btf_show_start_aggr_type(struct btf_show *show,
1457
const struct btf_type *t,
1458
u32 type_id, void *data)
1459
{
1460
void *safe_data = btf_show_start_type(show, t, type_id, data);
1461
1462
if (!safe_data)
1463
return safe_data;
1464
1465
btf_show(show, "%s%s%s", btf_show_indent(show),
1466
btf_show_name(show),
1467
btf_show_newline(show));
1468
show->state.depth++;
1469
return safe_data;
1470
}
1471
1472
static void btf_show_end_aggr_type(struct btf_show *show,
1473
const char *suffix)
1474
{
1475
show->state.depth--;
1476
btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1477
btf_show_delim(show), btf_show_newline(show));
1478
btf_show_end_type(show);
1479
}
1480
1481
static void btf_show_start_member(struct btf_show *show,
1482
const struct btf_member *m)
1483
{
1484
show->state.member = m;
1485
}
1486
1487
static void btf_show_start_array_member(struct btf_show *show)
1488
{
1489
show->state.array_member = 1;
1490
btf_show_start_member(show, NULL);
1491
}
1492
1493
static void btf_show_end_member(struct btf_show *show)
1494
{
1495
show->state.member = NULL;
1496
}
1497
1498
static void btf_show_end_array_member(struct btf_show *show)
1499
{
1500
show->state.array_member = 0;
1501
btf_show_end_member(show);
1502
}
1503
1504
static void *btf_show_start_array_type(struct btf_show *show,
1505
const struct btf_type *t,
1506
u32 type_id,
1507
u16 array_encoding,
1508
void *data)
1509
{
1510
show->state.array_encoding = array_encoding;
1511
show->state.array_terminated = 0;
1512
return btf_show_start_aggr_type(show, t, type_id, data);
1513
}
1514
1515
static void btf_show_end_array_type(struct btf_show *show)
1516
{
1517
show->state.array_encoding = 0;
1518
show->state.array_terminated = 0;
1519
btf_show_end_aggr_type(show, "]");
1520
}
1521
1522
static void *btf_show_start_struct_type(struct btf_show *show,
1523
const struct btf_type *t,
1524
u32 type_id,
1525
void *data)
1526
{
1527
return btf_show_start_aggr_type(show, t, type_id, data);
1528
}
1529
1530
static void btf_show_end_struct_type(struct btf_show *show)
1531
{
1532
btf_show_end_aggr_type(show, "}");
1533
}
1534
1535
__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
1536
const char *fmt, ...)
1537
{
1538
va_list args;
1539
1540
va_start(args, fmt);
1541
bpf_verifier_vlog(log, fmt, args);
1542
va_end(args);
1543
}
1544
1545
__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
1546
const char *fmt, ...)
1547
{
1548
struct bpf_verifier_log *log = &env->log;
1549
va_list args;
1550
1551
if (!bpf_verifier_log_needed(log))
1552
return;
1553
1554
va_start(args, fmt);
1555
bpf_verifier_vlog(log, fmt, args);
1556
va_end(args);
1557
}
1558
1559
__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
1560
const struct btf_type *t,
1561
bool log_details,
1562
const char *fmt, ...)
1563
{
1564
struct bpf_verifier_log *log = &env->log;
1565
struct btf *btf = env->btf;
1566
va_list args;
1567
1568
if (!bpf_verifier_log_needed(log))
1569
return;
1570
1571
if (log->level == BPF_LOG_KERNEL) {
1572
/* btf verifier prints all types it is processing via
1573
* btf_verifier_log_type(..., fmt = NULL).
1574
* Skip those prints for in-kernel BTF verification.
1575
*/
1576
if (!fmt)
1577
return;
1578
1579
/* Skip logging when loading module BTF with mismatches permitted */
1580
if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1581
return;
1582
}
1583
1584
__btf_verifier_log(log, "[%u] %s %s%s",
1585
env->log_type_id,
1586
btf_type_str(t),
1587
__btf_name_by_offset(btf, t->name_off),
1588
log_details ? " " : "");
1589
1590
if (log_details)
1591
btf_type_ops(t)->log_details(env, t);
1592
1593
if (fmt && *fmt) {
1594
__btf_verifier_log(log, " ");
1595
va_start(args, fmt);
1596
bpf_verifier_vlog(log, fmt, args);
1597
va_end(args);
1598
}
1599
1600
__btf_verifier_log(log, "\n");
1601
}
1602
1603
#define btf_verifier_log_type(env, t, ...) \
1604
__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1605
#define btf_verifier_log_basic(env, t, ...) \
1606
__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1607
1608
__printf(4, 5)
1609
static void btf_verifier_log_member(struct btf_verifier_env *env,
1610
const struct btf_type *struct_type,
1611
const struct btf_member *member,
1612
const char *fmt, ...)
1613
{
1614
struct bpf_verifier_log *log = &env->log;
1615
struct btf *btf = env->btf;
1616
va_list args;
1617
1618
if (!bpf_verifier_log_needed(log))
1619
return;
1620
1621
if (log->level == BPF_LOG_KERNEL) {
1622
if (!fmt)
1623
return;
1624
1625
/* Skip logging when loading module BTF with mismatches permitted */
1626
if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
1627
return;
1628
}
1629
1630
/* The CHECK_META phase already did a btf dump.
1631
*
1632
 * If a member is logged again, it must have hit an error in
1633
* parsing this member. It is useful to print out which
1634
* struct this member belongs to.
1635
*/
1636
if (env->phase != CHECK_META)
1637
btf_verifier_log_type(env, struct_type, NULL);
1638
1639
if (btf_type_kflag(struct_type))
1640
__btf_verifier_log(log,
1641
"\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1642
__btf_name_by_offset(btf, member->name_off),
1643
member->type,
1644
BTF_MEMBER_BITFIELD_SIZE(member->offset),
1645
BTF_MEMBER_BIT_OFFSET(member->offset));
1646
else
1647
__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
1648
__btf_name_by_offset(btf, member->name_off),
1649
member->type, member->offset);
1650
1651
if (fmt && *fmt) {
1652
__btf_verifier_log(log, " ");
1653
va_start(args, fmt);
1654
bpf_verifier_vlog(log, fmt, args);
1655
va_end(args);
1656
}
1657
1658
__btf_verifier_log(log, "\n");
1659
}
1660
1661
__printf(4, 5)
1662
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
1663
const struct btf_type *datasec_type,
1664
const struct btf_var_secinfo *vsi,
1665
const char *fmt, ...)
1666
{
1667
struct bpf_verifier_log *log = &env->log;
1668
va_list args;
1669
1670
if (!bpf_verifier_log_needed(log))
1671
return;
1672
if (log->level == BPF_LOG_KERNEL && !fmt)
1673
return;
1674
if (env->phase != CHECK_META)
1675
btf_verifier_log_type(env, datasec_type, NULL);
1676
1677
__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1678
vsi->type, vsi->offset, vsi->size);
1679
if (fmt && *fmt) {
1680
__btf_verifier_log(log, " ");
1681
va_start(args, fmt);
1682
bpf_verifier_vlog(log, fmt, args);
1683
va_end(args);
1684
}
1685
1686
__btf_verifier_log(log, "\n");
1687
}
1688
1689
static void btf_verifier_log_hdr(struct btf_verifier_env *env,
1690
u32 btf_data_size)
1691
{
1692
struct bpf_verifier_log *log = &env->log;
1693
const struct btf *btf = env->btf;
1694
const struct btf_header *hdr;
1695
1696
if (!bpf_verifier_log_needed(log))
1697
return;
1698
1699
if (log->level == BPF_LOG_KERNEL)
1700
return;
1701
hdr = &btf->hdr;
1702
__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
1703
__btf_verifier_log(log, "version: %u\n", hdr->version);
1704
__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
1705
__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
1706
__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
1707
__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
1708
__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
1709
__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
1710
__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1711
}
1712
1713
static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
1714
{
1715
struct btf *btf = env->btf;
1716
1717
if (btf->types_size == btf->nr_types) {
1718
/* Expand 'types' array */
1719
1720
struct btf_type **new_types;
1721
u32 expand_by, new_size;
1722
1723
if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
1724
btf_verifier_log(env, "Exceeded max num of types");
1725
return -E2BIG;
1726
}
1727
1728
expand_by = max_t(u32, btf->types_size >> 2, 16);
1729
new_size = min_t(u32, BTF_MAX_TYPE,
1730
btf->types_size + expand_by);
1731
1732
new_types = kvcalloc(new_size, sizeof(*new_types),
1733
GFP_KERNEL | __GFP_NOWARN);
1734
if (!new_types)
1735
return -ENOMEM;
1736
1737
if (btf->nr_types == 0) {
1738
if (!btf->base_btf) {
1739
/* lazily init VOID type */
1740
new_types[0] = &btf_void;
1741
btf->nr_types++;
1742
}
1743
} else {
1744
memcpy(new_types, btf->types,
1745
sizeof(*btf->types) * btf->nr_types);
1746
}
1747
1748
kvfree(btf->types);
1749
btf->types = new_types;
1750
btf->types_size = new_size;
1751
}
1752
1753
btf->types[btf->nr_types++] = t;
1754
1755
return 0;
1756
}
1757
1758
static int btf_alloc_id(struct btf *btf)
1759
{
1760
int id;
1761
1762
idr_preload(GFP_KERNEL);
1763
spin_lock_bh(&btf_idr_lock);
1764
id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
1765
if (id > 0)
1766
btf->id = id;
1767
spin_unlock_bh(&btf_idr_lock);
1768
idr_preload_end();
1769
1770
if (WARN_ON_ONCE(!id))
1771
return -ENOSPC;
1772
1773
return id > 0 ? 0 : id;
1774
}
1775
1776
static void btf_free_id(struct btf *btf)
1777
{
1778
unsigned long flags;
1779
1780
/*
1781
* In map-in-map, calling map_delete_elem() on outer
1782
* map will call bpf_map_put on the inner map.
1783
* It will then eventually call btf_free_id()
1784
* on the inner map. Some of the map_delete_elem()
1785
 * implementations may have irqs disabled, so
1786
* we need to use the _irqsave() version instead
1787
* of the _bh() version.
1788
*/
1789
spin_lock_irqsave(&btf_idr_lock, flags);
1790
idr_remove(&btf_idr, btf->id);
1791
spin_unlock_irqrestore(&btf_idr_lock, flags);
1792
}
1793
1794
static void btf_free_kfunc_set_tab(struct btf *btf)
1795
{
1796
struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
1797
int hook;
1798
1799
if (!tab)
1800
return;
1801
for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
1802
kfree(tab->sets[hook]);
1803
kfree(tab);
1804
btf->kfunc_set_tab = NULL;
1805
}
1806
1807
static void btf_free_dtor_kfunc_tab(struct btf *btf)
1808
{
1809
struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
1810
1811
if (!tab)
1812
return;
1813
kfree(tab);
1814
btf->dtor_kfunc_tab = NULL;
1815
}
1816
1817
static void btf_struct_metas_free(struct btf_struct_metas *tab)
1818
{
1819
int i;
1820
1821
if (!tab)
1822
return;
1823
for (i = 0; i < tab->cnt; i++)
1824
btf_record_free(tab->types[i].record);
1825
kfree(tab);
1826
}
1827
1828
static void btf_free_struct_meta_tab(struct btf *btf)
1829
{
1830
struct btf_struct_metas *tab = btf->struct_meta_tab;
1831
1832
btf_struct_metas_free(tab);
1833
btf->struct_meta_tab = NULL;
1834
}
1835
1836
static void btf_free_struct_ops_tab(struct btf *btf)
1837
{
1838
struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
1839
u32 i;
1840
1841
if (!tab)
1842
return;
1843
1844
for (i = 0; i < tab->cnt; i++)
1845
bpf_struct_ops_desc_release(&tab->ops[i]);
1846
1847
kfree(tab);
1848
btf->struct_ops_tab = NULL;
1849
}
1850
1851
static void btf_free(struct btf *btf)
1852
{
1853
btf_free_struct_meta_tab(btf);
1854
btf_free_dtor_kfunc_tab(btf);
1855
btf_free_kfunc_set_tab(btf);
1856
btf_free_struct_ops_tab(btf);
1857
kvfree(btf->types);
1858
kvfree(btf->resolved_sizes);
1859
kvfree(btf->resolved_ids);
1860
/* vmlinux does not allocate btf->data, it simply points it at
1861
* __start_BTF.
1862
*/
1863
if (!btf_is_vmlinux(btf))
1864
kvfree(btf->data);
1865
kvfree(btf->base_id_map);
1866
kfree(btf);
1867
}
1868
1869
static void btf_free_rcu(struct rcu_head *rcu)
1870
{
1871
struct btf *btf = container_of(rcu, struct btf, rcu);
1872
1873
btf_free(btf);
1874
}
1875
1876
const char *btf_get_name(const struct btf *btf)
1877
{
1878
return btf->name;
1879
}
1880
1881
void btf_get(struct btf *btf)
1882
{
1883
refcount_inc(&btf->refcnt);
1884
}
1885
1886
void btf_put(struct btf *btf)
1887
{
1888
if (btf && refcount_dec_and_test(&btf->refcnt)) {
1889
btf_free_id(btf);
1890
call_rcu(&btf->rcu, btf_free_rcu);
1891
}
1892
}
1893
1894
struct btf *btf_base_btf(const struct btf *btf)
1895
{
1896
return btf->base_btf;
1897
}
1898
1899
const struct btf_header *btf_header(const struct btf *btf)
1900
{
1901
return &btf->hdr;
1902
}
1903
1904
void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
1905
{
1906
btf->base_btf = (struct btf *)base_btf;
1907
btf->start_id = btf_nr_types(base_btf);
1908
btf->start_str_off = base_btf->hdr.str_len;
1909
}
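/* Illustrative sketch (not from the original source): with split BTF,
 * a module's type and string sections are continuations of the base
 * (vmlinux) BTF. Assuming a base BTF with N types and an S-byte
 * string section:
 *
 *	btf_set_base_btf(mod_btf, vmlinux_btf);
 *	// mod_btf->start_id      == btf_nr_types(vmlinux_btf) == N
 *	// mod_btf->start_str_off == vmlinux_btf->hdr.str_len  == S
 *	// type_id  < N resolves against vmlinux_btf
 *	// type_id >= N resolves against mod_btf at index (type_id - N)
 */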
1910
1911
static int env_resolve_init(struct btf_verifier_env *env)
1912
{
1913
struct btf *btf = env->btf;
1914
u32 nr_types = btf->nr_types;
1915
u32 *resolved_sizes = NULL;
1916
u32 *resolved_ids = NULL;
1917
u8 *visit_states = NULL;
1918
1919
resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1920
GFP_KERNEL | __GFP_NOWARN);
1921
if (!resolved_sizes)
1922
goto nomem;
1923
1924
resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1925
GFP_KERNEL | __GFP_NOWARN);
1926
if (!resolved_ids)
1927
goto nomem;
1928
1929
visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1930
GFP_KERNEL | __GFP_NOWARN);
1931
if (!visit_states)
1932
goto nomem;
1933
1934
btf->resolved_sizes = resolved_sizes;
1935
btf->resolved_ids = resolved_ids;
1936
env->visit_states = visit_states;
1937
1938
return 0;
1939
1940
nomem:
1941
kvfree(resolved_sizes);
1942
kvfree(resolved_ids);
1943
kvfree(visit_states);
1944
return -ENOMEM;
1945
}
1946
1947
static void btf_verifier_env_free(struct btf_verifier_env *env)
1948
{
1949
kvfree(env->visit_states);
1950
kfree(env);
1951
}
1952
1953
static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
1954
const struct btf_type *next_type)
1955
{
1956
switch (env->resolve_mode) {
1957
case RESOLVE_TBD:
1958
/* int, enum or void is a sink */
1959
return !btf_type_needs_resolve(next_type);
1960
case RESOLVE_PTR:
1961
/* int, enum, void, struct, array, func or func_proto is a sink
1962
* for ptr
1963
*/
1964
return !btf_type_is_modifier(next_type) &&
1965
!btf_type_is_ptr(next_type);
1966
case RESOLVE_STRUCT_OR_ARRAY:
1967
/* int, enum, void, ptr, func or func_proto is a sink
1968
* for struct and array
1969
*/
1970
return !btf_type_is_modifier(next_type) &&
1971
!btf_type_is_array(next_type) &&
1972
!btf_type_is_struct(next_type);
1973
default:
1974
BUG();
1975
}
1976
}
1977
1978
static bool env_type_is_resolved(const struct btf_verifier_env *env,
1979
u32 type_id)
1980
{
1981
/* base BTF types should be resolved by now */
1982
if (type_id < env->btf->start_id)
1983
return true;
1984
1985
return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
1986
}
1987
1988
static int env_stack_push(struct btf_verifier_env *env,
1989
const struct btf_type *t, u32 type_id)
1990
{
1991
const struct btf *btf = env->btf;
1992
struct resolve_vertex *v;
1993
1994
if (env->top_stack == MAX_RESOLVE_DEPTH)
1995
return -E2BIG;
1996
1997
if (type_id < btf->start_id
1998
|| env->visit_states[type_id - btf->start_id] != NOT_VISITED)
1999
return -EEXIST;
2000
2001
env->visit_states[type_id - btf->start_id] = VISITED;
2002
2003
v = &env->stack[env->top_stack++];
2004
v->t = t;
2005
v->type_id = type_id;
2006
v->next_member = 0;
2007
2008
if (env->resolve_mode == RESOLVE_TBD) {
2009
if (btf_type_is_ptr(t))
2010
env->resolve_mode = RESOLVE_PTR;
2011
else if (btf_type_is_struct(t) || btf_type_is_array(t))
2012
env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
2013
}
2014
2015
return 0;
2016
}
2017
2018
static void env_stack_set_next_member(struct btf_verifier_env *env,
2019
u16 next_member)
2020
{
2021
env->stack[env->top_stack - 1].next_member = next_member;
2022
}
2023
2024
static void env_stack_pop_resolved(struct btf_verifier_env *env,
2025
u32 resolved_type_id,
2026
u32 resolved_size)
2027
{
2028
u32 type_id = env->stack[--(env->top_stack)].type_id;
2029
struct btf *btf = env->btf;
2030
2031
type_id -= btf->start_id; /* adjust to local type id */
2032
btf->resolved_sizes[type_id] = resolved_size;
2033
btf->resolved_ids[type_id] = resolved_type_id;
2034
env->visit_states[type_id] = RESOLVED;
2035
}
2036
2037
static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
2038
{
2039
return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
2040
}
2041
2042
/* Resolve the size of a passed-in "type"
2043
*
2044
* type: is an array (e.g. u32 array[x][y])
2045
* return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
2046
* *type_size: (x * y * sizeof(u32)). Hence, *type_size always
2047
* corresponds to the return type.
2048
* *elem_type: u32
2049
* *elem_id: id of u32
2050
* *total_nelems: (x * y). Hence, individual elem size is
2051
* (*type_size / *total_nelems)
2052
* *type_id: id of type if it's changed within the function, 0 if not
2053
*
2054
* type: is not an array (e.g. const struct X)
2055
* return type: type "struct X"
2056
* *type_size: sizeof(struct X)
2057
* *elem_type: same as return type ("struct X")
2058
* *elem_id: 0
2059
* *total_nelems: 1
2060
* *type_id: id of type if it's changed within the function, 0 if not
2061
*/
2062
static const struct btf_type *
2063
__btf_resolve_size(const struct btf *btf, const struct btf_type *type,
2064
u32 *type_size, const struct btf_type **elem_type,
2065
u32 *elem_id, u32 *total_nelems, u32 *type_id)
2066
{
2067
const struct btf_type *array_type = NULL;
2068
const struct btf_array *array = NULL;
2069
u32 i, size, nelems = 1, id = 0;
2070
2071
for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
2072
switch (BTF_INFO_KIND(type->info)) {
2073
/* type->size can be used */
2074
case BTF_KIND_INT:
2075
case BTF_KIND_STRUCT:
2076
case BTF_KIND_UNION:
2077
case BTF_KIND_ENUM:
2078
case BTF_KIND_FLOAT:
2079
case BTF_KIND_ENUM64:
2080
size = type->size;
2081
goto resolved;
2082
2083
case BTF_KIND_PTR:
2084
size = sizeof(void *);
2085
goto resolved;
2086
2087
/* Modifiers */
2088
case BTF_KIND_TYPEDEF:
2089
case BTF_KIND_VOLATILE:
2090
case BTF_KIND_CONST:
2091
case BTF_KIND_RESTRICT:
2092
case BTF_KIND_TYPE_TAG:
2093
id = type->type;
2094
type = btf_type_by_id(btf, type->type);
2095
break;
2096
2097
case BTF_KIND_ARRAY:
2098
if (!array_type)
2099
array_type = type;
2100
array = btf_type_array(type);
2101
if (nelems && array->nelems > U32_MAX / nelems)
2102
return ERR_PTR(-EINVAL);
2103
nelems *= array->nelems;
2104
type = btf_type_by_id(btf, array->type);
2105
break;
2106
2107
/* type without size */
2108
default:
2109
return ERR_PTR(-EINVAL);
2110
}
2111
}
2112
2113
return ERR_PTR(-EINVAL);
2114
2115
resolved:
2116
if (nelems && size > U32_MAX / nelems)
2117
return ERR_PTR(-EINVAL);
2118
2119
*type_size = nelems * size;
2120
if (total_nelems)
2121
*total_nelems = nelems;
2122
if (elem_type)
2123
*elem_type = type;
2124
if (elem_id)
2125
*elem_id = array ? array->type : 0;
2126
if (type_id && id)
2127
*type_id = id;
2128
2129
return array_type ? : type;
2130
}
2131
2132
const struct btf_type *
2133
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
2134
u32 *type_size)
2135
{
2136
return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
2137
}
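/* Usage sketch (illustrative, not part of the original file): resolving
 * a two-dimensional array member such as "u32 arr[4][8]". Given the
 * outermost BTF_KIND_ARRAY type for "arr" (arr_type below, an assumed
 * name), the walk above flattens the nested arrays:
 *
 *	u32 size, nelems, elem_id;
 *	const struct btf_type *elem_type;
 *	const struct btf_type *ret;
 *
 *	ret = __btf_resolve_size(btf, arr_type, &size, &elem_type,
 *				 &elem_id, &nelems, NULL);
 *	// ret    == the outer array type
 *	// size   == 4 * 8 * sizeof(u32) == 128
 *	// nelems == 32, elem_type describes "u32"
 *
 * btf_resolve_size() is the same walk when only the total size matters.
 */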
2138
2139
static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
2140
{
2141
while (type_id < btf->start_id)
2142
btf = btf->base_btf;
2143
2144
return btf->resolved_ids[type_id - btf->start_id];
2145
}
2146
2147
/* The input param "type_id" must point to a needs_resolve type */
2148
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
2149
u32 *type_id)
2150
{
2151
*type_id = btf_resolved_type_id(btf, *type_id);
2152
return btf_type_by_id(btf, *type_id);
2153
}
2154
2155
static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
2156
{
2157
while (type_id < btf->start_id)
2158
btf = btf->base_btf;
2159
2160
return btf->resolved_sizes[type_id - btf->start_id];
2161
}
2162
2163
const struct btf_type *btf_type_id_size(const struct btf *btf,
2164
u32 *type_id, u32 *ret_size)
2165
{
2166
const struct btf_type *size_type;
2167
u32 size_type_id = *type_id;
2168
u32 size = 0;
2169
2170
size_type = btf_type_by_id(btf, size_type_id);
2171
if (btf_type_nosize_or_null(size_type))
2172
return NULL;
2173
2174
if (btf_type_has_size(size_type)) {
2175
size = size_type->size;
2176
} else if (btf_type_is_array(size_type)) {
2177
size = btf_resolved_type_size(btf, size_type_id);
2178
} else if (btf_type_is_ptr(size_type)) {
2179
size = sizeof(void *);
2180
} else {
2181
if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
2182
!btf_type_is_var(size_type)))
2183
return NULL;
2184
2185
size_type_id = btf_resolved_type_id(btf, size_type_id);
2186
size_type = btf_type_by_id(btf, size_type_id);
2187
if (btf_type_nosize_or_null(size_type))
2188
return NULL;
2189
else if (btf_type_has_size(size_type))
2190
size = size_type->size;
2191
else if (btf_type_is_array(size_type))
2192
size = btf_resolved_type_size(btf, size_type_id);
2193
else if (btf_type_is_ptr(size_type))
2194
size = sizeof(void *);
2195
else
2196
return NULL;
2197
}
2198
2199
*type_id = size_type_id;
2200
if (ret_size)
2201
*ret_size = size;
2202
2203
return size_type;
2204
}
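/* Example (illustrative, hypothetical type ids and names): for a chain
 *
 *	[3] TYPEDEF 'myint' type_id=4
 *	[4] CONST   (anon)  type_id=5
 *	[5] INT     'int'   size=4
 *
 * calling btf_type_id_size() with *type_id == 3 uses the
 * resolved_ids/resolved_sizes caches filled during resolve: it
 * rewrites *type_id to 5, sets *ret_size to 4 and returns the INT
 * type, so callers do not have to walk the modifier chain again.
 */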
2205
2206
static int btf_df_check_member(struct btf_verifier_env *env,
2207
const struct btf_type *struct_type,
2208
const struct btf_member *member,
2209
const struct btf_type *member_type)
2210
{
2211
btf_verifier_log_basic(env, struct_type,
2212
"Unsupported check_member");
2213
return -EINVAL;
2214
}
2215
2216
static int btf_df_check_kflag_member(struct btf_verifier_env *env,
2217
const struct btf_type *struct_type,
2218
const struct btf_member *member,
2219
const struct btf_type *member_type)
2220
{
2221
btf_verifier_log_basic(env, struct_type,
2222
"Unsupported check_kflag_member");
2223
return -EINVAL;
2224
}
2225
2226
/* Used for ptr, array, struct/union and float type members.
2227
* int, enum and modifier types have their specific callback functions.
2228
*/
2229
static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
2230
const struct btf_type *struct_type,
2231
const struct btf_member *member,
2232
const struct btf_type *member_type)
2233
{
2234
if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
2235
btf_verifier_log_member(env, struct_type, member,
2236
"Invalid member bitfield_size");
2237
return -EINVAL;
2238
}
2239
2240
/* bitfield size is 0, so member->offset represents bit offset only.
2241
* It is safe to call the non-kflag check_member variants.
2242
*/
2243
return btf_type_ops(member_type)->check_member(env, struct_type,
2244
member,
2245
member_type);
2246
}
2247
2248
static int btf_df_resolve(struct btf_verifier_env *env,
2249
const struct resolve_vertex *v)
2250
{
2251
btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2252
return -EINVAL;
2253
}
2254
2255
static void btf_df_show(const struct btf *btf, const struct btf_type *t,
2256
u32 type_id, void *data, u8 bits_offsets,
2257
struct btf_show *show)
2258
{
2259
btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2260
}
2261
2262
static int btf_int_check_member(struct btf_verifier_env *env,
2263
const struct btf_type *struct_type,
2264
const struct btf_member *member,
2265
const struct btf_type *member_type)
2266
{
2267
u32 int_data = btf_type_int(member_type);
2268
u32 struct_bits_off = member->offset;
2269
u32 struct_size = struct_type->size;
2270
u32 nr_copy_bits;
2271
u32 bytes_offset;
2272
2273
if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
2274
btf_verifier_log_member(env, struct_type, member,
2275
"bits_offset exceeds U32_MAX");
2276
return -EINVAL;
2277
}
2278
2279
struct_bits_off += BTF_INT_OFFSET(int_data);
2280
bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2281
nr_copy_bits = BTF_INT_BITS(int_data) +
2282
BITS_PER_BYTE_MASKED(struct_bits_off);
2283
2284
if (nr_copy_bits > BITS_PER_U128) {
2285
btf_verifier_log_member(env, struct_type, member,
2286
"nr_copy_bits exceeds 128");
2287
return -EINVAL;
2288
}
2289
2290
if (struct_size < bytes_offset ||
2291
struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2292
btf_verifier_log_member(env, struct_type, member,
2293
"Member exceeds struct_size");
2294
return -EINVAL;
2295
}
2296
2297
return 0;
2298
}
2299
2300
static int btf_int_check_kflag_member(struct btf_verifier_env *env,
2301
const struct btf_type *struct_type,
2302
const struct btf_member *member,
2303
const struct btf_type *member_type)
2304
{
2305
u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
2306
u32 int_data = btf_type_int(member_type);
2307
u32 struct_size = struct_type->size;
2308
u32 nr_copy_bits;
2309
2310
/* a regular int type is required for the kflag int member */
2311
if (!btf_type_int_is_regular(member_type)) {
2312
btf_verifier_log_member(env, struct_type, member,
2313
"Invalid member base type");
2314
return -EINVAL;
2315
}
2316
2317
/* check sanity of bitfield size */
2318
nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2319
struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2320
nr_int_data_bits = BTF_INT_BITS(int_data);
2321
if (!nr_bits) {
2322
/* Not a bitfield member; the member offset must be at a byte
2323
* boundary.
2324
*/
2325
if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2326
btf_verifier_log_member(env, struct_type, member,
2327
"Invalid member offset");
2328
return -EINVAL;
2329
}
2330
2331
nr_bits = nr_int_data_bits;
2332
} else if (nr_bits > nr_int_data_bits) {
2333
btf_verifier_log_member(env, struct_type, member,
2334
"Invalid member bitfield_size");
2335
return -EINVAL;
2336
}
2337
2338
bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2339
nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
2340
if (nr_copy_bits > BITS_PER_U128) {
2341
btf_verifier_log_member(env, struct_type, member,
2342
"nr_copy_bits exceeds 128");
2343
return -EINVAL;
2344
}
2345
2346
if (struct_size < bytes_offset ||
2347
struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
2348
btf_verifier_log_member(env, struct_type, member,
2349
"Member exceeds struct_size");
2350
return -EINVAL;
2351
}
2352
2353
return 0;
2354
}
2355
2356
static s32 btf_int_check_meta(struct btf_verifier_env *env,
2357
const struct btf_type *t,
2358
u32 meta_left)
2359
{
2360
u32 int_data, nr_bits, meta_needed = sizeof(int_data);
2361
u16 encoding;
2362
2363
if (meta_left < meta_needed) {
2364
btf_verifier_log_basic(env, t,
2365
"meta_left:%u meta_needed:%u",
2366
meta_left, meta_needed);
2367
return -EINVAL;
2368
}
2369
2370
if (btf_type_vlen(t)) {
2371
btf_verifier_log_type(env, t, "vlen != 0");
2372
return -EINVAL;
2373
}
2374
2375
if (btf_type_kflag(t)) {
2376
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2377
return -EINVAL;
2378
}
2379
2380
int_data = btf_type_int(t);
2381
if (int_data & ~BTF_INT_MASK) {
2382
btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2383
int_data);
2384
return -EINVAL;
2385
}
2386
2387
nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
2388
2389
if (nr_bits > BITS_PER_U128) {
2390
btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2391
BITS_PER_U128);
2392
return -EINVAL;
2393
}
2394
2395
if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
2396
btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2397
return -EINVAL;
2398
}
2399
2400
/*
2401
* Only one of the encoding bits is allowed and it
2402
* should be sufficient for the pretty print purpose (i.e. decoding).
2403
* Multiple bits can be allowed later if it is found
2404
* to be insufficient.
2405
*/
2406
encoding = BTF_INT_ENCODING(int_data);
2407
if (encoding &&
2408
encoding != BTF_INT_SIGNED &&
2409
encoding != BTF_INT_CHAR &&
2410
encoding != BTF_INT_BOOL) {
2411
btf_verifier_log_type(env, t, "Unsupported encoding");
2412
return -ENOTSUPP;
2413
}
2414
2415
btf_verifier_log_type(env, t, NULL);
2416
2417
return meta_needed;
2418
}
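/* Illustrative note: the u32 int_data following a BTF_KIND_INT packs
 * (per uapi/linux/btf.h):
 *
 *	encoding = BTF_INT_ENCODING(int_data);	// bits 24-27
 *	offset   = BTF_INT_OFFSET(int_data);	// bits 16-23
 *	nr_bits  = BTF_INT_BITS(int_data);	// bits  0-7
 *
 * e.g. a plain "int" is typically size=4 with encoding BTF_INT_SIGNED,
 * offset 0 and nr_bits 32, which satisfies both the
 * nr_bits <= BITS_PER_U128 and BITS_ROUNDUP_BYTES(nr_bits) <= t->size
 * checks above.
 */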
2419
2420
static void btf_int_log(struct btf_verifier_env *env,
2421
const struct btf_type *t)
2422
{
2423
int int_data = btf_type_int(t);
2424
2425
btf_verifier_log(env,
2426
"size=%u bits_offset=%u nr_bits=%u encoding=%s",
2427
t->size, BTF_INT_OFFSET(int_data),
2428
BTF_INT_BITS(int_data),
2429
btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
2430
}
2431
2432
static void btf_int128_print(struct btf_show *show, void *data)
2433
{
2434
/* data points to a __int128 number.
2435
* Suppose
2436
* int128_num = *(__int128 *)data;
2437
* The formulas below show what upper_num and lower_num represent:
2438
* upper_num = int128_num >> 64;
2439
* lower_num = int128_num & 0xffffffffFFFFFFFFULL;
2440
*/
2441
u64 upper_num, lower_num;
2442
2443
#ifdef __BIG_ENDIAN_BITFIELD
2444
upper_num = *(u64 *)data;
2445
lower_num = *(u64 *)(data + 8);
2446
#else
2447
upper_num = *(u64 *)(data + 8);
2448
lower_num = *(u64 *)data;
2449
#endif
2450
if (upper_num == 0)
2451
btf_show_type_value(show, "0x%llx", lower_num);
2452
else
2453
btf_show_type_values(show, "0x%llx%016llx", upper_num,
2454
lower_num);
2455
}
2456
2457
static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
2458
u16 right_shift_bits)
2459
{
2460
u64 upper_num, lower_num;
2461
2462
#ifdef __BIG_ENDIAN_BITFIELD
2463
upper_num = print_num[0];
2464
lower_num = print_num[1];
2465
#else
2466
upper_num = print_num[1];
2467
lower_num = print_num[0];
2468
#endif
2469
2470
/* shake out un-needed bits by shift/or operations */
2471
if (left_shift_bits >= 64) {
2472
upper_num = lower_num << (left_shift_bits - 64);
2473
lower_num = 0;
2474
} else {
2475
upper_num = (upper_num << left_shift_bits) |
2476
(lower_num >> (64 - left_shift_bits));
2477
lower_num = lower_num << left_shift_bits;
2478
}
2479
2480
if (right_shift_bits >= 64) {
2481
lower_num = upper_num >> (right_shift_bits - 64);
2482
upper_num = 0;
2483
} else {
2484
lower_num = (lower_num >> right_shift_bits) |
2485
(upper_num << (64 - right_shift_bits));
2486
upper_num = upper_num >> right_shift_bits;
2487
}
2488
2489
#ifdef __BIG_ENDIAN_BITFIELD
2490
print_num[0] = upper_num;
2491
print_num[1] = lower_num;
2492
#else
2493
print_num[0] = lower_num;
2494
print_num[1] = upper_num;
2495
#endif
2496
}
2497
2498
static void btf_bitfield_show(void *data, u8 bits_offset,
2499
u8 nr_bits, struct btf_show *show)
2500
{
2501
u16 left_shift_bits, right_shift_bits;
2502
u8 nr_copy_bytes;
2503
u8 nr_copy_bits;
2504
u64 print_num[2] = {};
2505
2506
nr_copy_bits = nr_bits + bits_offset;
2507
nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
2508
2509
memcpy(print_num, data, nr_copy_bytes);
2510
2511
#ifdef __BIG_ENDIAN_BITFIELD
2512
left_shift_bits = bits_offset;
2513
#else
2514
left_shift_bits = BITS_PER_U128 - nr_copy_bits;
2515
#endif
2516
right_shift_bits = BITS_PER_U128 - nr_bits;
2517
2518
btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
2519
btf_int128_print(show, print_num);
2520
}
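/* Worked example (illustrative): showing a 3-bit little-endian bitfield
 * that starts at bit 5 of its byte, i.e. bits_offset = 5, nr_bits = 3:
 *
 *	nr_copy_bits = 3 + 5 = 8	-> one byte is memcpy'd
 *	left_shift   = 128 - 8 = 120
 *	right_shift  = 128 - 3 = 125
 *
 * After btf_int128_shift() only the three field bits remain in the low
 * bits of print_num and btf_int128_print() emits them as a hex value.
 */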
2521
2522
2523
static void btf_int_bits_show(const struct btf *btf,
2524
const struct btf_type *t,
2525
void *data, u8 bits_offset,
2526
struct btf_show *show)
2527
{
2528
u32 int_data = btf_type_int(t);
2529
u8 nr_bits = BTF_INT_BITS(int_data);
2530
u8 total_bits_offset;
2531
2532
/*
2533
* bits_offset is at most 7.
2534
* BTF_INT_OFFSET() cannot exceed 128 bits.
2535
*/
2536
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
2537
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
2538
bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
2539
btf_bitfield_show(data, bits_offset, nr_bits, show);
2540
}
2541
2542
static void btf_int_show(const struct btf *btf, const struct btf_type *t,
2543
u32 type_id, void *data, u8 bits_offset,
2544
struct btf_show *show)
2545
{
2546
u32 int_data = btf_type_int(t);
2547
u8 encoding = BTF_INT_ENCODING(int_data);
2548
bool sign = encoding & BTF_INT_SIGNED;
2549
u8 nr_bits = BTF_INT_BITS(int_data);
2550
void *safe_data;
2551
2552
safe_data = btf_show_start_type(show, t, type_id, data);
2553
if (!safe_data)
2554
return;
2555
2556
if (bits_offset || BTF_INT_OFFSET(int_data) ||
2557
BITS_PER_BYTE_MASKED(nr_bits)) {
2558
btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2559
goto out;
2560
}
2561
2562
switch (nr_bits) {
2563
case 128:
2564
btf_int128_print(show, safe_data);
2565
break;
2566
case 64:
2567
if (sign)
2568
btf_show_type_value(show, "%lld", *(s64 *)safe_data);
2569
else
2570
btf_show_type_value(show, "%llu", *(u64 *)safe_data);
2571
break;
2572
case 32:
2573
if (sign)
2574
btf_show_type_value(show, "%d", *(s32 *)safe_data);
2575
else
2576
btf_show_type_value(show, "%u", *(u32 *)safe_data);
2577
break;
2578
case 16:
2579
if (sign)
2580
btf_show_type_value(show, "%d", *(s16 *)safe_data);
2581
else
2582
btf_show_type_value(show, "%u", *(u16 *)safe_data);
2583
break;
2584
case 8:
2585
if (show->state.array_encoding == BTF_INT_CHAR) {
2586
/* check for null terminator */
2587
if (show->state.array_terminated)
2588
break;
2589
if (*(char *)data == '\0') {
2590
show->state.array_terminated = 1;
2591
break;
2592
}
2593
if (isprint(*(char *)data)) {
2594
btf_show_type_value(show, "'%c'",
2595
*(char *)safe_data);
2596
break;
2597
}
2598
}
2599
if (sign)
2600
btf_show_type_value(show, "%d", *(s8 *)safe_data);
2601
else
2602
btf_show_type_value(show, "%u", *(u8 *)safe_data);
2603
break;
2604
default:
2605
btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2606
break;
2607
}
2608
out:
2609
btf_show_end_type(show);
2610
}
2611
2612
static const struct btf_kind_operations int_ops = {
2613
.check_meta = btf_int_check_meta,
2614
.resolve = btf_df_resolve,
2615
.check_member = btf_int_check_member,
2616
.check_kflag_member = btf_int_check_kflag_member,
2617
.log_details = btf_int_log,
2618
.show = btf_int_show,
2619
};
2620
2621
static int btf_modifier_check_member(struct btf_verifier_env *env,
2622
const struct btf_type *struct_type,
2623
const struct btf_member *member,
2624
const struct btf_type *member_type)
2625
{
2626
const struct btf_type *resolved_type;
2627
u32 resolved_type_id = member->type;
2628
struct btf_member resolved_member;
2629
struct btf *btf = env->btf;
2630
2631
resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2632
if (!resolved_type) {
2633
btf_verifier_log_member(env, struct_type, member,
2634
"Invalid member");
2635
return -EINVAL;
2636
}
2637
2638
resolved_member = *member;
2639
resolved_member.type = resolved_type_id;
2640
2641
return btf_type_ops(resolved_type)->check_member(env, struct_type,
2642
&resolved_member,
2643
resolved_type);
2644
}
2645
2646
static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
2647
const struct btf_type *struct_type,
2648
const struct btf_member *member,
2649
const struct btf_type *member_type)
2650
{
2651
const struct btf_type *resolved_type;
2652
u32 resolved_type_id = member->type;
2653
struct btf_member resolved_member;
2654
struct btf *btf = env->btf;
2655
2656
resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
2657
if (!resolved_type) {
2658
btf_verifier_log_member(env, struct_type, member,
2659
"Invalid member");
2660
return -EINVAL;
2661
}
2662
2663
resolved_member = *member;
2664
resolved_member.type = resolved_type_id;
2665
2666
return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2667
&resolved_member,
2668
resolved_type);
2669
}
2670
2671
static int btf_ptr_check_member(struct btf_verifier_env *env,
2672
const struct btf_type *struct_type,
2673
const struct btf_member *member,
2674
const struct btf_type *member_type)
2675
{
2676
u32 struct_size, struct_bits_off, bytes_offset;
2677
2678
struct_size = struct_type->size;
2679
struct_bits_off = member->offset;
2680
bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2681
2682
if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2683
btf_verifier_log_member(env, struct_type, member,
2684
"Member is not byte aligned");
2685
return -EINVAL;
2686
}
2687
2688
if (struct_size - bytes_offset < sizeof(void *)) {
2689
btf_verifier_log_member(env, struct_type, member,
2690
"Member exceeds struct_size");
2691
return -EINVAL;
2692
}
2693
2694
return 0;
2695
}
2696
2697
static int btf_ref_type_check_meta(struct btf_verifier_env *env,
2698
const struct btf_type *t,
2699
u32 meta_left)
2700
{
2701
const char *value;
2702
2703
if (btf_type_vlen(t)) {
2704
btf_verifier_log_type(env, t, "vlen != 0");
2705
return -EINVAL;
2706
}
2707
2708
if (btf_type_kflag(t) && !btf_type_is_type_tag(t)) {
2709
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2710
return -EINVAL;
2711
}
2712
2713
if (!BTF_TYPE_ID_VALID(t->type)) {
2714
btf_verifier_log_type(env, t, "Invalid type_id");
2715
return -EINVAL;
2716
}
2717
2718
/* A typedef/type_tag type must have a valid name, while the other ref types,
2719
* i.e. volatile, const and restrict, must have a null name.
2720
*/
2721
if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
2722
if (!t->name_off ||
2723
!btf_name_valid_identifier(env->btf, t->name_off)) {
2724
btf_verifier_log_type(env, t, "Invalid name");
2725
return -EINVAL;
2726
}
2727
} else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
2728
value = btf_name_by_offset(env->btf, t->name_off);
2729
if (!value || !value[0]) {
2730
btf_verifier_log_type(env, t, "Invalid name");
2731
return -EINVAL;
2732
}
2733
} else {
2734
if (t->name_off) {
2735
btf_verifier_log_type(env, t, "Invalid name");
2736
return -EINVAL;
2737
}
2738
}
2739
2740
btf_verifier_log_type(env, t, NULL);
2741
2742
return 0;
2743
}
2744
2745
static int btf_modifier_resolve(struct btf_verifier_env *env,
2746
const struct resolve_vertex *v)
2747
{
2748
const struct btf_type *t = v->t;
2749
const struct btf_type *next_type;
2750
u32 next_type_id = t->type;
2751
struct btf *btf = env->btf;
2752
2753
next_type = btf_type_by_id(btf, next_type_id);
2754
if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2755
btf_verifier_log_type(env, v->t, "Invalid type_id");
2756
return -EINVAL;
2757
}
2758
2759
if (!env_type_is_resolve_sink(env, next_type) &&
2760
!env_type_is_resolved(env, next_type_id))
2761
return env_stack_push(env, next_type, next_type_id);
2762
2763
/* Figure out the resolved next_type_id with size.
2764
* They will be stored in the current modifier's
2765
* resolved_ids and resolved_sizes such that they can
2766
* save us some type-following when we use them later (e.g. in
2767
* pretty print).
2768
*/
2769
if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2770
if (env_type_is_resolved(env, next_type_id))
2771
next_type = btf_type_id_resolve(btf, &next_type_id);
2772
2773
/* "typedef void new_void", "const void"...etc */
2774
if (!btf_type_is_void(next_type) &&
2775
!btf_type_is_fwd(next_type) &&
2776
!btf_type_is_func_proto(next_type)) {
2777
btf_verifier_log_type(env, v->t, "Invalid type_id");
2778
return -EINVAL;
2779
}
2780
}
2781
2782
env_stack_pop_resolved(env, next_type_id, 0);
2783
2784
return 0;
2785
}
2786
2787
static int btf_var_resolve(struct btf_verifier_env *env,
2788
const struct resolve_vertex *v)
2789
{
2790
const struct btf_type *next_type;
2791
const struct btf_type *t = v->t;
2792
u32 next_type_id = t->type;
2793
struct btf *btf = env->btf;
2794
2795
next_type = btf_type_by_id(btf, next_type_id);
2796
if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2797
btf_verifier_log_type(env, v->t, "Invalid type_id");
2798
return -EINVAL;
2799
}
2800
2801
if (!env_type_is_resolve_sink(env, next_type) &&
2802
!env_type_is_resolved(env, next_type_id))
2803
return env_stack_push(env, next_type, next_type_id);
2804
2805
if (btf_type_is_modifier(next_type)) {
2806
const struct btf_type *resolved_type;
2807
u32 resolved_type_id;
2808
2809
resolved_type_id = next_type_id;
2810
resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2811
2812
if (btf_type_is_ptr(resolved_type) &&
2813
!env_type_is_resolve_sink(env, resolved_type) &&
2814
!env_type_is_resolved(env, resolved_type_id))
2815
return env_stack_push(env, resolved_type,
2816
resolved_type_id);
2817
}
2818
2819
/* We must resolve to something concrete at this point, no
2820
* forward types or similar that would resolve to a size of
2821
* zero are allowed.
2822
*/
2823
if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2824
btf_verifier_log_type(env, v->t, "Invalid type_id");
2825
return -EINVAL;
2826
}
2827
2828
env_stack_pop_resolved(env, next_type_id, 0);
2829
2830
return 0;
2831
}
2832
2833
static int btf_ptr_resolve(struct btf_verifier_env *env,
2834
const struct resolve_vertex *v)
2835
{
2836
const struct btf_type *next_type;
2837
const struct btf_type *t = v->t;
2838
u32 next_type_id = t->type;
2839
struct btf *btf = env->btf;
2840
2841
next_type = btf_type_by_id(btf, next_type_id);
2842
if (!next_type || btf_type_is_resolve_source_only(next_type)) {
2843
btf_verifier_log_type(env, v->t, "Invalid type_id");
2844
return -EINVAL;
2845
}
2846
2847
if (!env_type_is_resolve_sink(env, next_type) &&
2848
!env_type_is_resolved(env, next_type_id))
2849
return env_stack_push(env, next_type, next_type_id);
2850
2851
/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2852
* the modifier may have stopped resolving when it was resolved
2853
* to a ptr (last-resolved-ptr).
2854
*
2855
* We now need to continue from the last-resolved-ptr to
2856
* ensure the last-resolved-ptr will not refer back to
2857
* the current ptr (t).
2858
*/
2859
if (btf_type_is_modifier(next_type)) {
2860
const struct btf_type *resolved_type;
2861
u32 resolved_type_id;
2862
2863
resolved_type_id = next_type_id;
2864
resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
2865
2866
if (btf_type_is_ptr(resolved_type) &&
2867
!env_type_is_resolve_sink(env, resolved_type) &&
2868
!env_type_is_resolved(env, resolved_type_id))
2869
return env_stack_push(env, resolved_type,
2870
resolved_type_id);
2871
}
2872
2873
if (!btf_type_id_size(btf, &next_type_id, NULL)) {
2874
if (env_type_is_resolved(env, next_type_id))
2875
next_type = btf_type_id_resolve(btf, &next_type_id);
2876
2877
if (!btf_type_is_void(next_type) &&
2878
!btf_type_is_fwd(next_type) &&
2879
!btf_type_is_func_proto(next_type)) {
2880
btf_verifier_log_type(env, v->t, "Invalid type_id");
2881
return -EINVAL;
2882
}
2883
}
2884
2885
env_stack_pop_resolved(env, next_type_id, 0);
2886
2887
return 0;
2888
}
2889
2890
static void btf_modifier_show(const struct btf *btf,
2891
const struct btf_type *t,
2892
u32 type_id, void *data,
2893
u8 bits_offset, struct btf_show *show)
2894
{
2895
if (btf->resolved_ids)
2896
t = btf_type_id_resolve(btf, &type_id);
2897
else
2898
t = btf_type_skip_modifiers(btf, type_id, NULL);
2899
2900
btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2901
}
2902
2903
static void btf_var_show(const struct btf *btf, const struct btf_type *t,
2904
u32 type_id, void *data, u8 bits_offset,
2905
struct btf_show *show)
2906
{
2907
t = btf_type_id_resolve(btf, &type_id);
2908
2909
btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
2910
}
2911
2912
static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
2913
u32 type_id, void *data, u8 bits_offset,
2914
struct btf_show *show)
2915
{
2916
void *safe_data;
2917
2918
safe_data = btf_show_start_type(show, t, type_id, data);
2919
if (!safe_data)
2920
return;
2921
2922
/* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
2923
if (show->flags & BTF_SHOW_PTR_RAW)
2924
btf_show_type_value(show, "0x%px", *(void **)safe_data);
2925
else
2926
btf_show_type_value(show, "0x%p", *(void **)safe_data);
2927
btf_show_end_type(show);
2928
}
2929
2930
static void btf_ref_type_log(struct btf_verifier_env *env,
2931
const struct btf_type *t)
2932
{
2933
btf_verifier_log(env, "type_id=%u", t->type);
2934
}
2935
2936
static const struct btf_kind_operations modifier_ops = {
2937
.check_meta = btf_ref_type_check_meta,
2938
.resolve = btf_modifier_resolve,
2939
.check_member = btf_modifier_check_member,
2940
.check_kflag_member = btf_modifier_check_kflag_member,
2941
.log_details = btf_ref_type_log,
2942
.show = btf_modifier_show,
2943
};
2944
2945
static const struct btf_kind_operations ptr_ops = {
2946
.check_meta = btf_ref_type_check_meta,
2947
.resolve = btf_ptr_resolve,
2948
.check_member = btf_ptr_check_member,
2949
.check_kflag_member = btf_generic_check_kflag_member,
2950
.log_details = btf_ref_type_log,
2951
.show = btf_ptr_show,
2952
};
2953
2954
static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
2955
const struct btf_type *t,
2956
u32 meta_left)
2957
{
2958
if (btf_type_vlen(t)) {
2959
btf_verifier_log_type(env, t, "vlen != 0");
2960
return -EINVAL;
2961
}
2962
2963
if (t->type) {
2964
btf_verifier_log_type(env, t, "type != 0");
2965
return -EINVAL;
2966
}
2967
2968
/* fwd type must have a valid name */
2969
if (!t->name_off ||
2970
!btf_name_valid_identifier(env->btf, t->name_off)) {
2971
btf_verifier_log_type(env, t, "Invalid name");
2972
return -EINVAL;
2973
}
2974
2975
btf_verifier_log_type(env, t, NULL);
2976
2977
return 0;
2978
}
2979
2980
static void btf_fwd_type_log(struct btf_verifier_env *env,
2981
const struct btf_type *t)
2982
{
2983
btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2984
}
2985
2986
static const struct btf_kind_operations fwd_ops = {
2987
.check_meta = btf_fwd_check_meta,
2988
.resolve = btf_df_resolve,
2989
.check_member = btf_df_check_member,
2990
.check_kflag_member = btf_df_check_kflag_member,
2991
.log_details = btf_fwd_type_log,
2992
.show = btf_df_show,
2993
};
2994
2995
static int btf_array_check_member(struct btf_verifier_env *env,
2996
const struct btf_type *struct_type,
2997
const struct btf_member *member,
2998
const struct btf_type *member_type)
2999
{
3000
u32 struct_bits_off = member->offset;
3001
u32 struct_size, bytes_offset;
3002
u32 array_type_id, array_size;
3003
struct btf *btf = env->btf;
3004
3005
if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3006
btf_verifier_log_member(env, struct_type, member,
3007
"Member is not byte aligned");
3008
return -EINVAL;
3009
}
3010
3011
array_type_id = member->type;
3012
btf_type_id_size(btf, &array_type_id, &array_size);
3013
struct_size = struct_type->size;
3014
bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3015
if (struct_size - bytes_offset < array_size) {
3016
btf_verifier_log_member(env, struct_type, member,
3017
"Member exceeds struct_size");
3018
return -EINVAL;
3019
}
3020
3021
return 0;
3022
}
3023
3024
static s32 btf_array_check_meta(struct btf_verifier_env *env,
3025
const struct btf_type *t,
3026
u32 meta_left)
3027
{
3028
const struct btf_array *array = btf_type_array(t);
3029
u32 meta_needed = sizeof(*array);
3030
3031
if (meta_left < meta_needed) {
3032
btf_verifier_log_basic(env, t,
3033
"meta_left:%u meta_needed:%u",
3034
meta_left, meta_needed);
3035
return -EINVAL;
3036
}
3037
3038
/* array type should not have a name */
3039
if (t->name_off) {
3040
btf_verifier_log_type(env, t, "Invalid name");
3041
return -EINVAL;
3042
}
3043
3044
if (btf_type_vlen(t)) {
3045
btf_verifier_log_type(env, t, "vlen != 0");
3046
return -EINVAL;
3047
}
3048
3049
if (btf_type_kflag(t)) {
3050
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
3051
return -EINVAL;
3052
}
3053
3054
if (t->size) {
3055
btf_verifier_log_type(env, t, "size != 0");
3056
return -EINVAL;
3057
}
3058
3059
/* Array elem type and index type cannot be in type void,
3060
* so !array->type and !array->index_type are not allowed.
3061
*/
3062
if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
3063
btf_verifier_log_type(env, t, "Invalid elem");
3064
return -EINVAL;
3065
}
3066
3067
if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
3068
btf_verifier_log_type(env, t, "Invalid index");
3069
return -EINVAL;
3070
}
3071
3072
btf_verifier_log_type(env, t, NULL);
3073
3074
return meta_needed;
3075
}
3076
3077
static int btf_array_resolve(struct btf_verifier_env *env,
3078
const struct resolve_vertex *v)
3079
{
3080
const struct btf_array *array = btf_type_array(v->t);
3081
const struct btf_type *elem_type, *index_type;
3082
u32 elem_type_id, index_type_id;
3083
struct btf *btf = env->btf;
3084
u32 elem_size;
3085
3086
/* Check array->index_type */
3087
index_type_id = array->index_type;
3088
index_type = btf_type_by_id(btf, index_type_id);
3089
if (btf_type_nosize_or_null(index_type) ||
3090
btf_type_is_resolve_source_only(index_type)) {
3091
btf_verifier_log_type(env, v->t, "Invalid index");
3092
return -EINVAL;
3093
}
3094
3095
if (!env_type_is_resolve_sink(env, index_type) &&
3096
!env_type_is_resolved(env, index_type_id))
3097
return env_stack_push(env, index_type, index_type_id);
3098
3099
index_type = btf_type_id_size(btf, &index_type_id, NULL);
3100
if (!index_type || !btf_type_is_int(index_type) ||
3101
!btf_type_int_is_regular(index_type)) {
3102
btf_verifier_log_type(env, v->t, "Invalid index");
3103
return -EINVAL;
3104
}
3105
3106
/* Check array->type */
3107
elem_type_id = array->type;
3108
elem_type = btf_type_by_id(btf, elem_type_id);
3109
if (btf_type_nosize_or_null(elem_type) ||
3110
btf_type_is_resolve_source_only(elem_type)) {
3111
btf_verifier_log_type(env, v->t,
3112
"Invalid elem");
3113
return -EINVAL;
3114
}
3115
3116
if (!env_type_is_resolve_sink(env, elem_type) &&
3117
!env_type_is_resolved(env, elem_type_id))
3118
return env_stack_push(env, elem_type, elem_type_id);
3119
3120
elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3121
if (!elem_type) {
3122
btf_verifier_log_type(env, v->t, "Invalid elem");
3123
return -EINVAL;
3124
}
3125
3126
if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
3127
btf_verifier_log_type(env, v->t, "Invalid array of int");
3128
return -EINVAL;
3129
}
3130
3131
if (array->nelems && elem_size > U32_MAX / array->nelems) {
3132
btf_verifier_log_type(env, v->t,
3133
"Array size overflows U32_MAX");
3134
return -EINVAL;
3135
}
3136
3137
env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
3138
3139
return 0;
3140
}
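/* Example (illustrative): for a member "u32 a[16]" the array resolves
 * with elem_size == 4 and array->nelems == 16, so
 * env_stack_pop_resolved() records a resolved size of 64 for the
 * BTF_KIND_ARRAY type. The nelems * elem_size product is checked
 * against U32_MAX above because both factors come straight from
 * (possibly user-supplied) BTF.
 */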
3141
3142
static void btf_array_log(struct btf_verifier_env *env,
3143
const struct btf_type *t)
3144
{
3145
const struct btf_array *array = btf_type_array(t);
3146
3147
btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
3148
array->type, array->index_type, array->nelems);
3149
}
3150
3151
static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
3152
u32 type_id, void *data, u8 bits_offset,
3153
struct btf_show *show)
3154
{
3155
const struct btf_array *array = btf_type_array(t);
3156
const struct btf_kind_operations *elem_ops;
3157
const struct btf_type *elem_type;
3158
u32 i, elem_size = 0, elem_type_id;
3159
u16 encoding = 0;
3160
3161
elem_type_id = array->type;
3162
elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
3163
if (elem_type && btf_type_has_size(elem_type))
3164
elem_size = elem_type->size;
3165
3166
if (elem_type && btf_type_is_int(elem_type)) {
3167
u32 int_type = btf_type_int(elem_type);
3168
3169
encoding = BTF_INT_ENCODING(int_type);
3170
3171
/*
3172
* BTF_INT_CHAR encoding never seems to be set for
3173
* char arrays, so if size is 1 and element is
3174
* printable as a char, we'll do that.
3175
*/
3176
if (elem_size == 1)
3177
encoding = BTF_INT_CHAR;
3178
}
3179
3180
if (!btf_show_start_array_type(show, t, type_id, encoding, data))
3181
return;
3182
3183
if (!elem_type)
3184
goto out;
3185
elem_ops = btf_type_ops(elem_type);
3186
3187
for (i = 0; i < array->nelems; i++) {
3188
3189
btf_show_start_array_member(show);
3190
3191
elem_ops->show(btf, elem_type, elem_type_id, data,
3192
bits_offset, show);
3193
data += elem_size;
3194
3195
btf_show_end_array_member(show);
3196
3197
if (show->state.array_terminated)
3198
break;
3199
}
3200
out:
3201
btf_show_end_array_type(show);
3202
}
3203
3204
static void btf_array_show(const struct btf *btf, const struct btf_type *t,
3205
u32 type_id, void *data, u8 bits_offset,
3206
struct btf_show *show)
3207
{
3208
const struct btf_member *m = show->state.member;
3209
3210
/*
3211
* First check if any members would be shown (are non-zero).
3212
* See comments above "struct btf_show" definition for more
3213
* details on how this works at a high level.
3214
*/
3215
if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
3216
if (!show->state.depth_check) {
3217
show->state.depth_check = show->state.depth + 1;
3218
show->state.depth_to_show = 0;
3219
}
3220
__btf_array_show(btf, t, type_id, data, bits_offset, show);
3221
show->state.member = m;
3222
3223
if (show->state.depth_check != show->state.depth + 1)
3224
return;
3225
show->state.depth_check = 0;
3226
3227
if (show->state.depth_to_show <= show->state.depth)
3228
return;
3229
/*
3230
* Reaching here indicates we have recursed and found
3231
* non-zero array member(s).
3232
*/
3233
}
3234
__btf_array_show(btf, t, type_id, data, bits_offset, show);
3235
}
3236
3237
static const struct btf_kind_operations array_ops = {
3238
.check_meta = btf_array_check_meta,
3239
.resolve = btf_array_resolve,
3240
.check_member = btf_array_check_member,
3241
.check_kflag_member = btf_generic_check_kflag_member,
3242
.log_details = btf_array_log,
3243
.show = btf_array_show,
3244
};
3245
3246
static int btf_struct_check_member(struct btf_verifier_env *env,
3247
const struct btf_type *struct_type,
3248
const struct btf_member *member,
3249
const struct btf_type *member_type)
3250
{
3251
u32 struct_bits_off = member->offset;
3252
u32 struct_size, bytes_offset;
3253
3254
if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
3255
btf_verifier_log_member(env, struct_type, member,
3256
"Member is not byte aligned");
3257
return -EINVAL;
3258
}
3259
3260
struct_size = struct_type->size;
3261
bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3262
if (struct_size - bytes_offset < member_type->size) {
3263
btf_verifier_log_member(env, struct_type, member,
3264
"Member exceeds struct_size");
3265
return -EINVAL;
3266
}
3267
3268
return 0;
3269
}
3270
3271
static s32 btf_struct_check_meta(struct btf_verifier_env *env,
3272
const struct btf_type *t,
3273
u32 meta_left)
3274
{
3275
bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
3276
const struct btf_member *member;
3277
u32 meta_needed, last_offset;
3278
struct btf *btf = env->btf;
3279
u32 struct_size = t->size;
3280
u32 offset;
3281
u16 i;
3282
3283
meta_needed = btf_type_vlen(t) * sizeof(*member);
3284
if (meta_left < meta_needed) {
3285
btf_verifier_log_basic(env, t,
3286
"meta_left:%u meta_needed:%u",
3287
meta_left, meta_needed);
3288
return -EINVAL;
3289
}
3290
3291
/* A struct type has either no name or a valid one */
3292
if (t->name_off &&
3293
!btf_name_valid_identifier(env->btf, t->name_off)) {
3294
btf_verifier_log_type(env, t, "Invalid name");
3295
return -EINVAL;
3296
}
3297
3298
btf_verifier_log_type(env, t, NULL);
3299
3300
last_offset = 0;
3301
for_each_member(i, t, member) {
3302
if (!btf_name_offset_valid(btf, member->name_off)) {
3303
btf_verifier_log_member(env, t, member,
3304
"Invalid member name_offset:%u",
3305
member->name_off);
3306
return -EINVAL;
3307
}
3308
3309
/* A struct member has either no name or a valid one */
3310
if (member->name_off &&
3311
!btf_name_valid_identifier(btf, member->name_off)) {
3312
btf_verifier_log_member(env, t, member, "Invalid name");
3313
return -EINVAL;
3314
}
3315
/* A member cannot be in type void */
3316
if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
3317
btf_verifier_log_member(env, t, member,
3318
"Invalid type_id");
3319
return -EINVAL;
3320
}
3321
3322
offset = __btf_member_bit_offset(t, member);
3323
if (is_union && offset) {
3324
btf_verifier_log_member(env, t, member,
3325
"Invalid member bits_offset");
3326
return -EINVAL;
3327
}
3328
3329
/*
3330
* ">" instead of ">=" because the last member could be
3331
* "char a[0];"
3332
*/
3333
if (last_offset > offset) {
3334
btf_verifier_log_member(env, t, member,
3335
"Invalid member bits_offset");
3336
return -EINVAL;
3337
}
3338
3339
if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
3340
btf_verifier_log_member(env, t, member,
3341
"Member bits_offset exceeds its struct size");
3342
return -EINVAL;
3343
}
3344
3345
btf_verifier_log_member(env, t, member, NULL);
3346
last_offset = offset;
3347
}
3348
3349
return meta_needed;
3350
}
3351
3352
static int btf_struct_resolve(struct btf_verifier_env *env,
3353
const struct resolve_vertex *v)
3354
{
3355
const struct btf_member *member;
3356
int err;
3357
u16 i;
3358
3359
/* Before continuing to resolve the next_member,
3360
* ensure the last member is indeed resolved to a
3361
* type with size info.
3362
*/
3363
if (v->next_member) {
3364
const struct btf_type *last_member_type;
3365
const struct btf_member *last_member;
3366
u32 last_member_type_id;
3367
3368
last_member = btf_type_member(v->t) + v->next_member - 1;
3369
last_member_type_id = last_member->type;
3370
if (WARN_ON_ONCE(!env_type_is_resolved(env,
3371
last_member_type_id)))
3372
return -EINVAL;
3373
3374
last_member_type = btf_type_by_id(env->btf,
3375
last_member_type_id);
3376
if (btf_type_kflag(v->t))
3377
err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3378
last_member,
3379
last_member_type);
3380
else
3381
err = btf_type_ops(last_member_type)->check_member(env, v->t,
3382
last_member,
3383
last_member_type);
3384
if (err)
3385
return err;
3386
}
3387
3388
for_each_member_from(i, v->next_member, v->t, member) {
3389
u32 member_type_id = member->type;
3390
const struct btf_type *member_type = btf_type_by_id(env->btf,
3391
member_type_id);
3392
3393
if (btf_type_nosize_or_null(member_type) ||
3394
btf_type_is_resolve_source_only(member_type)) {
3395
btf_verifier_log_member(env, v->t, member,
3396
"Invalid member");
3397
return -EINVAL;
3398
}
3399
3400
if (!env_type_is_resolve_sink(env, member_type) &&
3401
!env_type_is_resolved(env, member_type_id)) {
3402
env_stack_set_next_member(env, i + 1);
3403
return env_stack_push(env, member_type, member_type_id);
3404
}
3405
3406
if (btf_type_kflag(v->t))
3407
err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3408
member,
3409
member_type);
3410
else
3411
err = btf_type_ops(member_type)->check_member(env, v->t,
3412
member,
3413
member_type);
3414
if (err)
3415
return err;
3416
}
3417
3418
env_stack_pop_resolved(env, 0, 0);
3419
3420
return 0;
3421
}
3422
3423
static void btf_struct_log(struct btf_verifier_env *env,
3424
const struct btf_type *t)
3425
{
3426
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3427
}
3428
3429
enum {
3430
BTF_FIELD_IGNORE = 0,
3431
BTF_FIELD_FOUND = 1,
3432
};
3433
3434
struct btf_field_info {
3435
enum btf_field_type type;
3436
u32 off;
3437
union {
3438
struct {
3439
u32 type_id;
3440
} kptr;
3441
struct {
3442
const char *node_name;
3443
u32 value_btf_id;
3444
} graph_root;
3445
};
3446
};
3447
3448
static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
3449
u32 off, int sz, enum btf_field_type field_type,
3450
struct btf_field_info *info)
3451
{
3452
if (!__btf_type_is_struct(t))
3453
return BTF_FIELD_IGNORE;
3454
if (t->size != sz)
3455
return BTF_FIELD_IGNORE;
3456
info->type = field_type;
3457
info->off = off;
3458
return BTF_FIELD_FOUND;
3459
}
3460
3461
static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
3462
u32 off, int sz, struct btf_field_info *info, u32 field_mask)
3463
{
3464
enum btf_field_type type;
3465
const char *tag_value;
3466
bool is_type_tag;
3467
u32 res_id;
3468
3469
/* Permit modifiers on the pointer itself */
3470
if (btf_type_is_volatile(t))
3471
t = btf_type_by_id(btf, t->type);
3472
/* For PTR, sz is always == 8 */
3473
if (!btf_type_is_ptr(t))
3474
return BTF_FIELD_IGNORE;
3475
t = btf_type_by_id(btf, t->type);
3476
is_type_tag = btf_type_is_type_tag(t) && !btf_type_kflag(t);
3477
if (!is_type_tag)
3478
return BTF_FIELD_IGNORE;
3479
/* Reject extra tags */
3480
if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
3481
return -EINVAL;
3482
tag_value = __btf_name_by_offset(btf, t->name_off);
3483
if (!strcmp("kptr_untrusted", tag_value))
3484
type = BPF_KPTR_UNREF;
3485
else if (!strcmp("kptr", tag_value))
3486
type = BPF_KPTR_REF;
3487
else if (!strcmp("percpu_kptr", tag_value))
3488
type = BPF_KPTR_PERCPU;
3489
else if (!strcmp("uptr", tag_value))
3490
type = BPF_UPTR;
3491
else
3492
return -EINVAL;
3493
3494
if (!(type & field_mask))
3495
return BTF_FIELD_IGNORE;
3496
3497
/* Get the base type */
3498
t = btf_type_skip_modifiers(btf, t->type, &res_id);
3499
/* Only pointer to struct is allowed */
3500
if (!__btf_type_is_struct(t))
3501
return -EINVAL;
3502
3503
info->type = type;
3504
info->off = off;
3505
info->kptr.type_id = res_id;
3506
return BTF_FIELD_FOUND;
3507
}
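/* Illustrative usage from the BPF program side (macro names such as
 * __kptr come from tooling headers, not from this file): a kptr field
 * is a pointer member annotated with a "kptr" btf_type_tag, e.g.
 *
 *	struct map_value {
 *		struct task_struct __kptr *task;
 *	};
 *
 * which shows up in BTF as PTR -> TYPE_TAG("kptr") -> STRUCT and is
 * reported above as a BPF_KPTR_REF field at the member's offset.
 */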
3508
3509
int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
3510
int comp_idx, const char *tag_key, int last_id)
3511
{
3512
int len = strlen(tag_key);
3513
int i, n;
3514
3515
for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) {
3516
const struct btf_type *t = btf_type_by_id(btf, i);
3517
3518
if (!btf_type_is_decl_tag(t))
3519
continue;
3520
if (pt != btf_type_by_id(btf, t->type))
3521
continue;
3522
if (btf_type_decl_tag(t)->component_idx != comp_idx)
3523
continue;
3524
if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3525
continue;
3526
return i;
3527
}
3528
return -ENOENT;
3529
}
3530
3531
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
3532
int comp_idx, const char *tag_key)
3533
{
3534
const char *value = NULL;
3535
const struct btf_type *t;
3536
int len, id;
3537
3538
id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key,
3539
btf_named_start_id(btf, false) - 1);
3540
if (id < 0)
3541
return ERR_PTR(id);
3542
3543
t = btf_type_by_id(btf, id);
3544
len = strlen(tag_key);
3545
value = __btf_name_by_offset(btf, t->name_off) + len;
3546
3547
/* Prevent duplicate entries for same type */
3548
id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id);
3549
if (id >= 0)
3550
return ERR_PTR(-EEXIST);
3551
3552
return value;
3553
}
3554
3555
static int
3556
btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
3557
const struct btf_type *t, int comp_idx, u32 off,
3558
int sz, struct btf_field_info *info,
3559
enum btf_field_type head_type)
3560
{
3561
const char *node_field_name;
3562
const char *value_type;
3563
s32 id;
3564
3565
if (!__btf_type_is_struct(t))
3566
return BTF_FIELD_IGNORE;
3567
if (t->size != sz)
3568
return BTF_FIELD_IGNORE;
3569
value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
3570
if (IS_ERR(value_type))
3571
return -EINVAL;
3572
node_field_name = strstr(value_type, ":");
3573
if (!node_field_name)
3574
return -EINVAL;
3575
value_type = kstrndup(value_type, node_field_name - value_type,
3576
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
3577
if (!value_type)
3578
return -ENOMEM;
3579
id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
3580
kfree(value_type);
3581
if (id < 0)
3582
return id;
3583
node_field_name++;
3584
if (str_is_empty(node_field_name))
3585
return -EINVAL;
3586
info->type = head_type;
3587
info->off = off;
3588
info->graph_root.value_btf_id = id;
3589
info->graph_root.node_name = node_field_name;
3590
return BTF_FIELD_FOUND;
3591
}
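/* Illustrative usage (the __contains macro spelling comes from BPF
 * tooling/selftest headers, not from this file): a graph root member
 * carries a "contains:<type>:<field>" decl tag, e.g.
 *
 *	struct elem { struct bpf_list_node node; };
 *	struct map_value {
 *		struct bpf_list_head head __contains(elem, node);
 *	};
 *
 * Here value_type is parsed out as "elem" (looked up as a STRUCT) and
 * node_field_name as "node", both stored in info->graph_root.
 */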
3592
3593
static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
3594
u32 field_mask, u32 *seen_mask, int *align, int *sz)
3595
{
3596
const struct {
3597
enum btf_field_type type;
3598
const char *const name;
3599
const bool is_unique;
3600
} field_types[] = {
3601
{ BPF_SPIN_LOCK, "bpf_spin_lock", true },
3602
{ BPF_RES_SPIN_LOCK, "bpf_res_spin_lock", true },
3603
{ BPF_TIMER, "bpf_timer", true },
3604
{ BPF_WORKQUEUE, "bpf_wq", true },
3605
{ BPF_TASK_WORK, "bpf_task_work", true },
3606
{ BPF_LIST_HEAD, "bpf_list_head", false },
3607
{ BPF_LIST_NODE, "bpf_list_node", false },
3608
{ BPF_RB_ROOT, "bpf_rb_root", false },
3609
{ BPF_RB_NODE, "bpf_rb_node", false },
3610
{ BPF_REFCOUNT, "bpf_refcount", false },
3611
};
3612
int type = 0, i;
3613
const char *name = __btf_name_by_offset(btf, var_type->name_off);
3614
const char *field_type_name;
3615
enum btf_field_type field_type;
3616
bool is_unique;
3617
3618
for (i = 0; i < ARRAY_SIZE(field_types); ++i) {
3619
field_type = field_types[i].type;
3620
field_type_name = field_types[i].name;
3621
is_unique = field_types[i].is_unique;
3622
if (!(field_mask & field_type) || strcmp(name, field_type_name))
3623
continue;
3624
if (is_unique) {
3625
if (*seen_mask & field_type)
3626
return -E2BIG;
3627
*seen_mask |= field_type;
3628
}
3629
type = field_type;
3630
goto end;
3631
}
3632
3633
/* Only return BPF_KPTR when all other types with matchable names fail */
3634
if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
3635
type = BPF_KPTR_REF;
3636
goto end;
3637
}
3638
return 0;
3639
end:
3640
*sz = btf_field_type_size(type);
3641
*align = btf_field_type_align(type);
3642
return type;
3643
}
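/* Example (illustrative): scanning a map value such as
 *
 *	struct map_value {
 *		struct bpf_spin_lock lock;
 *		struct bpf_timer timer;
 *		struct bpf_list_head head;	// plus a "contains:..." decl tag
 *	};
 *
 * yields BPF_SPIN_LOCK, BPF_TIMER and BPF_LIST_HEAD for the three
 * members in turn. lock and timer are "unique" field types, so a second
 * occurrence of either fails with -E2BIG via seen_mask; list heads may
 * repeat.
 */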
3644
3645
/* Repeat a number of fields a specified number of times.
3646
*
3647
* Copy the fields starting from the first field and repeat them for
3648
* repeat_cnt times. The fields are repeated by adding the offset of each
3649
* field with
3650
* (i + 1) * elem_size
3651
* where i is the repeat index and elem_size is the size of an element.
3652
*/
3653
static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
3654
u32 field_cnt, u32 repeat_cnt, u32 elem_size)
3655
{
3656
u32 i, j;
3657
u32 cur;
3658
3659
/* Ensure not repeating fields that should not be repeated. */
3660
for (i = 0; i < field_cnt; i++) {
3661
switch (info[i].type) {
3662
case BPF_KPTR_UNREF:
3663
case BPF_KPTR_REF:
3664
case BPF_KPTR_PERCPU:
3665
case BPF_UPTR:
3666
case BPF_LIST_HEAD:
3667
case BPF_RB_ROOT:
3668
break;
3669
default:
3670
return -EINVAL;
3671
}
3672
}
3673
3674
/* The type of struct size or variable size is u32,
3675
* so the multiplication will not overflow.
3676
*/
3677
if (field_cnt * (repeat_cnt + 1) > info_cnt)
3678
return -E2BIG;
3679
3680
cur = field_cnt;
3681
for (i = 0; i < repeat_cnt; i++) {
3682
memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
3683
for (j = 0; j < field_cnt; j++)
3684
info[cur++].off += (i + 1) * elem_size;
3685
}
3686
3687
return 0;
3688
}
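/* Worked example (illustrative; __kptr is a tooling macro, not defined
 * here): for an array member such as
 *
 *	struct map_value {
 *		struct task_struct __kptr *arr[3];
 *	};
 *
 * btf_find_field_one() first records one kptr field at the offset of
 * arr, then calls btf_repeat_fields() with field_cnt == 1,
 * repeat_cnt == 2 and elem_size == 8, producing fields at arr + 0,
 * arr + 8 and arr + 16.
 */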
3689
3690
static int btf_find_struct_field(const struct btf *btf,
3691
const struct btf_type *t, u32 field_mask,
3692
struct btf_field_info *info, int info_cnt,
3693
u32 level);
3694
3695
/* Find special fields in the struct type of a field.
3696
*
3697
* This function is used to find fields of special types that are not
3698
* global variables or direct fields of a struct type. It also handles the
3699
* repetition when such a struct is the element type of an array.
3700
*/
3701
static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
3702
u32 off, u32 nelems,
3703
u32 field_mask, struct btf_field_info *info,
3704
int info_cnt, u32 level)
3705
{
3706
int ret, err, i;
3707
3708
level++;
3709
if (level >= MAX_RESOLVE_DEPTH)
3710
return -E2BIG;
3711
3712
ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level);
3713
3714
if (ret <= 0)
3715
return ret;
3716
3717
/* Shift the offsets of the nested struct fields to the offsets
3718
* relative to the container.
3719
*/
3720
for (i = 0; i < ret; i++)
3721
info[i].off += off;
3722
3723
if (nelems > 1) {
3724
err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
3725
if (err == 0)
3726
ret *= nelems;
3727
else
3728
ret = err;
3729
}
3730
3731
return ret;
3732
}
3733
3734
static int btf_find_field_one(const struct btf *btf,
3735
const struct btf_type *var,
3736
const struct btf_type *var_type,
3737
int var_idx,
3738
u32 off, u32 expected_size,
3739
u32 field_mask, u32 *seen_mask,
3740
struct btf_field_info *info, int info_cnt,
3741
u32 level)
3742
{
3743
int ret, align, sz, field_type;
3744
struct btf_field_info tmp;
3745
const struct btf_array *array;
3746
u32 i, nelems = 1;
3747
3748
/* Walk into array types to find the element type and the number of
3749
* elements in the (flattened) array.
3750
*/
3751
for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) {
3752
array = btf_array(var_type);
3753
nelems *= array->nelems;
3754
var_type = btf_type_by_id(btf, array->type);
3755
}
3756
if (i == MAX_RESOLVE_DEPTH)
3757
return -E2BIG;
3758
if (nelems == 0)
3759
return 0;
3760
3761
field_type = btf_get_field_type(btf, var_type,
3762
field_mask, seen_mask, &align, &sz);
3763
/* Look into variables of struct types */
3764
if (!field_type && __btf_type_is_struct(var_type)) {
3765
sz = var_type->size;
3766
if (expected_size && expected_size != sz * nelems)
3767
return 0;
3768
ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask,
3769
&info[0], info_cnt, level);
3770
return ret;
3771
}
3772
3773
if (field_type == 0)
3774
return 0;
3775
if (field_type < 0)
3776
return field_type;
3777
3778
if (expected_size && expected_size != sz * nelems)
3779
return 0;
3780
if (off % align)
3781
return 0;
3782
3783
switch (field_type) {
3784
case BPF_SPIN_LOCK:
3785
case BPF_RES_SPIN_LOCK:
3786
case BPF_TIMER:
3787
case BPF_WORKQUEUE:
3788
case BPF_LIST_NODE:
3789
case BPF_RB_NODE:
3790
case BPF_REFCOUNT:
3791
case BPF_TASK_WORK:
3792
ret = btf_find_struct(btf, var_type, off, sz, field_type,
3793
info_cnt ? &info[0] : &tmp);
3794
if (ret < 0)
3795
return ret;
3796
break;
3797
case BPF_KPTR_UNREF:
3798
case BPF_KPTR_REF:
3799
case BPF_KPTR_PERCPU:
3800
case BPF_UPTR:
3801
ret = btf_find_kptr(btf, var_type, off, sz,
3802
info_cnt ? &info[0] : &tmp, field_mask);
3803
if (ret < 0)
3804
return ret;
3805
break;
3806
case BPF_LIST_HEAD:
3807
case BPF_RB_ROOT:
3808
ret = btf_find_graph_root(btf, var, var_type,
3809
var_idx, off, sz,
3810
info_cnt ? &info[0] : &tmp,
3811
field_type);
3812
if (ret < 0)
3813
return ret;
3814
break;
3815
default:
3816
return -EFAULT;
3817
}
3818
3819
if (ret == BTF_FIELD_IGNORE)
3820
return 0;
3821
if (!info_cnt)
3822
return -E2BIG;
3823
if (nelems > 1) {
3824
ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
3825
if (ret < 0)
3826
return ret;
3827
}
3828
return nelems;
3829
}
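
/* Example (illustrative): given a map value such as
*
*   struct elem { struct task_struct __kptr *owner; };
*   struct map_value { struct elem arr[2][3]; };
*
* (__kptr is the type tag macro from tools/lib/bpf/bpf_helpers.h),
* btf_find_field_one() flattens arr into nelems = 6 elements of
* struct elem, btf_find_nested_struct() finds the kptr at offset 0 of
* one element and shifts it by arr's offset, and btf_repeat_fields()
* then clones that btf_field_info five more times, each shifted by
* sizeof(struct elem).
*/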
3830
3831
static int btf_find_struct_field(const struct btf *btf,
3832
const struct btf_type *t, u32 field_mask,
3833
struct btf_field_info *info, int info_cnt,
3834
u32 level)
3835
{
3836
int ret, idx = 0;
3837
const struct btf_member *member;
3838
u32 i, off, seen_mask = 0;
3839
3840
for_each_member(i, t, member) {
3841
const struct btf_type *member_type = btf_type_by_id(btf,
3842
member->type);
3843
3844
off = __btf_member_bit_offset(t, member);
3845
if (off % 8)
3846
/* valid C code cannot generate such BTF */
3847
return -EINVAL;
3848
off /= 8;
3849
3850
ret = btf_find_field_one(btf, t, member_type, i,
3851
off, 0,
3852
field_mask, &seen_mask,
3853
&info[idx], info_cnt - idx, level);
3854
if (ret < 0)
3855
return ret;
3856
idx += ret;
3857
}
3858
return idx;
3859
}
3860
3861
static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
3862
u32 field_mask, struct btf_field_info *info,
3863
int info_cnt, u32 level)
3864
{
3865
int ret, idx = 0;
3866
const struct btf_var_secinfo *vsi;
3867
u32 i, off, seen_mask = 0;
3868
3869
for_each_vsi(i, t, vsi) {
3870
const struct btf_type *var = btf_type_by_id(btf, vsi->type);
3871
const struct btf_type *var_type = btf_type_by_id(btf, var->type);
3872
3873
off = vsi->offset;
3874
ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
3875
field_mask, &seen_mask,
3876
&info[idx], info_cnt - idx,
3877
level);
3878
if (ret < 0)
3879
return ret;
3880
idx += ret;
3881
}
3882
return idx;
3883
}
3884
3885
static int btf_find_field(const struct btf *btf, const struct btf_type *t,
3886
u32 field_mask, struct btf_field_info *info,
3887
int info_cnt)
3888
{
3889
if (__btf_type_is_struct(t))
3890
return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0);
3891
else if (btf_type_is_datasec(t))
3892
return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0);
3893
return -EINVAL;
3894
}
3895
3896
/* Callers have to ensure the life cycle of btf if it is program BTF */
3897
static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
3898
struct btf_field_info *info)
3899
{
3900
struct module *mod = NULL;
3901
const struct btf_type *t;
3902
/* If a matching btf type is found in kernel or module BTFs, kptr_btf
3903
* is that BTF, otherwise it's program BTF
3904
*/
3905
struct btf *kptr_btf;
3906
int ret;
3907
s32 id;
3908
3909
/* Find type in map BTF, and use it to look up the matching type
3910
* in vmlinux or module BTFs, by name and kind.
3911
*/
3912
t = btf_type_by_id(btf, info->kptr.type_id);
3913
id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
3914
&kptr_btf);
3915
if (id == -ENOENT) {
3916
/* btf_parse_kptr should only be called w/ btf = program BTF */
3917
WARN_ON_ONCE(btf_is_kernel(btf));
3918
3919
/* Type exists only in program BTF. Assume that it's a MEM_ALLOC
3920
* kptr allocated via bpf_obj_new
3921
*/
3922
field->kptr.dtor = NULL;
3923
id = info->kptr.type_id;
3924
kptr_btf = (struct btf *)btf;
3925
goto found_dtor;
3926
}
3927
if (id < 0)
3928
return id;
3929
3930
/* Find and stash the function pointer for the destruction function that
3931
* needs to be eventually invoked from the map free path.
3932
*/
3933
if (info->type == BPF_KPTR_REF) {
3934
const struct btf_type *dtor_func;
3935
const char *dtor_func_name;
3936
unsigned long addr;
3937
s32 dtor_btf_id;
3938
3939
/* This call also serves as a whitelist of allowed objects that
3940
* can be used as a referenced pointer and be stored in a map at
3941
* the same time.
3942
*/
3943
dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
3944
if (dtor_btf_id < 0) {
3945
ret = dtor_btf_id;
3946
goto end_btf;
3947
}
3948
3949
dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id);
3950
if (!dtor_func) {
3951
ret = -ENOENT;
3952
goto end_btf;
3953
}
3954
3955
if (btf_is_module(kptr_btf)) {
3956
mod = btf_try_get_module(kptr_btf);
3957
if (!mod) {
3958
ret = -ENXIO;
3959
goto end_btf;
3960
}
3961
}
3962
3963
/* We already verified dtor_func to be btf_type_is_func
3964
* in register_btf_id_dtor_kfuncs.
3965
*/
3966
dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
3967
addr = kallsyms_lookup_name(dtor_func_name);
3968
if (!addr) {
3969
ret = -EINVAL;
3970
goto end_mod;
3971
}
3972
field->kptr.dtor = (void *)addr;
3973
}
3974
3975
found_dtor:
3976
field->kptr.btf_id = id;
3977
field->kptr.btf = kptr_btf;
3978
field->kptr.module = mod;
3979
return 0;
3980
end_mod:
3981
module_put(mod);
3982
end_btf:
3983
btf_put(kptr_btf);
3984
return ret;
3985
}
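
/* Example (illustrative): on the BPF program side a referenced kptr
* field is usually declared with the __kptr type tag from
* tools/lib/bpf/bpf_helpers.h, e.g.
*
*   struct map_value {
*           struct task_struct __kptr *task;
*   };
*
* For such a field btf_parse_kptr() looks up "task_struct" in vmlinux
* (or module) BTF by name and kind, and for BPF_KPTR_REF also stashes
* the registered destructor kfunc so the map free path can release a
* leftover reference.  A type that exists only in program BTF is
* instead treated as a bpf_obj_new() allocated object with no dtor.
*/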
3986
3987
static int btf_parse_graph_root(const struct btf *btf,
3988
struct btf_field *field,
3989
struct btf_field_info *info,
3990
const char *node_type_name,
3991
size_t node_type_align)
3992
{
3993
const struct btf_type *t, *n = NULL;
3994
const struct btf_member *member;
3995
u32 offset;
3996
int i;
3997
3998
t = btf_type_by_id(btf, info->graph_root.value_btf_id);
3999
/* We've already checked that value_btf_id is a struct type. We
4000
* just need to figure out the offset of the node field, and
4001
* verify its type.
4002
*/
4003
for_each_member(i, t, member) {
4004
if (strcmp(info->graph_root.node_name,
4005
__btf_name_by_offset(btf, member->name_off)))
4006
continue;
4007
/* Invalid BTF, two members with same name */
4008
if (n)
4009
return -EINVAL;
4010
n = btf_type_by_id(btf, member->type);
4011
if (!__btf_type_is_struct(n))
4012
return -EINVAL;
4013
if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
4014
return -EINVAL;
4015
offset = __btf_member_bit_offset(n, member);
4016
if (offset % 8)
4017
return -EINVAL;
4018
offset /= 8;
4019
if (offset % node_type_align)
4020
return -EINVAL;
4021
4022
field->graph_root.btf = (struct btf *)btf;
4023
field->graph_root.value_btf_id = info->graph_root.value_btf_id;
4024
field->graph_root.node_offset = offset;
4025
}
4026
if (!n)
4027
return -ENOENT;
4028
return 0;
4029
}
4030
4031
static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
4032
struct btf_field_info *info)
4033
{
4034
return btf_parse_graph_root(btf, field, info, "bpf_list_node",
4035
__alignof__(struct bpf_list_node));
4036
}
4037
4038
static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
4039
struct btf_field_info *info)
4040
{
4041
return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
4042
__alignof__(struct bpf_rb_node));
4043
}
4044
4045
static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
4046
{
4047
const struct btf_field *a = (const struct btf_field *)_a;
4048
const struct btf_field *b = (const struct btf_field *)_b;
4049
4050
if (a->offset < b->offset)
4051
return -1;
4052
else if (a->offset > b->offset)
4053
return 1;
4054
return 0;
4055
}
4056
4057
struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
4058
u32 field_mask, u32 value_size)
4059
{
4060
struct btf_field_info info_arr[BTF_FIELDS_MAX];
4061
u32 next_off = 0, field_type_size;
4062
struct btf_record *rec;
4063
int ret, i, cnt;
4064
4065
ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
4066
if (ret < 0)
4067
return ERR_PTR(ret);
4068
if (!ret)
4069
return NULL;
4070
4071
cnt = ret;
4072
/* This needs to be kzalloc to zero out padding and unused fields, see
4073
* comment in btf_record_equal.
4074
*/
4075
rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
4076
if (!rec)
4077
return ERR_PTR(-ENOMEM);
4078
4079
rec->spin_lock_off = -EINVAL;
4080
rec->res_spin_lock_off = -EINVAL;
4081
rec->timer_off = -EINVAL;
4082
rec->wq_off = -EINVAL;
4083
rec->refcount_off = -EINVAL;
4084
rec->task_work_off = -EINVAL;
4085
for (i = 0; i < cnt; i++) {
4086
field_type_size = btf_field_type_size(info_arr[i].type);
4087
if (info_arr[i].off + field_type_size > value_size) {
4088
WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
4089
ret = -EFAULT;
4090
goto end;
4091
}
4092
if (info_arr[i].off < next_off) {
4093
ret = -EEXIST;
4094
goto end;
4095
}
4096
next_off = info_arr[i].off + field_type_size;
4097
4098
rec->field_mask |= info_arr[i].type;
4099
rec->fields[i].offset = info_arr[i].off;
4100
rec->fields[i].type = info_arr[i].type;
4101
rec->fields[i].size = field_type_size;
4102
4103
switch (info_arr[i].type) {
4104
case BPF_SPIN_LOCK:
4105
WARN_ON_ONCE(rec->spin_lock_off >= 0);
4106
/* Cache offset for faster lookup at runtime */
4107
rec->spin_lock_off = rec->fields[i].offset;
4108
break;
4109
case BPF_RES_SPIN_LOCK:
4110
WARN_ON_ONCE(rec->spin_lock_off >= 0);
4111
/* Cache offset for faster lookup at runtime */
4112
rec->res_spin_lock_off = rec->fields[i].offset;
4113
break;
4114
case BPF_TIMER:
4115
WARN_ON_ONCE(rec->timer_off >= 0);
4116
/* Cache offset for faster lookup at runtime */
4117
rec->timer_off = rec->fields[i].offset;
4118
break;
4119
case BPF_WORKQUEUE:
4120
WARN_ON_ONCE(rec->wq_off >= 0);
4121
/* Cache offset for faster lookup at runtime */
4122
rec->wq_off = rec->fields[i].offset;
4123
break;
4124
case BPF_TASK_WORK:
4125
WARN_ON_ONCE(rec->task_work_off >= 0);
4126
rec->task_work_off = rec->fields[i].offset;
4127
break;
4128
case BPF_REFCOUNT:
4129
WARN_ON_ONCE(rec->refcount_off >= 0);
4130
/* Cache offset for faster lookup at runtime */
4131
rec->refcount_off = rec->fields[i].offset;
4132
break;
4133
case BPF_KPTR_UNREF:
4134
case BPF_KPTR_REF:
4135
case BPF_KPTR_PERCPU:
4136
case BPF_UPTR:
4137
ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
4138
if (ret < 0)
4139
goto end;
4140
break;
4141
case BPF_LIST_HEAD:
4142
ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
4143
if (ret < 0)
4144
goto end;
4145
break;
4146
case BPF_RB_ROOT:
4147
ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
4148
if (ret < 0)
4149
goto end;
4150
break;
4151
case BPF_LIST_NODE:
4152
case BPF_RB_NODE:
4153
break;
4154
default:
4155
ret = -EFAULT;
4156
goto end;
4157
}
4158
rec->cnt++;
4159
}
4160
4161
if (rec->spin_lock_off >= 0 && rec->res_spin_lock_off >= 0) {
4162
ret = -EINVAL;
4163
goto end;
4164
}
4165
4166
/* bpf_{list_head, rb_root} require bpf_spin_lock */
4167
if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
4168
btf_record_has_field(rec, BPF_RB_ROOT)) &&
4169
(rec->spin_lock_off < 0 && rec->res_spin_lock_off < 0)) {
4170
ret = -EINVAL;
4171
goto end;
4172
}
4173
4174
if (rec->refcount_off < 0 &&
4175
btf_record_has_field(rec, BPF_LIST_NODE) &&
4176
btf_record_has_field(rec, BPF_RB_NODE)) {
4177
ret = -EINVAL;
4178
goto end;
4179
}
4180
4181
sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
4182
NULL, rec);
4183
4184
return rec;
4185
end:
4186
btf_record_free(rec);
4187
return ERR_PTR(ret);
4188
}
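
/* Example (illustrative): for a map value such as
*
*   struct foo { long data; struct bpf_list_node node; };
*   struct map_value {
*           struct bpf_spin_lock lock;
*           struct bpf_list_head head __contains(foo, node);
*   };
*
* (__contains() emits the "contains:foo:node" decl tag, as in the BPF
* selftests' bpf_experimental.h), btf_parse_fields() returns a
* btf_record with the two fields sorted by offset, caches the lock
* offset in spin_lock_off, and btf_parse_list_head() records which
* struct and member the list is threaded through.
*/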
4189
4190
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
4191
{
4192
int i;
4193
4194
/* There are three types that signify ownership of some other type:
4195
* kptr_ref, bpf_list_head, bpf_rb_root.
4196
* kptr_ref only supports storing kernel types, which can't store
4197
* references to program allocated local types.
4198
*
4199
* Hence we only need to ensure that bpf_{list_head,rb_root} ownership
4200
* does not form cycles.
4201
*/
4202
if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
4203
return 0;
4204
for (i = 0; i < rec->cnt; i++) {
4205
struct btf_struct_meta *meta;
4206
const struct btf_type *t;
4207
u32 btf_id;
4208
4209
if (rec->fields[i].type == BPF_UPTR) {
4210
/* The uptr only supports pinning one page and cannot
4211
* point to a kernel struct
4212
*/
4213
if (btf_is_kernel(rec->fields[i].kptr.btf))
4214
return -EINVAL;
4215
t = btf_type_by_id(rec->fields[i].kptr.btf,
4216
rec->fields[i].kptr.btf_id);
4217
if (!t->size)
4218
return -EINVAL;
4219
if (t->size > PAGE_SIZE)
4220
return -E2BIG;
4221
continue;
4222
}
4223
4224
if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
4225
continue;
4226
btf_id = rec->fields[i].graph_root.value_btf_id;
4227
meta = btf_find_struct_meta(btf, btf_id);
4228
if (!meta)
4229
return -EFAULT;
4230
rec->fields[i].graph_root.value_rec = meta->record;
4231
4232
/* We need to set value_rec for all root types, but no need
4233
* to check ownership cycle for a type unless it's also a
4234
* node type.
4235
*/
4236
if (!(rec->field_mask & BPF_GRAPH_NODE))
4237
continue;
4238
4239
/* We need to ensure ownership acyclicity among all types. The
4240
* proper way to do it would be to topologically sort all BTF
4241
* IDs based on the ownership edges, since there can be multiple
4242
* bpf_{list_head,rb_node} in a type. Instead, we use the
4243
* following reasoning:
4244
*
4245
* - A type can only be owned by another type in user BTF if it
4246
* has a bpf_{list,rb}_node. Let's call these node types.
4247
* - A type can only _own_ another type in user BTF if it has a
4248
* bpf_{list_head,rb_root}. Let's call these root types.
4249
*
4250
* We ensure that if a type is both a root and node, its
4251
* element types cannot be root types.
4252
*
4253
* To ensure acyclicity:
4254
*
4255
* When A is a root type but not a node, its ownership
4256
* chain can be:
4257
* A -> B -> C
4258
* Where:
4259
* - A is a root, e.g. has bpf_rb_root.
4260
* - B is both a root and node, e.g. has bpf_rb_node and
4261
* bpf_list_head.
4262
* - C is only a node, e.g. has bpf_list_node
4263
*
4264
* When A is both a root and node, some other type already
4265
* owns it in the BTF domain, hence it cannot own
4266
* another root type through any of the ownership edges.
4267
* A -> B
4268
* Where:
4269
* - A is both a root and a node.
4270
* - B is only a node.
4271
*/
4272
if (meta->record->field_mask & BPF_GRAPH_ROOT)
4273
return -ELOOP;
4274
}
4275
return 0;
4276
}
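
/* Example (illustrative): the ownership check above accepts a chain
* such as
*
*   struct bar { struct bpf_list_node node; };             (node only)
*   struct foo { struct bpf_rb_node node;                  (root and node)
*                struct bpf_list_head head; };
*   struct out { struct bpf_rb_root root; };               (root only)
*
* i.e. out -> foo -> bar (__contains() tags and locks omitted for
* brevity), but returns -ELOOP if foo's bpf_list_head instead pointed
* at a struct that is itself a root, since a type that is both a root
* and a node must not own another root.
*/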
4277
4278
static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
4279
u32 type_id, void *data, u8 bits_offset,
4280
struct btf_show *show)
4281
{
4282
const struct btf_member *member;
4283
void *safe_data;
4284
u32 i;
4285
4286
safe_data = btf_show_start_struct_type(show, t, type_id, data);
4287
if (!safe_data)
4288
return;
4289
4290
for_each_member(i, t, member) {
4291
const struct btf_type *member_type = btf_type_by_id(btf,
4292
member->type);
4293
const struct btf_kind_operations *ops;
4294
u32 member_offset, bitfield_size;
4295
u32 bytes_offset;
4296
u8 bits8_offset;
4297
4298
btf_show_start_member(show, member);
4299
4300
member_offset = __btf_member_bit_offset(t, member);
4301
bitfield_size = __btf_member_bitfield_size(t, member);
4302
bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
4303
bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
4304
if (bitfield_size) {
4305
safe_data = btf_show_start_type(show, member_type,
4306
member->type,
4307
data + bytes_offset);
4308
if (safe_data)
4309
btf_bitfield_show(safe_data,
4310
bits8_offset,
4311
bitfield_size, show);
4312
btf_show_end_type(show);
4313
} else {
4314
ops = btf_type_ops(member_type);
4315
ops->show(btf, member_type, member->type,
4316
data + bytes_offset, bits8_offset, show);
4317
}
4318
4319
btf_show_end_member(show);
4320
}
4321
4322
btf_show_end_struct_type(show);
4323
}
4324
4325
static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
4326
u32 type_id, void *data, u8 bits_offset,
4327
struct btf_show *show)
4328
{
4329
const struct btf_member *m = show->state.member;
4330
4331
/*
4332
* First check if any members would be shown (are non-zero).
4333
* See comments above "struct btf_show" definition for more
4334
* details on how this works at a high level.
4335
*/
4336
if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
4337
if (!show->state.depth_check) {
4338
show->state.depth_check = show->state.depth + 1;
4339
show->state.depth_to_show = 0;
4340
}
4341
__btf_struct_show(btf, t, type_id, data, bits_offset, show);
4342
/* Restore saved member data here */
4343
show->state.member = m;
4344
if (show->state.depth_check != show->state.depth + 1)
4345
return;
4346
show->state.depth_check = 0;
4347
4348
if (show->state.depth_to_show <= show->state.depth)
4349
return;
4350
/*
4351
* Reaching here indicates we have recursed and found
4352
* non-zero child values.
4353
*/
4354
}
4355
4356
__btf_struct_show(btf, t, type_id, data, bits_offset, show);
4357
}
4358
4359
static const struct btf_kind_operations struct_ops = {
4360
.check_meta = btf_struct_check_meta,
4361
.resolve = btf_struct_resolve,
4362
.check_member = btf_struct_check_member,
4363
.check_kflag_member = btf_generic_check_kflag_member,
4364
.log_details = btf_struct_log,
4365
.show = btf_struct_show,
4366
};
4367
4368
static int btf_enum_check_member(struct btf_verifier_env *env,
4369
const struct btf_type *struct_type,
4370
const struct btf_member *member,
4371
const struct btf_type *member_type)
4372
{
4373
u32 struct_bits_off = member->offset;
4374
u32 struct_size, bytes_offset;
4375
4376
if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4377
btf_verifier_log_member(env, struct_type, member,
4378
"Member is not byte aligned");
4379
return -EINVAL;
4380
}
4381
4382
struct_size = struct_type->size;
4383
bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
4384
if (struct_size - bytes_offset < member_type->size) {
4385
btf_verifier_log_member(env, struct_type, member,
4386
"Member exceeds struct_size");
4387
return -EINVAL;
4388
}
4389
4390
return 0;
4391
}
4392
4393
static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
4394
const struct btf_type *struct_type,
4395
const struct btf_member *member,
4396
const struct btf_type *member_type)
4397
{
4398
u32 struct_bits_off, nr_bits, bytes_end, struct_size;
4399
u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
4400
4401
struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
4402
nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
4403
if (!nr_bits) {
4404
if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
4405
btf_verifier_log_member(env, struct_type, member,
4406
"Member is not byte aligned");
4407
return -EINVAL;
4408
}
4409
4410
nr_bits = int_bitsize;
4411
} else if (nr_bits > int_bitsize) {
4412
btf_verifier_log_member(env, struct_type, member,
4413
"Invalid member bitfield_size");
4414
return -EINVAL;
4415
}
4416
4417
struct_size = struct_type->size;
4418
bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
4419
if (struct_size < bytes_end) {
4420
btf_verifier_log_member(env, struct_type, member,
4421
"Member exceeds struct_size");
4422
return -EINVAL;
4423
}
4424
4425
return 0;
4426
}
4427
4428
static s32 btf_enum_check_meta(struct btf_verifier_env *env,
4429
const struct btf_type *t,
4430
u32 meta_left)
4431
{
4432
const struct btf_enum *enums = btf_type_enum(t);
4433
struct btf *btf = env->btf;
4434
const char *fmt_str;
4435
u16 i, nr_enums;
4436
u32 meta_needed;
4437
4438
nr_enums = btf_type_vlen(t);
4439
meta_needed = nr_enums * sizeof(*enums);
4440
4441
if (meta_left < meta_needed) {
4442
btf_verifier_log_basic(env, t,
4443
"meta_left:%u meta_needed:%u",
4444
meta_left, meta_needed);
4445
return -EINVAL;
4446
}
4447
4448
if (t->size > 8 || !is_power_of_2(t->size)) {
4449
btf_verifier_log_type(env, t, "Unexpected size");
4450
return -EINVAL;
4451
}
4452
4453
/* enum type has either no name or a valid one */
4454
if (t->name_off &&
4455
!btf_name_valid_identifier(env->btf, t->name_off)) {
4456
btf_verifier_log_type(env, t, "Invalid name");
4457
return -EINVAL;
4458
}
4459
4460
btf_verifier_log_type(env, t, NULL);
4461
4462
for (i = 0; i < nr_enums; i++) {
4463
if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4464
btf_verifier_log(env, "\tInvalid name_offset:%u",
4465
enums[i].name_off);
4466
return -EINVAL;
4467
}
4468
4469
/* enum member must have a valid name */
4470
if (!enums[i].name_off ||
4471
!btf_name_valid_identifier(btf, enums[i].name_off)) {
4472
btf_verifier_log_type(env, t, "Invalid name");
4473
return -EINVAL;
4474
}
4475
4476
if (env->log.level == BPF_LOG_KERNEL)
4477
continue;
4478
fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
4479
btf_verifier_log(env, fmt_str,
4480
__btf_name_by_offset(btf, enums[i].name_off),
4481
enums[i].val);
4482
}
4483
4484
return meta_needed;
4485
}
4486
4487
static void btf_enum_log(struct btf_verifier_env *env,
4488
const struct btf_type *t)
4489
{
4490
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4491
}
4492
4493
static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
4494
u32 type_id, void *data, u8 bits_offset,
4495
struct btf_show *show)
4496
{
4497
const struct btf_enum *enums = btf_type_enum(t);
4498
u32 i, nr_enums = btf_type_vlen(t);
4499
void *safe_data;
4500
int v;
4501
4502
safe_data = btf_show_start_type(show, t, type_id, data);
4503
if (!safe_data)
4504
return;
4505
4506
v = *(int *)safe_data;
4507
4508
for (i = 0; i < nr_enums; i++) {
4509
if (v != enums[i].val)
4510
continue;
4511
4512
btf_show_type_value(show, "%s",
4513
__btf_name_by_offset(btf,
4514
enums[i].name_off));
4515
4516
btf_show_end_type(show);
4517
return;
4518
}
4519
4520
if (btf_type_kflag(t))
4521
btf_show_type_value(show, "%d", v);
4522
else
4523
btf_show_type_value(show, "%u", v);
4524
btf_show_end_type(show);
4525
}
4526
4527
static const struct btf_kind_operations enum_ops = {
4528
.check_meta = btf_enum_check_meta,
4529
.resolve = btf_df_resolve,
4530
.check_member = btf_enum_check_member,
4531
.check_kflag_member = btf_enum_check_kflag_member,
4532
.log_details = btf_enum_log,
4533
.show = btf_enum_show,
4534
};
4535
4536
static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
4537
const struct btf_type *t,
4538
u32 meta_left)
4539
{
4540
const struct btf_enum64 *enums = btf_type_enum64(t);
4541
struct btf *btf = env->btf;
4542
const char *fmt_str;
4543
u16 i, nr_enums;
4544
u32 meta_needed;
4545
4546
nr_enums = btf_type_vlen(t);
4547
meta_needed = nr_enums * sizeof(*enums);
4548
4549
if (meta_left < meta_needed) {
4550
btf_verifier_log_basic(env, t,
4551
"meta_left:%u meta_needed:%u",
4552
meta_left, meta_needed);
4553
return -EINVAL;
4554
}
4555
4556
if (t->size > 8 || !is_power_of_2(t->size)) {
4557
btf_verifier_log_type(env, t, "Unexpected size");
4558
return -EINVAL;
4559
}
4560
4561
/* enum type has either no name or a valid one */
4562
if (t->name_off &&
4563
!btf_name_valid_identifier(env->btf, t->name_off)) {
4564
btf_verifier_log_type(env, t, "Invalid name");
4565
return -EINVAL;
4566
}
4567
4568
btf_verifier_log_type(env, t, NULL);
4569
4570
for (i = 0; i < nr_enums; i++) {
4571
if (!btf_name_offset_valid(btf, enums[i].name_off)) {
4572
btf_verifier_log(env, "\tInvalid name_offset:%u",
4573
enums[i].name_off);
4574
return -EINVAL;
4575
}
4576
4577
/* enum member must have a valid name */
4578
if (!enums[i].name_off ||
4579
!btf_name_valid_identifier(btf, enums[i].name_off)) {
4580
btf_verifier_log_type(env, t, "Invalid name");
4581
return -EINVAL;
4582
}
4583
4584
if (env->log.level == BPF_LOG_KERNEL)
4585
continue;
4586
4587
fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
4588
btf_verifier_log(env, fmt_str,
4589
__btf_name_by_offset(btf, enums[i].name_off),
4590
btf_enum64_value(enums + i));
4591
}
4592
4593
return meta_needed;
4594
}
4595
4596
static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
4597
u32 type_id, void *data, u8 bits_offset,
4598
struct btf_show *show)
4599
{
4600
const struct btf_enum64 *enums = btf_type_enum64(t);
4601
u32 i, nr_enums = btf_type_vlen(t);
4602
void *safe_data;
4603
s64 v;
4604
4605
safe_data = btf_show_start_type(show, t, type_id, data);
4606
if (!safe_data)
4607
return;
4608
4609
v = *(u64 *)safe_data;
4610
4611
for (i = 0; i < nr_enums; i++) {
4612
if (v != btf_enum64_value(enums + i))
4613
continue;
4614
4615
btf_show_type_value(show, "%s",
4616
__btf_name_by_offset(btf,
4617
enums[i].name_off));
4618
4619
btf_show_end_type(show);
4620
return;
4621
}
4622
4623
if (btf_type_kflag(t))
4624
btf_show_type_value(show, "%lld", v);
4625
else
4626
btf_show_type_value(show, "%llu", v);
4627
btf_show_end_type(show);
4628
}
4629
4630
static const struct btf_kind_operations enum64_ops = {
4631
.check_meta = btf_enum64_check_meta,
4632
.resolve = btf_df_resolve,
4633
.check_member = btf_enum_check_member,
4634
.check_kflag_member = btf_enum_check_kflag_member,
4635
.log_details = btf_enum_log,
4636
.show = btf_enum64_show,
4637
};
4638
4639
static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
4640
const struct btf_type *t,
4641
u32 meta_left)
4642
{
4643
u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
4644
4645
if (meta_left < meta_needed) {
4646
btf_verifier_log_basic(env, t,
4647
"meta_left:%u meta_needed:%u",
4648
meta_left, meta_needed);
4649
return -EINVAL;
4650
}
4651
4652
if (t->name_off) {
4653
btf_verifier_log_type(env, t, "Invalid name");
4654
return -EINVAL;
4655
}
4656
4657
if (btf_type_kflag(t)) {
4658
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4659
return -EINVAL;
4660
}
4661
4662
btf_verifier_log_type(env, t, NULL);
4663
4664
return meta_needed;
4665
}
4666
4667
static void btf_func_proto_log(struct btf_verifier_env *env,
4668
const struct btf_type *t)
4669
{
4670
const struct btf_param *args = (const struct btf_param *)(t + 1);
4671
u16 nr_args = btf_type_vlen(t), i;
4672
4673
btf_verifier_log(env, "return=%u args=(", t->type);
4674
if (!nr_args) {
4675
btf_verifier_log(env, "void");
4676
goto done;
4677
}
4678
4679
if (nr_args == 1 && !args[0].type) {
4680
/* Only one vararg */
4681
btf_verifier_log(env, "vararg");
4682
goto done;
4683
}
4684
4685
btf_verifier_log(env, "%u %s", args[0].type,
4686
__btf_name_by_offset(env->btf,
4687
args[0].name_off));
4688
for (i = 1; i < nr_args - 1; i++)
4689
btf_verifier_log(env, ", %u %s", args[i].type,
4690
__btf_name_by_offset(env->btf,
4691
args[i].name_off));
4692
4693
if (nr_args > 1) {
4694
const struct btf_param *last_arg = &args[nr_args - 1];
4695
4696
if (last_arg->type)
4697
btf_verifier_log(env, ", %u %s", last_arg->type,
4698
__btf_name_by_offset(env->btf,
4699
last_arg->name_off));
4700
else
4701
btf_verifier_log(env, ", vararg");
4702
}
4703
4704
done:
4705
btf_verifier_log(env, ")");
4706
}
4707
4708
static const struct btf_kind_operations func_proto_ops = {
4709
.check_meta = btf_func_proto_check_meta,
4710
.resolve = btf_df_resolve,
4711
/*
4712
* BTF_KIND_FUNC_PROTO cannot be directly referred to by
4713
* a struct's member.
4714
*
4715
* It should be a function pointer instead.
4716
* (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4717
*
4718
* Hence, there is no btf_func_check_member().
4719
*/
4720
.check_member = btf_df_check_member,
4721
.check_kflag_member = btf_df_check_kflag_member,
4722
.log_details = btf_func_proto_log,
4723
.show = btf_df_show,
4724
};
4725
4726
static s32 btf_func_check_meta(struct btf_verifier_env *env,
4727
const struct btf_type *t,
4728
u32 meta_left)
4729
{
4730
if (!t->name_off ||
4731
!btf_name_valid_identifier(env->btf, t->name_off)) {
4732
btf_verifier_log_type(env, t, "Invalid name");
4733
return -EINVAL;
4734
}
4735
4736
if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
4737
btf_verifier_log_type(env, t, "Invalid func linkage");
4738
return -EINVAL;
4739
}
4740
4741
if (btf_type_kflag(t)) {
4742
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4743
return -EINVAL;
4744
}
4745
4746
btf_verifier_log_type(env, t, NULL);
4747
4748
return 0;
4749
}
4750
4751
static int btf_func_resolve(struct btf_verifier_env *env,
4752
const struct resolve_vertex *v)
4753
{
4754
const struct btf_type *t = v->t;
4755
u32 next_type_id = t->type;
4756
int err;
4757
4758
err = btf_func_check(env, t);
4759
if (err)
4760
return err;
4761
4762
env_stack_pop_resolved(env, next_type_id, 0);
4763
return 0;
4764
}
4765
4766
static const struct btf_kind_operations func_ops = {
4767
.check_meta = btf_func_check_meta,
4768
.resolve = btf_func_resolve,
4769
.check_member = btf_df_check_member,
4770
.check_kflag_member = btf_df_check_kflag_member,
4771
.log_details = btf_ref_type_log,
4772
.show = btf_df_show,
4773
};
4774
4775
static s32 btf_var_check_meta(struct btf_verifier_env *env,
4776
const struct btf_type *t,
4777
u32 meta_left)
4778
{
4779
const struct btf_var *var;
4780
u32 meta_needed = sizeof(*var);
4781
4782
if (meta_left < meta_needed) {
4783
btf_verifier_log_basic(env, t,
4784
"meta_left:%u meta_needed:%u",
4785
meta_left, meta_needed);
4786
return -EINVAL;
4787
}
4788
4789
if (btf_type_vlen(t)) {
4790
btf_verifier_log_type(env, t, "vlen != 0");
4791
return -EINVAL;
4792
}
4793
4794
if (btf_type_kflag(t)) {
4795
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4796
return -EINVAL;
4797
}
4798
4799
if (!t->name_off ||
4800
!btf_name_valid_identifier(env->btf, t->name_off)) {
4801
btf_verifier_log_type(env, t, "Invalid name");
4802
return -EINVAL;
4803
}
4804
4805
/* A var cannot be in type void */
4806
if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
4807
btf_verifier_log_type(env, t, "Invalid type_id");
4808
return -EINVAL;
4809
}
4810
4811
var = btf_type_var(t);
4812
if (var->linkage != BTF_VAR_STATIC &&
4813
var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
4814
btf_verifier_log_type(env, t, "Linkage not supported");
4815
return -EINVAL;
4816
}
4817
4818
btf_verifier_log_type(env, t, NULL);
4819
4820
return meta_needed;
4821
}
4822
4823
static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
4824
{
4825
const struct btf_var *var = btf_type_var(t);
4826
4827
btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4828
}
4829
4830
static const struct btf_kind_operations var_ops = {
4831
.check_meta = btf_var_check_meta,
4832
.resolve = btf_var_resolve,
4833
.check_member = btf_df_check_member,
4834
.check_kflag_member = btf_df_check_kflag_member,
4835
.log_details = btf_var_log,
4836
.show = btf_var_show,
4837
};
4838
4839
static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
4840
const struct btf_type *t,
4841
u32 meta_left)
4842
{
4843
const struct btf_var_secinfo *vsi;
4844
u64 last_vsi_end_off = 0, sum = 0;
4845
u32 i, meta_needed;
4846
4847
meta_needed = btf_type_vlen(t) * sizeof(*vsi);
4848
if (meta_left < meta_needed) {
4849
btf_verifier_log_basic(env, t,
4850
"meta_left:%u meta_needed:%u",
4851
meta_left, meta_needed);
4852
return -EINVAL;
4853
}
4854
4855
if (!t->size) {
4856
btf_verifier_log_type(env, t, "size == 0");
4857
return -EINVAL;
4858
}
4859
4860
if (btf_type_kflag(t)) {
4861
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
4862
return -EINVAL;
4863
}
4864
4865
if (!t->name_off ||
4866
!btf_name_valid_section(env->btf, t->name_off)) {
4867
btf_verifier_log_type(env, t, "Invalid name");
4868
return -EINVAL;
4869
}
4870
4871
btf_verifier_log_type(env, t, NULL);
4872
4873
for_each_vsi(i, t, vsi) {
4874
/* A var cannot be in type void */
4875
if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
4876
btf_verifier_log_vsi(env, t, vsi,
4877
"Invalid type_id");
4878
return -EINVAL;
4879
}
4880
4881
if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
4882
btf_verifier_log_vsi(env, t, vsi,
4883
"Invalid offset");
4884
return -EINVAL;
4885
}
4886
4887
if (!vsi->size || vsi->size > t->size) {
4888
btf_verifier_log_vsi(env, t, vsi,
4889
"Invalid size");
4890
return -EINVAL;
4891
}
4892
4893
last_vsi_end_off = vsi->offset + vsi->size;
4894
if (last_vsi_end_off > t->size) {
4895
btf_verifier_log_vsi(env, t, vsi,
4896
"Invalid offset+size");
4897
return -EINVAL;
4898
}
4899
4900
btf_verifier_log_vsi(env, t, vsi, NULL);
4901
sum += vsi->size;
4902
}
4903
4904
if (t->size < sum) {
4905
btf_verifier_log_type(env, t, "Invalid btf_info size");
4906
return -EINVAL;
4907
}
4908
4909
return meta_needed;
4910
}
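
/* Example (illustrative): a global in a BPF object file, e.g.
*
*   int my_cnt = 1;
*
* becomes a VAR "my_cnt" with global-allocated linkage, referenced from
* a DATASEC ".data" whose btf_var_secinfo carries {type, offset, size}.
* The checks above ensure each secinfo lies inside the section, offsets
* are non-overlapping and non-decreasing, and the sizes sum to no more
* than the section size.
*/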
4911
4912
static int btf_datasec_resolve(struct btf_verifier_env *env,
4913
const struct resolve_vertex *v)
4914
{
4915
const struct btf_var_secinfo *vsi;
4916
struct btf *btf = env->btf;
4917
u16 i;
4918
4919
env->resolve_mode = RESOLVE_TBD;
4920
for_each_vsi_from(i, v->next_member, v->t, vsi) {
4921
u32 var_type_id = vsi->type, type_id, type_size = 0;
4922
const struct btf_type *var_type = btf_type_by_id(env->btf,
4923
var_type_id);
4924
if (!var_type || !btf_type_is_var(var_type)) {
4925
btf_verifier_log_vsi(env, v->t, vsi,
4926
"Not a VAR kind member");
4927
return -EINVAL;
4928
}
4929
4930
if (!env_type_is_resolve_sink(env, var_type) &&
4931
!env_type_is_resolved(env, var_type_id)) {
4932
env_stack_set_next_member(env, i + 1);
4933
return env_stack_push(env, var_type, var_type_id);
4934
}
4935
4936
type_id = var_type->type;
4937
if (!btf_type_id_size(btf, &type_id, &type_size)) {
4938
btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
4939
return -EINVAL;
4940
}
4941
4942
if (vsi->size < type_size) {
4943
btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
4944
return -EINVAL;
4945
}
4946
}
4947
4948
env_stack_pop_resolved(env, 0, 0);
4949
return 0;
4950
}
4951
4952
static void btf_datasec_log(struct btf_verifier_env *env,
4953
const struct btf_type *t)
4954
{
4955
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4956
}
4957
4958
static void btf_datasec_show(const struct btf *btf,
4959
const struct btf_type *t, u32 type_id,
4960
void *data, u8 bits_offset,
4961
struct btf_show *show)
4962
{
4963
const struct btf_var_secinfo *vsi;
4964
const struct btf_type *var;
4965
u32 i;
4966
4967
if (!btf_show_start_type(show, t, type_id, data))
4968
return;
4969
4970
btf_show_type_value(show, "section (\"%s\") = {",
4971
__btf_name_by_offset(btf, t->name_off));
4972
for_each_vsi(i, t, vsi) {
4973
var = btf_type_by_id(btf, vsi->type);
4974
if (i)
4975
btf_show(show, ",");
4976
btf_type_ops(var)->show(btf, var, vsi->type,
4977
data + vsi->offset, bits_offset, show);
4978
}
4979
btf_show_end_type(show);
4980
}
4981
4982
static const struct btf_kind_operations datasec_ops = {
4983
.check_meta = btf_datasec_check_meta,
4984
.resolve = btf_datasec_resolve,
4985
.check_member = btf_df_check_member,
4986
.check_kflag_member = btf_df_check_kflag_member,
4987
.log_details = btf_datasec_log,
4988
.show = btf_datasec_show,
4989
};
4990
4991
static s32 btf_float_check_meta(struct btf_verifier_env *env,
4992
const struct btf_type *t,
4993
u32 meta_left)
4994
{
4995
if (btf_type_vlen(t)) {
4996
btf_verifier_log_type(env, t, "vlen != 0");
4997
return -EINVAL;
4998
}
4999
5000
if (btf_type_kflag(t)) {
5001
btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
5002
return -EINVAL;
5003
}
5004
5005
if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
5006
t->size != 16) {
5007
btf_verifier_log_type(env, t, "Invalid type_size");
5008
return -EINVAL;
5009
}
5010
5011
btf_verifier_log_type(env, t, NULL);
5012
5013
return 0;
5014
}
5015
5016
static int btf_float_check_member(struct btf_verifier_env *env,
5017
const struct btf_type *struct_type,
5018
const struct btf_member *member,
5019
const struct btf_type *member_type)
5020
{
5021
u64 start_offset_bytes;
5022
u64 end_offset_bytes;
5023
u64 misalign_bits;
5024
u64 align_bytes;
5025
u64 align_bits;
5026
5027
/* Different architectures have different alignment requirements, so
5028
* here we check only for the reasonable minimum. This way we ensure
5029
* that types after CO-RE can pass the kernel BTF verifier.
5030
*/
5031
align_bytes = min_t(u64, sizeof(void *), member_type->size);
5032
align_bits = align_bytes * BITS_PER_BYTE;
5033
div64_u64_rem(member->offset, align_bits, &misalign_bits);
5034
if (misalign_bits) {
5035
btf_verifier_log_member(env, struct_type, member,
5036
"Member is not properly aligned");
5037
return -EINVAL;
5038
}
5039
5040
start_offset_bytes = member->offset / BITS_PER_BYTE;
5041
end_offset_bytes = start_offset_bytes + member_type->size;
5042
if (end_offset_bytes > struct_type->size) {
5043
btf_verifier_log_member(env, struct_type, member,
5044
"Member exceeds struct_size");
5045
return -EINVAL;
5046
}
5047
5048
return 0;
5049
}
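
/* Example (illustrative): on a 64-bit kernel a 16-byte long double
* member only needs 8-byte (sizeof(void *)) alignment here, while a
* 4-byte float at bit offset 8 (byte offset 1) is rejected as
* misaligned; stricter arch-specific alignment is left to the compiler
* that produced the BTF.
*/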
5050
5051
static void btf_float_log(struct btf_verifier_env *env,
5052
const struct btf_type *t)
5053
{
5054
btf_verifier_log(env, "size=%u", t->size);
5055
}
5056
5057
static const struct btf_kind_operations float_ops = {
5058
.check_meta = btf_float_check_meta,
5059
.resolve = btf_df_resolve,
5060
.check_member = btf_float_check_member,
5061
.check_kflag_member = btf_generic_check_kflag_member,
5062
.log_details = btf_float_log,
5063
.show = btf_df_show,
5064
};
5065
5066
static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
5067
const struct btf_type *t,
5068
u32 meta_left)
5069
{
5070
const struct btf_decl_tag *tag;
5071
u32 meta_needed = sizeof(*tag);
5072
s32 component_idx;
5073
const char *value;
5074
5075
if (meta_left < meta_needed) {
5076
btf_verifier_log_basic(env, t,
5077
"meta_left:%u meta_needed:%u",
5078
meta_left, meta_needed);
5079
return -EINVAL;
5080
}
5081
5082
value = btf_name_by_offset(env->btf, t->name_off);
5083
if (!value || !value[0]) {
5084
btf_verifier_log_type(env, t, "Invalid value");
5085
return -EINVAL;
5086
}
5087
5088
if (btf_type_vlen(t)) {
5089
btf_verifier_log_type(env, t, "vlen != 0");
5090
return -EINVAL;
5091
}
5092
5093
component_idx = btf_type_decl_tag(t)->component_idx;
5094
if (component_idx < -1) {
5095
btf_verifier_log_type(env, t, "Invalid component_idx");
5096
return -EINVAL;
5097
}
5098
5099
btf_verifier_log_type(env, t, NULL);
5100
5101
return meta_needed;
5102
}
5103
5104
static int btf_decl_tag_resolve(struct btf_verifier_env *env,
5105
const struct resolve_vertex *v)
5106
{
5107
const struct btf_type *next_type;
5108
const struct btf_type *t = v->t;
5109
u32 next_type_id = t->type;
5110
struct btf *btf = env->btf;
5111
s32 component_idx;
5112
u32 vlen;
5113
5114
next_type = btf_type_by_id(btf, next_type_id);
5115
if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
5116
btf_verifier_log_type(env, v->t, "Invalid type_id");
5117
return -EINVAL;
5118
}
5119
5120
if (!env_type_is_resolve_sink(env, next_type) &&
5121
!env_type_is_resolved(env, next_type_id))
5122
return env_stack_push(env, next_type, next_type_id);
5123
5124
component_idx = btf_type_decl_tag(t)->component_idx;
5125
if (component_idx != -1) {
5126
if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
5127
btf_verifier_log_type(env, v->t, "Invalid component_idx");
5128
return -EINVAL;
5129
}
5130
5131
if (btf_type_is_struct(next_type)) {
5132
vlen = btf_type_vlen(next_type);
5133
} else {
5134
/* next_type should be a function */
5135
next_type = btf_type_by_id(btf, next_type->type);
5136
vlen = btf_type_vlen(next_type);
5137
}
5138
5139
if ((u32)component_idx >= vlen) {
5140
btf_verifier_log_type(env, v->t, "Invalid component_idx");
5141
return -EINVAL;
5142
}
5143
}
5144
5145
env_stack_pop_resolved(env, next_type_id, 0);
5146
5147
return 0;
5148
}
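
/* Example (illustrative): with clang's btf_decl_tag attribute,
*
*   struct foo {
*           int a;
*           int b __attribute__((btf_decl_tag("user")));
*   };
*
* produces a DECL_TAG "user" whose type refers to struct foo and whose
* component_idx is 1 (the index of member b).  Tagging a struct, var,
* typedef or func as a whole uses component_idx == -1, and tagging a
* function parameter indexes into the FUNC_PROTO's arguments, which is
* what the resolution above validates.
*/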
5149
5150
static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
5151
{
5152
btf_verifier_log(env, "type=%u component_idx=%d", t->type,
5153
btf_type_decl_tag(t)->component_idx);
5154
}
5155
5156
static const struct btf_kind_operations decl_tag_ops = {
5157
.check_meta = btf_decl_tag_check_meta,
5158
.resolve = btf_decl_tag_resolve,
5159
.check_member = btf_df_check_member,
5160
.check_kflag_member = btf_df_check_kflag_member,
5161
.log_details = btf_decl_tag_log,
5162
.show = btf_df_show,
5163
};
5164
5165
static int btf_func_proto_check(struct btf_verifier_env *env,
5166
const struct btf_type *t)
5167
{
5168
const struct btf_type *ret_type;
5169
const struct btf_param *args;
5170
const struct btf *btf;
5171
u16 nr_args, i;
5172
int err;
5173
5174
btf = env->btf;
5175
args = (const struct btf_param *)(t + 1);
5176
nr_args = btf_type_vlen(t);
5177
5178
/* Check func return type which could be "void" (t->type == 0) */
5179
if (t->type) {
5180
u32 ret_type_id = t->type;
5181
5182
ret_type = btf_type_by_id(btf, ret_type_id);
5183
if (!ret_type) {
5184
btf_verifier_log_type(env, t, "Invalid return type");
5185
return -EINVAL;
5186
}
5187
5188
if (btf_type_is_resolve_source_only(ret_type)) {
5189
btf_verifier_log_type(env, t, "Invalid return type");
5190
return -EINVAL;
5191
}
5192
5193
if (btf_type_needs_resolve(ret_type) &&
5194
!env_type_is_resolved(env, ret_type_id)) {
5195
err = btf_resolve(env, ret_type, ret_type_id);
5196
if (err)
5197
return err;
5198
}
5199
5200
/* Ensure the return type is a type that has a size */
5201
if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
5202
btf_verifier_log_type(env, t, "Invalid return type");
5203
return -EINVAL;
5204
}
5205
}
5206
5207
if (!nr_args)
5208
return 0;
5209
5210
/* Last func arg type_id could be 0 if it is a vararg */
5211
if (!args[nr_args - 1].type) {
5212
if (args[nr_args - 1].name_off) {
5213
btf_verifier_log_type(env, t, "Invalid arg#%u",
5214
nr_args);
5215
return -EINVAL;
5216
}
5217
nr_args--;
5218
}
5219
5220
for (i = 0; i < nr_args; i++) {
5221
const struct btf_type *arg_type;
5222
u32 arg_type_id;
5223
5224
arg_type_id = args[i].type;
5225
arg_type = btf_type_by_id(btf, arg_type_id);
5226
if (!arg_type) {
5227
btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5228
return -EINVAL;
5229
}
5230
5231
if (btf_type_is_resolve_source_only(arg_type)) {
5232
btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5233
return -EINVAL;
5234
}
5235
5236
if (args[i].name_off &&
5237
(!btf_name_offset_valid(btf, args[i].name_off) ||
5238
!btf_name_valid_identifier(btf, args[i].name_off))) {
5239
btf_verifier_log_type(env, t,
5240
"Invalid arg#%u", i + 1);
5241
return -EINVAL;
5242
}
5243
5244
if (btf_type_needs_resolve(arg_type) &&
5245
!env_type_is_resolved(env, arg_type_id)) {
5246
err = btf_resolve(env, arg_type, arg_type_id);
5247
if (err)
5248
return err;
5249
}
5250
5251
if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
5252
btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5253
return -EINVAL;
5254
}
5255
}
5256
5257
return 0;
5258
}
5259
5260
static int btf_func_check(struct btf_verifier_env *env,
5261
const struct btf_type *t)
5262
{
5263
const struct btf_type *proto_type;
5264
const struct btf_param *args;
5265
const struct btf *btf;
5266
u16 nr_args, i;
5267
5268
btf = env->btf;
5269
proto_type = btf_type_by_id(btf, t->type);
5270
5271
if (!proto_type || !btf_type_is_func_proto(proto_type)) {
5272
btf_verifier_log_type(env, t, "Invalid type_id");
5273
return -EINVAL;
5274
}
5275
5276
args = (const struct btf_param *)(proto_type + 1);
5277
nr_args = btf_type_vlen(proto_type);
5278
for (i = 0; i < nr_args; i++) {
5279
if (!args[i].name_off && args[i].type) {
5280
btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
5281
return -EINVAL;
5282
}
5283
}
5284
5285
return 0;
5286
}
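
/* Example (illustrative): for a declaration such as
*
*   extern int foo(int a, ...);
*
* BTF carries FUNC "foo" -> FUNC_PROTO with vlen = 2, where args[0] is
* {type = int, name = "a"} and args[1] is {type = 0, name_off = 0}
* marking the vararg.  btf_func_proto_check() allows the trailing
* zero-typed vararg only if it is unnamed, and btf_func_check()
* additionally requires every typed argument of a FUNC to have a name.
*/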
5287
5288
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
5289
[BTF_KIND_INT] = &int_ops,
5290
[BTF_KIND_PTR] = &ptr_ops,
5291
[BTF_KIND_ARRAY] = &array_ops,
5292
[BTF_KIND_STRUCT] = &struct_ops,
5293
[BTF_KIND_UNION] = &struct_ops,
5294
[BTF_KIND_ENUM] = &enum_ops,
5295
[BTF_KIND_FWD] = &fwd_ops,
5296
[BTF_KIND_TYPEDEF] = &modifier_ops,
5297
[BTF_KIND_VOLATILE] = &modifier_ops,
5298
[BTF_KIND_CONST] = &modifier_ops,
5299
[BTF_KIND_RESTRICT] = &modifier_ops,
5300
[BTF_KIND_FUNC] = &func_ops,
5301
[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
5302
[BTF_KIND_VAR] = &var_ops,
5303
[BTF_KIND_DATASEC] = &datasec_ops,
5304
[BTF_KIND_FLOAT] = &float_ops,
5305
[BTF_KIND_DECL_TAG] = &decl_tag_ops,
5306
[BTF_KIND_TYPE_TAG] = &modifier_ops,
5307
[BTF_KIND_ENUM64] = &enum64_ops,
5308
};
5309
5310
static s32 btf_check_meta(struct btf_verifier_env *env,
5311
const struct btf_type *t,
5312
u32 meta_left)
5313
{
5314
u32 saved_meta_left = meta_left;
5315
s32 var_meta_size;
5316
5317
if (meta_left < sizeof(*t)) {
5318
btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
5319
env->log_type_id, meta_left, sizeof(*t));
5320
return -EINVAL;
5321
}
5322
meta_left -= sizeof(*t);
5323
5324
if (t->info & ~BTF_INFO_MASK) {
5325
btf_verifier_log(env, "[%u] Invalid btf_info:%x",
5326
env->log_type_id, t->info);
5327
return -EINVAL;
5328
}
5329
5330
if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
5331
BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
5332
btf_verifier_log(env, "[%u] Invalid kind:%u",
5333
env->log_type_id, BTF_INFO_KIND(t->info));
5334
return -EINVAL;
5335
}
5336
5337
if (!btf_name_offset_valid(env->btf, t->name_off)) {
5338
btf_verifier_log(env, "[%u] Invalid name_offset:%u",
5339
env->log_type_id, t->name_off);
5340
return -EINVAL;
5341
}
5342
5343
var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
5344
if (var_meta_size < 0)
5345
return var_meta_size;
5346
5347
meta_left -= var_meta_size;
5348
5349
return saved_meta_left - meta_left;
5350
}
5351
5352
static int btf_check_all_metas(struct btf_verifier_env *env)
5353
{
5354
struct btf *btf = env->btf;
5355
struct btf_header *hdr;
5356
void *cur, *end;
5357
5358
hdr = &btf->hdr;
5359
cur = btf->nohdr_data + hdr->type_off;
5360
end = cur + hdr->type_len;
5361
5362
env->log_type_id = btf->base_btf ? btf->start_id : 1;
5363
while (cur < end) {
5364
struct btf_type *t = cur;
5365
s32 meta_size;
5366
5367
meta_size = btf_check_meta(env, t, end - cur);
5368
if (meta_size < 0)
5369
return meta_size;
5370
5371
btf_add_type(env, t);
5372
cur += meta_size;
5373
env->log_type_id++;
5374
}
5375
5376
return 0;
5377
}
5378
5379
static bool btf_resolve_valid(struct btf_verifier_env *env,
5380
const struct btf_type *t,
5381
u32 type_id)
5382
{
5383
struct btf *btf = env->btf;
5384
5385
if (!env_type_is_resolved(env, type_id))
5386
return false;
5387
5388
if (btf_type_is_struct(t) || btf_type_is_datasec(t))
5389
return !btf_resolved_type_id(btf, type_id) &&
5390
!btf_resolved_type_size(btf, type_id);
5391
5392
if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
5393
return btf_resolved_type_id(btf, type_id) &&
5394
!btf_resolved_type_size(btf, type_id);
5395
5396
if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
5397
btf_type_is_var(t)) {
5398
t = btf_type_id_resolve(btf, &type_id);
5399
return t &&
5400
!btf_type_is_modifier(t) &&
5401
!btf_type_is_var(t) &&
5402
!btf_type_is_datasec(t);
5403
}
5404
5405
if (btf_type_is_array(t)) {
5406
const struct btf_array *array = btf_type_array(t);
5407
const struct btf_type *elem_type;
5408
u32 elem_type_id = array->type;
5409
u32 elem_size;
5410
5411
elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
5412
return elem_type && !btf_type_is_modifier(elem_type) &&
5413
(array->nelems * elem_size ==
5414
btf_resolved_type_size(btf, type_id));
5415
}
5416
5417
return false;
5418
}
5419
5420
static int btf_resolve(struct btf_verifier_env *env,
5421
const struct btf_type *t, u32 type_id)
5422
{
5423
u32 save_log_type_id = env->log_type_id;
5424
const struct resolve_vertex *v;
5425
int err = 0;
5426
5427
env->resolve_mode = RESOLVE_TBD;
5428
env_stack_push(env, t, type_id);
5429
while (!err && (v = env_stack_peak(env))) {
5430
env->log_type_id = v->type_id;
5431
err = btf_type_ops(v->t)->resolve(env, v);
5432
}
5433
5434
env->log_type_id = type_id;
5435
if (err == -E2BIG) {
5436
btf_verifier_log_type(env, t,
5437
"Exceeded max resolving depth:%u",
5438
MAX_RESOLVE_DEPTH);
5439
} else if (err == -EEXIST) {
5440
btf_verifier_log_type(env, t, "Loop detected");
5441
}
5442
5443
/* Final sanity check */
5444
if (!err && !btf_resolve_valid(env, t, type_id)) {
5445
btf_verifier_log_type(env, t, "Invalid resolve state");
5446
err = -EINVAL;
5447
}
5448
5449
env->log_type_id = save_log_type_id;
5450
return err;
5451
}
5452
5453
static int btf_check_all_types(struct btf_verifier_env *env)
5454
{
5455
struct btf *btf = env->btf;
5456
const struct btf_type *t;
5457
u32 type_id, i;
5458
int err;
5459
5460
err = env_resolve_init(env);
5461
if (err)
5462
return err;
5463
5464
env->phase++;
5465
for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
5466
type_id = btf->start_id + i;
5467
t = btf_type_by_id(btf, type_id);
5468
5469
env->log_type_id = type_id;
5470
if (btf_type_needs_resolve(t) &&
5471
!env_type_is_resolved(env, type_id)) {
5472
err = btf_resolve(env, t, type_id);
5473
if (err)
5474
return err;
5475
}
5476
5477
if (btf_type_is_func_proto(t)) {
5478
err = btf_func_proto_check(env, t);
5479
if (err)
5480
return err;
5481
}
5482
}
5483
5484
return 0;
5485
}
5486
5487
static int btf_parse_type_sec(struct btf_verifier_env *env)
5488
{
5489
const struct btf_header *hdr = &env->btf->hdr;
5490
int err;
5491
5492
/* Type section must align to 4 bytes */
5493
if (hdr->type_off & (sizeof(u32) - 1)) {
5494
btf_verifier_log(env, "Unaligned type_off");
5495
return -EINVAL;
5496
}
5497
5498
if (!env->btf->base_btf && !hdr->type_len) {
5499
btf_verifier_log(env, "No type found");
5500
return -EINVAL;
5501
}
5502
5503
err = btf_check_all_metas(env);
5504
if (err)
5505
return err;
5506
5507
return btf_check_all_types(env);
5508
}
5509
5510
static int btf_parse_str_sec(struct btf_verifier_env *env)
5511
{
5512
const struct btf_header *hdr;
5513
struct btf *btf = env->btf;
5514
const char *start, *end;
5515
5516
hdr = &btf->hdr;
5517
start = btf->nohdr_data + hdr->str_off;
5518
end = start + hdr->str_len;
5519
5520
if (end != btf->data + btf->data_size) {
5521
btf_verifier_log(env, "String section is not at the end");
5522
return -EINVAL;
5523
}
5524
5525
btf->strings = start;
5526
5527
if (btf->base_btf && !hdr->str_len)
5528
return 0;
5529
if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
5530
btf_verifier_log(env, "Invalid string section");
5531
return -EINVAL;
5532
}
5533
if (!btf->base_btf && start[0]) {
5534
btf_verifier_log(env, "Invalid string section");
5535
return -EINVAL;
5536
}
5537
5538
return 0;
5539
}
5540
5541
static const size_t btf_sec_info_offset[] = {
5542
offsetof(struct btf_header, type_off),
5543
offsetof(struct btf_header, str_off),
5544
};
5545
5546
static int btf_sec_info_cmp(const void *a, const void *b)
5547
{
5548
const struct btf_sec_info *x = a;
5549
const struct btf_sec_info *y = b;
5550
5551
return (int)(x->off - y->off) ? : (int)(x->len - y->len);
5552
}
5553
5554
static int btf_check_sec_info(struct btf_verifier_env *env,
5555
u32 btf_data_size)
5556
{
5557
struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
5558
u32 total, expected_total, i;
5559
const struct btf_header *hdr;
5560
const struct btf *btf;
5561
5562
btf = env->btf;
5563
hdr = &btf->hdr;
5564
5565
/* Populate the secs from hdr */
5566
for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
5567
secs[i] = *(struct btf_sec_info *)((void *)hdr +
5568
btf_sec_info_offset[i]);
5569
5570
sort(secs, ARRAY_SIZE(btf_sec_info_offset),
5571
sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5572
5573
/* Check for gaps and overlap among sections */
5574
total = 0;
5575
expected_total = btf_data_size - hdr->hdr_len;
5576
for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
5577
if (expected_total < secs[i].off) {
5578
btf_verifier_log(env, "Invalid section offset");
5579
return -EINVAL;
5580
}
5581
if (total < secs[i].off) {
5582
/* gap */
5583
btf_verifier_log(env, "Unsupported section found");
5584
return -EINVAL;
5585
}
5586
if (total > secs[i].off) {
5587
btf_verifier_log(env, "Section overlap found");
5588
return -EINVAL;
5589
}
5590
if (expected_total - total < secs[i].len) {
5591
btf_verifier_log(env,
5592
"Total section length too long");
5593
return -EINVAL;
5594
}
5595
total += secs[i].len;
5596
}
5597
5598
/* There is data other than hdr and known sections */
5599
if (expected_total != total) {
5600
btf_verifier_log(env, "Unsupported section found");
5601
return -EINVAL;
5602
}
5603
5604
return 0;
5605
}
5606
5607
static int btf_parse_hdr(struct btf_verifier_env *env)
5608
{
5609
u32 hdr_len, hdr_copy, btf_data_size;
5610
const struct btf_header *hdr;
5611
struct btf *btf;
5612
5613
btf = env->btf;
5614
btf_data_size = btf->data_size;
5615
5616
if (btf_data_size < offsetofend(struct btf_header, hdr_len)) {
5617
btf_verifier_log(env, "hdr_len not found");
5618
return -EINVAL;
5619
}
5620
5621
hdr = btf->data;
5622
hdr_len = hdr->hdr_len;
5623
if (btf_data_size < hdr_len) {
5624
btf_verifier_log(env, "btf_header not found");
5625
return -EINVAL;
5626
}
5627
5628
/* Ensure the unsupported header fields are zero */
5629
if (hdr_len > sizeof(btf->hdr)) {
5630
u8 *expected_zero = btf->data + sizeof(btf->hdr);
5631
u8 *end = btf->data + hdr_len;
5632
5633
for (; expected_zero < end; expected_zero++) {
5634
if (*expected_zero) {
5635
btf_verifier_log(env, "Unsupported btf_header");
5636
return -E2BIG;
5637
}
5638
}
5639
}
5640
5641
hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
5642
memcpy(&btf->hdr, btf->data, hdr_copy);
5643
5644
hdr = &btf->hdr;
5645
5646
btf_verifier_log_hdr(env, btf_data_size);
5647
5648
if (hdr->magic != BTF_MAGIC) {
5649
btf_verifier_log(env, "Invalid magic");
5650
return -EINVAL;
5651
}
5652
5653
if (hdr->version != BTF_VERSION) {
5654
btf_verifier_log(env, "Unsupported version");
5655
return -ENOTSUPP;
5656
}
5657
5658
if (hdr->flags) {
5659
btf_verifier_log(env, "Unsupported flags");
5660
return -ENOTSUPP;
5661
}
5662
5663
if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
5664
btf_verifier_log(env, "No data");
5665
return -EINVAL;
5666
}
5667
5668
return btf_check_sec_info(env, btf_data_size);
5669
}
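
/* Example (illustrative): a minimal non-split BTF blob as the checks
* above expect it:
*
*   +-------------------+ 0
*   | struct btf_header |  magic 0xeB9F, version 1, hdr_len
*   +-------------------+ hdr_len
*   | type section      |  type_off = 0, type_len bytes, 4-byte aligned
*   +-------------------+
*   | string section    |  str_off = type_len, starts/ends with '\0'
*   +-------------------+ data_size
*
* type_off/str_off are relative to the end of the header,
* btf_check_sec_info() insists the two sections cover the rest of the
* blob with no gap or overlap, and btf_parse_str_sec() requires the
* string section to come last.
*/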
5670
5671
static const char *alloc_obj_fields[] = {
5672
"bpf_spin_lock",
5673
"bpf_list_head",
5674
"bpf_list_node",
5675
"bpf_rb_root",
5676
"bpf_rb_node",
5677
"bpf_refcount",
5678
};
5679
5680
static struct btf_struct_metas *
5681
btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
5682
{
5683
struct btf_struct_metas *tab = NULL;
5684
struct btf_id_set *aof;
5685
int i, n, id, ret;
5686
5687
BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0);
5688
BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32));
5689
5690
aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN);
5691
if (!aof)
5692
return ERR_PTR(-ENOMEM);
5693
aof->cnt = 0;
5694
5695
for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) {
5696
/* Try to find whether this special type exists in user BTF, and
5697
* if so remember its ID so we can easily find it among members
5698
* of structs that we iterate in the next loop.
5699
*/
5700
struct btf_id_set *new_aof;
5701
5702
id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5703
if (id < 0)
5704
continue;
5705
5706
new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5707
GFP_KERNEL | __GFP_NOWARN);
5708
if (!new_aof) {
5709
ret = -ENOMEM;
5710
goto free_aof;
5711
}
5712
aof = new_aof;
5713
aof->ids[aof->cnt++] = id;
5714
}
5715
5716
n = btf_nr_types(btf);
5717
for (i = 1; i < n; i++) {
5718
/* Try to find if there are kptrs in user BTF and remember their ID */
5719
struct btf_id_set *new_aof;
5720
struct btf_field_info tmp;
5721
const struct btf_type *t;
5722
5723
t = btf_type_by_id(btf, i);
5724
if (!t) {
5725
ret = -EINVAL;
5726
goto free_aof;
5727
}
5728
5729
ret = btf_find_kptr(btf, t, 0, 0, &tmp, BPF_KPTR);
5730
if (ret != BTF_FIELD_FOUND)
5731
continue;
5732
5733
new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
5734
GFP_KERNEL | __GFP_NOWARN);
5735
if (!new_aof) {
5736
ret = -ENOMEM;
5737
goto free_aof;
5738
}
5739
aof = new_aof;
5740
aof->ids[aof->cnt++] = i;
5741
}
5742
5743
if (!aof->cnt) {
5744
kfree(aof);
5745
return NULL;
5746
}
5747
sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
5748
5749
for (i = 1; i < n; i++) {
5750
struct btf_struct_metas *new_tab;
5751
const struct btf_member *member;
5752
struct btf_struct_meta *type;
5753
struct btf_record *record;
5754
const struct btf_type *t;
5755
int j, tab_cnt;
5756
5757
t = btf_type_by_id(btf, i);
5758
if (!__btf_type_is_struct(t))
5759
continue;
5760
5761
cond_resched();
5762
5763
for_each_member(j, t, member) {
5764
if (btf_id_set_contains(aof, member->type))
5765
goto parse;
5766
}
5767
continue;
5768
parse:
5769
tab_cnt = tab ? tab->cnt : 0;
5770
new_tab = krealloc(tab, struct_size(new_tab, types, tab_cnt + 1),
5771
GFP_KERNEL | __GFP_NOWARN);
5772
if (!new_tab) {
5773
ret = -ENOMEM;
5774
goto free;
5775
}
5776
if (!tab)
5777
new_tab->cnt = 0;
5778
tab = new_tab;
5779
5780
type = &tab->types[tab->cnt];
5781
type->btf_id = i;
5782
record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
5783
BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
5784
BPF_KPTR, t->size);
5785
/* The record cannot be unset, treat it as an error if so */
5786
if (IS_ERR_OR_NULL(record)) {
5787
ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5788
goto free;
5789
}
5790
type->record = record;
5791
tab->cnt++;
5792
}
5793
kfree(aof);
5794
return tab;
5795
free:
5796
btf_struct_metas_free(tab);
5797
free_aof:
5798
kfree(aof);
5799
return ERR_PTR(ret);
5800
}
5801
5802
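/* Binary-search the table built by btf_parse_struct_metas() for @btf_id.
 * struct btf_struct_meta has btf_id as its first member (see the
 * BUILD_BUG_ON below), so btf_id_cmp_func can compare entries directly.
 */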
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
5803
{
5804
struct btf_struct_metas *tab;
5805
5806
BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0);
5807
tab = btf->struct_meta_tab;
5808
if (!tab)
5809
return NULL;
5810
return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5811
}
5812
5813
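/* Validate modifier chains for all types in [start_id, nr_types): any
 * BTF_KIND_TYPE_TAG must come before every other modifier in its chain,
 * and a chain longer than 32 entries is rejected as a probable cycle.
 */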
static int btf_check_type_tags(struct btf_verifier_env *env,
5814
struct btf *btf, int start_id)
5815
{
5816
int i, n, good_id = start_id - 1;
5817
bool in_tags;
5818
5819
n = btf_nr_types(btf);
5820
for (i = start_id; i < n; i++) {
5821
const struct btf_type *t;
5822
int chain_limit = 32;
5823
u32 cur_id = i;
5824
5825
t = btf_type_by_id(btf, i);
5826
if (!t)
5827
return -EINVAL;
5828
if (!btf_type_is_modifier(t))
5829
continue;
5830
5831
cond_resched();
5832
5833
in_tags = btf_type_is_type_tag(t);
5834
while (btf_type_is_modifier(t)) {
5835
if (!chain_limit--) {
5836
btf_verifier_log(env, "Max chain length or cycle detected");
5837
return -ELOOP;
5838
}
5839
if (btf_type_is_type_tag(t)) {
5840
if (!in_tags) {
5841
btf_verifier_log(env, "Type tags don't precede modifiers");
5842
return -EINVAL;
5843
}
5844
} else if (in_tags) {
5845
in_tags = false;
5846
}
5847
if (cur_id <= good_id)
5848
break;
5849
/* Move to next type */
5850
cur_id = t->type;
5851
t = btf_type_by_id(btf, cur_id);
5852
if (!t)
5853
return -EINVAL;
5854
}
5855
good_id = i;
5856
}
5857
return 0;
5858
}
5859
5860
static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
5861
{
5862
u32 log_true_size;
5863
int err;
5864
5865
err = bpf_vlog_finalize(log, &log_true_size);
5866
5867
if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) &&
5868
copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
5869
&log_true_size, sizeof(log_true_size)))
5870
err = -EFAULT;
5871
5872
return err;
5873
}
5874
5875
static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
5876
{
5877
bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5878
char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
5879
struct btf_struct_metas *struct_meta_tab;
5880
struct btf_verifier_env *env = NULL;
5881
struct btf *btf = NULL;
5882
u8 *data;
5883
int err, ret;
5884
5885
if (attr->btf_size > BTF_MAX_SIZE)
5886
return ERR_PTR(-E2BIG);
5887
5888
env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
5889
if (!env)
5890
return ERR_PTR(-ENOMEM);
5891
5892
/* user could have requested verbose verifier output
5893
* and supplied buffer to store the verification trace
5894
*/
5895
err = bpf_vlog_init(&env->log, attr->btf_log_level,
5896
log_ubuf, attr->btf_log_size);
5897
if (err)
5898
goto errout_free;
5899
5900
btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5901
if (!btf) {
5902
err = -ENOMEM;
5903
goto errout;
5904
}
5905
env->btf = btf;
5906
btf->named_start_id = 0;
5907
5908
data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5909
if (!data) {
5910
err = -ENOMEM;
5911
goto errout;
5912
}
5913
5914
btf->data = data;
5915
btf->data_size = attr->btf_size;
5916
5917
if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5918
err = -EFAULT;
5919
goto errout;
5920
}
5921
5922
err = btf_parse_hdr(env);
5923
if (err)
5924
goto errout;
5925
5926
btf->nohdr_data = btf->data + btf->hdr.hdr_len;
5927
5928
err = btf_parse_str_sec(env);
5929
if (err)
5930
goto errout;
5931
5932
err = btf_parse_type_sec(env);
5933
if (err)
5934
goto errout;
5935
5936
err = btf_check_type_tags(env, btf, 1);
5937
if (err)
5938
goto errout;
5939
5940
struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
5941
if (IS_ERR(struct_meta_tab)) {
5942
err = PTR_ERR(struct_meta_tab);
5943
goto errout;
5944
}
5945
btf->struct_meta_tab = struct_meta_tab;
5946
5947
if (struct_meta_tab) {
5948
int i;
5949
5950
for (i = 0; i < struct_meta_tab->cnt; i++) {
5951
err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5952
if (err < 0)
5953
goto errout_meta;
5954
}
5955
}
5956
5957
err = finalize_log(&env->log, uattr, uattr_size);
5958
if (err)
5959
goto errout_free;
5960
5961
btf_verifier_env_free(env);
5962
refcount_set(&btf->refcnt, 1);
5963
return btf;
5964
5965
errout_meta:
5966
btf_free_struct_meta_tab(btf);
5967
errout:
5968
/* overwrite err with -ENOSPC or -EFAULT */
5969
ret = finalize_log(&env->log, uattr, uattr_size);
5970
if (ret)
5971
err = ret;
5972
errout_free:
5973
btf_verifier_env_free(env);
5974
if (btf)
5975
btf_free(btf);
5976
return ERR_PTR(err);
5977
}
5978
5979
extern char __start_BTF[];
5980
extern char __stop_BTF[];
5981
extern struct btf *btf_vmlinux;
5982
5983
#define BPF_MAP_TYPE(_id, _ops)
5984
#define BPF_LINK_TYPE(_id, _name)
5985
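/* For each program type in bpf_types.h the anonymous struct below gets a
 * pair of members: the user-visible context type (e.g. struct __sk_buff)
 * followed by the kernel-side context type (e.g. struct sk_buff).
 * btf_parse_vmlinux() saves the struct's BTF type in bpf_ctx_convert.t,
 * and bpf_ctx_convert_map[] turns a prog_type into the index of its
 * member pair, which is how find_canonical_prog_ctx_type() and
 * find_kern_ctx_type_id() below look up both types.
 */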
static union {
5986
struct bpf_ctx_convert {
5987
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5988
prog_ctx_type _id##_prog; \
5989
kern_ctx_type _id##_kern;
5990
#include <linux/bpf_types.h>
5991
#undef BPF_PROG_TYPE
5992
} *__t;
5993
/* 't' is written once under lock. Read many times. */
5994
const struct btf_type *t;
5995
} bpf_ctx_convert;
5996
enum {
5997
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5998
__ctx_convert##_id,
5999
#include <linux/bpf_types.h>
6000
#undef BPF_PROG_TYPE
6001
__ctx_convert_unused, /* to avoid empty enum in extreme .config */
6002
};
6003
static u8 bpf_ctx_convert_map[] = {
6004
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
6005
[_id] = __ctx_convert##_id,
6006
#include <linux/bpf_types.h>
6007
#undef BPF_PROG_TYPE
6008
0, /* avoid empty array */
6009
};
6010
#undef BPF_MAP_TYPE
6011
#undef BPF_LINK_TYPE
6012
6013
static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type)
6014
{
6015
const struct btf_type *conv_struct;
6016
const struct btf_member *ctx_type;
6017
6018
conv_struct = bpf_ctx_convert.t;
6019
if (!conv_struct)
6020
return NULL;
6021
/* prog_type is valid bpf program type. No need for bounds check. */
6022
ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
6023
/* ctx_type is a pointer to prog_ctx_type in vmlinux.
6024
* Like 'struct __sk_buff'
6025
*/
6026
return btf_type_by_id(btf_vmlinux, ctx_type->type);
6027
}
6028
6029
static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
6030
{
6031
const struct btf_type *conv_struct;
6032
const struct btf_member *ctx_type;
6033
6034
conv_struct = bpf_ctx_convert.t;
6035
if (!conv_struct)
6036
return -EFAULT;
6037
/* prog_type is valid bpf program type. No need for bounds check. */
6038
ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
6039
/* ctx_type is a pointer to kern_ctx_type in vmlinux.
6040
* Like 'struct sk_buff'
6041
*/
6042
return ctx_type->type;
6043
}
6044
6045
bool btf_is_projection_of(const char *pname, const char *tname)
6046
{
6047
if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
6048
return true;
6049
if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
6050
return true;
6051
return false;
6052
}
6053
6054
bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
6055
const struct btf_type *t, enum bpf_prog_type prog_type,
6056
int arg)
6057
{
6058
const struct btf_type *ctx_type;
6059
const char *tname, *ctx_tname;
6060
6061
t = btf_type_by_id(btf, t->type);
6062
6063
/* KPROBE programs allow bpf_user_pt_regs_t typedef, which we need to
6064
* check before we skip all the typedefs below.
6065
*/
6066
if (prog_type == BPF_PROG_TYPE_KPROBE) {
6067
while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
6068
t = btf_type_by_id(btf, t->type);
6069
6070
if (btf_type_is_typedef(t)) {
6071
tname = btf_name_by_offset(btf, t->name_off);
6072
if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
6073
return true;
6074
}
6075
}
6076
6077
while (btf_type_is_modifier(t))
6078
t = btf_type_by_id(btf, t->type);
6079
if (!btf_type_is_struct(t)) {
6080
/* Only pointer to struct is supported for now.
6081
* That means that BPF_PROG_TYPE_TRACEPOINT with BTF
6082
* is not supported yet.
6083
* BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
6084
*/
6085
return false;
6086
}
6087
tname = btf_name_by_offset(btf, t->name_off);
6088
if (!tname) {
6089
bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
6090
return false;
6091
}
6092
6093
ctx_type = find_canonical_prog_ctx_type(prog_type);
6094
if (!ctx_type) {
6095
bpf_log(log, "btf_vmlinux is malformed\n");
6096
/* should not happen */
6097
return false;
6098
}
6099
again:
6100
ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6101
if (!ctx_tname) {
6102
/* should not happen */
6103
bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
6104
return false;
6105
}
6106
/* program types without named context types work only with arg:ctx tag */
6107
if (ctx_tname[0] == '\0')
6108
return false;
6109
/* only compare that prog's ctx type name is the same as
6110
* kernel expects. No need to compare field by field.
6111
* It's ok for bpf prog to do:
6112
* struct __sk_buff {};
6113
* int socket_filter_bpf_prog(struct __sk_buff *skb)
6114
* { // no fields of skb are ever used }
6115
*/
6116
if (btf_is_projection_of(ctx_tname, tname))
6117
return true;
6118
if (strcmp(ctx_tname, tname)) {
6119
/* bpf_user_pt_regs_t is a typedef, so resolve it to
6120
* underlying struct and check name again
6121
*/
6122
if (!btf_type_is_modifier(ctx_type))
6123
return false;
6124
while (btf_type_is_modifier(ctx_type))
6125
ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6126
goto again;
6127
}
6128
return true;
6129
}
6130
6131
/* forward declarations for arch-specific underlying types of
6132
* bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
6133
* compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still
6134
* works correctly with __builtin_types_compatible_p() on respective
6135
* architectures
6136
*/
6137
struct user_regs_struct;
6138
struct user_pt_regs;
6139
6140
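/* Check that an argument declared as a context pointer (e.g. tagged with
 * __arg_ctx) has a type this program type can accept: the canonical
 * context struct, `void *`, or one of the per-program-type special cases
 * handled below (pt_regs variants, u64 * for raw tracepoints, etc.).
 */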
static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
6141
const struct btf_type *t, int arg,
6142
enum bpf_prog_type prog_type,
6143
enum bpf_attach_type attach_type)
6144
{
6145
const struct btf_type *ctx_type;
6146
const char *tname, *ctx_tname;
6147
6148
if (!btf_is_ptr(t)) {
6149
bpf_log(log, "arg#%d type isn't a pointer\n", arg);
6150
return -EINVAL;
6151
}
6152
t = btf_type_by_id(btf, t->type);
6153
6154
/* KPROBE and PERF_EVENT programs allow bpf_user_pt_regs_t typedef */
6155
if (prog_type == BPF_PROG_TYPE_KPROBE || prog_type == BPF_PROG_TYPE_PERF_EVENT) {
6156
while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
6157
t = btf_type_by_id(btf, t->type);
6158
6159
if (btf_type_is_typedef(t)) {
6160
tname = btf_name_by_offset(btf, t->name_off);
6161
if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
6162
return 0;
6163
}
6164
}
6165
6166
/* all other program types don't use typedefs for context type */
6167
while (btf_type_is_modifier(t))
6168
t = btf_type_by_id(btf, t->type);
6169
6170
/* `void *ctx __arg_ctx` is always valid */
6171
if (btf_type_is_void(t))
6172
return 0;
6173
6174
tname = btf_name_by_offset(btf, t->name_off);
6175
if (str_is_empty(tname)) {
6176
bpf_log(log, "arg#%d type doesn't have a name\n", arg);
6177
return -EINVAL;
6178
}
6179
6180
/* special cases */
6181
switch (prog_type) {
6182
case BPF_PROG_TYPE_KPROBE:
6183
if (__btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6184
return 0;
6185
break;
6186
case BPF_PROG_TYPE_PERF_EVENT:
6187
if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6188
__btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6189
return 0;
6190
if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6191
__btf_type_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6192
return 0;
6193
if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6194
__btf_type_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6195
return 0;
6196
break;
6197
case BPF_PROG_TYPE_RAW_TRACEPOINT:
6198
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6199
/* allow u64* as ctx */
6200
if (btf_is_int(t) && t->size == 8)
6201
return 0;
6202
break;
6203
case BPF_PROG_TYPE_TRACING:
6204
switch (attach_type) {
6205
case BPF_TRACE_RAW_TP:
6206
/* tp_btf program is TRACING, so need special case here */
6207
if (__btf_type_is_struct(t) &&
6208
strcmp(tname, "bpf_raw_tracepoint_args") == 0)
6209
return 0;
6210
/* allow u64* as ctx */
6211
if (btf_is_int(t) && t->size == 8)
6212
return 0;
6213
break;
6214
case BPF_TRACE_ITER:
6215
/* allow struct bpf_iter__xxx types only */
6216
if (__btf_type_is_struct(t) &&
6217
strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
6218
return 0;
6219
break;
6220
case BPF_TRACE_FENTRY:
6221
case BPF_TRACE_FEXIT:
6222
case BPF_MODIFY_RETURN:
6223
case BPF_TRACE_FSESSION:
6224
/* allow u64* as ctx */
6225
if (btf_is_int(t) && t->size == 8)
6226
return 0;
6227
break;
6228
default:
6229
break;
6230
}
6231
break;
6232
case BPF_PROG_TYPE_LSM:
6233
case BPF_PROG_TYPE_STRUCT_OPS:
6234
/* allow u64* as ctx */
6235
if (btf_is_int(t) && t->size == 8)
6236
return 0;
6237
break;
6238
case BPF_PROG_TYPE_TRACEPOINT:
6239
case BPF_PROG_TYPE_SYSCALL:
6240
case BPF_PROG_TYPE_EXT:
6241
return 0; /* anything goes */
6242
default:
6243
break;
6244
}
6245
6246
ctx_type = find_canonical_prog_ctx_type(prog_type);
6247
if (!ctx_type) {
6248
/* should not happen */
6249
bpf_log(log, "btf_vmlinux is malformed\n");
6250
return -EINVAL;
6251
}
6252
6253
/* resolve typedefs and check that underlying structs are matching as well */
6254
while (btf_type_is_modifier(ctx_type))
6255
ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
6256
6257
/* if program type doesn't have distinctly named struct type for
6258
* context, then __arg_ctx argument can only be `void *`, which we
6259
* already checked above
6260
*/
6261
if (!__btf_type_is_struct(ctx_type)) {
6262
bpf_log(log, "arg#%d should be void pointer\n", arg);
6263
return -EINVAL;
6264
}
6265
6266
ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
6267
if (!__btf_type_is_struct(t) || strcmp(ctx_tname, tname) != 0) {
6268
bpf_log(log, "arg#%d should be `struct %s *`\n", arg, ctx_tname);
6269
return -EINVAL;
6270
}
6271
6272
return 0;
6273
}
6274
6275
static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
6276
struct btf *btf,
6277
const struct btf_type *t,
6278
enum bpf_prog_type prog_type,
6279
int arg)
6280
{
6281
if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg))
6282
return -ENOENT;
6283
return find_kern_ctx_type_id(prog_type);
6284
}
6285
6286
int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
6287
{
6288
const struct btf_member *kctx_member;
6289
const struct btf_type *conv_struct;
6290
const struct btf_type *kctx_type;
6291
u32 kctx_type_id;
6292
6293
conv_struct = bpf_ctx_convert.t;
6294
/* get member for kernel ctx type */
6295
kctx_member = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
6296
kctx_type_id = kctx_member->type;
6297
kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id);
6298
if (!btf_type_is_struct(kctx_type)) {
6299
bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
6300
return -EINVAL;
6301
}
6302
6303
return kctx_type_id;
6304
}
6305
6306
BTF_ID_LIST_SINGLE(bpf_ctx_convert_btf_id, struct, bpf_ctx_convert)
6307
6308
static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name,
6309
void *data, unsigned int data_size)
6310
{
6311
struct btf *btf = NULL;
6312
int err;
6313
6314
if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF))
6315
return ERR_PTR(-ENOENT);
6316
6317
btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6318
if (!btf) {
6319
err = -ENOMEM;
6320
goto errout;
6321
}
6322
env->btf = btf;
6323
6324
btf->data = data;
6325
btf->data_size = data_size;
6326
btf->kernel_btf = true;
6327
btf->named_start_id = 0;
6328
strscpy(btf->name, name);
6329
6330
err = btf_parse_hdr(env);
6331
if (err)
6332
goto errout;
6333
6334
btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6335
6336
err = btf_parse_str_sec(env);
6337
if (err)
6338
goto errout;
6339
6340
err = btf_check_all_metas(env);
6341
if (err)
6342
goto errout;
6343
6344
err = btf_check_type_tags(env, btf, 1);
6345
if (err)
6346
goto errout;
6347
6348
btf_check_sorted(btf);
6349
refcount_set(&btf->refcnt, 1);
6350
6351
return btf;
6352
6353
errout:
6354
if (btf) {
6355
kvfree(btf->types);
6356
kfree(btf);
6357
}
6358
return ERR_PTR(err);
6359
}
6360
6361
struct btf *btf_parse_vmlinux(void)
6362
{
6363
struct btf_verifier_env *env = NULL;
6364
struct bpf_verifier_log *log;
6365
struct btf *btf;
6366
int err;
6367
6368
env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6369
if (!env)
6370
return ERR_PTR(-ENOMEM);
6371
6372
log = &env->log;
6373
log->level = BPF_LOG_KERNEL;
6374
btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
6375
if (IS_ERR(btf))
6376
goto err_out;
6377
6378
/* btf_parse_vmlinux() runs under bpf_verifier_lock */
6379
bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
6380
err = btf_alloc_id(btf);
6381
if (err) {
6382
btf_free(btf);
6383
btf = ERR_PTR(err);
6384
}
6385
err_out:
6386
btf_verifier_env_free(env);
6387
return btf;
6388
}
6389
6390
/* If .BTF_ids section was created with distilled base BTF, both base and
6391
* split BTF ids will need to be mapped to actual base/split ids for
6392
* BTF now that it has been relocated.
6393
*/
6394
static __u32 btf_relocate_id(const struct btf *btf, __u32 id)
6395
{
6396
if (!btf->base_btf || !btf->base_id_map)
6397
return id;
6398
return btf->base_id_map[id];
6399
}
6400
6401
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
6402
6403
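/* Parse a module's BTF as split BTF.  If the module carries a distilled
 * .BTF.base section, parse that base first and then relocate the split
 * BTF onto the running vmlinux BTF via btf_relocate(); otherwise vmlinux
 * BTF is used as the base directly.
 */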
static struct btf *btf_parse_module(const char *module_name, const void *data,
6404
unsigned int data_size, void *base_data,
6405
unsigned int base_data_size)
6406
{
6407
struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL;
6408
struct btf_verifier_env *env = NULL;
6409
struct bpf_verifier_log *log;
6410
int err = 0;
6411
6412
vmlinux_btf = bpf_get_btf_vmlinux();
6413
if (IS_ERR(vmlinux_btf))
6414
return vmlinux_btf;
6415
if (!vmlinux_btf)
6416
return ERR_PTR(-EINVAL);
6417
6418
env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
6419
if (!env)
6420
return ERR_PTR(-ENOMEM);
6421
6422
log = &env->log;
6423
log->level = BPF_LOG_KERNEL;
6424
6425
if (base_data) {
6426
base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size);
6427
if (IS_ERR(base_btf)) {
6428
err = PTR_ERR(base_btf);
6429
goto errout;
6430
}
6431
} else {
6432
base_btf = vmlinux_btf;
6433
}
6434
6435
btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
6436
if (!btf) {
6437
err = -ENOMEM;
6438
goto errout;
6439
}
6440
env->btf = btf;
6441
6442
btf->base_btf = base_btf;
6443
btf->start_id = base_btf->nr_types;
6444
btf->start_str_off = base_btf->hdr.str_len;
6445
btf->kernel_btf = true;
6446
btf->named_start_id = 0;
6447
strscpy(btf->name, module_name);
6448
6449
btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
6450
if (!btf->data) {
6451
err = -ENOMEM;
6452
goto errout;
6453
}
6454
btf->data_size = data_size;
6455
6456
err = btf_parse_hdr(env);
6457
if (err)
6458
goto errout;
6459
6460
btf->nohdr_data = btf->data + btf->hdr.hdr_len;
6461
6462
err = btf_parse_str_sec(env);
6463
if (err)
6464
goto errout;
6465
6466
err = btf_check_all_metas(env);
6467
if (err)
6468
goto errout;
6469
6470
err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
6471
if (err)
6472
goto errout;
6473
6474
if (base_btf != vmlinux_btf) {
6475
err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map);
6476
if (err)
6477
goto errout;
6478
btf_free(base_btf);
6479
base_btf = vmlinux_btf;
6480
}
6481
6482
btf_verifier_env_free(env);
6483
btf_check_sorted(btf);
6484
refcount_set(&btf->refcnt, 1);
6485
return btf;
6486
6487
errout:
6488
btf_verifier_env_free(env);
6489
if (!IS_ERR(base_btf) && base_btf != vmlinux_btf)
6490
btf_free(base_btf);
6491
if (btf) {
6492
kvfree(btf->data);
6493
kvfree(btf->types);
6494
kfree(btf);
6495
}
6496
return ERR_PTR(err);
6497
}
6498
6499
#endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
6500
6501
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
6502
{
6503
struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6504
6505
if (tgt_prog)
6506
return tgt_prog->aux->btf;
6507
else
6508
return prog->aux->attach_btf;
6509
}
6510
6511
static bool is_void_or_int_ptr(struct btf *btf, const struct btf_type *t)
6512
{
6513
/* skip modifiers */
6514
t = btf_type_skip_modifiers(btf, t->type, NULL);
6515
return btf_type_is_void(t) || btf_type_is_int(t);
6516
}
6517
6518
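/* Map a byte offset into the trampoline-saved argument area back to an
 * argument index: pointers take 8 bytes, everything else is rounded up
 * to 8 bytes.  An offset falling in the slot after the last argument
 * selects the return value (index nr_args); anything beyond that yields
 * nr_args + 1.
 *
 * For example, for a hypothetical `long f(struct sk_buff *skb, u64 len)`:
 * off 0..7 -> 0 (skb), off 8..15 -> 1 (len), off 16..23 -> 2 (retval).
 */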
u32 btf_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
6519
int off)
6520
{
6521
const struct btf_param *args;
6522
const struct btf_type *t;
6523
u32 offset = 0, nr_args;
6524
int i;
6525
6526
if (!func_proto)
6527
return off / 8;
6528
6529
nr_args = btf_type_vlen(func_proto);
6530
args = (const struct btf_param *)(func_proto + 1);
6531
for (i = 0; i < nr_args; i++) {
6532
t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6533
offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6534
if (off < offset)
6535
return i;
6536
}
6537
6538
t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6539
offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
6540
if (off < offset)
6541
return nr_args;
6542
6543
return nr_args + 1;
6544
}
6545
6546
static bool prog_args_trusted(const struct bpf_prog *prog)
6547
{
6548
enum bpf_attach_type atype = prog->expected_attach_type;
6549
6550
switch (prog->type) {
6551
case BPF_PROG_TYPE_TRACING:
6552
return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER;
6553
case BPF_PROG_TYPE_LSM:
6554
return bpf_lsm_is_trusted(prog);
6555
case BPF_PROG_TYPE_STRUCT_OPS:
6556
return true;
6557
default:
6558
return false;
6559
}
6560
}
6561
6562
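/* Byte offset of argument @arg_no in the trampoline argument area: the
 * sum of the sizes of all preceding arguments, each rounded up to 8.
 */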
int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
6563
u32 arg_no)
6564
{
6565
const struct btf_param *args;
6566
const struct btf_type *t;
6567
int off = 0, i;
6568
u32 sz;
6569
6570
args = btf_params(func_proto);
6571
for (i = 0; i < arg_no; i++) {
6572
t = btf_type_by_id(btf, args[i].type);
6573
t = btf_resolve_size(btf, t, &sz);
6574
if (IS_ERR(t))
6575
return PTR_ERR(t);
6576
off += roundup(sz, 8);
6577
}
6578
6579
return off;
6580
}
6581
6582
struct bpf_raw_tp_null_args {
6583
const char *func;
6584
u64 mask;
6585
};
6586
6587
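/* Raw tracepoints with known NULL-ness / IS_ERR() properties for their
 * pointer arguments, which plain BTF cannot express.  Each argument owns
 * a 4-bit nibble in @mask (argument N uses bits N*4..N*4+3): bit 0 means
 * the argument may be NULL, bit 1 means it may carry an IS_ERR()-encoded
 * pointer; see the lookup in btf_ctx_access().  E.g. 0x10 marks the
 * second argument as nullable, 0x2 marks the first argument as possibly
 * IS_ERR()-encoded.
 */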
static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
6588
/* sched */
6589
{ "sched_pi_setprio", 0x10 },
6590
/* ... from sched_numa_pair_template event class */
6591
{ "sched_stick_numa", 0x100 },
6592
{ "sched_swap_numa", 0x100 },
6593
/* afs */
6594
{ "afs_make_fs_call", 0x10 },
6595
{ "afs_make_fs_calli", 0x10 },
6596
{ "afs_make_fs_call1", 0x10 },
6597
{ "afs_make_fs_call2", 0x10 },
6598
{ "afs_protocol_error", 0x1 },
6599
{ "afs_flock_ev", 0x10 },
6600
/* cachefiles */
6601
{ "cachefiles_lookup", 0x1 | 0x200 },
6602
{ "cachefiles_unlink", 0x1 },
6603
{ "cachefiles_rename", 0x1 },
6604
{ "cachefiles_prep_read", 0x1 },
6605
{ "cachefiles_mark_active", 0x1 },
6606
{ "cachefiles_mark_failed", 0x1 },
6607
{ "cachefiles_mark_inactive", 0x1 },
6608
{ "cachefiles_vfs_error", 0x1 },
6609
{ "cachefiles_io_error", 0x1 },
6610
{ "cachefiles_ondemand_open", 0x1 },
6611
{ "cachefiles_ondemand_copen", 0x1 },
6612
{ "cachefiles_ondemand_close", 0x1 },
6613
{ "cachefiles_ondemand_read", 0x1 },
6614
{ "cachefiles_ondemand_cread", 0x1 },
6615
{ "cachefiles_ondemand_fd_write", 0x1 },
6616
{ "cachefiles_ondemand_fd_release", 0x1 },
6617
/* ext4, from ext4__mballoc event class */
6618
{ "ext4_mballoc_discard", 0x10 },
6619
{ "ext4_mballoc_free", 0x10 },
6620
/* fib */
6621
{ "fib_table_lookup", 0x100 },
6622
/* filelock */
6623
/* ... from filelock_lock event class */
6624
{ "posix_lock_inode", 0x10 },
6625
{ "fcntl_setlk", 0x10 },
6626
{ "locks_remove_posix", 0x10 },
6627
{ "flock_lock_inode", 0x10 },
6628
/* ... from filelock_lease event class */
6629
{ "break_lease_noblock", 0x10 },
6630
{ "break_lease_block", 0x10 },
6631
{ "break_lease_unblock", 0x10 },
6632
{ "generic_delete_lease", 0x10 },
6633
{ "time_out_leases", 0x10 },
6634
/* host1x */
6635
{ "host1x_cdma_push_gather", 0x10000 },
6636
/* huge_memory */
6637
{ "mm_khugepaged_scan_pmd", 0x10 },
6638
{ "mm_collapse_huge_page_isolate", 0x1 },
6639
{ "mm_khugepaged_scan_file", 0x10 },
6640
{ "mm_khugepaged_collapse_file", 0x10 },
6641
/* kmem */
6642
{ "mm_page_alloc", 0x1 },
6643
{ "mm_page_pcpu_drain", 0x1 },
6644
/* .. from mm_page event class */
6645
{ "mm_page_alloc_zone_locked", 0x1 },
6646
/* netfs */
6647
{ "netfs_failure", 0x10 },
6648
/* power */
6649
{ "device_pm_callback_start", 0x10 },
6650
/* qdisc */
6651
{ "qdisc_dequeue", 0x1000 },
6652
/* rxrpc */
6653
{ "rxrpc_recvdata", 0x1 },
6654
{ "rxrpc_resend", 0x10 },
6655
{ "rxrpc_tq", 0x10 },
6656
{ "rxrpc_client", 0x1 },
6657
/* skb */
6658
{"kfree_skb", 0x1000},
6659
/* sunrpc */
6660
{ "xs_stream_read_data", 0x1 },
6661
/* ... from xprt_cong_event event class */
6662
{ "xprt_reserve_cong", 0x10 },
6663
{ "xprt_release_cong", 0x10 },
6664
{ "xprt_get_cong", 0x10 },
6665
{ "xprt_put_cong", 0x10 },
6666
/* tcp */
6667
{ "tcp_send_reset", 0x11 },
6668
{ "tcp_sendmsg_locked", 0x100 },
6669
/* tegra_apb_dma */
6670
{ "tegra_dma_tx_status", 0x100 },
6671
/* timer_migration */
6672
{ "tmigr_update_events", 0x1 },
6673
/* writeback, from writeback_folio_template event class */
6674
{ "writeback_dirty_folio", 0x10 },
6675
{ "folio_wait_writeback", 0x10 },
6676
/* rdma */
6677
{ "mr_integ_alloc", 0x2000 },
6678
/* bpf_testmod */
6679
{ "bpf_testmod_test_read", 0x0 },
6680
/* amdgpu */
6681
{ "amdgpu_vm_bo_map", 0x1 },
6682
{ "amdgpu_vm_bo_unmap", 0x1 },
6683
/* netfs */
6684
{ "netfs_folioq", 0x1 },
6685
/* xfs from xfs_defer_pending_class */
6686
{ "xfs_defer_create_intent", 0x1 },
6687
{ "xfs_defer_cancel_list", 0x1 },
6688
{ "xfs_defer_pending_finish", 0x1 },
6689
{ "xfs_defer_pending_abort", 0x1 },
6690
{ "xfs_defer_relog_intent", 0x1 },
6691
{ "xfs_defer_isolate_paused", 0x1 },
6692
{ "xfs_defer_item_pause", 0x1 },
6693
{ "xfs_defer_item_unpause", 0x1 },
6694
/* xfs from xfs_defer_pending_item_class */
6695
{ "xfs_defer_add_item", 0x1 },
6696
{ "xfs_defer_cancel_item", 0x1 },
6697
{ "xfs_defer_finish_item", 0x1 },
6698
/* xfs from xfs_icwalk_class */
6699
{ "xfs_ioc_free_eofblocks", 0x10 },
6700
{ "xfs_blockgc_free_space", 0x10 },
6701
/* xfs from xfs_btree_cur_class */
6702
{ "xfs_btree_updkeys", 0x100 },
6703
{ "xfs_btree_overlapped_query_range", 0x100 },
6704
/* xfs from xfs_imap_class */
6705
{ "xfs_map_blocks_found", 0x10000 },
6706
{ "xfs_map_blocks_alloc", 0x10000 },
6707
{ "xfs_iomap_alloc", 0x1000 },
6708
{ "xfs_iomap_found", 0x1000 },
6709
/* xfs from xfs_fs_class */
6710
{ "xfs_inodegc_flush", 0x1 },
6711
{ "xfs_inodegc_push", 0x1 },
6712
{ "xfs_inodegc_start", 0x1 },
6713
{ "xfs_inodegc_stop", 0x1 },
6714
{ "xfs_inodegc_queue", 0x1 },
6715
{ "xfs_inodegc_throttle", 0x1 },
6716
{ "xfs_fs_sync_fs", 0x1 },
6717
{ "xfs_blockgc_start", 0x1 },
6718
{ "xfs_blockgc_stop", 0x1 },
6719
{ "xfs_blockgc_worker", 0x1 },
6720
{ "xfs_blockgc_flush_all", 0x1 },
6721
/* xfs_scrub */
6722
{ "xchk_nlinks_live_update", 0x10 },
6723
/* xfs_scrub from xchk_metapath_class */
6724
{ "xchk_metapath_lookup", 0x100 },
6725
/* nfsd */
6726
{ "nfsd_dirent", 0x1 },
6727
{ "nfsd_file_acquire", 0x1001 },
6728
{ "nfsd_file_insert_err", 0x1 },
6729
{ "nfsd_file_cons_err", 0x1 },
6730
/* nfs4 */
6731
{ "nfs4_setup_sequence", 0x1 },
6732
{ "pnfs_update_layout", 0x10000 },
6733
{ "nfs4_inode_callback_event", 0x200 },
6734
{ "nfs4_inode_stateid_callback_event", 0x200 },
6735
/* nfs from pnfs_layout_event */
6736
{ "pnfs_mds_fallback_pg_init_read", 0x10000 },
6737
{ "pnfs_mds_fallback_pg_init_write", 0x10000 },
6738
{ "pnfs_mds_fallback_pg_get_mirror_count", 0x10000 },
6739
{ "pnfs_mds_fallback_read_done", 0x10000 },
6740
{ "pnfs_mds_fallback_write_done", 0x10000 },
6741
{ "pnfs_mds_fallback_read_pagelist", 0x10000 },
6742
{ "pnfs_mds_fallback_write_pagelist", 0x10000 },
6743
/* coda */
6744
{ "coda_dec_pic_run", 0x10 },
6745
{ "coda_dec_pic_done", 0x10 },
6746
/* cfg80211 */
6747
{ "cfg80211_scan_done", 0x11 },
6748
{ "rdev_set_coalesce", 0x10 },
6749
{ "cfg80211_report_wowlan_wakeup", 0x100 },
6750
{ "cfg80211_inform_bss_frame", 0x100 },
6751
{ "cfg80211_michael_mic_failure", 0x10000 },
6752
/* cfg80211 from wiphy_work_event */
6753
{ "wiphy_work_queue", 0x10 },
6754
{ "wiphy_work_run", 0x10 },
6755
{ "wiphy_work_cancel", 0x10 },
6756
{ "wiphy_work_flush", 0x10 },
6757
/* hugetlbfs */
6758
{ "hugetlbfs_alloc_inode", 0x10 },
6759
/* spufs */
6760
{ "spufs_context", 0x10 },
6761
/* kvm_hv */
6762
{ "kvm_page_fault_enter", 0x100 },
6763
/* dpu */
6764
{ "dpu_crtc_setup_mixer", 0x100 },
6765
/* binder */
6766
{ "binder_transaction", 0x100 },
6767
/* bcachefs */
6768
{ "btree_path_free", 0x100 },
6769
/* hfi1_tx */
6770
{ "hfi1_sdma_progress", 0x1000 },
6771
/* iptfs */
6772
{ "iptfs_ingress_postq_event", 0x1000 },
6773
/* neigh */
6774
{ "neigh_update", 0x10 },
6775
/* snd_firewire_lib */
6776
{ "amdtp_packet", 0x100 },
6777
};
6778
6779
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
6780
const struct bpf_prog *prog,
6781
struct bpf_insn_access_aux *info)
6782
{
6783
const struct btf_type *t = prog->aux->attach_func_proto;
6784
struct bpf_prog *tgt_prog = prog->aux->dst_prog;
6785
struct btf *btf = bpf_prog_get_target_btf(prog);
6786
const char *tname = prog->aux->attach_func_name;
6787
struct bpf_verifier_log *log = info->log;
6788
const struct btf_param *args;
6789
bool ptr_err_raw_tp = false;
6790
const char *tag_value;
6791
u32 nr_args, arg;
6792
int i, ret;
6793
6794
if (off % 8) {
6795
bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
6796
tname, off);
6797
return false;
6798
}
6799
arg = btf_ctx_arg_idx(btf, t, off);
6800
args = (const struct btf_param *)(t + 1);
6801
/* If t == NULL, fall back to default BPF prog with
6802
* MAX_BPF_FUNC_REG_ARGS u64 arguments.
6803
*/
6804
nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
6805
if (prog->aux->attach_btf_trace) {
6806
/* skip first 'void *__data' argument in btf_trace_##name typedef */
6807
args++;
6808
nr_args--;
6809
}
6810
6811
if (arg > nr_args) {
6812
bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6813
tname, arg + 1);
6814
return false;
6815
}
6816
6817
if (arg == nr_args) {
6818
switch (prog->expected_attach_type) {
6819
case BPF_LSM_MAC:
6820
/* mark we are accessing the return value */
6821
info->is_retval = true;
6822
fallthrough;
6823
case BPF_LSM_CGROUP:
6824
case BPF_TRACE_FEXIT:
6825
case BPF_TRACE_FSESSION:
6826
/* When LSM programs are attached to void LSM hooks
6827
* they use FEXIT trampolines and when attached to
6828
* int LSM hooks, they use MODIFY_RETURN trampolines.
6829
*
6830
* While the LSM programs are BPF_MODIFY_RETURN-like
6831
* the check:
6832
*
6833
* if (ret_type != 'int')
6834
* return -EINVAL;
6835
*
6836
* is _not_ done here. This is still safe as LSM hooks
6837
* have only void and int return types.
6838
*/
6839
if (!t)
6840
return true;
6841
t = btf_type_by_id(btf, t->type);
6842
break;
6843
case BPF_MODIFY_RETURN:
6844
/* For now the BPF_MODIFY_RETURN can only be attached to
6845
* functions that return an int.
6846
*/
6847
if (!t)
6848
return false;
6849
6850
t = btf_type_skip_modifiers(btf, t->type, NULL);
6851
if (!btf_type_is_small_int(t)) {
6852
bpf_log(log,
6853
"ret type %s not allowed for fmod_ret\n",
6854
btf_type_str(t));
6855
return false;
6856
}
6857
break;
6858
default:
6859
bpf_log(log, "func '%s' doesn't have %d-th argument\n",
6860
tname, arg + 1);
6861
return false;
6862
}
6863
} else {
6864
if (!t)
6865
/* Default prog with MAX_BPF_FUNC_REG_ARGS args */
6866
return true;
6867
t = btf_type_by_id(btf, args[arg].type);
6868
}
6869
6870
/* skip modifiers */
6871
while (btf_type_is_modifier(t))
6872
t = btf_type_by_id(btf, t->type);
6873
if (btf_type_is_small_int(t) || btf_is_any_enum(t) || btf_type_is_struct(t))
6874
/* accessing a scalar */
6875
return true;
6876
if (!btf_type_is_ptr(t)) {
6877
bpf_log(log,
6878
"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
6879
tname, arg,
6880
__btf_name_by_offset(btf, t->name_off),
6881
btf_type_str(t));
6882
return false;
6883
}
6884
6885
if (size != sizeof(u64)) {
6886
bpf_log(log, "func '%s' size %d must be 8\n",
6887
tname, size);
6888
return false;
6889
}
6890
6891
/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
6892
for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6893
const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6894
u32 type, flag;
6895
6896
type = base_type(ctx_arg_info->reg_type);
6897
flag = type_flag(ctx_arg_info->reg_type);
6898
if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
6899
(flag & PTR_MAYBE_NULL)) {
6900
info->reg_type = ctx_arg_info->reg_type;
6901
return true;
6902
}
6903
}
6904
6905
/*
6906
* If it's a pointer to void, it's the same as scalar from the verifier
6907
* safety POV. Either way, no further pointer walking is allowed.
6908
*/
6909
if (is_void_or_int_ptr(btf, t))
6910
return true;
6911
6912
/* this is a pointer to another type */
6913
for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
6914
const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
6915
6916
if (ctx_arg_info->offset == off) {
6917
if (!ctx_arg_info->btf_id) {
6918
bpf_log(log,"invalid btf_id for context argument offset %u\n", off);
6919
return false;
6920
}
6921
6922
info->reg_type = ctx_arg_info->reg_type;
6923
info->btf = ctx_arg_info->btf ? : btf_vmlinux;
6924
info->btf_id = ctx_arg_info->btf_id;
6925
info->ref_obj_id = ctx_arg_info->ref_obj_id;
6926
return true;
6927
}
6928
}
6929
6930
info->reg_type = PTR_TO_BTF_ID;
6931
if (prog_args_trusted(prog))
6932
info->reg_type |= PTR_TRUSTED;
6933
6934
if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
6935
info->reg_type |= PTR_MAYBE_NULL;
6936
6937
if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
6938
struct btf *btf = prog->aux->attach_btf;
6939
const struct btf_type *t;
6940
const char *tname;
6941
6942
/* BTF lookups should not fail; return false defensively if they do */
6943
t = btf_type_by_id(btf, prog->aux->attach_btf_id);
6944
if (!t)
6945
return false;
6946
tname = btf_name_by_offset(btf, t->name_off);
6947
if (!tname)
6948
return false;
6949
/* Checked by bpf_check_attach_target */
6950
tname += sizeof("btf_trace_") - 1;
6951
for (i = 0; i < ARRAY_SIZE(raw_tp_null_args); i++) {
6952
/* Is this a func with potential NULL args? */
6953
if (strcmp(tname, raw_tp_null_args[i].func))
6954
continue;
6955
if (raw_tp_null_args[i].mask & (0x1ULL << (arg * 4)))
6956
info->reg_type |= PTR_MAYBE_NULL;
6957
/* Is the current arg IS_ERR? */
6958
if (raw_tp_null_args[i].mask & (0x2ULL << (arg * 4)))
6959
ptr_err_raw_tp = true;
6960
break;
6961
}
6962
/* If we don't know NULL-ness specification and the tracepoint
6963
* is coming from a loadable module, be conservative and mark
6964
* argument as PTR_MAYBE_NULL.
6965
*/
6966
if (i == ARRAY_SIZE(raw_tp_null_args) && btf_is_module(btf))
6967
info->reg_type |= PTR_MAYBE_NULL;
6968
}
6969
6970
if (tgt_prog) {
6971
enum bpf_prog_type tgt_type;
6972
6973
if (tgt_prog->type == BPF_PROG_TYPE_EXT)
6974
tgt_type = tgt_prog->aux->saved_dst_prog_type;
6975
else
6976
tgt_type = tgt_prog->type;
6977
6978
ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
6979
if (ret > 0) {
6980
info->btf = btf_vmlinux;
6981
info->btf_id = ret;
6982
return true;
6983
} else {
6984
return false;
6985
}
6986
}
6987
6988
info->btf = btf;
6989
info->btf_id = t->type;
6990
t = btf_type_by_id(btf, t->type);
6991
6992
if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) {
6993
tag_value = __btf_name_by_offset(btf, t->name_off);
6994
if (strcmp(tag_value, "user") == 0)
6995
info->reg_type |= MEM_USER;
6996
if (strcmp(tag_value, "percpu") == 0)
6997
info->reg_type |= MEM_PERCPU;
6998
}
6999
7000
/* skip modifiers */
7001
while (btf_type_is_modifier(t)) {
7002
info->btf_id = t->type;
7003
t = btf_type_by_id(btf, t->type);
7004
}
7005
if (!btf_type_is_struct(t)) {
7006
bpf_log(log,
7007
"func '%s' arg%d type %s is not a struct\n",
7008
tname, arg, btf_type_str(t));
7009
return false;
7010
}
7011
bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
7012
tname, arg, info->btf_id, btf_type_str(t),
7013
__btf_name_by_offset(btf, t->name_off));
7014
7015
/* Perform all checks on the validity of type for this argument, but if
7016
* we know it can be IS_ERR at runtime, scrub pointer type and mark as
7017
* scalar.
7018
*/
7019
if (ptr_err_raw_tp) {
7020
bpf_log(log, "marking pointer arg%d as scalar as it may encode error", arg);
7021
info->reg_type = SCALAR_VALUE;
7022
}
7023
return true;
7024
}
7025
EXPORT_SYMBOL_GPL(btf_ctx_access);
7026
7027
enum bpf_struct_walk_result {
7028
/* < 0 error */
7029
WALK_SCALAR = 0,
7030
WALK_PTR,
7031
WALK_PTR_UNTRUSTED,
7032
WALK_STRUCT,
7033
};
7034
7035
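/* Walk struct @t to classify an access of @size bytes at offset @off:
 * WALK_SCALAR for plain data (including int arrays used as scratch space,
 * e.g. skb->cb[]), WALK_PTR when the access hits exactly a pointer to a
 * struct (pointee id returned in *next_btf_id, __user/__percpu/__rcu tags
 * in *flag), WALK_PTR_UNTRUSTED for pointers to anything else, and
 * WALK_STRUCT when @off lands exactly at the start of an embedded struct
 * so the caller can restart the walk from that member's type.  Offsets
 * inside an embedded struct or array are resolved by looping internally;
 * bad accesses return a negative errno.
 */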
static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
7036
const struct btf_type *t, int off, int size,
7037
u32 *next_btf_id, enum bpf_type_flag *flag,
7038
const char **field_name)
7039
{
7040
u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
7041
const struct btf_type *mtype, *elem_type = NULL;
7042
const struct btf_member *member;
7043
const char *tname, *mname, *tag_value;
7044
u32 vlen, elem_id, mid;
7045
7046
again:
7047
if (btf_type_is_modifier(t))
7048
t = btf_type_skip_modifiers(btf, t->type, NULL);
7049
tname = __btf_name_by_offset(btf, t->name_off);
7050
if (!btf_type_is_struct(t)) {
7051
bpf_log(log, "Type '%s' is not a struct\n", tname);
7052
return -EINVAL;
7053
}
7054
7055
vlen = btf_type_vlen(t);
7056
if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED))
7057
/*
7058
* walking unions yields untrusted pointers
7059
* with exception of __bpf_md_ptr and other
7060
* unions with a single member
7061
*/
7062
*flag |= PTR_UNTRUSTED;
7063
7064
if (off + size > t->size) {
7065
/* If the last element is a variable size array, we may
7066
* need to relax the rule.
7067
*/
7068
struct btf_array *array_elem;
7069
7070
if (vlen == 0)
7071
goto error;
7072
7073
member = btf_type_member(t) + vlen - 1;
7074
mtype = btf_type_skip_modifiers(btf, member->type,
7075
NULL);
7076
if (!btf_type_is_array(mtype))
7077
goto error;
7078
7079
array_elem = (struct btf_array *)(mtype + 1);
7080
if (array_elem->nelems != 0)
7081
goto error;
7082
7083
moff = __btf_member_bit_offset(t, member) / 8;
7084
if (off < moff)
7085
goto error;
7086
7087
/* allow structure and integer */
7088
t = btf_type_skip_modifiers(btf, array_elem->type,
7089
NULL);
7090
7091
if (btf_type_is_int(t))
7092
return WALK_SCALAR;
7093
7094
if (!btf_type_is_struct(t))
7095
goto error;
7096
7097
off = (off - moff) % t->size;
7098
goto again;
7099
7100
error:
7101
bpf_log(log, "access beyond struct %s at off %u size %u\n",
7102
tname, off, size);
7103
return -EACCES;
7104
}
7105
7106
for_each_member(i, t, member) {
7107
/* offset of the field in bytes */
7108
moff = __btf_member_bit_offset(t, member) / 8;
7109
if (off + size <= moff)
7110
/* won't find anything, field is already too far */
7111
break;
7112
7113
if (__btf_member_bitfield_size(t, member)) {
7114
u32 end_bit = __btf_member_bit_offset(t, member) +
7115
__btf_member_bitfield_size(t, member);
7116
7117
/* off <= moff instead of off == moff because clang
7118
* does not generate a BTF member for anonymous
7119
* bitfield like the ":16" here:
7120
* struct {
7121
* int :16;
7122
* int x:8;
7123
* };
7124
*/
7125
if (off <= moff &&
7126
BITS_ROUNDUP_BYTES(end_bit) <= off + size)
7127
return WALK_SCALAR;
7128
7129
/* off may be accessing a following member
7130
*
7131
* or
7132
*
7133
* Doing partial access at either end of this
7134
* bitfield. Continue on this case also to
7135
* treat it as not accessing this bitfield
7136
* and eventually error out as field not
7137
* found to keep it simple.
7138
* It could be relaxed if there was a legit
7139
* partial access case later.
7140
*/
7141
continue;
7142
}
7143
7144
/* In case of "off" is pointing to holes of a struct */
7145
if (off < moff)
7146
break;
7147
7148
/* type of the field */
7149
mid = member->type;
7150
mtype = btf_type_by_id(btf, member->type);
7151
mname = __btf_name_by_offset(btf, member->name_off);
7152
7153
mtype = __btf_resolve_size(btf, mtype, &msize,
7154
&elem_type, &elem_id, &total_nelems,
7155
&mid);
7156
if (IS_ERR(mtype)) {
7157
bpf_log(log, "field %s doesn't have size\n", mname);
7158
return -EFAULT;
7159
}
7160
7161
mtrue_end = moff + msize;
7162
if (off >= mtrue_end)
7163
/* no overlap with member, keep iterating */
7164
continue;
7165
7166
if (btf_type_is_array(mtype)) {
7167
u32 elem_idx;
7168
7169
/* __btf_resolve_size() above helps to
7170
* linearize a multi-dimensional array.
7171
*
7172
* The logic here is treating an array
7173
* in a struct as the following way:
7174
*
7175
* struct outer {
7176
* struct inner array[2][2];
7177
* };
7178
*
7179
* looks like:
7180
*
7181
* struct outer {
7182
* struct inner array_elem0;
7183
* struct inner array_elem1;
7184
* struct inner array_elem2;
7185
* struct inner array_elem3;
7186
* };
7187
*
7188
* When accessing outer->array[1][0], it moves
7189
* moff to "array_elem2", set mtype to
7190
* "struct inner", and msize also becomes
7191
* sizeof(struct inner). Then most of the
7192
* remaining logic will fall through without
7193
* caring whether the current member is an array or
7194
* not.
7195
*
7196
* Unlike mtype/msize/moff, mtrue_end does not
7197
* change. The naming difference ("_true") indicates
7198
* that it does not always correspond to
7199
* the current mtype/msize/moff.
7200
* It is the true end of the current
7201
* member (i.e. array in this case). That
7202
* will allow an int array to be accessed like
7203
* a scratch space,
7204
* i.e. allow access beyond the size of
7205
* the array's element as long as it is
7206
* within the mtrue_end boundary.
7207
*/
7208
7209
/* skip empty array */
7210
if (moff == mtrue_end)
7211
continue;
7212
7213
msize /= total_nelems;
7214
elem_idx = (off - moff) / msize;
7215
moff += elem_idx * msize;
7216
mtype = elem_type;
7217
mid = elem_id;
7218
}
7219
7220
/* the 'off' we're looking for is either equal to start
7221
* of this field or inside of this struct
7222
*/
7223
if (btf_type_is_struct(mtype)) {
7224
/* our field must be inside that union or struct */
7225
t = mtype;
7226
7227
/* return if the offset matches the member offset */
7228
if (off == moff) {
7229
*next_btf_id = mid;
7230
return WALK_STRUCT;
7231
}
7232
7233
/* adjust offset we're looking for */
7234
off -= moff;
7235
goto again;
7236
}
7237
7238
if (btf_type_is_ptr(mtype)) {
7239
const struct btf_type *stype, *t;
7240
enum bpf_type_flag tmp_flag = 0;
7241
u32 id;
7242
7243
if (msize != size || off != moff) {
7244
bpf_log(log,
7245
"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
7246
mname, moff, tname, off, size);
7247
return -EACCES;
7248
}
7249
7250
/* check type tag */
7251
t = btf_type_by_id(btf, mtype->type);
7252
if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) {
7253
tag_value = __btf_name_by_offset(btf, t->name_off);
7254
/* check __user tag */
7255
if (strcmp(tag_value, "user") == 0)
7256
tmp_flag = MEM_USER;
7257
/* check __percpu tag */
7258
if (strcmp(tag_value, "percpu") == 0)
7259
tmp_flag = MEM_PERCPU;
7260
/* check __rcu tag */
7261
if (strcmp(tag_value, "rcu") == 0)
7262
tmp_flag = MEM_RCU;
7263
}
7264
7265
stype = btf_type_skip_modifiers(btf, mtype->type, &id);
7266
if (btf_type_is_struct(stype)) {
7267
*next_btf_id = id;
7268
*flag |= tmp_flag;
7269
if (field_name)
7270
*field_name = mname;
7271
return WALK_PTR;
7272
}
7273
7274
return WALK_PTR_UNTRUSTED;
7275
}
7276
7277
/* Allow more flexible access within an int as long as
7278
* it is within mtrue_end.
7279
* Since mtrue_end could be the end of an array,
7280
* that also allows using an array of int as a scratch
7281
* space. e.g. skb->cb[].
7282
*/
7283
if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) {
7284
bpf_log(log,
7285
"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
7286
mname, mtrue_end, tname, off, size);
7287
return -EACCES;
7288
}
7289
7290
return WALK_SCALAR;
7291
}
7292
bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
7293
return -EINVAL;
7294
}
7295
7296
int btf_struct_access(struct bpf_verifier_log *log,
7297
const struct bpf_reg_state *reg,
7298
int off, int size, enum bpf_access_type atype __maybe_unused,
7299
u32 *next_btf_id, enum bpf_type_flag *flag,
7300
const char **field_name)
7301
{
7302
const struct btf *btf = reg->btf;
7303
enum bpf_type_flag tmp_flag = 0;
7304
const struct btf_type *t;
7305
u32 id = reg->btf_id;
7306
int err;
7307
7308
while (type_is_alloc(reg->type)) {
7309
struct btf_struct_meta *meta;
7310
struct btf_record *rec;
7311
int i;
7312
7313
meta = btf_find_struct_meta(btf, id);
7314
if (!meta)
7315
break;
7316
rec = meta->record;
7317
for (i = 0; i < rec->cnt; i++) {
7318
struct btf_field *field = &rec->fields[i];
7319
u32 offset = field->offset;
7320
if (off < offset + field->size && offset < off + size) {
7321
bpf_log(log,
7322
"direct access to %s is disallowed\n",
7323
btf_field_type_name(field->type));
7324
return -EACCES;
7325
}
7326
}
7327
break;
7328
}
7329
7330
t = btf_type_by_id(btf, id);
7331
do {
7332
err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
7333
7334
switch (err) {
7335
case WALK_PTR:
7336
/* For local types, the destination register cannot
7337
* become a pointer again.
7338
*/
7339
if (type_is_alloc(reg->type))
7340
return SCALAR_VALUE;
7341
/* If we found the pointer or scalar on t+off,
7342
* we're done.
7343
*/
7344
*next_btf_id = id;
7345
*flag = tmp_flag;
7346
return PTR_TO_BTF_ID;
7347
case WALK_PTR_UNTRUSTED:
7348
*flag = MEM_RDONLY | PTR_UNTRUSTED;
7349
return PTR_TO_MEM;
7350
case WALK_SCALAR:
7351
return SCALAR_VALUE;
7352
case WALK_STRUCT:
7353
/* We found nested struct, so continue the search
7354
* by diving in it. At this point the offset is
7355
* aligned with the new type, so set it to 0.
7356
*/
7357
t = btf_type_by_id(btf, id);
7358
off = 0;
7359
break;
7360
default:
7361
/* It's either an error or an unknown return value;
7362
* scream and leave.
7363
*/
7364
if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
7365
return -EINVAL;
7366
return err;
7367
}
7368
} while (t);
7369
7370
return -EINVAL;
7371
}
7372
7373
/* Check that two BTF types, each specified as a BTF object + id, are exactly
7374
* the same. Trivial ID check is not enough due to module BTFs, because we can
7375
* end up with two different module BTFs, but IDs point to the common type in
7376
* vmlinux BTF.
7377
*/
7378
bool btf_types_are_same(const struct btf *btf1, u32 id1,
7379
const struct btf *btf2, u32 id2)
7380
{
7381
if (id1 != id2)
7382
return false;
7383
if (btf1 == btf2)
7384
return true;
7385
return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
7386
}
7387
7388
bool btf_struct_ids_match(struct bpf_verifier_log *log,
7389
const struct btf *btf, u32 id, int off,
7390
const struct btf *need_btf, u32 need_type_id,
7391
bool strict)
7392
{
7393
const struct btf_type *type;
7394
enum bpf_type_flag flag = 0;
7395
int err;
7396
7397
/* Are we already done? */
7398
if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
7399
return true;
7400
/* In case of strict type match, we do not walk struct, the top level
7401
* type match must succeed. When strict is true, off should have already
7402
* been 0.
7403
*/
7404
if (strict)
7405
return false;
7406
again:
7407
type = btf_type_by_id(btf, id);
7408
if (!type)
7409
return false;
7410
err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
7411
if (err != WALK_STRUCT)
7412
return false;
7413
7414
/* We found nested struct object. If it matches
7415
* the requested ID, we're done. Otherwise let's
7416
* continue the search with offset 0 in the new
7417
* type.
7418
*/
7419
if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
7420
off = 0;
7421
goto again;
7422
}
7423
7424
return true;
7425
}
7426
7427
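/* Size a value of type @btf_id occupies when passed to or returned from a
 * function: 0 for void, sizeof(void *) for pointers, t->size for ints,
 * enums and structs; anything else is -EINVAL.  The modifier-skipped type
 * is returned in *ret_type.
 */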
static int __get_type_size(struct btf *btf, u32 btf_id,
7428
const struct btf_type **ret_type)
7429
{
7430
const struct btf_type *t;
7431
7432
*ret_type = btf_type_by_id(btf, 0);
7433
if (!btf_id)
7434
/* void */
7435
return 0;
7436
t = btf_type_by_id(btf, btf_id);
7437
while (t && btf_type_is_modifier(t))
7438
t = btf_type_by_id(btf, t->type);
7439
if (!t)
7440
return -EINVAL;
7441
*ret_type = t;
7442
if (btf_type_is_ptr(t))
7443
/* kernel size of pointer. Not BPF's size of pointer */
7444
return sizeof(void *);
7445
if (btf_type_is_int(t) || btf_is_any_enum(t) || btf_type_is_struct(t))
7446
return t->size;
7447
return -EINVAL;
7448
}
7449
7450
static u8 __get_type_fmodel_flags(const struct btf_type *t)
7451
{
7452
u8 flags = 0;
7453
7454
if (btf_type_is_struct(t))
7455
flags |= BTF_FMODEL_STRUCT_ARG;
7456
if (btf_type_is_signed_int(t))
7457
flags |= BTF_FMODEL_SIGNED_ARG;
7458
7459
return flags;
7460
}
7461
7462
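/* Distill a BTF FUNC_PROTO into a struct btf_func_model (arg count,
 * per-arg size and struct/signed flags, return size/flags), which is what
 * the trampoline code works with.  Without BTF (func == NULL) it assumes
 * MAX_BPF_FUNC_REG_ARGS 8-byte arguments.  Variadic functions, struct
 * returns and arguments larger than 16 bytes are rejected.
 */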
int btf_distill_func_proto(struct bpf_verifier_log *log,
7463
struct btf *btf,
7464
const struct btf_type *func,
7465
const char *tname,
7466
struct btf_func_model *m)
7467
{
7468
const struct btf_param *args;
7469
const struct btf_type *t;
7470
u32 i, nargs;
7471
int ret;
7472
7473
if (!func) {
7474
/* BTF function prototype doesn't match the verifier types.
7475
* Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
7476
*/
7477
for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7478
m->arg_size[i] = 8;
7479
m->arg_flags[i] = 0;
7480
}
7481
m->ret_size = 8;
7482
m->ret_flags = 0;
7483
m->nr_args = MAX_BPF_FUNC_REG_ARGS;
7484
return 0;
7485
}
7486
args = (const struct btf_param *)(func + 1);
7487
nargs = btf_type_vlen(func);
7488
if (nargs > MAX_BPF_FUNC_ARGS) {
7489
bpf_log(log,
7490
"The function %s has %d arguments. Too many.\n",
7491
tname, nargs);
7492
return -EINVAL;
7493
}
7494
ret = __get_type_size(btf, func->type, &t);
7495
if (ret < 0 || btf_type_is_struct(t)) {
7496
bpf_log(log,
7497
"The function %s return type %s is unsupported.\n",
7498
tname, btf_type_str(t));
7499
return -EINVAL;
7500
}
7501
m->ret_size = ret;
7502
m->ret_flags = __get_type_fmodel_flags(t);
7503
7504
for (i = 0; i < nargs; i++) {
7505
if (i == nargs - 1 && args[i].type == 0) {
7506
bpf_log(log,
7507
"The function %s with variable args is unsupported.\n",
7508
tname);
7509
return -EINVAL;
7510
}
7511
ret = __get_type_size(btf, args[i].type, &t);
7512
7513
/* No support of struct argument size greater than 16 bytes */
7514
if (ret < 0 || ret > 16) {
7515
bpf_log(log,
7516
"The function %s arg%d type %s is unsupported.\n",
7517
tname, i, btf_type_str(t));
7518
return -EINVAL;
7519
}
7520
if (ret == 0) {
7521
bpf_log(log,
7522
"The function %s has malformed void argument.\n",
7523
tname);
7524
return -EINVAL;
7525
}
7526
m->arg_size[i] = ret;
7527
m->arg_flags[i] = __get_type_fmodel_flags(t);
7528
}
7529
m->nr_args = nargs;
7530
return 0;
7531
}
7532
7533
/* Compare BTFs of two functions assuming only scalars and pointers to context.
7534
* t1 points to BTF_KIND_FUNC in btf1
7535
* t2 points to BTF_KIND_FUNC in btf2
7536
* Returns:
7537
* EINVAL - function prototype mismatch
7538
* EFAULT - verifier bug
7539
* 0 - 99% match. The last 1% is validated by the verifier.
7540
*/
7541
static int btf_check_func_type_match(struct bpf_verifier_log *log,
7542
struct btf *btf1, const struct btf_type *t1,
7543
struct btf *btf2, const struct btf_type *t2)
7544
{
7545
const struct btf_param *args1, *args2;
7546
const char *fn1, *fn2, *s1, *s2;
7547
u32 nargs1, nargs2, i;
7548
7549
fn1 = btf_name_by_offset(btf1, t1->name_off);
7550
fn2 = btf_name_by_offset(btf2, t2->name_off);
7551
7552
if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
7553
bpf_log(log, "%s() is not a global function\n", fn1);
7554
return -EINVAL;
7555
}
7556
if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
7557
bpf_log(log, "%s() is not a global function\n", fn2);
7558
return -EINVAL;
7559
}
7560
7561
t1 = btf_type_by_id(btf1, t1->type);
7562
if (!t1 || !btf_type_is_func_proto(t1))
7563
return -EFAULT;
7564
t2 = btf_type_by_id(btf2, t2->type);
7565
if (!t2 || !btf_type_is_func_proto(t2))
7566
return -EFAULT;
7567
7568
args1 = (const struct btf_param *)(t1 + 1);
7569
nargs1 = btf_type_vlen(t1);
7570
args2 = (const struct btf_param *)(t2 + 1);
7571
nargs2 = btf_type_vlen(t2);
7572
7573
if (nargs1 != nargs2) {
7574
bpf_log(log, "%s() has %d args while %s() has %d args\n",
7575
fn1, nargs1, fn2, nargs2);
7576
return -EINVAL;
7577
}
7578
7579
t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7580
t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7581
if (t1->info != t2->info) {
7582
bpf_log(log,
7583
"Return type %s of %s() doesn't match type %s of %s()\n",
7584
btf_type_str(t1), fn1,
7585
btf_type_str(t2), fn2);
7586
return -EINVAL;
7587
}
7588
7589
for (i = 0; i < nargs1; i++) {
7590
t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
7591
t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
7592
7593
if (t1->info != t2->info) {
7594
bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
7595
i, fn1, btf_type_str(t1),
7596
fn2, btf_type_str(t2));
7597
return -EINVAL;
7598
}
7599
if (btf_type_has_size(t1) && t1->size != t2->size) {
7600
bpf_log(log,
7601
"arg%d in %s() has size %d while %s() has %d\n",
7602
i, fn1, t1->size,
7603
fn2, t2->size);
7604
return -EINVAL;
7605
}
7606
7607
/* global functions are validated with scalars and pointers
7608
* to context only. And only global functions can be replaced.
7609
* Hence type check only those types.
7610
*/
7611
if (btf_type_is_int(t1) || btf_is_any_enum(t1))
7612
continue;
7613
if (!btf_type_is_ptr(t1)) {
7614
bpf_log(log,
7615
"arg%d in %s() has unrecognized type\n",
7616
i, fn1);
7617
return -EINVAL;
7618
}
7619
t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
7620
t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
7621
if (!btf_type_is_struct(t1)) {
7622
bpf_log(log,
7623
"arg%d in %s() is not a pointer to context\n",
7624
i, fn1);
7625
return -EINVAL;
7626
}
7627
if (!btf_type_is_struct(t2)) {
7628
bpf_log(log,
7629
"arg%d in %s() is not a pointer to context\n",
7630
i, fn2);
7631
return -EINVAL;
7632
}
7633
/* This is an optional check to make program writing easier.
7634
* Compare names of structs and report an error to the user.
7635
* btf_prepare_func_args() already checked that t2 struct
7636
* is a context type. btf_prepare_func_args() will check
7637
* later that t1 struct is a context type as well.
7638
*/
7639
s1 = btf_name_by_offset(btf1, t1->name_off);
7640
s2 = btf_name_by_offset(btf2, t2->name_off);
7641
if (strcmp(s1, s2)) {
7642
bpf_log(log,
7643
"arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
7644
i, fn1, s1, fn2, s2);
7645
return -EINVAL;
7646
}
7647
}
7648
return 0;
7649
}
7650
7651
/* Compare BTFs of given program with BTF of target program */
7652
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
7653
struct btf *btf2, const struct btf_type *t2)
7654
{
7655
struct btf *btf1 = prog->aux->btf;
7656
const struct btf_type *t1;
7657
u32 btf_id = 0;
7658
7659
if (!prog->aux->func_info) {
7660
bpf_log(log, "Program extension requires BTF\n");
7661
return -EINVAL;
7662
}
7663
7664
btf_id = prog->aux->func_info[0].type_id;
7665
if (!btf_id)
7666
return -EFAULT;
7667
7668
t1 = btf_type_by_id(btf1, btf_id);
7669
if (!t1 || !btf_type_is_func(t1))
7670
return -EFAULT;
7671
7672
return btf_check_func_type_match(log, btf1, t1, btf2, t2);
7673
}
7674
7675
static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
7676
{
7677
const char *name;
7678
7679
t = btf_type_by_id(btf, t->type); /* skip PTR */
7680
7681
while (btf_type_is_modifier(t))
7682
t = btf_type_by_id(btf, t->type);
7683
7684
/* allow either struct or struct forward declaration */
7685
if (btf_type_is_struct(t) ||
7686
(btf_type_is_fwd(t) && btf_type_kflag(t) == 0)) {
7687
name = btf_str_by_offset(btf, t->name_off);
7688
return name && strcmp(name, "bpf_dynptr") == 0;
7689
}
7690
7691
return false;
7692
}
7693
7694
struct bpf_cand_cache {
7695
const char *name;
7696
u32 name_len;
7697
u16 kind;
7698
u16 cnt;
7699
struct {
7700
const struct btf *btf;
7701
u32 id;
7702
} cands[];
7703
};
7704
7705
static DEFINE_MUTEX(cand_cache_mutex);
7706
7707
static struct bpf_cand_cache *
7708
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id);
7709
7710
static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx,
7711
const struct btf *btf, const struct btf_type *t)
7712
{
7713
struct bpf_cand_cache *cc;
7714
struct bpf_core_ctx ctx = {
7715
.btf = btf,
7716
.log = log,
7717
};
7718
u32 kern_type_id, type_id;
7719
int err = 0;
7720
7721
/* skip PTR and modifiers */
7722
type_id = t->type;
7723
t = btf_type_by_id(btf, t->type);
7724
while (btf_type_is_modifier(t)) {
7725
type_id = t->type;
7726
t = btf_type_by_id(btf, t->type);
7727
}
7728
7729
mutex_lock(&cand_cache_mutex);
7730
cc = bpf_core_find_cands(&ctx, type_id);
7731
if (IS_ERR(cc)) {
7732
err = PTR_ERR(cc);
7733
bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n",
7734
arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7735
err);
7736
goto cand_cache_unlock;
7737
}
7738
if (cc->cnt != 1) {
7739
bpf_log(log, "arg#%d reference type('%s %s') %s\n",
7740
arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7741
cc->cnt == 0 ? "has no matches" : "is ambiguous");
7742
err = cc->cnt == 0 ? -ENOENT : -ESRCH;
7743
goto cand_cache_unlock;
7744
}
7745
if (btf_is_module(cc->cands[0].btf)) {
7746
bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n",
7747
arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
7748
err = -EOPNOTSUPP;
7749
goto cand_cache_unlock;
7750
}
7751
kern_type_id = cc->cands[0].id;
7752
7753
cand_cache_unlock:
7754
mutex_unlock(&cand_cache_mutex);
7755
if (err)
7756
return err;
7757
7758
return kern_type_id;
7759
}
7760
7761
enum btf_arg_tag {
7762
ARG_TAG_CTX = BIT_ULL(0),
7763
ARG_TAG_NONNULL = BIT_ULL(1),
7764
ARG_TAG_TRUSTED = BIT_ULL(2),
7765
ARG_TAG_UNTRUSTED = BIT_ULL(3),
7766
ARG_TAG_NULLABLE = BIT_ULL(4),
7767
ARG_TAG_ARENA = BIT_ULL(5),
7768
};
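/* Example (illustrative sketch; function and variable names such as
* handle_pkt are placeholders): BPF programs attach these tags to global
* subprog arguments via "arg:<tag>" BTF decl_tags, typically through libbpf
* convenience macros such as __arg_ctx or __arg_nonnull, assuming the libbpf
* in use provides them (the kernel only ever sees the "arg:..." string):
*
*    __noinline int handle_pkt(struct xdp_md *ctx __arg_ctx, void *p __arg_nonnull)
*    {
*            return 0;
*    }
*
* btf_prepare_func_args() below maps each tag onto a verifier arg_type.
*/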
7769
7770
/* Process BTF of a function to produce high-level expectation of function
7771
* arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information
7772
* is cached in subprog info for reuse.
7773
* Returns:
7774
* EFAULT - there is a verifier bug. Abort verification.
7775
* EINVAL - cannot convert BTF.
7776
* 0 - Successfully processed BTF and constructed argument expectations.
7777
*/
7778
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
7779
{
7780
bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL;
7781
struct bpf_subprog_info *sub = subprog_info(env, subprog);
7782
struct bpf_verifier_log *log = &env->log;
7783
struct bpf_prog *prog = env->prog;
7784
enum bpf_prog_type prog_type = prog->type;
7785
struct btf *btf = prog->aux->btf;
7786
const struct btf_param *args;
7787
const struct btf_type *t, *ref_t, *fn_t;
7788
u32 i, nargs, btf_id;
7789
const char *tname;
7790
7791
if (sub->args_cached)
7792
return 0;
7793
7794
if (!prog->aux->func_info) {
7795
verifier_bug(env, "func_info undefined");
7796
return -EFAULT;
7797
}
7798
7799
btf_id = prog->aux->func_info[subprog].type_id;
7800
if (!btf_id) {
7801
if (!is_global) /* not fatal for static funcs */
7802
return -EINVAL;
7803
bpf_log(log, "Global functions need valid BTF\n");
7804
return -EFAULT;
7805
}
7806
7807
fn_t = btf_type_by_id(btf, btf_id);
7808
if (!fn_t || !btf_type_is_func(fn_t)) {
7809
/* These checks were already done by the verifier while loading
7810
* struct bpf_func_info
7811
*/
7812
bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
7813
subprog);
7814
return -EFAULT;
7815
}
7816
tname = btf_name_by_offset(btf, fn_t->name_off);
7817
7818
if (prog->aux->func_info_aux[subprog].unreliable) {
7819
verifier_bug(env, "unreliable BTF for function %s()", tname);
7820
return -EFAULT;
7821
}
7822
if (prog_type == BPF_PROG_TYPE_EXT)
7823
prog_type = prog->aux->dst_prog->type;
7824
7825
t = btf_type_by_id(btf, fn_t->type);
7826
if (!t || !btf_type_is_func_proto(t)) {
7827
bpf_log(log, "Invalid type of function %s()\n", tname);
7828
return -EFAULT;
7829
}
7830
args = (const struct btf_param *)(t + 1);
7831
nargs = btf_type_vlen(t);
7832
if (nargs > MAX_BPF_FUNC_REG_ARGS) {
7833
if (!is_global)
7834
return -EINVAL;
7835
bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
7836
tname, nargs, MAX_BPF_FUNC_REG_ARGS);
7837
return -EINVAL;
7838
}
7839
/* check that function returns int, exception cb also requires this */
7840
t = btf_type_by_id(btf, t->type);
7841
while (btf_type_is_modifier(t))
7842
t = btf_type_by_id(btf, t->type);
7843
if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
7844
if (!is_global)
7845
return -EINVAL;
7846
bpf_log(log,
7847
"Global function %s() doesn't return scalar. Only those are supported.\n",
7848
tname);
7849
return -EINVAL;
7850
}
7851
7852
/* Convert BTF function arguments into verifier types.
7853
* Only PTR_TO_CTX and SCALAR are supported atm.
7854
*/
7855
for (i = 0; i < nargs; i++) {
7856
u32 tags = 0;
7857
int id = btf_named_start_id(btf, false) - 1;
7858
7859
/* 'arg:<tag>' decl_tag takes precedence over derivation of
7860
* register type from BTF type itself
7861
*/
7862
while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) {
7863
const struct btf_type *tag_t = btf_type_by_id(btf, id);
7864
const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
7865
7866
/* disallow arg tags in static subprogs */
7867
if (!is_global) {
7868
bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
7869
return -EOPNOTSUPP;
7870
}
7871
7872
if (strcmp(tag, "ctx") == 0) {
7873
tags |= ARG_TAG_CTX;
7874
} else if (strcmp(tag, "trusted") == 0) {
7875
tags |= ARG_TAG_TRUSTED;
7876
} else if (strcmp(tag, "untrusted") == 0) {
7877
tags |= ARG_TAG_UNTRUSTED;
7878
} else if (strcmp(tag, "nonnull") == 0) {
7879
tags |= ARG_TAG_NONNULL;
7880
} else if (strcmp(tag, "nullable") == 0) {
7881
tags |= ARG_TAG_NULLABLE;
7882
} else if (strcmp(tag, "arena") == 0) {
7883
tags |= ARG_TAG_ARENA;
7884
} else {
7885
bpf_log(log, "arg#%d has unsupported set of tags\n", i);
7886
return -EOPNOTSUPP;
7887
}
7888
}
7889
if (id != -ENOENT) {
7890
bpf_log(log, "arg#%d type tag fetching failure: %d\n", i, id);
7891
return id;
7892
}
7893
7894
t = btf_type_by_id(btf, args[i].type);
7895
while (btf_type_is_modifier(t))
7896
t = btf_type_by_id(btf, t->type);
7897
if (!btf_type_is_ptr(t))
7898
goto skip_pointer;
7899
7900
if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) {
7901
if (tags & ~ARG_TAG_CTX) {
7902
bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7903
return -EINVAL;
7904
}
7905
if ((tags & ARG_TAG_CTX) &&
7906
btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
7907
prog->expected_attach_type))
7908
return -EINVAL;
7909
sub->args[i].arg_type = ARG_PTR_TO_CTX;
7910
continue;
7911
}
7912
if (btf_is_dynptr_ptr(btf, t)) {
7913
if (tags) {
7914
bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7915
return -EINVAL;
7916
}
7917
sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
7918
continue;
7919
}
7920
if (tags & ARG_TAG_TRUSTED) {
7921
int kern_type_id;
7922
7923
if (tags & ARG_TAG_NONNULL) {
7924
bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7925
return -EINVAL;
7926
}
7927
7928
kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
7929
if (kern_type_id < 0)
7930
return kern_type_id;
7931
7932
sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
7933
if (tags & ARG_TAG_NULLABLE)
7934
sub->args[i].arg_type |= PTR_MAYBE_NULL;
7935
sub->args[i].btf_id = kern_type_id;
7936
continue;
7937
}
7938
if (tags & ARG_TAG_UNTRUSTED) {
7939
struct btf *vmlinux_btf;
7940
int kern_type_id;
7941
7942
if (tags & ~ARG_TAG_UNTRUSTED) {
7943
bpf_log(log, "arg#%d untrusted cannot be combined with any other tags\n", i);
7944
return -EINVAL;
7945
}
7946
7947
ref_t = btf_type_skip_modifiers(btf, t->type, NULL);
7948
if (btf_type_is_void(ref_t) || btf_type_is_primitive(ref_t)) {
7949
sub->args[i].arg_type = ARG_PTR_TO_MEM | MEM_RDONLY | PTR_UNTRUSTED;
7950
sub->args[i].mem_size = 0;
7951
continue;
7952
}
7953
7954
kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
7955
if (kern_type_id < 0)
7956
return kern_type_id;
7957
7958
vmlinux_btf = bpf_get_btf_vmlinux();
7959
ref_t = btf_type_by_id(vmlinux_btf, kern_type_id);
7960
if (!btf_type_is_struct(ref_t)) {
7961
tname = __btf_name_by_offset(vmlinux_btf, t->name_off);
7962
bpf_log(log, "arg#%d has type %s '%s', but only struct or primitive types are allowed\n",
7963
i, btf_type_str(ref_t), tname);
7964
return -EINVAL;
7965
}
7966
sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_UNTRUSTED;
7967
sub->args[i].btf_id = kern_type_id;
7968
continue;
7969
}
7970
if (tags & ARG_TAG_ARENA) {
7971
if (tags & ~ARG_TAG_ARENA) {
7972
bpf_log(log, "arg#%d arena cannot be combined with any other tags\n", i);
7973
return -EINVAL;
7974
}
7975
sub->args[i].arg_type = ARG_PTR_TO_ARENA;
7976
continue;
7977
}
7978
if (is_global) { /* generic user data pointer */
7979
u32 mem_size;
7980
7981
if (tags & ARG_TAG_NULLABLE) {
7982
bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7983
return -EINVAL;
7984
}
7985
7986
t = btf_type_skip_modifiers(btf, t->type, NULL);
7987
ref_t = btf_resolve_size(btf, t, &mem_size);
7988
if (IS_ERR(ref_t)) {
7989
bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
7990
i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
7991
PTR_ERR(ref_t));
7992
return -EINVAL;
7993
}
7994
7995
sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
7996
if (tags & ARG_TAG_NONNULL)
7997
sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
7998
sub->args[i].mem_size = mem_size;
7999
continue;
8000
}
8001
8002
skip_pointer:
8003
if (tags) {
8004
bpf_log(log, "arg#%d has pointer tag, but is not a pointer type\n", i);
8005
return -EINVAL;
8006
}
8007
if (btf_type_is_int(t) || btf_is_any_enum(t)) {
8008
sub->args[i].arg_type = ARG_ANYTHING;
8009
continue;
8010
}
8011
if (!is_global)
8012
return -EINVAL;
8013
bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
8014
i, btf_type_str(t), tname);
8015
return -EINVAL;
8016
}
8017
8018
sub->arg_cnt = nargs;
8019
sub->args_cached = true;
8020
8021
return 0;
8022
}
8023
8024
static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
8025
struct btf_show *show)
8026
{
8027
const struct btf_type *t = btf_type_by_id(btf, type_id);
8028
8029
show->btf = btf;
8030
memset(&show->state, 0, sizeof(show->state));
8031
memset(&show->obj, 0, sizeof(show->obj));
8032
8033
btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
8034
}
8035
8036
__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
8037
va_list args)
8038
{
8039
seq_vprintf((struct seq_file *)show->target, fmt, args);
8040
}
8041
8042
int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
8043
void *obj, struct seq_file *m, u64 flags)
8044
{
8045
struct btf_show sseq;
8046
8047
sseq.target = m;
8048
sseq.showfn = btf_seq_show;
8049
sseq.flags = flags;
8050
8051
btf_type_show(btf, type_id, obj, &sseq);
8052
8053
return sseq.state.status;
8054
}
8055
8056
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
8057
struct seq_file *m)
8058
{
8059
(void) btf_type_seq_show_flags(btf, type_id, obj, m,
8060
BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
8061
BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
8062
}
8063
8064
struct btf_show_snprintf {
8065
struct btf_show show;
8066
int len_left; /* space left in string */
8067
int len; /* length we would have written */
8068
};
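/* btf_snprintf_show() below follows vsnprintf() semantics: 'len' keeps
* accumulating the length that would have been written even after the buffer
* is exhausted, while 'len_left' drops to 0, so btf_type_snprintf_show() can
* report truncation to its caller.
*/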
8069
8070
__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
8071
va_list args)
8072
{
8073
struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
8074
int len;
8075
8076
len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
8077
8078
if (len < 0) {
8079
ssnprintf->len_left = 0;
8080
ssnprintf->len = len;
8081
} else if (len >= ssnprintf->len_left) {
8082
/* no space, drive on to get length we would have written */
8083
ssnprintf->len_left = 0;
8084
ssnprintf->len += len;
8085
} else {
8086
ssnprintf->len_left -= len;
8087
ssnprintf->len += len;
8088
show->target += len;
8089
}
8090
}
8091
8092
int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
8093
char *buf, int len, u64 flags)
8094
{
8095
struct btf_show_snprintf ssnprintf;
8096
8097
ssnprintf.show.target = buf;
8098
ssnprintf.show.flags = flags;
8099
ssnprintf.show.showfn = btf_snprintf_show;
8100
ssnprintf.len_left = len;
8101
ssnprintf.len = 0;
8102
8103
btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
8104
8105
/* If we encountered an error, return it. */
8106
if (ssnprintf.show.state.status)
8107
return ssnprintf.show.state.status;
8108
8109
/* Otherwise return length we would have written */
8110
return ssnprintf.len;
8111
}
8112
8113
#ifdef CONFIG_PROC_FS
8114
static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
8115
{
8116
const struct btf *btf = filp->private_data;
8117
8118
seq_printf(m, "btf_id:\t%u\n", btf->id);
8119
}
8120
#endif
8121
8122
static int btf_release(struct inode *inode, struct file *filp)
8123
{
8124
btf_put(filp->private_data);
8125
return 0;
8126
}
8127
8128
const struct file_operations btf_fops = {
8129
#ifdef CONFIG_PROC_FS
8130
.show_fdinfo = bpf_btf_show_fdinfo,
8131
#endif
8132
.release = btf_release,
8133
};
8134
8135
static int __btf_new_fd(struct btf *btf)
8136
{
8137
return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
8138
}
8139
8140
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
8141
{
8142
struct btf *btf;
8143
int ret;
8144
8145
btf = btf_parse(attr, uattr, uattr_size);
8146
if (IS_ERR(btf))
8147
return PTR_ERR(btf);
8148
8149
ret = btf_alloc_id(btf);
8150
if (ret) {
8151
btf_free(btf);
8152
return ret;
8153
}
8154
8155
/*
8156
* The BTF ID is published to userspace.
* From now on, any freeing of this BTF must go through call_rcu()
* (i.e. free it by calling btf_put()).

8159
*/
8160
8161
ret = __btf_new_fd(btf);
8162
if (ret < 0)
8163
btf_put(btf);
8164
8165
return ret;
8166
}
8167
8168
struct btf *btf_get_by_fd(int fd)
8169
{
8170
struct btf *btf;
8171
CLASS(fd, f)(fd);
8172
8173
btf = __btf_get_by_fd(f);
8174
if (!IS_ERR(btf))
8175
refcount_inc(&btf->refcnt);
8176
8177
return btf;
8178
}
8179
8180
int btf_get_info_by_fd(const struct btf *btf,
8181
const union bpf_attr *attr,
8182
union bpf_attr __user *uattr)
8183
{
8184
struct bpf_btf_info __user *uinfo;
8185
struct bpf_btf_info info;
8186
u32 info_copy, btf_copy;
8187
void __user *ubtf;
8188
char __user *uname;
8189
u32 uinfo_len, uname_len, name_len;
8190
int ret = 0;
8191
8192
uinfo = u64_to_user_ptr(attr->info.info);
8193
uinfo_len = attr->info.info_len;
8194
8195
info_copy = min_t(u32, uinfo_len, sizeof(info));
8196
memset(&info, 0, sizeof(info));
8197
if (copy_from_user(&info, uinfo, info_copy))
8198
return -EFAULT;
8199
8200
info.id = btf->id;
8201
ubtf = u64_to_user_ptr(info.btf);
8202
btf_copy = min_t(u32, btf->data_size, info.btf_size);
8203
if (copy_to_user(ubtf, btf->data, btf_copy))
8204
return -EFAULT;
8205
info.btf_size = btf->data_size;
8206
8207
info.kernel_btf = btf->kernel_btf;
8208
8209
uname = u64_to_user_ptr(info.name);
8210
uname_len = info.name_len;
8211
if (!uname ^ !uname_len)
8212
return -EINVAL;
8213
8214
name_len = strlen(btf->name);
8215
info.name_len = name_len;
8216
8217
if (uname) {
8218
if (uname_len >= name_len + 1) {
8219
if (copy_to_user(uname, btf->name, name_len + 1))
8220
return -EFAULT;
8221
} else {
8222
char zero = '\0';
8223
8224
if (copy_to_user(uname, btf->name, uname_len - 1))
8225
return -EFAULT;
8226
if (put_user(zero, uname + uname_len - 1))
8227
return -EFAULT;
8228
/* let user-space know that the buffer was too short */
8229
ret = -ENOSPC;
8230
}
8231
}
8232
8233
if (copy_to_user(uinfo, &info, info_copy) ||
8234
put_user(info_copy, &uattr->info.info_len))
8235
return -EFAULT;
8236
8237
return ret;
8238
}
8239
8240
int btf_get_fd_by_id(u32 id)
8241
{
8242
struct btf *btf;
8243
int fd;
8244
8245
rcu_read_lock();
8246
btf = idr_find(&btf_idr, id);
8247
if (!btf || !refcount_inc_not_zero(&btf->refcnt))
8248
btf = ERR_PTR(-ENOENT);
8249
rcu_read_unlock();
8250
8251
if (IS_ERR(btf))
8252
return PTR_ERR(btf);
8253
8254
fd = __btf_new_fd(btf);
8255
if (fd < 0)
8256
btf_put(btf);
8257
8258
return fd;
8259
}
8260
8261
u32 btf_obj_id(const struct btf *btf)
8262
{
8263
return btf->id;
8264
}
8265
8266
bool btf_is_kernel(const struct btf *btf)
8267
{
8268
return btf->kernel_btf;
8269
}
8270
8271
bool btf_is_module(const struct btf *btf)
8272
{
8273
return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
8274
}
8275
8276
enum {
8277
BTF_MODULE_F_LIVE = (1 << 0),
8278
};
8279
8280
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8281
struct btf_module {
8282
struct list_head list;
8283
struct module *module;
8284
struct btf *btf;
8285
struct bin_attribute *sysfs_attr;
8286
int flags;
8287
};
8288
8289
static LIST_HEAD(btf_modules);
8290
static DEFINE_MUTEX(btf_module_mutex);
8291
8292
static void purge_cand_cache(struct btf *btf);
8293
8294
static int btf_module_notify(struct notifier_block *nb, unsigned long op,
8295
void *module)
8296
{
8297
struct btf_module *btf_mod, *tmp;
8298
struct module *mod = module;
8299
struct btf *btf;
8300
int err = 0;
8301
8302
if (mod->btf_data_size == 0 ||
8303
(op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
8304
op != MODULE_STATE_GOING))
8305
goto out;
8306
8307
switch (op) {
8308
case MODULE_STATE_COMING:
8309
btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
8310
if (!btf_mod) {
8311
err = -ENOMEM;
8312
goto out;
8313
}
8314
btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size,
8315
mod->btf_base_data, mod->btf_base_data_size);
8316
if (IS_ERR(btf)) {
8317
kfree(btf_mod);
8318
if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
8319
pr_warn("failed to validate module [%s] BTF: %ld\n",
8320
mod->name, PTR_ERR(btf));
8321
err = PTR_ERR(btf);
8322
} else {
8323
pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
8324
}
8325
goto out;
8326
}
8327
err = btf_alloc_id(btf);
8328
if (err) {
8329
btf_free(btf);
8330
kfree(btf_mod);
8331
goto out;
8332
}
8333
8334
purge_cand_cache(NULL);
8335
mutex_lock(&btf_module_mutex);
8336
btf_mod->module = module;
8337
btf_mod->btf = btf;
8338
list_add(&btf_mod->list, &btf_modules);
8339
mutex_unlock(&btf_module_mutex);
8340
8341
if (IS_ENABLED(CONFIG_SYSFS)) {
8342
struct bin_attribute *attr;
8343
8344
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
8345
if (!attr)
8346
goto out;
8347
8348
sysfs_bin_attr_init(attr);
8349
attr->attr.name = btf->name;
8350
attr->attr.mode = 0444;
8351
attr->size = btf->data_size;
8352
attr->private = btf->data;
8353
attr->read = sysfs_bin_attr_simple_read;
8354
8355
err = sysfs_create_bin_file(btf_kobj, attr);
8356
if (err) {
8357
pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
8358
mod->name, err);
8359
kfree(attr);
8360
err = 0;
8361
goto out;
8362
}
8363
8364
btf_mod->sysfs_attr = attr;
8365
}
8366
8367
break;
8368
case MODULE_STATE_LIVE:
8369
mutex_lock(&btf_module_mutex);
8370
list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8371
if (btf_mod->module != module)
8372
continue;
8373
8374
btf_mod->flags |= BTF_MODULE_F_LIVE;
8375
break;
8376
}
8377
mutex_unlock(&btf_module_mutex);
8378
break;
8379
case MODULE_STATE_GOING:
8380
mutex_lock(&btf_module_mutex);
8381
list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8382
if (btf_mod->module != module)
8383
continue;
8384
8385
list_del(&btf_mod->list);
8386
if (btf_mod->sysfs_attr)
8387
sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
8388
purge_cand_cache(btf_mod->btf);
8389
btf_put(btf_mod->btf);
8390
kfree(btf_mod->sysfs_attr);
8391
kfree(btf_mod);
8392
break;
8393
}
8394
mutex_unlock(&btf_module_mutex);
8395
break;
8396
}
8397
out:
8398
return notifier_from_errno(err);
8399
}
8400
8401
static struct notifier_block btf_module_nb = {
8402
.notifier_call = btf_module_notify,
8403
};
8404
8405
static int __init btf_module_init(void)
8406
{
8407
register_module_notifier(&btf_module_nb);
8408
return 0;
8409
}
8410
8411
fs_initcall(btf_module_init);
8412
#endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
8413
8414
struct module *btf_try_get_module(const struct btf *btf)
8415
{
8416
struct module *res = NULL;
8417
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8418
struct btf_module *btf_mod, *tmp;
8419
8420
mutex_lock(&btf_module_mutex);
8421
list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8422
if (btf_mod->btf != btf)
8423
continue;
8424
8425
/* We must only consider modules whose __init routine has
* finished, hence we check for the BTF_MODULE_F_LIVE flag,
* which is set from the notifier callback for
* MODULE_STATE_LIVE.
8429
*/
8430
if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
8431
res = btf_mod->module;
8432
8433
break;
8434
}
8435
mutex_unlock(&btf_module_mutex);
8436
#endif
8437
8438
return res;
8439
}
8440
8441
/* Returns struct btf corresponding to the struct module.
8442
* This function can return NULL or ERR_PTR.
8443
*/
8444
static struct btf *btf_get_module_btf(const struct module *module)
8445
{
8446
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8447
struct btf_module *btf_mod, *tmp;
8448
#endif
8449
struct btf *btf = NULL;
8450
8451
if (!module) {
8452
btf = bpf_get_btf_vmlinux();
8453
if (!IS_ERR_OR_NULL(btf))
8454
btf_get(btf);
8455
return btf;
8456
}
8457
8458
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
8459
mutex_lock(&btf_module_mutex);
8460
list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
8461
if (btf_mod->module != module)
8462
continue;
8463
8464
btf_get(btf_mod->btf);
8465
btf = btf_mod->btf;
8466
break;
8467
}
8468
mutex_unlock(&btf_module_mutex);
8469
#endif
8470
8471
return btf;
8472
}
8473
8474
static int check_btf_kconfigs(const struct module *module, const char *feature)
8475
{
8476
if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
8477
pr_err("missing vmlinux BTF, cannot register %s\n", feature);
8478
return -ENOENT;
8479
}
8480
if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
8481
pr_warn("missing module BTF, cannot register %s\n", feature);
8482
return 0;
8483
}
8484
8485
BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
8486
{
8487
struct btf *btf = NULL;
8488
int btf_obj_fd = 0;
8489
long ret;
8490
8491
if (flags)
8492
return -EINVAL;
8493
8494
if (name_sz <= 1 || name[name_sz - 1])
8495
return -EINVAL;
8496
8497
ret = bpf_find_btf_id(name, kind, &btf);
8498
if (ret > 0 && btf_is_module(btf)) {
8499
btf_obj_fd = __btf_new_fd(btf);
8500
if (btf_obj_fd < 0) {
8501
btf_put(btf);
8502
return btf_obj_fd;
8503
}
8504
return ret | (((u64)btf_obj_fd) << 32);
8505
}
8506
if (ret > 0)
8507
btf_put(btf);
8508
return ret;
8509
}
8510
8511
const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
8512
.func = bpf_btf_find_by_name_kind,
8513
.gpl_only = false,
8514
.ret_type = RET_INTEGER,
8515
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
8516
.arg2_type = ARG_CONST_SIZE,
8517
.arg3_type = ARG_ANYTHING,
8518
.arg4_type = ARG_ANYTHING,
8519
};
8520
8521
BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
8522
#define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
8523
BTF_TRACING_TYPE_xxx
8524
#undef BTF_TRACING_TYPE
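/* The above expands BTF_TRACING_TYPE_xxx so that btf_tracing_ids[] holds the
* vmlinux BTF IDs of a few core kernel structs (e.g. task_struct), indexed by
* the BTF_TRACING_TYPE_* enumerators, for use wherever these well-known types
* are needed.
*/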
8525
8526
/* Validate well-formedness of iter argument type.
8527
* On success, return positive BTF ID of iter state's STRUCT type.
8528
* On error, negative error is returned.
8529
*/
8530
int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
8531
{
8532
const struct btf_param *arg;
8533
const struct btf_type *t;
8534
const char *name;
8535
int btf_id;
8536
8537
if (btf_type_vlen(func) <= arg_idx)
8538
return -EINVAL;
8539
8540
arg = &btf_params(func)[arg_idx];
8541
t = btf_type_skip_modifiers(btf, arg->type, NULL);
8542
if (!t || !btf_type_is_ptr(t))
8543
return -EINVAL;
8544
t = btf_type_skip_modifiers(btf, t->type, &btf_id);
8545
if (!t || !__btf_type_is_struct(t))
8546
return -EINVAL;
8547
8548
name = btf_name_by_offset(btf, t->name_off);
8549
if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
8550
return -EINVAL;
8551
8552
return btf_id;
8553
}
8554
8555
static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
8556
const struct btf_type *func, u32 func_flags)
8557
{
8558
u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
8559
const char *sfx, *iter_name;
8560
const struct btf_type *t;
8561
char exp_name[128];
8562
u32 nr_args;
8563
int btf_id;
8564
8565
/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
8566
if (!flags || (flags & (flags - 1)))
8567
return -EINVAL;
8568
8569
/* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */
8570
nr_args = btf_type_vlen(func);
8571
if (nr_args < 1)
8572
return -EINVAL;
8573
8574
btf_id = btf_check_iter_arg(btf, func, 0);
8575
if (btf_id < 0)
8576
return btf_id;
8577
8578
/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
8579
* fit nicely in stack slots
8580
*/
8581
t = btf_type_by_id(btf, btf_id);
8582
if (t->size == 0 || (t->size % 8))
8583
return -EINVAL;
8584
8585
/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
8586
* naming pattern
8587
*/
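/* e.g. for 'struct bpf_iter_num' the expected kfunc names are
* bpf_iter_num_new(), bpf_iter_num_next() and bpf_iter_num_destroy().
*/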
8588
iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
8589
if (flags & KF_ITER_NEW)
8590
sfx = "new";
8591
else if (flags & KF_ITER_NEXT)
8592
sfx = "next";
8593
else /* (flags & KF_ITER_DESTROY) */
8594
sfx = "destroy";
8595
8596
snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
8597
if (strcmp(func_name, exp_name))
8598
return -EINVAL;
8599
8600
/* only iter constructor should have extra arguments */
8601
if (!(flags & KF_ITER_NEW) && nr_args != 1)
8602
return -EINVAL;
8603
8604
if (flags & KF_ITER_NEXT) {
8605
/* bpf_iter_<type>_next() should return pointer */
8606
t = btf_type_skip_modifiers(btf, func->type, NULL);
8607
if (!t || !btf_type_is_ptr(t))
8608
return -EINVAL;
8609
}
8610
8611
if (flags & KF_ITER_DESTROY) {
8612
/* bpf_iter_<type>_destroy() should return void */
8613
t = btf_type_by_id(btf, func->type);
8614
if (!t || !btf_type_is_void(t))
8615
return -EINVAL;
8616
}
8617
8618
return 0;
8619
}
8620
8621
static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
8622
{
8623
const struct btf_type *func;
8624
const char *func_name;
8625
int err;
8626
8627
/* any kfunc should be FUNC -> FUNC_PROTO */
8628
func = btf_type_by_id(btf, func_id);
8629
if (!func || !btf_type_is_func(func))
8630
return -EINVAL;
8631
8632
/* sanity check kfunc name */
8633
func_name = btf_name_by_offset(btf, func->name_off);
8634
if (!func_name || !func_name[0])
8635
return -EINVAL;
8636
8637
func = btf_type_by_id(btf, func->type);
8638
if (!func || !btf_type_is_func_proto(func))
8639
return -EINVAL;
8640
8641
if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) {
8642
err = btf_check_iter_kfuncs(btf, func_name, func, func_flags);
8643
if (err)
8644
return err;
8645
}
8646
8647
return 0;
8648
}
8649
8650
/* Kernel Function (kfunc) BTF ID set registration API */
8651
8652
static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
8653
const struct btf_kfunc_id_set *kset)
8654
{
8655
struct btf_kfunc_hook_filter *hook_filter;
8656
struct btf_id_set8 *add_set = kset->set;
8657
bool vmlinux_set = !btf_is_module(btf);
8658
bool add_filter = !!kset->filter;
8659
struct btf_kfunc_set_tab *tab;
8660
struct btf_id_set8 *set;
8661
u32 set_cnt, i;
8662
int ret;
8663
8664
if (hook >= BTF_KFUNC_HOOK_MAX) {
8665
ret = -EINVAL;
8666
goto end;
8667
}
8668
8669
if (!add_set->cnt)
8670
return 0;
8671
8672
tab = btf->kfunc_set_tab;
8673
8674
if (tab && add_filter) {
8675
u32 i;
8676
8677
hook_filter = &tab->hook_filters[hook];
8678
for (i = 0; i < hook_filter->nr_filters; i++) {
8679
if (hook_filter->filters[i] == kset->filter) {
8680
add_filter = false;
8681
break;
8682
}
8683
}
8684
8685
if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
8686
ret = -E2BIG;
8687
goto end;
8688
}
8689
}
8690
8691
if (!tab) {
8692
tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
8693
if (!tab)
8694
return -ENOMEM;
8695
btf->kfunc_set_tab = tab;
8696
}
8697
8698
set = tab->sets[hook];
8699
/* Warn when register_btf_kfunc_id_set() is called twice for the same hook
* with a module set; only one registration per hook is allowed for modules.
8701
*/
8702
if (WARN_ON_ONCE(set && !vmlinux_set)) {
8703
ret = -EINVAL;
8704
goto end;
8705
}
8706
8707
/* In case of vmlinux sets, there may be more than one set being
8708
* registered per hook. To create a unified set, we allocate a new set
8709
* and concatenate all individual sets being registered. While each set
8710
* is individually sorted, they may become unsorted when concatenated,
8711
* hence the final set must be re-sorted so that binary
* searching it via the btf_id_set8_contains() function works.
8713
*
8714
* For module sets, we need to allocate as we may need to relocate
8715
* BTF ids.
8716
*/
8717
set_cnt = set ? set->cnt : 0;
8718
8719
if (set_cnt > U32_MAX - add_set->cnt) {
8720
ret = -EOVERFLOW;
8721
goto end;
8722
}
8723
8724
if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
8725
ret = -E2BIG;
8726
goto end;
8727
}
8728
8729
/* Grow set */
8730
set = krealloc(tab->sets[hook],
8731
struct_size(set, pairs, set_cnt + add_set->cnt),
8732
GFP_KERNEL | __GFP_NOWARN);
8733
if (!set) {
8734
ret = -ENOMEM;
8735
goto end;
8736
}
8737
8738
/* For newly allocated set, initialize set->cnt to 0 */
8739
if (!tab->sets[hook])
8740
set->cnt = 0;
8741
tab->sets[hook] = set;
8742
8743
/* Concatenate the two sets */
8744
memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
8745
/* Now that the set is copied, update with relocated BTF ids */
8746
for (i = set->cnt; i < set->cnt + add_set->cnt; i++)
8747
set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
8748
8749
set->cnt += add_set->cnt;
8750
8751
sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8752
8753
if (add_filter) {
8754
hook_filter = &tab->hook_filters[hook];
8755
hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
8756
}
8757
return 0;
8758
end:
8759
btf_free_kfunc_set_tab(btf);
8760
return ret;
8761
}
8762
8763
static u32 *btf_kfunc_id_set_contains(const struct btf *btf,
8764
enum btf_kfunc_hook hook,
8765
u32 kfunc_btf_id)
8766
{
8767
struct btf_id_set8 *set;
8768
u32 *id;
8769
8770
if (hook >= BTF_KFUNC_HOOK_MAX)
8771
return NULL;
8772
if (!btf->kfunc_set_tab)
8773
return NULL;
8774
set = btf->kfunc_set_tab->sets[hook];
8775
if (!set)
8776
return NULL;
8777
id = btf_id_set8_contains(set, kfunc_btf_id);
8778
if (!id)
8779
return NULL;
8780
/* The flags for BTF ID are located next to it */
8781
return id + 1;
8782
}
8783
8784
static bool __btf_kfunc_is_allowed(const struct btf *btf,
8785
enum btf_kfunc_hook hook,
8786
u32 kfunc_btf_id,
8787
const struct bpf_prog *prog)
8788
{
8789
struct btf_kfunc_hook_filter *hook_filter;
8790
int i;
8791
8792
if (hook >= BTF_KFUNC_HOOK_MAX)
8793
return false;
8794
if (!btf->kfunc_set_tab)
8795
return false;
8796
8797
hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
8798
for (i = 0; i < hook_filter->nr_filters; i++) {
8799
if (hook_filter->filters[i](prog, kfunc_btf_id))
8800
return false;
8801
}
8802
8803
return true;
8804
}
8805
8806
static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
8807
{
8808
switch (prog_type) {
8809
case BPF_PROG_TYPE_UNSPEC:
8810
return BTF_KFUNC_HOOK_COMMON;
8811
case BPF_PROG_TYPE_XDP:
8812
return BTF_KFUNC_HOOK_XDP;
8813
case BPF_PROG_TYPE_SCHED_CLS:
8814
return BTF_KFUNC_HOOK_TC;
8815
case BPF_PROG_TYPE_STRUCT_OPS:
8816
return BTF_KFUNC_HOOK_STRUCT_OPS;
8817
case BPF_PROG_TYPE_TRACING:
8818
case BPF_PROG_TYPE_TRACEPOINT:
8819
case BPF_PROG_TYPE_RAW_TRACEPOINT:
8820
case BPF_PROG_TYPE_PERF_EVENT:
8821
case BPF_PROG_TYPE_LSM:
8822
return BTF_KFUNC_HOOK_TRACING;
8823
case BPF_PROG_TYPE_SYSCALL:
8824
return BTF_KFUNC_HOOK_SYSCALL;
8825
case BPF_PROG_TYPE_CGROUP_SKB:
8826
case BPF_PROG_TYPE_CGROUP_SOCK:
8827
case BPF_PROG_TYPE_CGROUP_DEVICE:
8828
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
8829
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
8830
case BPF_PROG_TYPE_CGROUP_SYSCTL:
8831
case BPF_PROG_TYPE_SOCK_OPS:
8832
return BTF_KFUNC_HOOK_CGROUP;
8833
case BPF_PROG_TYPE_SCHED_ACT:
8834
return BTF_KFUNC_HOOK_SCHED_ACT;
8835
case BPF_PROG_TYPE_SK_SKB:
8836
return BTF_KFUNC_HOOK_SK_SKB;
8837
case BPF_PROG_TYPE_SOCKET_FILTER:
8838
return BTF_KFUNC_HOOK_SOCKET_FILTER;
8839
case BPF_PROG_TYPE_LWT_OUT:
8840
case BPF_PROG_TYPE_LWT_IN:
8841
case BPF_PROG_TYPE_LWT_XMIT:
8842
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
8843
return BTF_KFUNC_HOOK_LWT;
8844
case BPF_PROG_TYPE_NETFILTER:
8845
return BTF_KFUNC_HOOK_NETFILTER;
8846
case BPF_PROG_TYPE_KPROBE:
8847
return BTF_KFUNC_HOOK_KPROBE;
8848
default:
8849
return BTF_KFUNC_HOOK_MAX;
8850
}
8851
}
8852
8853
bool btf_kfunc_is_allowed(const struct btf *btf,
8854
u32 kfunc_btf_id,
8855
const struct bpf_prog *prog)
8856
{
8857
enum bpf_prog_type prog_type = resolve_prog_type(prog);
8858
enum btf_kfunc_hook hook;
8859
u32 *kfunc_flags;
8860
8861
kfunc_flags = btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id);
8862
if (kfunc_flags && __btf_kfunc_is_allowed(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog))
8863
return true;
8864
8865
hook = bpf_prog_type_to_kfunc_hook(prog_type);
8866
kfunc_flags = btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id);
8867
if (kfunc_flags && __btf_kfunc_is_allowed(btf, hook, kfunc_btf_id, prog))
8868
return true;
8869
8870
return false;
8871
}
8872
8873
/* Caution:
8874
* Reference to the module (obtained using btf_try_get_module) corresponding to
8875
* the struct btf *MUST* be held when calling this function from verifier
8876
* context. This is usually true as we stash references in prog's kfunc_btf_tab;
8877
* keeping the reference for the duration of the call provides the necessary
8878
* protection for looking up a well-formed btf->kfunc_set_tab.
8879
*/
8880
u32 *btf_kfunc_flags(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog)
8881
{
8882
enum bpf_prog_type prog_type = resolve_prog_type(prog);
8883
enum btf_kfunc_hook hook;
8884
u32 *kfunc_flags;
8885
8886
kfunc_flags = btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id);
8887
if (kfunc_flags)
8888
return kfunc_flags;
8889
8890
hook = bpf_prog_type_to_kfunc_hook(prog_type);
8891
return btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id);
8892
}
8893
8894
u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
8895
const struct bpf_prog *prog)
8896
{
8897
if (!__btf_kfunc_is_allowed(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog))
8898
return NULL;
8899
8900
return btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id);
8901
}
8902
8903
static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
8904
const struct btf_kfunc_id_set *kset)
8905
{
8906
struct btf *btf;
8907
int ret, i;
8908
8909
btf = btf_get_module_btf(kset->owner);
8910
if (!btf)
8911
return check_btf_kconfigs(kset->owner, "kfunc");
8912
if (IS_ERR(btf))
8913
return PTR_ERR(btf);
8914
8915
for (i = 0; i < kset->set->cnt; i++) {
8916
ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
8917
kset->set->pairs[i].flags);
8918
if (ret)
8919
goto err_out;
8920
}
8921
8922
ret = btf_populate_kfunc_set(btf, hook, kset);
8923
8924
err_out:
8925
btf_put(btf);
8926
return ret;
8927
}
8928
8929
/* This function must be invoked only from initcalls/module init functions */
8930
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
8931
const struct btf_kfunc_id_set *kset)
8932
{
8933
enum btf_kfunc_hook hook;
8934
8935
/* All kfuncs need to be tagged as such in BTF.
8936
* WARN() for initcall registrations that do not check errors.
8937
*/
8938
if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
8939
WARN_ON(!kset->owner);
8940
return -EINVAL;
8941
}
8942
8943
hook = bpf_prog_type_to_kfunc_hook(prog_type);
8944
return __register_btf_kfunc_id_set(hook, kset);
8945
}
8946
EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
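/* Typical usage (illustrative sketch with placeholder kfunc/set names; the
* BTF_KFUNCS_* and BTF_ID_FLAGS macros are from include/linux/btf_ids.h):
*
*    BTF_KFUNCS_START(my_kfunc_ids)
*    BTF_ID_FLAGS(func, bpf_my_kfunc, KF_TRUSTED_ARGS)
*    BTF_KFUNCS_END(my_kfunc_ids)
*
*    static const struct btf_kfunc_id_set my_kfunc_set = {
*            .owner = THIS_MODULE,
*            .set   = &my_kfunc_ids,
*    };
*
*    (from an initcall or module init function:)
*    err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set);
*/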
8947
8948
/* This function must be invoked only from initcalls/module init functions */
8949
int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset)
8950
{
8951
return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset);
8952
}
8953
EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set);
8954
8955
s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
8956
{
8957
struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
8958
struct btf_id_dtor_kfunc *dtor;
8959
8960
if (!tab)
8961
return -ENOENT;
8962
/* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
8963
* to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
8964
*/
8965
BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
8966
dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8967
if (!dtor)
8968
return -ENOENT;
8969
return dtor->kfunc_btf_id;
8970
}
8971
8972
static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
8973
{
8974
const struct btf_type *dtor_func, *dtor_func_proto, *t;
8975
const struct btf_param *args;
8976
s32 dtor_btf_id;
8977
u32 nr_args, i;
8978
8979
for (i = 0; i < cnt; i++) {
8980
dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id);
8981
8982
dtor_func = btf_type_by_id(btf, dtor_btf_id);
8983
if (!dtor_func || !btf_type_is_func(dtor_func))
8984
return -EINVAL;
8985
8986
dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
8987
if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
8988
return -EINVAL;
8989
8990
/* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
8991
t = btf_type_by_id(btf, dtor_func_proto->type);
8992
if (!t || !btf_type_is_void(t))
8993
return -EINVAL;
8994
8995
nr_args = btf_type_vlen(dtor_func_proto);
8996
if (nr_args != 1)
8997
return -EINVAL;
8998
args = btf_params(dtor_func_proto);
8999
t = btf_type_by_id(btf, args[0].type);
9000
/* Allow any pointer type, as the width is the same for all
* pointer types on targets Linux supports (i.e. sizeof(void *))
9002
*/
9003
if (!t || !btf_type_is_ptr(t))
9004
return -EINVAL;
9005
9006
if (IS_ENABLED(CONFIG_CFI_CLANG)) {
9007
/* Ensure the destructor kfunc type matches btf_dtor_kfunc_t */
9008
t = btf_type_by_id(btf, t->type);
9009
if (!btf_type_is_void(t))
9010
return -EINVAL;
9011
}
9012
}
9013
return 0;
9014
}
9015
9016
/* This function must be invoked only from initcalls/module init functions */
9017
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
9018
struct module *owner)
9019
{
9020
struct btf_id_dtor_kfunc_tab *tab;
9021
struct btf *btf;
9022
u32 tab_cnt, i;
9023
int ret;
9024
9025
btf = btf_get_module_btf(owner);
9026
if (!btf)
9027
return check_btf_kconfigs(owner, "dtor kfuncs");
9028
if (IS_ERR(btf))
9029
return PTR_ERR(btf);
9030
9031
if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
9032
pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
9033
ret = -E2BIG;
9034
goto end;
9035
}
9036
9037
/* Ensure that the prototype of dtor kfuncs being registered is sane */
9038
ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
9039
if (ret < 0)
9040
goto end;
9041
9042
tab = btf->dtor_kfunc_tab;
9043
/* Only one call allowed for modules */
9044
if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
9045
ret = -EINVAL;
9046
goto end;
9047
}
9048
9049
tab_cnt = tab ? tab->cnt : 0;
9050
if (tab_cnt > U32_MAX - add_cnt) {
9051
ret = -EOVERFLOW;
9052
goto end;
9053
}
9054
if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
9055
pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
9056
ret = -E2BIG;
9057
goto end;
9058
}
9059
9060
tab = krealloc(btf->dtor_kfunc_tab,
9061
struct_size(tab, dtors, tab_cnt + add_cnt),
9062
GFP_KERNEL | __GFP_NOWARN);
9063
if (!tab) {
9064
ret = -ENOMEM;
9065
goto end;
9066
}
9067
9068
if (!btf->dtor_kfunc_tab)
9069
tab->cnt = 0;
9070
btf->dtor_kfunc_tab = tab;
9071
9072
memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
9073
9074
/* remap BTF ids based on BTF relocation (if any) */
9075
for (i = tab_cnt; i < tab_cnt + add_cnt; i++) {
9076
tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
9077
tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
9078
}
9079
9080
tab->cnt += add_cnt;
9081
9082
sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
9083
9084
end:
9085
if (ret)
9086
btf_free_dtor_kfunc_tab(btf);
9087
btf_put(btf);
9088
return ret;
9089
}
9090
EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
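/* Usage sketch (illustrative, with placeholder names): callers pair the BTF
* ID of a type with the BTF ID of its destructor kfunc and register the
* array from an initcall or module init function:
*
*    BTF_ID_LIST(my_dtor_ids)
*    BTF_ID(struct, my_obj)
*    BTF_ID(func, bpf_my_obj_release)
*
*    static int __init my_mod_init(void)
*    {
*            const struct btf_id_dtor_kfunc my_dtors[] = {
*                    { .btf_id = my_dtor_ids[0], .kfunc_btf_id = my_dtor_ids[1] },
*            };
*
*            return register_btf_id_dtor_kfuncs(my_dtors, ARRAY_SIZE(my_dtors), THIS_MODULE);
*    }
*/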
9091
9092
#define MAX_TYPES_ARE_COMPAT_DEPTH 2
9093
9094
/* Check local and target types for compatibility. This check is used for
9095
* type-based CO-RE relocations and follows slightly different rules than
9096
* field-based relocations. This function assumes that root types were already
9097
* checked for name match. Beyond that initial root-level name check, names
9098
* are completely ignored. Compatibility rules are as follows:
9099
* - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
9100
* kind should match for local and target types (i.e., STRUCT is not
9101
* compatible with UNION);
9102
* - for ENUMs/ENUM64s, the size is ignored;
9103
* - for INT, size and signedness are ignored;
9104
* - for ARRAY, dimensionality is ignored, element types are checked for
9105
* compatibility recursively;
9106
* - CONST/VOLATILE/RESTRICT modifiers are ignored;
9107
* - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
9108
* - FUNC_PROTOs are compatible if they have compatible signature: same
9109
* number of input args and compatible return and argument types.
9110
* These rules are not set in stone and probably will be adjusted as we get
9111
* more experience with using BPF CO-RE relocations.
9112
*/
9113
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
9114
const struct btf *targ_btf, __u32 targ_id)
9115
{
9116
return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
9117
MAX_TYPES_ARE_COMPAT_DEPTH);
9118
}
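/* For instance (per the rules above): a local 'struct sk_buff' definition
* that lists only a subset of members is still compatible with the kernel's
* 'struct sk_buff', since members are not compared here, whereas a local
* UNION of the same name would not be, because the kind must match.
*/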
9119
9120
#define MAX_TYPES_MATCH_DEPTH 2
9121
9122
int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
9123
const struct btf *targ_btf, u32 targ_id)
9124
{
9125
return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
9126
MAX_TYPES_MATCH_DEPTH);
9127
}
9128
9129
static bool bpf_core_is_flavor_sep(const char *s)
9130
{
9131
/* check X___Y name pattern, where X and Y are not underscores */
9132
return s[0] != '_' && /* X */
9133
s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
9134
s[4] != '_'; /* Y */
9135
}
9136
9137
size_t bpf_core_essential_name_len(const char *name)
9138
{
9139
size_t n = strlen(name);
9140
int i;
9141
9142
for (i = n - 5; i >= 0; i--) {
9143
if (bpf_core_is_flavor_sep(name + i))
9144
return i + 1;
9145
}
9146
return n;
9147
}
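/* e.g. both "task_struct" and a flavored "task_struct___local_copy" have the
* essential name "task_struct" (length 11), so CO-RE candidate search treats
* them as referring to the same kernel type.
*/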
9148
9149
static void bpf_free_cands(struct bpf_cand_cache *cands)
9150
{
9151
if (!cands->cnt)
9152
/* empty candidate array was allocated on stack */
9153
return;
9154
kfree(cands);
9155
}
9156
9157
static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
9158
{
9159
kfree(cands->name);
9160
kfree(cands);
9161
}
9162
9163
#define VMLINUX_CAND_CACHE_SIZE 31
9164
static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
9165
9166
#define MODULE_CAND_CACHE_SIZE 31
9167
static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
9168
9169
static void __print_cand_cache(struct bpf_verifier_log *log,
9170
struct bpf_cand_cache **cache,
9171
int cache_size)
9172
{
9173
struct bpf_cand_cache *cc;
9174
int i, j;
9175
9176
for (i = 0; i < cache_size; i++) {
9177
cc = cache[i];
9178
if (!cc)
9179
continue;
9180
bpf_log(log, "[%d]%s(", i, cc->name);
9181
for (j = 0; j < cc->cnt; j++) {
9182
bpf_log(log, "%d", cc->cands[j].id);
9183
if (j < cc->cnt - 1)
9184
bpf_log(log, " ");
9185
}
9186
bpf_log(log, "), ");
9187
}
9188
}
9189
9190
static void print_cand_cache(struct bpf_verifier_log *log)
9191
{
9192
mutex_lock(&cand_cache_mutex);
9193
bpf_log(log, "vmlinux_cand_cache:");
9194
__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9195
bpf_log(log, "\nmodule_cand_cache:");
9196
__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9197
bpf_log(log, "\n");
9198
mutex_unlock(&cand_cache_mutex);
9199
}
9200
9201
static u32 hash_cands(struct bpf_cand_cache *cands)
9202
{
9203
return jhash(cands->name, cands->name_len, 0);
9204
}
9205
9206
static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
9207
struct bpf_cand_cache **cache,
9208
int cache_size)
9209
{
9210
struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
9211
9212
if (cc && cc->name_len == cands->name_len &&
9213
!strncmp(cc->name, cands->name, cands->name_len))
9214
return cc;
9215
return NULL;
9216
}
9217
9218
static size_t sizeof_cands(int cnt)
9219
{
9220
return offsetof(struct bpf_cand_cache, cands[cnt]);
9221
}
9222
9223
static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
9224
struct bpf_cand_cache **cache,
9225
int cache_size)
9226
{
9227
struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
9228
9229
if (*cc) {
9230
bpf_free_cands_from_cache(*cc);
9231
*cc = NULL;
9232
}
9233
new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL_ACCOUNT);
9234
if (!new_cands) {
9235
bpf_free_cands(cands);
9236
return ERR_PTR(-ENOMEM);
9237
}
9238
/* strdup the name, since it will stay in the cache;
* cands->name points to strings in the prog's BTF, and the prog can be unloaded.
9240
*/
9241
new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL_ACCOUNT);
9242
bpf_free_cands(cands);
9243
if (!new_cands->name) {
9244
kfree(new_cands);
9245
return ERR_PTR(-ENOMEM);
9246
}
9247
*cc = new_cands;
9248
return new_cands;
9249
}
9250
9251
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
9252
static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
9253
int cache_size)
9254
{
9255
struct bpf_cand_cache *cc;
9256
int i, j;
9257
9258
for (i = 0; i < cache_size; i++) {
9259
cc = cache[i];
9260
if (!cc)
9261
continue;
9262
if (!btf) {
9263
/* when a new module is loaded, purge all of module_cand_cache,
* since the new module might have candidates whose names
* match cached cands.
9266
*/
9267
bpf_free_cands_from_cache(cc);
9268
cache[i] = NULL;
9269
continue;
9270
}
9271
/* when module is unloaded purge cache entries
9272
* that match module's btf
9273
*/
9274
for (j = 0; j < cc->cnt; j++)
9275
if (cc->cands[j].btf == btf) {
9276
bpf_free_cands_from_cache(cc);
9277
cache[i] = NULL;
9278
break;
9279
}
9280
}
9281
9282
}
9283
9284
static void purge_cand_cache(struct btf *btf)
9285
{
9286
mutex_lock(&cand_cache_mutex);
9287
__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9288
mutex_unlock(&cand_cache_mutex);
9289
}
9290
#endif
9291
9292
static struct bpf_cand_cache *
9293
bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
9294
int targ_start_id)
9295
{
9296
struct bpf_cand_cache *new_cands;
9297
const struct btf_type *t;
9298
const char *targ_name;
9299
size_t targ_essent_len;
9300
int n, i;
9301
9302
n = btf_nr_types(targ_btf);
9303
for (i = targ_start_id; i < n; i++) {
9304
t = btf_type_by_id(targ_btf, i);
9305
if (btf_kind(t) != cands->kind)
9306
continue;
9307
9308
targ_name = btf_name_by_offset(targ_btf, t->name_off);
9309
if (!targ_name)
9310
continue;
9311
9312
/* the resched point is before strncmp to make sure that a search
* for a non-existing name still gets a chance to schedule().
9314
*/
9315
cond_resched();
9316
9317
if (strncmp(cands->name, targ_name, cands->name_len) != 0)
9318
continue;
9319
9320
targ_essent_len = bpf_core_essential_name_len(targ_name);
9321
if (targ_essent_len != cands->name_len)
9322
continue;
9323
9324
/* most of the time there is only one candidate for a given kind+name pair */
9325
new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL_ACCOUNT);
9326
if (!new_cands) {
9327
bpf_free_cands(cands);
9328
return ERR_PTR(-ENOMEM);
9329
}
9330
9331
memcpy(new_cands, cands, sizeof_cands(cands->cnt));
9332
bpf_free_cands(cands);
9333
cands = new_cands;
9334
cands->cands[cands->cnt].btf = targ_btf;
9335
cands->cands[cands->cnt].id = i;
9336
cands->cnt++;
9337
}
9338
return cands;
9339
}
9340
9341
static struct bpf_cand_cache *
9342
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
9343
{
9344
struct bpf_cand_cache *cands, *cc, local_cand = {};
9345
const struct btf *local_btf = ctx->btf;
9346
const struct btf_type *local_type;
9347
const struct btf *main_btf;
9348
size_t local_essent_len;
9349
struct btf *mod_btf;
9350
const char *name;
9351
int id;
9352
9353
main_btf = bpf_get_btf_vmlinux();
9354
if (IS_ERR(main_btf))
9355
return ERR_CAST(main_btf);
9356
if (!main_btf)
9357
return ERR_PTR(-EINVAL);
9358
9359
local_type = btf_type_by_id(local_btf, local_type_id);
9360
if (!local_type)
9361
return ERR_PTR(-EINVAL);
9362
9363
name = btf_name_by_offset(local_btf, local_type->name_off);
9364
if (str_is_empty(name))
9365
return ERR_PTR(-EINVAL);
9366
local_essent_len = bpf_core_essential_name_len(name);
9367
9368
cands = &local_cand;
9369
cands->name = name;
9370
cands->kind = btf_kind(local_type);
9371
cands->name_len = local_essent_len;
9372
9373
cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9374
/* cands is a pointer to stack here */
9375
if (cc) {
9376
if (cc->cnt)
9377
return cc;
9378
goto check_modules;
9379
}
9380
9381
/* Attempt to find target candidates in vmlinux BTF first */
9382
cands = bpf_core_add_cands(cands, main_btf, btf_named_start_id(main_btf, true));
9383
if (IS_ERR(cands))
9384
return ERR_CAST(cands);
9385
9386
/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
9387
9388
/* populate cache even when cands->cnt == 0 */
9389
cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9390
if (IS_ERR(cc))
9391
return ERR_CAST(cc);
9392
9393
/* if vmlinux BTF has any candidate, don't go for module BTFs */
9394
if (cc->cnt)
9395
return cc;
9396
9397
check_modules:
9398
/* cands is a pointer to stack here and cands->cnt == 0 */
9399
cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9400
if (cc)
9401
/* if cache has it return it even if cc->cnt == 0 */
9402
return cc;
9403
9404
/* If candidate is not found in vmlinux's BTF then search in module's BTFs */
9405
spin_lock_bh(&btf_idr_lock);
9406
idr_for_each_entry(&btf_idr, mod_btf, id) {
9407
if (!btf_is_module(mod_btf))
9408
continue;
9409
/* linear search could be slow, hence unlock/lock
* the IDR to avoid holding it for too long
9411
*/
9412
btf_get(mod_btf);
9413
spin_unlock_bh(&btf_idr_lock);
9414
cands = bpf_core_add_cands(cands, mod_btf, btf_named_start_id(mod_btf, true));
9415
btf_put(mod_btf);
9416
if (IS_ERR(cands))
9417
return ERR_CAST(cands);
9418
spin_lock_bh(&btf_idr_lock);
9419
}
9420
spin_unlock_bh(&btf_idr_lock);
9421
/* cands is a pointer to kmalloced memory here if cands->cnt > 0
9422
* or a pointer to the stack if cands->cnt == 0.
9423
* Copy it into the cache even when cands->cnt == 0 and
9424
* return the result.
9425
*/
9426
return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9427
}
9428
9429
int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
9430
int relo_idx, void *insn)
9431
{
9432
bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
9433
struct bpf_core_cand_list cands = {};
9434
struct bpf_core_relo_res targ_res;
9435
struct bpf_core_spec *specs;
9436
const struct btf_type *type;
9437
int err;
9438
9439
/* ~4k of temp memory is necessary to convert an LLVM spec like "0:1:0:5"
9440
* into arrays of btf_ids of struct fields and array indices.
9441
*/
9442
specs = kcalloc(3, sizeof(*specs), GFP_KERNEL_ACCOUNT);
9443
if (!specs)
9444
return -ENOMEM;
9445
9446
type = btf_type_by_id(ctx->btf, relo->type_id);
9447
if (!type) {
9448
bpf_log(ctx->log, "relo #%u: bad type id %u\n",
9449
relo_idx, relo->type_id);
9450
kfree(specs);
9451
return -EINVAL;
9452
}
9453
9454
if (need_cands) {
9455
struct bpf_cand_cache *cc;
9456
int i;
9457
9458
mutex_lock(&cand_cache_mutex);
9459
cc = bpf_core_find_cands(ctx, relo->type_id);
9460
if (IS_ERR(cc)) {
9461
bpf_log(ctx->log, "target candidate search failed for %d\n",
9462
relo->type_id);
9463
err = PTR_ERR(cc);
9464
goto out;
9465
}
9466
if (cc->cnt) {
9467
cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL_ACCOUNT);
9468
if (!cands.cands) {
9469
err = -ENOMEM;
9470
goto out;
9471
}
9472
}
9473
for (i = 0; i < cc->cnt; i++) {
9474
bpf_log(ctx->log,
9475
"CO-RE relocating %s %s: found target candidate [%d]\n",
9476
btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
9477
cands.cands[i].btf = cc->cands[i].btf;
9478
cands.cands[i].id = cc->cands[i].id;
9479
}
9480
cands.len = cc->cnt;
9481
/* cand_cache_mutex needs to span the cache lookup and
9482
* copy of btf pointer into bpf_core_cand_list,
9483
* since a module can be unloaded while bpf_core_calc_relo_insn
* is working with the module's btf.
9485
*/
9486
}
9487
9488
err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
9489
&targ_res);
9490
if (err)
9491
goto out;
9492
9493
err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
9494
&targ_res);
9495
9496
out:
9497
kfree(specs);
9498
if (need_cands) {
9499
kfree(cands.cands);
9500
mutex_unlock(&cand_cache_mutex);
9501
if (ctx->log->level & BPF_LOG_LEVEL2)
9502
print_cand_cache(ctx->log);
9503
}
9504
return err;
9505
}
9506
9507
bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
9508
const struct bpf_reg_state *reg,
9509
const char *field_name, u32 btf_id, const char *suffix)
9510
{
9511
struct btf *btf = reg->btf;
9512
const struct btf_type *walk_type, *safe_type;
9513
const char *tname;
9514
char safe_tname[64];
9515
long ret, safe_id;
9516
const struct btf_member *member;
9517
u32 i;
9518
9519
walk_type = btf_type_by_id(btf, reg->btf_id);
9520
if (!walk_type)
9521
return false;
9522
9523
tname = btf_name_by_offset(btf, walk_type->name_off);
9524
9525
ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
9526
if (ret >= sizeof(safe_tname))
9527
return false;
9528
9529
safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
9530
if (safe_id < 0)
9531
return false;
9532
9533
safe_type = btf_type_by_id(btf, safe_id);
9534
if (!safe_type)
9535
return false;
9536
9537
for_each_member(i, safe_type, member) {
9538
const char *m_name = __btf_name_by_offset(btf, member->name_off);
9539
const struct btf_type *mtype = btf_type_by_id(btf, member->type);
9540
u32 id;
9541
9542
if (!btf_type_is_ptr(mtype))
9543
continue;
9544
9545
btf_type_skip_modifiers(btf, mtype->type, &id);
9546
/* If we match on both type and name, the field is considered trusted. */
9547
if (btf_id == id && !strcmp(field_name, m_name))
9548
return true;
9549
}
9550
9551
return false;
9552
}
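/* The 'safe' companion types checked above are defined by the verifier
* (see BTF_TYPE_SAFE_TRUSTED and related macros in kernel/bpf/verifier.c,
* assuming the current layout): walking a pointer field of e.g. a trusted
* struct is only allowed if a struct named "<type><suffix>" exists and lists
* that field.
*/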
9553
9554
bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
9555
const struct btf *reg_btf, u32 reg_id,
9556
const struct btf *arg_btf, u32 arg_id)
9557
{
9558
const char *reg_name, *arg_name, *search_needle;
9559
const struct btf_type *reg_type, *arg_type;
9560
int reg_len, arg_len, cmp_len;
9561
size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);
9562
9563
reg_type = btf_type_by_id(reg_btf, reg_id);
9564
if (!reg_type)
9565
return false;
9566
9567
arg_type = btf_type_by_id(arg_btf, arg_id);
9568
if (!arg_type)
9569
return false;
9570
9571
reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
9572
arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
9573
9574
reg_len = strlen(reg_name);
9575
arg_len = strlen(arg_name);
9576
9577
/* Exactly one of the two type names may be suffixed with ___init, so
9578
* if the strings are the same size, they can't possibly be no-cast
9579
* aliases of one another. If you have two of the same type names, e.g.
9580
* they're both nf_conn___init, it would be improper to return true
9581
* because they are _not_ no-cast aliases, they are the same type.
9582
*/
9583
if (reg_len == arg_len)
9584
return false;
9585
9586
/* Either of the two names must be the other name, suffixed with ___init. */
9587
if ((reg_len != arg_len + pattern_len) &&
9588
(arg_len != reg_len + pattern_len))
9589
return false;
9590
9591
if (reg_len < arg_len) {
9592
search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX);
9593
cmp_len = reg_len;
9594
} else {
9595
search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX);
9596
cmp_len = arg_len;
9597
}
9598
9599
if (!search_needle)
9600
return false;
9601
9602
/* ___init suffix must come at the end of the name */
9603
if (*(search_needle + pattern_len) != '\0')
9604
return false;
9605
9606
return !strncmp(reg_name, arg_name, cmp_len);
9607
}
9608
9609
#ifdef CONFIG_BPF_JIT
9610
static int
9611
btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,
9612
struct bpf_verifier_log *log)
9613
{
9614
struct btf_struct_ops_tab *tab, *new_tab;
9615
int i, err;
9616
9617
tab = btf->struct_ops_tab;
9618
if (!tab) {
9619
tab = kzalloc(struct_size(tab, ops, 4), GFP_KERNEL);
9620
if (!tab)
9621
return -ENOMEM;
9622
tab->capacity = 4;
9623
btf->struct_ops_tab = tab;
9624
}
9625
9626
for (i = 0; i < tab->cnt; i++)
9627
if (tab->ops[i].st_ops == st_ops)
9628
return -EEXIST;
9629
9630
if (tab->cnt == tab->capacity) {
9631
new_tab = krealloc(tab,
9632
struct_size(tab, ops, tab->capacity * 2),
9633
GFP_KERNEL);
9634
if (!new_tab)
9635
return -ENOMEM;
9636
tab = new_tab;
9637
tab->capacity *= 2;
9638
btf->struct_ops_tab = tab;
9639
}
9640
9641
tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops;
9642
9643
err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log);
9644
if (err)
9645
return err;
9646
9647
btf->struct_ops_tab->cnt++;
9648
9649
return 0;
9650
}
9651
9652
const struct bpf_struct_ops_desc *
9653
bpf_struct_ops_find_value(struct btf *btf, u32 value_id)
9654
{
9655
const struct bpf_struct_ops_desc *st_ops_list;
9656
unsigned int i;
9657
u32 cnt;
9658
9659
if (!value_id)
9660
return NULL;
9661
if (!btf->struct_ops_tab)
9662
return NULL;
9663
9664
cnt = btf->struct_ops_tab->cnt;
9665
st_ops_list = btf->struct_ops_tab->ops;
9666
for (i = 0; i < cnt; i++) {
9667
if (st_ops_list[i].value_id == value_id)
9668
return &st_ops_list[i];
9669
}
9670
9671
return NULL;
9672
}
9673
9674
const struct bpf_struct_ops_desc *
9675
bpf_struct_ops_find(struct btf *btf, u32 type_id)
9676
{
9677
const struct bpf_struct_ops_desc *st_ops_list;
9678
unsigned int i;
9679
u32 cnt;
9680
9681
if (!type_id)
9682
return NULL;
9683
if (!btf->struct_ops_tab)
9684
return NULL;
9685
9686
cnt = btf->struct_ops_tab->cnt;
9687
st_ops_list = btf->struct_ops_tab->ops;
9688
for (i = 0; i < cnt; i++) {
9689
if (st_ops_list[i].type_id == type_id)
9690
return &st_ops_list[i];
9691
}
9692
9693
return NULL;
9694
}
9695
9696
int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops)
9697
{
9698
struct bpf_verifier_log *log;
9699
struct btf *btf;
9700
int err = 0;
9701
9702
btf = btf_get_module_btf(st_ops->owner);
9703
if (!btf)
9704
return check_btf_kconfigs(st_ops->owner, "struct_ops");
9705
if (IS_ERR(btf))
9706
return PTR_ERR(btf);
9707
9708
log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN);
9709
if (!log) {
9710
err = -ENOMEM;
9711
goto errout;
9712
}
9713
9714
log->level = BPF_LOG_KERNEL;
9715
9716
err = btf_add_struct_ops(btf, st_ops, log);
9717
9718
errout:
9719
kfree(log);
9720
btf_put(btf);
9721
9722
return err;
9723
}
9724
EXPORT_SYMBOL_GPL(__register_bpf_struct_ops);
9725
#endif
9726
9727
bool btf_param_match_suffix(const struct btf *btf,
9728
const struct btf_param *arg,
9729
const char *suffix)
9730
{
9731
int suffix_len = strlen(suffix), len;
9732
const char *param_name;
9733
9734
/* In the future, this can be ported to use BTF tagging */
9735
param_name = btf_name_by_offset(btf, arg->name_off);
9736
if (str_is_empty(param_name))
9737
return false;
9738
len = strlen(param_name);
9739
if (len <= suffix_len)
9740
return false;
9741
param_name += len - suffix_len;
9742
return !strncmp(param_name, suffix, suffix_len);
9743
}
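/* Callers use this for convention-based parameter annotations, e.g. the
* verifier recognizes kfunc size arguments by matching parameter names
* against the "__sz" (and "__szk") suffixes, so "void *data, u32 data__sz"
* marks data__sz as the size of the data pointer (suffix names as currently
* used by the verifier; see kernel/bpf/verifier.c).
*/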
9744
9745