GitHub Repository: torvalds/linux
Path: blob/master/tools/bpf/bpftool/gen.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>

#include "json_writer.h"
#include "main.h"

#define MAX_OBJ_NAME_LEN 64

static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum(name[i]) && name[i] != '_')
			name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);

	if (n1 < n2)
		return false;

	for (i = 0; i < n2; i++) {
		if (str[n1 - i - 1] != suffix[n2 - i - 1])
			return false;
	}

	return true;
}

static const struct btf_type *
resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t;

	t = skip_mods_and_typedefs(btf, id, NULL);
	if (!btf_is_ptr(t))
		return NULL;

	t = skip_mods_and_typedefs(btf, t->type, res_id);

	return btf_is_func_proto(t) ? t : NULL;
}

static void get_obj_name(char *name, const char *file)
{
	char file_copy[PATH_MAX];

	/* Using basename() POSIX version to be more portable. */
	strncpy(file_copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0';
	strncpy(name, basename(file_copy), MAX_OBJ_NAME_LEN - 1)[MAX_OBJ_NAME_LEN - 1] = '\0';
	if (str_has_suffix(name, ".o"))
		name[strlen(name) - 2] = '\0';
	sanitize_identifier(name);
}
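
/* Illustrative note (editor-added, not in the original source): for a
 * hypothetical input file "bpf/my_prog.bpf.o", get_obj_name() takes the
 * basename ("my_prog.bpf.o"), strips the ".o" suffix ("my_prog.bpf"), and
 * sanitizes the remainder into a valid C identifier ("my_prog_bpf"),
 * which then names the skeleton's types and functions.
 */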

static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
	int i;

	sprintf(guard, "__%s_%s__", obj_name, suffix);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper(guard[i]);
}

static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	/* recognize hard coded LLVM section name */
	if (strcmp(sec_name, ".addr_space.1") == 0) {
		/* this is the name to use in skeleton */
		snprintf(buf, buf_sz, "arena");
		return true;
	}
	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}
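
/* Illustrative note (editor-added, not in the original source): these
 * helpers map ELF section names to C identifiers used in the skeleton.
 * For example, ".rodata.str1.1" becomes "rodata_str1_1" and ".kconfig"
 * becomes "kconfig"; the leading dot is dropped and the remaining dots
 * become underscores via sanitize_identifier().
 */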

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("	struct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, its name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("	} *%s;\n", sec_ident);
	return 0;
}
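
/* Illustrative note (editor-added, not in the original source): for a BPF
 * object with a global "int counter;" in .bss, codegen_datasec_def() would
 * emit roughly:
 *
 *	struct my_obj__bss {
 *		int counter;
 *	} *bss;
 *
 * with __pad fields inserted wherever recorded BTF offsets require them.
 */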

static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
{
	int n = btf__type_cnt(btf), i;
	char sec_ident[256];

	for (i = 1; i < n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (!btf_is_datasec(t))
			continue;

		name = btf__str_by_offset(btf, t->name_off);
		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
			continue;

		if (strcmp(sec_ident, map_ident) == 0)
			return t;
	}
	return NULL;
}

static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
	size_t tmp_sz;

	if (bpf_map__type(map) == BPF_MAP_TYPE_ARENA && bpf_map__initial_value(map, &tmp_sz)) {
		snprintf(buf, sz, "arena");
		return true;
	}

	if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
		return false;

	if (!get_map_ident(map, buf, sz))
		return false;

	return true;
}

static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char map_ident[256];
	int err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("	struct %s__%s {\n", obj_name, map_ident);
			printf("	} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}


out:
	btf_dump__free(d);
	return err;
}

static bool btf_is_ptr_to_func_proto(const struct btf *btf,
				     const struct btf_type *v)
{
	return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
}

static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec, *var;
	const struct btf_var_secinfo *sec_var;
	int i, err = 0, vlen;
	char map_ident[256], sec_ident[256];
	bool strip_mods = false, needs_typeof = false;
	const char *sec_name, *var_name;
	__u32 var_type_id;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec)
			continue;

		sec_name = btf__name_by_offset(btf, sec->name_off);
		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
			continue;

		strip_mods = strcmp(sec_name, ".kconfig") != 0;
		printf("	struct %s__%s {\n", obj_name, sec_ident);

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);
		for (i = 0; i < vlen; i++, sec_var++) {
			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
				.indent_level = 2,
				.strip_mods = strip_mods,
				/* we'll print the name separately */
				.field_name = "",
			);

			var = btf__type_by_id(btf, sec_var->type);
			var_name = btf__name_by_offset(btf, var->name_off);
			var_type_id = var->type;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* The datasec member has KIND_VAR but we want the
			 * underlying type of the variable (e.g. KIND_INT).
			 */
			var = skip_mods_and_typedefs(btf, var->type, NULL);

			printf("\t\t");
			/* Func and array members require special handling.
			 * Instead of producing `typename *var`, they produce
			 * `typeof(typename) *var`. This allows us to keep a
			 * similar syntax where the identifier is just prefixed
			 * by *, allowing us to ignore C declaration minutiae.
			 */
			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
			if (needs_typeof)
				printf("__typeof__(");

			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
			if (err)
				goto out;

			if (needs_typeof)
				printf(")");

			printf(" *%s;\n", var_name);
		}
		printf("	} %s;\n", sec_ident);
	}

out:
	btf_dump__free(d);
	return err;
}

static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}
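
/* Illustrative note (editor-added, not in the original source): codegen()
 * lets the emitters below embed generated C in indented multi-line string
 * literals. The tabs on the first template line define a "baseline" that is
 * stripped from every subsequent line, so a call written (hypothetically) as
 *
 *	codegen("\
 *		\n\
 *		int x;	\n\
 *		", ...);
 *
 * prints "int x;" at column zero, with trailing whitespace trimmed. Tabs
 * beyond the baseline survive and become indentation in the generated file.
 */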

static void print_hex(const char *data, int data_sz)
{
	int i, len;

	for (i = 0, len = 0; i < data_sz; i++) {
		int w = data[i] ? 4 : 2;

		len += w;
		if (len > 78) {
			printf("\\\n");
			len = w;
		}
		if (!data[i])
			printf("\\0");
		else
			printf("\\x%02x", (unsigned char)data[i]);
	}
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}
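
/* Illustrative note (editor-added, not in the original source): for a
 * hypothetical map with value_size = 12 and max_entries = 1, the value size
 * is first rounded up to 8-byte granularity (16), multiplied by the entry
 * count (16), then rounded up to the page size, giving 4096 on a system
 * with 4 KiB pages.
 */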

/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_map *map;
	struct btf_var_secinfo *sec_var;
	int i, vlen;
	const struct btf_type *sec;
	char map_ident[256], var_ident[256];

	if (!btf)
		return;

	codegen("\
		\n\
		__attribute__((unused)) static void \n\
		%1$s__assert(struct %1$s *s __attribute__((unused))) \n\
		{ \n\
		#ifdef __cplusplus \n\
		#define _Static_assert static_assert \n\
		#endif \n\
		", obj_name);

	bpf_object__for_each_map(map, obj) {
		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec) {
			/* best effort, couldn't find the type for this map */
			continue;
		}

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);

		for (i = 0; i < vlen; i++, sec_var++) {
			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
			const char *var_name = btf__name_by_offset(btf, var->name_off);
			long var_size;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			var_size = btf__resolve_size(btf, var->type);
			if (var_size < 0)
				continue;

			var_ident[0] = '\0';
			strncat(var_ident, var_name, sizeof(var_ident) - 1);
			sanitize_identifier(var_ident);

			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
			       map_ident, var_ident, var_size, var_ident);
		}
	}
	codegen("\
		\n\
		#ifdef __cplusplus \n\
		#undef _Static_assert \n\
		#endif \n\
		} \n\
		");
}
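
/* Illustrative note (editor-added, not in the original source): for a
 * global "int counter;" in .bss, the generated assert function would
 * contain a line such as:
 *
 *	_Static_assert(sizeof(s->bss->counter) == 4, "unexpected size of 'counter'");
 *
 * catching skeleton/object layout mismatches at compile time.
 */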

static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int \n\
			%1$s__%2$s__attach(struct %1$s *skel) \n\
			{ \n\
				int prog_fd = skel->progs.%2$s.prog_fd; \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
			\n\
				if (fd > 0) \n\
					skel->links.%1$s_fd = fd; \n\
				return fd; \n\
			} \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *skel) \n\
		{ \n\
			int ret = 0; \n\
			\n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0; \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *skel) \n\
		{ \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd); \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		} \n\
		");
}

static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			%1$s__detach(skel); \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd); \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zu);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd); \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel); \n\
		} \n\
		",
		obj_name);
}

static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_load_and_run_opts sopts = {};
	char sig_buf[MAX_SIG_SIZE];
	__u8 prog_sha[SHA256_DIGEST_LENGTH];
	struct bpf_map *map;

	char ident[256];
	int err = 0;

	if (sign_progs)
		opts.gen_hash = true;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}

	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		}; \n\
		", obj_name);


	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			struct %1$s *skel; \n\
			\n\
			skel = skel_alloc(sizeof(*skel)); \n\
			if (!skel) \n\
				goto cleanup; \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!is_mmapable_map(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
				{ \n\
					static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
			");
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
			\n\
			\"; \n\
			\n\
					skel->%1$s = skel_prep_map_data((void *)data, %2$zd,\n\
									sizeof(data) - 1);\n\
					if (!skel->%1$s) \n\
						goto cleanup; \n\
					skel->maps.%1$s.initial_value = (__u64) (long) skel->%1$s;\n\
				} \n\
			", ident, bpf_map_mmap_sz(map));
	}
	codegen("\
		\n\
			return skel; \n\
		cleanup: \n\
			%1$s__destroy(skel); \n\
			return NULL; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *skel) \n\
		{ \n\
			struct bpf_load_and_run_opts opts = {}; \n\
			int err; \n\
			static const char opts_data[] __attribute__((__aligned__(8))) = \"\\\n\
		",
		obj_name);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\"; \n\
			static const char opts_insn[] __attribute__((__aligned__(8))) = \"\\\n\
		");
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\";\n");

	if (sign_progs) {
		sopts.insns = opts.insns;
		sopts.insns_sz = opts.insns_sz;
		sopts.excl_prog_hash = prog_sha;
		sopts.excl_prog_hash_sz = sizeof(prog_sha);
		sopts.signature = sig_buf;
		sopts.signature_sz = MAX_SIG_SIZE;

		err = bpftool_prog_sign(&sopts);
		if (err < 0) {
			p_err("failed to sign program");
			goto out;
		}

		codegen("\
			\n\
				static const char opts_sig[] __attribute__((__aligned__(8))) = \"\\\n\
			");
		print_hex((const void *)sig_buf, sopts.signature_sz);
		codegen("\
			\n\
			\";\n");

		codegen("\
			\n\
				static const char opts_excl_hash[] __attribute__((__aligned__(8))) = \"\\\n\
			");
		print_hex((const void *)prog_sha, sizeof(prog_sha));
		codegen("\
			\n\
			\";\n");

		codegen("\
			\n\
				opts.signature = (void *)opts_sig; \n\
				opts.signature_sz = sizeof(opts_sig) - 1; \n\
				opts.excl_prog_hash = (void *)opts_excl_hash; \n\
				opts.excl_prog_hash_sz = sizeof(opts_excl_hash) - 1; \n\
				opts.keyring_id = skel->keyring_id; \n\
			");
	}

	codegen("\
		\n\
			opts.ctx = (struct bpf_loader_ctx *)skel; \n\
			opts.data_sz = sizeof(opts_data) - 1; \n\
			opts.data = (void *)opts_data; \n\
			opts.insns_sz = sizeof(opts_insn) - 1; \n\
			opts.insns = (void *)opts_insn; \n\
			\n\
			err = bpf_load_and_run(&opts); \n\
			if (err < 0) \n\
				return err; \n\
		");
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!is_mmapable_map(map, ident, sizeof(ident)))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
			\n\
				skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
								    %2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
				if (!skel->%1$s) \n\
					return -ENOMEM; \n\
			",
			ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *skel; \n\
			\n\
			skel = %1$s__open(); \n\
			if (!skel) \n\
				return NULL; \n\
			if (%1$s__load(skel)) { \n\
				%1$s__destroy(skel); \n\
				return NULL; \n\
			} \n\
			return skel; \n\
		} \n\
		\n\
		", obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %s */ \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}

static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool populate_links)
{
	struct bpf_map *map;
	char ident[256];
	size_t i, map_sz;

	if (!map_cnt)
		return;

	/* for backward compatibility with old libbpf versions that don't
	 * handle new BPF skeleton with new struct bpf_map_skeleton definition
	 * that includes link field, avoid specifying new increased size,
	 * unless we absolutely have to (i.e., if there are struct_ops maps
	 * present)
	 */
	map_sz = offsetof(struct bpf_map_skeleton, link);
	if (populate_links) {
		bpf_object__for_each_map(map, obj) {
			if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
				map_sz = sizeof(struct bpf_map_skeleton);
				break;
			}
		}
	}

	codegen("\
		\n\
		\n\
			/* maps */ \n\
			s->map_cnt = %zu; \n\
			s->map_skel_sz = %zu; \n\
			s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\
					sizeof(*s->maps) > %zu ? sizeof(*s->maps) : %zu);\n\
			if (!s->maps) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		map_cnt, map_sz, map_sz, map_sz
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
			\n\
				map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
				map->name = \"%s\"; \n\
				map->map = &obj->maps.%s; \n\
			",
			i, bpf_map__name(map), ident);
		/* memory-mapped internal maps */
		if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
			printf("\tmap->mmaped = (void **)&obj->%s;\n", ident);
		}

		if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
			codegen("\
				\n\
					map->link = &obj->links.%s; \n\
				", ident);
		}
		i++;
	}
}

static void
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
{
	struct bpf_program *prog;
	int i;

	if (!prog_cnt)
		return;

	codegen("\
		\n\
		\n\
			/* programs */ \n\
			s->prog_cnt = %zu; \n\
			s->prog_skel_sz = sizeof(*s->progs); \n\
			s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
			if (!s->progs) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		prog_cnt
	);
	i = 0;
	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
			\n\
				s->progs[%1$zu].name = \"%2$s\"; \n\
				s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
			",
			i, bpf_program__name(prog));

		if (populate_links) {
			codegen("\
				\n\
					s->progs[%1$zu].link = &obj->links.%2$s;\n\
				",
				i, bpf_program__name(prog));
		}
		i++;
	}
}

static int walk_st_ops_shadow_vars(struct btf *btf, const char *ident,
				   const struct btf_type *map_type, __u32 map_type_id)
{
	LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .indent_level = 3);
	const struct btf_type *member_type;
	__u32 offset, next_offset = 0;
	const struct btf_member *m;
	struct btf_dump *d = NULL;
	const char *member_name;
	__u32 member_type_id;
	int i, err = 0, n;
	int size;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	n = btf_vlen(map_type);
	for (i = 0, m = btf_members(map_type); i < n; i++, m++) {
		member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id);
		member_name = btf__name_by_offset(btf, m->name_off);

		offset = m->offset / 8;
		if (next_offset < offset)
			printf("\t\t\tchar __padding_%d[%u];\n", i, offset - next_offset);

		switch (btf_kind(member_type)) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			/* scalar type */
			printf("\t\t\t");
			opts.field_name = member_name;
			err = btf_dump__emit_type_decl(d, member_type_id, &opts);
			if (err) {
				p_err("Failed to emit type declaration for %s: %d", member_name, err);
				goto out;
			}
			printf(";\n");

			size = btf__resolve_size(btf, member_type_id);
			if (size < 0) {
				p_err("Failed to resolve size of %s: %d\n", member_name, size);
				err = size;
				goto out;
			}

			next_offset = offset + size;
			break;

		case BTF_KIND_PTR:
			if (resolve_func_ptr(btf, m->type, NULL)) {
				/* Function pointer */
				printf("\t\t\tstruct bpf_program *%s;\n", member_name);

				next_offset = offset + sizeof(void *);
				break;
			}
			/* All pointer types are unsupported except for
			 * function pointers.
			 */
			fallthrough;

		default:
			/* Unsupported types
			 *
			 * Types other than scalar types and function
			 * pointers are currently not supported in order to
			 * prevent conflicts in the generated code caused
			 * by multiple definitions. For instance, if the
			 * struct type FOO is used in a struct_ops map,
			 * bpftool has to generate definitions for FOO,
			 * which may result in conflicts if FOO is defined
			 * in different skeleton files.
			 */
			size = btf__resolve_size(btf, member_type_id);
			if (size < 0) {
				p_err("Failed to resolve size of %s: %d\n", member_name, size);
				err = size;
				goto out;
			}
			printf("\t\t\tchar __unsupported_%d[%d];\n", i, size);

			next_offset = offset + size;
			break;
		}
	}

	/* Cannot fail since it must be a struct type */
	size = btf__resolve_size(btf, map_type_id);
	if (next_offset < (__u32)size)
		printf("\t\t\tchar __padding_end[%u];\n", size - next_offset);

out:
	btf_dump__free(d);

	return err;
}

/* Generate the pointer of the shadow type for a struct_ops map.
 *
 * This function adds a pointer of the shadow type for a struct_ops map.
 * The members of a struct_ops map can be exported through a pointer to a
 * shadow type. The user can access these members through the pointer.
 *
 * A shadow type does not include all members; only members of certain
 * types are included: scalar types and function pointers. The function
 * pointers are translated to pointers to struct bpf_program. The scalar
 * types are translated to the original type without any modifiers.
 *
 * Unsupported types will be translated to a char array to occupy the same
 * space as the original field, being renamed as __unsupported_*. The user
 * should treat these fields as opaque data.
 */
static int gen_st_ops_shadow_type(const char *obj_name, struct btf *btf, const char *ident,
				  const struct bpf_map *map)
{
	const struct btf_type *map_type;
	const char *type_name;
	__u32 map_type_id;
	int err;

	map_type_id = bpf_map__btf_value_type_id(map);
	if (map_type_id == 0)
		return -EINVAL;
	map_type = btf__type_by_id(btf, map_type_id);
	if (!map_type)
		return -EINVAL;

	type_name = btf__name_by_offset(btf, map_type->name_off);

	printf("\t\tstruct %s__%s__%s {\n", obj_name, ident, type_name);

	err = walk_st_ops_shadow_vars(btf, ident, map_type, map_type_id);
	if (err)
		return err;

	printf("\t\t} *%s;\n", ident);

	return 0;
}
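
/* Illustrative note (editor-added, not in the original source): for a
 * hypothetical struct_ops map "ops" whose value type has a scalar field
 * and a callback, the emitted shadow type would look roughly like:
 *
 *	struct my_obj__ops__my_ops {
 *		int flags;
 *		struct bpf_program *handler;
 *	} *ops;
 *
 * letting userspace adjust members before load via skel->struct_ops.ops.
 */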

static int gen_st_ops_shadow(const char *obj_name, struct btf *btf, struct bpf_object *obj)
{
	int err, st_ops_cnt = 0;
	struct bpf_map *map;
	char ident[256];

	if (!btf)
		return 0;

	/* Generate the pointers to shadow types of
	 * struct_ops maps.
	 */
	bpf_object__for_each_map(map, obj) {
		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
			continue;
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (st_ops_cnt == 0) /* first struct_ops map */
			printf("\tstruct {\n");
		st_ops_cnt++;

		err = gen_st_ops_shadow_type(obj_name, btf, ident, map);
		if (err)
			return err;
	}

	if (st_ops_cnt)
		printf("\t} struct_ops;\n");

	return 0;
}

/* Generate the code to initialize the pointers of shadow types. */
static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
{
	struct bpf_map *map;
	char ident[256];

	if (!btf)
		return;

	/* Initialize the pointers to shadow types of
	 * struct_ops maps.
	 */
	bpf_object__for_each_map(map, obj) {
		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
			continue;
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		codegen("\
			\n\
				obj->struct_ops.%1$s = (__typeof__(obj->struct_ops.%1$s))\n\
					bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
			\n\
			", ident);
	}
}

static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t map_cnt = 0, prog_cnt = 0, attach_map_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		err = -errno;
		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		goto out_obj;
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}

		if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS)
			attach_map_cnt++;

		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	get_header_guard(header_guard, obj_name, "SKEL_H");
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <bpf/skel_internal.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_loader_ctx ctx; \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		#define BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH 1 \n\
		\n\
		struct %1$s { \n\
			struct bpf_object_skeleton *skeleton; \n\
			struct bpf_object *obj; \n\
		",
		obj_name, header_guard
		);
	}

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	btf = bpf_object__btf(obj);
	err = gen_st_ops_shadow(obj_name, btf, obj);
	if (err)
		goto out;

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	if (prog_cnt + attach_map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}

		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
				continue;

			if (use_loader)
				printf("\t\tint %s_fd;\n", ident);
			else
				printf("\t\tstruct bpf_link *%s;\n", ident);
		}

		printf("\t} links;\n");
	}

	if (sign_progs) {
		codegen("\
		\n\
			__s32 keyring_id; \n\
		");
	}

	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
			static inline struct %1$s *open_and_load(); \n\
			static inline int load(struct %1$s *skel); \n\
			static inline int attach(struct %1$s *skel); \n\
			static inline void detach(struct %1$s *skel); \n\
			static inline void destroy(struct %1$s *skel); \n\
			static inline const void *elf_bytes(size_t *sz); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *obj) \n\
		{ \n\
			if (!obj) \n\
				return; \n\
			if (obj->skeleton) \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj); \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
			\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				errno = ENOMEM; \n\
				return NULL; \n\
			} \n\
			\n\
			err = %1$s__create_skeleton(obj); \n\
			if (err) \n\
				goto err_out; \n\
			\n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err) \n\
				goto err_out; \n\
			\n\
		", obj_name);

	gen_st_ops_shadow_init(btf, obj);

	codegen("\
		\n\
			return obj; \n\
		err_out: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			return %1$s__open_opts(NULL); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__load_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
			\n\
			obj = %1$s__open(); \n\
			if (!obj) \n\
				return NULL; \n\
			err = %1$s__load(obj); \n\
			if (err) { \n\
				%1$s__destroy(obj); \n\
				errno = -err; \n\
				return NULL; \n\
			} \n\
			return obj; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__attach_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *obj) \n\
		{ \n\
			bpf_object__detach_skeleton(obj->skeleton); \n\
		} \n\
		",
		obj_name
	);

	codegen("\
		\n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz); \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj) \n\
		{ \n\
			struct bpf_object_skeleton *s; \n\
			struct bpf_map_skeleton *map __attribute__((unused));\n\
			int err; \n\
			\n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
			\n\
			s->sz = sizeof(*s); \n\
			s->name = \"%1$s\"; \n\
			s->obj = &obj->obj; \n\
		",
		obj_name
	);

	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/, true /*links*/);
	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);

	codegen("\
		\n\
		\n\
			s->data = %1$s__elf_bytes(&s->data_sz); \n\
			\n\
			obj->skeleton = s; \n\
			return 0; \n\
		err: \n\
			bpf_object__destroy_skeleton(s); \n\
			return err; \n\
		} \n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz) \n\
		{ \n\
			static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
		",
		obj_name
	);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\"; \n\
		\n\
			*sz = sizeof(data) - 1; \n\
			return (const void *)data; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
		#endif /* __cplusplus */ \n\
		\n\
		",
		obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %1$s */ \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
out_obj:
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

/* Subskeletons are like skeletons, except they don't own the bpf_object,
 * associated maps, links, etc. Instead, they know about the existence of
 * variables, maps, programs and are able to find their locations
 * _at runtime_ from an already loaded bpf_object.
 *
 * This allows for library-like BPF objects to have userspace counterparts
 * with access to their own items without having to know anything about the
 * final BPF object that the library was linked into.
 */
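
/* Illustrative usage sketch (editor-added, not in the original source):
 * given a subskeleton "counts" generated from a library object, a consumer
 * that already holds a loaded bpf_object could do, hypothetically:
 *
 *	struct counts *sub = counts__open(already_loaded_obj);
 *	if (sub)
 *		printf("%d\n", *sub->data.total_count);
 *
 * where "counts", "already_loaded_obj" and "total_count" are assumed names.
 */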
static int do_subskeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *var_name;
	char ident[256];
	int fd, err = -1, map_type_id;
	const struct bpf_map *map;
	struct bpf_program *prog;
	struct btf *btf;
	const struct btf_type *map_type, *var_type;
	const struct btf_var_secinfo *var;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (use_loader) {
		p_err("cannot use loader for subskeletons");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);

	/* The empty object name allows us to use bpf_map__name and produce
	 * ELF section names out of it. (".data" instead of "obj.data")
	 */
	opts.object_name = "";
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		libbpf_strerror(errno, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	btf = bpf_object__btf(obj);
	if (!btf) {
		err = -1;
		p_err("need btf type information for %s", obj_name);
		goto out;
	}

	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* First, count how many variables we have to find.
	 * We need this in advance so the subskel can allocate the right
	 * amount of storage.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		/* Also count all maps that have a name */
		map_cnt++;

		if (!is_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0) {
			err = map_type_id;
			goto out;
		}
		map_type = btf__type_by_id(btf, map_type_id);

		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			var_cnt++;
		}
	}

	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
	codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_object *obj; \n\
			struct bpf_object_subskeleton *subskel; \n\
		", obj_name, header_guard);

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	err = gen_st_ops_shadow(obj_name, btf, obj);
	if (err)
		goto out;

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			printf("\t\tstruct bpf_program *%s;\n",
			       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	err = codegen_subskel_datasecs(obj, obj_name);
	if (err)
		goto out;

	/* emit code that will allocate enough storage for all symbols */
	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
			static inline struct %1$s *open(const struct bpf_object *src);\n\
			static inline void destroy(struct %1$s *skel); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static inline void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			if (skel->subskel) \n\
				bpf_object__destroy_subskeleton(skel->subskel);\n\
			free(skel); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(const struct bpf_object *src) \n\
		{ \n\
			struct %1$s *obj; \n\
			struct bpf_object_subskeleton *s; \n\
			struct bpf_map_skeleton *map __attribute__((unused));\n\
			int err; \n\
			\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
			if (!s) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
			s->sz = sizeof(*s); \n\
			s->obj = src; \n\
			s->var_skel_sz = sizeof(*s->vars); \n\
			obj->subskel = s; \n\
			\n\
			/* vars */ \n\
			s->var_cnt = %2$d; \n\
			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
			if (!s->vars) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		obj_name, var_cnt
	);

	/* walk through each symbol and emit the runtime representation */
	bpf_object__for_each_map(map, obj) {
		if (!is_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0)
			/* skip over internal maps with no type */
			continue;

		map_type = btf__type_by_id(btf, map_type_id);
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			/* Note that we use the dot prefix in .data as the
			 * field access operator i.e. maps%s becomes maps.data
			 */
			codegen("\
				\n\
				\n\
					s->vars[%3$d].name = \"%1$s\"; \n\
					s->vars[%3$d].map = &obj->maps.%2$s; \n\
					s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
				", var_name, ident, var_idx);

			var_idx++;
		}
	}

	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/, false /*links*/);
	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

	codegen("\
		\n\
		\n\
			err = bpf_object__open_subskeleton(s); \n\
			if (err) \n\
				goto err; \n\
			\n\
		");

	gen_st_ops_shadow_init(btf, obj);

	codegen("\
		\n\
			return obj; \n\
		err: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		#endif /* __cplusplus */ \n\
		\n\
		#endif /* %2$s */ \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} | [ {-S|--sign} {-k} <private_key.pem> {-i} <certificate.x509> ]\n"
		"",
		bin_name, "gen");

	return 0;
}

static int btf_save_raw(const struct btf *btf, const char *path)
{
	const void *data;
	FILE *f = NULL;
	__u32 data_sz;
	int err = 0;

	data = btf__raw_data(btf, &data_sz);
	if (!data)
		return -ENOMEM;

	f = fopen(path, "wb");
	if (!f)
		return -errno;

	if (fwrite(data, 1, data_sz, f) != data_sz)
		err = -errno;

	fclose(f);
	return err;
}

struct btfgen_info {
	struct btf *src_btf;
	struct btf *marked_btf; /* btf structure used to mark used types */
};

static size_t btfgen_hash_fn(long key, void *ctx)
{
	return key;
}

static bool btfgen_equal_fn(long k1, long k2, void *ctx)
{
	return k1 == k2;
}

static void btfgen_free_info(struct btfgen_info *info)
{
	if (!info)
		return;

	btf__free(info->src_btf);
	btf__free(info->marked_btf);

	free(info);
}

static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
{
	struct btfgen_info *info;
	int err;

	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;

	info->src_btf = btf__parse(targ_btf_path, NULL);
	if (!info->src_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	info->marked_btf = btf__parse(targ_btf_path, NULL);
	if (!info->marked_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	return info;

err_out:
	btfgen_free_info(info);
	errno = -err;
	return NULL;
}

#define MARKED UINT32_MAX

static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
{
	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
	struct btf_member *m = btf_members(t) + idx;

	m->name_off = MARKED;
}

static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
	struct btf_type *cloned_type;
	struct btf_param *param;
	struct btf_array *array;
	int err, i;

	if (type_id == 0)
		return 0;

	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	/* recursively mark other types needed by it */
	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		break;
	case BTF_KIND_PTR:
		if (follow_pointers) {
			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
			if (err)
				return err;
		}
		break;
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_TYPEDEF:
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY:
		array = btf_array(btf_type);

		/* mark array type */
		err = btfgen_mark_type(info, array->type, follow_pointers);
		/* mark array's index type */
		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_FUNC_PROTO:
		/* mark ret type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;

		/* mark parameters types */
		param = btf_params(btf_type);
		for (i = 0; i < btf_vlen(btf_type); i++) {
			err = btfgen_mark_type(info, param->type, follow_pointers);
			if (err)
				return err;
			param++;
		}
		break;
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%u)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}

static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	struct btf *btf = info->src_btf;
	const struct btf_type *btf_type;
	struct btf_member *btf_member;
	struct btf_array *array;
	unsigned int type_id = targ_spec->root_type_id;
	int idx, err;

	/* mark root type */
	btf_type = btf__type_by_id(btf, type_id);
	err = btfgen_mark_type(info, type_id, false);
	if (err)
		return err;

	/* mark types for complex types (arrays, unions, structures) */
	for (int i = 1; i < targ_spec->raw_len; i++) {
		/* skip typedefs and mods */
		while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
			type_id = btf_type->type;
			btf_type = btf__type_by_id(btf, type_id);
		}

		switch (btf_kind(btf_type)) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			idx = targ_spec->raw_spec[i];
			btf_member = btf_members(btf_type) + idx;

			/* mark member */
			btfgen_mark_member(info, type_id, idx);

			/* mark member's type */
			type_id = btf_member->type;
			btf_type = btf__type_by_id(btf, type_id);
			err = btfgen_mark_type(info, type_id, false);
			if (err)
				return err;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(btf_type);
			type_id = array->type;
			btf_type = btf__type_by_id(btf, type_id);
			break;
		default:
			p_err("unsupported kind: %s (%u)",
			      btf_kind_str(btf_type), type_id);
			return -EINVAL;
		}
	}

	return 0;
}
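
/*
 * Illustrative example (field chosen arbitrarily): for a CO-RE access to
 * task->pid, the target spec's raw_spec holds the member index of "pid",
 * so only that member of struct task_struct is marked; every unmarked
 * member is dropped when the minimized BTF is emitted by btfgen_get_btf().
 */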

/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 *
 * The `behind_ptr` argument is used to stop marking of composite types reached
 * through a pointer. This way, we can keep BTF size in check while providing
 * reasonable match semantics.
 */
static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
{
	const struct btf_type *btf_type;
	struct btf *btf = info->src_btf;
	struct btf_type *cloned_type;
	int i, err;

	if (type_id == 0)
		return 0;

	btf_type = btf__type_by_id(btf, type_id);
	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(btf_type);
		__u16 vlen = btf_vlen(btf_type);

		if (behind_ptr)
			break;

		for (i = 0; i < vlen; i++, m++) {
			/* mark member */
			btfgen_mark_member(info, type_id, i);

			/* mark member's type */
			err = btfgen_mark_type_match(info, m->type, false);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_CONST:
	case BTF_KIND_FWD:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
		return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
	case BTF_KIND_PTR:
		return btfgen_mark_type_match(info, btf_type->type, true);
	case BTF_KIND_ARRAY: {
		struct btf_array *array;

		array = btf_array(btf_type);
		/* mark array type */
		err = btfgen_mark_type_match(info, array->type, false);
		/* mark array's index type */
		err = err ? : btfgen_mark_type_match(info, array->index_type, false);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		__u16 vlen = btf_vlen(btf_type);
		struct btf_param *param;

		/* mark ret type */
		err = btfgen_mark_type_match(info, btf_type->type, false);
		if (err)
			return err;

		/* mark parameter types */
		param = btf_params(btf_type);
		for (i = 0; i < vlen; i++) {
			err = btfgen_mark_type_match(info, param->type, false);
			if (err)
				return err;
			param++;
		}
		break;
	}
	/* catch any other kind that would need explicit handling */
	default:
		p_err("unsupported kind: %s (%u)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}
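
/*
 * For example, matching "struct a { struct b *p; }" marks all of a's members
 * but records struct b only as a named type without members, which is meant
 * to mirror the semantics of bpf_core_types_match() while keeping the
 * generated BTF small.
 */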

/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 */
static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
}

static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, true);
}

static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, false);
}
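
/*
 * Note the asymmetry above: btfgen_record_type_relo() follows pointers so
 * the full shape of the target type is preserved, whereas an enum
 * references no other types, leaving btfgen_record_enumval_relo() nothing
 * to follow.
 */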

static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
{
	switch (res->relo_kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return btfgen_record_field_relo(info, res);
	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
		return 0;
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return btfgen_record_type_relo(info, res);
	case BPF_CORE_TYPE_MATCHES:
		return btfgen_record_type_match_relo(info, res);
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return btfgen_record_enumval_relo(info, res);
	default:
		return -EINVAL;
	}
}

static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL;

	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	bpf_core_free_cands(cands);
	errno = -err;
	return NULL;
}
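
/*
 * bpf_core_essential_name_len() strips a "___" flavor suffix, so a local
 * type named, say, "task_struct___v510" (hypothetical flavor) is matched
 * against the target's "task_struct" candidates.
 */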

/* Record relocation information for a single BPF object */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, relo->type_id, &cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, relo->type_id, cands,
						   NULL, NULL);
				if (err)
					goto out;
			}

			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->pvalue);
		}
		hashmap__free(cand_cache);
	}

	return err;
}
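
/*
 * cand_cache above is keyed by the relocation's local root type ID, so
 * candidates in the target BTF are computed once per distinct root type
 * instead of once per relocation record.
 */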

/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
		struct btf_field_iter it;
		__u32 *type_id;

		err = btf_field_iter_init(&it, btf_type, BTF_FIELD_ITER_IDS);
		if (err)
			goto err_out;

		while ((type_id = btf_field_iter_next(&it)))
			*type_id = ids[*type_id];
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}
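
/*
 * Sketch of the ID fixup (hypothetical numbers): if only source types 7 and
 * 42 were marked, they may land in btf_new as IDs 1 and 2, so ids[7] == 1
 * and ids[42] == 2; the second pass then rewrites every type reference
 * (member types, pointee types, array element types, ...) through ids[].
 */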

/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided into two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification; this
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations empty structs / unions are
 * generated and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step. This function takes care of only adding the structure
 * and union members that were marked as used and it also fixes up the
 * type IDs on the generated BTF object.
 */
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btfgen_info *info;
	struct btf *btf_new = NULL;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err) {
		p_err("error saving btf file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}
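
/*
 * Typical invocation (paths are illustrative):
 *
 *   bpftool gen min_core_btf /sys/kernel/btf/vmlinux vmlinux.min.btf \
 *           prog1.bpf.o prog2.bpf.o
 *
 * i.e. min_core_btf INPUT OUTPUT OBJECT [OBJECT...], which matches the
 * argument handling in do_min_core_btf() below.
 */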

static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int i, err;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	i = 0;
	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}

static const struct cmd cmds[] = {
	{ "object", do_object },
	{ "skeleton", do_skeleton },
	{ "subskeleton", do_subskeleton },
	{ "min_core_btf", do_min_core_btf },
	{ "help", do_help },
	{ 0 }
};

int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}