torvalds/linux: kernel/bpf/offload.c

/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>
#include <net/netdev_lock.h>
#include <net/xdp.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
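
/* Implied lock ordering: callers that need both locks take rtnl_lock()
 * first and only then bpf_devs_lock (see bpf_prog_dev_bound_destroy() and
 * bpf_map_offload_map_alloc() below); taking RTNL while already holding
 * bpf_devs_lock would invert that order and risk deadlock.
 */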

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev; /* NULL when bound-only */
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
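
/* Data model, in brief: a bpf_offload_dev represents one offload-capable
 * device (typically an ASIC with several ports) and links together the
 * bpf_offload_netdev entries for each of its netdevs. Each bpf_offload_netdev
 * tracks the programs and maps bound or offloaded to that netdev, and the
 * offdevs rhashtable indexes these entries by netdev pointer so lookups by
 * netdev need no linear scan. A bound-only netdev (offdev == NULL) gets an
 * entry too, created on demand in __bpf_prog_dev_bound_init().
 */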

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
	return err;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev = NULL;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		return;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

	/* Try to move the objects to another netdev of the device */
	if (offdev) {
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}

	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
}
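
/* The migration above is what lets offload survive on multi-port hardware:
 * the offloaded objects belong to the device rather than to a single port,
 * so when one port goes away its programs and maps are re-homed to a sibling
 * netdev of the same bpf_offload_dev if one is still registered. Only when
 * the last netdev of the device disappears are the objects destroyed.
 */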

static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	offload->netdev = netdev;

	ondev = bpf_offload_find_netdev(offload->netdev);
	/* When program is offloaded require presence of "true"
	 * bpf_offload_netdev, avoid the one created for !ondev case below.
	 */
	if (bpf_prog_is_offloaded(prog->aux) && (!ondev || !ondev->offdev)) {
		err = -EINVAL;
		goto err_free;
	}
	if (!ondev) {
		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_free;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);

	return 0;
err_free:
	kfree(offload);
	return err;
}

int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net_device *netdev;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_XDP_DEV_BOUND_ONLY | BPF_F_XDP_HAS_FRAGS))
		return -EINVAL;

	/* Frags are allowed only if program is dev-bound-only, but not
	 * if it is requesting bpf offload.
	 */
	if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS &&
	    !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY))
		return -EINVAL;

	if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
	if (!netdev)
		return -EINVAL;

	err = bpf_dev_offload_check(netdev);
	if (err)
		goto out;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	err = __bpf_prog_dev_bound_init(prog, netdev);
	up_write(&bpf_devs_lock);

out:
	dev_put(netdev);
	return err;
}
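
/* From user space this path is driven by BPF_PROG_LOAD with
 * attr->prog_ifindex set. A minimal sketch with libbpf (illustrative only;
 * assumes a libbpf version whose bpf_prog_load_opts exposes .prog_ifindex
 * and .prog_flags, and hypothetical insns/insn_cnt/ifindex variables):
 *
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *		    .prog_ifindex = ifindex,
 *		    .prog_flags = BPF_F_XDP_DEV_BOUND_ONLY);
 *	int fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "dev_bound_prog", "GPL",
 *			       insns, insn_cnt, &opts);
 *
 * Leaving BPF_F_XDP_DEV_BOUND_ONLY out of prog_flags requests full offload
 * instead, which additionally requires a driver-registered bpf_offload_dev
 * for the target netdev.
 */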

int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
	int err;

	if (!bpf_prog_is_dev_bound(old_prog->aux))
		return 0;

	if (bpf_prog_is_offloaded(old_prog->aux))
		return -EINVAL;

	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
	new_prog->aux->offload_requested = old_prog->aux->offload_requested;

	down_write(&bpf_devs_lock);
	if (!old_prog->aux->offload) {
		err = -EINVAL;
		goto out;
	}

	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
	up_write(&bpf_devs_lock);
	return err;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
	struct bpf_offload_netdev *ondev;
	struct net_device *netdev;

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);
	rtnl_lock();
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock_rtnl;

	netdev_lock_ops(offmap->netdev);
	down_write(&bpf_devs_lock);

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	netdev_unlock_ops(offmap->netdev);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	netdev_unlock_ops(offmap->netdev);
err_unlock_rtnl:
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}
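
/* The BPF_OFFLOAD_MAP_ALLOC ndo_bpf call is where the driver takes over: it
 * is expected to allocate its device-side state and point offmap->dev_ops at
 * its bpf_map_dev_ops, which the map accessors below then call. A rough,
 * hypothetical driver-side sketch (my_dev_map_ops, my_map_alloc and
 * my_map_free are illustrative names, not a real driver API):
 *
 *	static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case BPF_OFFLOAD_MAP_ALLOC:
 *			bpf->offmap->dev_ops = &my_dev_map_ops;
 *			return my_map_alloc(dev, bpf->offmap);
 *		case BPF_OFFLOAD_MAP_FREE:
 *			return my_map_free(dev, bpf->offmap);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */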

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
	/* The memory dynamically allocated in netdev dev_ops is not counted */
	return sizeof(struct bpf_offloaded_map);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
	bool ret;

	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
		return false;

	down_read(&bpf_devs_lock);
	ret = lhs->aux->offload && rhs->aux->offload &&
	      lhs->aux->offload->netdev &&
	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
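
/* Typical driver lifecycle for the exported API above, as a sketch with
 * hypothetical my_ prefixed names (see drivers/net/ethernet/netronome/nfp
 * for a real user). Note that the unregister path asserts RTNL, so it must
 * run with rtnl_lock() held:
 *
 *	// probe: one bpf_offload_dev per device (e.g. per ASIC)
 *	priv->bpf_dev = bpf_offload_dev_create(&my_prog_offload_ops, priv);
 *
 *	// per-port netdev registration
 *	err = bpf_offload_dev_netdev_register(priv->bpf_dev, netdev);
 *
 *	// teardown, in reverse order
 *	bpf_offload_dev_netdev_unregister(priv->bpf_dev, netdev);
 *	bpf_offload_dev_destroy(priv->bpf_dev);
 */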

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}

void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
	const struct xdp_metadata_ops *ops;
	void *p = NULL;

	/* We don't hold bpf_devs_lock while resolving several
	 * kfuncs and can race with the unregister_netdevice().
	 * We rely on bpf_dev_bound_match() check at attach
	 * to render this program unusable.
	 */
	down_read(&bpf_devs_lock);
	if (!prog->aux->offload)
		goto out;

	ops = prog->aux->offload->netdev->xdp_metadata_ops;
	if (!ops)
		goto out;

#define XDP_METADATA_KFUNC(name, _, __, xmo) \
	if (func_id == bpf_xdp_metadata_kfunc_id(name)) p = ops->xmo;
	XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

out:
	up_read(&bpf_devs_lock);

	return p;
}
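
/* For reference, with the XDP_METADATA_KFUNC_xxx table in include/net/xdp.h
 * the macro above expands to a chain of checks along the lines of:
 *
 *	if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
 *		p = ops->xmo_rx_timestamp;
 *	if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
 *		p = ops->xmo_rx_hash;
 *	...
 *
 * which lets the verifier patch a metadata kfunc call with the bound
 * netdev's driver implementation.
 */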

static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

core_initcall(bpf_offload_init);