GitHub Repository: torvalds/linux
Path: kernel/bpf/net_namespace.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <linux/bpf-netns.h>
#include <linux/filter.h>
#include <net/net_namespace.h>

/*
 * Functions to manage BPF programs attached to netns
 */

struct bpf_netns_link {
	struct bpf_link link;

	/* We don't hold a ref to net in order to auto-detach the link
	 * when netns is going away. Instead we rely on pernet
	 * pre_exit callback to clear this pointer. Must be accessed
	 * with netns_bpf_mutex held.
	 */
	struct net *net;
	struct list_head node; /* node in list of links attached to net */
	enum netns_bpf_attach_type netns_type;
};

/* Protects updates to netns_bpf */
DEFINE_MUTEX(netns_bpf_mutex);

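/* Keep the bpf_sk_lookup_enabled static key in step with the number of
 * attached sk_lookup users, so the SK_LOOKUP hook is only evaluated
 * while at least one program is attached.
 */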
static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type)
{
	switch (type) {
#ifdef CONFIG_INET
	case NETNS_BPF_SK_LOOKUP:
		static_branch_dec(&bpf_sk_lookup_enabled);
		break;
#endif
	default:
		break;
	}
}

static void netns_bpf_attach_type_need(enum netns_bpf_attach_type type)
{
	switch (type) {
#ifdef CONFIG_INET
	case NETNS_BPF_SK_LOOKUP:
		static_branch_inc(&bpf_sk_lookup_enabled);
		break;
#endif
	default:
		break;
	}
}

/* Must be called with netns_bpf_mutex held. */
static void netns_bpf_run_array_detach(struct net *net,
				       enum netns_bpf_attach_type type)
{
	struct bpf_prog_array *run_array;

	run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,
					lockdep_is_held(&netns_bpf_mutex));
	bpf_prog_array_free(run_array);
}

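/* Position of @link in the list of links attached to @net for @type,
 * or -ENOENT if it is not on the list. netns_bpf_mutex must be held.
 */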
static int link_index(struct net *net, enum netns_bpf_attach_type type,
		      struct bpf_netns_link *link)
{
	struct bpf_netns_link *pos;
	int i = 0;

	list_for_each_entry(pos, &net->bpf.links[type], node) {
		if (pos == link)
			return i;
		i++;
	}
	return -ENOENT;
}

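/* Number of links currently attached to @net for @type.
 * netns_bpf_mutex must be held.
 */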
static int link_count(struct net *net, enum netns_bpf_attach_type type)
{
	struct list_head *pos;
	int i = 0;

	list_for_each(pos, &net->bpf.links[type])
		i++;
	return i;
}

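/* Populate @prog_array with the programs of all links attached to @net
 * for @type, in list order. netns_bpf_mutex must be held.
 */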
static void fill_prog_array(struct net *net, enum netns_bpf_attach_type type,
			    struct bpf_prog_array *prog_array)
{
	struct bpf_netns_link *pos;
	unsigned int i = 0;

	list_for_each_entry(pos, &net->bpf.links[type], node) {
		prog_array->items[i].prog = pos->link.prog;
		i++;
	}
}

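/* Detach the link from its netns: drop it from the attach list and
 * rebuild the run array without its program (or remove the array if
 * this was the last link).
 */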
static void bpf_netns_link_release(struct bpf_link *link)
{
	struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);
	enum netns_bpf_attach_type type = net_link->netns_type;
	struct bpf_prog_array *old_array, *new_array;
	struct net *net;
	int cnt, idx;

	mutex_lock(&netns_bpf_mutex);

	/* We can race with cleanup_net, but if we see a non-NULL
	 * struct net pointer, pre_exit has not run yet and will
	 * wait on netns_bpf_mutex.
	 */
	net = net_link->net;
	if (!net)
		goto out_unlock;

	/* Mark attach point as unused */
	netns_bpf_attach_type_unneed(type);

	/* Remember link position in case of safe delete */
	idx = link_index(net, type, net_link);
	list_del(&net_link->node);

	cnt = link_count(net, type);
	if (!cnt) {
		netns_bpf_run_array_detach(net, type);
		goto out_unlock;
	}

	old_array = rcu_dereference_protected(net->bpf.run_array[type],
					      lockdep_is_held(&netns_bpf_mutex));
	new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!new_array) {
		WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx));
		goto out_unlock;
	}
	fill_prog_array(net, type, new_array);
	rcu_assign_pointer(net->bpf.run_array[type], new_array);
	bpf_prog_array_free(old_array);

out_unlock:
	net_link->net = NULL;
	mutex_unlock(&netns_bpf_mutex);
}

static int bpf_netns_link_detach(struct bpf_link *link)
{
	bpf_netns_link_release(link);
	return 0;
}

static void bpf_netns_link_dealloc(struct bpf_link *link)
{
	struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);

	kfree(net_link);
}

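/* Swap the program behind the link: verify the expected old program and
 * program type, then update the run array slot in place and release the
 * old program.
 */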
static int bpf_netns_link_update_prog(struct bpf_link *link,
				      struct bpf_prog *new_prog,
				      struct bpf_prog *old_prog)
{
	struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);
	enum netns_bpf_attach_type type = net_link->netns_type;
	struct bpf_prog_array *run_array;
	struct net *net;
	int idx, ret;

	if (old_prog && old_prog != link->prog)
		return -EPERM;
	if (new_prog->type != link->prog->type)
		return -EINVAL;

	mutex_lock(&netns_bpf_mutex);

	net = net_link->net;
	if (!net || !check_net(net)) {
		/* Link auto-detached or netns dying */
		ret = -ENOLINK;
		goto out_unlock;
	}

	run_array = rcu_dereference_protected(net->bpf.run_array[type],
					      lockdep_is_held(&netns_bpf_mutex));
	idx = link_index(net, type, net_link);
	ret = bpf_prog_array_update_at(run_array, idx, new_prog);
	if (ret)
		goto out_unlock;

	old_prog = xchg(&link->prog, new_prog);
	bpf_prog_put(old_prog);

out_unlock:
	mutex_unlock(&netns_bpf_mutex);
	return ret;
}

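/* Report the attach type and the inode number of the target netns via
 * bpf_link_info; the inode number is 0 once the namespace is gone.
 */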
static int bpf_netns_link_fill_info(const struct bpf_link *link,
				    struct bpf_link_info *info)
{
	const struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);
	unsigned int inum = 0;
	struct net *net;

	mutex_lock(&netns_bpf_mutex);
	net = net_link->net;
	if (net && check_net(net))
		inum = net->ns.inum;
	mutex_unlock(&netns_bpf_mutex);

	info->netns.netns_ino = inum;
	info->netns.attach_type = link->attach_type;
	return 0;
}

static void bpf_netns_link_show_fdinfo(const struct bpf_link *link,
				       struct seq_file *seq)
{
	struct bpf_link_info info = {};

	bpf_netns_link_fill_info(link, &info);
	seq_printf(seq,
		   "netns_ino:\t%u\n"
		   "attach_type:\t%u\n",
		   info.netns.netns_ino,
		   link->attach_type);
}

static const struct bpf_link_ops bpf_netns_link_ops = {
	.release = bpf_netns_link_release,
	.dealloc = bpf_netns_link_dealloc,
	.detach = bpf_netns_link_detach,
	.update_prog = bpf_netns_link_update_prog,
	.fill_link_info = bpf_netns_link_fill_info,
	.show_fdinfo = bpf_netns_link_show_fdinfo,
};

/* Must be called with netns_bpf_mutex held. */
static int __netns_bpf_prog_query(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  struct net *net,
				  enum netns_bpf_attach_type type)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	struct bpf_prog_array *run_array;
	u32 prog_cnt = 0, flags = 0;

	run_array = rcu_dereference_protected(net->bpf.run_array[type],
					      lockdep_is_held(&netns_bpf_mutex));
	if (run_array)
		prog_cnt = bpf_prog_array_length(run_array);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
		return -EFAULT;
	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
		return 0;

	return bpf_prog_array_copy_to_user(run_array, prog_ids,
					   attr->query.prog_cnt);
}

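/* BPF_PROG_QUERY handler: copy the attach flags and the IDs of programs
 * attached to the netns given by query.target_fd back to user space.
 */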
int netns_bpf_prog_query(const union bpf_attr *attr,
			 union bpf_attr __user *uattr)
{
	enum netns_bpf_attach_type type;
	struct net *net;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	type = to_netns_bpf_attach_type(attr->query.attach_type);
	if (type < 0)
		return -EINVAL;

	net = get_net_ns_by_fd(attr->query.target_fd);
	if (IS_ERR(net))
		return PTR_ERR(net);

	mutex_lock(&netns_bpf_mutex);
	ret = __netns_bpf_prog_query(attr, uattr, net, type);
	mutex_unlock(&netns_bpf_mutex);

	put_net(net);
	return ret;
}

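/* Direct BPF_PROG_ATTACH path: attach @prog to the current task's netns
 * (only the flow dissector attach type is supported here). Not allowed
 * while bpf_link attachments exist for the same attach type.
 */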
int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_prog_array *run_array;
	enum netns_bpf_attach_type type;
	struct bpf_prog *attached;
	struct net *net;
	int ret;

	if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	type = to_netns_bpf_attach_type(attr->attach_type);
	if (type < 0)
		return -EINVAL;

	net = current->nsproxy->net_ns;
	mutex_lock(&netns_bpf_mutex);

	/* Attaching prog directly is not compatible with links */
	if (!list_empty(&net->bpf.links[type])) {
		ret = -EEXIST;
		goto out_unlock;
	}

	switch (type) {
	case NETNS_BPF_FLOW_DISSECTOR:
		ret = flow_dissector_bpf_prog_attach_check(net, prog);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		goto out_unlock;

	attached = net->bpf.progs[type];
	if (attached == prog) {
		/* The same program cannot be attached twice */
		ret = -EINVAL;
		goto out_unlock;
	}

	run_array = rcu_dereference_protected(net->bpf.run_array[type],
					      lockdep_is_held(&netns_bpf_mutex));
	if (run_array) {
		WRITE_ONCE(run_array->items[0].prog, prog);
	} else {
		run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
		if (!run_array) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		run_array->items[0].prog = prog;
		rcu_assign_pointer(net->bpf.run_array[type], run_array);
	}

	net->bpf.progs[type] = prog;
	if (attached)
		bpf_prog_put(attached);

out_unlock:
	mutex_unlock(&netns_bpf_mutex);

	return ret;
}

/* Must be called with netns_bpf_mutex held. */
static int __netns_bpf_prog_detach(struct net *net,
				   enum netns_bpf_attach_type type,
				   struct bpf_prog *old)
{
	struct bpf_prog *attached;

	/* Progs attached via links cannot be detached */
	if (!list_empty(&net->bpf.links[type]))
		return -EINVAL;

	attached = net->bpf.progs[type];
	if (!attached || attached != old)
		return -ENOENT;
	netns_bpf_run_array_detach(net, type);
	net->bpf.progs[type] = NULL;
	bpf_prog_put(attached);
	return 0;
}

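/* BPF_PROG_DETACH handler: detach the directly-attached program of type
 * @ptype from the current task's netns, verifying that attach_bpf_fd
 * refers to the currently attached program.
 */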
int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	enum netns_bpf_attach_type type;
	struct bpf_prog *prog;
	int ret;

	if (attr->target_fd)
		return -EINVAL;

	type = to_netns_bpf_attach_type(attr->attach_type);
	if (type < 0)
		return -EINVAL;

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	mutex_lock(&netns_bpf_mutex);
	ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog);
	mutex_unlock(&netns_bpf_mutex);

	bpf_prog_put(prog);

	return ret;
}

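/* Upper limit on programs attachable per netns for each attach type:
 * a single flow dissector, up to 64 sk_lookup programs.
 */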
static int netns_bpf_max_progs(enum netns_bpf_attach_type type)
{
	switch (type) {
	case NETNS_BPF_FLOW_DISSECTOR:
		return 1;
	case NETNS_BPF_SK_LOOKUP:
		return 64;
	default:
		return 0;
	}
}

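/* Attach an already-primed netns link: enforce the per-type program
 * limit, reject mixing with direct prog attachments, run type-specific
 * checks, then swap in a run array that includes the new program.
 */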
static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
				 enum netns_bpf_attach_type type)
{
	struct bpf_netns_link *net_link =
		container_of(link, struct bpf_netns_link, link);
	struct bpf_prog_array *run_array;
	int cnt, err;

	mutex_lock(&netns_bpf_mutex);

	cnt = link_count(net, type);
	if (cnt >= netns_bpf_max_progs(type)) {
		err = -E2BIG;
		goto out_unlock;
	}
	/* Links are not compatible with attaching prog directly */
	if (net->bpf.progs[type]) {
		err = -EEXIST;
		goto out_unlock;
	}

	switch (type) {
	case NETNS_BPF_FLOW_DISSECTOR:
		err = flow_dissector_bpf_prog_attach_check(net, link->prog);
		break;
	case NETNS_BPF_SK_LOOKUP:
		err = 0; /* nothing to check */
		break;
	default:
		err = -EINVAL;
		break;
	}
	if (err)
		goto out_unlock;

	run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL);
	if (!run_array) {
		err = -ENOMEM;
		goto out_unlock;
	}

	list_add_tail(&net_link->node, &net->bpf.links[type]);

	fill_prog_array(net, type, run_array);
	run_array = rcu_replace_pointer(net->bpf.run_array[type], run_array,
					lockdep_is_held(&netns_bpf_mutex));
	bpf_prog_array_free(run_array);

	/* Mark attach point as used */
	netns_bpf_attach_type_need(type);

out_unlock:
	mutex_unlock(&netns_bpf_mutex);
	return err;
}

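/* BPF_LINK_CREATE handler: allocate a netns link for @prog, prime its
 * FD, attach it to the netns given by link_create.target_fd, and return
 * the new link FD (or a negative errno).
 */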
int netns_bpf_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
{
	enum netns_bpf_attach_type netns_type;
	struct bpf_link_primer link_primer;
	struct bpf_netns_link *net_link;
	enum bpf_attach_type type;
	struct net *net;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	type = attr->link_create.attach_type;
	netns_type = to_netns_bpf_attach_type(type);
	if (netns_type < 0)
		return -EINVAL;

	net = get_net_ns_by_fd(attr->link_create.target_fd);
	if (IS_ERR(net))
		return PTR_ERR(net);

	net_link = kzalloc(sizeof(*net_link), GFP_USER);
	if (!net_link) {
		err = -ENOMEM;
		goto out_put_net;
	}
	bpf_link_init(&net_link->link, BPF_LINK_TYPE_NETNS,
		      &bpf_netns_link_ops, prog, type);
	net_link->net = net;
	net_link->netns_type = netns_type;

	err = bpf_link_prime(&net_link->link, &link_primer);
	if (err) {
		kfree(net_link);
		goto out_put_net;
	}

	err = netns_bpf_link_attach(net, &net_link->link, netns_type);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_net;
	}

	put_net(net);
	return bpf_link_settle(&link_primer);

out_put_net:
	put_net(net);
	return err;
}

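/* Initialize the per-netns link lists for every attach type when a new
 * network namespace is created.
 */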
static int __net_init netns_bpf_pernet_init(struct net *net)
{
	int type;

	for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
		INIT_LIST_HEAD(&net->bpf.links[type]);

	return 0;
}

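/* Namespace teardown: free the run arrays, auto-detach any links by
 * clearing their net pointer, and drop directly attached programs.
 */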
static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
{
	enum netns_bpf_attach_type type;
	struct bpf_netns_link *net_link;

	mutex_lock(&netns_bpf_mutex);
	for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) {
		netns_bpf_run_array_detach(net, type);
		list_for_each_entry(net_link, &net->bpf.links[type], node) {
			net_link->net = NULL; /* auto-detach link */
			netns_bpf_attach_type_unneed(type);
		}
		if (net->bpf.progs[type])
			bpf_prog_put(net->bpf.progs[type]);
	}
	mutex_unlock(&netns_bpf_mutex);
}

static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
	.init = netns_bpf_pernet_init,
	.pre_exit = netns_bpf_pernet_pre_exit,
};

static int __init netns_bpf_init(void)
{
	return register_pernet_subsys(&netns_bpf_pernet_ops);
}

subsys_initcall(netns_bpf_init);