GitHub Repository: torvalds/linux
Path: blob/master/kernel/bpf/cpumask.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Meta, Inc */
#include <linux/bpf.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/cpumask.h>

/**
 * struct bpf_cpumask - refcounted BPF cpumask wrapper structure
 * @cpumask: The actual cpumask embedded in the struct.
 * @usage: Object reference counter. When the refcount goes to 0, the
 *         memory is released back to the BPF allocator, which provides
 *         RCU safety.
 *
 * Note that we explicitly embed a cpumask_t rather than a cpumask_var_t. This
 * is done to avoid confusing the verifier due to the typedef of cpumask_var_t
 * changing depending on whether CONFIG_CPUMASK_OFFSTACK is defined or not. See
 * the details in <linux/cpumask.h>. The consequence is that this structure is
 * likely a bit larger than it needs to be when CONFIG_CPUMASK_OFFSTACK is
 * defined due to embedding the whole NR_CPUS-size bitmap, but the extra memory
 * overhead is minimal. For the more typical case of CONFIG_CPUMASK_OFFSTACK
 * not being defined, the structure is the same size regardless.
 */
struct bpf_cpumask {
	cpumask_t cpumask;
	refcount_t usage;
};

static struct bpf_mem_alloc bpf_cpumask_ma;

static bool cpu_valid(u32 cpu)
{
	return cpu < nr_cpu_ids;
}

__bpf_kfunc_start_defs();

/**
 * bpf_cpumask_create() - Create a mutable BPF cpumask.
 *
 * Allocates a cpumask that can be queried, mutated, acquired, and released by
 * a BPF program. The cpumask returned by this function must either be embedded
 * in a map as a kptr, or freed with bpf_cpumask_release().
 *
 * bpf_cpumask_create() allocates memory using the BPF memory allocator, and
 * will not block. It may return NULL if no memory is available.
 *
 * Return:
 * * A pointer to a new struct bpf_cpumask instance on success.
 * * NULL if the BPF memory allocator is out of memory.
 */
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
{
	struct bpf_cpumask *cpumask;

	/* cpumask must be the first element so struct bpf_cpumask can be cast to struct cpumask. */
	BUILD_BUG_ON(offsetof(struct bpf_cpumask, cpumask) != 0);

	cpumask = bpf_mem_cache_alloc(&bpf_cpumask_ma);
	if (!cpumask)
		return NULL;

	memset(cpumask, 0, sizeof(*cpumask));
	refcount_set(&cpumask->usage, 1);

	return cpumask;
}

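/*
 * Illustrative sketch (not part of the original file): a BPF program would
 * typically pair bpf_cpumask_create() with bpf_cpumask_release() roughly as
 * below. The program type, section name, and function name are hypothetical,
 * and the kfuncs are assumed to be declared via vmlinux.h or extern
 * declarations on the BPF side.
 *
 *	SEC("tp_btf/task_newtask")
 *	int BPF_PROG(create_and_release, struct task_struct *task, u64 clone_flags)
 *	{
 *		struct bpf_cpumask *mask;
 *
 *		mask = bpf_cpumask_create();
 *		if (!mask)
 *			return 0;	// allocator may be out of memory
 *
 *		bpf_cpumask_set_cpu(0, mask);
 *		bpf_cpumask_release(mask);	// drop the acquired reference
 *		return 0;
 *	}
 */
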
/**
 * bpf_cpumask_acquire() - Acquire a reference to a BPF cpumask.
 * @cpumask: The BPF cpumask being acquired. The cpumask must be a trusted
 *           pointer.
 *
 * Acquires a reference to a BPF cpumask. The cpumask returned by this function
 * must either be embedded in a map as a kptr, or freed with
 * bpf_cpumask_release().
 *
 * Return:
 * * The struct bpf_cpumask pointer passed to the function.
 */
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
{
	refcount_inc(&cpumask->usage);
	return cpumask;
}

/**
 * bpf_cpumask_release() - Release a previously acquired BPF cpumask.
 * @cpumask: The cpumask being released.
 *
 * Releases a previously acquired reference to a BPF cpumask. When the final
 * reference of the BPF cpumask has been released, it is subsequently freed in
 * an RCU callback in the BPF memory allocator.
 */
__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
{
	if (!refcount_dec_and_test(&cpumask->usage))
		return;

	bpf_mem_cache_free_rcu(&bpf_cpumask_ma, cpumask);
}

__bpf_kfunc void bpf_cpumask_release_dtor(void *cpumask)
{
	bpf_cpumask_release(cpumask);
}
CFI_NOSEAL(bpf_cpumask_release_dtor);

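/*
 * Illustrative sketch (not part of the original file): a created or acquired
 * cpumask is commonly stashed in a map value as a kptr with bpf_kptr_xchg(),
 * which consumes the reference; any previous non-NULL pointer returned by the
 * exchange must itself be released. The map-value layout and names below are
 * hypothetical.
 *
 *	struct cpumask_holder {
 *		struct bpf_cpumask __kptr *mask;
 *	};
 *
 *	// v is a struct cpumask_holder * looked up from a map
 *	struct bpf_cpumask *mask, *old;
 *
 *	mask = bpf_cpumask_create();
 *	if (!mask)
 *		return 0;
 *	old = bpf_kptr_xchg(&v->mask, mask);	// reference moves into the map
 *	if (old)
 *		bpf_cpumask_release(old);	// release the displaced cpumask
 */
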
/**
 * bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 *
 * Return:
 * * The index of the first nonzero bit in the struct cpumask.
 */
__bpf_kfunc u32 bpf_cpumask_first(const struct cpumask *cpumask)
{
	return cpumask_first(cpumask);
}

/**
 * bpf_cpumask_first_zero() - Get the index of the first unset bit in the
 *                            cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 *
 * Return:
 * * The index of the first zero bit in the struct cpumask.
 */
__bpf_kfunc u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
{
	return cpumask_first_zero(cpumask);
}

/**
 * bpf_cpumask_first_and() - Return the index of the first nonzero bit from the
 *                           AND of two cpumasks.
 * @src1: The first cpumask.
 * @src2: The second cpumask.
 *
 * Find the index of the first nonzero bit of the AND of two cpumasks.
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 *
 * Return:
 * * The index of the first bit that is nonzero in both cpumask instances.
 */
__bpf_kfunc u32 bpf_cpumask_first_and(const struct cpumask *src1,
				      const struct cpumask *src2)
{
	return cpumask_first_and(src1, src2);
}

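/*
 * Illustrative sketch (not part of the original file): because a struct
 * bpf_cpumask embeds the cpumask as its first member, BPF programs usually
 * cast it when calling the query kfuncs above. An empty result is detected by
 * comparing the returned index against the number of CPUs. The names mask1,
 * mask2, and nr_cpus are hypothetical (nr_cpus would be known to the program,
 * e.g. filled in by userspace).
 *
 *	u32 cpu;
 *
 *	cpu = bpf_cpumask_first_and((const struct cpumask *)mask1,
 *				    (const struct cpumask *)mask2);
 *	if (cpu >= nr_cpus)
 *		return 0;	// no CPU is set in both masks
 */
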
/**
 * bpf_cpumask_set_cpu() - Set a bit for a CPU in a BPF cpumask.
 * @cpu: The CPU to be set in the cpumask.
 * @cpumask: The BPF cpumask in which a bit is being set.
 */
__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return;

	cpumask_set_cpu(cpu, (struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_clear_cpu() - Clear a bit for a CPU in a BPF cpumask.
 * @cpu: The CPU to be cleared from the cpumask.
 * @cpumask: The BPF cpumask in which a bit is being cleared.
 */
__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return;

	cpumask_clear_cpu(cpu, (struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_test_cpu() - Test whether a CPU is set in a cpumask.
 * @cpu: The CPU being queried for.
 * @cpumask: The cpumask being queried for containing a CPU.
 *
 * Return:
 * * true - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
 */
__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_cpu(cpu, (struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_test_and_set_cpu() - Atomically test and set a CPU in a BPF cpumask.
 * @cpu: The CPU being set and queried for.
 * @cpumask: The BPF cpumask being set and queried for containing a CPU.
 *
 * Return:
 * * true - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_and_set_cpu(cpu, (struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_test_and_clear_cpu() - Atomically test and clear a CPU in a BPF
 *                                    cpumask.
 * @cpu: The CPU being cleared and queried for.
 * @cpumask: The BPF cpumask being cleared and queried for containing a CPU.
 *
 * Return:
 * * true - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;

	return cpumask_test_and_clear_cpu(cpu, (struct cpumask *)cpumask);
}

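/*
 * Illustrative sketch (not part of the original file): since
 * bpf_cpumask_test_and_set_cpu() returns the previous state of the bit, a
 * program can use it to atomically "claim" a CPU in a shared mask. The name
 * claimed_mask is hypothetical (a struct bpf_cpumask * held by the program).
 *
 *	if (!bpf_cpumask_test_and_set_cpu(cpu, claimed_mask)) {
 *		// The bit was previously clear and has just been set here,
 *		// so this invocation won the race for @cpu.
 *	}
 */
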
/**
 * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask having all of its bits set.
 */
__bpf_kfunc void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
{
	cpumask_setall((struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask being cleared.
 */
__bpf_kfunc void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
{
	cpumask_clear((struct cpumask *)cpumask);
}

/**
 * bpf_cpumask_and() - AND two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true - @dst has at least one bit set following the operation
 * * false - @dst is empty following the operation
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc bool bpf_cpumask_and(struct bpf_cpumask *dst,
				 const struct cpumask *src1,
				 const struct cpumask *src2)
{
	return cpumask_and((struct cpumask *)dst, src1, src2);
}

/**
 * bpf_cpumask_or() - OR two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc void bpf_cpumask_or(struct bpf_cpumask *dst,
				const struct cpumask *src1,
				const struct cpumask *src2)
{
	cpumask_or((struct cpumask *)dst, src1, src2);
}

/**
 * bpf_cpumask_xor() - XOR two cpumasks and store the result.
 * @dst: The BPF cpumask where the result is being stored.
 * @src1: The first input.
 * @src2: The second input.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc void bpf_cpumask_xor(struct bpf_cpumask *dst,
				 const struct cpumask *src1,
				 const struct cpumask *src2)
{
	cpumask_xor((struct cpumask *)dst, src1, src2);
}

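/*
 * Illustrative sketch (not part of the original file): the boolean return
 * value of bpf_cpumask_and() lets a program compute an intersection and bail
 * out when it is empty without a separate bpf_cpumask_empty() call. The names
 * result, mask1, and mask2 are hypothetical bpf_cpumask pointers held by the
 * program.
 *
 *	if (!bpf_cpumask_and(result, (const struct cpumask *)mask1,
 *			     (const struct cpumask *)mask2))
 *		return 0;	// no CPU is set in both mask1 and mask2
 */
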
/**
 * bpf_cpumask_equal() - Check two cpumasks for equality.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true - @src1 and @src2 have the same bits set.
 * * false - @src1 and @src2 differ in at least one bit.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_equal(src1, src2);
}

/**
 * bpf_cpumask_intersects() - Check two cpumasks for overlap.
 * @src1: The first input.
 * @src2: The second input.
 *
 * Return:
 * * true - @src1 and @src2 have at least one of the same bits set.
 * * false - @src1 and @src2 don't have any of the same bits set.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_intersects(src1, src2);
}

/**
 * bpf_cpumask_subset() - Check if a cpumask is a subset of another.
 * @src1: The first cpumask being checked as a subset.
 * @src2: The second cpumask being checked as a superset.
 *
 * Return:
 * * true - All of the bits of @src1 are set in @src2.
 * * false - At least one bit in @src1 is not set in @src2.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_subset(src1, src2);
}

/**
 * bpf_cpumask_empty() - Check if a cpumask is empty.
 * @cpumask: The cpumask being checked.
 *
 * Return:
 * * true - None of the bits in @cpumask are set.
 * * false - At least one bit in @cpumask is set.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
__bpf_kfunc bool bpf_cpumask_empty(const struct cpumask *cpumask)
{
	return cpumask_empty(cpumask);
}

/**
 * bpf_cpumask_full() - Check if a cpumask has all bits set.
 * @cpumask: The cpumask being checked.
 *
 * Return:
 * * true - All of the bits in @cpumask are set.
 * * false - At least one bit in @cpumask is cleared.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
__bpf_kfunc bool bpf_cpumask_full(const struct cpumask *cpumask)
{
	return cpumask_full(cpumask);
}

/**
 * bpf_cpumask_copy() - Copy the contents of a cpumask into a BPF cpumask.
 * @dst: The BPF cpumask being copied into.
 * @src: The cpumask being copied.
 *
 * A struct bpf_cpumask pointer may be safely passed to @src.
 */
__bpf_kfunc void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
{
	cpumask_copy((struct cpumask *)dst, src);
}

/**
 * bpf_cpumask_any_distribute() - Return a random set CPU from a cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Return:
 * * A random set bit within [0, num_cpus) if at least one bit is set.
 * * >= num_cpus if no bit is set.
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
__bpf_kfunc u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask)
{
	return cpumask_any_distribute(cpumask);
}

/**
 * bpf_cpumask_any_and_distribute() - Return a random set CPU from the AND of
 *                                    two cpumasks.
 * @src1: The first cpumask.
 * @src2: The second cpumask.
 *
 * Return:
 * * A random set bit within [0, num_cpus) from the AND of two cpumasks, if at
 *   least one bit is set.
 * * >= num_cpus if no bit is set.
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
__bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
					       const struct cpumask *src2)
{
	return cpumask_any_and_distribute(src1, src2);
}

/**
 * bpf_cpumask_weight() - Return the number of set bits in @cpumask.
 * @cpumask: The cpumask being queried.
 *
 * Count the number of set bits in the given cpumask.
 *
 * Return:
 * * The number of bits set in the mask.
 */
__bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
{
	return cpumask_weight(cpumask);
}

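/*
 * Illustrative sketch (not part of the original file): a scheduler-style
 * program might spread work by picking a pseudo-random CPU from a candidate
 * mask and falling back when the mask is empty. The names candidates,
 * nr_cpus, and fallback_cpu are hypothetical.
 *
 *	u32 cpu = bpf_cpumask_any_distribute((const struct cpumask *)candidates);
 *
 *	if (cpu >= nr_cpus)	// empty mask: no candidate CPU was found
 *		cpu = fallback_cpu;
 */
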
/**
 * bpf_cpumask_populate() - Populate the CPU mask from the contents of
 * a BPF memory region.
 *
 * @cpumask: The cpumask being populated.
 * @src: The BPF memory holding the bit pattern.
 * @src__sz: Length of the BPF memory region in bytes.
 *
 * Return:
 * * 0 if the struct cpumask * instance was populated successfully.
 * * -EACCES if the memory region is too small to populate the cpumask.
 * * -EINVAL if the memory region is not aligned to the size of a long
 *   and the architecture does not support efficient unaligned accesses.
 */
__bpf_kfunc int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz)
{
	unsigned long source = (unsigned long)src;

	/* The memory region must be large enough to populate the entire CPU mask. */
	if (src__sz < bitmap_size(nr_cpu_ids))
		return -EACCES;

	/* If avoiding unaligned accesses, the input region must be aligned to the nearest long. */
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    !IS_ALIGNED(source, sizeof(long)))
		return -EINVAL;

	bitmap_copy(cpumask_bits(cpumask), src, nr_cpu_ids);

	return 0;
}

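/*
 * Illustrative sketch (not part of the original file): a program can
 * initialize a cpumask from a long-aligned local buffer large enough to cover
 * nr_cpu_ids bits, using the same cast convention as elsewhere in this file.
 * MAX_CPUS, bits, and mask are hypothetical names; the buffer must be at
 * least bitmap_size(nr_cpu_ids) bytes or -EACCES is returned.
 *
 *	u64 bits[MAX_CPUS / 64] = { 0x3 };	// CPUs 0 and 1
 *	int err;
 *
 *	err = bpf_cpumask_populate((struct cpumask *)mask, bits, sizeof(bits));
 *	if (err)
 *		return 0;	// too small (-EACCES) or misaligned (-EINVAL)
 */
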
__bpf_kfunc_end_defs();

BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_first_and, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_test_and_clear_cpu, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_and, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_or, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_intersects, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_subset, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_full, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_populate, KF_RCU)
BTF_KFUNCS_END(cpumask_kfunc_btf_ids)

static const struct btf_kfunc_id_set cpumask_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &cpumask_kfunc_btf_ids,
};

BTF_ID_LIST(cpumask_dtor_ids)
BTF_ID(struct, bpf_cpumask)
BTF_ID(func, bpf_cpumask_release_dtor)

static int __init cpumask_kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc cpumask_dtors[] = {
		{
			.btf_id = cpumask_dtor_ids[0],
			.kfunc_btf_id = cpumask_dtor_ids[1]
		},
	};

	ret = bpf_mem_alloc_init(&bpf_cpumask_ma, sizeof(struct bpf_cpumask), false);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &cpumask_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors,
						  ARRAY_SIZE(cpumask_dtors),
						  THIS_MODULE);
}

late_initcall(cpumask_kfunc_init);