GitHub Repository: torvalds/linux
Path: blob/master/kernel/bpf/queue_stack_maps.c

// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"
#include <asm/rqspinlock.h>

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

struct bpf_queue_stack {
	struct bpf_map map;
	rqspinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};
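
/*
 * Both map types share this layout: a circular buffer of max_entries + 1
 * fixed-size slots.  head indexes the next slot to be written, tail the
 * oldest live element; head == tail means empty.  The one spare slot lets
 * "full" (head + 1 catching up with tail, modulo size) be distinguished
 * from "empty" without a separate element counter.
 */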

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}
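
/*
 * Worked example of the wraparound: with max_entries = 3, qs->size is 4.
 * After three pushes head = 3 and tail = 0, so head + 1 wraps to 0 == tail
 * and the map reports full, leaving exactly one slot unused.
 */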

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
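
/*
 * Userspace creation sketch (a hypothetical example using libbpf's
 * bpf_map_create(); not part of this file).  key_size must be 0 to pass
 * the check above:
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "q", 0,
 *				sizeof(__u32), 64, NULL);
 */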

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_res_spin_lock_init(&qs->lock);

	return &qs->map;
}
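
/*
 * Note that size and queue_size above are computed in u64, so neither
 * max_entries + 1 nor the multiplication by value_size can wrap in 32 bits
 * before bpf_map_area_alloc() sees the full byte count.
 */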

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	if (raw_res_spin_lock_irqsave(&qs->lock, flags))
		return -EBUSY;

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_res_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
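
/*
 * The queue consumes from tail, giving FIFO order; peek and pop differ only
 * in whether tail advances.  raw_res_spin_lock_irqsave() is a resilient
 * acquisition that can fail rather than deadlock, which is why a failed
 * acquisition surfaces to callers as -EBUSY instead of spinning forever.
 */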

static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	if (raw_res_spin_lock_irqsave(&qs->lock, flags))
		return -EBUSY;

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_res_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
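
/*
 * The stack reads the most recently written slot at head - 1, giving LIFO
 * order.  head is unsigned, so when head == 0 the subtraction wraps to
 * UINT_MAX; the index >= qs->size check folds that back to size - 1, the
 * last slot below the wrap point.
 */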

/* Called from syscall or from eBPF program */
static long queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static long stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static long queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static long stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
				      u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	if (raw_res_spin_lock_irqsave(&qs->lock, irq_flags))
		return -EBUSY;

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_res_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}
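
/*
 * Usage sketch from BPF program context (a minimal, hypothetical example;
 * the map and section names are illustrative).  The bpf_map_push_elem()
 * and bpf_map_pop_elem() helpers land in the handlers in this file:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_QUEUE);
 *		__uint(max_entries, 64);
 *		__type(value, __u32);
 *	} events SEC(".maps");
 *
 *	SEC("tracepoint/syscalls/sys_enter_write")
 *	int record(void *ctx)
 *	{
 *		__u32 v = 42;
 *
 *		// BPF_EXIST evicts the oldest element when the map is full
 *		bpf_map_push_elem(&events, &v, BPF_EXIST);
 *		return 0;
 *	}
 */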

/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static long queue_stack_map_update_elem(struct bpf_map *map, void *key,
					void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static long queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}
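
/*
 * Queue and stack maps are keyless, so the generic keyed accessors above
 * reject every call.  Userspace still reaches push/pop/peek through the
 * ordinary bpf(2) update/lookup/lookup-and-delete commands, which the
 * syscall layer special-cases for these map types.
 */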

static u64 queue_stack_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_queue_stack);

	usage += ((u64)map->max_entries + 1) * map->value_size;
	return usage;
}
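
/*
 * Example: with max_entries = 64 and value_size = 4, the reported usage is
 * sizeof(struct bpf_queue_stack) + 65 * 4 bytes, matching the allocation
 * made in queue_stack_map_alloc() (one spare slot for the full/empty test).
 */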

BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_mem_usage = queue_stack_map_mem_usage,
	.map_btf_id = &queue_map_btf_ids[0],
};

const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_mem_usage = queue_stack_map_mem_usage,
	.map_btf_id = &queue_map_btf_ids[0],
};
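
/*
 * The two ops tables differ only in their pop and peek handlers: push is
 * shared because insertion at head is identical for FIFO and LIFO, and
 * both reuse the same BTF id since they share struct bpf_queue_stack.
 */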