// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */

#include <linux/bpf_verifier.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/slab.h>

/*
 * This file implements live stack slots analysis. After accumulating
 * stack usage data, the analysis answers queries about whether a
 * particular stack slot may be read by an instruction or any of its
 * successors. This data is consumed by the verifier states caching
 * mechanism to decide which stack slots are important when looking for a
 * visited state corresponding to the current state.
 *
 * The analysis is call chain sensitive, meaning that data is collected
 * and queried for tuples (call chain, subprogram instruction index).
 * Such sensitivity allows identifying whether some subprogram call always
 * leads to writes in the caller's stack.
 *
 * The basic idea is as follows:
 * - As the verifier accumulates a set of visited states, the analysis instance
 *   accumulates a conservative estimate of stack slots that can be read
 *   or must be written for each visited tuple (call chain, instruction index).
 * - If several states happen to visit the same instruction with the same
 *   call chain, stack usage information for the corresponding tuple is joined:
 *   - the "may_read" set represents a union of all possibly read slots
 *     (any slot in the "may_read" set might be read at or after the instruction);
 *   - the "must_write" set represents an intersection of all possibly written slots
 *     (any slot in the "must_write" set is guaranteed to be written by the instruction).
 * - The analysis is split into two phases:
 *   - read and write marks accumulation;
 *   - read and write marks propagation.
 * - The propagation phase is a textbook live variable data flow analysis:
 *
 *   state[cc, i].live_after = U [state[cc, s].live_before for s in insn_successors(i)]
 *   state[cc, i].live_before =
 *     (state[cc, i].live_after / state[cc, i].must_write) U state[cc, i].may_read
 *
 *   Where:
 *   - `U` stands for set union;
 *   - `/` stands for set difference;
 *   - `cc` stands for a call chain;
 *   - `i` and `s` are instruction indexes.
 *
 *   The above equations are computed for each call chain and instruction
 *   index until the state stops changing.
 * - Additionally, in order to transfer "must_write" information from a
 *   subprogram to call instructions invoking this subprogram,
 *   a "must_write_acc" set is tracked for each (cc, i) tuple:
 *   the set of stack slots that are guaranteed to be written by this
 *   instruction or any of its successors (within the subprogram).
 *   The equation for "must_write_acc" propagation looks as follows:
 *
 *   state[cc, i].must_write_acc =
 *     ∩ [state[cc, s].must_write_acc for s in insn_successors(i)]
 *     U state[cc, i].must_write
 *
 *   (An intersection of all "must_write_acc" for instruction successors
 *   plus all "must_write" slots for the instruction itself.)
 * - After the propagation phase completes for a subprogram, information from
 *   the (cc, 0) tuple (subprogram entry) is transferred to the caller's call chain:
 *   - the "must_write_acc" set is intersected with the call site's "must_write" set;
 *   - the "may_read" set is added to the call site's "may_read" set.
 * - Any live stack queries must be made after the propagation phase.
 * - Accumulation and propagation phases can be entered multiple times,
 *   at any point in time:
 *   - the "may_read" set only grows;
 *   - the "must_write" set only shrinks;
 *   - for each visited verifier state with zero branches, all relevant
 *     read and write marks are already recorded by the analysis instance.
 *
 * Technically, the analysis is facilitated by the following data structures:
 * - Call chain: for a given verifier state, the call chain is a tuple of call
 *   instruction indexes leading to the current subprogram plus the subprogram
 *   entry point index.
 * - Function instance: for a given call chain, for each instruction in
 *   the current subprogram, a mapping between instruction index and a
 *   set of "may_read", "must_write" and other marks accumulated for this
 *   instruction.
 * - A hash table mapping call chains to function instances.
 */
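
/*
 * A worked example of the equations above (illustrative only: the
 * instruction indexes, slot numbers and masks below are hypothetical,
 * not taken from a real program). Consider one call chain and a
 * straight-line subprogram:
 *
 *   0: *(u64 *)(r10 - 16) = r1   ; must_write = {1}, may_read = {}
 *   1: r2 = *(u64 *)(r10 - 8)    ; must_write = {},  may_read = {0}
 *   2: exit
 *
 * Propagating in postorder (2, 1, 0):
 *
 *   live_before(2) = {}
 *   live_before(1) = ({} / {})   U {0} = {0}
 *   live_before(0) = ({0} / {1}) U {}  = {0}
 *
 *   must_write_acc(2) = {}
 *   must_write_acc(1) = {} U {}  = {}
 *   must_write_acc(0) = {} U {1} = {1}
 *
 * I.e. slot 0 is live before the subprogram entry (it may be read),
 * while slot 1 is guaranteed to be written by the subprogram as a whole.
 */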

struct callchain {
	u32 callsites[MAX_CALL_FRAMES]; /* instruction index for each frame */
	/* cached subprog_info[*].start for functions owning the frames:
	 * - sp_starts[curframe] is used to get an insn's relative index within
	 *   the current function;
	 * - sp_starts[0..curframe-1] are used to quickly compute the callchain
	 *   one frame up (see get_outer_instance()).
	 */
	u32 sp_starts[MAX_CALL_FRAMES];
	u32 curframe; /* depth of callsites and sp_starts arrays */
};
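
/*
 * Example (hypothetical instruction indexes): if main() calls f() at
 * insn 5, f() starts at insn 10 and calls g() at insn 12, and g()
 * starts at insn 20, then for a state currently executing in g():
 *
 *   callsites = {5, 12, 20}  (the last entry is g()'s own entry index)
 *   sp_starts = {0, 10, 20}
 *   curframe  = 2
 */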

struct per_frame_masks {
	u64 may_read; /* stack slots that may be read by this instruction */
	u64 must_write; /* stack slots that must be written by this instruction */
	u64 must_write_acc; /* stack slots written by this instruction and its successors */
	u64 live_before; /* stack slots that may be read by this insn and its successors */
};

/*
 * A function instance created for a specific callchain.
 * Encapsulates read and write marks for each instruction in the function.
 * Marks are tracked for each frame in the callchain.
 */
struct func_instance {
	struct hlist_node hl_node;
	struct callchain callchain;
	u32 insn_cnt; /* cached number of insns in the function */
	bool updated;
	bool must_write_dropped;
	/* Per-frame, per-instruction masks; frames are allocated lazily. */
	struct per_frame_masks *frames[MAX_CALL_FRAMES];
	/* For each instruction, a flag telling whether "must_write" has been initialized for it. */
	bool *must_write_set;
};

struct live_stack_query {
	struct func_instance *instances[MAX_CALL_FRAMES]; /* valid in range [0..curframe] */
	u32 curframe;
	u32 insn_idx;
};

struct bpf_liveness {
	DECLARE_HASHTABLE(func_instances, 8); /* maps callchain to func_instance */
	struct live_stack_query live_stack_query; /* cache to avoid repetitive ht lookups */
	/* Cached instance corresponding to env->cur_state, avoids per-instruction ht lookup */
	struct func_instance *cur_instance;
	/*
	 * The fields below are used to accumulate stack write marks for the
	 * instruction at @write_insn_idx before submitting the marks to
	 * @cur_instance.
	 */
	u64 write_masks_acc[MAX_CALL_FRAMES];
	u32 write_insn_idx;
};

/* Compute callchain corresponding to state @st at depth @frameno */
static void compute_callchain(struct bpf_verifier_env *env, struct bpf_verifier_state *st,
			      struct callchain *callchain, u32 frameno)
{
	struct bpf_subprog_info *subprog_info = env->subprog_info;
	u32 i;

	memset(callchain, 0, sizeof(*callchain));
	for (i = 0; i <= frameno; i++) {
		callchain->sp_starts[i] = subprog_info[st->frame[i]->subprogno].start;
		if (i < st->curframe)
			callchain->callsites[i] = st->frame[i + 1]->callsite;
	}
	callchain->curframe = frameno;
	callchain->callsites[callchain->curframe] = callchain->sp_starts[callchain->curframe];
}

static u32 hash_callchain(struct callchain *callchain)
{
	return jhash2(callchain->callsites, callchain->curframe, 0);
}

static bool same_callsites(struct callchain *a, struct callchain *b)
{
	int i;

	if (a->curframe != b->curframe)
		return false;
	for (i = a->curframe; i >= 0; i--)
		if (a->callsites[i] != b->callsites[i])
			return false;
	return true;
}

/*
 * Find existing or allocate new function instance corresponding to @callchain.
 * Instances are accumulated in env->liveness->func_instances and persist
 * until the end of the verification process.
 */
static struct func_instance *__lookup_instance(struct bpf_verifier_env *env,
					       struct callchain *callchain)
{
	struct bpf_liveness *liveness = env->liveness;
	struct bpf_subprog_info *subprog;
	struct func_instance *result;
	u32 subprog_sz, size, key;

	key = hash_callchain(callchain);
	hash_for_each_possible(liveness->func_instances, result, hl_node, key)
		if (same_callsites(&result->callchain, callchain))
			return result;

	subprog = bpf_find_containing_subprog(env, callchain->sp_starts[callchain->curframe]);
	subprog_sz = (subprog + 1)->start - subprog->start;
	size = sizeof(struct func_instance);
	result = kvzalloc(size, GFP_KERNEL_ACCOUNT);
	if (!result)
		return ERR_PTR(-ENOMEM);
	result->must_write_set = kvcalloc(subprog_sz, sizeof(*result->must_write_set),
					  GFP_KERNEL_ACCOUNT);
	if (!result->must_write_set) {
		/* not yet in the hash table, would leak otherwise */
		kvfree(result);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&result->callchain, callchain, sizeof(*callchain));
	result->insn_cnt = subprog_sz;
	hash_add(liveness->func_instances, &result->hl_node, key);
	return result;
}

static struct func_instance *lookup_instance(struct bpf_verifier_env *env,
					     struct bpf_verifier_state *st,
					     u32 frameno)
{
	struct callchain callchain;

	compute_callchain(env, st, &callchain, frameno);
	return __lookup_instance(env, &callchain);
}

int bpf_stack_liveness_init(struct bpf_verifier_env *env)
{
	env->liveness = kvzalloc(sizeof(*env->liveness), GFP_KERNEL_ACCOUNT);
	if (!env->liveness)
		return -ENOMEM;
	hash_init(env->liveness->func_instances);
	return 0;
}

void bpf_stack_liveness_free(struct bpf_verifier_env *env)
{
	struct func_instance *instance;
	struct hlist_node *tmp;
	int bkt, i;

	if (!env->liveness)
		return;
	hash_for_each_safe(env->liveness->func_instances, bkt, tmp, instance, hl_node) {
		for (i = 0; i <= instance->callchain.curframe; i++)
			kvfree(instance->frames[i]);
		kvfree(instance->must_write_set);
		kvfree(instance);
	}
	kvfree(env->liveness);
}

/*
 * Convert absolute instruction index @insn_idx to an index relative
 * to start of the function corresponding to @instance.
 */
static int relative_idx(struct func_instance *instance, u32 insn_idx)
{
	return insn_idx - instance->callchain.sp_starts[instance->callchain.curframe];
}

static struct per_frame_masks *get_frame_masks(struct func_instance *instance,
					       u32 frame, u32 insn_idx)
{
	if (!instance->frames[frame])
		return NULL;

	return &instance->frames[frame][relative_idx(instance, insn_idx)];
}

static struct per_frame_masks *alloc_frame_masks(struct bpf_verifier_env *env,
						 struct func_instance *instance,
						 u32 frame, u32 insn_idx)
{
	struct per_frame_masks *arr;

	if (!instance->frames[frame]) {
		arr = kvcalloc(instance->insn_cnt, sizeof(*arr), GFP_KERNEL_ACCOUNT);
		instance->frames[frame] = arr;
		if (!arr)
			return ERR_PTR(-ENOMEM);
	}
	return get_frame_masks(instance, frame, insn_idx);
}

void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env)
{
	env->liveness->cur_instance = NULL;
}

/* If @env->liveness->cur_instance is null, set it to instance corresponding to @env->cur_state. */
static int ensure_cur_instance(struct bpf_verifier_env *env)
{
	struct bpf_liveness *liveness = env->liveness;
	struct func_instance *instance;

	if (liveness->cur_instance)
		return 0;

	instance = lookup_instance(env, env->cur_state, env->cur_state->curframe);
	if (IS_ERR(instance))
		return PTR_ERR(instance);

	liveness->cur_instance = instance;
	return 0;
}

/* Accumulate may_read masks for @frame at @insn_idx */
static int mark_stack_read(struct bpf_verifier_env *env,
			   struct func_instance *instance, u32 frame, u32 insn_idx, u64 mask)
{
	struct per_frame_masks *masks;
	u64 new_may_read;

	masks = alloc_frame_masks(env, instance, frame, insn_idx);
	if (IS_ERR(masks))
		return PTR_ERR(masks);
	new_may_read = masks->may_read | mask;
	if (new_may_read != masks->may_read &&
	    ((new_may_read | masks->live_before) != masks->live_before))
		instance->updated = true;
	masks->may_read |= mask;
	return 0;
}

int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frame, u32 insn_idx, u64 mask)
{
	int err;

	err = ensure_cur_instance(env);
	err = err ?: mark_stack_read(env, env->liveness->cur_instance, frame, insn_idx, mask);
	return err;
}

static void reset_stack_write_marks(struct bpf_verifier_env *env,
				    struct func_instance *instance, u32 insn_idx)
{
	struct bpf_liveness *liveness = env->liveness;
	int i;

	liveness->write_insn_idx = insn_idx;
	for (i = 0; i <= instance->callchain.curframe; i++)
		liveness->write_masks_acc[i] = 0;
}

int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx)
{
	struct bpf_liveness *liveness = env->liveness;
	int err;

	err = ensure_cur_instance(env);
	if (err)
		return err;

	reset_stack_write_marks(env, liveness->cur_instance, insn_idx);
	return 0;
}

void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frame, u64 mask)
{
	env->liveness->write_masks_acc[frame] |= mask;
}

static int commit_stack_write_marks(struct bpf_verifier_env *env,
				    struct func_instance *instance)
{
	struct bpf_liveness *liveness = env->liveness;
	u32 idx, frame, curframe;
	struct per_frame_masks *masks;
	u64 mask, old_must_write;

	if (!instance)
		return 0;

	curframe = instance->callchain.curframe;
	idx = relative_idx(instance, liveness->write_insn_idx);
	for (frame = 0; frame <= curframe; frame++) {
		mask = liveness->write_masks_acc[frame];
		/* avoid allocating frames for zero masks */
		if (mask == 0 && !instance->must_write_set[idx])
			continue;
		masks = alloc_frame_masks(env, instance, frame, liveness->write_insn_idx);
		if (IS_ERR(masks))
			return PTR_ERR(masks);
		old_must_write = masks->must_write;
		/*
		 * If the instruction at this callchain is seen for the first time,
		 * set must_write equal to @mask. Otherwise take an intersection
		 * with the previous value.
		 */
		if (instance->must_write_set[idx])
			mask &= old_must_write;
		if (old_must_write != mask) {
			masks->must_write = mask;
			instance->updated = true;
		}
		if (old_must_write & ~mask)
			instance->must_write_dropped = true;
	}
	instance->must_write_set[idx] = true;
	liveness->write_insn_idx = 0;
	return 0;
}

/*
 * Merge stack write marks in @env->liveness->write_masks_acc
 * with information already in @env->liveness->cur_instance.
 */
int bpf_commit_stack_write_marks(struct bpf_verifier_env *env)
{
	return commit_stack_write_marks(env, env->liveness->cur_instance);
}
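
/*
 * Illustrative usage sketch for the write-marks API above (not a verbatim
 * verifier.c excerpt): for each instruction that writes to the stack,
 * the verifier side is expected to do roughly:
 *
 *   err = bpf_reset_stack_write_marks(env, insn_idx);
 *   ...
 *   bpf_mark_stack_write(env, frameno, BIT(spi));   (once per written slot)
 *   ...
 *   err = bpf_commit_stack_write_marks(env);
 */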

static char *fmt_callchain(struct bpf_verifier_env *env, struct callchain *callchain)
{
	char *buf_end = env->tmp_str_buf + sizeof(env->tmp_str_buf);
	char *buf = env->tmp_str_buf;
	int i;

	buf += snprintf(buf, buf_end - buf, "(");
	for (i = 0; i <= callchain->curframe; i++)
		buf += snprintf(buf, buf_end - buf, "%s%d", i ? "," : "", callchain->callsites[i]);
	snprintf(buf, buf_end - buf, ")");
	return env->tmp_str_buf;
}

static void log_mask_change(struct bpf_verifier_env *env, struct callchain *callchain,
			    char *pfx, u32 frame, u32 insn_idx, u64 old, u64 new)
{
	u64 changed_bits = old ^ new;
	u64 new_ones = new & changed_bits;
	u64 new_zeros = ~new & changed_bits;

	if (!changed_bits)
		return;
	bpf_log(&env->log, "%s frame %d insn %d ", fmt_callchain(env, callchain), frame, insn_idx);
	if (new_ones) {
		bpf_fmt_stack_mask(env->tmp_str_buf, sizeof(env->tmp_str_buf), new_ones);
		bpf_log(&env->log, "+%s %s ", pfx, env->tmp_str_buf);
	}
	if (new_zeros) {
		bpf_fmt_stack_mask(env->tmp_str_buf, sizeof(env->tmp_str_buf), new_zeros);
		bpf_log(&env->log, "-%s %s", pfx, env->tmp_str_buf);
	}
	bpf_log(&env->log, "\n");
}

/*
 * Return the jump offset of @insn: BPF_JMP32|BPF_JA ("gotol") keeps its
 * 32-bit offset in the imm field; all other jumps use the 16-bit off field.
 */
int bpf_jmp_offset(struct bpf_insn *insn)
{
	u8 code = insn->code;

	if (code == (BPF_JMP32 | BPF_JA))
		return insn->imm;
	return insn->off;
}

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for opcode_info_tbl");

inline int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
{
	static const struct opcode_info {
		bool can_jump;
		bool can_fallthrough;
	} opcode_info_tbl[256] = {
		[0 ... 255] = {.can_jump = false, .can_fallthrough = true},
#define _J(code, ...) \
	[BPF_JMP | code] = __VA_ARGS__, \
	[BPF_JMP32 | code] = __VA_ARGS__

		_J(BPF_EXIT, {.can_jump = false, .can_fallthrough = false}),
		_J(BPF_JA, {.can_jump = true, .can_fallthrough = false}),
		_J(BPF_JEQ, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JNE, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JLT, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JLE, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JGT, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JGE, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JSGT, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JSGE, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JSLT, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JSLE, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JCOND, {.can_jump = true, .can_fallthrough = true}),
		_J(BPF_JSET, {.can_jump = true, .can_fallthrough = true}),
#undef _J
	};
	struct bpf_insn *insn = &prog->insnsi[idx];
	const struct opcode_info *opcode_info;
	int i = 0, insn_sz;

	opcode_info = &opcode_info_tbl[BPF_CLASS(insn->code) | BPF_OP(insn->code)];
	insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
	if (opcode_info->can_fallthrough)
		succ[i++] = idx + insn_sz;

	if (opcode_info->can_jump)
		succ[i++] = idx + bpf_jmp_offset(insn) + 1;

	return i;
}

__diag_pop();
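
/*
 * For example (hypothetical indexes): for a conditional jump
 * "if r1 == r2 goto +5" at idx 10, the successors are 11 (fallthrough)
 * and 16 (jump target); for a ldimm64 at idx 10 the only successor is 12,
 * as the instruction occupies two slots; BPF_EXIT has no successors.
 */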

static struct func_instance *get_outer_instance(struct bpf_verifier_env *env,
						struct func_instance *instance)
{
	struct callchain callchain = instance->callchain;

	/* Adjust @callchain to represent callchain one frame up */
	callchain.callsites[callchain.curframe] = 0;
	callchain.sp_starts[callchain.curframe] = 0;
	callchain.curframe--;
	callchain.callsites[callchain.curframe] = callchain.sp_starts[callchain.curframe];
	return __lookup_instance(env, &callchain);
}

static u32 callchain_subprog_start(struct callchain *callchain)
{
	return callchain->sp_starts[callchain->curframe];
}

/*
 * Transfer @live_before and @must_write_acc marks from the first instruction
 * of @instance to the call instruction in the function instance calling @instance.
 */
static int propagate_to_outer_instance(struct bpf_verifier_env *env,
				       struct func_instance *instance)
{
	struct callchain *callchain = &instance->callchain;
	u32 this_subprog_start, callsite, frame;
	struct func_instance *outer_instance;
	struct per_frame_masks *insn;
	int err;

	this_subprog_start = callchain_subprog_start(callchain);
	outer_instance = get_outer_instance(env, instance);
	if (IS_ERR(outer_instance))
		return PTR_ERR(outer_instance);
	callsite = callchain->callsites[callchain->curframe - 1];

	reset_stack_write_marks(env, outer_instance, callsite);
	for (frame = 0; frame < callchain->curframe; frame++) {
		insn = get_frame_masks(instance, frame, this_subprog_start);
		if (!insn)
			continue;
		bpf_mark_stack_write(env, frame, insn->must_write_acc);
		err = mark_stack_read(env, outer_instance, frame, callsite, insn->live_before);
		if (err)
			return err;
	}
	return commit_stack_write_marks(env, outer_instance);
}

static inline bool update_insn(struct bpf_verifier_env *env,
			       struct func_instance *instance, u32 frame, u32 insn_idx)
{
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	u64 new_before, new_after, must_write_acc;
	struct per_frame_masks *insn, *succ_insn;
	u32 succ_num, s, succ[2];
	bool changed;

	succ_num = bpf_insn_successors(env->prog, insn_idx, succ);
	if (unlikely(succ_num == 0))
		return false;

	changed = false;
	insn = get_frame_masks(instance, frame, insn_idx);
	new_before = 0;
	new_after = 0;
	/*
	 * New "must_write_acc" is an intersection of all "must_write_acc"
	 * of successors plus all "must_write" slots of instruction itself.
	 */
	must_write_acc = U64_MAX;
	for (s = 0; s < succ_num; ++s) {
		succ_insn = get_frame_masks(instance, frame, succ[s]);
		new_after |= succ_insn->live_before;
		must_write_acc &= succ_insn->must_write_acc;
	}
	must_write_acc |= insn->must_write;
	/*
	 * New "live_before" is a union of all "live_before" of successors
	 * minus slots written by instruction plus slots read by instruction.
	 */
	new_before = (new_after & ~insn->must_write) | insn->may_read;
	changed |= new_before != insn->live_before;
	changed |= must_write_acc != insn->must_write_acc;
	if (unlikely(env->log.level & BPF_LOG_LEVEL2) &&
	    (insn->may_read || insn->must_write ||
	     insn_idx == callchain_subprog_start(&instance->callchain) ||
	     aux[insn_idx].prune_point)) {
		log_mask_change(env, &instance->callchain, "live",
				frame, insn_idx, insn->live_before, new_before);
		log_mask_change(env, &instance->callchain, "written",
				frame, insn_idx, insn->must_write_acc, must_write_acc);
	}
	insn->live_before = new_before;
	insn->must_write_acc = must_write_acc;
	return changed;
}

/* Fixed-point computation of @live_before and @must_write_acc marks */
static int update_instance(struct bpf_verifier_env *env, struct func_instance *instance)
{
	u32 i, frame, po_start, po_end, cnt, this_subprog_start;
	struct callchain *callchain = &instance->callchain;
	int *insn_postorder = env->cfg.insn_postorder;
	struct bpf_subprog_info *subprog;
	struct per_frame_masks *insn;
	bool changed;
	int err;

	this_subprog_start = callchain_subprog_start(callchain);
	/*
	 * If must_write marks were updated, must_write_acc needs to be reset
	 * (to account for the case when new must_write sets became smaller).
	 */
	if (instance->must_write_dropped) {
		for (frame = 0; frame <= callchain->curframe; frame++) {
			if (!instance->frames[frame])
				continue;

			for (i = 0; i < instance->insn_cnt; i++) {
				insn = get_frame_masks(instance, frame, this_subprog_start + i);
				insn->must_write_acc = 0;
			}
		}
	}

	subprog = bpf_find_containing_subprog(env, this_subprog_start);
	po_start = subprog->postorder_start;
	po_end = (subprog + 1)->postorder_start;
	cnt = 0;
	/* repeat until a fixed point is reached */
	do {
		cnt++;
		changed = false;
		for (frame = 0; frame <= instance->callchain.curframe; frame++) {
			if (!instance->frames[frame])
				continue;

			for (i = po_start; i < po_end; i++)
				changed |= update_insn(env, instance, frame, insn_postorder[i]);
		}
	} while (changed);

	if (env->log.level & BPF_LOG_LEVEL2)
		bpf_log(&env->log, "%s live stack update done in %d iterations\n",
			fmt_callchain(env, callchain), cnt);

	/* transfer marks accumulated for outer frames to the outer func instance (caller) */
	if (callchain->curframe > 0) {
		err = propagate_to_outer_instance(env, instance);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Prepare all callchains within @env->cur_state for querying.
 * This function should be called after each verifier.c:pop_stack()
 * and whenever verifier.c:do_check_insn() processes a subprogram exit.
 * This guarantees that visited verifier states with zero branches
 * have their bpf_mark_stack_{read,write}() effects propagated in
 * @env->liveness.
 */
int bpf_update_live_stack(struct bpf_verifier_env *env)
{
	struct func_instance *instance;
	int err, frame;

	bpf_reset_live_stack_callchain(env);
	for (frame = env->cur_state->curframe; frame >= 0; --frame) {
		instance = lookup_instance(env, env->cur_state, frame);
		if (IS_ERR(instance))
			return PTR_ERR(instance);

		if (instance->updated) {
			err = update_instance(env, instance);
			if (err)
				return err;
			instance->updated = false;
			instance->must_write_dropped = false;
		}
	}
	return 0;
}

static bool is_live_before(struct func_instance *instance, u32 insn_idx, u32 frameno, u32 spi)
{
	struct per_frame_masks *masks;

	masks = get_frame_masks(instance, frameno, insn_idx);
	return masks && (masks->live_before & BIT(spi));
}

int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct live_stack_query *q = &env->liveness->live_stack_query;
	struct func_instance *instance;
	u32 frame;

	memset(q, 0, sizeof(*q));
	for (frame = 0; frame <= st->curframe; frame++) {
		instance = lookup_instance(env, st, frame);
		if (IS_ERR(instance))
			return PTR_ERR(instance);
		q->instances[frame] = instance;
	}
	q->curframe = st->curframe;
	q->insn_idx = st->insn_idx;
	return 0;
}

bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi)
{
	/*
	 * The slot is alive if it is live before q->insn_idx in the current
	 * func instance, or if for some outer func instance it is:
	 * - alive before the callsite, if the callsite calls a callback;
	 * - alive after the callsite otherwise.
	 */
	struct live_stack_query *q = &env->liveness->live_stack_query;
	struct func_instance *instance, *curframe_instance;
	u32 i, callsite;
	bool alive;

	curframe_instance = q->instances[q->curframe];
	if (is_live_before(curframe_instance, q->insn_idx, frameno, spi))
		return true;

	for (i = frameno; i < q->curframe; i++) {
		callsite = curframe_instance->callchain.callsites[i];
		instance = q->instances[i];
		alive = bpf_calls_callback(env, callsite)
			? is_live_before(instance, callsite, frameno, spi)
			: is_live_before(instance, callsite + 1, frameno, spi);
		if (alive)
			return true;
	}

	return false;
}
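
/*
 * Illustrative query sketch (not a verbatim verifier.c excerpt): when
 * comparing the current state against a visited state at a checkpoint,
 * the states caching mechanism is expected to do roughly:
 *
 *   err = bpf_live_stack_query_init(env, visited_st);
 *   ...
 *   if (bpf_stack_slot_alive(env, frameno, spi))
 *           compare the slot's contents;
 *   else
 *           skip the slot, it cannot influence execution.
 */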