GitHub Repository: torvalds/linux
Path: blob/master/security/landlock/ruleset.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <[email protected]>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "access.h"
#include "audit.h"
#include "domain.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"

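/*
 * Allocates a zeroed ruleset with room for @num_layers access_masks entries,
 * an initial usage count of one, and empty rule trees.
 */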
static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset =
		kzalloc(struct_size(new_ruleset, access_masks, num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root_inode = RB_ROOT;

#if IS_ENABLED(CONFIG_INET)
	new_ruleset->root_net_port = RB_ROOT;
#endif /* IS_ENABLED(CONFIG_INET) */

	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * access_masks[] = 0
	 */
	return new_ruleset;
}

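/*
 * Builds a one-layer ruleset from the masks handled by user space. A fully
 * empty request is rejected with -ENOMSG as it would be useless.
 */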
struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask,
			const access_mask_t net_access_mask,
			const access_mask_t scope_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about useless ruleset. */
	if (!fs_access_mask && !net_access_mask && !scope_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (IS_ERR(new_ruleset))
		return new_ruleset;
	if (fs_access_mask)
		landlock_add_fs_access_mask(new_ruleset, fs_access_mask, 0);
	if (net_access_mask)
		landlock_add_net_access_mask(new_ruleset, net_access_mask, 0);
	if (scope_mask)
		landlock_add_scope_mask(new_ruleset, scope_mask, 0);
	return new_ruleset;
}

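/*
 * Build-time check that the num_layers bit-field of a rule is wide enough
 * to store LANDLOCK_MAX_NUM_LAYERS.
 */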
static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

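/*
 * Tells whether rules of @key_type reference a refcounted kernel object
 * (e.g. an inode) or carry a raw value (e.g. a network port).
 */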
static bool is_object_pointer(const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return true;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return false;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return false;
	}
}

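/*
 * Allocates a new rule for @id, copying @num_layers entries from @layers
 * and, if @new_layer is not NULL, appending it on top of the copied stack.
 * Takes a reference on the underlying object when @id points to one.
 */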
static struct landlock_rule *
create_rule(const struct landlock_id id,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	if (is_object_pointer(id.type)) {
		/* This should have been caught by insert_rule(). */
		WARN_ON_ONCE(!id.key.object);
		landlock_get_object(id.key.object);
	}

	new_rule->key = id.key;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}

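/*
 * Returns the rb-tree root of @ruleset matching @key_type, or
 * ERR_PTR(-EINVAL) for an unknown key type.
 */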
static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
				const enum landlock_key_type key_type)
{
	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		return &ruleset->root_inode;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		return &ruleset->root_net_port;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return ERR_PTR(-EINVAL);
	}
}

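/*
 * Frees @rule and, for object-backed keys, puts the object reference taken
 * by create_rule().
 */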
static void free_rule(struct landlock_rule *const rule,
		      const enum landlock_key_type key_type)
{
	might_sleep();
	if (!rule)
		return;
	if (is_object_pointer(key_type))
		landlock_put_object(rule->key.object);
	kfree(rule);
}

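/*
 * Build-time check that the ruleset counters are wide enough to store
 * LANDLOCK_MAX_NUM_RULES and LANDLOCK_MAX_NUM_LAYERS.
 */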
static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @id: The ID to build the new rule with. The underlying kernel object, if
 *      any, must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level. In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       const struct landlock_id id,
		       const struct landlock_layer (*const layers)[],
		       const size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;
	struct rb_root *root;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!layers))
		return -ENOENT;

	if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
		return -ENOENT;

	root = get_root(ruleset, id.type);
	if (IS_ERR(root))
		return PTR_ERR(root);

	walker_node = &root->rb_node;
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->key.data != id.key.data) {
			parent_node = *walker_node;
			if (this->key.data < id.key.data)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(id, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, root);
		free_rule(this, id.type);
		return 0;
	}

	/* There is no match for @id. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(id, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, root);
	ruleset->num_rules++;
	return 0;
}

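/*
 * Build-time check that a layer's level and access fields are wide enough
 * to store LANDLOCK_MAX_NUM_LAYERS and LANDLOCK_MASK_ACCESS_FS.
 */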
static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 const struct landlock_id id,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}

static int merge_tree(struct landlock_ruleset *const dst,
		      struct landlock_ruleset *const src,
		      const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *src_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&dst->lock);
	lockdep_assert_held(&src->lock);

	src_root = get_root(src, key_type);
	if (IS_ERR(src_root))
		return PTR_ERR(src_root);

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		if (WARN_ON_ONCE(walker_rule->num_layers != 1))
			return -EINVAL;

		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0))
			return -EINVAL;

		layers[0].access = walker_rule->layers[0].access;

		err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
		if (err)
			return err;
	}
	return err;
}

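/*
 * Merges the single-layer @src ruleset into the @dst domain: records the
 * handled access masks of @src as the topmost @dst layer, then merges the
 * inode and (if configured) network port rule trees.
 */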
static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->access_masks[dst->num_layers - 1] =
		landlock_upgrade_handled_access_masks(src->access_masks[0]);

	/* Merges the @src inode tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Merges the @src network port tree. */
	err = merge_tree(dst, src, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}

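/*
 * Copies each rule of the @parent tree matching @key_type into @child,
 * preserving the existing layer stacks.
 */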
static int inherit_tree(struct landlock_ruleset *const parent,
			struct landlock_ruleset *const child,
			const enum landlock_key_type key_type)
{
	struct landlock_rule *walker_rule, *next_rule;
	struct rb_root *parent_root;
	int err = 0;

	might_sleep();
	lockdep_assert_held(&parent->lock);
	lockdep_assert_held(&child->lock);

	parent_root = get_root(parent, key_type);
	if (IS_ERR(parent_root))
		return PTR_ERR(parent_root);

	/* Copies the @parent inode or network tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     parent_root, node) {
		const struct landlock_id id = {
			.key = walker_rule->key,
			.type = key_type,
		};

		err = insert_rule(child, id, &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			return err;
	}
	return err;
}

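/*
 * Makes @child inherit the rules, access masks and hierarchy of @parent.
 * The @child layer stack must be strictly deeper than the @parent one so
 * that a slot is left for the layer being added.
 */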
static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent inode tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_INODE);
	if (err)
		goto out_unlock;

#if IS_ENABLED(CONFIG_INET)
	/* Copies the @parent network port tree. */
	err = inherit_tree(parent, child, LANDLOCK_KEY_NET_PORT);
	if (err)
		goto out_unlock;
#endif /* IS_ENABLED(CONFIG_INET) */

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->access_masks, parent->access_masks,
	       flex_array_size(parent, access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	landlock_get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}

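/*
 * Frees all rules of both trees, puts the hierarchy, and frees @ruleset
 * itself. Must only be called once the usage count has dropped to zero.
 */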
static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
					     node)
		free_rule(freeme, LANDLOCK_KEY_INODE);

#if IS_ENABLED(CONFIG_INET)
	rbtree_postorder_for_each_entry_safe(freeme, next,
					     &ruleset->root_net_port, node)
		free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */

	landlock_put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

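/* Drops one reference on @ruleset and frees it when no user remains. */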
void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}

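/*
 * Deferred freeing for contexts where sleeping is not allowed: the actual
 * work, which may sleep, is punted to the system workqueue.
 */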
static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

/* Only called by hook_cred_free(). */
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * The current task is requesting to be restricted. The subjective credentials
 * must not be in an overridden state. cf. landlock_init_hierarchy_log().
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;

	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy)
		return ERR_PTR(-ENOMEM);

	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		return ERR_PTR(err);

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		return ERR_PTR(err);

	err = landlock_init_hierarchy_log(new_dom->hierarchy);
	if (err)
		return ERR_PTR(err);

	return no_free_ptr(new_dom);
}

/*
 * The returned access has the same lifetime as @ruleset.
 */
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_id id)
{
	const struct rb_root *root;
	const struct rb_node *node;

	root = get_root((struct landlock_ruleset *)ruleset, id.type);
	if (IS_ERR(root))
		return NULL;
	node = root->rb_node;

	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->key.data == id.key.data)
			return this;
		if (this->key.data < id.key.data)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 * @masks_array_size must be equal to ARRAY_SIZE(*layer_masks).
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
bool landlock_unmask_layers(const struct landlock_rule *const rule,
			    const access_mask_t access_request,
			    layer_mask_t (*const layer_masks)[],
			    const size_t masks_array_size)
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access,
	 * regardless of its position in the layer stack. We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one. When there are multiple requested accesses, for each
	 * policy layer, the full set of requested accesses may not be granted
	 * by only one rule, but by the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layer grants access to each
		 * requested access.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req, masks_array_size) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}

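/*
 * Illustrative example (not from the original source): with two layers both
 * handling a given access right, landlock_init_layer_masks() sets the
 * corresponding (*layer_masks) entry to 0b11. A matching rule carrying a
 * layer of level 1 clears bit 0 (leaving 0b10), so a rule of level 2 must
 * also grant the access for the mask to become empty and the request to be
 * allowed.
 */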
typedef access_mask_t
get_access_mask_t(const struct landlock_ruleset *const ruleset,
		  const u16 layer_level);

/**
 * landlock_init_layer_masks - Initialize layer masks from an access request
 *
 * Populates @layer_masks such that for each access right in @access_request,
 * the bits for all the layers are set where this access right is handled.
 *
 * @domain: The domain that defines the current restrictions.
 * @access_request: The requested access rights to check.
 * @layer_masks: It must contain %LANDLOCK_NUM_ACCESS_FS or
 * %LANDLOCK_NUM_ACCESS_NET elements according to @key_type.
 * @key_type: The key type to switch between access masks of different types.
 *
 * Returns: An access mask where each access right bit is set which is handled
 * in any of the active layers in @domain.
 */
access_mask_t
landlock_init_layer_masks(const struct landlock_ruleset *const domain,
			  const access_mask_t access_request,
			  layer_mask_t (*const layer_masks)[],
			  const enum landlock_key_type key_type)
{
	access_mask_t handled_accesses = 0;
	size_t layer_level, num_access;
	get_access_mask_t *get_access_mask;

	switch (key_type) {
	case LANDLOCK_KEY_INODE:
		get_access_mask = landlock_get_fs_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_FS;
		break;

#if IS_ENABLED(CONFIG_INET)
	case LANDLOCK_KEY_NET_PORT:
		get_access_mask = landlock_get_net_access_mask;
		num_access = LANDLOCK_NUM_ACCESS_NET;
		break;
#endif /* IS_ENABLED(CONFIG_INET) */

	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	memset(layer_masks, 0,
	       array_size(sizeof((*layer_masks)[0]), num_access));

	/* An empty access request can happen because of O_WRONLY | O_RDWR. */
	if (!access_request)
		return 0;

	/* Saves all handled accesses per layer. */
	for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
		const unsigned long access_req = access_request;
		const access_mask_t access_mask =
			get_access_mask(domain, layer_level);
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req, num_access) {
			if (BIT_ULL(access_bit) & access_mask) {
				(*layer_masks)[access_bit] |=
					BIT_ULL(layer_level);
				handled_accesses |= BIT_ULL(access_bit);
			}
		}
	}
	return handled_accesses;
}
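/*
 * Illustrative check flow (not from the original source): callers such as
 * the filesystem access hooks first call landlock_init_layer_masks() to
 * collect the handled accesses, then look up rules with landlock_find_rule()
 * while walking a path, and call landlock_unmask_layers() until every
 * requested access is granted by all layers.
 */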