GitHub Repository: torvalds/linux
Path: blob/master/security/landlock/fs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock - Filesystem management and hooks
 *
 * Copyright © 2016-2020 Mickaël Salaün <[email protected]>
 * Copyright © 2018-2020 ANSSI
 * Copyright © 2021-2025 Microsoft Corporation
 * Copyright © 2022 Günther Noack <[email protected]>
 * Copyright © 2023-2024 Google LLC
 */

#include <asm/ioctls.h>
#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_audit.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/fiemap.h>
#include <uapi/linux/landlock.h>

#include "access.h"
#include "audit.h"
#include "common.h"
#include "cred.h"
#include "domain.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"

/* Underlying object management */

static void release_inode(struct landlock_object *const object)
        __releases(object->lock)
{
        struct inode *const inode = object->underobj;
        struct super_block *sb;

        if (!inode) {
                spin_unlock(&object->lock);
                return;
        }

        /*
         * Protects against concurrent use by hook_sb_delete() of the reference
         * to the underlying inode.
         */
        object->underobj = NULL;
        /*
         * Makes sure that if the filesystem is concurrently unmounted,
         * hook_sb_delete() will wait for us to finish iput().
         */
        sb = inode->i_sb;
        atomic_long_inc(&landlock_superblock(sb)->inode_refs);
        spin_unlock(&object->lock);
        /*
         * Because object->underobj was not NULL, hook_sb_delete() and
         * get_inode_object() guarantee that it is safe to reset
         * landlock_inode(inode)->object while it is not NULL. It is therefore
         * not necessary to lock inode->i_lock.
         */
        rcu_assign_pointer(landlock_inode(inode)->object, NULL);
        /*
         * Now, new rules can safely be tied to @inode with get_inode_object().
         */

        iput(inode);
        if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
                wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
        .release = release_inode
};

/* IOCTL helpers */

/**
 * is_masked_device_ioctl - Determine whether an IOCTL command is always
 * permitted with Landlock for device files. These commands cannot be
 * restricted on device files by enforcing a Landlock policy.
 *
 * @cmd: The IOCTL command that is supposed to be run.
 *
 * By default, any IOCTL on a device file requires the
 * LANDLOCK_ACCESS_FS_IOCTL_DEV right. However, we blanket-permit some
 * commands if:
 *
 * 1. The command is implemented in fs/ioctl.c's do_vfs_ioctl(),
 *    not in f_ops->unlocked_ioctl() or f_ops->compat_ioctl().
 *
 * 2. The command is harmless when invoked on devices.
 *
 * We also permit commands that do not make sense for devices, but where the
 * do_vfs_ioctl() implementation returns a more conventional error code.
 *
 * Any new IOCTL commands that are implemented in fs/ioctl.c's do_vfs_ioctl()
 * should be considered for inclusion here.
 *
 * Returns: true if the IOCTL @cmd cannot be restricted with Landlock for
 * device files.
 */
static __attribute_const__ bool is_masked_device_ioctl(const unsigned int cmd)
{
        switch (cmd) {
        /*
         * FIOCLEX, FIONCLEX, FIONBIO and FIOASYNC manipulate the FD's
         * close-on-exec and the file's buffered-IO and async flags. These
         * operations are also available through fcntl(2), and are
         * unconditionally permitted in Landlock.
         */
        case FIOCLEX:
        case FIONCLEX:
        case FIONBIO:
        case FIOASYNC:
        /*
         * FIOQSIZE queries the size of a regular file, directory, or link.
         *
         * We still permit it, because it always returns -ENOTTY for
         * other file types.
         */
        case FIOQSIZE:
        /*
         * FIFREEZE and FITHAW freeze and thaw the file system to which the
         * given file belongs; both require CAP_SYS_ADMIN.
         *
         * These commands operate on the file system's superblock rather
         * than on the file itself. The same operations can also be
         * done through any other file or directory on the same file
         * system, so it is safe to permit these.
         */
        case FIFREEZE:
        case FITHAW:
        /*
         * FS_IOC_FIEMAP queries information about the allocation of
         * blocks within a file.
         *
         * This IOCTL command only makes sense for regular files and is
         * not implemented by devices. It is harmless to permit.
         */
        case FS_IOC_FIEMAP:
        /*
         * FIGETBSZ queries the file system's block size for a file or
         * directory.
         *
         * This command operates on the file system's superblock rather
         * than on the file itself. The same operation can also be done
         * through any other file or directory on the same file system,
         * so it is safe to permit it.
         */
        case FIGETBSZ:
        /*
         * FICLONE, FICLONERANGE and FIDEDUPERANGE make files share
         * their underlying storage ("reflink") between source and
         * destination FDs, on file systems which support that.
         *
         * These IOCTL commands only apply to regular files
         * and are harmless to permit for device files.
         */
        case FICLONE:
        case FICLONERANGE:
        case FIDEDUPERANGE:
        /*
         * FS_IOC_GETFSUUID and FS_IOC_GETFSSYSFSPATH both operate on
         * the file system superblock, not on the specific file, so
         * these operations are available through any other file on the
         * same file system as well.
         */
        case FS_IOC_GETFSUUID:
        case FS_IOC_GETFSSYSFSPATH:
                return true;

        /*
         * FIONREAD, FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IOC_FSGETXATTR and
         * FS_IOC_FSSETXATTR are forwarded to device implementations.
         */

        /*
         * file_ioctl() commands (FIBMAP, FS_IOC_RESVSP, FS_IOC_RESVSP64,
         * FS_IOC_UNRESVSP, FS_IOC_UNRESVSP64 and FS_IOC_ZERO_RANGE) are
         * forwarded to device implementations, so not permitted.
         */

        /* Other commands are guarded by the access right. */
        default:
                return false;
        }
}
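
/*
 * Illustration only (not kernel code, and only a sketch): on a character
 * device file opened after Landlock enforcement, with a domain that handles
 * LANDLOCK_ACCESS_FS_IOCTL_DEV but does not grant it, the masked commands
 * above still work while device-specific commands are denied. The device
 * path and the TIOCGWINSZ command below are arbitrary examples:
 *
 *	int fd = open("/dev/tty", O_RDWR | O_CLOEXEC);
 *
 *	ioctl(fd, FIOCLEX);		// allowed: masked VFS command
 *	ioctl(fd, TIOCGWINSZ, &ws);	// denied with EACCES: device command
 */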

/*
 * is_masked_device_ioctl_compat - same as the helper above, but checking the
 * "compat" IOCTL commands.
 *
 * The IOCTL commands with special handling in compat-mode should behave the
 * same as their non-compat counterparts.
 */
static __attribute_const__ bool
is_masked_device_ioctl_compat(const unsigned int cmd)
{
        switch (cmd) {
        /* FICLONE is permitted, same as in the non-compat variant. */
        case FICLONE:
                return true;

#if defined(CONFIG_X86_64)
        /*
         * FS_IOC_RESVSP_32, FS_IOC_RESVSP64_32, FS_IOC_UNRESVSP_32,
         * FS_IOC_UNRESVSP64_32, FS_IOC_ZERO_RANGE_32: not blanket-permitted,
         * for consistency with their non-compat variants.
         */
        case FS_IOC_RESVSP_32:
        case FS_IOC_RESVSP64_32:
        case FS_IOC_UNRESVSP_32:
        case FS_IOC_UNRESVSP64_32:
        case FS_IOC_ZERO_RANGE_32:
#endif

        /*
         * FS_IOC32_GETFLAGS, FS_IOC32_SETFLAGS are forwarded to their device
         * implementations.
         */
        case FS_IOC32_GETFLAGS:
        case FS_IOC32_SETFLAGS:
                return false;
        default:
                return is_masked_device_ioctl(cmd);
        }
}

/* Ruleset management */

static struct landlock_object *get_inode_object(struct inode *const inode)
{
        struct landlock_object *object, *new_object;
        struct landlock_inode_security *inode_sec = landlock_inode(inode);

        rcu_read_lock();
retry:
        object = rcu_dereference(inode_sec->object);
        if (object) {
                if (likely(refcount_inc_not_zero(&object->usage))) {
                        rcu_read_unlock();
                        return object;
                }
                /*
                 * We are racing with release_inode(), the object is going
                 * away. Wait for release_inode(), then retry.
                 */
                spin_lock(&object->lock);
                spin_unlock(&object->lock);
                goto retry;
        }
        rcu_read_unlock();

        /*
         * If there is no object tied to @inode, then create a new one (without
         * holding any locks).
         */
        new_object = landlock_create_object(&landlock_fs_underops, inode);
        if (IS_ERR(new_object))
                return new_object;

        /*
         * Protects against concurrent calls to get_inode_object() or
         * hook_sb_delete().
         */
        spin_lock(&inode->i_lock);
        if (unlikely(rcu_access_pointer(inode_sec->object))) {
                /* Someone else just created the object, bail out and retry. */
                spin_unlock(&inode->i_lock);
                kfree(new_object);

                rcu_read_lock();
                goto retry;
        }

        /*
         * @inode will be released by hook_sb_delete() on its superblock
         * shutdown, or by release_inode() when no more rulesets reference the
         * related object.
         */
        ihold(inode);
        rcu_assign_pointer(inode_sec->object, new_object);
        spin_unlock(&inode->i_lock);
        return new_object;
}

/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
        LANDLOCK_ACCESS_FS_EXECUTE | \
        LANDLOCK_ACCESS_FS_WRITE_FILE | \
        LANDLOCK_ACCESS_FS_READ_FILE | \
        LANDLOCK_ACCESS_FS_TRUNCATE | \
        LANDLOCK_ACCESS_FS_IOCTL_DEV)
/* clang-format on */

/*
 * @path: Should have been checked by get_path_from_fd().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
                            const struct path *const path,
                            access_mask_t access_rights)
{
        int err;
        struct landlock_id id = {
                .type = LANDLOCK_KEY_INODE,
        };

        /* Files only get access rights that make sense. */
        if (!d_is_dir(path->dentry) &&
            (access_rights | ACCESS_FILE) != ACCESS_FILE)
                return -EINVAL;
        if (WARN_ON_ONCE(ruleset->num_layers != 1))
                return -EINVAL;

        /* Transforms relative access rights to absolute ones. */
        access_rights |= LANDLOCK_MASK_ACCESS_FS &
                         ~landlock_get_fs_access_mask(ruleset, 0);
        id.key.object = get_inode_object(d_backing_inode(path->dentry));
        if (IS_ERR(id.key.object))
                return PTR_ERR(id.key.object);
        mutex_lock(&ruleset->lock);
        err = landlock_insert_rule(ruleset, id, access_rights);
        mutex_unlock(&ruleset->lock);
        /*
         * No need to check for an error because landlock_insert_rule()
         * increments the refcount for the new object if needed.
         */
        landlock_put_object(id.key.object);
        return err;
}
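
/*
 * For context, a hedged user-space sketch (not kernel code) of the call path
 * that ends up in landlock_append_fs_rule(): the landlock_add_rule(2) syscall
 * with a struct landlock_path_beneath_attr. The "/usr" path and the access
 * rights are arbitrary examples, and error handling is elided:
 *
 *	struct landlock_path_beneath_attr path_beneath = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
 *				  LANDLOCK_ACCESS_FS_READ_DIR,
 *	};
 *
 *	path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
 *	syscall(__NR_landlock_add_rule, ruleset_fd,
 *		LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
 */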

/* Access-control management */

/*
 * The lifetime of the returned rule is tied to @domain.
 *
 * Returns NULL if no rule is found or if @dentry is negative.
 */
static const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
          const struct dentry *const dentry)
{
        const struct landlock_rule *rule;
        const struct inode *inode;
        struct landlock_id id = {
                .type = LANDLOCK_KEY_INODE,
        };

        /* Ignores nonexistent leaves. */
        if (d_is_negative(dentry))
                return NULL;

        inode = d_backing_inode(dentry);
        rcu_read_lock();
        id.key.object = rcu_dereference(landlock_inode(inode)->object);
        rule = landlock_find_rule(domain, id);
        rcu_read_unlock();
        return rule;
}

/*
 * Allows access to pseudo filesystems that will never be mountable (e.g.
 * sockfs, pipefs), but which are still reachable through
 * /proc/<pid>/fd/<file-descriptor>.
 */
static bool is_nouser_or_private(const struct dentry *dentry)
{
        return (dentry->d_sb->s_flags & SB_NOUSER) ||
               (d_is_positive(dentry) &&
                unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}

static const struct access_masks any_fs = {
        .fs = ~0,
};

/*
 * Checks that a destination file hierarchy has at least as many restrictions
 * as a source file hierarchy. This is only used for link and rename actions.
 *
 * @layer_masks_child2: Optional child masks.
 */
static bool no_more_access(
        const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
        const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
        const bool child1_is_directory,
        const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
        const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
        const bool child2_is_directory)
{
        unsigned long access_bit;

        for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
             access_bit++) {
                /* Ignores accesses that only make sense for directories. */
                const bool is_file_access =
                        !!(BIT_ULL(access_bit) & ACCESS_FILE);

                if (child1_is_directory || is_file_access) {
                        /*
                         * Checks if the destination restrictions are a
                         * superset of the source ones (i.e. inherited access
                         * rights without child exceptions):
                         * restrictions(parent2) >= restrictions(child1)
                         */
                        if ((((*layer_masks_parent1)[access_bit] &
                              (*layer_masks_child1)[access_bit]) |
                             (*layer_masks_parent2)[access_bit]) !=
                            (*layer_masks_parent2)[access_bit])
                                return false;
                }

                if (!layer_masks_child2)
                        continue;
                if (child2_is_directory || is_file_access) {
                        /*
                         * Checks inverted restrictions for RENAME_EXCHANGE:
                         * restrictions(parent1) >= restrictions(child2)
                         */
                        if ((((*layer_masks_parent2)[access_bit] &
                              (*layer_masks_child2)[access_bit]) |
                             (*layer_masks_parent1)[access_bit]) !=
                            (*layer_masks_parent1)[access_bit])
                                return false;
                }
        }
        return true;
}

#define NMA_TRUE(...) KUNIT_EXPECT_TRUE(test, no_more_access(__VA_ARGS__))
#define NMA_FALSE(...) KUNIT_EXPECT_FALSE(test, no_more_access(__VA_ARGS__))

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_no_more_access(struct kunit *const test)
{
        const layer_mask_t rx0[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
                [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT_ULL(0),
        };
        const layer_mask_t mx0[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
                [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = BIT_ULL(0),
        };
        const layer_mask_t x0[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
        };
        const layer_mask_t x1[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(1),
        };
        const layer_mask_t x01[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
                                                          BIT_ULL(1),
        };
        const layer_mask_t allows_all[LANDLOCK_NUM_ACCESS_FS] = {};

        /* Checks without restriction. */
        NMA_TRUE(&x0, &allows_all, false, &allows_all, NULL, false);
        NMA_TRUE(&allows_all, &x0, false, &allows_all, NULL, false);
        NMA_FALSE(&x0, &x0, false, &allows_all, NULL, false);

        /*
         * Checks that we can only refer a file if no more access could be
         * inherited.
         */
        NMA_TRUE(&x0, &x0, false, &rx0, NULL, false);
        NMA_TRUE(&rx0, &rx0, false, &rx0, NULL, false);
        NMA_FALSE(&rx0, &rx0, false, &x0, NULL, false);
        NMA_FALSE(&rx0, &rx0, false, &x1, NULL, false);

        /* Checks allowed referring with different nested domains. */
        NMA_TRUE(&x0, &x1, false, &x0, NULL, false);
        NMA_TRUE(&x1, &x0, false, &x0, NULL, false);
        NMA_TRUE(&x0, &x01, false, &x0, NULL, false);
        NMA_TRUE(&x0, &x01, false, &rx0, NULL, false);
        NMA_TRUE(&x01, &x0, false, &x0, NULL, false);
        NMA_TRUE(&x01, &x0, false, &rx0, NULL, false);
        NMA_FALSE(&x01, &x01, false, &x0, NULL, false);

        /* Checks that file access rights are also enforced for a directory. */
        NMA_FALSE(&rx0, &rx0, true, &x0, NULL, false);

        /* Checks that directory access rights don't impact file referring... */
        NMA_TRUE(&mx0, &mx0, false, &x0, NULL, false);
        /* ...but only directory referring. */
        NMA_FALSE(&mx0, &mx0, true, &x0, NULL, false);

        /* Checks directory exchange. */
        NMA_TRUE(&mx0, &mx0, true, &mx0, &mx0, true);
        NMA_TRUE(&mx0, &mx0, true, &mx0, &x0, true);
        NMA_FALSE(&mx0, &mx0, true, &x0, &mx0, true);
        NMA_FALSE(&mx0, &mx0, true, &x0, &x0, true);
        NMA_FALSE(&mx0, &mx0, true, &x1, &x1, true);

        /* Checks file exchange with directory access rights... */
        NMA_TRUE(&mx0, &mx0, false, &mx0, &mx0, false);
        NMA_TRUE(&mx0, &mx0, false, &mx0, &x0, false);
        NMA_TRUE(&mx0, &mx0, false, &x0, &mx0, false);
        NMA_TRUE(&mx0, &mx0, false, &x0, &x0, false);
        /* ...and with file access rights. */
        NMA_TRUE(&rx0, &rx0, false, &rx0, &rx0, false);
        NMA_TRUE(&rx0, &rx0, false, &rx0, &x0, false);
        NMA_FALSE(&rx0, &rx0, false, &x0, &rx0, false);
        NMA_FALSE(&rx0, &rx0, false, &x0, &x0, false);
        NMA_FALSE(&rx0, &rx0, false, &x1, &x1, false);

        /*
         * Allowing the following requests should not be a security risk
         * because domain 0 denies execute access, and domain 1 is always
         * nested with domain 0. However, adding an exception for this case
         * would mean checking all nested domains to make sure none can get
         * more privileges (e.g. processes only sandboxed by domain 0).
         * Moreover, this behavior (i.e. composition of N domains) could then
         * be inconsistent compared to domain 1's ruleset alone (e.g. it might
         * be denied to link/rename with domain 1's ruleset, whereas it would
         * be allowed if nested on top of domain 0). Another drawback would be
         * to create a covert channel that could enable sandboxed processes to
         * infer most of the filesystem restrictions from their domain. To
         * make it simple, efficient, safe, and more consistent, this case is
         * always denied.
         */
        NMA_FALSE(&x1, &x1, false, &x0, NULL, false);
        NMA_FALSE(&x1, &x1, false, &rx0, NULL, false);
        NMA_FALSE(&x1, &x1, true, &x0, NULL, false);
        NMA_FALSE(&x1, &x1, true, &rx0, NULL, false);

        /* Checks the same case of exclusive domains with a file... */
        NMA_TRUE(&x1, &x1, false, &x01, NULL, false);
        NMA_FALSE(&x1, &x1, false, &x01, &x0, false);
        NMA_FALSE(&x1, &x1, false, &x01, &x01, false);
        NMA_FALSE(&x1, &x1, false, &x0, &x0, false);
        /* ...and with a directory. */
        NMA_FALSE(&x1, &x1, false, &x0, &x0, true);
        NMA_FALSE(&x1, &x1, true, &x0, &x0, false);
        NMA_FALSE(&x1, &x1, true, &x0, &x0, true);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#undef NMA_TRUE
#undef NMA_FALSE

static bool is_layer_masks_allowed(
        layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
        return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
}

/*
 * Removes @layer_masks accesses that are not requested.
 *
 * Returns true if the request is allowed, false otherwise.
 */
static bool
scope_to_request(const access_mask_t access_request,
                 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
        const unsigned long access_req = access_request;
        unsigned long access_bit;

        if (WARN_ON_ONCE(!layer_masks))
                return true;

        for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
                (*layer_masks)[access_bit] = 0;

        return is_layer_masks_allowed(layer_masks);
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_scope_to_request_with_exec_none(struct kunit *const test)
{
        /* Allows everything. */
        layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

        /* Checks and scopes with execute. */
        KUNIT_EXPECT_TRUE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
                                                 &layer_masks));
        KUNIT_EXPECT_EQ(test, 0,
                        layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
        KUNIT_EXPECT_EQ(test, 0,
                        layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

static void test_scope_to_request_with_exec_some(struct kunit *const test)
{
        /* Denies execute and write. */
        layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
                [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
        };

        /* Checks and scopes with execute. */
        KUNIT_EXPECT_FALSE(test, scope_to_request(LANDLOCK_ACCESS_FS_EXECUTE,
                                                  &layer_masks));
        KUNIT_EXPECT_EQ(test, BIT_ULL(0),
                        layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
        KUNIT_EXPECT_EQ(test, 0,
                        layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

static void test_scope_to_request_without_access(struct kunit *const test)
{
        /* Denies execute and write. */
        layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0),
                [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(1),
        };

        /* Checks and scopes without access request. */
        KUNIT_EXPECT_TRUE(test, scope_to_request(0, &layer_masks));
        KUNIT_EXPECT_EQ(test, 0,
                        layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)]);
        KUNIT_EXPECT_EQ(test, 0,
                        layer_masks[BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)]);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

/*
 * Returns true if there is at least one access right different from
 * LANDLOCK_ACCESS_FS_REFER.
 */
static bool
is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
          const access_mask_t access_request)
{
        unsigned long access_bit;
        /* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
        const unsigned long access_check = access_request &
                                           ~LANDLOCK_ACCESS_FS_REFER;

        if (!layer_masks)
                return false;

        for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
                if ((*layer_masks)[access_bit])
                        return true;
        }
        return false;
}

#define IE_TRUE(...) KUNIT_EXPECT_TRUE(test, is_eacces(__VA_ARGS__))
#define IE_FALSE(...) KUNIT_EXPECT_FALSE(test, is_eacces(__VA_ARGS__))

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

static void test_is_eacces_with_none(struct kunit *const test)
{
        const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

        IE_FALSE(&layer_masks, 0);
        IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
        IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
        IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

static void test_is_eacces_with_refer(struct kunit *const test)
{
        const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = BIT_ULL(0),
        };

        IE_FALSE(&layer_masks, 0);
        IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
        IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);
        IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

static void test_is_eacces_with_write(struct kunit *const test)
{
        const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
                [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = BIT_ULL(0),
        };

        IE_FALSE(&layer_masks, 0);
        IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_REFER);
        IE_FALSE(&layer_masks, LANDLOCK_ACCESS_FS_EXECUTE);

        IE_TRUE(&layer_masks, LANDLOCK_ACCESS_FS_WRITE_FILE);
}

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */

#undef IE_TRUE
#undef IE_FALSE

/**
 * is_access_to_paths_allowed - Check accesses for requests with a common path
 *
 * @domain: Domain to check against.
 * @path: File hierarchy to walk through.
 * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
 *     equal to @layer_masks_parent2 (if any). This is tied to the unique
 *     requested path for most actions, or the source in case of a refer action
 *     (i.e. rename or link), or the source and destination in case of
 *     RENAME_EXCHANGE.
 * @layer_masks_parent1: Pointer to a matrix of layer masks per access
 *     masks, identifying the layers that forbid a specific access. Bits from
 *     this matrix can be unset according to the @path walk. An empty matrix
 *     means that @domain allows all possible Landlock accesses (i.e. not only
 *     those identified by @access_request_parent1). This matrix can
 *     initially refer to domain layer masks and, when the accesses for the
 *     destination and source are the same, to requested layer masks.
 * @log_request_parent1: Audit request to fill if the related access is denied.
 * @dentry_child1: Dentry to the initial child of the parent1 path. This
 *     pointer must be NULL for non-refer actions (i.e. not link nor rename).
 * @access_request_parent2: Similar to @access_request_parent1 but for a
 *     request involving a source and a destination. This refers to the
 *     destination, except in case of RENAME_EXCHANGE where it also refers to
 *     the source. Must be set to 0 when using a simple path request.
 * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
 *     action. This must be NULL otherwise.
 * @log_request_parent2: Audit request to fill if the related access is denied.
 * @dentry_child2: Dentry to the initial child of the parent2 path. This
 *     pointer is only set for RENAME_EXCHANGE actions and must be NULL
 *     otherwise.
 *
 * This helper first checks that the destination has a superset of restrictions
 * compared to the source (if any) for a common path. Because of
 * RENAME_EXCHANGE actions, source and destinations may be swapped. It then
 * checks that the collected accesses and the remaining ones are enough to
 * allow the request.
 *
 * Returns:
 * - true if the access request is granted;
 * - false otherwise.
 */
static bool is_access_to_paths_allowed(
        const struct landlock_ruleset *const domain,
        const struct path *const path,
        const access_mask_t access_request_parent1,
        layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
        struct landlock_request *const log_request_parent1,
        struct dentry *const dentry_child1,
        const access_mask_t access_request_parent2,
        layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
        struct landlock_request *const log_request_parent2,
        struct dentry *const dentry_child2)
{
        bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
             child1_is_directory = true, child2_is_directory = true;
        struct path walker_path;
        access_mask_t access_masked_parent1, access_masked_parent2;
        layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
                _layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
        layer_mask_t (*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
                (*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;

        if (!access_request_parent1 && !access_request_parent2)
                return true;

        if (WARN_ON_ONCE(!path))
                return true;

        if (is_nouser_or_private(path->dentry))
                return true;

        if (WARN_ON_ONCE(!layer_masks_parent1))
                return false;

        allowed_parent1 = is_layer_masks_allowed(layer_masks_parent1);

        if (unlikely(layer_masks_parent2)) {
                if (WARN_ON_ONCE(!dentry_child1))
                        return false;

                allowed_parent2 = is_layer_masks_allowed(layer_masks_parent2);

                /*
                 * For a double request, first check for potential privilege
                 * escalation by looking at domain handled accesses (which are
                 * a superset of the meaningful requested accesses).
                 */
                access_masked_parent1 = access_masked_parent2 =
                        landlock_union_access_masks(domain).fs;
                is_dom_check = true;
        } else {
                if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
                        return false;
                /* For a simple request, only check for requested accesses. */
                access_masked_parent1 = access_request_parent1;
                access_masked_parent2 = access_request_parent2;
                is_dom_check = false;
        }

        if (unlikely(dentry_child1)) {
                landlock_unmask_layers(
                        find_rule(domain, dentry_child1),
                        landlock_init_layer_masks(
                                domain, LANDLOCK_MASK_ACCESS_FS,
                                &_layer_masks_child1, LANDLOCK_KEY_INODE),
                        &_layer_masks_child1, ARRAY_SIZE(_layer_masks_child1));
                layer_masks_child1 = &_layer_masks_child1;
                child1_is_directory = d_is_dir(dentry_child1);
        }
        if (unlikely(dentry_child2)) {
                landlock_unmask_layers(
                        find_rule(domain, dentry_child2),
                        landlock_init_layer_masks(
                                domain, LANDLOCK_MASK_ACCESS_FS,
                                &_layer_masks_child2, LANDLOCK_KEY_INODE),
                        &_layer_masks_child2, ARRAY_SIZE(_layer_masks_child2));
                layer_masks_child2 = &_layer_masks_child2;
                child2_is_directory = d_is_dir(dentry_child2);
        }

        walker_path = *path;
        path_get(&walker_path);
        /*
         * We need to walk through all the hierarchy to not miss any relevant
         * restriction.
         */
        while (true) {
                struct dentry *parent_dentry;
                const struct landlock_rule *rule;

                /*
                 * If at least all accesses allowed on the destination are
                 * already allowed on the source, or, equivalently, if there
                 * are at least as many restrictions on the destination as on
                 * the source, then we can safely refer files from the source
                 * to the destination without risking a privilege escalation.
                 * This also applies in the case of RENAME_EXCHANGE, which
                 * implies checks in both directions. This is crucial for
                 * standalone multilayered security policies. Furthermore,
                 * this helps policy writers avoid shooting themselves in the
                 * foot.
                 */
                if (unlikely(is_dom_check &&
                             no_more_access(
                                     layer_masks_parent1, layer_masks_child1,
                                     child1_is_directory, layer_masks_parent2,
                                     layer_masks_child2,
                                     child2_is_directory))) {
                        /*
                         * Now, downgrades the remaining checks from domain
                         * handled accesses to requested accesses.
                         */
                        is_dom_check = false;
                        access_masked_parent1 = access_request_parent1;
                        access_masked_parent2 = access_request_parent2;

                        allowed_parent1 =
                                allowed_parent1 ||
                                scope_to_request(access_masked_parent1,
                                                 layer_masks_parent1);
                        allowed_parent2 =
                                allowed_parent2 ||
                                scope_to_request(access_masked_parent2,
                                                 layer_masks_parent2);

                        /* Stops when all accesses are granted. */
                        if (allowed_parent1 && allowed_parent2)
                                break;
                }

                rule = find_rule(domain, walker_path.dentry);
                allowed_parent1 = allowed_parent1 ||
                                  landlock_unmask_layers(
                                          rule, access_masked_parent1,
                                          layer_masks_parent1,
                                          ARRAY_SIZE(*layer_masks_parent1));
                allowed_parent2 = allowed_parent2 ||
                                  landlock_unmask_layers(
                                          rule, access_masked_parent2,
                                          layer_masks_parent2,
                                          ARRAY_SIZE(*layer_masks_parent2));

                /* Stops when a rule from each layer grants access. */
                if (allowed_parent1 && allowed_parent2)
                        break;

jump_up:
                if (walker_path.dentry == walker_path.mnt->mnt_root) {
                        if (follow_up(&walker_path)) {
                                /* Ignores hidden mount points. */
                                goto jump_up;
                        } else {
                                /*
                                 * Stops at the real root. Denies access
                                 * because not all layers have granted access.
                                 */
                                break;
                        }
                }
                if (unlikely(IS_ROOT(walker_path.dentry))) {
                        /*
                         * Stops at disconnected root directories. Only allows
                         * access to internal filesystems (e.g. nsfs, which is
                         * reachable through /proc/<pid>/ns/<namespace>).
                         */
                        if (walker_path.mnt->mnt_flags & MNT_INTERNAL) {
                                allowed_parent1 = true;
                                allowed_parent2 = true;
                        }
                        break;
                }
                parent_dentry = dget_parent(walker_path.dentry);
                dput(walker_path.dentry);
                walker_path.dentry = parent_dentry;
        }
        path_put(&walker_path);

        if (!allowed_parent1) {
                log_request_parent1->type = LANDLOCK_REQUEST_FS_ACCESS;
                log_request_parent1->audit.type = LSM_AUDIT_DATA_PATH;
                log_request_parent1->audit.u.path = *path;
                log_request_parent1->access = access_masked_parent1;
                log_request_parent1->layer_masks = layer_masks_parent1;
                log_request_parent1->layer_masks_size =
                        ARRAY_SIZE(*layer_masks_parent1);
        }

        if (!allowed_parent2) {
                log_request_parent2->type = LANDLOCK_REQUEST_FS_ACCESS;
                log_request_parent2->audit.type = LSM_AUDIT_DATA_PATH;
                log_request_parent2->audit.u.path = *path;
                log_request_parent2->access = access_masked_parent2;
                log_request_parent2->layer_masks = layer_masks_parent2;
                log_request_parent2->layer_masks_size =
                        ARRAY_SIZE(*layer_masks_parent2);
        }
        return allowed_parent1 && allowed_parent2;
}
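
/*
 * Example walk (illustrative only, with hypothetical paths): for a domain
 * with a single layer that allows LANDLOCK_ACCESS_FS_READ_FILE on /usr, a
 * read-access check on /usr/bin/cat starts with the layer bit set in
 * @layer_masks_parent1, clears it when the /usr rule is found while walking
 * up the hierarchy, and therefore grants the request before reaching the
 * real root.
 */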

static int current_check_access_path(const struct path *const path,
                                     access_mask_t access_request)
{
        const struct access_masks masks = {
                .fs = access_request,
        };
        const struct landlock_cred_security *const subject =
                landlock_get_applicable_subject(current_cred(), masks, NULL);
        layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
        struct landlock_request request = {};

        if (!subject)
                return 0;

        access_request = landlock_init_layer_masks(subject->domain,
                                                   access_request, &layer_masks,
                                                   LANDLOCK_KEY_INODE);
        if (is_access_to_paths_allowed(subject->domain, path, access_request,
                                       &layer_masks, &request, NULL, 0, NULL,
                                       NULL, NULL))
                return 0;

        landlock_log_denial(subject, &request);
        return -EACCES;
}

static __attribute_const__ access_mask_t get_mode_access(const umode_t mode)
{
        switch (mode & S_IFMT) {
        case S_IFLNK:
                return LANDLOCK_ACCESS_FS_MAKE_SYM;
        case S_IFDIR:
                return LANDLOCK_ACCESS_FS_MAKE_DIR;
        case S_IFCHR:
                return LANDLOCK_ACCESS_FS_MAKE_CHAR;
        case S_IFBLK:
                return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
        case S_IFIFO:
                return LANDLOCK_ACCESS_FS_MAKE_FIFO;
        case S_IFSOCK:
                return LANDLOCK_ACCESS_FS_MAKE_SOCK;
        case S_IFREG:
        case 0:
                /* A zero mode translates to S_IFREG. */
        default:
                /* Treats weird files as regular files. */
                return LANDLOCK_ACCESS_FS_MAKE_REG;
        }
}
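
/*
 * For instance (illustrative only): a mknod(2) call with S_IFIFO in @mode
 * maps to LANDLOCK_ACCESS_FS_MAKE_FIFO, and a mknod(2) call with a zero file
 * type (which the VFS treats as a regular file) maps to
 * LANDLOCK_ACCESS_FS_MAKE_REG.
 */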

static access_mask_t maybe_remove(const struct dentry *const dentry)
{
        if (d_is_negative(dentry))
                return 0;
        return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
                                  LANDLOCK_ACCESS_FS_REMOVE_FILE;
}

/**
 * collect_domain_accesses - Walk through a file path and collect accesses
 *
 * @domain: Domain to check against.
 * @mnt_root: Last directory to check.
 * @dir: Directory to start the walk from.
 * @layer_masks_dom: Where to store the collected accesses.
 *
 * This helper is useful to begin a path walk from the @dir directory to a
 * @mnt_root directory used as a mount point. This mount point is the common
 * ancestor between the source and the destination of a renamed or linked
 * file. While walking from @dir to @mnt_root, we record all the domain's
 * allowed accesses in @layer_masks_dom.
 *
 * This is similar to is_access_to_paths_allowed() but much simpler because it
 * only handles walking on the same mount point and only checks one set of
 * accesses.
 *
 * Returns:
 * - true if all the domain access rights are allowed for @dir;
 * - false if the walk reached @mnt_root.
 */
static bool collect_domain_accesses(
        const struct landlock_ruleset *const domain,
        const struct dentry *const mnt_root, struct dentry *dir,
        layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
{
        unsigned long access_dom;
        bool ret = false;

        if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
                return true;
        if (is_nouser_or_private(dir))
                return true;

        access_dom = landlock_init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
                                               layer_masks_dom,
                                               LANDLOCK_KEY_INODE);

        dget(dir);
        while (true) {
                struct dentry *parent_dentry;

                /* Gets all layers allowing all domain accesses. */
                if (landlock_unmask_layers(find_rule(domain, dir), access_dom,
                                           layer_masks_dom,
                                           ARRAY_SIZE(*layer_masks_dom))) {
                        /*
                         * Stops when all handled accesses are allowed by at
                         * least one rule in each layer.
                         */
                        ret = true;
                        break;
                }

                /* We should not reach a root other than @mnt_root. */
                if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
                        break;

                parent_dentry = dget_parent(dir);
                dput(dir);
                dir = parent_dentry;
        }
        dput(dir);
        return ret;
}

/**
 * current_check_refer_path - Check if a rename or link action is allowed
 *
 * @old_dentry: File or directory requested to be moved or linked.
 * @new_dir: Destination parent directory.
 * @new_dentry: Destination file or directory.
 * @removable: Set to true if it is a rename operation.
 * @exchange: Set to true if it is a rename operation with RENAME_EXCHANGE.
 *
 * Because of its unprivileged constraints, Landlock relies on file hierarchies
 * (and not only inodes) to tie access rights to files. Being able to link or
 * rename a file hierarchy brings some challenges. Indeed, moving or linking a
 * file (i.e. creating a new reference to an inode) can have an impact on the
 * actions allowed for a set of files if it would change its parent directory
 * (i.e. reparenting).
 *
 * To avoid trivial access right bypasses, Landlock first checks if the file or
 * directory requested to be moved would gain new access rights inherited from
 * its new hierarchy. Before returning any error, Landlock then checks that
 * the parent source hierarchy and the destination hierarchy would allow the
 * link or rename action. If it is not the case, an error with EACCES is
 * returned to inform user space that there is no way to remove or create the
 * requested source file type. If it should be allowed but the new inherited
 * access rights would be greater than the source access rights, then the
 * kernel returns an error with EXDEV. Prioritizing EACCES over EXDEV enables
 * user space to abort the whole operation if there is no way to do it, or to
 * manually copy the source to the destination if this remains allowed, e.g.
 * because file creation is allowed on the destination directory but not direct
 * linking.
 *
 * To achieve this goal, the kernel needs to compare two file hierarchies: the
 * one identifying the source file or directory (including itself), and the
 * destination one. This can be seen as a multilayer partial ordering problem.
 * The kernel walks through these paths and collects in a matrix the access
 * rights that are denied per layer. These matrices are then compared to see
 * if the destination one has more (or the same) restrictions as the source
 * one. If this is the case, the requested action will not return EXDEV, which
 * doesn't mean the action is allowed. The parent hierarchy of the source
 * (i.e. parent directory), and the destination hierarchy must also be checked
 * to verify that they explicitly allow such action (i.e. referencing,
 * creation and potentially removal rights). The kernel implementation is then
 * required to rely on potentially four matrices of access rights: one for the
 * source file or directory (i.e. the child), potentially another one for the
 * other source/destination (in case of RENAME_EXCHANGE), one for the source
 * parent hierarchy and a last one for the destination hierarchy. These
 * ephemeral matrices take some space on the stack, which limits the number of
 * layers to a deemed reasonable number: 16.
 *
 * Returns:
 * - 0 if access is allowed;
 * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
 * - -EACCES if file removal or creation is denied.
 */
static int current_check_refer_path(struct dentry *const old_dentry,
                                    const struct path *const new_dir,
                                    struct dentry *const new_dentry,
                                    const bool removable, const bool exchange)
{
        const struct landlock_cred_security *const subject =
                landlock_get_applicable_subject(current_cred(), any_fs, NULL);
        bool allow_parent1, allow_parent2;
        access_mask_t access_request_parent1, access_request_parent2;
        struct path mnt_dir;
        struct dentry *old_parent;
        layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
                     layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
        struct landlock_request request1 = {}, request2 = {};

        if (!subject)
                return 0;

        if (unlikely(d_is_negative(old_dentry)))
                return -ENOENT;
        if (exchange) {
                if (unlikely(d_is_negative(new_dentry)))
                        return -ENOENT;
                access_request_parent1 =
                        get_mode_access(d_backing_inode(new_dentry)->i_mode);
        } else {
                access_request_parent1 = 0;
        }
        access_request_parent2 =
                get_mode_access(d_backing_inode(old_dentry)->i_mode);
        if (removable) {
                access_request_parent1 |= maybe_remove(old_dentry);
                access_request_parent2 |= maybe_remove(new_dentry);
        }

        /* The mount points are the same for old and new paths, cf. EXDEV. */
        if (old_dentry->d_parent == new_dir->dentry) {
                /*
                 * The LANDLOCK_ACCESS_FS_REFER access right is not required
                 * for a same-directory reference (i.e. no reparenting).
                 */
                access_request_parent1 = landlock_init_layer_masks(
                        subject->domain,
                        access_request_parent1 | access_request_parent2,
                        &layer_masks_parent1, LANDLOCK_KEY_INODE);
                if (is_access_to_paths_allowed(subject->domain, new_dir,
                                               access_request_parent1,
                                               &layer_masks_parent1, &request1,
                                               NULL, 0, NULL, NULL, NULL))
                        return 0;

                landlock_log_denial(subject, &request1);
                return -EACCES;
        }

        access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
        access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;

        /* Saves the common mount point. */
        mnt_dir.mnt = new_dir->mnt;
        mnt_dir.dentry = new_dir->mnt->mnt_root;

        /*
         * old_dentry may be the root of the common mount point and
         * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
         * OPEN_TREE_CLONE). We do not need to call dget(old_parent) because
         * we keep a reference to old_dentry.
         */
        old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
                                                      old_dentry->d_parent;

        /* new_dir->dentry is equal to new_dentry->d_parent */
        allow_parent1 = collect_domain_accesses(subject->domain, mnt_dir.dentry,
                                                old_parent,
                                                &layer_masks_parent1);
        allow_parent2 = collect_domain_accesses(subject->domain, mnt_dir.dentry,
                                                new_dir->dentry,
                                                &layer_masks_parent2);

        if (allow_parent1 && allow_parent2)
                return 0;

        /*
         * To be able to compare source and destination domain access rights,
         * take into account the @old_dentry access rights aggregated with its
         * parent access rights. This will be useful to compare with the
         * destination parent access rights.
         */
        if (is_access_to_paths_allowed(
                    subject->domain, &mnt_dir, access_request_parent1,
                    &layer_masks_parent1, &request1, old_dentry,
                    access_request_parent2, &layer_masks_parent2, &request2,
                    exchange ? new_dentry : NULL))
                return 0;

        if (request1.access) {
                request1.audit.u.path.dentry = old_parent;
                landlock_log_denial(subject, &request1);
        }
        if (request2.access) {
                request2.audit.u.path.dentry = new_dir->dentry;
                landlock_log_denial(subject, &request2);
        }

        /*
         * This prioritizes EACCES over EXDEV for all actions, including
         * renames with RENAME_EXCHANGE.
         */
        if (likely(is_eacces(&layer_masks_parent1, access_request_parent1) ||
                   is_eacces(&layer_masks_parent2, access_request_parent2)))
                return -EACCES;

        /*
         * Gracefully forbids reparenting if the destination directory
         * hierarchy is not a superset of restrictions of the source directory
         * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
         * source or the destination.
         */
        return -EXDEV;
}
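
/*
 * From user space (illustrative only, with hypothetical paths): a sandboxed
 * process that may create files in /tmp but lacks LANDLOCK_ACCESS_FS_REFER
 * gets EXDEV from rename("/home/user/f", "/tmp/f") and can fall back to
 * copying the file, just as it would across mount points, whereas EACCES is
 * returned instead when the removal or creation itself is denied.
 */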

/* Inode hooks */

static void hook_inode_free_security_rcu(void *inode_security)
{
        struct landlock_inode_security *inode_sec;

        /*
         * All inodes must already have been untied from their object by
         * release_inode() or hook_sb_delete().
         */
        inode_sec = inode_security + landlock_blob_sizes.lbs_inode;
        WARN_ON_ONCE(inode_sec->object);
}

/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and evict_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
        struct inode *inode, *prev_inode = NULL;

        if (!landlock_initialized)
                return;

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct landlock_object *object;

                /* Only handles referenced inodes. */
                if (!icount_read(inode))
                        continue;

                /*
                 * Protects against concurrent modification of inode (e.g.
                 * from get_inode_object()).
                 */
                spin_lock(&inode->i_lock);
                /*
                 * Checks I_FREEING and I_WILL_FREE to protect against a race
                 * condition when release_inode() just called iput(), which
                 * could lead to a NULL dereference of inode->security or a
                 * second call to iput() for the same Landlock object. Also
                 * checks I_NEW because such an inode cannot be tied to an
                 * object.
                 */
                if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                rcu_read_lock();
                object = rcu_dereference(landlock_inode(inode)->object);
                if (!object) {
                        rcu_read_unlock();
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                /* Keeps a reference to this inode until the next loop walk. */
                __iget(inode);
                spin_unlock(&inode->i_lock);

                /*
                 * If there is no concurrent release_inode() ongoing, then we
                 * are in charge of calling iput() on this inode, otherwise we
                 * will just wait for it to finish.
                 */
                spin_lock(&object->lock);
                if (object->underobj == inode) {
                        object->underobj = NULL;
                        spin_unlock(&object->lock);
                        rcu_read_unlock();

                        /*
                         * Because object->underobj was not NULL,
                         * release_inode() and get_inode_object() guarantee
                         * that it is safe to reset
                         * landlock_inode(inode)->object while it is not NULL.
                         * It is therefore not necessary to lock inode->i_lock.
                         */
                        rcu_assign_pointer(landlock_inode(inode)->object, NULL);
                        /*
                         * At this point, we own the ihold() reference that was
                         * originally set up by get_inode_object() and the
                         * __iget() reference that we just set in this loop
                         * walk. Therefore the following call to iput() will
                         * not sleep nor drop the inode because there are now
                         * at least two references to it.
                         */
                        iput(inode);
                } else {
                        spin_unlock(&object->lock);
                        rcu_read_unlock();
                }

                if (prev_inode) {
                        /*
                         * At this point, we still own the __iget() reference
                         * that we just set in this loop walk. Therefore we
                         * can drop the list lock and know that the inode won't
                         * disappear from under us until the next loop walk.
                         */
                        spin_unlock(&sb->s_inode_list_lock);
                        /*
                         * We can now actually put the inode reference from the
                         * previous loop walk, which is not needed anymore.
                         */
                        iput(prev_inode);
                        cond_resched();
                        spin_lock(&sb->s_inode_list_lock);
                }
                prev_inode = inode;
        }
        spin_unlock(&sb->s_inode_list_lock);

        /* Puts the inode reference from the last loop walk, if any. */
        if (prev_inode)
                iput(prev_inode);
        /* Waits for pending iput() in release_inode(). */
        wait_var_event(&landlock_superblock(sb)->inode_refs,
                       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}

static void
log_fs_change_topology_path(const struct landlock_cred_security *const subject,
                            size_t handle_layer, const struct path *const path)
{
        landlock_log_denial(subject, &(struct landlock_request) {
                .type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY,
                .audit = {
                        .type = LSM_AUDIT_DATA_PATH,
                        .u.path = *path,
                },
                .layer_plus_one = handle_layer + 1,
        });
}

static void log_fs_change_topology_dentry(
        const struct landlock_cred_security *const subject, size_t handle_layer,
        struct dentry *const dentry)
{
        landlock_log_denial(subject, &(struct landlock_request) {
                .type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY,
                .audit = {
                        .type = LSM_AUDIT_DATA_DENTRY,
                        .u.dentry = dentry,
                },
                .layer_plus_one = handle_layer + 1,
        });
}

/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To make it simple, deny any filesystem topology modification by landlocked
 * processes. Non-landlocked processes may still change the namespace of a
 * landlocked process, but this kind of threat must be handled by a system-wide
 * access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process. Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of a new mount point.
 * However, it would also require making all the child domains dynamically
 * inherit these new constraints. Anyway, for backward compatibility reasons,
 * a dedicated user space option would be required (e.g. as a ruleset flag).
 */
static int hook_sb_mount(const char *const dev_name,
                         const struct path *const path, const char *const type,
                         const unsigned long flags, void *const data)
{
        size_t handle_layer;
        const struct landlock_cred_security *const subject =
                landlock_get_applicable_subject(current_cred(), any_fs,
                                                &handle_layer);

        if (!subject)
                return 0;

        log_fs_change_topology_path(subject, handle_layer, path);
        return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
                           const struct path *const to_path)
{
        size_t handle_layer;
        const struct landlock_cred_security *const subject =
                landlock_get_applicable_subject(current_cred(), any_fs,
                                                &handle_layer);

        if (!subject)
                return 0;

        log_fs_change_topology_path(subject, handle_layer, to_path);
        return -EPERM;
}

/*
 * Removing a mount point may reveal a previously hidden file hierarchy, and
 * may then grant access to files that were previously forbidden.
 */
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
        size_t handle_layer;
        const struct landlock_cred_security *const subject =
                landlock_get_applicable_subject(current_cred(), any_fs,
                                                &handle_layer);

        if (!subject)
                return 0;

        log_fs_change_topology_dentry(subject, handle_layer, mnt->mnt_root);
        return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
        size_t handle_layer;
        const struct landlock_cred_security *const subject =
                landlock_get_applicable_subject(current_cred(), any_fs,
                                                &handle_layer);

        if (!subject)
                return 0;

        log_fs_change_topology_dentry(subject, handle_layer, sb->s_root);
        return -EPERM;
}

/*
 * pivot_root(2), like mount(2), changes the current mount namespace. It must
 * then be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process. Moreover, it can be used to restrict the
 * view of the filesystem.
 */
static int hook_sb_pivotroot(const struct path *const old_path,
                             const struct path *const new_path)
{
        size_t handle_layer;
        const struct landlock_cred_security *const subject =
                landlock_get_applicable_subject(current_cred(), any_fs,
                                                &handle_layer);

        if (!subject)
                return 0;

        log_fs_change_topology_path(subject, handle_layer, new_path);
        return -EPERM;
}

/* Path hooks */

static int hook_path_link(struct dentry *const old_dentry,
                          const struct path *const new_dir,
                          struct dentry *const new_dentry)
{
        return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
                                        false);
}

static int hook_path_rename(const struct path *const old_dir,
                            struct dentry *const old_dentry,
                            const struct path *const new_dir,
                            struct dentry *const new_dentry,
                            const unsigned int flags)
{
        /* old_dir refers to old_dentry->d_parent and new_dir->mnt */
        return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
                                        !!(flags & RENAME_EXCHANGE));
}

static int hook_path_mkdir(const struct path *const dir,
                           struct dentry *const dentry, const umode_t mode)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
                           struct dentry *const dentry, const umode_t mode,
                           const unsigned int dev)
{
        return current_check_access_path(dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
                             struct dentry *const dentry,
                             const char *const old_name)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
                            struct dentry *const dentry)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
                           struct dentry *const dentry)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

static int hook_path_truncate(const struct path *const path)
{
        return current_check_access_path(path, LANDLOCK_ACCESS_FS_TRUNCATE);
}

/* File hooks */

/**
 * get_required_file_open_access - Get access needed to open a file
 *
 * @file: File being opened.
 *
 * Returns the access rights that are required for opening the given file,
 * depending on the file type and open mode.
 */
static access_mask_t
get_required_file_open_access(const struct file *const file)
{
        access_mask_t access = 0;

        if (file->f_mode & FMODE_READ) {
                /* A directory can only be opened in read mode. */
                if (S_ISDIR(file_inode(file)->i_mode))
                        return LANDLOCK_ACCESS_FS_READ_DIR;
                access = LANDLOCK_ACCESS_FS_READ_FILE;
        }
        if (file->f_mode & FMODE_WRITE)
                access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
        /* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
        if (file->f_flags & __FMODE_EXEC)
                access |= LANDLOCK_ACCESS_FS_EXECUTE;
        return access;
}
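
/*
 * For example (illustrative only): open("f", O_RDWR) requires
 * LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_WRITE_FILE, while
 * open("dir", O_RDONLY | O_DIRECTORY) only requires
 * LANDLOCK_ACCESS_FS_READ_DIR, and open("f", O_PATH) requires none of them
 * because neither FMODE_READ nor FMODE_WRITE is set.
 */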

static int hook_file_alloc_security(struct file *const file)
{
        /*
         * Grants all access rights, even if most of them are not checked
         * later on; this is more consistent.
         *
         * Notably, file descriptors for regular files can also be acquired
         * without going through the file_open hook, for example when using
         * memfd_create(2).
         */
        landlock_file(file)->allowed_access = LANDLOCK_MASK_ACCESS_FS;
        return 0;
}

static bool is_device(const struct file *const file)
{
        const struct inode *inode = file_inode(file);

        return S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode);
}
1615
1616
static int hook_file_open(struct file *const file)
1617
{
1618
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
1619
access_mask_t open_access_request, full_access_request, allowed_access,
1620
optional_access;
1621
const struct landlock_cred_security *const subject =
1622
landlock_get_applicable_subject(file->f_cred, any_fs, NULL);
1623
struct landlock_request request = {};
1624
1625
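	/*
	 * Without a Landlock domain, the file keeps the full access set
	 * granted by hook_file_alloc_security().
	 */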
	if (!subject)
		return 0;

	/*
	 * Because a file may be opened with O_PATH, get_required_file_open_access()
	 * may return 0. This case will be handled with a future Landlock
	 * evolution.
	 */
	open_access_request = get_required_file_open_access(file);

	/*
	 * We look up more access than what we immediately need for open(), so
	 * that we can later authorize operations on opened files.
	 */
	optional_access = LANDLOCK_ACCESS_FS_TRUNCATE;
	if (is_device(file))
		optional_access |= LANDLOCK_ACCESS_FS_IOCTL_DEV;

	full_access_request = open_access_request | optional_access;

	if (is_access_to_paths_allowed(
		    subject->domain, &file->f_path,
		    landlock_init_layer_masks(subject->domain,
					      full_access_request, &layer_masks,
					      LANDLOCK_KEY_INODE),
		    &layer_masks, &request, NULL, 0, NULL, NULL, NULL)) {
		allowed_access = full_access_request;
	} else {
		unsigned long access_bit;
		const unsigned long access_req = full_access_request;

		/*
		 * Calculate the actual allowed access rights from layer_masks.
		 * Add each access right to allowed_access which has not been
		 * vetoed by any layer.
		 */
		allowed_access = 0;
		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(layer_masks)) {
			if (!layer_masks[access_bit])
				allowed_access |= BIT_ULL(access_bit);
		}
	}

	/*
	 * For operations on already opened files (e.g. ftruncate()), it is the
	 * access rights at the time of open() which decide whether the
	 * operation is permitted. Therefore, we record the relevant subset of
	 * file access rights in the opened struct file.
	 */
	landlock_file(file)->allowed_access = allowed_access;
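	/*
	 * Also records which layer denied each optional right, so that a
	 * later ftruncate() or ioctl() denial can be attributed to the
	 * responsible layer in the audit logs.
	 */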
#ifdef CONFIG_AUDIT
	landlock_file(file)->deny_masks = landlock_get_deny_masks(
		_LANDLOCK_ACCESS_FS_OPTIONAL, optional_access, &layer_masks,
		ARRAY_SIZE(layer_masks));
#endif /* CONFIG_AUDIT */

	if ((open_access_request & allowed_access) == open_access_request)
		return 0;

	/* Sets access to reflect the actual request. */
	request.access = open_access_request;
	landlock_log_denial(subject, &request);
	return -EACCES;
}

static int hook_file_truncate(struct file *const file)
{
	/*
	 * Allows truncation if the truncate right was available at the time of
	 * opening the file, to get a consistent access check as for read, write
	 * and execute operations.
	 *
	 * Note: For checks done based on the file's Landlock allowed access, we
	 * enforce them independently of whether the current thread is in a
	 * Landlock domain, so that open files passed between independent
	 * processes retain their behaviour.
	 */
	if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE)
		return 0;

	landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) {
		.type = LANDLOCK_REQUEST_FS_ACCESS,
		.audit = {
			.type = LSM_AUDIT_DATA_FILE,
			.u.file = file,
		},
		.all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL,
		.access = LANDLOCK_ACCESS_FS_TRUNCATE,
#ifdef CONFIG_AUDIT
		.deny_masks = landlock_file(file)->deny_masks,
#endif /* CONFIG_AUDIT */
	});
	return -EACCES;
}

static int hook_file_ioctl_common(const struct file *const file,
				  const unsigned int cmd, const bool is_compat)
{
	access_mask_t allowed_access = landlock_file(file)->allowed_access;

	/*
	 * It is the access rights at the time of opening the file which
	 * determine whether IOCTL can be used on the opened file later.
	 *
	 * The access right is attached to the opened file in hook_file_open().
	 */
	if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
		return 0;

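	/* Only device files are restricted by LANDLOCK_ACCESS_FS_IOCTL_DEV. */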
	if (!is_device(file))
		return 0;

	if (unlikely(is_compat) ? is_masked_device_ioctl_compat(cmd) :
				  is_masked_device_ioctl(cmd))
		return 0;

	landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) {
		.type = LANDLOCK_REQUEST_FS_ACCESS,
		.audit = {
			.type = LSM_AUDIT_DATA_IOCTL_OP,
			.u.op = &(struct lsm_ioctlop_audit) {
				.path = file->f_path,
				.cmd = cmd,
			},
		},
		.all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL,
		.access = LANDLOCK_ACCESS_FS_IOCTL_DEV,
#ifdef CONFIG_AUDIT
		.deny_masks = landlock_file(file)->deny_masks,
#endif /* CONFIG_AUDIT */
	});
	return -EACCES;
}

static int hook_file_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	return hook_file_ioctl_common(file, cmd, false);
}

static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return hook_file_ioctl_common(file, cmd, true);
}

/*
 * Always allow sending signals between threads of the same process. This
 * ensures consistency with hook_task_kill().
 */
static bool control_current_fowner(struct fown_struct *const fown)
{
	struct task_struct *p;

	/*
	 * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
	 * file_set_fowner LSM hook inconsistencies").
	 */
	lockdep_assert_held(&fown->lock);

	/*
	 * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side
	 * critical section.
	 */
	guard(rcu)();
	p = pid_task(fown->pid, fown->pid_type);
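	/* Without a target task, defaults to controlling the file owner. */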
	if (!p)
		return true;

	return !same_thread_group(p, current);
}

static void hook_file_set_fowner(struct file *file)
{
	struct landlock_ruleset *prev_dom;
	struct landlock_cred_security fown_subject = {};
	size_t fown_layer = 0;

	if (control_current_fowner(file_f_owner(file))) {
		static const struct access_masks signal_scope = {
			.scope = LANDLOCK_SCOPE_SIGNAL,
		};
		const struct landlock_cred_security *new_subject =
			landlock_get_applicable_subject(
				current_cred(), signal_scope, &fown_layer);
		if (new_subject) {
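			/*
			 * Pins the domain, which may outlive the setter's
			 * credentials; it is released on the next F_SETOWN or
			 * in hook_file_free_security().
			 */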
			landlock_get_ruleset(new_subject->domain);
			fown_subject = *new_subject;
		}
	}

	prev_dom = landlock_file(file)->fown_subject.domain;
	landlock_file(file)->fown_subject = fown_subject;
#ifdef CONFIG_AUDIT
	landlock_file(file)->fown_layer = fown_layer;
#endif /* CONFIG_AUDIT */

	/* May be called in an RCU read-side critical section. */
	landlock_put_ruleset_deferred(prev_dom);
}

static void hook_file_free_security(struct file *file)
{
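	/* Releases the domain reference taken in hook_file_set_fowner(), if any. */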
	landlock_put_ruleset_deferred(landlock_file(file)->fown_subject.domain);
}

static struct security_hook_list landlock_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(inode_free_security_rcu, hook_inode_free_security_rcu),

	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
	LSM_HOOK_INIT(move_mount, hook_move_mount),
	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

	LSM_HOOK_INIT(path_link, hook_path_link),
	LSM_HOOK_INIT(path_rename, hook_path_rename),
	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
	LSM_HOOK_INIT(path_truncate, hook_path_truncate),

	LSM_HOOK_INIT(file_alloc_security, hook_file_alloc_security),
	LSM_HOOK_INIT(file_open, hook_file_open),
	LSM_HOOK_INIT(file_truncate, hook_file_truncate),
	LSM_HOOK_INIT(file_ioctl, hook_file_ioctl),
	LSM_HOOK_INIT(file_ioctl_compat, hook_file_ioctl_compat),
	LSM_HOOK_INIT(file_set_fowner, hook_file_set_fowner),
	LSM_HOOK_INIT(file_free_security, hook_file_free_security),
};

__init void landlock_add_fs_hooks(void)
{
	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
			   &landlock_lsmid);
}

#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST

/* clang-format off */
static struct kunit_case test_cases[] = {
	KUNIT_CASE(test_no_more_access),
	KUNIT_CASE(test_scope_to_request_with_exec_none),
	KUNIT_CASE(test_scope_to_request_with_exec_some),
	KUNIT_CASE(test_scope_to_request_without_access),
	KUNIT_CASE(test_is_eacces_with_none),
	KUNIT_CASE(test_is_eacces_with_refer),
	KUNIT_CASE(test_is_eacces_with_write),
	{}
};
/* clang-format on */

static struct kunit_suite test_suite = {
	.name = "landlock_fs",
	.test_cases = test_cases,
};

kunit_test_suite(test_suite);

#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */