GitHub Repository: torvalds/linux
Path: blob/master/fs/cachefiles/namei.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include "internal.h"

/*
 * Mark the backing file as being a cache file if it's not already in use. The
 * mark tells the culling request command that it's not allowed to cull the
 * file or directory. The caller must hold the inode lock.
 */
static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					   struct inode *inode)
{
	bool can_use = false;

	if (!(inode->i_flags & S_KERNEL_FILE)) {
		inode->i_flags |= S_KERNEL_FILE;
		trace_cachefiles_mark_active(object, inode);
		can_use = true;
	} else {
		trace_cachefiles_mark_failed(object, inode);
	}

	return can_use;
}

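/*
 * As above, but take and drop the inode lock around setting the mark.
 */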
static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					 struct inode *inode)
{
	bool can_use;

	inode_lock(inode);
	can_use = __cachefiles_mark_inode_in_use(object, inode);
	inode_unlock(inode);
	return can_use;
}

/*
 * Unmark a backing inode. The caller must hold the inode lock.
 */
static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
					     struct inode *inode)
{
	inode->i_flags &= ~S_KERNEL_FILE;
	trace_cachefiles_mark_inactive(object, inode);
}

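/*
 * Unmark a backing inode, taking the inode lock to do so.
 */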
static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
					      struct inode *inode)
{
	inode_lock(inode);
	__cachefiles_unmark_inode_in_use(object, inode);
	inode_unlock(inode);
}

/*
 * Unmark a backing inode and tell cachefilesd that there's something that can
 * be culled.
 */
void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
				    struct file *file)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct inode *inode = file_inode(file);

	cachefiles_do_unmark_inode_in_use(object, inode);

	if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
		atomic_long_add(inode->i_blocks, &cache->b_released);
		if (atomic_inc_return(&cache->f_released))
			cachefiles_state_changed(cache);
	}
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

retry:
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = lookup_one(&nop_mnt_idmap, &QSTR(dirname), dir);
	else
		subdir = ERR_PTR(ret);
	trace_cachefiles_lookup(NULL, dir, subdir);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0,
					   cachefiles_has_space_for_create);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
		else
			subdir = ERR_PTR(ret);
		if (IS_ERR(subdir)) {
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}
		trace_cachefiles_mkdir(dir, subdir);

		if (unlikely(d_unhashed(subdir) || d_is_negative(subdir))) {
			dput(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}

	/* Tell rmdir() it's not allowed to delete the subdir */
	inode_lock(d_inode(subdir));
	inode_unlock(d_inode(dir));

	if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  subdir, d_inode(subdir)->i_ino);
		goto mark_error;
	}

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	inode_unlock(d_inode(dir));
	if (!IS_ERR(subdir))
		dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Put a subdirectory.
 */
void cachefiles_put_directory(struct dentry *dir)
{
	if (dir) {
		cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir));
		dput(dir);
	}
}

/*
 * Remove a regular file from the cache.
 */
static int cachefiles_unlink(struct cachefiles_cache *cache,
			     struct cachefiles_object *object,
			     struct dentry *dir, struct dentry *dentry,
			     enum fscache_why_object_killed why)
{
	struct path path = {
		.mnt	= cache->mnt,
		.dentry	= dir,
	};
	int ret;

	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
	ret = security_path_unlink(&path, dentry);
	if (ret < 0) {
		cachefiles_io_error(cache, "Unlink security error");
		return ret;
	}

	ret = cachefiles_inject_remove_error();
	if (ret == 0) {
		ret = vfs_unlink(&nop_mnt_idmap, d_backing_inode(dir), dentry, NULL);
		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");
	}
	if (ret != 0)
		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
					   cachefiles_trace_unlink_error);
	return ret;
}

/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	if (rep->d_parent != dir) {
		inode_unlock(d_inode(dir));
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		dget(rep); /* Stop the dentry being negated if it's only pinned
			    * by a file struct.
			    */
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		dput(rep);

		inode_unlock(d_inode(dir));
		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
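	/* The name combines the current time with a per-cache counter, so if
	 * the chosen name already exists in the graveyard we just go round
	 * again with a fresh one.
	 */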
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);
	if (IS_ERR(trap))
		return PTR_ERR(trap);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one(&nop_mnt_idmap, &QSTR(nbuffer), cache->graveyard);
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.mnt_idmap	= &nop_mnt_idmap,
			.old_parent	= dir,
			.old_dentry	= rep,
			.new_parent	= cache->graveyard,
			.new_dentry	= grave,
		};
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	__cachefiles_unmark_inode_in_use(object, d_inode(rep));
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * Delete a cache file.
 */
int cachefiles_delete_object(struct cachefiles_object *object,
			     enum fscache_why_object_killed why)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry = object->file->f_path.dentry;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter(",OBJ%x{%pD}", object->debug_id, object->file);

	/* Stop the dentry being negated if it's only pinned by a file struct. */
	dget(dentry);

	inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
	inode_unlock(d_backing_inode(fan));
	dput(dentry);
	return ret;
}

/*
 * Create a temporary file and leave it unattached and un-xattr'd until the
 * time comes to discard the object from memory.
 */
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct cachefiles_cache *cache = volume->cache;
	const struct cred *saved_cred;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	struct file *file;
	const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
	uint64_t ni_size;
	long ret;

	cachefiles_begin_secure(cache, &saved_cred);

	ret = cachefiles_inject_write_error();
	if (ret == 0) {
		file = kernel_tmpfile_open(&nop_mnt_idmap, &parentpath,
					   S_IFREG | 0600,
					   O_RDWR | O_LARGEFILE | O_DIRECT,
					   cache->cache_cred);
		ret = PTR_ERR_OR_ZERO(file);
	}
	if (ret) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_tmpfile_error);
		if (ret == -EIO)
			cachefiles_io_error_obj(object, "Failed to create tmpfile");
		goto err;
	}

	trace_cachefiles_tmpfile(object, file_inode(file));

	/* This is a newly created file with no other possible user */
	if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
		WARN_ON(1);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto err_unuse;

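	/* Expand the file to the expected object size, rounded up to the DIO
	 * block size.
	 */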
	ni_size = object->cookie->object_size;
	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);

	if (ni_size > 0) {
		trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
				       cachefiles_trunc_expand_tmpfile);
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_truncate(&file->f_path, ni_size);
		if (ret < 0) {
			trace_cachefiles_vfs_error(
				object, file_inode(file), ret,
				cachefiles_trace_trunc_error);
			goto err_unuse;
		}
	}

	ret = -EINVAL;
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		fput(file);
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto err_unuse;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
	return file;

err_unuse:
	cachefiles_do_unmark_inode_in_use(object, file_inode(file));
	fput(file);
err:
	file = ERR_PTR(ret);
	goto out;
}

/*
 * Create a new file.
 */
static bool cachefiles_create_file(struct cachefiles_object *object)
{
	struct file *file;
	int ret;

	ret = cachefiles_has_space(object->volume->cache, 1, 0,
				   cachefiles_has_space_for_create);
	if (ret < 0)
		return false;

	file = cachefiles_create_tmpfile(object);
	if (IS_ERR(file))
		return false;

	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
	object->file = file;
	return true;
}

/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.
 */
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  dentry, d_inode(dentry)->i_ino);
		return false;
	}

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT, cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto error_fput;

	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;

	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	return true;

check_failed:
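	/* The coherency data didn't check out, so the cached object can't be
	 * used as-is; if it is merely stale, fall back to creating a fresh
	 * tmpfile for it.
	 */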
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
	fput(file);
	if (ret == -ESTALE)
		return cachefiles_create_file(object);
	return false;

error_fput:
	fput(file);
error:
	cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
	return false;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_positive_unlocked(&nop_mnt_idmap,
						      &QSTR(object->d_name), fan);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}

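	/* A backing file that isn't a regular file can't be used; bury it and
	 * start afresh with a new file.
	 */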
	if (!d_is_reg(dentry)) {
		pr_err("%pd is not a file\n", dentry);
		inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
		ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
					     FSCACHE_OBJECT_IS_WEIRD);
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	ret = cachefiles_open_file(object, dentry);
	dput(dentry);
	if (!ret)
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}

/*
 * Attempt to link a temporary file into its rightful place in the cache.
 */
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one(&nop_mnt_idmap, &QSTR(object->d_name), fan);
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out_unlock;
	}

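	/* If something already occupies the name, unlink it before linking the
	 * tmpfile into place, then repeat the lookup to get a negative dentry.
	 */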
	if (!d_is_negative(dentry)) {
		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_dput;

		dput(dentry);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = lookup_one(&nop_mnt_idmap, &QSTR(object->d_name), fan);
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			goto out_unlock;
		}
	}

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &nop_mnt_idmap,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(d_inode(fan));
	_leave(" = %u", success);
	return success;
}

/*
 * Look up an inode to be checked or culled. Return -EBUSY if the inode is
 * marked in use.
 */
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
						 struct dentry *dir,
						 char *filename)
{
	struct dentry *victim;
	int ret = -ENOENT;

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	victim = lookup_one(&nop_mnt_idmap, &QSTR(filename), dir);
	if (IS_ERR(victim))
		goto lookup_error;
	if (d_is_negative(victim))
		goto lookup_put;
	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
		goto lookup_busy;
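	/* Return with the parent directory still locked; the caller is
	 * responsible for unlocking it.
	 */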
	return victim;

lookup_busy:
	ret = -EBUSY;
lookup_put:
	inode_unlock(d_inode(dir));
	dput(victim);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT)
		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	return ERR_PTR(ret);
}

/*
 * Cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	struct inode *inode;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* check to see if someone is using this object */
	inode = d_inode(victim);
	inode_lock(inode);
	if (inode->i_flags & S_KERNEL_FILE) {
		ret = -EBUSY;
	} else {
		/* Stop the cache from picking it back up */
		inode->i_flags |= S_KERNEL_FILE;
		ret = 0;
	}
	inode_unlock(inode);
	if (ret < 0)
		goto error_unlock;

	ret = cachefiles_bury_object(cache, NULL, dir, victim,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;

	fscache_count_culled();
	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT)
		return -ESTALE; /* Probably got retired by the netfs */

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * Find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;
	int ret = 0;

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	inode_unlock(d_inode(dir));
	dput(victim);
	return ret;
}