GitHub Repository: torvalds/linux
Path: blob/master/fs/erofs/data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/filelock.h>
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

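/*
 * Metadata buffer helpers: an erofs_buf caches a single page of on-disk
 * metadata together with an optional kmap of it.  erofs_unmap_metabuf()
 * drops the kmap only; erofs_put_metabuf() also releases the folio
 * reference and resets the buffer.
 */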
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (!buf->base)
		return;
	kunmap_local(buf->base);
	buf->base = NULL;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	folio_put(page_folio(buf->page));
	buf->page = NULL;
}

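/*
 * Read the metadata page that covers @offset through the buffer's mapping.
 * The currently cached page is reused if it already contains the target
 * index; otherwise it is dropped and the new folio is read in.  With
 * @need_kmap, a kmapped pointer at the in-page offset is returned.
 */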
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap)
{
	pgoff_t index = (buf->off + offset) >> PAGE_SHIFT;
	struct folio *folio = NULL;

	if (buf->page) {
		folio = page_folio(buf->page);
		if (folio_file_page(folio, index) != buf->page)
			erofs_unmap_metabuf(buf);
	}
	if (!folio || !folio_contains(folio, index)) {
		erofs_put_metabuf(buf);
		folio = read_mapping_folio(buf->mapping, index, buf->file);
		if (IS_ERR(folio))
			return folio;
	}
	buf->page = folio_file_page(folio, index);
	if (!need_kmap)
		return NULL;
	if (!buf->base)
		buf->base = kmap_local_page(buf->page);
	return buf->base + (offset & ~PAGE_MASK);
}

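/*
 * Pick the address_space that backs metadata I/O: the metabox inode when
 * @in_metabox is set, the backing file mapping in file-backed mode (e.g.
 * images hosted on FUSE), the fscache pseudo-inode mapping, or the block
 * device mapping otherwise.
 */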
int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb,
		       bool in_metabox)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	buf->file = NULL;
	if (in_metabox) {
		if (unlikely(!sbi->metabox_inode))
			return -EFSCORRUPTED;
		buf->mapping = sbi->metabox_inode->i_mapping;
		return 0;
	}
	buf->off = sbi->dif0.fsoff;
	if (erofs_is_fileio_mode(sbi)) {
		buf->file = sbi->dif0.file;	/* some fs like FUSE needs it */
		buf->mapping = buf->file->f_mapping;
	} else if (erofs_is_fscache_mode(sb))
		buf->mapping = sbi->dif0.fscache->inode->i_mapping;
	else
		buf->mapping = sb->s_bdev->bd_mapping;
	return 0;
}

void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, bool in_metabox)
{
	int err;

	err = erofs_init_metabuf(buf, sb, in_metabox);
	if (err)
		return ERR_PTR(err);
	return erofs_bread(buf, offset, true);
}

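/*
 * Translate the logical extent starting at @map->m_la into its physical
 * location.  Flat and tail-packed inline layouts map directly against
 * vi->startblk, with the inline tail flagged EROFS_MAP_META; chunk-based
 * layouts look up the per-chunk index (or 32-bit block map) stored right
 * after the inode base and xattrs.
 */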
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct super_block *sb = inode->i_sb;
	unsigned int unit, blksz = sb->s_blocksize;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	erofs_blk_t startblk, addrmask;
	bool tailpacking;
	erofs_off_t pos;
	u64 chunknr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	map->m_flags = 0;
	if (map->m_la >= inode->i_size)
		goto out;

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		tailpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
		if (!tailpacking && vi->startblk == EROFS_NULL_ADDR)
			goto out;
		pos = erofs_pos(sb, erofs_iblks(inode) - tailpacking);

		map->m_flags = EROFS_MAP_MAPPED;
		if (map->m_la < pos) {
			map->m_pa = erofs_pos(sb, vi->startblk) + map->m_la;
			map->m_llen = pos - map->m_la;
		} else {
			map->m_pa = erofs_iloc(inode) + vi->inode_isize +
				vi->xattr_isize + erofs_blkoff(sb, map->m_la);
			map->m_llen = inode->i_size - map->m_la;
			map->m_flags |= EROFS_MAP_META;
		}
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	idx = erofs_read_metabuf(&buf, sb, pos, erofs_inode_in_metabox(inode));
	if (IS_ERR(idx)) {
		err = PTR_ERR(idx);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_llen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, blksz));
	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) {
		addrmask = (vi->chunkformat & EROFS_CHUNK_FORMAT_48BIT) ?
			BIT_ULL(48) - 1 : BIT_ULL(32) - 1;
		startblk = (((u64)le16_to_cpu(idx->startblk_hi) << 32) |
			    le32_to_cpu(idx->startblk_lo)) & addrmask;
		if ((startblk ^ EROFS_NULL_ADDR) & addrmask) {
			map->m_deviceid = le16_to_cpu(idx->device_id) &
				EROFS_SB(sb)->device_id_mask;
			map->m_pa = erofs_pos(sb, startblk);
			map->m_flags = EROFS_MAP_MAPPED;
		}
	} else {
		startblk = le32_to_cpu(*(__le32 *)idx);
		if (startblk != (u32)EROFS_NULL_ADDR) {
			map->m_pa = erofs_pos(sb, startblk);
			map->m_flags = EROFS_MAP_MAPPED;
		}
	}
	erofs_put_metabuf(&buf);
out:
	if (!err) {
		map->m_plen = map->m_llen;
		/* inline data should be located in the same meta block */
		if ((map->m_flags & EROFS_MAP_META) &&
		    erofs_blkoff(sb, map->m_pa) + map->m_plen > blksz) {
			erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
	}
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
		struct super_block *sb, struct erofs_device_info *dif)
{
	map->m_sb = sb;
	map->m_dif = dif;
	map->m_bdev = NULL;
	if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
		map->m_bdev = file_bdev(dif->file);
}

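/*
 * Resolve which device a physical address lives on for multi-device
 * images.  A non-zero m_deviceid selects an extra device directly from
 * the device IDR (on flat layouts the address is simply rebased by the
 * device's uniaddr); without a device ID, the address ranges of the
 * extra devices are scanned instead.
 */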
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	erofs_off_t startoff;
	int id;

	erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
	map->m_bdev = sb->s_bdev;	/* use s_bdev for the primary device */
	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->uniaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		erofs_fill_from_devinfo(map, sb, dif);
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			if (!dif->uniaddr)
				continue;

			startoff = erofs_pos(sb, dif->uniaddr);
			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + erofs_pos(sb, dif->blocks)) {
				map->m_pa -= startoff;
				erofs_fill_from_devinfo(map, sb, dif);
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

/*
 * bit 30: I/O error occurred on this folio
 * bit 29: CPU has dirty data in D-cache (needs aliasing handling)
 * bit 0 - 28: remaining parts to complete this folio
 */
#define EROFS_ONLINEFOLIO_EIO		30
#define EROFS_ONLINEFOLIO_DIRTY		29

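/*
 * "Online" folios are read folios that may be filled by several
 * sub-requests: _init() arms folio->private with a count of one,
 * _split() adds one per additional part, and _end() completes a part
 * and finishes the folio read once the last part is done.
 */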
void erofs_onlinefolio_init(struct folio *folio)
{
	union {
		atomic_t o;
		void *v;
	} u = { .o = ATOMIC_INIT(1) };

	folio->private = u.v;	/* valid only if file-backed folio is locked */
}

void erofs_onlinefolio_split(struct folio *folio)
{
	atomic_inc((atomic_t *)&folio->private);
}

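/*
 * Complete one part of an online folio: lock-freely decrement the
 * pending count while accumulating the EIO and DIRTY bits.  Only the
 * caller that drops the count to zero clears folio->private, flushes
 * the D-cache if needed and ends the folio read.
 */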
void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty)
{
	int orig, v;

	do {
		orig = atomic_read((atomic_t *)&folio->private);
		DBG_BUGON(orig <= 0);
		v = dirty << EROFS_ONLINEFOLIO_DIRTY;
		v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO);
	} while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig);

	if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1))
		return;
	folio->private = 0;
	if (v & BIT(EROFS_ONLINEFOLIO_DIRTY))
		flush_dcache_folio(folio);
	folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO)));
}

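/*
 * Per-iteration context handed to the iomap machinery via
 * iomap_iter->private: it carries the inode that actually backs the
 * data and, for inline extents, the metabuf page/base pinned by
 * ->iomap_begin() until ->iomap_end() releases it.
 */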
struct erofs_iomap_iter_ctx {
	struct page *page;
	void *base;
	struct inode *realinode;
};

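/*
 * Fill in an iomap for the range starting at @offset: unmapped ranges
 * become IOMAP_HOLE, inline (tail-packed) metadata becomes IOMAP_INLINE
 * with the data read through a pinned metabuf, and everything else is
 * IOMAP_MAPPED on the device resolved by erofs_map_dev().
 */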
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct erofs_iomap_iter_ctx *ctx = iter->private;
	struct inode *realinode = ctx ? ctx->realinode : inode;
	struct super_block *sb = realinode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	int ret;

	map.m_la = offset;
	map.m_llen = length;
	ret = erofs_map_blocks(realinode, &map);
	if (ret < 0)
		return ret;

	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->addr = IOMAP_NULL_ADDR;
	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		return 0;
	}

	if (!(map.m_flags & EROFS_MAP_META) || !erofs_inode_in_metabox(realinode)) {
		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(sb, &mdev);
		if (ret)
			return ret;

		if (flags & IOMAP_DAX)
			iomap->dax_dev = mdev.m_dif->dax_dev;
		else
			iomap->bdev = mdev.m_bdev;
		iomap->addr = mdev.m_dif->fsoff + mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dif->dax_part_off;
	}

	if (map.m_flags & EROFS_MAP_META) {
		iomap->type = IOMAP_INLINE;
		/* read context should read the inlined data */
		if (ctx) {
			struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
			void *ptr;

			ptr = erofs_read_metabuf(&buf, sb, map.m_pa,
					erofs_inode_in_metabox(realinode));
			if (IS_ERR(ptr))
				return PTR_ERR(ptr);
			iomap->inline_data = ptr;
			ctx->page = buf.page;
			ctx->base = buf.base;
		}
	} else {
		iomap->type = IOMAP_MAPPED;
	}
	return 0;
}

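/*
 * Release the metabuf pinned by ->iomap_begin() for inline extents once
 * the iomap machinery is done with the range.
 */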
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
	struct erofs_iomap_iter_ctx *ctx = iter->private;

	if (ctx && ctx->base) {
		struct erofs_buf buf = {
			.page = ctx->page,
			.base = ctx->base,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
		ctx->base = NULL;
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

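/*
 * FIEMAP is implemented on top of iomap_fiemap(); compressed inodes use
 * the z_erofs reporting ops and are rejected if CONFIG_EROFS_FS_ZIP is
 * disabled.
 */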
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP))
			return -EOPNOTSUPP;
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since we don't have write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
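/*
 * Buffered reads go through the bio-based iomap read ops.
 * erofs_real_inode() resolves the inode that actually backs the data
 * (which may differ from the folio's host inode), and the extra
 * reference is dropped once the read has been kicked off.
 */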
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	struct iomap_read_folio_ctx read_ctx = {
		.ops = &iomap_bio_read_ops,
		.cur_folio = folio,
	};
	bool need_iput;
	struct erofs_iomap_iter_ctx iter_ctx = {
		.realinode = erofs_real_inode(folio_inode(folio), &need_iput),
	};

	trace_erofs_read_folio(iter_ctx.realinode, folio, true);
	iomap_read_folio(&erofs_iomap_ops, &read_ctx, &iter_ctx);
	if (need_iput)
		iput(iter_ctx.realinode);
	return 0;
}

static void erofs_readahead(struct readahead_control *rac)
{
	struct iomap_read_folio_ctx read_ctx = {
		.ops = &iomap_bio_read_ops,
		.rac = rac,
	};
	bool need_iput;
	struct erofs_iomap_iter_ctx iter_ctx = {
		.realinode = erofs_real_inode(rac->mapping->host, &need_iput),
	};

	trace_erofs_readahead(iter_ctx.realinode, readahead_index(rac),
			      readahead_count(rac), true);
	iomap_readahead(&erofs_iomap_ops, &read_ctx, &iter_ctx);
	if (need_iput)
		iput(iter_ctx.realinode);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

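/*
 * Dispatch reads: DAX inodes go through dax_iomap_rw(), O_DIRECT on
 * block-device-backed images goes through iomap_dio_rw() with a local
 * iter context, and everything else falls back to buffered filemap_read().
 */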
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need taking (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

	if (IS_ENABLED(CONFIG_FS_DAX) && IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);

	if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev) {
		struct erofs_iomap_iter_ctx iter_ctx = {
			.realinode = inode,
		};

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, &iter_ctx, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

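/*
 * DAX mmap support: faults are served directly from the DAX device via
 * dax_iomap_fault(); shared mappings that could become writable are
 * rejected since the filesystem is read-only.
 */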
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault = erofs_dax_fault,
	.huge_fault = erofs_dax_huge_fault,
};

static int erofs_file_mmap_prepare(struct vm_area_desc *desc)
{
	if (!IS_DAX(file_inode(desc->file)))
		return generic_file_readonly_mmap_prepare(desc);

	if (vma_desc_test_flags(desc, VMA_SHARED_BIT) &&
	    vma_desc_test_flags(desc, VMA_MAYWRITE_BIT))
		return -EINVAL;

	desc->vm_ops = &erofs_dax_vm_ops;
	vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT);
	return 0;
}
#else
#define erofs_file_mmap_prepare	generic_file_readonly_mmap_prepare
#endif

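/*
 * SEEK_HOLE/SEEK_DATA are answered via the iomap seek helpers, using the
 * z_erofs reporting ops for compressed inodes; other whence values fall
 * back to generic_file_llseek().
 */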
static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	const struct iomap_ops *ops = &erofs_iomap_ops;

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP))
			return generic_file_llseek(file, offset, whence);
		ops = &z_erofs_iomap_report_ops;
	}

	if (whence == SEEK_HOLE)
		offset = iomap_seek_hole(inode, offset, ops);
	else if (whence == SEEK_DATA)
		offset = iomap_seek_data(inode, offset, ops);
	else
		return generic_file_llseek(file, offset, whence);

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

const struct file_operations erofs_file_fops = {
	.llseek = erofs_file_llseek,
	.read_iter = erofs_file_read_iter,
	.unlocked_ioctl = erofs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = erofs_compat_ioctl,
#endif
	.mmap_prepare = erofs_file_mmap_prepare,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read = filemap_splice_read,
	.setlease = generic_setlease,
};