GitHub Repository: torvalds/linux
Path: blob/master/block/bio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <[email protected]>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio-integrity.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/highmem.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-cgroup.h"

#define ALLOC_CACHE_THRESHOLD	16
#define ALLOC_CACHE_MAX		256

struct bio_alloc_cache {
	struct bio *free_list;
	struct bio *free_list_irq;
	unsigned int nr;
	unsigned int nr_irq;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[12];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN,
			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations. If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p = bio;

	WARN_ON_ONCE(!bs);

	bio_uninit(bio);
	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
	mempool_free(p - bs->front_pad, &bs->bio_pool);
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf)
{
	bio->bi_next = NULL;
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_write_hint = 0;
	bio->bi_write_stream = 0;
	bio->bi_status = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->issue_time_ns = 0;
	if (bdev)
		bio_associate_blkg(bio);
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
	bio->bi_cookie = BLK_QC_T_NONE;

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);
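
/*
 * Illustrative sketch (not part of bio.c): a caller that provides its own bio
 * storage is expected to pair bio_init() with bio_uninit(), as the comment
 * above bio_init() describes.  The on-stack bio, single bvec, bdev, sector
 * and page below are hypothetical, and error handling is elided.
 *
 *	struct bio_vec bv;
 *	struct bio b;
 *
 *	bio_init(&b, bdev, &bv, 1, REQ_OP_READ);
 *	b.bi_iter.bi_sector = sector;
 *	__bio_add_page(&b, page, PAGE_SIZE, 0);
 *	...submit the bio and wait for completion...
 *	bio_uninit(&b);
 */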

/**
 * bio_reset - reinitialize a bio
 * @bio: bio to reset
 * @bdev: block device to use the bio for
 * @opf: operation and flags for bio
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
	bio->bi_bdev = bdev;
	if (bio->bi_bdev)
		bio_associate_blkg(bio);
	bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);
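
/*
 * Illustrative sketch (not part of bio.c): bio_reset() lets a driver reuse
 * one bio for several I/Os instead of reallocating it; the preserved bvec
 * table can be refilled after the reset.  The bdev, sector and page names
 * below are hypothetical.
 *
 *	bio_reset(bio, bdev, REQ_OP_WRITE);
 *	bio->bi_iter.bi_sector = next_sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(bio);
 */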

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
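
/*
 * Illustrative sketch (not part of bio.c): a stacking driver that issues a
 * child bio but wants the parent's ->bi_end_io to run only once both have
 * completed can chain them as described above.  @child and @parent are
 * hypothetical bios owned by the caller.
 *
 *	bio_chain(child, parent);
 *	submit_bio(child);
 *	submit_bio(parent);
 */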

/**
 * bio_chain_and_submit - submit a bio after chaining it to another one
 * @prev: bio to chain and submit
 * @new: bio to chain to
 *
 * If @prev is non-NULL, chain it to @new and submit it.
 *
 * Return: @new.
 */
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
{
	if (prev) {
		bio_chain(prev, new);
		submit_bio(prev);
	}
	return new;
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
{
	return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
}
EXPORT_SYMBOL_GPL(blk_next_bio);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
{
	unsigned long flags;

	/* cache->free_list must be empty */
	if (WARN_ON_ONCE(cache->free_list))
		return;

	local_irq_save(flags);
	cache->free_list = cache->free_list_irq;
	cache->free_list_irq = NULL;
	cache->nr += cache->nr_irq;
	cache->nr_irq = 0;
	local_irq_restore(flags);
}

static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
		struct bio_set *bs)
{
	struct bio_alloc_cache *cache;
	struct bio *bio;

	cache = per_cpu_ptr(bs->cache, get_cpu());
	if (!cache->free_list) {
		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
			bio_alloc_irq_cache_splice(cache);
		if (!cache->free_list) {
			put_cpu();
			return NULL;
		}
	}
	bio = cache->free_list;
	cache->free_list = bio->bi_next;
	cache->nr--;
	put_cpu();

	if (nr_vecs)
		bio_init_inline(bio, bdev, nr_vecs, opf);
	else
		bio_init(bio, bdev, NULL, nr_vecs, opf);
	bio->bi_pool = bs;
	return bio;
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev: block device to allocate the bio for (can be %NULL)
 * @nr_vecs: number of bvecs to pre-allocate
 * @opf: operation and flags for bio
 * @gfp_mask: the GFP_* mask given to the slab allocator
 * @bs: the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio. This is due to the mempool guarantees. To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have deadlock avoidance code that
 * resubmits any blocked bios from a rescuer thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	if (opf & REQ_ALLOC_CACHE) {
		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
						     gfp_mask, bs);
			if (bio)
				return bio;
			/*
			 * No cached bio available. The bio returned below is
			 * marked with REQ_ALLOC_CACHE to participate in the
			 * per-cpu alloc cache.
			 */
		} else {
			opf &= ~REQ_ALLOC_CACHE;
		}
	}

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;
	if (!mempool_is_saturated(&bs->bio_pool))
		opf &= ~REQ_ALLOC_CACHE;

	bio = p + bs->front_pad;
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bdev, bvl, nr_vecs, opf);
	} else if (nr_vecs) {
		bio_init_inline(bio, bdev, BIO_INLINE_VECS, opf);
	} else {
		bio_init(bio, bdev, NULL, 0, opf);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
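
/*
 * Illustrative sketch (not part of bio.c): a filesystem read path typically
 * allocates from fs_bio_set through the bio_alloc() wrapper.  With
 * __GFP_DIRECT_RECLAIM set (as in GFP_NOIO or GFP_KERNEL) the allocation
 * cannot fail, per the mempool guarantees documented above.  The bdev,
 * sector, page and my_read_end_io names are hypothetical.
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_read_end_io;
 *	submit_bio(bio);
 */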

/**
 * bio_kmalloc - kmalloc a bio
 * @nr_vecs: number of bio_vecs to allocate
 * @gfp_mask: the GFP_* mask given to the slab allocator
 *
 * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized
 * using bio_init() before use. To free a bio returned from this function use
 * kfree() after calling bio_uninit(). A bio returned from this function can
 * be reused by calling bio_uninit() before calling bio_init() again.
 *
 * Note that unlike bio_alloc() or bio_alloc_bioset(), allocations from this
 * function are not backed by a mempool and can fail. Do not use this function
 * for allocations in the file system I/O path.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (nr_vecs > BIO_MAX_INLINE_VECS)
		return NULL;
	return kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec),
			gfp_mask);
}
EXPORT_SYMBOL(bio_kmalloc);
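
/*
 * Illustrative sketch (not part of bio.c): the lifetime rules from the
 * kerneldoc above - bio_kmalloc() plus bio_init() to set up, bio_uninit()
 * plus kfree() to tear down.  Names are hypothetical and error handling is
 * minimal.
 *
 *	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
 *	if (!bio)
 *		return -ENOMEM;
 *	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
 *	...use the bio...
 *	bio_uninit(bio);
 *	kfree(bio);
 */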

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_truncate - truncate the bio down to @new_size
 * @bio: the bio to be truncated
 * @new_size: new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
static void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			size_t offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			memzero_page(bv.bv_page, bv.bv_offset + offset,
				     bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .end_bio() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio: bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually. Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				   unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = cache->free_list) != NULL) {
		cache->free_list = bio->bi_next;
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
	return i;
}

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	nr -= __bio_alloc_cache_prune(cache, nr);
	if (!READ_ONCE(cache->free_list)) {
		bio_alloc_irq_cache_splice(cache);
		__bio_alloc_cache_prune(cache, nr);
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
	bs->cache = NULL;
}

static inline void bio_put_percpu_cache(struct bio *bio)
{
	struct bio_alloc_cache *cache;

	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX)
		goto out_free;

	if (in_task()) {
		bio_uninit(bio);
		bio->bi_next = cache->free_list;
		/* Not necessary but helps not to iopoll already freed bios */
		bio->bi_bdev = NULL;
		cache->free_list = bio;
		cache->nr++;
	} else if (in_hardirq()) {
		lockdep_assert_irqs_disabled();

		bio_uninit(bio);
		bio->bi_next = cache->free_list_irq;
		cache->free_list_irq = bio;
		cache->nr_irq++;
	} else {
		goto out_free;
	}
	put_cpu();
	return;
out_free:
	put_cpu();
	bio_free(bio);
}

/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}
	if (bio->bi_opf & REQ_ALLOC_CACHE)
		bio_put_percpu_cache(bio);
	else
		bio_free(bio);
}
EXPORT_SYMBOL(bio_put);

static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_write_stream = bio_src->bi_write_stream;
	bio->bi_iter = bio_src->bi_iter;

	if (bio->bi_bdev) {
		if (bio->bi_bdev == bio_src->bi_bdev &&
		    bio_flagged(bio_src, BIO_REMAPPED))
			bio_set_flag(bio, BIO_REMAPPED);
		bio_clone_blkg_association(bio, bio_src);
	}

	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	return 0;
}

/**
 * bio_alloc_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 * @bs: bio_set to allocate from
 *
 * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
 * bio, but not the actual data it points to.
 *
 * The caller must ensure that the returned bio is not freed before @bio_src.
 */
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs)
{
	struct bio *bio;

	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
	if (!bio)
		return NULL;

	if (__bio_clone(bio, bio_src, gfp) < 0) {
		bio_put(bio);
		return NULL;
	}
	bio->bi_io_vec = bio_src->bi_io_vec;

	return bio;
}
EXPORT_SYMBOL(bio_alloc_clone);

/**
 * bio_init_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio: bio to clone into
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 *
 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
 * The caller owns the returned bio, but not the actual data it points to.
 *
 * The caller must ensure that @bio_src is not freed before @bio.
 */
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp)
{
	int ret;

	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
	ret = __bio_clone(bio, bio_src, gfp);
	if (ret)
		bio_uninit(bio);
	return ret;
}
EXPORT_SYMBOL(bio_init_clone);

/**
 * bio_full - check if the bio is full
 * @bio: bio to check
 * @len: length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
		unsigned int len, unsigned int off)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	if ((vec_end_addr & PAGE_MASK) != ((page_addr + off) & PAGE_MASK)) {
		if (IS_ENABLED(CONFIG_KMSAN))
			return false;
		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
			return false;
	}

	bv->bv_len += len;
	return true;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.
 *
 * This is kept around for the integrity metadata, which still tries
 * to build the initial bio to the hardware limit and doesn't have proper
 * helpers to split. Hopefully this will go away soon.
 */
bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(bv);
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (len > queue_max_segment_size(q) - bv->bv_len)
		return false;
	return bvec_try_merge_page(bv, page, len, offset);
}

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	if (is_pci_p2pdma_page(page))
		bio->bi_opf |= REQ_NOMERGE;

	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_virt_nofail - add data in the direct kernel mapping to a bio
 * @bio: destination bio
 * @vaddr: data to add
 * @len: length of the data to add, may cross pages
 *
 * Add the data at @vaddr to @bio. The caller must have ensured that a segment
 * is available for the added data. No merging into an existing segment
 * will be performed.
 */
void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
{
	__bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
}
EXPORT_SYMBOL_GPL(bio_add_virt_nofail);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return 0;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
			return 0;

		if (bvec_try_merge_page(bv, page, len, offset)) {
			bio->bi_iter.bi_size += len;
			return len;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;
	__bio_add_page(bio, page, len, offset);
	return len;
}
EXPORT_SYMBOL(bio_add_page);
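
/*
 * Illustrative sketch (not part of bio.c): building a bio page by page while
 * checking the return value, since bio_add_page() returns 0 once the bvec
 * table is full.  The pages[] array and nr_pages are hypothetical.
 *
 *	for (i = 0; i < nr_pages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;	(bio is full - submit it and start a new one)
 */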

void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
			  size_t off)
{
	unsigned long nr = off / PAGE_SIZE;

	WARN_ON_ONCE(len > UINT_MAX);
	__bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(bio_add_folio_nofail);

/**
 * bio_add_folio - Attempt to add part of a folio to a bio.
 * @bio: BIO to add to.
 * @folio: Folio to add.
 * @len: How many bytes from the folio to add.
 * @off: First byte in this folio to add.
 *
 * Filesystems that use folios can call this function instead of calling
 * bio_add_page() for each page in the folio. If @off is bigger than
 * PAGE_SIZE, this function can create a bio_vec that starts in a page
 * after the bv_page. BIOs do not support folios that are 4GiB or larger.
 *
 * Return: Whether the addition was successful.
 */
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
		   size_t off)
{
	unsigned long nr = off / PAGE_SIZE;

	if (len > UINT_MAX)
		return false;
	return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
}
EXPORT_SYMBOL(bio_add_folio);

/**
 * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio
 * @bio: destination bio
 * @vaddr: vmalloc address to add
 * @len: total length in bytes of the data to add
 *
 * Add data starting at @vaddr to @bio and return how many bytes were added.
 * This may be less than the amount originally asked for. Returns 0 if no data
 * could be added to @bio.
 *
 * This helper calls flush_kernel_vmap_range() for the range added. For reads
 * the caller still needs to manually call invalidate_kernel_vmap_range() in
 * the completion handler.
 */
unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
{
	unsigned int offset = offset_in_page(vaddr);

	len = min(len, PAGE_SIZE - offset);
	if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
		return 0;
	if (op_is_write(bio_op(bio)))
		flush_kernel_vmap_range(vaddr, len);
	return len;
}
EXPORT_SYMBOL_GPL(bio_add_vmalloc_chunk);

/**
 * bio_add_vmalloc - add a vmalloc region to a bio
 * @bio: destination bio
 * @vaddr: vmalloc address to add
 * @len: total length in bytes of the data to add
 *
 * Add data starting at @vaddr to @bio. Return %true on success or %false if
 * @bio does not have enough space for the payload.
 *
 * This helper calls flush_kernel_vmap_range() for the range added. For reads
 * the caller still needs to manually call invalidate_kernel_vmap_range() in
 * the completion handler.
 */
bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
{
	do {
		unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);

		if (!added)
			return false;
		vaddr += added;
		len -= added;
	} while (len);

	return true;
}
EXPORT_SYMBOL_GPL(bio_add_vmalloc);
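
/*
 * Illustrative sketch (not part of bio.c): writing out a vmalloc'ed buffer.
 * bio_add_vmalloc() walks the buffer page by page via vmalloc_to_page() and
 * handles cache flushing internally.  The buffer, length, sector and the
 * rough nr_vecs estimate below are hypothetical.
 *
 *	bio = bio_alloc(bdev, DIV_ROUND_UP(len, PAGE_SIZE) + 1, REQ_OP_WRITE,
 *			GFP_NOIO);
 *	bio->bi_iter.bi_sector = sector;
 *	if (!bio_add_vmalloc(bio, buf, len))
 *		goto too_large;
 *	submit_bio(bio);
 */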

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		size_t nr_pages;

		if (mark_dirty) {
			folio_lock(fi.folio);
			folio_mark_dirty(fi.folio);
			folio_unlock(fi.folio);
		}
		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
			   fi.offset / PAGE_SIZE + 1;
		unpin_user_folio(fi.folio, nr_pages);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
{
	WARN_ON_ONCE(bio->bi_max_vecs);

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = iov_iter_count(iter);
	bio_set_flag(bio, BIO_CLONED);
}

static unsigned int get_contig_folio_len(unsigned int *num_pages,
					 struct page **pages, unsigned int i,
					 struct folio *folio, size_t left,
					 size_t offset)
{
	size_t bytes = left;
	size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, bytes);
	unsigned int j;

	/*
	 * We might COW a single page in the middle of
	 * a large folio, so we have to check that all
	 * pages belong to the same folio.
	 */
	bytes -= contig_sz;
	for (j = i + 1; j < i + *num_pages; j++) {
		size_t next = min_t(size_t, PAGE_SIZE, bytes);

		if (page_folio(pages[j]) != folio ||
		    pages[j] != pages[j - 1] + 1) {
			break;
		}
		contig_sz += next;
		bytes -= next;
	}
	*num_pages = j - i;

	return contig_sz;
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Extracts pages from *iter and appends them to @bio's bvec array. The pages
 * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag.
 * For a multi-segment *iter, this function only adds pages from the next
 * non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	iov_iter_extraction_t extraction_flags = 0;
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size;
	unsigned int num_pages, i = 0;
	size_t offset, folio_offset, left, len;
	int ret = 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
		extraction_flags |= ITER_ALLOW_P2PDMA;

	size = iov_iter_extract_pages(iter, &pages,
				      UINT_MAX - bio->bi_iter.bi_size,
				      nr_pages, extraction_flags, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
	for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
		struct page *page = pages[i];
		struct folio *folio = page_folio(page);
		unsigned int old_vcnt = bio->bi_vcnt;

		folio_offset = ((size_t)folio_page_idx(folio, page) <<
			       PAGE_SHIFT) + offset;

		len = min(folio_size(folio) - folio_offset, left);

		num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);

		if (num_pages > 1)
			len = get_contig_folio_len(&num_pages, pages, i,
						   folio, left, offset);

		if (!bio_add_folio(bio, folio, len, folio_offset)) {
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		if (bio_flagged(bio, BIO_PAGE_PINNED)) {
			/*
			 * We're adding another fragment of a page that already
			 * was part of the last segment. Undo our pin as the
			 * page was pinned when an earlier fragment of it was
			 * added to the bio and __bio_release_pages expects a
			 * single pin per page.
			 */
			if (offset && bio->bi_vcnt == old_vcnt)
				unpin_user_folio(folio, 1);
		}
		offset = 0;
	}

	iov_iter_revert(iter, left);
out:
	while (i < nr_pages)
		bio_release_page(bio, pages[i++]);

	return ret;
}

/*
 * Aligns the bio size to the len_align_mask, releasing excessive bio vecs that
 * __bio_iov_iter_get_pages may have inserted, and reverts the trimmed length
 * for the next iteration.
 */
static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
				   unsigned len_align_mask)
{
	size_t nbytes = bio->bi_iter.bi_size & len_align_mask;

	if (!nbytes)
		return 0;

	iov_iter_revert(iter, nbytes);
	bio->bi_iter.bi_size -= nbytes;
	do {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (nbytes < bv->bv_len) {
			bv->bv_len -= nbytes;
			break;
		}

		bio_release_page(bio, bv->bv_page);
		bio->bi_vcnt--;
		nbytes -= bv->bv_len;
	} while (nbytes);

	if (!bio->bi_vcnt)
		return -EFAULT;
	return 0;
}

/**
 * bio_iov_iter_get_pages_aligned - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 * @len_align_mask: the mask to align the total size to, 0 for any length
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
 * to ensure the bvecs and pages stay referenced until the submitted I/O is
 * completed by a call to ->ki_complete() or returns with an error other than
 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
 * on IO completion. If it isn't, then pages should be released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
		unsigned len_align_mask)
{
	int ret = 0;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return -EIO;

	if (iov_iter_is_bvec(iter)) {
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	if (iov_iter_extract_will_pin(iter))
		bio_set_flag(bio, BIO_PAGE_PINNED);
	do {
		ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	if (bio->bi_vcnt)
		return bio_iov_iter_align_down(bio, iter, len_align_mask);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages_aligned);
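
/*
 * Illustrative sketch (not part of bio.c): a direct I/O path maps the user
 * iterator into the bio and releases the pinned pages on completion.
 * Passing a zero alignment mask accepts any length.  @bio, @iter and
 * should_dirty are hypothetical.
 *
 *	ret = bio_iov_iter_get_pages_aligned(bio, iter, 0);
 *	if (ret)
 *		return ret;
 *	submit_bio(bio);
 *	...in the completion handler...
 *	bio_release_pages(bio, should_dirty);
 */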

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume the bio reference. The caller must drop the reference on their
 * own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	blk_wait_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
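
/*
 * Illustrative sketch (not part of bio.c): synchronously reading one page.
 * As the warning above says, submit_bio_wait() does not consume the bio
 * reference, so the caller still puts the bio itself.  The bdev, sector and
 * page below are hypothetical.
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */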

/**
 * bdev_rw_virt - synchronously read into / write from kernel mapping
 * @bdev: block device to access
 * @sector: sector to access
 * @data: data to read/write
 * @len: length in bytes to read/write
 * @op: operation (e.g. REQ_OP_READ/REQ_OP_WRITE)
 *
 * Performs synchronous I/O to @bdev for @data/@len. @data must be in
 * the kernel direct mapping and not a vmalloc address.
 */
int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
		size_t len, enum req_op op)
{
	struct bio_vec bv;
	struct bio bio;
	int error;

	if (WARN_ON_ONCE(is_vmalloc_addr(data)))
		return -EIO;

	bio_init(&bio, bdev, &bv, 1, op);
	bio.bi_iter.bi_sector = sector;
	bio_add_virt_nofail(&bio, data, len);
	error = submit_bio_wait(&bio);
	bio_uninit(&bio);
	return error;
}
EXPORT_SYMBOL_GPL(bdev_rw_virt);

static void bio_wait_end_io(struct bio *bio)
{
	complete(bio->bi_private);
	bio_put(bio);
}

/*
 * bio_await_chain - ends @bio and waits for every chained bio to complete
 */
void bio_await_chain(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = bio_wait_end_io;
	bio_endio(bio);
	blk_wait_io(&done);
}

void __bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(__bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	while (src_iter->bi_size && dst_iter->bi_size) {
		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
		void *src_buf = bvec_kmap_local(&src_bv);
		void *dst_buf = bvec_kmap_local(&dst_bv);

		memcpy(dst_buf, src_buf, bytes);

		kunmap_local(dst_buf);
		kunmap_local(src_buf);

		bio_advance_iter_single(src, src_iter, bytes);
		bio_advance_iter_single(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run folio_mark_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		folio_lock(fi.folio);
		folio_mark_dirty(fi.folio);
		folio_unlock(fi.folio);
	}
}
EXPORT_SYMBOL_GPL(bio_set_pages_dirty);

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will unpin each page and will run one bio_put() against the
 * BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct folio_iter fi;
	unsigned long flags;

	bio_for_each_folio_all(fi, bio) {
		if (!folio_test_dirty(fi.folio))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}
EXPORT_SYMBOL_GPL(bio_check_pages_dirty);

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain(). The ->bi_end_io() function will only be called the
 *   last time.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	blk_zone_bio_endio(bio);

	rq_qos_done_bio(bio);

	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

#ifdef CONFIG_BLK_CGROUP
	/*
	 * Release cgroup info. We shouldn't have to do this here, but quite
	 * a few callers of bio_init fail to call bio_uninit, so we cover up
	 * for that here at least for now.
	 */
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio: bio to split
 * @sectors: number of sectors to split from the front of @bio
 * @gfp: gfp mask
 * @bs: bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	if (WARN_ON_ONCE(sectors <= 0))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
		return ERR_PTR(-EINVAL);

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return ERR_PTR(-EINVAL);

	/* atomic writes cannot be split */
	if (bio->bi_opf & REQ_ATOMIC)
		return ERR_PTR(-EINVAL);

	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
	if (!split)
		return ERR_PTR(-ENOMEM);

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);
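
/*
 * Illustrative sketch (not part of bio.c): the usual splitting pattern in
 * drivers - carve off the front, chain it to the remainder, resubmit the
 * remainder through submit_bio_noacct(), and keep processing the front.
 * @max_sectors is a hypothetical per-device limit and ERR_PTR handling is
 * elided.
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
 *					      &fs_bio_set);
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);
 *		bio = split;
 *	}
 */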

/**
 * bio_trim - trim a bio
 * @bio: bio to trim
 * @offset: number of sectors to trim from the front of @bio
 * @size: size we want to trim @bio to, in sectors
 *
 * This function is typically used for bios that are cloned and submitted
 * to the underlying device in parts.
 */
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
{
	/* We should never trim an atomic write */
	if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size))
		return;

	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
			 offset + size > bio_sectors(bio)))
		return;

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	bio_alloc_cache_destroy(bs);
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *		and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs. This pool is not needed e.g. for bio_init_clone().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
 *    to dispatch queued requests when the mempool runs out of space.
 *
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	bs->front_pad = front_pad;
	if (flags & BIOSET_NEED_BVECS)
		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	else
		bs->back_pad = 0;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(bs);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (flags & BIOSET_NEED_RESCUER) {
		bs->rescue_workqueue = alloc_workqueue("bioset",
						       WQ_MEM_RECLAIM, 0);
		if (!bs->rescue_workqueue)
			goto bad;
	}
	if (flags & BIOSET_PERCPU_CACHE) {
		bs->cache = alloc_percpu(struct bio_alloc_cache);
		if (!bs->cache)
			goto bad;
		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	}

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);
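
/*
 * Illustrative sketch (not part of bio.c): a driver that embeds a bio at the
 * END of its per-I/O structure, as required above, and sizes front_pad so
 * bio_alloc_bioset() hands back memory with room for that structure in front
 * of the bio.  struct my_io and my_bio_set are hypothetical.
 *
 *	struct my_io {			(bio must be the last member)
 *		void *private;
 *		struct bio bio;
 *	};
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 *	bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_NOIO, &my_bio_set);
 *	io = container_of(bio, struct my_io, bio);
 */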

static int __init init_bio(void)
{
	int i;

	BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));

	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
		struct biovec_slab *bvs = bvec_slabs + i;

		bvs->slab = kmem_cache_create(bvs->name,
				bvs->nr_vecs * sizeof(struct bio_vec), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	}

	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
					bio_cpu_dead);

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
		panic("bio: can't allocate bios\n");

	return 0;
}
subsys_initcall(init_bio);