/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

struct queue_limits;

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * Drivers should _never_ use the "all" version - the bio may have been split
 * before it reached the driver, and the driver won't own all of it.
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}
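/*
 * Example (illustrative sketch, not part of this header): using
 * bio_for_each_segment_all() to touch every segment of a bio the caller
 * fully owns, here zeroing each one with memzero_bvec() from linux/bvec.h.
 * The name example_zero_all_segments() is hypothetical. Note the iterator
 * is a struct bvec_iter_all, not a struct bvec_iter, and per the comment
 * above this must never be used on a bio that may have been split.
 */
static inline void example_zero_all_segments(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		memzero_bvec(bvec);
}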
/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio: bio to advance
 * @nbytes: number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for
 * the same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)				\
	for (i = 0, bvl = bio_first_bvec_all(bio);			\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/secure erase/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
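/*
 * Example (illustrative sketch, not part of this header): walking a bio's
 * data with bio_for_each_segment(). Each iteration yields a single-page
 * struct bio_vec by value and advances a private copy of the iterator, so
 * the bio itself is left untouched. example_count_data_bytes() is a
 * hypothetical name; for a data-carrying bio the total simply equals
 * bio->bi_iter.bi_size.
 */
static inline unsigned int example_count_data_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;
}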
/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would then be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return bio->bi_flags & (1U << bit);
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct folio *bio_first_folio_all(struct bio *bio)
{
	return page_folio(bio_first_page_all(bio));
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
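/*
 * Example (illustrative sketch, not part of this header): the reference
 * pattern described in the comment above bio_get(), in compilable form.
 * example_submit_and_inspect() is a hypothetical name, and submit_bio()
 * is only declared further down in this header. The extra reference keeps
 * the bio's memory valid across submit_bio(), so the bio_flagged() check
 * cannot race with completion freeing the bio.
 */
static inline void example_submit_and_inspect(struct bio *bio)
{
	bio_get(bio);
	submit_bio(bio);
	if (bio_flagged(bio, BIO_QUIET))
		pr_debug("bio completed quietly\n");
	bio_put(bio);
}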
/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating. NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	struct folio *_next;
	size_t _seg_count;
	int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	if (unlikely(i >= bio->bi_vcnt)) {
		fi->folio = NULL;
		return;
	}

	fi->folio = page_folio(bvec->bv_page);
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
	fi->_seg_count = bvec->bv_len;
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_next = folio_next(fi->folio);
	fi->_i = i;
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		fi->folio = fi->_next;
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
		fi->_next = folio_next(fi->folio);
	} else {
		bio_first_folio(fi, bio, fi->_i + 1);
	}
}

/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
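/*
 * Example (illustrative sketch, not part of this header): walking a bio
 * folio by folio with bio_for_each_folio_all(). Each step's fi.length
 * never crosses a folio boundary, so consecutive steps may land in the
 * same folio when several bvecs do. example_count_folio_bytes() is a
 * hypothetical name. Note the iterator is passed by name, not by address.
 */
static inline size_t example_count_folio_bytes(struct bio *bio)
{
	struct folio_iter fi;
	size_t bytes = 0;

	bio_for_each_folio_all(fi, bio)
		bytes += fi.length;

	return bytes;
}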
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);
int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, unsigned max_bytes);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
	BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);

struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

/*
 * Calculate the number of bvec segments that should be allocated to fit the
 * data pointed to by @iter. If @iter is backed by a bvec, that bvec is reused
 * instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}
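/*
 * Example (illustrative sketch, not part of this header): sizing and
 * allocating a bio for the data described by an iov_iter. A bvec-backed
 * iter yields zero vecs to allocate, because bio_iov_bvec_set() can reuse
 * the iter's own bvec array. example_bio_for_iter() and the hardcoded
 * REQ_OP_READ are hypothetical; a real caller derives the op from the
 * iter's direction.
 */
static inline struct bio *example_bio_for_iter(struct block_device *bdev,
					       struct iov_iter *iter)
{
	unsigned int nr_vecs;

	nr_vecs = bio_max_segs(bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS));
	return bio_alloc(bdev, nr_vecs, REQ_OP_READ, GFP_KERNEL);
}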
struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);

int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
			      unsigned off);
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
				size_t len, size_t off);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
			  size_t off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		__bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
void blkcg_punt_bio_submit(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
	submit_bio(bio);
}
#endif	/* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_BPS_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_init(struct bio_list *bl,
		struct bio_list *bl2)
{
	bio_list_merge(bl, bl2);
	bio_list_init(bl2);
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
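/*
 * Example (illustrative sketch, not part of this header): the typical
 * produce/consume pattern for a bio_list, as used by remapping drivers.
 * example_queue_and_drain() is a hypothetical name; bio_list_pop()
 * detaches each bio with bi_next cleared, so it is safe to resubmit.
 */
static inline void example_queue_and_drain(struct bio_list *bl,
					   struct bio *bio)
{
	struct bio *tmp;

	bio_list_add(bl, bio);		/* append at the cached tail */

	while ((tmp = bio_list_pop(bl)))
		submit_bio(tmp);
}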
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * CPU hot-unplug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ-driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
	bio->bi_opf &= ~REQ_POLLED;
}

/**
 * bio_is_zone_append - is this a zone append bio?
 * @bio: bio to check
 *
 * Check if @bio is a zone append operation. Core block layer code and end_io
 * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check
 * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
 * it is not natively supported.
 */
static inline bool bio_is_zone_append(struct bio *bio)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return false;
	return bio_op(bio) == REQ_OP_ZONE_APPEND ||
		bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);

#endif /* __LINUX_BIO_H */
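/*
 * Example (illustrative sketch, not part of this header, placed after the
 * include guard purely for illustration): initialising a driver-private
 * bio_set whose front_pad leaves room for a per-bio context ahead of each
 * allocated bio. struct example_ctx, example_bs and example_bioset_setup()
 * are hypothetical names.
 */
struct example_ctx {
	unsigned long cookie;
};

static struct bio_set example_bs;

static inline int example_bioset_setup(void)
{
	/* BIO_POOL_SIZE reserved bios, private bvec pools, padded bios */
	return bioset_init(&example_bs, BIO_POOL_SIZE,
			   sizeof(struct example_ctx), BIOSET_NEED_BVECS);
}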