/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

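/*
 * Example (editorial sketch, not part of this header; the helper name is
 * illustrative): zeroing every segment of a bio that the caller created and
 * fully owns. bio_for_each_segment_all() walks bi_io_vec directly, which is
 * only valid for an unsplit, uncloned bio, per the warning above.
 */
static inline void bio_example_zero_all(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bvl;

	bio_for_each_segment_all(bvl, bio, iter)
		memzero_bvec(bvl);	/* memzero_bvec() comes from bvec.h */
}
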
/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @nbytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for
 * the same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/secure erase/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

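/*
 * Example (editorial sketch, name illustrative): the iterator-based walk
 * above in action. For a data bio the sum below simply reproduces
 * bio->bi_iter.bi_size, but the same loop shape is how callers inspect each
 * single-page segment without touching bi_io_vec directly.
 */
static inline unsigned int bio_example_sum_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;
}
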
/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could potentially complete before submit_bio
 * returns, and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) test runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating. NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	struct folio *_next;
	size_t _seg_count;
	int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	fi->folio = page_folio(bvec->bv_page);
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
	fi->_seg_count = bvec->bv_len;
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_next = folio_next(fi->folio);
	fi->_i = i;
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		fi->folio = fi->_next;
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
		fi->_next = folio_next(fi->folio);
	} else if (fi->_i + 1 < bio->bi_vcnt) {
		bio_first_folio(fi, bio, fi->_i + 1);
	} else {
		fi->folio = NULL;
	}
}

/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

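/*
 * Example (editorial sketch, name illustrative): zeroing a bio folio by
 * folio with bio_for_each_folio_all(). Each iteration supplies the folio
 * plus the offset/length of the byte range this bio touches in it, so the
 * loop body never has to worry about folio boundaries itself.
 */
static inline void bio_example_zero_folios(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		/* folio_zero_range() is provided by highmem.h */
		folio_zero_range(fi.folio, fi.offset, fi.length);
}
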
/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
	BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);

struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

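/*
 * Example (editorial sketch; the function is illustrative, not a kernel
 * symbol): setting up a driver-private bio_set with bioset_init(). Two
 * reserved bios, no per-bio front padding, and BIOSET_NEED_BVECS so the
 * set can also back bio_vec allocations.
 */
static inline int bio_example_bioset_setup(struct bio_set *bs)
{
	return bioset_init(bs, 2, 0, BIOSET_NEED_BVECS);
}
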
/*
 * Calculate the number of bvec segments that should be allocated to fit the
 * data pointed to by @iter. If @iter is backed by a bvec array, that array
 * is reused instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);

int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
		__bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_BPS_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}

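/*
 * Example (editorial sketch; bio_example_read_page() is illustrative, not a
 * kernel symbol): a synchronous single-page read tying together bio_alloc(),
 * bio_add_page() and submit_bio_wait().
 */
static inline int bio_example_read_page(struct block_device *bdev,
					struct page *page, sector_t sector)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio->bi_iter.bi_sector = sector;

	/* bio_add_page() returns the number of bytes actually added */
	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
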
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

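/*
 * Example (editorial sketch, name illustrative): the usual deferred-work
 * pattern built on bio_list - pop everything that has queued up and submit
 * in FIFO order. bio_list_pop() clears bi_next, so each bio leaves the
 * list in a clean state.
 */
static inline void bio_example_flush_list(struct bio_list *bl)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bl)) != NULL)
		submit_bio(bio);
}
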
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
}

static inline void bio_integrity_trim(struct bio *bio)
{
}

static inline void bio_integrity_init(void)
{
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

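/*
 * Example (editorial sketch, name illustrative): teardown guarded by
 * bioset_initialized(), the pattern used on error paths where a bio_set
 * may or may not have been set up yet.
 */
static inline void bio_example_bioset_teardown(struct bio_set *bs)
{
	if (bioset_initialized(bs))
		bioset_exit(bs);
}
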
/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
	bio->bi_opf &= ~REQ_POLLED;
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);

#endif /* __LINUX_BIO_H */