/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_full(struct bio *bio)
{
	return bio->bi_vcnt >= bio->bi_max_vecs;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i, iter)			\
	for (i = 0, bvl = bvec_init_iter_all(&iter);			\
	     bio_next_segment((bio), &iter); i++)

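/*
 * For example (an illustrative sketch, not taken from any real driver), a
 * completion handler that owns the whole bio might walk every segment to
 * mark its pages up to date; bvec, i and iter_all are local names made up
 * for the example:
 *
 *	struct bio_vec *bvec;
 *	struct bvec_iter_all iter_all;
 *	int i;
 *
 *	bio_for_each_segment_all(bvec, bio, i, iter_all)
 *		SetPageUptodate(bvec->bv_page);
 */
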
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

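/*
 * For example (illustrative only), summing the bytes in a bio one
 * single-page segment at a time; bio_for_each_bvec walks the same data
 * but yields multi-page bvecs instead:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned int bytes = 0;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		bytes += bv.bv_len;
 */
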
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would then point to freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];	/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

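/*
 * A sketch of the intended calling pattern (illustrative only; process()
 * is a hypothetical per-chunk helper): peel off chunks until
 * bio_next_split() hands back the original bio, which marks the final
 * chunk:
 *
 *	do {
 *		split = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		process(split);
 *	} while (split != bio);
 */
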
enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

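/*
 * For example (an illustrative sketch), a stacking driver that allocates
 * its own bios would set up a private pool once at init time; my_bioset
 * is a name made up for the example:
 *
 *	static struct bio_set my_bioset;
 *
 *	ret = bioset_init(&my_bioset, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bioset);
 */
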
extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

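/*
 * Putting the pieces together, a synchronous read of one page might look
 * something like the sketch below (illustrative only; bdev, page, sector
 * and the error handling are assumed to come from the caller):
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */
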
extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

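/*
 * bio_chain() ties completions together: the second bio is not ended
 * until the first one completes. A hypothetical sketch of splitting work
 * across two bios and submitting both:
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);
 *	submit_bio(split);
 *	submit_bio(bio);
 */
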
extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
			  unsigned int len, unsigned int off, bool same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		    unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(struct request_queue *q, int op,
			   unsigned long sectors, struct hd_struct *part);
void generic_end_io_acct(struct request_queue *q, int op,
			 struct hd_struct *part,
			 unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev) 			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
static inline void bio_associate_blkg_from_page(struct bio *bio,
						struct page *page) { }
#endif

#ifdef CONFIG_BLK_CGROUP
void bio_disassociate_blkg(struct bio *bio);
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_disassociate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * Remember: never re-enable interrupts between a bvec_kmap_irq() and
 * the matching bvec_kunmap_irq()!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

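/*
 * Typical use (an illustrative sketch; buf is assumed to come from the
 * caller): map a segment, copy the data out, unmap, all without
 * re-enabling interrupts in between:
 *
 *	unsigned long flags;
 *	char *data = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(buf, data, bvec->bv_len);
 *	bvec_kunmap_irq(data, &flags);
 */
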
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

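/*
 * For example (illustrative only), a remapping driver might queue bios it
 * cannot service immediately and drain them later from a worker, roughly:
 *
 *	struct bio_list deferred = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&deferred, bio);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		generic_make_request(bio);
 */
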
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

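/*
 * For example (a hypothetical direct-IO path; kiocb and bio are assumed
 * to come from the caller), marking a bio polled before submission and
 * saving the cookie that the caller will later poll on:
 *
 *	blk_qc_t qc;
 *
 *	bio_set_polled(bio, kiocb);
 *	qc = submit_bio(bio);
 *	WRITE_ONCE(kiocb->ki_cookie, qc);
 */
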
#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */