/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include <trace/events/block.h>

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

static mempool_t *bio_split_pool __read_mostly;

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
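/*
 * Note: with BIO_INLINE_VECS == 4, allocations of up to four vectors
 * are served by the inline array inside struct bio itself, so the
 * "biovec-1" and "biovec-4" entries above never get a slab cache
 * backing them (see biovec_init_slabs() at the bottom of this file).
 */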
/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		bio_slab_max <<= 1;
		bio_slabs = krealloc(bio_slabs,
				     bio_slab_max * sizeof(struct bio_slab),
				     GFP_KERNEL);
		if (!bio_slabs)
			goto out_unlock;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
{
	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);

	if (idx == BIOVEC_MAX_IDX)
		mempool_free(bv, bs->bvec_pool);
	else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
			      struct bio_set *bs)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_slabs define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}
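	/*
	 * Example: nr == 10 lands in the "5 ... 16" case above, so *idx
	 * ends up 2 and the allocation below comes from the "biovec-16"
	 * slab; the caller then reads back the rounded-up capacity with
	 * bvec_nr_vecs(2) == 16.
	 */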
	/*
	 * idx now points to the pool we want to allocate from. only the
	 * BIOVEC_MAX_IDX entry pool is mempool backed.
	 */
	if (*idx == BIOVEC_MAX_IDX) {
fallback:
		bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_WAIT
		 * is set, retry with the mempool-backed pool.
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
			*idx = BIOVEC_MAX_IDX;
			goto fallback;
		}
	}

	return bvl;
}

void bio_free(struct bio *bio, struct bio_set *bs)
{
	void *p;

	if (bio_has_allocated_vec(bio))
		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));

	if (bio_integrity(bio))
		bio_integrity_free(bio, bs);

	/*
	 * If we have front padding, adjust the bio pointer before freeing
	 */
	p = bio;
	if (bs->front_pad)
		p -= bs->front_pad;

	mempool_free(p, bs->bio_pool);
}
EXPORT_SYMBOL(bio_free);

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	bio->bi_flags = 1 << BIO_UPTODATE;
	atomic_set(&bio->bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 * @bs: the bio_set to allocate from.
 *
 * Description:
 *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free.
 *
 *   Note that the caller must set ->bi_destructor on successful return
 *   of a bio, to do the appropriate freeing of the bio once the reference
 *   count drops to zero.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	p = mempool_alloc(bs->bio_pool, gfp_mask);
	if (unlikely(!p))
		return NULL;
	bio = p + bs->front_pad;

	bio_init(bio);

	if (unlikely(!nr_iovecs))
		goto out_set;

	if (nr_iovecs <= BIO_INLINE_VECS) {
		bvl = bio->bi_inline_vecs;
		nr_iovecs = BIO_INLINE_VECS;
	} else {
		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
		if (unlikely(!bvl))
			goto err_free;

		nr_iovecs = bvec_nr_vecs(idx);
	}
out_set:
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

static void bio_fs_destructor(struct bio *bio)
{
	bio_free(bio, fs_bio_set);
}
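/*
 * Illustrative use of a private bio_set (my_bio_set and my_destructor
 * are made-up names, not symbols from this file):
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, 1, my_bio_set);
 *	if (bio)
 *		bio->bi_destructor = my_destructor;
 *
 * where my_destructor() would call bio_free(bio, my_bio_set), just as
 * bio_fs_destructor() above does for fs_bio_set.
 */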
/**
 * bio_alloc - allocate a new bio, memory pool backed
 * @gfp_mask: allocation mask to use
 * @nr_iovecs: number of iovecs
 *
 * bio_alloc will allocate a bio and associated bio_vec array that can hold
 * at least @nr_iovecs entries. Allocations will be done from the
 * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
 *
 * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
 * a bio. This is due to the mempool guarantees. To make this work, callers
 * must never allocate more than 1 bio at a time from this pool. Callers
 * that need to allocate more than 1 bio must always submit the previously
 * allocated bio for IO before attempting to allocate a new one. Failure to
 * do so can cause livelocks under memory pressure.
 *
 * RETURNS:
 * Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);

	if (bio)
		bio->bi_destructor = bio_fs_destructor;

	return bio;
}
EXPORT_SYMBOL(bio_alloc);

static void bio_kmalloc_destructor(struct bio *bio)
{
	if (bio_integrity(bio))
		bio_integrity_free(bio, fs_bio_set);
	kfree(bio);
}

/**
 * bio_kmalloc - allocate a bio for I/O using kmalloc()
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 *
 * Description:
 *   Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
 *   %__GFP_WAIT, the allocation is guaranteed to succeed.
 *
 **/
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
		      gfp_mask);
	if (unlikely(!bio))
		return NULL;

	bio_init(bio);
	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bio->bi_inline_vecs;
	bio->bi_destructor = bio_kmalloc_destructor;

	return bio;
}
EXPORT_SYMBOL(bio_kmalloc);

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i) {
		char *data = bvec_kmap_irq(bv, &flags);
		memset(data, 0, bv->bv_len);
		flush_dcache_page(bv->bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

	/*
	 * last put frees it
	 */
	if (atomic_dec_and_test(&bio->bi_cnt)) {
		bio->bi_next = NULL;
		bio->bi_destructor(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);
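/*
 * Sketch of the common allocate/fill/submit cycle (bdev, sector, page
 * and my_end_io stand in for caller-provided values):
 *
 *	bio = bio_alloc(GFP_NOIO, 1);
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 *
 * The completion handler (or the submitter, if it holds an extra
 * reference from bio_get()) drops the last reference with bio_put().
 */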
/**
 * __bio_clone - clone a bio
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 */
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
	       bio_src->bi_max_vecs * sizeof(struct bio_vec));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_sector = bio_src->bi_sector;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_flags |= 1 << BIO_CLONED;
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_vcnt = bio_src->bi_vcnt;
	bio->bi_size = bio_src->bi_size;
	bio->bi_idx = bio_src->bi_idx;
}
EXPORT_SYMBOL(__bio_clone);

/**
 * bio_clone - clone a bio
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 *
 * Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);

	if (!b)
		return NULL;

	b->bi_destructor = bio_fs_destructor;
	__bio_clone(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone);

/**
 * bio_get_nr_vecs - return approx number of vecs
 * @bdev: I/O target
 *
 * Return the approximate number of pages we can send to this target.
 * There's no guarantee that you will be able to fit this number of pages
 * into a bio, it does not account for dynamic restrictions that vary
 * on offset.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int nr_pages;

	nr_pages = min_t(unsigned,
		     queue_max_segments(q),
		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);

	return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
}
EXPORT_SYMBOL(bio_get_nr_vecs);

static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
			  *page, unsigned int len, unsigned int offset,
			  unsigned short max_sectors)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_size + len) >> 9) > max_sectors)
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset. Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			unsigned int prev_bv_len = prev->bv_len;
			prev->bv_len += len;

			if (q->merge_bvec_fn) {
				struct bvec_merge_data bvm = {
					/* prev_bvec is already charged in
					   bi_size, discharge it in order to
					   simulate merging updated prev_bvec
					   as new bvec. */
					.bi_bdev = bio->bi_bdev,
					.bi_sector = bio->bi_sector,
					.bi_size = bio->bi_size - prev_bv_len,
					.bi_rw = bio->bi_rw,
				};

				if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
					prev->bv_len -= len;
					return 0;
				}
			}

			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * we might lose a segment or two here, but rather that than
	 * make this too complex.
	 */
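	/*
	 * Note: blk_recount_segments() below may merge physically
	 * contiguous vecs and bring bi_phys_segments back under the
	 * queue_max_segments() limit; the retried_segments flag makes
	 * sure we pay for that recount at most once.
	 */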
	while (bio->bi_phys_segments >= queue_max_segments(q)) {

		if (retried_segments)
			return 0;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;

	/*
	 * if queue has other restrictions (eg varying max sector size
	 * depending on offset), it can specify a merge_bvec_fn in the
	 * queue to get further control
	 */
	if (q->merge_bvec_fn) {
		struct bvec_merge_data bvm = {
			.bi_bdev = bio->bi_bdev,
			.bi_sector = bio->bi_sector,
			.bi_size = bio->bi_size,
			.bi_rw = bio->bi_rw,
		};

		/*
		 * merge_bvec_fn() returns number of bytes it can accept
		 * at this offset
		 */
		if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
			bvec->bv_page = NULL;
			bvec->bv_len = 0;
			bvec->bv_offset = 0;
			return 0;
		}
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	bio->bi_vcnt++;
	bio->bi_phys_segments++;
 done:
	bio->bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
		    unsigned int len, unsigned int offset)
{
	return __bio_add_page(q, bio, page, len, offset,
			      queue_max_hw_sectors(q));
}
EXPORT_SYMBOL(bio_add_pc_page);
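/*
 * Usage sketch for bio_add_page() below: the return value is the number
 * of bytes actually added, so a short return means "bio full":
 *
 *	if (bio_add_page(bio, page, len, offset) < len) {
 *		submit_bio(rw, bio);
 *		bio = bio_alloc(GFP_NOIO, nr_vecs);
 *	}
 *
 * (rw, page, len, offset and nr_vecs are caller-provided values.)
 */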
/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
EXPORT_SYMBOL(bio_add_page);

struct bio_map_data {
	struct bio_vec *iovecs;
	struct sg_iovec *sgvecs;
	int nr_sgvecs;
	int is_our_pages;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
			     struct sg_iovec *iov, int iov_count,
			     int is_our_pages)
{
	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
	bmd->nr_sgvecs = iov_count;
	bmd->is_our_pages = is_our_pages;
	bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
	kfree(bmd->iovecs);
	kfree(bmd->sgvecs);
	kfree(bmd);
}

static struct bio_map_data *bio_alloc_map_data(int nr_segs,
					       unsigned int iov_count,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (iov_count > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(sizeof(*bmd), gfp_mask);
	if (!bmd)
		return NULL;

	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
	if (!bmd->iovecs) {
		kfree(bmd);
		return NULL;
	}

	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
	if (bmd->sgvecs)
		return bmd;

	kfree(bmd->iovecs);
	kfree(bmd);
	return NULL;
}

static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
			  struct sg_iovec *iov, int iov_count,
			  int to_user, int from_user, int do_free_page)
{
	int ret = 0, i;
	struct bio_vec *bvec;
	int iov_idx = 0;
	unsigned int iov_off = 0;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *bv_addr = page_address(bvec->bv_page);
		unsigned int bv_len = iovecs[i].bv_len;

		while (bv_len && iov_idx < iov_count) {
			unsigned int bytes;
			char __user *iov_addr;

			bytes = min_t(unsigned int,
				      iov[iov_idx].iov_len - iov_off, bv_len);
			iov_addr = iov[iov_idx].iov_base + iov_off;

			if (!ret) {
				if (to_user)
					ret = copy_to_user(iov_addr, bv_addr,
							   bytes);

				if (from_user)
					ret = copy_from_user(bv_addr, iov_addr,
							     bytes);

				if (ret)
					ret = -EFAULT;
			}

			bv_len -= bytes;
			bv_addr += bytes;
			iov_addr += bytes;
			iov_off += bytes;

			if (iov[iov_idx].iov_len == iov_off) {
				iov_idx++;
				iov_off = 0;
			}
		}

		if (do_free_page)
			__free_page(bvec->bv_page);
	}

	return ret;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED))
		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
				     0, bmd->is_our_pages);
	bio_free_map_data(bmd);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(bio_uncopy_user);
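/*
 * How the pieces pair up, in sketch form: bio_copy_user() and
 * bio_copy_user_iov() below build a bio over freshly allocated bounce
 * pages, copying the user data in up front for a write; once the
 * request completes, bio_uncopy_user() above copies the bounce pages
 * back out to user space for a read and frees them.
 */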
/**
 * bio_copy_user_iov - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: the iovec.
 * @iov_count: number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      struct sg_iovec *iov, int iov_count,
			      int write_to_vm, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = 0;
	unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long)iov[i].iov_base;
		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		len += iov[i].iov_len;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (!write_to_vm)
		bio->bi_rw |= REQ_WRITE;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
		if (ret)
			goto cleanup;
	}

	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
	return bio;
cleanup:
	if (!map_data)
		bio_for_each_segment(bvec, bio, i)
			__free_page(bvec->bv_page);

	bio_put(bio);
out_bmd:
	bio_free_map_data(bmd);
	return ERR_PTR(ret);
}
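/*
 * A worked example of the nr_pages arithmetic used above (and again in
 * __bio_map_user_iov() below), assuming 4096-byte pages: a 5000-byte
 * iovec element starting 512 bytes into a page gives
 *
 *	start = uaddr >> PAGE_SHIFT;
 *	end   = (uaddr + 5000 + 4095) >> PAGE_SHIFT = start + 2;
 *
 * i.e. two pages, since bytes 512..5511 straddle one page boundary.
 */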
/**
 * bio_copy_user - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
			  unsigned long uaddr, unsigned int len,
			  int write_to_vm, gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_copy_user);

static struct bio *__bio_map_user_iov(struct request_queue *q,
				      struct block_device *bdev,
				      struct sg_iovec *iov, int iov_count,
				      int write_to_vm, gfp_t gfp_mask)
{
	int i, j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
					  write_to_vm, &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = uaddr & ~PAGE_MASK;
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
					    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			page_cache_release(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (!write_to_vm)
		bio->bi_rw |= REQ_WRITE;

	bio->bi_bdev = bdev;
	bio->bi_flags |= (1 << BIO_USER_MAPPED);
	return bio;

 out_unmap:
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			break;
		page_cache_release(pages[i]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}
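/*
 * Pairing note for the mapping path: the pages pinned above via
 * get_user_pages_fast() stay pinned for the duration of the I/O and
 * are released (after being re-dirtied for a READ) by bio_unmap_user()
 * further down.
 */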
/**
 * bio_map_user - map user address into bio
 * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm,
			 gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_map_user);

/**
 * bio_map_user_iov - map user sg_iovec table into bio
 * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @iov: the iovec.
 * @iov_count: number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
			     struct sg_iovec *iov, int iov_count,
			     int write_to_vm, gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
				 gfp_mask);
	if (IS_ERR(bio))
		return bio;

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);

	return bio;
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		page_cache_release(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}
EXPORT_SYMBOL(bio_unmap_user);

static void bio_map_kern_endio(struct bio *bio, int err)
{
	bio_put(bio);
}

static struct bio *__bio_map_kern(struct request_queue *q, void *data,
				  unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes)
			break;

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
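/*
 * Usage sketch for bio_map_kern() below (buf must be addressable via
 * virt_to_page(), i.e. not vmalloc memory; q, buf and len are
 * caller-provided):
 *
 *	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 * A partial mapping is refused with -EINVAL, see the bi_size check.
 */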
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_kern(q, data, len, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (bio->bi_size == len)
		return bio;

	/*
	 * Don't support partial mappings.
	 */
	bio_put(bio);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL(bio_map_kern);

static void bio_copy_kern_endio(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	const int read = bio_data_dir(bio) == READ;
	struct bio_map_data *bmd = bio->bi_private;
	int i;
	char *p = bmd->sgvecs[0].iov_base;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *addr = page_address(bvec->bv_page);
		int len = bmd->iovecs[i].bv_len;

		if (read)
			memcpy(p, addr, len);

		__free_page(bvec->bv_page);
		p += len;
	}

	bio_free_map_data(bmd);
	bio_put(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	struct bio *bio;
	struct bio_vec *bvec;
	int i;

	bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (!reading) {
		void *p = data;

		bio_for_each_segment(bvec, bio, i) {
			char *addr = page_address(bvec->bv_page);

			memcpy(addr, p, bvec->bv_len);
			p += bvec->bv_len;
		}
	}

	bio->bi_end_io = bio_copy_kern_endio;

	return bio;
}
EXPORT_SYMBOL(bio_copy_kern);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, pdflush) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
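/*
 * Putting the above together, a direct-IO read looks roughly like:
 *
 *	bio_set_pages_dirty(bio);	(dirty the pages up front)
 *	submit_bio(READ, bio);
 *	...
 *	bio_check_pages_dirty(bio);	(from completion; re-dirties via
 *					 the workqueue if the VM cleaned
 *					 anything in the meantime)
 */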
/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int nr_clean_pages = 0;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec[i].bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment(bvec, bi, i)
		flush_dcache_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif
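/*
 * A minimal bi_end_io handler, for illustration (my_end_io is a
 * made-up name; by the time it runs, bio_endio() below has already
 * normalized @error to 0 or a negative errno):
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		struct completion *done = bio->bi_private;
 *
 *		complete(done);
 *		bio_put(bio);
 *	}
 */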
/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 * @error: error, if any
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the
 *   preferred way to end I/O on a bio, it takes care of clearing
 *   BIO_UPTODATE on error. @error is 0 on success, and one of the
 *   established -Exxxx (-EIO, for instance) error values in case
 *   something went wrong. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io
 *   function.
 **/
void bio_endio(struct bio *bio, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (bio->bi_end_io)
		bio->bi_end_io(bio, error);
}
EXPORT_SYMBOL(bio_endio);

void bio_pair_release(struct bio_pair *bp)
{
	if (atomic_dec_and_test(&bp->cnt)) {
		struct bio *master = bp->bio1.bi_private;

		bio_endio(master, bp->error);
		mempool_free(bp, bp->bio2.bi_private);
	}
}
EXPORT_SYMBOL(bio_pair_release);

static void bio_pair_end_1(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

	if (err)
		bp->error = err;

	bio_pair_release(bp);
}

static void bio_pair_end_2(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

	if (err)
		bp->error = err;

	bio_pair_release(bp);
}

/*
 * split a bio - only worry about a bio with a single page in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
	struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);

	if (!bp)
		return bp;

	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
			  bi->bi_sector + first_sectors);

	BUG_ON(bi->bi_vcnt != 1);
	BUG_ON(bi->bi_idx != 0);
	atomic_set(&bp->cnt, 3);
	bp->error = 0;
	bp->bio1 = *bi;
	bp->bio2 = *bi;
	bp->bio2.bi_sector += first_sectors;
	bp->bio2.bi_size -= first_sectors << 9;
	bp->bio1.bi_size = first_sectors << 9;

	bp->bv1 = bi->bi_io_vec[0];
	bp->bv2 = bi->bi_io_vec[0];
	bp->bv2.bv_offset += first_sectors << 9;
	bp->bv2.bv_len -= first_sectors << 9;
	bp->bv1.bv_len = first_sectors << 9;

	bp->bio1.bi_io_vec = &bp->bv1;
	bp->bio2.bi_io_vec = &bp->bv2;

	bp->bio1.bi_max_vecs = 1;
	bp->bio2.bi_max_vecs = 1;

	bp->bio1.bi_end_io = bio_pair_end_1;
	bp->bio2.bi_end_io = bio_pair_end_2;

	bp->bio1.bi_private = bi;
	bp->bio2.bi_private = bio_split_pool;

	if (bio_integrity(bi))
		bio_integrity_split(bi, bp, first_sectors);

	return bp;
}
EXPORT_SYMBOL(bio_split);

/**
 * bio_sector_offset - Find hardware sector offset in bio
 * @bio: bio to inspect
 * @index: bio_vec index
 * @offset: offset in bv_page
 *
 * Return the number of hardware sectors between beginning of bio
 * and an end point indicated by a bio_vec index and an offset
 * within that vector's page.
 */
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
			   unsigned int offset)
{
	unsigned int sector_sz;
	struct bio_vec *bv;
	sector_t sectors;
	int i;

	sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
	sectors = 0;

	if (index >= bio->bi_idx)
		index = bio->bi_vcnt - 1;

	__bio_for_each_segment(bv, bio, i, 0) {
		if (i == index) {
			if (offset > bv->bv_offset)
				sectors += (offset - bv->bv_offset) / sector_sz;
			break;
		}

		sectors += bv->bv_len / sector_sz;
	}

	return sectors;
}
EXPORT_SYMBOL(bio_sector_offset);
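/*
 * Note on bio_split() above: the pair's count starts at 3 because three
 * parties must be done with it before the master bio can complete: the
 * two split halves (via bio_pair_end_1/bio_pair_end_2) and the caller,
 * who drops its reference with bio_pair_release(). Only when the count
 * hits zero is bio_endio() run on the master and the pair returned to
 * bio_split_pool.
 */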
/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;

	bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
	if (!bs->bvec_pool)
		return -ENOMEM;

	return 0;
}

static void biovec_free_pools(struct bio_set *bs)
{
	mempool_destroy(bs->bvec_pool);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	bioset_integrity_free(bs);
	biovec_free_pools(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

/**
 * bioset_create - Create a bio_set
 * @pool_size: Number of bio and bio_vecs to cache in the mempool
 * @front_pad: Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (!biovec_create_pools(bs, pool_size))
		return bs;

bad:
	bioset_free(bs);
	return NULL;
}
EXPORT_SYMBOL(bioset_create);

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
						     sizeof(struct bio_pair));
	if (!bio_split_pool)
		panic("bio: can't create split pool\n");

	return 0;
}
subsys_initcall(init_bio);
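/*
 * Illustration of the front_pad facility documented at bioset_create()
 * (struct my_req is a made-up type; the bio must be the last member):
 *
 *	struct my_req {
 *		... driver-private fields ...
 *		struct bio bio;
 *	};
 *
 *	bs = bioset_create(pool_size, offsetof(struct my_req, bio));
 *
 * A bio allocated from such a set then sits front_pad bytes into the
 * allocation, so container_of(bio, struct my_req, bio) recovers the
 * private state without a second allocation.
 */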