Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.26-rc3 · 1374 lines · 33 kB
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#define BIO_POOL_SIZE 2

static struct kmem_cache *bio_slab __read_mostly;

#define BIOVEC_NR_POOLS 6

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2
mempool_t *bio_split_pool __read_mostly;

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */

#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
struct bio_set {
	mempool_t *bio_pool;
	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
static struct bio_set *fs_bio_set;

static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
		case   1        : *idx = 0; break;
		case   2 ...   4: *idx = 1; break;
		case   5 ...  16: *idx = 2; break;
		case  17 ...  64: *idx = 3; break;
		case  65 ... 128: *idx = 4; break;
		case 129 ... BIO_MAX_PAGES: *idx = 5; break;
		default:
			return NULL;
	}
	/*
	 * idx now points to the pool we want to allocate from
	 */

	bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
	if (bvl) {
		struct biovec_slab *bp = bvec_slabs + *idx;

		memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
	}

	return bvl;
}
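/*
 * Editor's note (not part of the original file): a short worked example
 * of the pool selection above.  A request for nr = 10 vecs falls into the
 * "5 ... 16" case, so *idx becomes 2 and the allocation is served from
 * the "biovec-16" slab/mempool; the caller gets 16 bio_vecs and records
 * the rounded-up capacity in bio->bi_max_vecs.
 */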
void bio_free(struct bio *bio, struct bio_set *bio_set)
{
	if (bio->bi_io_vec) {
		const int pool_idx = BIO_POOL_IDX(bio);

		BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);

		mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
	}

	mempool_free(bio, bio_set->bio_pool);
}

/*
 * default destructor for a bio allocated with bio_alloc_bioset()
 */
static void bio_fs_destructor(struct bio *bio)
{
	bio_free(bio, fs_bio_set);
}

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	bio->bi_flags = 1 << BIO_UPTODATE;
	atomic_set(&bio->bi_cnt, 1);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:	the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from
 *
 * Description:
 *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free.
 *
 *   allocate bio and iovecs from the memory pools specified by the
 *   bio_set structure.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);

	if (likely(bio)) {
		struct bio_vec *bvl = NULL;

		bio_init(bio);
		if (likely(nr_iovecs)) {
			unsigned long uninitialized_var(idx);

			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
			if (unlikely(!bvl)) {
				mempool_free(bio, bs->bio_pool);
				bio = NULL;
				goto out;
			}
			bio->bi_flags |= idx << BIO_POOL_OFFSET;
			bio->bi_max_vecs = bvec_slabs[idx].nr_vecs;
		}
		bio->bi_io_vec = bvl;
	}
out:
	return bio;
}

struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);

	if (bio)
		bio->bi_destructor = bio_fs_destructor;

	return bio;
}
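/*
 * Editor's sketch (not in the original file): typical use of bio_alloc()
 * for a simple synchronous single-page read.  The helper names
 * example_end_io/example_read_page are hypothetical; error handling is
 * trimmed to the essentials.
 *
 *	static void example_end_io(struct bio *bio, int error)
 *	{
 *		complete(bio->bi_private);
 *		bio_put(bio);
 *	}
 *
 *	static int example_read_page(struct block_device *bdev,
 *				     sector_t sector, struct page *page)
 *	{
 *		DECLARE_COMPLETION_ONSTACK(done);
 *		struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *		if (!bio)
 *			return -ENOMEM;
 *		bio->bi_bdev = bdev;	// must be set before bio_add_page()
 *		bio->bi_sector = sector;
 *		bio->bi_end_io = example_end_io;
 *		bio->bi_private = &done;
 *		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 *			bio_put(bio);
 *			return -EIO;
 *		}
 *		submit_bio(READ, bio);
 *		wait_for_completion(&done);
 *		return 0;
 *	}
 */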
void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i) {
		char *data = bvec_kmap_irq(bv, &flags);
		memset(data, 0, bv->bv_len);
		flush_dcache_page(bv->bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:	bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc or bio_get. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

	/*
	 * last put frees it
	 */
	if (atomic_dec_and_test(&bio->bi_cnt)) {
		bio->bi_next = NULL;
		bio->bi_destructor(bio);
	}
}

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}

inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_hw_segments;
}

/**
 *	__bio_clone	-	clone a bio
 *	@bio: destination bio
 *	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 *	bio will be one.
 */
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
		bio_src->bi_max_vecs * sizeof(struct bio_vec));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_sector = bio_src->bi_sector;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_flags |= 1 << BIO_CLONED;
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_vcnt = bio_src->bi_vcnt;
	bio->bi_size = bio_src->bi_size;
	bio->bi_idx = bio_src->bi_idx;
}

/**
 *	bio_clone	-	clone a bio
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *
 *	Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);

	if (b) {
		b->bi_destructor = bio_fs_destructor;
		__bio_clone(b, bio);
	}

	return b;
}
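/*
 * Editor's sketch (not in the original file): the usual bio_clone()
 * pattern in a stacking driver: clone the bio, point the clone at a
 * different device/offset, and give it a new completion handler while
 * the original bio waits for the clone to finish.  remap_end_io,
 * target_bdev and target_offset are hypothetical.
 *
 *	struct bio *clone = bio_clone(bio, GFP_NOIO);
 *
 *	if (!clone)
 *		return -ENOMEM;
 *	clone->bi_bdev = target_bdev;
 *	clone->bi_sector = bio->bi_sector + target_offset;
 *	clone->bi_end_io = remap_end_io;
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 */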
/**
 *	bio_get_nr_vecs		- return approx number of vecs
 *	@bdev:  I/O target
 *
 *	Return the approximate number of pages we can send to this target.
 *	There's no guarantee that you will be able to fit this number of pages
 *	into a bio, it does not account for dynamic restrictions that vary
 *	on offset.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int nr_pages;

	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages > q->max_phys_segments)
		nr_pages = q->max_phys_segments;
	if (nr_pages > q->max_hw_segments)
		nr_pages = q->max_hw_segments;

	return nr_pages;
}

static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
			  *page, unsigned int len, unsigned int offset,
			  unsigned short max_sectors)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_size + len) >> 9) > max_sectors)
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			if (q->merge_bvec_fn &&
			    q->merge_bvec_fn(q, bio, prev) < len) {
				prev->bv_len -= len;
				return 0;
			}

			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * we might lose a segment or two here, but rather that than
	 * make this too complex.
	 */

	while (bio->bi_phys_segments >= q->max_phys_segments
	       || bio->bi_hw_segments >= q->max_hw_segments
	       || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {

		if (retried_segments)
			return 0;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;

	/*
	 * if queue has other restrictions (eg varying max sector size
	 * depending on offset), it can specify a merge_bvec_fn in the
	 * queue to get further control
	 */
	if (q->merge_bvec_fn) {
		/*
		 * merge_bvec_fn() returns number of bytes it can accept
		 * at this offset
		 */
		if (q->merge_bvec_fn(q, bio, bvec) < len) {
			bvec->bv_page = NULL;
			bvec->bv_len = 0;
			bvec->bv_offset = 0;
			return 0;
		}
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) ||
	    BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_hw_segments++;
 done:
	bio->bi_size += len;
	return len;
}

/**
 *	bio_add_pc_page	-	attempt to add page to bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block
 *	device limitations. The target block device must allow bios
 *	smaller than PAGE_SIZE, so it is always possible to add a single
 *	page to an empty bio. This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
		    unsigned int len, unsigned int offset)
{
	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
}
/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block
 *	device limitations. The target block device must allow bios
 *	smaller than PAGE_SIZE, so it is always possible to add a single
 *	page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
}
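/*
 * Editor's sketch (not in the original file): because bio_add_page()
 * returns the number of bytes actually added (len on success, 0 when the
 * page cannot be merged in), callers that build multi-page bios loop and
 * submit a full bio before starting a new one.  nr_vecs, rw and
 * next_sector are hypothetical locals.
 *
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 *		submit_bio(rw, bio);			// this one is full
 *		bio = bio_alloc(GFP_NOIO, nr_vecs);	// start the next bio
 *		bio->bi_bdev = bdev;
 *		bio->bi_sector = next_sector;
 *		bio_add_page(bio, page, PAGE_SIZE, 0);	// cannot fail on an empty bio
 *	}
 */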
struct bio_map_data {
	struct bio_vec *iovecs;
	int nr_sgvecs;
	struct sg_iovec *sgvecs;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
			     struct sg_iovec *iov, int iov_count)
{
	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
	bmd->nr_sgvecs = iov_count;
	bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
	kfree(bmd->iovecs);
	kfree(bmd->sgvecs);
	kfree(bmd);
}

static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count)
{
	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);

	if (!bmd)
		return NULL;

	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
	if (!bmd->iovecs) {
		kfree(bmd);
		return NULL;
	}

	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, GFP_KERNEL);
	if (bmd->sgvecs)
		return bmd;

	kfree(bmd->iovecs);
	kfree(bmd);
	return NULL;
}

static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
			  int uncopy)
{
	int ret = 0, i;
	struct bio_vec *bvec;
	int iov_idx = 0;
	unsigned int iov_off = 0;
	int read = bio_data_dir(bio) == READ;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *bv_addr = page_address(bvec->bv_page);
		unsigned int bv_len = bvec->bv_len;

		while (bv_len && iov_idx < iov_count) {
			unsigned int bytes;
			char *iov_addr;

			bytes = min_t(unsigned int,
				      iov[iov_idx].iov_len - iov_off, bv_len);
			iov_addr = iov[iov_idx].iov_base + iov_off;

			if (!ret) {
				if (!read && !uncopy)
					ret = copy_from_user(bv_addr, iov_addr,
							     bytes);
				if (read && uncopy)
					ret = copy_to_user(iov_addr, bv_addr,
							   bytes);

				if (ret)
					ret = -EFAULT;
			}

			bv_len -= bytes;
			bv_addr += bytes;
			iov_addr += bytes;
			iov_off += bytes;

			if (iov[iov_idx].iov_len == iov_off) {
				iov_idx++;
				iov_off = 0;
			}
		}

		if (uncopy)
			__free_page(bvec->bv_page);
	}

	return ret;
}

/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret;

	ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1);

	bio_free_map_data(bmd);
	bio_put(bio);
	return ret;
}

/**
 *	bio_copy_user_iov	-	copy user data to bio
 *	@q: destination block queue
 *	@iov:	the iovec.
 *	@iov_count:	number of elements in the iovec
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a call to
 *	bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
			      int iov_count, int write_to_vm)
{
	struct bio_map_data *bmd;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = 0;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long)iov[i].iov_base;
		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		nr_pages += end - start;
		len += iov[i].iov_len;
	}

	bmd = bio_alloc_map_data(nr_pages, iov_count);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	bio = bio_alloc(GFP_KERNEL, nr_pages);
	if (!bio)
		goto out_bmd;

	bio->bi_rw |= (!write_to_vm << BIO_RW);

	ret = 0;
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | GFP_KERNEL);
		if (!page) {
			ret = -ENOMEM;
			break;
		}

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (!write_to_vm) {
		ret = __bio_copy_iov(bio, iov, iov_count, 0);
		if (ret)
			goto cleanup;
	}

	bio_set_map_data(bmd, bio, iov, iov_count);
	return bio;
cleanup:
	bio_for_each_segment(bvec, bio, i)
		__free_page(bvec->bv_page);

	bio_put(bio);
out_bmd:
	bio_free_map_data(bmd);
	return ERR_PTR(ret);
}

/**
 *	bio_copy_user	-	copy user data to bio
 *	@q: destination block queue
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a call to
 *	bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
			  unsigned int len, int write_to_vm)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_copy_user_iov(q, &iov, 1, write_to_vm);
}
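/*
 * Editor's sketch (not in the original file): the copy/uncopy pairing.
 * A caller that cannot DMA straight into user memory bounces through
 * kernel pages; on completion bio_uncopy_user() copies the data back to
 * user space (for reads) and frees the bounce pages.  submit_and_wait()
 * is a hypothetical stand-in for the driver's submission path.
 *
 *	struct bio *bio = bio_copy_user(q, uaddr, len, reading);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	ret = submit_and_wait(q, bio);
 *	if (bio_uncopy_user(bio))	// also drops the bio reference
 *		ret = ret ? ret : -EFAULT;
 */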
static struct bio *__bio_map_user_iov(struct request_queue *q,
				      struct block_device *bdev,
				      struct sg_iovec *iov, int iov_count,
				      int write_to_vm)
{
	int i, j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_alloc(GFP_KERNEL, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		down_read(&current->mm->mmap_sem);
		ret = get_user_pages(current, current->mm, uaddr,
				     local_nr_pages,
				     write_to_vm, 0, &pages[cur_page], NULL);
		up_read(&current->mm->mmap_sem);

		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = uaddr & ~PAGE_MASK;
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
			    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			page_cache_release(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (!write_to_vm)
		bio->bi_rw |= (1 << BIO_RW);

	bio->bi_bdev = bdev;
	bio->bi_flags |= (1 << BIO_USER_MAPPED);
	return bio;

 out_unmap:
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			break;
		page_cache_release(pages[i]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

/**
 *	bio_map_user	-	map user address into bio
 *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
}
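/*
 * Editor's sketch (not in the original file): zero-copy mapping of a user
 * buffer, SG_IO style.  The pages are pinned by get_user_pages() inside
 * __bio_map_user_iov(), so the pairing rule is map, submit, wait, then
 * bio_unmap_user().  submit_and_wait() is again a hypothetical stand-in.
 *
 *	struct bio *bio = bio_map_user(q, bdev, uaddr, len, reading);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);	// e.g. -EINVAL on misaligned buffers
 *	ret = submit_and_wait(q, bio);
 *	bio_unmap_user(bio);		// dirties (on read) and unpins pages
 */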
/**
 *	bio_map_user_iov - map user sg_iovec table into bio
 *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@iov:	the iovec.
 *	@iov_count:	number of elements in the iovec
 *	@write_to_vm: bool indicating writing to pages or not
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
			     struct sg_iovec *iov, int iov_count,
			     int write_to_vm)
{
	struct bio *bio;

	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);

	if (IS_ERR(bio))
		return bio;

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);

	return bio;
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		page_cache_release(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user(). Must be called from
 *	process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio, int err)
{
	bio_put(bio);
}


static struct bio *__bio_map_kern(struct request_queue *q, void *data,
				  unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_alloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes)
			break;

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_kern(q, data, len, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (bio->bi_size == len)
		return bio;

	/*
	 * Don't support partial mappings.
	 */
	bio_put(bio);
	return ERR_PTR(-EINVAL);
}
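/*
 * Editor's sketch (not in the original file): bio_map_kern() only works
 * for buffers whose pages can be reached with virt_to_page(), i.e.
 * kmalloc()ed or otherwise direct-mapped memory, not vmalloc() space.
 * A caller mapping a kernel buffer for a packet-command style request
 * might do:
 *
 *	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);	// -EINVAL if only partially mappable
 *	...attach the bio to a request and submit it...
 */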
static void bio_copy_kern_endio(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	const int read = bio_data_dir(bio) == READ;
	char *p = bio->bi_private;
	int i;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *addr = page_address(bvec->bv_page);

		if (read && !err)
			memcpy(p, addr, bvec->bv_len);

		__free_page(bvec->bv_page);
		p += bvec->bv_len;
	}

	bio_put(bio);
}

/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	copy the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	struct bio *bio;
	struct bio_vec *bvec;
	int i, ret;

	bio = bio_alloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page) {
			ret = -ENOMEM;
			goto cleanup;
		}

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
			ret = -EINVAL;
			goto cleanup;
		}

		len -= bytes;
	}

	if (!reading) {
		void *p = data;

		bio_for_each_segment(bvec, bio, i) {
			char *addr = page_address(bvec->bv_page);

			memcpy(addr, p, bvec->bv_len);
			p += bvec->bv_len;
		}
	}

	bio->bi_private = data;
	bio->bi_end_io = bio_copy_kern_endio;
	return bio;
cleanup:
	bio_for_each_segment(bvec, bio, i)
		__free_page(bvec->bv_page);

	bio_put(bio);

	return ERR_PTR(ret);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, pdflush) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
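/*
 * Editor's sketch (not in the original file): how a direct-IO read path
 * uses the pair described above: dirty the pinned user pages before
 * submission, then have the completion path hand the bio to
 * bio_check_pages_dirty(), which re-dirties any page the VM cleaned in
 * the meantime and drops the page and bio references.
 *
 *	// submission (process context):
 *	bio_set_pages_dirty(bio);
 *	submit_bio(READ, bio);
 *
 *	// completion (possibly interrupt context):
 *	bio_check_pages_dirty(bio);	// owns the bio from here on
 */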
/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int nr_clean_pages = 0;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec[i].bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}
/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 * @error:	error, if any
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the
 *   preferred way to end I/O on a bio, it takes care of clearing
 *   BIO_UPTODATE on error. @error is 0 on success, and one of the
 *   established -Exxxx (-EIO, for instance) error values in case
 *   something went wrong. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io
 *   function.
 **/
void bio_endio(struct bio *bio, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (bio->bi_end_io)
		bio->bi_end_io(bio, error);
}

void bio_pair_release(struct bio_pair *bp)
{
	if (atomic_dec_and_test(&bp->cnt)) {
		struct bio *master = bp->bio1.bi_private;

		bio_endio(master, bp->error);
		mempool_free(bp, bp->bio2.bi_private);
	}
}

static void bio_pair_end_1(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

	if (err)
		bp->error = err;

	bio_pair_release(bp);
}

static void bio_pair_end_2(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

	if (err)
		bp->error = err;

	bio_pair_release(bp);
}

/*
 * split a bio - only worry about a bio with a single page
 * in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
{
	struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO);

	if (!bp)
		return bp;

	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
			      bi->bi_sector + first_sectors);

	BUG_ON(bi->bi_vcnt != 1);
	BUG_ON(bi->bi_idx != 0);
	atomic_set(&bp->cnt, 3);
	bp->error = 0;
	bp->bio1 = *bi;
	bp->bio2 = *bi;
	bp->bio2.bi_sector += first_sectors;
	bp->bio2.bi_size -= first_sectors << 9;
	bp->bio1.bi_size = first_sectors << 9;

	bp->bv1 = bi->bi_io_vec[0];
	bp->bv2 = bi->bi_io_vec[0];
	bp->bv2.bv_offset += first_sectors << 9;
	bp->bv2.bv_len -= first_sectors << 9;
	bp->bv1.bv_len = first_sectors << 9;

	bp->bio1.bi_io_vec = &bp->bv1;
	bp->bio2.bi_io_vec = &bp->bv2;

	bp->bio1.bi_max_vecs = 1;
	bp->bio2.bi_max_vecs = 1;

	bp->bio1.bi_end_io = bio_pair_end_1;
	bp->bio2.bi_end_io = bio_pair_end_2;

	bp->bio1.bi_private = bi;
	bp->bio2.bi_private = pool;

	return bp;
}
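/*
 * Editor's sketch (not in the original file): how a stacking driver in
 * the raid0 mould splits a bio that straddles a chunk boundary and feeds
 * both halves back into the block layer.  chunk_sects is a hypothetical
 * power-of-two chunk size in sectors.
 *
 *	sector_t sectors_left = chunk_sects -
 *		(bio->bi_sector & (chunk_sects - 1));
 *
 *	if (bio->bi_size >> 9 > sectors_left) {
 *		struct bio_pair *bp = bio_split(bio, bio_split_pool,
 *						sectors_left);
 *		generic_make_request(&bp->bio1);
 *		generic_make_request(&bp->bio2);
 *		bio_pair_release(bp);	// drops the third reference taken by
 *					// bio_split(); the master bio
 *					// completes when both halves do
 *	}
 */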
/*
 * create memory pools for biovecs in a bio_set.
 * use the global biovec slabs created for general use.
 */
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		struct biovec_slab *bp = bvec_slabs + i;
		mempool_t **bvp = bs->bvec_pools + i;

		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
		if (!*bvp)
			return -ENOMEM;
	}
	return 0;
}

static void biovec_free_pools(struct bio_set *bs)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		mempool_t *bvp = bs->bvec_pools[i];

		if (bvp)
			mempool_destroy(bvp);
	}

}

void bioset_free(struct bio_set *bs)
{
	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	biovec_free_pools(bs);

	kfree(bs);
}

struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
{
	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);

	if (!bs)
		return NULL;

	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (!biovec_create_pools(bs, bvec_pool_size))
		return bs;

bad:
	bioset_free(bs);
	return NULL;
}

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
						     sizeof(struct bio_pair));
	if (!bio_split_pool)
		panic("bio: can't create split pool\n");

	return 0;
}

subsys_initcall(init_bio);

EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_free);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_add_pc_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_map_kern);
EXPORT_SYMBOL(bio_copy_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);
EXPORT_SYMBOL(bio_copy_user);
EXPORT_SYMBOL(bio_uncopy_user);
EXPORT_SYMBOL(bioset_create);
EXPORT_SYMBOL(bioset_free);
EXPORT_SYMBOL(bio_alloc_bioset);
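/*
 * Editor's sketch (not in the original file): a driver that must be able
 * to allocate bios even under memory pressure creates a private bio_set
 * rather than competing for fs_bio_set.  Bios from a private set need a
 * destructor that frees back into that set; my_bio_set and my_destructor
 * are hypothetical.
 *
 *	static struct bio_set *my_bio_set;
 *
 *	static void my_destructor(struct bio *bio)
 *	{
 *		bio_free(bio, my_bio_set);
 *	}
 *
 *	my_bio_set = bioset_create(8, 4);	// 8 bios, 4 entries per bvec pool
 *	if (!my_bio_set)
 *		return -ENOMEM;
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);
 *	bio->bi_destructor = my_destructor;
 *	...
 *	bioset_free(my_bio_set);		// on teardown
 */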