Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.6-rc7, 966 lines, 24 kB

/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the same 2 started one after another.
 */
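
/*
 * Minimal usage sketch (illustrative only, not part of this file; error
 * handling elided). A caller prefetches a key range and waits for the
 * readahead to complete:
 *
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 *
 * key_start and key_end are caller-chosen struct btrfs_key bounds. To let
 * the readahead run in the background instead, call btrfs_reada_detach(rc)
 * in place of btrfs_reada_wait(rc).
 */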

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	u32			blocksize;
	int			err;
	struct list_head	extctl;
	struct kref		refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	struct btrfs_device	*scheduled_for;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation);
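
/*
 * __readahead_hook is called once the read of an extent buffer completes
 * (on error, eb is NULL). For an inner node it walks all contained
 * node/leaf pointers and enqueues those that fall into a requested key
 * range, then drops the extctl records together with their references.
 */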
/* recurses */
/* in case of err, eb might be NULL */
static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			    u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct reada_extent *re;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head list;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct btrfs_device *for_dev;

	if (eb)
		level = btrfs_header_level(eb);

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		kref_get(&re->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (!re)
		return -1;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	for_dev = re->scheduled_for;
	re->scheduled_for = NULL;
	spin_unlock(&re->lock);

	if (err == 0) {
		nritems = level ? btrfs_header_nritems(eb) : 0;
		generation = btrfs_header_generation(eb);
		/*
		 * FIXME: currently we just set nritems to 0 if this is a leaf,
		 * effectively ignoring the content. In a next step we could
		 * trigger more readahead depending on the content, e.g.
		 * fetch the checksums for the extents in the leaf.
		 */
	} else {
		/*
		 * this is the error case, the extent buffer has not been
		 * read correctly. We won't access anything from it and
		 * just cleanup our data structures. Effectively this will
		 * cut the branch below this node from read ahead.
		 */
		nritems = 0;
		generation = 0;
	}

	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				printk(KERN_DEBUG "generation mismatch for "
						"(%llu,%d,%llu) %llu != %llu\n",
				       key.objectid, key.type, key.offset,
				       rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key,
						level - 1, n_gen);
		}
	}
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
	reada_extent_put(fs_info, re);	/* our ref */
	if (for_dev)
		atomic_dec(&for_dev->reada_in_flight);

	return 0;
}

/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err)
{
	int ret;

	ret = __readahead_hook(root, eb, start, err);

	reada_start_machine(root->fs_info);

	return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);
	if (ret == 1)
		kref_get(&zone->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (ret == 1) {
		if (logical >= zone->start && logical < zone->end)
			return zone;
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_NOFS);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_CACHE_SHIFT, 1);
		if (ret == 1)
			kref_get(&zone->refcnt);
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}
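
/*
 * Look up the reada_extent for a logical block, creating it and registering
 * it in fs_info->reada_tree and in each mirror device's reada_extents tree
 * if it doesn't exist yet. Returns the extent with a reference held, or
 * NULL on failure.
 */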
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top, int level)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int nzones = 0;
	int i;
	unsigned long index = logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		kref_get(&re->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_NOFS);
	if (!re)
		return NULL;

	blocksize = btrfs_level_size(root, level);
	re->logical = logical;
	re->blocksize = blocksize;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	kref_init(&re->refcnt);

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		printk(KERN_ERR "btrfs readahead: more than %d copies not "
				"supported\n", BTRFS_MAX_MIRRORS);
		goto error;
	}

	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;
		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			break;

		re->zones[nzones] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	re->nzones = nzones;
	if (nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		BUG_ON(!re_exist);
		kref_get(&re_exist->refcnt);
		spin_unlock(&fs_info->reada_lock);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		goto error;
	}
	prev_dev = NULL;
	for (i = 0; i < nzones; ++i) {
		dev = bbio->stripes[i].dev;
		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--i >= 0) {
				dev = bbio->stripes[i].dev;
				BUG_ON(dev == NULL);
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			goto error;
		}
	}
	spin_unlock(&fs_info->reada_lock);

	kfree(bbio);
	return re;

error:
	while (nzones) {
		struct reada_zone *zone;

		--nzones;
		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	kfree(bbio);
	kfree(re);
	return re_exist;
}
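
/*
 * reada_kref_dummy is deliberately a no-op release: reada_extent_put uses
 * it with kref_put so that the final-reference check and the radix tree
 * removals happen together under fs_info->reada_lock, while the actual
 * freeing is done after the lock is dropped.
 */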
static void reada_kref_dummy(struct kref *kr)
{
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (!kref_put(&re->refcnt, reada_kref_dummy)) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->scheduled_for)
		atomic_dec(&re->scheduled_for->reada_in_flight);

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_CACHE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top, level); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_NOFS);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -1;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}
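
/*
 * Zone locking: while a device reads from its current zone, the matching
 * zones on all peer devices (the other mirrors of the same block group) are
 * marked locked. reada_pick_zone prefers unlocked zones, so that no two
 * disks read both sides of a mirror at the same time.
 */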

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	u32 blocksize;
	int ret;
	int i;
	int need_kick = 0;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + re->blocksize;
	kref_get(&re->refcnt);

	spin_unlock(&fs_info->reada_lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;
	blocksize = re->blocksize;

	spin_lock(&re->lock);
	if (re->scheduled_for == NULL) {
		re->scheduled_for = dev;
		need_kick = 1;
	}
	spin_unlock(&re->lock);

	reada_extent_put(fs_info, re);

	if (!need_kick)
		return 0;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
				       mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i)
		reada_start_machine(fs_info);
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	rmw->work.func = reada_start_machine_worker;
	rmw->fs_info = fs_info;

	btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG " zone %llu-%llu elems %llu locked "
				"%d devs", zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				printk(KERN_CONT " curr off %llu",
					device->reada_next - zone->start);
			printk(KERN_CONT "\n");
			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG
				" re: logical %llu size %u empty %d for %lld",
				re->logical, re->blocksize,
				list_empty(&re->extctl), re->scheduled_for ?
				re->scheduled_for->devid : -1);

			for (i = 0; i < re->nzones; ++i) {
				printk(KERN_CONT " zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					printk(KERN_CONT " %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			printk(KERN_CONT "\n");
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled_for) {
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			continue;
		}
		printk(KERN_DEBUG
			"re: logical %llu size %u list empty %d for %lld",
			re->logical, re->blocksize, list_empty(&re->extctl),
			re->scheduled_for ? re->scheduled_for->devid : -1);
		for (i = 0; i < re->nzones; ++i) {
			printk(KERN_CONT " zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		printk(KERN_CONT "\n");
		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int level;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	level = btrfs_header_level(node);
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	reada_add_block(rc, start, &max_key, level, generation);

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}