/*
 * Linux kernel mirror (for testing):
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v4.14
 */

/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the same 2 started one after another.
 */
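
/*
 * Usage example (a minimal sketch, not part of the original file; the key
 * range shown is hypothetical): a caller starts a readahead over a whole
 * tree, then either waits for it to finish or detaches it to the background:
 *
 *	struct reada_control *rc;
 *	struct btrfs_key key_start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key key_end = { .objectid = (u64)-1, .type = (u8)-1,
 *				     .offset = (u64)-1 };
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);	// or btrfs_reada_detach(rc)
 */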

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	int			scheduled;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     int err)
{
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);

	/*
	 * this is the error case, the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just cleanup our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. In a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!btrfs_header_level(eb))
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key, n_gen);
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}

	return;
}

int btree_readahead_hook(struct extent_buffer *eb, int err)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       eb->start >> PAGE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}
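
/*
 * Note with example (a sketch restating the convention used above, nothing
 * new): every radix tree in this file is keyed by a byte offset shifted
 * down by PAGE_SHIFT. Extents are indexed by their logical start address,
 * zones by their end, so looking up the extent for a buffer is simply:
 *
 *	unsigned long index = eb->start >> PAGE_SHIFT;
 *
 *	spin_lock(&fs_info->reada_lock);
 *	re = radix_tree_lookup(&fs_info->reada_tree, index);
 *	if (re)
 *		re->refcnt++;	// re->refcnt is protected by reada_lock
 *	spin_unlock(&fs_info->reada_lock);
 */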

static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (!zone)
		return NULL;

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		kfree(zone);
		return NULL;
	}

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);
	radix_tree_preload_end();

	return zone;
}
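
/*
 * Example (generic sketch of the pattern used above; the names are
 * placeholders): radix_tree_preload() pre-allocates tree nodes so that the
 * later insert under a spinlock cannot sleep. -EEXIST means another thread
 * inserted first, so the freshly allocated object is freed and the winner
 * is looked up and referenced instead:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return NULL;			// no memory, give up
 *	spin_lock(&lock);
 *	ret = radix_tree_insert(&tree, index, new_item);
 *	if (ret == -EEXIST) {
 *		kfree(new_item);		// lost the race
 *		item = radix_tree_lookup(&tree, index);
 *		// take a reference on "item" before dropping the lock
 *	}
 *	spin_unlock(&lock);
 *	radix_tree_preload_end();
 */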

static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
					      u64 logical,
					      struct btrfs_key *top)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return NULL;

	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = fs_info->nodesize;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &length, &bbio, 0);
	if (ret || !bbio || length < fs_info->nodesize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(fs_info,
			  "readahead: more than %d copies not supported",
			  BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;

		/* cannot read ahead on missing device. */
		if (!dev->bdev)
			continue;

		zone = reada_find_zone(dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		goto error;

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		radix_tree_preload_end();
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		radix_tree_preload_end();
		goto error;
	}
	radix_tree_preload_end();
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev)
			continue;

		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
			goto error;
		}
		have_zone = 1;
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}
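
/*
 * Example (generic sketch of the rollback used above; the names are
 * placeholders): the "all or nothing" insertion is a manual multi-tree
 * transaction. On the first failed insert, every tree already touched is
 * walked backwards and cleaned up again, so the extent is never published
 * partially:
 *
 *	for (i = 0; i < n; ++i) {
 *		if (radix_tree_insert(&tree[i], index, item)) {
 *			while (--i >= 0)
 *				radix_tree_delete(&tree[i], index);
 *			goto error;
 *		}
 *	}
 */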

static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation)
{
	struct btrfs_fs_info *fs_info = rc->fs_info;
	struct reada_extent *re;
	struct reada_extctl *rec;

	/* takes one ref */
	re = reada_find_extent(fs_info, logical, top);
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		reada_extent_put(fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}
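
/*
 * Note with example (restating the refcounting convention used above): a
 * zone holds one kref for "has elements". Dropping an element is therefore
 * done in two steps: the elements-ref may be put under zone->lock, because
 * a temporary kref_get() guarantees it is not the last reference, while the
 * temporary reference itself is put under fs_info->reada_lock, since
 * reada_zone_release() deletes the zone from the device's radix tree:
 *
 *	kref_get(&zone->refcnt);		// keep the zone alive
 *	spin_lock(&zone->lock);
 *	if (--zone->elems == 0)
 *		kref_put(&zone->refcnt, reada_zone_release);	// not last
 *	spin_unlock(&zone->lock);
 *
 *	spin_lock(&fs_info->reada_lock);	// may be the last ref
 *	kref_put(&zone->refcnt, reada_zone_release);
 *	spin_unlock(&fs_info->reada_lock);
 */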

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}
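
/*
 * Worked example (hypothetical numbers): a device has zone A with
 * elems = 12 but locked = 1 (its mirror is currently read by another disk)
 * and zone B with elems = 3, unlocked. reada_pick_zone() picks B: any
 * unlocked zone beats every locked one, and only if all zones are locked is
 * the fullest locked zone used as a fallback. Picking a zone also locks its
 * peer zones on all other devices via reada_peer_zones_set_lock(), which is
 * what keeps two disks from reading both sides of one mirror.
 */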

static int reada_start_machine_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					     dev->reada_next >> PAGE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;

}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&fs_info->reada_works_cnt);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(device);
		}
		mutex_unlock(&fs_devices->device_list_mutex);
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}
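
/*
 * Example (back-of-the-envelope with the constants above): each device
 * accepts at most MAX_IN_FLIGHT (6) outstanding readahead reads, one pass
 * of the loop above hands off to workers after roughly 10000 dispatched
 * reads, and no further workers are queued once fs_info->reada_works_cnt
 * exceeds BTRFS_MAX_MIRRORS * 2. With e.g. 4 devices, up to 4 * 6 = 24
 * reads can thus be in flight at once while the remaining work is spread
 * over the worker threads.
 */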

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
			    atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			pr_debug("  zone %llu-%llu elems %llu locked %d devs",
				 zone->start, zone->end, zone->elems,
				 zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				pr_cont(" %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				pr_cont(" curr off %llu",
					device->reada_next - zone->start);
			pr_cont("\n");
			index = (zone->end >> PAGE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
				 re->logical, fs_info->nodesize,
				 list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				pr_cont(" zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					pr_cont(" %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			pr_cont("\n");
			index = (re->logical >> PAGE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_SHIFT) + 1;
			continue;
		}
		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
			 re->logical, fs_info->nodesize,
			 list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			pr_cont(" zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				pr_cont(" %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		pr_cont("\n");
		index = (re->logical >> PAGE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->fs_info = root->fs_info;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}
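
/*
 * Lifetime example (restating what the code above implements): the control
 * structure starts out with two references, one for the caller and one for
 * "has elements":
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);  // refcnt == 2
 *	...
 *	btrfs_reada_wait(rc);	// drops the caller's reference; the
 *				// elements reference was dropped by
 *				// __readahead_hook() when elems hit 0
 */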