Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.17-rc5, 565 lines, 14 kB
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"

static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (btrfs_fs_closing(fs_info))
			goto out;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				if (WARN_ON(btrfs_header_nritems(leaf) == 0))
					break;

				/*
				 * Save the key so we can advance forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(path);
				root->ino_cache_progress = last;
				up_read(&fs_info->commit_root_sem);
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		if (key.objectid >= root->highest_objectid)
			break;

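		/*
		 * Gap detection (illustrative numbers): if the previous
		 * inode item was 256 and this one is 260, the range
		 * [257, 259] (260 - 256 - 1 == 3 numbers) is handed to
		 * the free ino cache below.
		 */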
		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(fs_info, ctl, last + 1,
					       key.objectid - last - 1);
			wake_up(&root->ino_cache_wait);
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}

	if (last < root->highest_objectid - 1) {
		__btrfs_add_free_space(fs_info, ctl, last + 1,
				       root->highest_objectid - last - 1);
	}

	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->ino_cache_lock);

	root->ino_cache_progress = (u64)-1;
	btrfs_unpin_free_ino(root);
out:
	wake_up(&root->ino_cache_wait);
	up_read(&fs_info->commit_root_sem);

	btrfs_free_path(path);

	return ret;
}

static void start_caching(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct task_struct *tsk;
	int ret;
	u64 objectid;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_NO) {
		spin_unlock(&root->ino_cache_lock);
		return;
	}

	root->ino_cache_state = BTRFS_CACHE_STARTED;
	spin_unlock(&root->ino_cache_lock);

	ret = load_free_ino_cache(fs_info, root);
	if (ret == 1) {
		spin_lock(&root->ino_cache_lock);
		root->ino_cache_state = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->ino_cache_lock);
		return;
	}

	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the extent tree, and this can keep the ino allocation
	 * path waiting. Therefore at start we quickly find out the highest
	 * inode number and we know we can use inode numbers which fall in
	 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
	 */
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(fs_info, ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
	}

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
			  root->root_key.objectid);
	if (IS_ERR(tsk)) {
		btrfs_warn(fs_info, "failed to start inode caching task");
		btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
					     "disabling inode map caching");
	}
}

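/*
 * Hand out the next free inode number for @root. With the ino cache
 * enabled we first try the in-memory free_ino tree; if nothing is
 * cached yet, caching is kicked off and we wait until either some free
 * numbers show up or the cache is fully built. Only a fully built but
 * empty cache means the root has no inode numbers left.
 */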
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return btrfs_find_free_objectid(root, objectid);

again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->ino_cache_wait,
		   root->ino_cache_state == BTRFS_CACHE_FINISHED ||
		   root->free_ino_ctl->free_space > 0);

	if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else
		goto again;
}

void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;
again:
	if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(fs_info, pinned, objectid, 1);
	} else {
		down_write(&fs_info->commit_root_sem);
		spin_lock(&root->ino_cache_lock);
		if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->ino_cache_lock);
			up_write(&fs_info->commit_root_sem);
			goto again;
		}
		spin_unlock(&root->ino_cache_lock);

		start_caching(root);

		__btrfs_add_free_space(fs_info, pinned, objectid, 1);

		up_write(&fs_info->commit_root_sem);
	}
}

/*
 * When a transaction is committed, we'll move those inode numbers which are
 * smaller than root->ino_cache_progress from the pinned tree to the free_ino
 * tree, and the others will just be dropped, because the commit root we were
 * searching has changed.
 *
 * Must be called with root->fs_info->commit_root_sem held.
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return;

	while (1) {
		bool add_to_ctl = true;

		spin_lock(rbroot_lock);
		n = rb_first(rbroot);
		if (!n) {
			spin_unlock(rbroot_lock);
			break;
		}

		info = rb_entry(n, struct btrfs_free_space, offset_index);
		BUG_ON(info->bitmap); /* Logic error */

		if (info->offset > root->ino_cache_progress)
			add_to_ctl = false;
		else if (info->offset + info->bytes > root->ino_cache_progress)
			count = root->ino_cache_progress - info->offset + 1;
		else
			count = info->bytes;

		rb_erase(&info->offset_index, rbroot);
		spin_unlock(rbroot_lock);
		if (add_to_ctl)
			__btrfs_add_free_space(root->fs_info, ctl,
					       info->offset, count);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}
}

#define INIT_THRESHOLD	((SZ_32K / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_SIZE * 8)
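
/*
 * With 4K pages (an illustrative configuration) INODES_PER_BITMAP works
 * out to 4096 * 8 = 32768 inode numbers per bitmap, and INIT_THRESHOLD
 * to as many struct btrfs_free_space extent entries as fit in 16K
 * (SZ_32K / 2) before use_bitmap() starts preferring bitmaps for new
 * chunks.
 */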

/*
 * The goal is to keep the memory used by the free_ino tree from exceeding
 * the memory it would use if we used bitmaps only.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int max_ino;
	int max_bitmaps;

	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing a precise calculation.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_SIZE / sizeof(*info);
}

/*
 * We don't fall back to bitmaps if we are below the extents threshold
 * or this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents < ctl->extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used while the caching work is in
	 *   progress.
	 * - It keeps the code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

static const struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds	= pinned_recalc_thresholds,
	.use_bitmap		= pinned_use_bitmap,
};

void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * Initially we allow 16K of RAM to cache chunks of inode numbers
	 * before we resort to bitmaps. This is somewhat arbitrary, but it
	 * will be adjusted at runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}

int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	struct btrfs_block_rsv *rsv;
	struct extent_changeset *data_reserved = NULL;
	u64 num_bytes;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

	/* only fs tree and subvol/snap need the ino cache */
	if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
	    (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
		return 0;

	/* Don't save the inode cache if we are deleting this root */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 0;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion if needed
	 * 4 items for inode item update (in the worst case)
	 * 1 item for slack space if we need to do truncation
	 * 1 item for the free space object
	 * 3 items for pre-allocation
	 */
	trans->bytes_reserved = btrfs_calc_trans_metadata_size(fs_info, 10);
	ret = btrfs_block_rsv_add(root, trans->block_rsv,
				  trans->bytes_reserved,
				  BTRFS_RESERVE_NO_FLUSH);
	if (ret)
		goto out;
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 1);
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
		ret = PTR_ERR(inode);
		goto out_release;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retry); /* Logic error */
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out_release;
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret) {
			if (ret != -ENOSPC)
				btrfs_abort_transaction(trans, ret);
			goto out_put;
		}
	}

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
		ret = -1;
		spin_unlock(&root->ino_cache_lock);
		goto out_put;
	}
	spin_unlock(&root->ino_cache_lock);

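	/*
	 * Rough estimate of how much space writing out the cache will
	 * need: one in-memory entry size per cached extent entry
	 * (rounded up to a whole page) plus one page per bitmap; a few
	 * extra pages of slack are added just below.
	 */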
	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space */
	prealloc += 8 * PAGE_SIZE;

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 0, prealloc);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
	if (ret) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true);
		goto out_put;
	}

	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
	btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false);
out_put:
	iput(inode);
out_release:
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
out:
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;

	btrfs_free_path(path);
	extent_changeset_free(data_reserved);
	return ret;
}

int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* Corruption */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		*objectid = max_t(u64, found_key.objectid,
				  BTRFS_FIRST_FREE_OBJECTID - 1);
	} else {
		*objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   root->root_key.objectid);
		ret = -ENOSPC;
		goto out;
	}

	*objectid = ++root->highest_objectid;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}