Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
at v5.5-rc2, 580 lines, 15 kB
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"
#include "delalloc-space.h"

static void fail_caching_thread(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	btrfs_warn(fs_info, "failed to start inode caching task");
	btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
				     "disabling inode map caching");
	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_ERROR;
	spin_unlock(&root->ino_cache_lock);
	wake_up(&root->ino_cache_wait);
}

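/*
 * Worker for the "btrfs-ino-cache" kthread: scan the commit root of this
 * subvolume and record unused inode number ranges (the gaps between
 * existing INODE_ITEM keys, plus everything above the last key up to
 * root->highest_objectid) in root->free_ino_ctl.
 */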
static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		fail_caching_thread(root);
		return -ENOMEM;
	}

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (btrfs_fs_closing(fs_info))
			goto out;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				if (WARN_ON(btrfs_header_nritems(leaf) == 0))
					break;

				/*
				 * Save the key so we can advance forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(path);
				root->ino_cache_progress = last;
				up_read(&fs_info->commit_root_sem);
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		if (key.objectid >= root->highest_objectid)
			break;

		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(fs_info, ctl, last + 1,
					       key.objectid - last - 1);
			wake_up(&root->ino_cache_wait);
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}

	if (last < root->highest_objectid - 1) {
		__btrfs_add_free_space(fs_info, ctl, last + 1,
				       root->highest_objectid - last - 1);
	}

	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->ino_cache_lock);

	root->ino_cache_progress = (u64)-1;
	btrfs_unpin_free_ino(root);
out:
	wake_up(&root->ino_cache_wait);
	up_read(&fs_info->commit_root_sem);

	btrfs_free_path(path);

	return ret;
}

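/*
 * Begin populating root->free_ino_ctl: try to load a previously written
 * on-disk cache first; otherwise publish the range above the current
 * highest inode number immediately and start caching_kthread to find the
 * remaining gaps in the background.
 */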
static void start_caching(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct task_struct *tsk;
	int ret;
	u64 objectid;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_NO) {
		spin_unlock(&root->ino_cache_lock);
		return;
	}

	root->ino_cache_state = BTRFS_CACHE_STARTED;
	spin_unlock(&root->ino_cache_lock);

	ret = load_free_ino_cache(fs_info, root);
	if (ret == 1) {
		spin_lock(&root->ino_cache_lock);
		root->ino_cache_state = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->ino_cache_lock);
		wake_up(&root->ino_cache_wait);
		return;
	}

	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the extent tree, and this can keep the ino allocation
	 * path waiting. Therefore at start we quickly find out the highest
	 * inode number and we know we can use inode numbers which fall in
	 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
	 */
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(fs_info, ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
		wake_up(&root->ino_cache_wait);
	}

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
			  root->root_key.objectid);
	if (IS_ERR(tsk))
		fail_caching_thread(root);
}

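/*
 * Pick a free inode number for a new inode. When inode map caching is
 * disabled, or the caching thread hit an error, fall back to plain
 * btrfs_find_free_objectid(); otherwise wait until the cache can hand
 * out a free number.
 */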
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return btrfs_find_free_objectid(root, objectid);

again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->ino_cache_wait,
		   root->ino_cache_state == BTRFS_CACHE_FINISHED ||
		   root->ino_cache_state == BTRFS_CACHE_ERROR ||
		   root->free_ino_ctl->free_space > 0);

	if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
		return btrfs_find_free_objectid(root, objectid);
	else
		goto again;
}

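/*
 * Give an unused inode number back to the cache. The number goes into the
 * pinned tree first and is only moved to the free_ino tree when the
 * current transaction commits, see btrfs_unpin_free_ino().
 */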
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;
again:
	if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(fs_info, pinned, objectid, 1);
	} else {
		down_write(&fs_info->commit_root_sem);
		spin_lock(&root->ino_cache_lock);
		if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->ino_cache_lock);
			up_write(&fs_info->commit_root_sem);
			goto again;
		}
		spin_unlock(&root->ino_cache_lock);

		start_caching(root);

		__btrfs_add_free_space(fs_info, pinned, objectid, 1);

		up_write(&fs_info->commit_root_sem);
	}
}

/*
 * When a transaction is committed, we'll move those inode numbers which are
 * smaller than root->ino_cache_progress from pinned tree to free_ino tree, and
 * others will just be dropped, because the commit root we were searching has
 * changed.
 *
 * Must be called with root->fs_info->commit_root_sem held
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return;

	while (1) {
		spin_lock(rbroot_lock);
		n = rb_first(rbroot);
		if (!n) {
			spin_unlock(rbroot_lock);
			break;
		}

		info = rb_entry(n, struct btrfs_free_space, offset_index);
		BUG_ON(info->bitmap); /* Logic error */

		if (info->offset > root->ino_cache_progress)
			count = 0;
		else
			count = min(root->ino_cache_progress - info->offset + 1,
				    info->bytes);

		rb_erase(&info->offset_index, rbroot);
		spin_unlock(rbroot_lock);
		if (count)
			__btrfs_add_free_space(root->fs_info, ctl,
					       info->offset, count);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}
}

#define INIT_THRESHOLD	((SZ_32K / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_SIZE * 8)

/*
 * The goal is to keep the memory used by the free_ino tree from exceeding
 * the memory we would use if we used bitmaps only.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int max_ino;
	int max_bitmaps;

	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing precise calculation.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_SIZE / sizeof(*info);
}

/*
 * We don't fall back to bitmaps if we are below the extents threshold
 * or this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents < ctl->extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used during the process of caching
	 *   work.
	 * - Make code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

static const struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds	= pinned_recalc_thresholds,
	.use_bitmap		= pinned_use_bitmap,
};

void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * Initially we allow 16K of RAM to cache chunks of inode numbers
	 * before we resort to bitmaps. This is somewhat arbitrary, but it
	 * will be adjusted at runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}

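/*
 * Persist the in-memory free inode number cache: look up (or create) this
 * root's free ino cache inode, truncate any stale contents, preallocate
 * enough space and write the current free_ino tree out through
 * btrfs_write_out_ino_cache().
 */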
int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	struct btrfs_block_rsv *rsv;
	struct extent_changeset *data_reserved = NULL;
	u64 num_bytes;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

	/* Only the fs tree and subvolumes/snapshots need the ino cache */
	if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
	    (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
		return 0;

	/* Don't save inode cache if we are deleting this root */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 0;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion if needed
	 * 4 items for inode item update (in the worst case)
	 * 1 item for slack space if we need to do truncation
	 * 1 item for free space object
	 * 3 items for pre-allocation
	 */
	trans->bytes_reserved = btrfs_calc_insert_metadata_size(fs_info, 10);
	ret = btrfs_block_rsv_add(root, trans->block_rsv,
				  trans->bytes_reserved,
				  BTRFS_RESERVE_NO_FLUSH);
	if (ret)
		goto out;
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 1);
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
		ret = PTR_ERR(inode);
		goto out_release;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retry); /* Logic error */
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out_release;
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret) {
			if (ret != -ENOSPC)
				btrfs_abort_transaction(trans, ret);
			goto out_put;
		}
	}

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
		ret = -1;
		spin_unlock(&root->ino_cache_lock);
		goto out_put;
	}
	spin_unlock(&root->ino_cache_lock);

	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space */
	prealloc += 8 * PAGE_SIZE;

	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, 0, prealloc);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
	if (ret) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
		goto out_put;
	}

	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
	btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
out_put:
	iput(inode);
out_release:
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
out:
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;

	btrfs_free_path(path);
	extent_changeset_free(data_reserved);
	return ret;
}

int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* Corruption */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		*objectid = max_t(u64, found_key.objectid,
				  BTRFS_FIRST_FREE_OBJECTID - 1);
	} else {
		*objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		btrfs_warn(root->fs_info,
			   "the objectid of root %llu reaches its highest value",
			   root->root_key.objectid);
		ret = -ENOSPC;
		goto out;
	}

	*objectid = ++root->highest_objectid;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}