Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

bcache: remove nested function usage

Uninlined nested functions can cause crashes when using ftrace, as they don't
follow the normal calling convention and confuse the ftrace function graph
tracer as it examines the stack.

Also, nested functions are supported as a gcc extension, but may fail on other
compilers (e.g. llvm).

Signed-off-by: John Sheu <john.sheu@gmail.com>

Authored by John Sheu, committed by Kent Overstreet
cb851149 3a2fd9d5
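For readers unfamiliar with the construct being removed, here is a minimal, self-contained sketch (illustrative only, with hypothetical names, not bcache code) of a GCC nested function next to the portable rewrite this commit applies: hoist the helper to file scope and pass the value it used to capture as an explicit parameter. Only GCC accepts the nested form; clang rejects the syntax, and taking a nested function's address makes GCC build an executable trampoline on the stack, the kind of non-standard calling setup the commit message alludes to.

/* sketch.c: illustrative only, not from the kernel tree.
 * The nested form below is a GNU C extension; build with gcc.
 */
#include <stdio.h>

/* Portable form: file-scope static helper, with the formerly captured
 * state passed explicitly, mirroring how subtract_dirty() became
 * bch_subtract_dirty(..., c, ...) in this commit. */
static void subtract_portable(int *total, int sectors)
{
	*total -= sectors;
}

int main(void)
{
	int total = 100;

#if defined(__GNUC__) && !defined(__clang__)
	/* GNU extension: a nested function capturing "total" from the
	 * enclosing frame; clang does not accept this syntax. */
	void subtract_nested(int sectors)
	{
		total -= sectors;
	}

	subtract_nested(10);
#endif

	subtract_portable(&total, 10);
	printf("total = %d\n", total);
	return 0;
}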

+93 -89 (total)

drivers/md/bcache/extents.c  +13 -9
···
 	return NULL;
 }
 
+static void bch_subtract_dirty(struct bkey *k,
+			       struct cache_set *c,
+			       uint64_t offset,
+			       int sectors)
+{
+	if (KEY_DIRTY(k))
+		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+					     offset, -sectors);
+}
+
 static bool bch_extent_insert_fixup(struct btree_keys *b,
 				    struct bkey *insert,
 				    struct btree_iter *iter,
 				    struct bkey *replace_key)
 {
 	struct cache_set *c = container_of(b, struct btree, keys)->c;
-
-	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-	{
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
-						     offset, -sectors);
-	}
 
 	uint64_t old_offset;
 	unsigned old_size, sectors_found = 0;
···
 
 			struct bkey *top;
 
-			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+			bch_subtract_dirty(k, c, KEY_START(insert),
+					   KEY_SIZE(insert));
 
 			if (bkey_written(b, k)) {
 				/*
···
 			}
 		}
 
-		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
 	}
 
 check_failed:
drivers/md/bcache/sysfs.c  +80 -80
···
 	struct bset_stats stats;
 };
 
-static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
 {
 	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
 
···
 	memset(&op, 0, sizeof(op));
 	bch_btree_op_init(&op.op, -1);
 
-	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
 	if (ret < 0)
 		return ret;
 
···
 			op.stats.floats, op.stats.failed);
 }
 
+static unsigned bch_root_usage(struct cache_set *c)
+{
+	unsigned bytes = 0;
+	struct bkey *k;
+	struct btree *b;
+	struct btree_iter iter;
+
+	goto lock_root;
+
+	do {
+		rw_unlock(false, b);
+lock_root:
+		b = c->root;
+		rw_lock(false, b, b->level);
+	} while (b != c->root);
+
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+		bytes += bkey_bytes(k);
+
+	rw_unlock(false, b);
+
+	return (bytes * 100) / btree_bytes(c);
+}
+
+static size_t bch_cache_size(struct cache_set *c)
+{
+	size_t ret = 0;
+	struct btree *b;
+
+	mutex_lock(&c->bucket_lock);
+	list_for_each_entry(b, &c->btree_cache, list)
+		ret += 1 << (b->keys.page_order + PAGE_SHIFT);
+
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
+
+static unsigned bch_cache_max_chain(struct cache_set *c)
+{
+	unsigned ret = 0;
+	struct hlist_head *h;
+
+	mutex_lock(&c->bucket_lock);
+
+	for (h = c->bucket_hash;
+	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+	     h++) {
+		unsigned i = 0;
+		struct hlist_node *p;
+
+		hlist_for_each(p, h)
+			i++;
+
+		ret = max(ret, i);
+	}
+
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
+
+static unsigned bch_btree_used(struct cache_set *c)
+{
+	return div64_u64(c->gc_stats.key_bytes * 100,
+			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+}
+
+static unsigned bch_average_key_size(struct cache_set *c)
+{
+	return c->gc_stats.nkeys
+		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+		: 0;
+}
+
 SHOW(__bch_cache_set)
 {
-	unsigned root_usage(struct cache_set *c)
-	{
-		unsigned bytes = 0;
-		struct bkey *k;
-		struct btree *b;
-		struct btree_iter iter;
-
-		goto lock_root;
-
-		do {
-			rw_unlock(false, b);
-lock_root:
-			b = c->root;
-			rw_lock(false, b, b->level);
-		} while (b != c->root);
-
-		for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
-			bytes += bkey_bytes(k);
-
-		rw_unlock(false, b);
-
-		return (bytes * 100) / btree_bytes(c);
-	}
-
-	size_t cache_size(struct cache_set *c)
-	{
-		size_t ret = 0;
-		struct btree *b;
-
-		mutex_lock(&c->bucket_lock);
-		list_for_each_entry(b, &c->btree_cache, list)
-			ret += 1 << (b->keys.page_order + PAGE_SHIFT);
-
-		mutex_unlock(&c->bucket_lock);
-		return ret;
-	}
-
-	unsigned cache_max_chain(struct cache_set *c)
-	{
-		unsigned ret = 0;
-		struct hlist_head *h;
-
-		mutex_lock(&c->bucket_lock);
-
-		for (h = c->bucket_hash;
-		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
-		     h++) {
-			unsigned i = 0;
-			struct hlist_node *p;
-
-			hlist_for_each(p, h)
-				i++;
-
-			ret = max(ret, i);
-		}
-
-		mutex_unlock(&c->bucket_lock);
-		return ret;
-	}
-
-	unsigned btree_used(struct cache_set *c)
-	{
-		return div64_u64(c->gc_stats.key_bytes * 100,
-				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
-	}
-
-	unsigned average_key_size(struct cache_set *c)
-	{
-		return c->gc_stats.nkeys
-			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
-			: 0;
-	}
-
 	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
 
 	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
···
 	sysfs_hprint(bucket_size, bucket_bytes(c));
 	sysfs_hprint(block_size, block_bytes(c));
 	sysfs_print(tree_depth, c->root->level);
-	sysfs_print(root_usage_percent, root_usage(c));
+	sysfs_print(root_usage_percent, bch_root_usage(c));
 
-	sysfs_hprint(btree_cache_size, cache_size(c));
-	sysfs_print(btree_cache_max_chain, cache_max_chain(c));
+	sysfs_hprint(btree_cache_size, bch_cache_size(c));
+	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
 	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
 
 	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
···
 	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
 	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
 
-	sysfs_print(btree_used_percent, btree_used(c));
+	sysfs_print(btree_used_percent, bch_btree_used(c));
 	sysfs_print(btree_nodes, c->gc_stats.nodes);
-	sysfs_hprint(average_key_size, average_key_size(c));
+	sysfs_hprint(average_key_size, bch_average_key_size(c));
 
 	sysfs_print(cache_read_races,
 		    atomic_long_read(&c->cache_read_races));
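The sysfs.c hunk also shows the idiom the kernel uses instead of closures when a callback needs extra state: embed the generic op in a wrapper struct (here bset_stats_op, passed to bch_btree_map_nodes() with bch_btree_bset_stats() as the callback) and recover the wrapper with container_of() inside the callback. Below is a rough, self-contained userspace sketch of that pattern; all names are hypothetical, and the container_of macro is a stand-in for the kernel's.

/* Illustrative sketch only; types and names are hypothetical, not bcache's. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic operation handed to a tree-walk helper. */
struct op {
	int (*fn)(struct op *op, int key);
};

/* Callback state lives in a wrapper struct instead of a closure. */
struct stats_op {
	struct op op;		/* embedded "base" operation */
	long total;		/* accumulated result */
};

static int count_key(struct op *base, int key)
{
	/* Recover the wrapper from the embedded op, as
	 * bch_btree_bset_stats() does with bset_stats_op. */
	struct stats_op *s = container_of(base, struct stats_op, op);

	s->total += key;
	return 0;
}

/* Stand-in for a tree-walk helper that only knows about struct op. */
static void map_keys(struct op *op, const int *keys, size_t n)
{
	for (size_t i = 0; i < n; i++)
		op->fn(op, keys[i]);
}

int main(void)
{
	struct stats_op s = { .op = { .fn = count_key }, .total = 0 };
	int keys[] = { 1, 2, 3 };

	map_keys(&s.op, keys, 3);
	printf("total = %ld\n", s.total);
	return 0;
}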