Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcache: add identifier names to arguments of function definitions

There are many function definitions that do not have identifier argument names,
and scripts/checkpatch.pl reports warnings like this:

WARNING: function definition argument 'struct bcache_device *' should
also have an identifier name
#16735: FILE: writeback.h:120:
+void bch_sectors_dirty_init(struct bcache_device *);

This patch adds identifier argument names to all bcache function
definitions to fix such warnings.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed: Shenghui Wang <shhuiw@foxmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Coly Li, committed by Jens Axboe.
fc2d5988 1fae7cf0

+201 -171
+51 -47
drivers/md/bcache/bcache.h
··· 273 273 274 274 unsigned int data_csum:1; 275 275 276 - int (*cache_miss)(struct btree *, struct search *, 277 - struct bio *, unsigned int); 278 - int (*ioctl) (struct bcache_device *, fmode_t, unsigned int, unsigned long); 276 + int (*cache_miss)(struct btree *b, struct search *s, 277 + struct bio *bio, unsigned int sectors); 278 + int (*ioctl) (struct bcache_device *d, fmode_t mode, 279 + unsigned int cmd, unsigned long arg); 279 280 }; 280 281 281 282 struct io { ··· 926 925 /* Forward declarations */ 927 926 928 927 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio); 929 - void bch_count_io_errors(struct cache *, blk_status_t, int, const char *); 930 - void bch_bbio_count_io_errors(struct cache_set *, struct bio *, 931 - blk_status_t, const char *); 932 - void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t, 933 - const char *); 934 - void bch_bbio_free(struct bio *, struct cache_set *); 935 - struct bio *bch_bbio_alloc(struct cache_set *); 928 + void bch_count_io_errors(struct cache *ca, blk_status_t error, 929 + int is_read, const char *m); 930 + void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, 931 + blk_status_t error, const char *m); 932 + void bch_bbio_endio(struct cache_set *c, struct bio *bio, 933 + blk_status_t error, const char *m); 934 + void bch_bbio_free(struct bio *bio, struct cache_set *c); 935 + struct bio *bch_bbio_alloc(struct cache_set *c); 936 936 937 - void __bch_submit_bbio(struct bio *, struct cache_set *); 938 - void bch_submit_bbio(struct bio *, struct cache_set *, 939 - struct bkey *, unsigned int); 937 + void __bch_submit_bbio(struct bio *bio, struct cache_set *c); 938 + void bch_submit_bbio(struct bio *bio, struct cache_set *c, 939 + struct bkey *k, unsigned int ptr); 940 940 941 - uint8_t bch_inc_gen(struct cache *, struct bucket *); 942 - void bch_rescale_priorities(struct cache_set *, int); 941 + uint8_t bch_inc_gen(struct cache *ca, struct bucket *b); 942 + void 
bch_rescale_priorities(struct cache_set *c, int sectors); 943 943 944 - bool bch_can_invalidate_bucket(struct cache *, struct bucket *); 945 - void __bch_invalidate_one_bucket(struct cache *, struct bucket *); 944 + bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b); 945 + void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b); 946 946 947 - void __bch_bucket_free(struct cache *, struct bucket *); 948 - void bch_bucket_free(struct cache_set *, struct bkey *); 947 + void __bch_bucket_free(struct cache *ca, struct bucket *b); 948 + void bch_bucket_free(struct cache_set *c, struct bkey *k); 949 949 950 - long bch_bucket_alloc(struct cache *, unsigned int, bool); 951 - int __bch_bucket_alloc_set(struct cache_set *, unsigned int, 952 - struct bkey *, int, bool); 953 - int bch_bucket_alloc_set(struct cache_set *, unsigned int, 954 - struct bkey *, int, bool); 955 - bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned int, 956 - unsigned int, unsigned int, bool); 950 + long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait); 951 + int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, 952 + struct bkey *k, int n, bool wait); 953 + int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, 954 + struct bkey *k, int n, bool wait); 955 + bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, 956 + unsigned int sectors, unsigned int write_point, 957 + unsigned int write_prio, bool wait); 957 958 bool bch_cached_dev_error(struct cached_dev *dc); 958 959 959 960 __printf(2, 3) 960 - bool bch_cache_set_error(struct cache_set *, const char *, ...); 961 + bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...); 961 962 962 - void bch_prio_write(struct cache *); 963 - void bch_write_bdev_super(struct cached_dev *, struct closure *); 963 + void bch_prio_write(struct cache *ca); 964 + void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); 964 965 965 966 extern 
struct workqueue_struct *bcache_wq; 966 967 extern struct mutex bch_register_lock; ··· 974 971 extern struct kobj_type bch_cache_set_internal_ktype; 975 972 extern struct kobj_type bch_cache_ktype; 976 973 977 - void bch_cached_dev_release(struct kobject *); 978 - void bch_flash_dev_release(struct kobject *); 979 - void bch_cache_set_release(struct kobject *); 980 - void bch_cache_release(struct kobject *); 974 + void bch_cached_dev_release(struct kobject *kobj); 975 + void bch_flash_dev_release(struct kobject *kobj); 976 + void bch_cache_set_release(struct kobject *kobj); 977 + void bch_cache_release(struct kobject *kobj); 981 978 982 - int bch_uuid_write(struct cache_set *); 983 - void bcache_write_super(struct cache_set *); 979 + int bch_uuid_write(struct cache_set *c); 980 + void bcache_write_super(struct cache_set *c); 984 981 985 982 int bch_flash_dev_create(struct cache_set *c, uint64_t size); 986 983 987 - int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *); 988 - void bch_cached_dev_detach(struct cached_dev *); 989 - void bch_cached_dev_run(struct cached_dev *); 990 - void bcache_device_stop(struct bcache_device *); 984 + int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, 985 + uint8_t *set_uuid); 986 + void bch_cached_dev_detach(struct cached_dev *dc); 987 + void bch_cached_dev_run(struct cached_dev *dc); 988 + void bcache_device_stop(struct bcache_device *d); 991 989 992 - void bch_cache_set_unregister(struct cache_set *); 993 - void bch_cache_set_stop(struct cache_set *); 990 + void bch_cache_set_unregister(struct cache_set *c); 991 + void bch_cache_set_stop(struct cache_set *c); 994 992 995 - struct cache_set *bch_cache_set_alloc(struct cache_sb *); 996 - void bch_btree_cache_free(struct cache_set *); 997 - int bch_btree_cache_alloc(struct cache_set *); 998 - void bch_moving_init_cache_set(struct cache_set *); 999 - int bch_open_buckets_alloc(struct cache_set *); 1000 - void bch_open_buckets_free(struct 
cache_set *); 993 + struct cache_set *bch_cache_set_alloc(struct cache_sb *sb); 994 + void bch_btree_cache_free(struct cache_set *c); 995 + int bch_btree_cache_alloc(struct cache_set *c); 996 + void bch_moving_init_cache_set(struct cache_set *c); 997 + int bch_open_buckets_alloc(struct cache_set *c); 998 + void bch_open_buckets_free(struct cache_set *c); 1001 999 1002 1000 int bch_cache_allocator_start(struct cache *ca); 1003 1001
+70 -54
drivers/md/bcache/bset.h
··· 187 187 }; 188 188 189 189 struct btree_keys_ops { 190 - bool (*sort_cmp)(struct btree_iter_set, 191 - struct btree_iter_set); 192 - struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *); 193 - bool (*insert_fixup)(struct btree_keys *, struct bkey *, 194 - struct btree_iter *, struct bkey *); 195 - bool (*key_invalid)(struct btree_keys *, 196 - const struct bkey *); 197 - bool (*key_bad)(struct btree_keys *, const struct bkey *); 198 - bool (*key_merge)(struct btree_keys *, 199 - struct bkey *, struct bkey *); 200 - void (*key_to_text)(char *, size_t, const struct bkey *); 201 - void (*key_dump)(struct btree_keys *, const struct bkey *); 190 + bool (*sort_cmp)(struct btree_iter_set l, 191 + struct btree_iter_set r); 192 + struct bkey *(*sort_fixup)(struct btree_iter *iter, 193 + struct bkey *tmp); 194 + bool (*insert_fixup)(struct btree_keys *b, 195 + struct bkey *insert, 196 + struct btree_iter *iter, 197 + struct bkey *replace_key); 198 + bool (*key_invalid)(struct btree_keys *bk, 199 + const struct bkey *k); 200 + bool (*key_bad)(struct btree_keys *bk, 201 + const struct bkey *k); 202 + bool (*key_merge)(struct btree_keys *bk, 203 + struct bkey *l, struct bkey *r); 204 + void (*key_to_text)(char *buf, 205 + size_t size, 206 + const struct bkey *k); 207 + void (*key_dump)(struct btree_keys *keys, 208 + const struct bkey *k); 202 209 203 210 /* 204 211 * Only used for deciding whether to use START_KEY(k) or just the key ··· 287 280 return ((void *) i) + roundup(set_bytes(i), block_bytes); 288 281 } 289 282 290 - void bch_btree_keys_free(struct btree_keys *); 291 - int bch_btree_keys_alloc(struct btree_keys *, unsigned int, gfp_t); 292 - void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *, 293 - bool *); 283 + void bch_btree_keys_free(struct btree_keys *b); 284 + int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order, 285 + gfp_t gfp); 286 + void bch_btree_keys_init(struct btree_keys *b, const struct 
btree_keys_ops *ops, 287 + bool *expensive_debug_checks); 294 288 295 - void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t); 296 - void bch_bset_build_written_tree(struct btree_keys *); 297 - void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *); 298 - bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *); 299 - void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *); 300 - unsigned int bch_btree_insert_key(struct btree_keys *, struct bkey *, 301 - struct bkey *); 289 + void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic); 290 + void bch_bset_build_written_tree(struct btree_keys *b); 291 + void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k); 292 + bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r); 293 + void bch_bset_insert(struct btree_keys *b, struct bkey *where, 294 + struct bkey *insert); 295 + unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, 296 + struct bkey *replace_key); 302 297 303 298 enum { 304 299 BTREE_INSERT_STATUS_NO_INSERT = 0, ··· 322 313 } data[MAX_BSETS]; 323 314 }; 324 315 325 - typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *); 316 + typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k); 326 317 327 - struct bkey *bch_btree_iter_next(struct btree_iter *); 328 - struct bkey *bch_btree_iter_next_filter(struct btree_iter *, 329 - struct btree_keys *, ptr_filter_fn); 318 + struct bkey *bch_btree_iter_next(struct btree_iter *iter); 319 + struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter, 320 + struct btree_keys *b, 321 + ptr_filter_fn fn); 330 322 331 - void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *); 332 - struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *, 333 - struct bkey *); 323 + void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, 324 + struct bkey *end); 325 + 
struct bkey *bch_btree_iter_init(struct btree_keys *b, 326 + struct btree_iter *iter, 327 + struct bkey *search); 334 328 335 - struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *, 336 - const struct bkey *); 329 + struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t, 330 + const struct bkey *search); 337 331 338 332 /* 339 333 * Returns the first key that is strictly greater than search ··· 367 355 struct time_stats time; 368 356 }; 369 357 370 - void bch_bset_sort_state_free(struct bset_sort_state *); 371 - int bch_bset_sort_state_init(struct bset_sort_state *, unsigned int); 372 - void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *); 373 - void bch_btree_sort_into(struct btree_keys *, struct btree_keys *, 374 - struct bset_sort_state *); 375 - void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *, 376 - struct bset_sort_state *); 377 - void bch_btree_sort_partial(struct btree_keys *, unsigned int, 378 - struct bset_sort_state *); 358 + void bch_bset_sort_state_free(struct bset_sort_state *state); 359 + int bch_bset_sort_state_init(struct bset_sort_state *state, 360 + unsigned int page_order); 361 + void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state); 362 + void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new, 363 + struct bset_sort_state *state); 364 + void bch_btree_sort_and_fix_extents(struct btree_keys *b, 365 + struct btree_iter *iter, 366 + struct bset_sort_state *state); 367 + void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, 368 + struct bset_sort_state *state); 379 369 380 370 static inline void bch_btree_sort(struct btree_keys *b, 381 371 struct bset_sort_state *state) ··· 391 377 size_t floats, failed; 392 378 }; 393 379 394 - void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *); 380 + void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state); 395 381 396 382 /* Bkey utility code 
*/ 397 383 ··· 415 401 : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r); 416 402 } 417 403 418 - void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *, 419 - unsigned int); 420 - bool __bch_cut_front(const struct bkey *, struct bkey *); 421 - bool __bch_cut_back(const struct bkey *, struct bkey *); 404 + void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src, 405 + unsigned int i); 406 + bool __bch_cut_front(const struct bkey *where, struct bkey *k); 407 + bool __bch_cut_back(const struct bkey *where, struct bkey *k); 422 408 423 409 static inline bool bch_cut_front(const struct bkey *where, struct bkey *k) 424 410 { ··· 536 522 return bch_keylist_nkeys(l) * sizeof(uint64_t); 537 523 } 538 524 539 - struct bkey *bch_keylist_pop(struct keylist *); 540 - void bch_keylist_pop_front(struct keylist *); 541 - int __bch_keylist_realloc(struct keylist *, unsigned int); 525 + struct bkey *bch_keylist_pop(struct keylist *l); 526 + void bch_keylist_pop_front(struct keylist *l); 527 + int __bch_keylist_realloc(struct keylist *l, unsigned int u64s); 542 528 543 529 /* Debug stuff */ 544 530 545 531 #ifdef CONFIG_BCACHE_DEBUG 546 532 547 - int __bch_count_data(struct btree_keys *); 548 - void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...); 549 - void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int); 550 - void bch_dump_bucket(struct btree_keys *); 533 + int __bch_count_data(struct btree_keys *b); 534 + void __printf(2, 3) __bch_check_keys(struct btree_keys *b, 535 + const char *fmt, 536 + ...); 537 + void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set); 538 + void bch_dump_bucket(struct btree_keys *b); 551 539 552 540 #else 553 541 ··· 557 541 static inline void __printf(2, 3) 558 542 __bch_check_keys(struct btree_keys *b, const char *fmt, ...) 
{} 559 543 static inline void bch_dump_bucket(struct btree_keys *b) {} 560 - void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int); 544 + void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set); 561 545 562 546 #endif 563 547
+4 -2
drivers/md/bcache/btree.c
··· 1309 1309 unsigned int keys; 1310 1310 }; 1311 1311 1312 - static int bch_btree_insert_node(struct btree *, struct btree_op *, 1313 - struct keylist *, atomic_t *, struct bkey *); 1312 + static int bch_btree_insert_node(struct btree *b, struct btree_op *op, 1313 + struct keylist *insert_keys, 1314 + atomic_t *journal_ref, 1315 + struct bkey *replace_key); 1314 1316 1315 1317 static int btree_gc_coalesce(struct btree *b, struct btree_op *op, 1316 1318 struct gc_stat *gc, struct gc_merge_info *r)
+36 -34
drivers/md/bcache/btree.h
··· 238 238 (w ? up_write : up_read)(&b->lock); 239 239 } 240 240 241 - void bch_btree_node_read_done(struct btree *); 242 - void __bch_btree_node_write(struct btree *, struct closure *); 243 - void bch_btree_node_write(struct btree *, struct closure *); 241 + void bch_btree_node_read_done(struct btree *b); 242 + void __bch_btree_node_write(struct btree *b, struct closure *parent); 243 + void bch_btree_node_write(struct btree *b, struct closure *parent); 244 244 245 - void bch_btree_set_root(struct btree *); 246 - struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *, 247 - int, bool, struct btree *); 248 - struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *, 249 - struct bkey *, int, bool, struct btree *); 245 + void bch_btree_set_root(struct btree *b); 246 + struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, 247 + int level, bool wait, 248 + struct btree *parent); 249 + struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, 250 + struct bkey *k, int level, bool write, 251 + struct btree *parent); 250 252 251 - int bch_btree_insert_check_key(struct btree *, struct btree_op *, 252 - struct bkey *); 253 - int bch_btree_insert(struct cache_set *, struct keylist *, 254 - atomic_t *, struct bkey *); 253 + int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, 254 + struct bkey *check_key); 255 + int bch_btree_insert(struct cache_set *c, struct keylist *keys, 256 + atomic_t *journal_ref, struct bkey *replace_key); 255 257 256 - int bch_gc_thread_start(struct cache_set *); 257 - void bch_initial_gc_finish(struct cache_set *); 258 - void bch_moving_gc(struct cache_set *); 259 - int bch_btree_check(struct cache_set *); 260 - void bch_initial_mark_key(struct cache_set *, int, struct bkey *); 258 + int bch_gc_thread_start(struct cache_set *c); 259 + void bch_initial_gc_finish(struct cache_set *c); 260 + void bch_moving_gc(struct cache_set *c); 261 + int 
bch_btree_check(struct cache_set *c); 262 + void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k); 261 263 262 264 static inline void wake_up_gc(struct cache_set *c) 263 265 { ··· 274 272 275 273 #define MAP_END_KEY 1 276 274 277 - typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *); 278 - int __bch_btree_map_nodes(struct btree_op *, struct cache_set *, 279 - struct bkey *, btree_map_nodes_fn *, int); 275 + typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b); 276 + int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, 277 + struct bkey *from, btree_map_nodes_fn *fn, int flags); 280 278 281 279 static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, 282 280 struct bkey *from, btree_map_nodes_fn *fn) ··· 292 290 return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES); 293 291 } 294 292 295 - typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *, 296 - struct bkey *); 297 - int bch_btree_map_keys(struct btree_op *, struct cache_set *, 298 - struct bkey *, btree_map_keys_fn *, int); 293 + typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b, 294 + struct bkey *k); 295 + int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, 296 + struct bkey *from, btree_map_keys_fn *fn, int flags); 299 297 300 - typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); 298 + typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k); 301 299 302 - void bch_keybuf_init(struct keybuf *); 303 - void bch_refill_keybuf(struct cache_set *, struct keybuf *, 304 - struct bkey *, keybuf_pred_fn *); 305 - bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *, 306 - struct bkey *); 307 - void bch_keybuf_del(struct keybuf *, struct keybuf_key *); 308 - struct keybuf_key *bch_keybuf_next(struct keybuf *); 309 - struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *, 310 - struct bkey *, keybuf_pred_fn *); 300 + void 
bch_keybuf_init(struct keybuf *buf); 301 + void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, 302 + struct bkey *end, keybuf_pred_fn *pred); 303 + bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, 304 + struct bkey *end); 305 + void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w); 306 + struct keybuf_key *bch_keybuf_next(struct keybuf *buf); 307 + struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, struct keybuf *buf, 308 + struct bkey *end, keybuf_pred_fn *pred); 311 309 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats); 312 310 #endif
+3 -3
drivers/md/bcache/debug.h
··· 8 8 9 9 #ifdef CONFIG_BCACHE_DEBUG 10 10 11 - void bch_btree_verify(struct btree *); 12 - void bch_data_verify(struct cached_dev *, struct bio *); 11 + void bch_btree_verify(struct btree *b); 12 + void bch_data_verify(struct cached_dev *dc, struct bio *bio); 13 13 14 14 #define expensive_debug_checks(c) ((c)->expensive_debug_checks) 15 15 #define key_merging_disabled(c) ((c)->key_merging_disabled) ··· 27 27 #endif 28 28 29 29 #ifdef CONFIG_DEBUG_FS 30 - void bch_debug_init_cache_set(struct cache_set *); 30 + void bch_debug_init_cache_set(struct cache_set *c); 31 31 #else 32 32 static inline void bch_debug_init_cache_set(struct cache_set *c) {} 33 33 #endif
+3 -3
drivers/md/bcache/extents.h
··· 8 8 struct bkey; 9 9 struct cache_set; 10 10 11 - void bch_extent_to_text(char *, size_t, const struct bkey *); 12 - bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *); 13 - bool __bch_extent_invalid(struct cache_set *, const struct bkey *); 11 + void bch_extent_to_text(char *buf, size_t size, const struct bkey *k); 12 + bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k); 13 + bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k); 14 14 15 15 #endif /* _BCACHE_EXTENTS_H */
+1 -1
drivers/md/bcache/journal.c
··· 581 581 closure_put(&w->c->journal.io); 582 582 } 583 583 584 - static void journal_write(struct closure *); 584 + static void journal_write(struct closure *cl); 585 585 586 586 static void journal_write_done(struct closure *cl) 587 587 {
+10 -8
drivers/md/bcache/journal.h
··· 167 167 struct btree_op; 168 168 struct keylist; 169 169 170 - atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *); 171 - void bch_journal_next(struct journal *); 172 - void bch_journal_mark(struct cache_set *, struct list_head *); 173 - void bch_journal_meta(struct cache_set *, struct closure *); 174 - int bch_journal_read(struct cache_set *, struct list_head *); 175 - int bch_journal_replay(struct cache_set *, struct list_head *); 170 + atomic_t *bch_journal(struct cache_set *c, 171 + struct keylist *keys, 172 + struct closure *parent); 173 + void bch_journal_next(struct journal *j); 174 + void bch_journal_mark(struct cache_set *c, struct list_head *list); 175 + void bch_journal_meta(struct cache_set *c, struct closure *cl); 176 + int bch_journal_read(struct cache_set *c, struct list_head *list); 177 + int bch_journal_replay(struct cache_set *c, struct list_head *list); 176 178 177 - void bch_journal_free(struct cache_set *); 178 - int bch_journal_alloc(struct cache_set *); 179 + void bch_journal_free(struct cache_set *c); 180 + int bch_journal_alloc(struct cache_set *c); 179 181 180 182 #endif /* _BCACHE_JOURNAL_H */
+1 -1
drivers/md/bcache/request.c
··· 25 25 26 26 struct kmem_cache *bch_search_cache; 27 27 28 - static void bch_data_insert_start(struct closure *); 28 + static void bch_data_insert_start(struct closure *cl); 29 29 30 30 static unsigned int cache_mode(struct cached_dev *dc) 31 31 {
+1 -1
drivers/md/bcache/request.h
··· 33 33 BKEY_PADDED(replace_key); 34 34 }; 35 35 36 - unsigned int bch_get_congested(struct cache_set *); 36 + unsigned int bch_get_congested(struct cache_set *c); 37 37 void bch_data_insert(struct closure *cl); 38 38 39 39 void bch_cached_dev_request_init(struct cached_dev *dc);
+8 -5
drivers/md/bcache/stats.h
··· 53 53 54 54 void bch_cache_accounting_destroy(struct cache_accounting *acc); 55 55 56 - void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *, 57 - bool, bool); 58 - void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *); 59 - void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *); 60 - void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int); 56 + void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, 57 + bool hit, bool bypass); 58 + void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d); 59 + void bch_mark_cache_miss_collision(struct cache_set *c, 60 + struct bcache_device *d); 61 + void bch_mark_sectors_bypassed(struct cache_set *c, 62 + struct cached_dev *dc, 63 + int sectors); 61 64 62 65 #endif /* _BCACHE_STATS_H_ */
+2 -2
drivers/md/bcache/super.c
··· 2136 2136 2137 2137 /* Global interfaces/init */ 2138 2138 2139 - static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, 2140 - const char *, size_t); 2139 + static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, 2140 + const char *buffer, size_t size); 2141 2141 2142 2142 kobj_attribute_write(register, register_bcache); 2143 2143 kobj_attribute_write(register_quiet, register_bcache);
+6 -6
drivers/md/bcache/util.h
··· 288 288 #define ANYSINT_MAX(t) \ 289 289 ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1) 290 290 291 - int bch_strtoint_h(const char *, int *); 292 - int bch_strtouint_h(const char *, unsigned int *); 293 - int bch_strtoll_h(const char *, long long *); 294 - int bch_strtoull_h(const char *, unsigned long long *); 291 + int bch_strtoint_h(const char *cp, int *res); 292 + int bch_strtouint_h(const char *cp, unsigned int *res); 293 + int bch_strtoll_h(const char *cp, long long *res); 294 + int bch_strtoull_h(const char *cp, unsigned long long *res); 295 295 296 296 static inline int bch_strtol_h(const char *cp, long *res) 297 297 { ··· 563 563 return bdev->bd_inode->i_size >> 9; 564 564 } 565 565 566 - uint64_t bch_crc64_update(uint64_t, const void *, size_t); 567 - uint64_t bch_crc64(const void *, size_t); 566 + uint64_t bch_crc64_update(uint64_t crc, const void *_data, size_t len); 567 + uint64_t bch_crc64(const void *data, size_t len); 568 568 569 569 #endif /* _BCACHE_UTIL_H */
+5 -4
drivers/md/bcache/writeback.h
··· 96 96 } 97 97 } 98 98 99 - void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned int, uint64_t, int); 99 + void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, 100 + uint64_t offset, int nr_sectors); 100 101 101 - void bch_sectors_dirty_init(struct bcache_device *); 102 - void bch_cached_dev_writeback_init(struct cached_dev *); 103 - int bch_cached_dev_writeback_start(struct cached_dev *); 102 + void bch_sectors_dirty_init(struct bcache_device *d); 103 + void bch_cached_dev_writeback_init(struct cached_dev *dc); 104 + int bch_cached_dev_writeback_start(struct cached_dev *dc); 104 105 105 106 #endif