// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

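/*
 * The write_attribute()/read_attribute()/rw_attribute() macros (from
 * sysfs.h) each expand to a struct attribute named sysfs_<name> with the
 * matching permission bits; the SHOW()/STORE() callbacks below compare
 * "attr" against these to find out which sysfs file is being accessed.
 */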
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);

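/*
 * Print each entry of "list" separated by spaces, with the selected entry
 * wrapped in brackets, e.g. "writethrough [writeback] writearound none\n".
 * Returns the number of bytes written to "buf".
 */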
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	/*
	 * scnprintf() returns the number of bytes actually stored, so "out"
	 * cannot advance past buf + size (snprintf()'s return value could).
	 */
	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat) (dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit, "%i", dc->error_limit);
	sysfs_printf(io_disable, "%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

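	/*
	 * writeback_rate_debug is a multi-line, human-readable report; all
	 * sizes below are humanized with bch_hprint(). Illustrative output
	 * (numbers invented for this example):
	 *
	 *	rate:		512.0k/sec
	 *	dirty:		20.4G
	 *	target:		20.4G
	 *	proportional:	0.0k
	 *	integral:	0.0k
	 *	change:		0.0k/sec
	 *	next io:	0ms
	 */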
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, all other values should be
		 * 0 when writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		/*
		 * memcpy() fills indices 0..SB_LABEL_SIZE-1, so the
		 * terminator belongs at index SB_LABEL_SIZE (the original
		 * code wrote it one byte further, leaving that byte
		 * uninitialized).
		 */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	d_strtoul(writeback_rate_i_term_inverse);
	d_strtoul_nonzero(writeback_rate_p_term_inverse);
	d_strtoul_nonzero(writeback_rate_minimum);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

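/*
 * Typical use of the attributes above from userspace, per
 * Documentation/admin-guide/bcache.rst (device name is illustrative):
 *
 *	echo <cache-set-uuid> > /sys/block/bcache0/bcache/attach
 *	echo writeback        > /sys/block/bcache0/bcache/cache_mode
 *	echo 10               > /sys/block/bcache0/bcache/writeback_percent
 */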
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * Reject setting it to 1 via sysfs if the writeback
			 * kthread has not been created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread",
				       dc->disk.disk->disk_name);
			}
		} else
			/*
			 * The writeback kthread itself checks whether
			 * dc->writeback_running is true or false.
			 */
			bch_writeback_queue(dc);
	}

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

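/*
 * KTYPE() (also from sysfs.h) ties the *_files[] array above together
 * with the generated show/store callbacks into a struct kobj_type, which
 * the registration code in super.c installs on the corresponding kobject.
 */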
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		/* Same off-by-one fix as in __bch_cached_dev's label show */
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes: %zu\n"
			"written sets: %zu\n"
			"unwritten sets: %zu\n"
			"written key bytes: %zu\n"
			"unwritten key bytes: %zu\n"
			"floats: %zu\n"
			"failed: %zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

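	/*
	 * Take a read lock on the current root node: the root may be
	 * replaced (e.g. by a split) while we sleep on the lock, so retry
	 * until the node we locked is still the root.
	 */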
571
572 do {
573 rw_unlock(false, b);
574lock_root:
575 b = c->root;
576 rw_lock(false, b, b->level);
577 } while (b != c->root);
578
579 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
580 bytes += bkey_bytes(k);
581
582 rw_unlock(false, b);
583
584 return (bytes * 100) / btree_bytes(c);
585}
586
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c));
	sysfs_hprint(block_size, block_bytes(c));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
	sysfs_printf(io_disable, "%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

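	/*
	 * Flash-only volumes are created by writing a human-readable size
	 * to flash_vol_create, e.g. (per Documentation/admin-guide/bcache.rst):
	 *
	 *	echo 128M > /sys/fs/bcache/<cset-uuid>/flash_vol_create
	 */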
	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
	sysfs_strtoul(verify, c->verify);
	sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);
	/*
	 * Writing gc_after_writeback here may overwrite an already-set
	 * BCH_DO_AUTO_GC flag; that is harmless, because the flag will
	 * simply be set again at the next opportunity.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

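/*
 * The "internal" kobject is a nested sysfs directory under the cache set;
 * its show/store callbacks above simply redirect to the parent cache
 * set's handlers, so both directories share one implementation.
 */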
static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	NULL
};
KTYPE(bch_cache_set_internal);

/* Sort in descending order: higher priorities come first */
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

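	/*
	 * priority_stats summarizes bucket state: it classifies every
	 * bucket by GC mark, then sorts the cached buckets' priorities to
	 * report an average and 31 quantiles. Reading it takes bucket_lock
	 * and walks all of nbuckets, so it is expensive on large caches.
	 */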
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		/* Trim trailing zero priorities: those buckets are unused */
		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		/* Skip buckets holding btree nodes (priority BTREE_PRIO) */
		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Clean: %zu%%\n"
				"Dirty: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		/* Drop the trailing space before the closing bracket */
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);