···
 	  Keeps all active closures in a linked list and provides a debugfs
 	  interface to list them, which makes it possible to see asynchronous
 	  operations that get stuck.
-
-# cgroup code needs to be updated:
-#
-#config CGROUP_BCACHE
-#	bool "Cgroup controls for bcache"
-#	depends on BCACHE && BLK_CGROUP
-#	---help---
-#	TODO
-4
drivers/md/bcache/btree.c
···
  * alloc_bucket() cannot fail. This should be true but is not completely
  * obvious.
  *
- * Make sure all allocations get charged to the root cgroup
- *
  * Plugging?
  *
  * If data write is less than hard sector size of ssd, round up offset in open
  * bucket to the next whole sector
- *
- * Also lookup by cgroup in get_open_bucket()
  *
  * Superblock needs to be fleshed out for multiple cache devices
  *
···
 #ifndef _BCACHE_REQUEST_H_
 #define _BCACHE_REQUEST_H_
 
-#include <linux/cgroup.h>
-
 struct data_insert_op {
 	struct closure		cl;
 	struct cache_set	*c;
···
 void bch_flash_dev_request_init(struct bcache_device *d);
 
 extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
-
-struct bch_cgroup {
-#ifdef CONFIG_CGROUP_BCACHE
-	struct cgroup_subsys_state	css;
-#endif
-	/*
-	 * We subtract one from the index into bch_cache_modes[], so that
-	 * default == -1; this makes it so the rest match up with d->cache_mode,
-	 * and we use d->cache_mode if cgrp->cache_mode < 0
-	 */
-	short				cache_mode;
-	bool				verify;
-	struct cache_stat_collector	stats;
-};
-
-struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
-
 #endif /* _BCACHE_REQUEST_H_ */