Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcache: Kill dead cgroup code

This hasn't been used or even enabled in ages.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>

-202
-8
drivers/md/bcache/Kconfig
··· 24 24 Keeps all active closures in a linked list and provides a debugfs 25 25 interface to list them, which makes it possible to see asynchronous 26 26 operations that get stuck. 27 - 28 - # cgroup code needs to be updated: 29 - # 30 - #config CGROUP_BCACHE 31 - # bool "Cgroup controls for bcache" 32 - # depends on BCACHE && BLK_CGROUP 33 - # ---help--- 34 - # TODO
-4
drivers/md/bcache/btree.c
··· 68 68 * alloc_bucket() cannot fail. This should be true but is not completely 69 69 * obvious. 70 70 * 71 - * Make sure all allocations get charged to the root cgroup 72 - * 73 71 * Plugging? 74 72 * 75 73 * If data write is less than hard sector size of ssd, round up offset in open 76 74 * bucket to the next whole sector 77 - * 78 - * Also lookup by cgroup in get_open_bucket() 79 75 * 80 76 * Superblock needs to be fleshed out for multiple cache devices 81 77 *
-169
drivers/md/bcache/request.c
··· 12 12 #include "request.h" 13 13 #include "writeback.h" 14 14 15 - #include <linux/cgroup.h> 16 15 #include <linux/module.h> 17 16 #include <linux/hash.h> 18 17 #include <linux/random.h> 19 - #include "blk-cgroup.h" 20 18 21 19 #include <trace/events/bcache.h> 22 20 ··· 25 27 26 28 static void bch_data_insert_start(struct closure *); 27 29 28 - /* Cgroup interface */ 29 - 30 - #ifdef CONFIG_CGROUP_BCACHE 31 - static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 }; 32 - 33 - static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup) 34 - { 35 - struct cgroup_subsys_state *css; 36 - return cgroup && 37 - (css = cgroup_subsys_state(cgroup, bcache_subsys_id)) 38 - ? container_of(css, struct bch_cgroup, css) 39 - : &bcache_default_cgroup; 40 - } 41 - 42 - struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio) 43 - { 44 - struct cgroup_subsys_state *css = bio->bi_css 45 - ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id) 46 - : task_subsys_state(current, bcache_subsys_id); 47 - 48 - return css 49 - ? 
container_of(css, struct bch_cgroup, css) 50 - : &bcache_default_cgroup; 51 - } 52 - 53 - static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft, 54 - struct file *file, 55 - char __user *buf, size_t nbytes, loff_t *ppos) 56 - { 57 - char tmp[1024]; 58 - int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes, 59 - cgroup_to_bcache(cgrp)->cache_mode + 1); 60 - 61 - if (len < 0) 62 - return len; 63 - 64 - return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); 65 - } 66 - 67 - static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft, 68 - const char *buf) 69 - { 70 - int v = bch_read_string_list(buf, bch_cache_modes); 71 - if (v < 0) 72 - return v; 73 - 74 - cgroup_to_bcache(cgrp)->cache_mode = v - 1; 75 - return 0; 76 - } 77 - 78 - static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft) 79 - { 80 - return cgroup_to_bcache(cgrp)->verify; 81 - } 82 - 83 - static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val) 84 - { 85 - cgroup_to_bcache(cgrp)->verify = val; 86 - return 0; 87 - } 88 - 89 - static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft) 90 - { 91 - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); 92 - return atomic_read(&bcachecg->stats.cache_hits); 93 - } 94 - 95 - static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft) 96 - { 97 - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); 98 - return atomic_read(&bcachecg->stats.cache_misses); 99 - } 100 - 101 - static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp, 102 - struct cftype *cft) 103 - { 104 - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); 105 - return atomic_read(&bcachecg->stats.cache_bypass_hits); 106 - } 107 - 108 - static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp, 109 - struct cftype *cft) 110 - { 111 - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); 112 - return atomic_read(&bcachecg->stats.cache_bypass_misses); 113 - } 114 - 115 - static struct cftype 
bch_files[] = { 116 - { 117 - .name = "cache_mode", 118 - .read = cache_mode_read, 119 - .write_string = cache_mode_write, 120 - }, 121 - { 122 - .name = "verify", 123 - .read_u64 = bch_verify_read, 124 - .write_u64 = bch_verify_write, 125 - }, 126 - { 127 - .name = "cache_hits", 128 - .read_u64 = bch_cache_hits_read, 129 - }, 130 - { 131 - .name = "cache_misses", 132 - .read_u64 = bch_cache_misses_read, 133 - }, 134 - { 135 - .name = "cache_bypass_hits", 136 - .read_u64 = bch_cache_bypass_hits_read, 137 - }, 138 - { 139 - .name = "cache_bypass_misses", 140 - .read_u64 = bch_cache_bypass_misses_read, 141 - }, 142 - { } /* terminate */ 143 - }; 144 - 145 - static void init_bch_cgroup(struct bch_cgroup *cg) 146 - { 147 - cg->cache_mode = -1; 148 - } 149 - 150 - static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup) 151 - { 152 - struct bch_cgroup *cg; 153 - 154 - cg = kzalloc(sizeof(*cg), GFP_KERNEL); 155 - if (!cg) 156 - return ERR_PTR(-ENOMEM); 157 - init_bch_cgroup(cg); 158 - return &cg->css; 159 - } 160 - 161 - static void bcachecg_destroy(struct cgroup *cgroup) 162 - { 163 - struct bch_cgroup *cg = cgroup_to_bcache(cgroup); 164 - kfree(cg); 165 - } 166 - 167 - struct cgroup_subsys bcache_subsys = { 168 - .create = bcachecg_create, 169 - .destroy = bcachecg_destroy, 170 - .subsys_id = bcache_subsys_id, 171 - .name = "bcache", 172 - .module = THIS_MODULE, 173 - }; 174 - EXPORT_SYMBOL_GPL(bcache_subsys); 175 - #endif 176 - 177 30 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio) 178 31 { 179 - #ifdef CONFIG_CGROUP_BCACHE 180 - int r = bch_bio_to_cgroup(bio)->cache_mode; 181 - if (r >= 0) 182 - return r; 183 - #endif 184 32 return BDEV_CACHE_MODE(&dc->sb); 185 33 } 186 34 187 35 static bool verify(struct cached_dev *dc, struct bio *bio) 188 36 { 189 - #ifdef CONFIG_CGROUP_BCACHE 190 - if (bch_bio_to_cgroup(bio)->verify) 191 - return true; 192 - #endif 193 37 return dc->verify; 194 38 } 195 39 ··· 1145 1305 1146 1306 void 
bch_request_exit(void) 1147 1307 { 1148 - #ifdef CONFIG_CGROUP_BCACHE 1149 - cgroup_unload_subsys(&bcache_subsys); 1150 - #endif 1151 1308 if (bch_search_cache) 1152 1309 kmem_cache_destroy(bch_search_cache); 1153 1310 } ··· 1155 1318 if (!bch_search_cache) 1156 1319 return -ENOMEM; 1157 1320 1158 - #ifdef CONFIG_CGROUP_BCACHE 1159 - cgroup_load_subsys(&bcache_subsys); 1160 - init_bch_cgroup(&bcache_default_cgroup); 1161 - 1162 - cgroup_add_cftypes(&bcache_subsys, bch_files); 1163 - #endif 1164 1321 return 0; 1165 1322 }
-18
drivers/md/bcache/request.h
··· 1 1 #ifndef _BCACHE_REQUEST_H_ 2 2 #define _BCACHE_REQUEST_H_ 3 3 4 - #include <linux/cgroup.h> 5 - 6 4 struct data_insert_op { 7 5 struct closure cl; 8 6 struct cache_set *c; ··· 39 41 void bch_flash_dev_request_init(struct bcache_device *d); 40 42 41 43 extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache; 42 - 43 - struct bch_cgroup { 44 - #ifdef CONFIG_CGROUP_BCACHE 45 - struct cgroup_subsys_state css; 46 - #endif 47 - /* 48 - * We subtract one from the index into bch_cache_modes[], so that 49 - * default == -1; this makes it so the rest match up with d->cache_mode, 50 - * and we use d->cache_mode if cgrp->cache_mode < 0 51 - */ 52 - short cache_mode; 53 - bool verify; 54 - struct cache_stat_collector stats; 55 - }; 56 - 57 - struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio); 58 44 59 45 #endif /* _BCACHE_REQUEST_H_ */
-3
drivers/md/bcache/stats.c
··· 201 201 struct cached_dev *dc = container_of(d, struct cached_dev, disk); 202 202 mark_cache_stats(&dc->accounting.collector, hit, bypass); 203 203 mark_cache_stats(&c->accounting.collector, hit, bypass); 204 - #ifdef CONFIG_CGROUP_BCACHE 205 - mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass); 206 - #endif 207 204 } 208 205 209 206 void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)