Compare changes

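This comparison is the patch that moves the bcache closure primitives into the core kernel library: drivers/md/bcache/closure.{c,h} become lib/closure.c and include/linux/closure.h, the code is gated behind a new CONFIG_CLOSURES symbol that BCACHE now selects, and the old CONFIG_BCACHE_CLOSURES_DEBUG option reappears as the lib-level CONFIG_DEBUG_CLOSURES.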

10 files changed: +43 -43
+1 -9
drivers/md/bcache/Kconfig
@@ -4,6 +4,7 @@
 	tristate "Block device as cache"
 	select BLOCK_HOLDER_DEPRECATED if SYSFS
 	select CRC64
+	select CLOSURES
 	help
 	  Allows a block device to be used as cache for other devices; uses
 	  a btree for indexing and the layout is optimized for SSDs.
@@ -19,15 +20,6 @@
 	  Enables extra debugging tools, allows expensive runtime checks to be
 	  turned on.
 
-config BCACHE_CLOSURES_DEBUG
-	bool "Debug closures"
-	depends on BCACHE
-	select DEBUG_FS
-	help
-	  Keeps all active closures in a linked list and provides a debugfs
-	  interface to list them, which makes it possible to see asynchronous
-	  operations that get stuck.
-
 config BCACHE_ASYNC_REGISTRATION
 	bool "Asynchronous device registration"
 	depends on BCACHE
+2 -2
drivers/md/bcache/Makefile
@@ -2,6 +2,6 @@
 
 obj-$(CONFIG_BCACHE)	+= bcache.o
 
-bcache-y		:= alloc.o bset.o btree.o closure.o debug.o extents.o\
-	io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
+bcache-y		:= alloc.o bset.o btree.o debug.o extents.o io.o\
+	journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
 	util.o writeback.o features.o
+1 -1
drivers/md/bcache/bcache.h
@@ -179,6 +179,7 @@
 #define pr_fmt(fmt) "bcache: %s() " fmt, __func__
 
 #include <linux/bio.h>
+#include <linux/closure.h>
 #include <linux/kobject.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -192,7 +193,6 @@
 #include "bcache_ondisk.h"
 #include "bset.h"
 #include "util.h"
-#include "closure.h"
 
 struct bucket {
 	atomic_t	pin;
-1
drivers/md/bcache/super.c
@@ -2905,7 +2905,6 @@
 		goto err;
 
 	bch_debug_init();
-	closure_debug_init();
 
 	bcache_is_reboot = false;
 
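The closure_debug_init() call disappears because debugfs registration no longer belongs to bcache: as the lib/closure.c diff at the end of this comparison shows, the library now registers its own "closures" debugfs file via late_initcall().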
+1 -2
drivers/md/bcache/util.h
@@ -4,6 +4,7 @@
 #define _BCACHE_UTIL_H
 
 #include <linux/blkdev.h>
+#include <linux/closure.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/sched/clock.h>
@@ -13,8 +14,6 @@
 #include <linux/workqueue.h>
 #include <linux/crc64.h>
 
-#include "closure.h"
-
 struct closure;
 
 #ifdef CONFIG_BCACHE_DEBUG
+8 -9
drivers/md/bcache/closure.h → include/linux/closure.h
@@ -155,7 +155,7 @@
 
 	atomic_t	remaining;
 
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
 #define CLOSURE_MAGIC_DEAD		0xc054dead
 #define CLOSURE_MAGIC_ALIVE		0xc054a11e
 
@@ -184,15 +184,13 @@
 	__closure_sync(cl);
 }
 
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
 
-void closure_debug_init(void);
 void closure_debug_create(struct closure *cl);
 void closure_debug_destroy(struct closure *cl);
 
 #else
 
-static inline void closure_debug_init(void) {}
 static inline void closure_debug_create(struct closure *cl) {}
 static inline void closure_debug_destroy(struct closure *cl) {}
 
@@ -200,21 +198,21 @@
 
 static inline void closure_set_ip(struct closure *cl)
 {
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
 	cl->ip = _THIS_IP_;
 #endif
 }
 
 static inline void closure_set_ret_ip(struct closure *cl)
 {
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
 	cl->ip = _RET_IP_;
 #endif
 }
 
 static inline void closure_set_waiting(struct closure *cl, unsigned long f)
 {
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
 	cl->waiting_on = f;
 #endif
 }
@@ -243,6 +241,7 @@
 	 */
 	BUILD_BUG_ON(offsetof(struct closure, fn)
 		     != offsetof(struct work_struct, func));
+
 	if (wq) {
 		INIT_WORK(&cl->work, cl->work.func);
 		BUG_ON(!queue_work(wq, &cl->work));
@@ -255,7 +254,7 @@
  */
 static inline void closure_get(struct closure *cl)
 {
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
 	BUG_ON((atomic_inc_return(&cl->remaining) &
 		CLOSURE_REMAINING_MASK) <= 1);
 #else
@@ -271,7 +270,7 @@
  */
 static inline void closure_init(struct closure *cl, struct closure *parent)
 {
-	memset(cl, 0, sizeof(struct closure));
+	cl->fn = NULL;
 	cl->parent = parent;
 	if (parent)
 		closure_get(parent);
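The header above is the whole user-facing API, so a sketch may help orient readers new to closures. The following is a hypothetical user of the now-library interface, not code from the patch (my_op, my_op_done, and the immediate-completion stub are invented for illustration; op is assumed kmalloc()ed by the caller and freed by the continuation): closure_init() starts the refcount at one, each in-flight sub-operation holds a closure_get() reference, and continue_at() drops the initial reference and arranges for the continuation to run once the count reaches zero.

```c
#include <linux/closure.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical async operation built on closures; not from the patch. */
struct my_op {
	struct closure	cl;
	int		status;
};

static void my_op_done(struct closure *cl)
{
	struct my_op *op = container_of(cl, struct my_op, cl);

	/* Runs on system_wq once every reference has been put. */
	pr_info("my_op complete, status %d\n", op->status);
	kfree(op);
}

/* Stand-in for a real async submission; here it completes immediately. */
static void my_sub_op_submit(struct my_op *op)
{
	op->status = 0;
	closure_put(&op->cl);	/* completion drops the sub-op's reference */
}

static void my_op_start(struct my_op *op)
{
	int i;

	closure_init(&op->cl, NULL);		/* refcount starts at 1 */

	for (i = 0; i < 4; i++) {
		closure_get(&op->cl);		/* one reference per sub-op */
		my_sub_op_submit(op);
	}

	/* Drop the initial ref; my_op_done() runs when the count hits 0. */
	continue_at(&op->cl, my_op_done, system_wq);
}
```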
+3
lib/Kconfig
@@ -506,6 +506,9 @@
 
 	  for more information.
 
+config CLOSURES
+	bool
+
 config HAS_IOMEM
 	bool
 	depends on !NO_IOMEM
+9
lib/Kconfig.debug
@@ -1720,6 +1720,15 @@
 	  This is a relatively cheap check but if you care about maximum
 	  performance, say N.
 
+config DEBUG_CLOSURES
+	bool "Debug closures (bcache async widgits)"
+	depends on CLOSURES
+	select DEBUG_FS
+	help
+	  Keeps all active closures in a linked list and provides a debugfs
+	  interface to list them, which makes it possible to see asynchronous
+	  operations that get stuck.
+
 config DEBUG_MAPLE_TREE
 	bool "Debug maple trees"
 	depends on DEBUG_KERNEL
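This is the old BCACHE_CLOSURES_DEBUG option re-homed: the help text is carried over unchanged, and the dependency moves from BCACHE to the new CLOSURES symbol. As the lib/closure.c diff below shows, the "closures" file it provides is now registered against the debugfs root, so it typically appears as /sys/kernel/debug/closures rather than under bcache's debugfs directory.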
+2
lib/Makefile
@@ -255,6 +255,8 @@
 
 obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
+obj-$(CONFIG_CLOSURES) += closure.o
+
 obj-$(CONFIG_DQL) += dynamic_queue_limits.o
 
 obj-$(CONFIG_GLOB) += glob.o
+16 -19
drivers/md/bcache/closure.c → lib/closure.c
@@ -6,13 +6,12 @@
  * Copyright 2012 Google, Inc.
  */
 
+#include <linux/closure.h>
 #include <linux/debugfs.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>
 
-#include "closure.h"
-
 static inline void closure_put_after_sub(struct closure *cl, int flags)
 {
 	int r = flags & CLOSURE_REMAINING_MASK;
@@ -45,6 +44,7 @@
 {
 	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
 }
+EXPORT_SYMBOL(closure_sub);
 
 /*
  * closure_put - decrement a closure's refcount
@@ -53,6 +53,7 @@
 {
 	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
 }
+EXPORT_SYMBOL(closure_put);
 
 /*
  * closure_wake_up - wake up all closures on a wait list, without memory barrier
@@ -74,6 +75,7 @@
 		closure_sub(cl, CLOSURE_WAITING + 1);
 	}
 }
+EXPORT_SYMBOL(__closure_wake_up);
 
 /**
  * closure_wait - add a closure to a waitlist
@@ -93,6 +95,7 @@
 
 	return true;
 }
+EXPORT_SYMBOL(closure_wait);
 
 struct closure_syncer {
 	struct task_struct	*task;
@@ -127,8 +130,9 @@
 
 	__set_current_state(TASK_RUNNING);
 }
+EXPORT_SYMBOL(__closure_sync);
 
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
 
 static LIST_HEAD(closure_list);
 static DEFINE_SPINLOCK(closure_list_lock);
@@ -144,6 +148,7 @@
 	list_add(&cl->all, &closure_list);
 	spin_unlock_irqrestore(&closure_list_lock, flags);
 }
+EXPORT_SYMBOL(closure_debug_create);
 
 void closure_debug_destroy(struct closure *cl)
 {
@@ -156,8 +161,7 @@
 	list_del(&cl->all);
 	spin_unlock_irqrestore(&closure_list_lock, flags);
 }
-
-static struct dentry *closure_debug;
+EXPORT_SYMBOL(closure_debug_destroy);
 
 static int debug_show(struct seq_file *f, void *data)
 {
@@ -181,7 +185,7 @@
 			seq_printf(f, " W %pS\n",
 				   (void *) cl->waiting_on);
 
-		seq_printf(f, "\n");
+		seq_puts(f, "\n");
 	}
 
 	spin_unlock_irq(&closure_list_lock);
@@ -190,18 +194,11 @@
 
 DEFINE_SHOW_ATTRIBUTE(debug);
 
-void __init closure_debug_init(void)
+static int __init closure_debug_init(void)
 {
-	if (!IS_ERR_OR_NULL(bcache_debug))
-		/*
-		 * it is unnecessary to check return value of
-		 * debugfs_create_file(), we should not care
-		 * about this.
-		 */
-		closure_debug = debugfs_create_file(
-			"closures", 0400, bcache_debug, NULL, &debug_fops);
+	debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
+	return 0;
 }
-#endif
+late_initcall(closure_debug_init)
 
-MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
-MODULE_LICENSE("GPL");
+#endif
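The EXPORT_SYMBOL() additions above are what make the move useful to modules: with closure.o no longer linked into bcache.o, modular users reach these functions through the symbol table (hence also the module.h → export.h include swap). For the waitlist half of the API, a minimal sketch of the usual pattern, with invented names (my_waitlist, my_wait_for_event, my_post_event) that are not part of the patch; the condition-recheck loop real users wrap around closure_wait() is omitted for brevity:

```c
#include <linux/closure.h>

/* Hypothetical waitlist shared between a waiter and a waker. */
static struct closure_waitlist my_waitlist;

static void my_wait_for_event(void)
{
	struct closure cl;

	closure_init_stack(&cl);

	/* Park on the list; the eventual wakeup drops the ref taken here. */
	closure_wait(&my_waitlist, &cl);

	/* Block until the waitlist is woken and all references are put. */
	closure_sync(&cl);
}

static void my_post_event(void)
{
	/* Wakes every closure currently parked on the list. */
	closure_wake_up(&my_waitlist);
}
```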