Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

failslab: add ability to filter slab caches

This patch allows injecting faults only for specific slab caches.
In order to preserve the default behavior, the cache filter is off by
default (all caches are faulty).

One may define a specific set of slab caches like this:
# mark skbuff_head_cache as faulty
echo 1 > /sys/kernel/slab/skbuff_head_cache/failslab
# Turn on cache filter (off by default)
echo 1 > /sys/kernel/debug/failslab/cache-filter
# Turn on fault injection
echo 1 > /sys/kernel/debug/failslab/times
echo 1 > /sys/kernel/debug/failslab/probability

Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Akinobu Mita <akinobu.mita@gmail.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>

authored by

Dmitry Monakhov and committed by
Pekka Enberg
4c13dd3b 60b341b7

+52 -8
+1
Documentation/vm/slub.txt
··· 41 41 P Poisoning (object and padding) 42 42 U User tracking (free and alloc) 43 43 T Trace (please only use on single slabs) 44 + A Toggle failslab filter mark for the cache 44 45 O Switch debugging off for caches that would have 45 46 caused higher minimum slab orders 46 47 - Switch all debugging off (useful if the kernel is
+3 -2
include/linux/fault-inject.h
··· 82 82 #endif /* CONFIG_FAULT_INJECTION */ 83 83 84 84 #ifdef CONFIG_FAILSLAB 85 - extern bool should_failslab(size_t size, gfp_t gfpflags); 85 + extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags); 86 86 #else 87 - static inline bool should_failslab(size_t size, gfp_t gfpflags) 87 + static inline bool should_failslab(size_t size, gfp_t gfpflags, 88 + unsigned long flags) 88 89 { 89 90 return false; 90 91 }
+5
include/linux/slab.h
··· 70 70 #else 71 71 # define SLAB_NOTRACK 0x00000000UL 72 72 #endif 73 + #ifdef CONFIG_FAILSLAB 74 + # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ 75 + #else 76 + # define SLAB_FAILSLAB 0x00000000UL 77 + #endif 73 78 74 79 /* The following flags affect the page allocator grouping pages by mobility */ 75 80 #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
+15 -3
mm/failslab.c
··· 1 1 #include <linux/fault-inject.h> 2 2 #include <linux/gfp.h> 3 + #include <linux/slab.h> 3 4 4 5 static struct { 5 6 struct fault_attr attr; 6 7 u32 ignore_gfp_wait; 8 + int cache_filter; 7 9 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 8 10 struct dentry *ignore_gfp_wait_file; 11 + struct dentry *cache_filter_file; 9 12 #endif 10 13 } failslab = { 11 14 .attr = FAULT_ATTR_INITIALIZER, 12 15 .ignore_gfp_wait = 1, 16 + .cache_filter = 0, 13 17 }; 14 18 15 - bool should_failslab(size_t size, gfp_t gfpflags) 19 + bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags) 16 20 { 17 21 if (gfpflags & __GFP_NOFAIL) 18 22 return false; 19 23 20 24 if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT)) 25 + return false; 26 + 27 + if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB)) 21 28 return false; 22 29 23 30 return should_fail(&failslab.attr, size); ··· 37 30 __setup("failslab=", setup_failslab); 38 31 39 32 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 40 - 41 33 static int __init failslab_debugfs_init(void) 42 34 { 43 35 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; ··· 52 46 debugfs_create_bool("ignore-gfp-wait", mode, dir, 53 47 &failslab.ignore_gfp_wait); 54 48 55 - if (!failslab.ignore_gfp_wait_file) { 49 + failslab.cache_filter_file = 50 + debugfs_create_bool("cache-filter", mode, dir, 51 + &failslab.cache_filter); 52 + 53 + if (!failslab.ignore_gfp_wait_file || 54 + !failslab.cache_filter_file) { 56 55 err = -ENOMEM; 56 + debugfs_remove(failslab.cache_filter_file); 57 57 debugfs_remove(failslab.ignore_gfp_wait_file); 58 58 cleanup_fault_attr_dentries(&failslab.attr); 59 59 }
+1 -1
mm/slab.c
··· 3101 3101 if (cachep == &cache_cache) 3102 3102 return false; 3103 3103 3104 - return should_failslab(obj_size(cachep), flags); 3104 + return should_failslab(obj_size(cachep), flags, cachep->flags); 3105 3105 } 3106 3106 3107 3107 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+27 -2
mm/slub.c
··· 151 151 * Set of flags that will prevent slab merging 152 152 */ 153 153 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ 154 - SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE) 154 + SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 155 + SLAB_FAILSLAB) 155 156 156 157 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 157 158 SLAB_CACHE_DMA | SLAB_NOTRACK) ··· 1021 1020 case 't': 1022 1021 slub_debug |= SLAB_TRACE; 1023 1022 break; 1023 + case 'a': 1024 + slub_debug |= SLAB_FAILSLAB; 1025 + break; 1024 1026 default: 1025 1027 printk(KERN_ERR "slub_debug option '%c' " 1026 1028 "unknown. skipped\n", *str); ··· 1722 1718 lockdep_trace_alloc(gfpflags); 1723 1719 might_sleep_if(gfpflags & __GFP_WAIT); 1724 1720 1725 - if (should_failslab(s->objsize, gfpflags)) 1721 + if (should_failslab(s->objsize, gfpflags, s->flags)) 1726 1722 return NULL; 1727 1723 1728 1724 local_irq_save(flags); ··· 4175 4171 } 4176 4172 SLAB_ATTR(trace); 4177 4173 4174 + #ifdef CONFIG_FAILSLAB 4175 + static ssize_t failslab_show(struct kmem_cache *s, char *buf) 4176 + { 4177 + return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 4178 + } 4179 + 4180 + static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 4181 + size_t length) 4182 + { 4183 + s->flags &= ~SLAB_FAILSLAB; 4184 + if (buf[0] == '1') 4185 + s->flags |= SLAB_FAILSLAB; 4186 + return length; 4187 + } 4188 + SLAB_ATTR(failslab); 4189 + #endif 4190 + 4178 4191 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 4179 4192 { 4180 4193 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); ··· 4488 4467 &deactivate_remote_frees_attr.attr, 4489 4468 &order_fallback_attr.attr, 4490 4469 #endif 4470 + #ifdef CONFIG_FAILSLAB 4471 + &failslab_attr.attr, 4472 + #endif 4473 + 4491 4474 NULL 4492 4475 }; 4493 4476