BUG_ON() Conversion in mm/slab.c

This changes if () BUG(); constructs to BUG_ON(), which is
cleaner, contains unlikely(), and can be better optimized away.
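
For reference, a minimal sketch of the pattern being applied. The BUG_ON() definition below is modeled on the generic fallback in include/asm-generic/bug.h; architectures may override it, so treat it as illustrative rather than the exact definition in every tree:

    /* Generic fallback definition: the condition is wrapped in
     * unlikely(), so the compiler lays out the failure path as cold. */
    #define BUG_ON(condition) do { \
            if (unlikely((condition) != 0)) \
                    BUG(); \
    } while (0)

    /* Before: open-coded check, no branch-prediction hint. */
    if (!cachep || in_interrupt())
            BUG();

    /* After: one statement; reads as an assertion and inherits
     * the unlikely() hint from the macro. */
    BUG_ON(!cachep || in_interrupt());

Because the check lives in one macro, configurations that disable BUG support can also compile the whole construct away in a single place, which is what the changelog means by "better optimized away".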

Signed-off-by: Eric Sesterhenn <snakebyte@gmx.de>
Signed-off-by: Adrian Bunk <bunk@stusta.de>

Authored by Eric Sesterhenn, committed by Adrian Bunk.
Commit 40094fa6, parent 75babcac

+6 -12
mm/slab.c

@@ -1297,8 +1297,7 @@
 		if (cache_cache.num)
 			break;
 	}
-	if (!cache_cache.num)
-		BUG();
+	BUG_ON(!cache_cache.num);
 	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1973,8 +1974,7 @@
 	 * Always checks flags, a caller might be expecting debug support which
 	 * isn't available.
 	 */
-	if (flags & ~CREATE_MASK)
-		BUG();
+	BUG_ON(flags & ~CREATE_MASK);
 
 	/*
 	 * Check that size is in terms of words. This is needed to avoid
@@ -2204,8 +2206,7 @@
 
 		slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-		if (slabp->inuse)
-			BUG();
+		BUG_ON(slabp->inuse);
 #endif
 		list_del(&slabp->list);
 
@@ -2245,8 +2248,7 @@
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	return __cache_shrink(cachep);
 }
@@ -2273,8 +2277,7 @@
 	int i;
 	struct kmem_list3 *l3;
 
-	if (!cachep || in_interrupt())
-		BUG();
+	BUG_ON(!cachep || in_interrupt());
 
 	/* Don't let CPUs to come and go */
 	lock_cpu_hotplug();
@@ -2472,8 +2477,7 @@
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-		BUG();
+	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
 	if (flags & SLAB_NO_GROW)
 		return 0;
 