SLUB: Fix coding style violations

This fixes most of the obvious coding style violations in mm/slub.c as
reported by checkpatch.

Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Christoph Lameter <clameter@sgi.com>

Authored by Pekka Enberg and committed by Christoph Lameter (06428780 7c2e132c)

+23 -23
mm/slub.c
···
 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
-		printk(" %02x", addr[i]);
+		printk(KERN_CONT " %02x", addr[i]);
 		offset = i % 16;
 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
 		if (offset == 15) {
-			printk(" %s\n",ascii);
+			printk(KERN_CONT " %s\n", ascii);
 			newline = 1;
 		}
 	}
 	if (!newline) {
 		i %= 16;
 		while (i < 16) {
-			printk("   ");
+			printk(KERN_CONT "   ");
 			ascii[i] = ' ';
 			i++;
 		}
-		printk(" %s\n", ascii);
+		printk(KERN_CONT " %s\n", ascii);
 	}
 }
 
···
 
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize -1] = POISON_END;
+		p[s->objsize - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
···
 
 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 			u8 *object, char *what,
-			u8* start, unsigned int value, unsigned int bytes)
+			u8 *start, unsigned int value, unsigned int bytes)
 {
 	u8 *fault;
 	u8 *end;
···
 		(!check_bytes_and_report(s, page, p, "Poison", p,
 				POISON_FREE, s->objsize - 1) ||
 		 !check_bytes_and_report(s, page, p, "Poison",
-			p + s->objsize -1, POISON_END, 1)))
+			p + s->objsize - 1, POISON_END, 1)))
 		return 0;
 	/*
 	 * check_pad_bytes cleans up on its own.
···
 				"SLUB <none>: no slab for object 0x%p.\n",
 						object);
 			dump_stack();
-		}
-		else
+		} else
 			object_err(s, page, object,
 					"page slab pointer corrupt.");
 		goto fail;
···
 	/*
 	 * Determine which debug features should be switched on
 	 */
-	for ( ;*str && *str != ','; str++) {
+	for (; *str && *str != ','; str++) {
 		switch (tolower(*str)) {
 		case 'f':
 			slub_debug |= SLAB_DEBUG_FREE;
···
 			break;
 		default:
 			printk(KERN_ERR "slub_debug option '%c' "
-				"unknown. skipped\n",*str);
+				"unknown. skipped\n", *str);
 		}
 	}
 
···
  */
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page * page;
+	struct page *page;
 	int pages = 1 << s->order;
 
 	if (s->order)
···
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
+		-pages);
 
 	__free_pages(page, s->order);
 }
···
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
···
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
  */
-static void __always_inline slab_free(struct kmem_cache *s,
+static __always_inline void slab_free(struct kmem_cache *s,
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
···
  */
 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
 {
-	struct page * page;
+	struct page *page;
 
 	page = get_object_page(object);
 
···
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option (&str, &slub_min_order);
+	get_option(&str, &slub_min_order);
 
 	return 1;
 }
···
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option (&str, &slub_max_order);
+	get_option(&str, &slub_max_order);
 
 	return 1;
 }
···
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option (&str, &slub_min_objects);
+	get_option(&str, &slub_min_objects);
 
 	return 1;
 }
···
 		 * Check if alignment is compatible.
 		 * Courtesy of Adrian Drzewiecki
 		 */
-		if ((s->size & ~(align -1)) != s->size)
+		if ((s->size & ~(align - 1)) != s->size)
 			continue;
 
 		if (s->size - size >= sizeof(void *))
···
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata slab_notifier =
-	{ &slab_cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata slab_notifier = {
+	&slab_cpuup_callback, NULL, 0
+};
 
 #endif
 
···
 SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
-static struct attribute * slab_attrs[] = {
+static struct attribute *slab_attrs[] = {
 	&slab_size_attr.attr,
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
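Not part of the patch itself: the warnings referenced in the changelog come from the kernel's scripts/checkpatch.pl script, typically run against an existing file as "scripts/checkpatch.pl --file mm/slub.c". As a rough illustration of the spacing rules the hunks above enforce (the pointer '*' binds to the name, binary operators take a space on each side, no space before an argument list, a space after each comma), here is a minimal stand-alone C sketch with made-up names; it is not code from mm/slub.c.

#include <stdio.h>

/* "struct foo *p", not "struct foo * p": the '*' binds to the name. */
struct foo {
	int order;
};

/* "align - 1", not "align -1": binary operators get a space on each side. */
static int align_down(int size, int align)
{
	return size & ~(align - 1);
}

int main(void)
{
	struct foo f = { .order = 3 };
	struct foo *p = &f;

	/* No space before the argument list, a space after each comma. */
	printf("%d\n", align_down(100 + p->order, 8));
	return 0;
}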