SLUB: Fix coding style violations

This fixes most of the obvious coding style violations in mm/slub.c as
reported by checkpatch.
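(Aside, not part of the original changelog: checkpatch can be run over a whole
file with, e.g., scripts/checkpatch.pl -f mm/slub.c from the top of the kernel
tree, which reports warnings of the kinds fixed below.)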

Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Christoph Lameter <clameter@sgi.com>

Authored by Pekka Enberg, committed by Christoph Lameter (commit 06428780, parent 7c2e132c)

 mm/slub.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -357,21 +357,21 @@
 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
-		printk(" %02x", addr[i]);
+		printk(KERN_CONT " %02x", addr[i]);
 		offset = i % 16;
 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
 		if (offset == 15) {
-			printk(" %s\n",ascii);
+			printk(KERN_CONT " %s\n", ascii);
 			newline = 1;
 		}
 	}
 	if (!newline) {
 		i %= 16;
 		while (i < 16) {
-			printk("   ");
+			printk(KERN_CONT "   ");
 			ascii[i] = ' ';
 			i++;
 		}
-		printk(" %s\n", ascii);
+		printk(KERN_CONT " %s\n", ascii);
 	}
 }
@@ -532,7 +532,7 @@
 
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize -1] = POISON_END;
+		p[s->objsize - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
@@ -561,7 +561,7 @@
 
 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 				u8 *object, char *what,
-				u8* start, unsigned int value, unsigned int bytes)
+				u8 *start, unsigned int value, unsigned int bytes)
 {
 	u8 *fault;
 	u8 *end;
@@ -695,7 +695,7 @@
 		(!check_bytes_and_report(s, page, p, "Poison", p,
 			POISON_FREE, s->objsize - 1) ||
 		 !check_bytes_and_report(s, page, p, "Poison",
-			p + s->objsize -1, POISON_END, 1)))
+			p + s->objsize - 1, POISON_END, 1)))
 		return 0;
 	/*
 	 * check_pad_bytes cleans up on its own.
@@ -903,8 +903,7 @@
 				"SLUB <none>: no slab for object 0x%p.\n",
 						object);
 			dump_stack();
-		}
-		else
+		} else
 			object_err(s, page, object,
 					"page slab pointer corrupt.");
 		goto fail;
@@ -949,7 +950,7 @@
 	/*
 	 * Determine which debug features should be switched on
 	 */
-	for ( ;*str && *str != ','; str++) {
+	for (; *str && *str != ','; str++) {
 		switch (tolower(*str)) {
 		case 'f':
 			slub_debug |= SLAB_DEBUG_FREE;
@@ -968,7 +969,7 @@
 			break;
 		default:
 			printk(KERN_ERR "slub_debug option '%c' "
-				"unknown. skipped\n",*str);
+				"unknown. skipped\n", *str);
 		}
 	}
 
@@ -1041,7 +1042,7 @@
  */
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page * page;
+	struct page *page;
 	int pages = 1 << s->order;
 
 	if (s->order)
@@ -1137,7 +1138,7 @@
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
+		-pages);
 
 	__free_pages(page, s->order);
 }
@@ -1541,7 +1542,7 @@
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
@@ -1649,7 +1650,7 @@
  * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
-static void __always_inline slab_free(struct kmem_cache *s,
+static __always_inline void slab_free(struct kmem_cache *s,
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
@@ -2230,7 +2231,7 @@
  */
 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
 {
-	struct page * page;
+	struct page *page;
 
 	page = get_object_page(object);
 
@@ -2342,7 +2343,7 @@
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option (&str, &slub_min_order);
+	get_option(&str, &slub_min_order);
 
 	return 1;
 }
@@ -2351,7 +2352,7 @@
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option (&str, &slub_max_order);
+	get_option(&str, &slub_max_order);
 
 	return 1;
 }
@@ -2360,7 +2361,7 @@
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option (&str, &slub_min_objects);
+	get_option(&str, &slub_min_objects);
 
 	return 1;
 }
@@ -2945,7 +2946,7 @@
 		 * Check if alignment is compatible.
 		 * Courtesy of Adrian Drzewiecki
 		 */
-		if ((s->size & ~(align -1)) != s->size)
+		if ((s->size & ~(align - 1)) != s->size)
 			continue;
 
 		if (s->size - size >= sizeof(void *))
@@ -3054,8 +3055,9 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata slab_notifier =
-	{ &slab_cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata slab_notifier = {
+	&slab_cpuup_callback, NULL, 0
+};
 
 #endif
 
@@ -3864,7 +3864,7 @@
 SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
-static struct attribute * slab_attrs[] = {
+static struct attribute *slab_attrs[] = {
 	&slab_size_attr.attr,
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
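
Two notes on the hunks above (a reviewer's gloss, not part of the patch
itself). First, the KERN_CONT annotations are more than whitespace: they mark
printk() calls that continue the current console line rather than starting a
new message. Second, the slab_alloc()/slab_free() hunks fix attribute
placement: __always_inline belongs between the storage class and the return
type, whereas the old "static void __always_inline *slab_alloc(...)" splits
the "void *" return type around the attribute. A minimal standalone sketch of
the preferred form (the macro is redefined here only so the snippet compiles
outside the kernel tree):

	#include <stddef.h>

	/*
	 * Stand-in for the kernel's definition in <linux/compiler.h>,
	 * repeated so this builds as ordinary userspace C.
	 */
	#define __always_inline inline __attribute__((always_inline))

	/* Preferred: storage class, then attribute, then full return type. */
	static __always_inline void *example_alloc(void)
	{
		return NULL;
	}

	int main(void)
	{
		return example_alloc() == NULL ? 0 : 1;
	}

gcc accepts both orderings, so this part of the change is about readability
and checkpatch compliance rather than behavior.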