Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] Whitespace and CodingStyle cleanup for lib/idr.c

Cleanup trailing whitespace, blank lines, CodingStyle issues etc, for
lib/idr.c

Signed-off-by: Jesper Juhl <jesper.juhl@gmail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Jesper Juhl; committed by Linus Torvalds.
e15ae2dd 850b9247

+17 -18
+17 -18
lib/idr.c
··· 6 6 * Modified by George Anzinger to reuse immediately and to use 7 7 * find bit instructions. Also removed _irq on spinlocks. 8 8 * 9 - * Small id to pointer translation service. 9 + * Small id to pointer translation service. 10 10 * 11 - * It uses a radix tree like structure as a sparse array indexed 11 + * It uses a radix tree like structure as a sparse array indexed 12 12 * by the id to obtain the pointer. The bitmap makes allocating 13 - * a new id quick. 13 + * a new id quick. 14 14 * 15 15 * You call it to allocate an id (an int) an associate with that id a 16 16 * pointer or what ever, we treat it as a (void *). You can pass this 17 17 * id to a user for him to pass back at a later time. You then pass 18 18 * that id to this code and it returns your pointer. 19 19 20 - * You can release ids at any time. When all ids are released, most of 20 + * You can release ids at any time. When all ids are released, most of 21 21 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we 22 - * don't need to go to the memory "store" during an id allocate, just 22 + * don't need to go to the memory "store" during an id allocate, just 23 23 * so you don't need to be too concerned about locking and conflicts 24 24 * with the slab allocator. 25 25 */ ··· 77 77 while (idp->id_free_cnt < IDR_FREE_MAX) { 78 78 struct idr_layer *new; 79 79 new = kmem_cache_alloc(idr_layer_cache, gfp_mask); 80 - if(new == NULL) 80 + if (new == NULL) 81 81 return (0); 82 82 free_layer(idp, new); 83 83 } ··· 107 107 if (m == IDR_SIZE) { 108 108 /* no space available go back to previous layer. 
*/ 109 109 l++; 110 - id = (id | ((1 << (IDR_BITS*l))-1)) + 1; 110 + id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; 111 111 if (!(p = pa[l])) { 112 112 *starting_id = id; 113 113 return -2; ··· 161 161 { 162 162 struct idr_layer *p, *new; 163 163 int layers, v, id; 164 - 164 + 165 165 id = starting_id; 166 166 build_up: 167 167 p = idp->top; ··· 225 225 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) 226 226 { 227 227 int rv; 228 + 228 229 rv = idr_get_new_above_int(idp, ptr, starting_id); 229 230 /* 230 231 * This is a cheap hack until the IDR code can be fixed to ··· 260 259 int idr_get_new(struct idr *idp, void *ptr, int *id) 261 260 { 262 261 int rv; 262 + 263 263 rv = idr_get_new_above_int(idp, ptr, 0); 264 264 /* 265 265 * This is a cheap hack until the IDR code can be fixed to ··· 308 306 free_layer(idp, **paa); 309 307 **paa-- = NULL; 310 308 } 311 - if ( ! *paa ) 309 + if (!*paa) 312 310 idp->layers = 0; 313 - } else { 311 + } else 314 312 idr_remove_warning(id); 315 - } 316 313 } 317 314 318 315 /** ··· 327 326 id &= MAX_ID_MASK; 328 327 329 328 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 330 - if ( idp->top && idp->top->count == 1 && 331 - (idp->layers > 1) && 332 - idp->top->ary[0]){ // We can drop a layer 329 + if (idp->top && idp->top->count == 1 && (idp->layers > 1) && 330 + idp->top->ary[0]) { // We can drop a layer 333 331 334 332 p = idp->top->ary[0]; 335 333 idp->top->bitmap = idp->top->count = 0; ··· 337 337 --idp->layers; 338 338 } 339 339 while (idp->id_free_cnt >= IDR_FREE_MAX) { 340 - 341 340 p = alloc_layer(idp); 342 341 kmem_cache_free(idr_layer_cache, p); 343 342 return; ··· 390 391 } 391 392 EXPORT_SYMBOL(idr_find); 392 393 393 - static void idr_cache_ctor(void * idr_layer, 394 - kmem_cache_t *idr_layer_cache, unsigned long flags) 394 + static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache, 395 + unsigned long flags) 395 396 { 396 397 memset(idr_layer, 0, sizeof(struct idr_layer)); 
397 398 } ··· 399 400 static int init_id_cache(void) 400 401 { 401 402 if (!idr_layer_cache) 402 - idr_layer_cache = kmem_cache_create("idr_layer_cache", 403 + idr_layer_cache = kmem_cache_create("idr_layer_cache", 403 404 sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL); 404 405 return 0; 405 406 }