lib/idr.c: fix rcu related race with idr_find

2nd part of the fixes needed for
http://bugzilla.kernel.org/show_bug.cgi?id=11796.

When the idr tree was grown or shrunk, the updates to the number of
layers and to the top pointer were not atomic. This race caused crashes.
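
For illustration, a simplified view of the pre-patch reader, assembled from the
lines removed in the diff below (bounds checking omitted); the two separate
reads are the window the race slips through:

/*
 * Simplified pre-patch idr_find(), not the literal kernel code.
 */
void *idr_find(struct idr *idp, int id)
{
	int n = idp->layers * IDR_BITS;				/* read #1: tree depth */
	struct idr_layer *p = rcu_dereference(idp->top);	/* read #2: tree root  */

	/*
	 * If a concurrent grow or shrink runs between the two reads, n no
	 * longer matches the tree hanging off p: the walk either stops early
	 * and returns an interior idr_layer as if it were a stored pointer,
	 * or it descends past a leaf and dereferences a user pointer as an
	 * idr_layer, which can crash.
	 */
	while (n > 0 && p) {
		n -= IDR_BITS;
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}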

This patch fixes that by replicating the layer count in each idr_layer,
so idr_find no longer needs idp->layers.
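
The ordering that makes this work: the depth travels in the idr_layer itself
and is set before the node is published. A condensed sketch of the grow path
(based on the third lib/idr.c hunk in the diff below; the final publication of
the new root is pre-existing code that falls outside the hunks shown):

	new->ary[0] = p;		/* old root becomes child 0 of the new root */
	new->count = 1;
	new->layer = layers - 1;	/* depth stored in the node itself ...      */
	...
	rcu_assign_pointer(idp->top, new);	/* ... and published together with it */

Because rcu_assign_pointer() orders the ->layer store before the pointer
becomes visible, any reader that sees the new top through rcu_dereference()
also sees a depth that matches it.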

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Clement Calmels <cboulte@gmail.com>
Cc: Nadia Derbey <Nadia.Derbey@bull.net>
Cc: Pierre Peiffer <peifferp@gmail.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Manfred Spraul, committed by Linus Torvalds (6ff2d39b, 1d678f36)

 include/linux/idr.h |  +2  -1
 lib/idr.c           | +12  -2
 2 files changed, 14 insertions(+), 3 deletions(-)

--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -52,13 +52,14 @@
 	unsigned long	 bitmap; /* A zero bit means "space here" */
 	struct idr_layer *ary[1<<IDR_BITS];
 	int		 count;	 /* When zero, we can release it */
+	int		 layer;	 /* distance from leaf */
 	struct rcu_head	 rcu_head;
 };
 
 struct idr {
 	struct idr_layer *top;
 	struct idr_layer *id_free;
-	int		  layers;
+	int		  layers; /* only valid without concurrent changes */
 	int		  id_free_cnt;
 	spinlock_t	  lock;
 };

--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -237,6 +239,7 @@
 		}
 		new->ary[0] = p;
 		new->count = 1;
+		new->layer = layers-1;
 		if (p->bitmap == IDR_FULL)
 			__set_bit(0, &new->bitmap);
 		p = new;
@@ -493,17 +496,21 @@
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +589,11 @@
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
 
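
For context, a minimal reader-side usage sketch under the guarantee this series
provides (idr_find() callable under rcu_read_lock() while writers grow or
shrink the tree); the idr and object names here are made up for illustration:

#include <linux/idr.h>
#include <linux/rcupdate.h>

struct example_obj;			/* hypothetical object type */
static DEFINE_IDR(example_idr);		/* hypothetical IDR instance */

static struct example_obj *example_lookup(int id)
{
	struct example_obj *obj;

	rcu_read_lock();
	/*
	 * idr_find() can run concurrently with idr_get_new()/idr_remove()
	 * callers that grow or shrink the tree; this patch makes that lookup
	 * safe.  A real caller must take its own reference on obj (or copy
	 * what it needs) before dropping the RCU read lock.
	 */
	obj = idr_find(&example_idr, id);
	rcu_read_unlock();

	return obj;
}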