Merge branch 'core/debugobjects' into core/urgent

+6 -9
lib/debugobjects.c
··· 68 68 { 69 69 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; 70 70 struct debug_obj *new; 71 + unsigned long flags; 71 72 72 73 if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) 73 74 return obj_pool_free; ··· 82 81 if (!new) 83 82 return obj_pool_free; 84 83 85 - spin_lock(&pool_lock); 84 + spin_lock_irqsave(&pool_lock, flags); 86 85 hlist_add_head(&new->node, &obj_pool); 87 86 obj_pool_free++; 88 - spin_unlock(&pool_lock); 87 + spin_unlock_irqrestore(&pool_lock, flags); 89 88 } 90 89 return obj_pool_free; 91 90 } ··· 111 110 } 112 111 113 112 /* 114 - * Allocate a new object. If the pool is empty and no refill possible, 115 - * switch off the debugger. 113 + * Allocate a new object. If the pool is empty, switch off the debugger. 116 114 */ 117 115 static struct debug_obj * 118 116 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) 119 117 { 120 118 struct debug_obj *obj = NULL; 121 - int retry = 0; 122 119 123 - repeat: 124 120 spin_lock(&pool_lock); 125 121 if (obj_pool.first) { 126 122 obj = hlist_entry(obj_pool.first, typeof(*obj), node); ··· 138 140 obj_pool_min_free = obj_pool_free; 139 141 } 140 142 spin_unlock(&pool_lock); 141 - 142 - if (fill_pool() && !obj && !retry++) 143 - goto repeat; 144 143 145 144 return obj; 146 145 } ··· 255 260 struct debug_bucket *db; 256 261 struct debug_obj *obj; 257 262 unsigned long flags; 263 + 264 + fill_pool(); 258 265 259 266 db = get_bucket((unsigned long) addr); 260 267