Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
dm snapshot: wait for chunks in destructor
dm snapshot: fix register_snapshot deadlock
dm raid1: fix do_failures

+28 -9
+2 -1
drivers/md/dm-raid1.c
@@ -656,9 +656,10 @@
 		return;
 
 	if (!ms->log_failure) {
-		while ((bio = bio_list_pop(failures)))
+		while ((bio = bio_list_pop(failures))) {
 			ms->in_sync = 0;
 			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
+		}
 		return;
 	}
 
+24 -8
drivers/md/dm-snap.c
@@ -229,19 +229,21 @@
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
-	struct origin *o;
+	struct origin *o, *new_o;
 	struct block_device *bdev = snap->origin->bdev;
+
+	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
+	if (!new_o)
+		return -ENOMEM;
 
 	down_write(&_origins_lock);
 	o = __lookup_origin(bdev);
 
-	if (!o) {
+	if (o)
+		kfree(new_o);
+	else {
 		/* New origin */
-		o = kmalloc(sizeof(*o), GFP_KERNEL);
-		if (!o) {
-			up_write(&_origins_lock);
-			return -ENOMEM;
-		}
+		o = new_o;
 
 		/* Initialise the struct */
 		INIT_LIST_HEAD(&o->snapshots);
@@ -370,6 +368,7 @@
 	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
 							     GFP_NOIO);
 
+	atomic_inc(&s->pending_exceptions_count);
 	pe->snap = s;
 
 	return pe;
@@ -379,7 +376,11 @@
 
 static void free_pending_exception(struct dm_snap_pending_exception *pe)
 {
-	mempool_free(pe, pe->snap->pending_pool);
+	struct dm_snapshot *s = pe->snap;
+
+	mempool_free(pe, s->pending_pool);
+	smp_mb__before_atomic_dec();
+	atomic_dec(&s->pending_exceptions_count);
 }
 
 static void insert_completed_exception(struct dm_snapshot *s,
@@ -608,6 +601,7 @@
 
 	s->valid = 1;
 	s->active = 0;
+	atomic_set(&s->pending_exceptions_count, 0);
 	init_rwsem(&s->lock);
 	spin_lock_init(&s->pe_lock);
 	s->ti = ti;
@@ -733,6 +727,14 @@
 	/* Prevent further origin writes from using this snapshot. */
 	/* After this returns there can be no new kcopyd jobs. */
 	unregister_snapshot(s);
+
+	while (atomic_read(&s->pending_exceptions_count))
+		yield();
+	/*
+	 * Ensure instructions in mempool_destroy aren't reordered
+	 * before atomic_read.
+	 */
+	smp_mb();
 
 #ifdef CONFIG_DM_DEBUG
 	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+2
drivers/md/dm-snap.h
@@ -160,6 +160,8 @@
 
 	mempool_t *pending_pool;
 
+	atomic_t pending_exceptions_count;
+
 	struct exception_table pending;
 	struct exception_table complete;
 