[PATCH] device-mapper raid1: drop mark_region spinlock fix

The spinlock region_lock is held while calling mark_region, which can sleep.
Drop the spinlock before calling that function.

A region's state and inclusion in the clean list are altered by rh_inc and
rh_dec. The state variable is set to RH_CLEAN in rh_dec, but only if
'pending' is zero. It is set to RH_DIRTY in rh_inc, but not if it is already
so. The changes to 'pending', the state, and the region's inclusion in the
clean list need to be made atomically.

Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Jonathan E Brassow and committed by Linus Torvalds 7692c5dd 233886dd

+9 -11
+9 -11
drivers/md/dm-raid1.c
··· 376 read_lock(&rh->hash_lock); 377 reg = __rh_find(rh, region); 378 379 atomic_inc(&reg->pending); 380 381 - spin_lock_irq(&rh->region_lock); 382 if (reg->state == RH_CLEAN) { 383 - rh->log->type->mark_region(rh->log, reg->key); 384 - 385 reg->state = RH_DIRTY; 386 list_del_init(&reg->list); /* take off the clean list */ 387 - } 388 - spin_unlock_irq(&rh->region_lock); 389 390 read_unlock(&rh->hash_lock); 391 } ··· 410 reg = __rh_lookup(rh, region); 411 read_unlock(&rh->hash_lock); 412 413 if (atomic_dec_and_test(&reg->pending)) { 414 - spin_lock_irqsave(&rh->region_lock, flags); 415 - if (atomic_read(&reg->pending)) { /* check race */ 416 - spin_unlock_irqrestore(&rh->region_lock, flags); 417 - return; 418 - } 419 if (reg->state == RH_RECOVERING) { 420 list_add_tail(&reg->list, &rh->quiesced_regions); 421 } else { 422 reg->state = RH_CLEAN; 423 list_add(&reg->list, &rh->clean_regions); 424 } 425 - spin_unlock_irqrestore(&rh->region_lock, flags); 426 should_wake = 1; 427 } 428 429 if (should_wake) 430 wake();
··· 376 read_lock(&rh->hash_lock); 377 reg = __rh_find(rh, region); 378 379 + spin_lock_irq(&rh->region_lock); 380 atomic_inc(&reg->pending); 381 382 if (reg->state == RH_CLEAN) { 383 reg->state = RH_DIRTY; 384 list_del_init(&reg->list); /* take off the clean list */ 385 + spin_unlock_irq(&rh->region_lock); 386 + 387 + rh->log->type->mark_region(rh->log, reg->key); 388 + } else 389 + spin_unlock_irq(&rh->region_lock); 390 + 391 392 read_unlock(&rh->hash_lock); 393 } ··· 408 reg = __rh_lookup(rh, region); 409 read_unlock(&rh->hash_lock); 410 411 + spin_lock_irqsave(&rh->region_lock, flags); 412 if (atomic_dec_and_test(&reg->pending)) { 413 if (reg->state == RH_RECOVERING) { 414 list_add_tail(&reg->list, &rh->quiesced_regions); 415 } else { 416 reg->state = RH_CLEAN; 417 list_add(&reg->list, &rh->clean_regions); 418 } 419 should_wake = 1; 420 } 421 + spin_unlock_irqrestore(&rh->region_lock, flags); 422 423 if (should_wake) 424 wake();