Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md:
md: Avoid waking up a thread after it has been freed.

6 files changed, 26 insertions(+), 15 deletions(-)
+19 -3
drivers/md/md.c
···
 static void autostart_arrays(int part);
 #endif

+/* pers_list is a list of registered personalities protected
+ * by pers_lock.
+ * pers_lock does extra service to protect accesses to
+ * mddev->thread when the mutex cannot be held.
+ */
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
···
 	} else
 		mutex_unlock(&mddev->reconfig_mutex);

+	/* As we've dropped the mutex we need a spinlock to
+	 * make sure the thread doesn't disappear
+	 */
+	spin_lock(&pers_lock);
 	md_wakeup_thread(mddev->thread);
+	spin_unlock(&pers_lock);
 }

 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
···
 	return thread;
 }

-void md_unregister_thread(mdk_thread_t *thread)
+void md_unregister_thread(mdk_thread_t **threadp)
 {
+	mdk_thread_t *thread = *threadp;
 	if (!thread)
 		return;
 	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+	/* Locking ensures that mddev_unlock does not wake_up a
+	 * non-existent thread
+	 */
+	spin_lock(&pers_lock);
+	*threadp = NULL;
+	spin_unlock(&pers_lock);

 	kthread_stop(thread->tsk);
 	kfree(thread);
···
 	mdk_rdev_t *rdev;

 	/* resync has finished, collect result */
-	md_unregister_thread(mddev->sync_thread);
-	mddev->sync_thread = NULL;
+	md_unregister_thread(&mddev->sync_thread);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
 		/* success...*/
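The md.c hunks above are the heart of the fix. mddev_unlock() calls md_wakeup_thread(mddev->thread) after it has already dropped reconfig_mutex, while the old callers freed the thread via md_unregister_thread(mddev->thread) and only afterwards set mddev->thread = NULL, so a racing wakeup could act on freed memory. The new md_unregister_thread() therefore takes the address of the owner's pointer and clears it under pers_lock before the kthread is stopped and freed, and mddev_unlock() takes the same lock around the wakeup. What follows is a minimal user-space sketch of that pattern, not kernel code: struct owner, wake_thread() and unregister_thread() are made-up names, and a pthread mutex stands in for pers_lock.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for mdk_thread_t; wake_count is just observable state. */
struct worker {
	int wake_count;
};

/* Plays the role of pers_lock. */
static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;

/* Plays the role of mddev: it owns a pointer to its worker thread. */
struct owner {
	struct worker *thread;
};

/* Like md_wakeup_thread() under the patched mddev_unlock(): the pointer
 * is only dereferenced while pers_lock is held. */
static void wake_thread(struct owner *o)
{
	pthread_mutex_lock(&pers_lock);
	if (o->thread)
		o->thread->wake_count++;
	pthread_mutex_unlock(&pers_lock);
}

/* Like the new md_unregister_thread(): take the address of the owner's
 * pointer, clear it under pers_lock, and only then free the object, so a
 * concurrent wake_thread() sees either the live worker or NULL. */
static void unregister_thread(struct worker **threadp)
{
	struct worker *t = *threadp;

	if (!t)
		return;
	pthread_mutex_lock(&pers_lock);
	*threadp = NULL;
	pthread_mutex_unlock(&pers_lock);
	free(t);
}

int main(void)
{
	struct owner o = { .thread = calloc(1, sizeof(struct worker)) };

	wake_thread(&o);              /* wakes the live worker */
	unregister_thread(&o.thread); /* pointer is NULLed before the free */
	wake_thread(&o);              /* harmless no-op, not a use-after-free */
	printf("ok\n");
	return 0;
}

Passing mdk_thread_t ** instead of mdk_thread_t * is what lets the helper clear the caller's pointer inside the locked region, which is why every personality below changes from the two-step md_unregister_thread(mddev->thread); mddev->thread = NULL; to a single md_unregister_thread(&mddev->thread).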
+1 -1
drivers/md/md.h
···
 extern int unregister_md_personality(struct mdk_personality *p);
 extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
				mddev_t *mddev, const char *name);
-extern void md_unregister_thread(mdk_thread_t *thread);
+extern void md_unregister_thread(mdk_thread_t **threadp);
 extern void md_wakeup_thread(mdk_thread_t *thread);
 extern void md_check_recovery(mddev_t *mddev);
 extern void md_write_start(mddev_t *mddev, struct bio *bi);
+1 -2
drivers/md/multipath.c
···
 {
 	multipath_conf_t *conf = mddev->private;

-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	mempool_destroy(conf->pool);
 	kfree(conf->multipaths);
+1 -2
drivers/md/raid1.c
···
 	raise_barrier(conf);
 	lower_barrier(conf);

-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	if (conf->r1bio_pool)
 		mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
+2 -3
drivers/md/raid10.c
···
 	return 0;

 out_free_conf:
-	md_unregister_thread(mddev->thread);
+	md_unregister_thread(&mddev->thread);
 	if (conf->r10bio_pool)
 		mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
···
 	raise_barrier(conf, 0);
 	lower_barrier(conf);

-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	if (conf->r10bio_pool)
 		mempool_destroy(conf->r10bio_pool);
+2 -4
drivers/md/raid5.c
···

 	return 0;
 abort:
-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	if (conf) {
 		print_raid5_conf(conf);
 		free_conf(conf);
···
 {
 	raid5_conf_t *conf = mddev->private;

-	md_unregister_thread(mddev->thread);
-	mddev->thread = NULL;
+	md_unregister_thread(&mddev->thread);
 	if (mddev->queue)
 		mddev->queue->backing_dev_info.congested_fn = NULL;
 	free_conf(conf);