Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Remove blkdev warning triggered by using md

As setting and clearing queue flags now requires that we hold a spinlock
on the queue, and as blk_queue_stack_limits is called without that lock,
get the lock inside blk_queue_stack_limits.

For blk_queue_stack_limits to be able to find the right lock, each md
personality needs to set q->queue_lock to point to the appropriate lock.
Those personalities which didn't previously use a spin_lock, use
q->__queue_lock. So always initialise that lock when allocated.

With this in place, setting/clearing of the QUEUE_FLAG_PLUGGED bit will no
longer cause warnings as it will be clear that the proper lock is held.

Thanks to Dan Williams for review and fixing the silly bugs.

Signed-off-by: NeilBrown <neilb@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Alistair John Strachan <alistair@devzero.co.uk>
Cc: Nick Piggin <npiggin@suse.de>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Jacek Luczak <difrost.kernel@gmail.com>
Cc: Prakash Punnoor <prakash@punnoor.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Neil Brown and committed by
Linus Torvalds
e7e72bf6 4920916f

+19 -6
+2 -3
block/blk-core.c
··· 482 482 kobject_init(&q->kobj, &blk_queue_ktype); 483 483 484 484 mutex_init(&q->sysfs_lock); 485 + spin_lock_init(&q->__queue_lock); 485 486 486 487 return q; 487 488 } ··· 545 544 * if caller didn't supply a lock, they get per-queue locking with 546 545 * our embedded lock 547 546 */ 548 - if (!lock) { 549 - spin_lock_init(&q->__queue_lock); 547 + if (!lock) 550 548 lock = &q->__queue_lock; 551 - } 552 549 553 550 q->request_fn = rfn; 554 551 q->prep_rq_fn = NULL;
+7 -1
block/blk-settings.c
··· 286 286 t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments); 287 287 t->max_segment_size = min(t->max_segment_size, b->max_segment_size); 288 288 t->hardsect_size = max(t->hardsect_size, b->hardsect_size); 289 - if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) 289 + if (!t->queue_lock) 290 + WARN_ON_ONCE(1); 291 + else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { 292 + unsigned long flags; 293 + spin_lock_irqsave(t->queue_lock, flags); 290 294 queue_flag_clear(QUEUE_FLAG_CLUSTER, t); 295 + spin_unlock_irqrestore(t->queue_lock, flags); 296 + } 291 297 } 292 298 EXPORT_SYMBOL(blk_queue_stack_limits); 293 299
+1
drivers/md/linear.c
··· 250 250 { 251 251 linear_conf_t *conf; 252 252 253 + mddev->queue->queue_lock = &mddev->queue->__queue_lock; 253 254 conf = linear_conf(mddev, mddev->raid_disks); 254 255 255 256 if (!conf)
+1
drivers/md/multipath.c
··· 417 417 * bookkeeping area. [whatever we allocate in multipath_run(), 418 418 * should be freed in multipath_stop()] 419 419 */ 420 + mddev->queue->queue_lock = &mddev->queue->__queue_lock; 420 421 421 422 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); 422 423 mddev->private = conf;
+1
drivers/md/raid0.c
··· 280 280 (mddev->chunk_size>>1)-1); 281 281 blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9); 282 282 blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1); 283 + mddev->queue->queue_lock = &mddev->queue->__queue_lock; 283 284 284 285 conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL); 285 286 if (!conf)
+3 -1
drivers/md/raid1.c
··· 1935 1935 if (!conf->r1bio_pool) 1936 1936 goto out_no_mem; 1937 1937 1938 + spin_lock_init(&conf->device_lock); 1939 + mddev->queue->queue_lock = &conf->device_lock; 1940 + 1938 1941 rdev_for_each(rdev, tmp, mddev) { 1939 1942 disk_idx = rdev->raid_disk; 1940 1943 if (disk_idx >= mddev->raid_disks ··· 1961 1958 } 1962 1959 conf->raid_disks = mddev->raid_disks; 1963 1960 conf->mddev = mddev; 1964 - spin_lock_init(&conf->device_lock); 1965 1961 INIT_LIST_HEAD(&conf->retry_list); 1966 1962 1967 1963 spin_lock_init(&conf->resync_lock);
+3 -1
drivers/md/raid10.c
··· 2082 2082 goto out_free_conf; 2083 2083 } 2084 2084 2085 + spin_lock_init(&conf->device_lock); 2086 + mddev->queue->queue_lock = &conf->device_lock; 2087 + 2085 2088 rdev_for_each(rdev, tmp, mddev) { 2086 2089 disk_idx = rdev->raid_disk; 2087 2090 if (disk_idx >= mddev->raid_disks ··· 2106 2103 2107 2104 disk->head_position = 0; 2108 2105 } 2109 - spin_lock_init(&conf->device_lock); 2110 2106 INIT_LIST_HEAD(&conf->retry_list); 2111 2107 2112 2108 spin_lock_init(&conf->resync_lock);
+1
drivers/md/raid5.c
··· 4257 4257 goto abort; 4258 4258 } 4259 4259 spin_lock_init(&conf->device_lock); 4260 + mddev->queue->queue_lock = &conf->device_lock; 4260 4261 init_waitqueue_head(&conf->wait_for_stripe); 4261 4262 init_waitqueue_head(&conf->wait_for_overlap); 4262 4263 INIT_LIST_HEAD(&conf->handle_list);