Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
dm table: pass correct dev area size to device_area_is_valid
dm: remove queue next_ordered workaround for barriers
dm raid1: wake kmirrord when requeueing delayed bios after remote recovery

10 files changed, 18 insertions(+), 32 deletions(-)
drivers/md/dm-crypt.c (+1 -1)
@@ -1318,7 +1318,7 @@
 {
 	struct crypt_config *cc = ti->private;
 
-	return fn(ti, cc->dev, cc->start, data);
+	return fn(ti, cc->dev, cc->start, ti->len, data);
 }
 
 static struct target_type crypt_target = {
drivers/md/dm-delay.c (+2 -2)
@@ -324,12 +324,12 @@
 	struct delay_c *dc = ti->private;
 	int ret = 0;
 
-	ret = fn(ti, dc->dev_read, dc->start_read, data);
+	ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
 	if (ret)
 		goto out;
 
 	if (dc->dev_write)
-		ret = fn(ti, dc->dev_write, dc->start_write, data);
+		ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);
 
 out:
 	return ret;
drivers/md/dm-linear.c (+1 -1)
@@ -139,7 +139,7 @@
 {
 	struct linear_c *lc = ti->private;
 
-	return fn(ti, lc->dev, lc->start, data);
+	return fn(ti, lc->dev, lc->start, ti->len, data);
 }
 
 static struct target_type linear_target = {
drivers/md/dm-mpath.c (+1 -1)
@@ -1453,7 +1453,7 @@
 
 	list_for_each_entry(pg, &m->priority_groups, list) {
 		list_for_each_entry(p, &pg->pgpaths, list) {
-			ret = fn(ti, p->path.dev, ti->begin, data);
+			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
 			if (ret)
 				goto out;
 		}
drivers/md/dm-raid1.c (+2 -1)
@@ -638,6 +638,7 @@
 	spin_lock_irq(&ms->lock);
 	bio_list_merge(&ms->writes, &requeue);
 	spin_unlock_irq(&ms->lock);
+	delayed_wake(ms);
 }
 
 /*
@@ -1293,7 +1292,7 @@
 
 	for (i = 0; !ret && i < ms->nr_mirrors; i++)
 		ret = fn(ti, ms->mirror[i].dev,
-			 ms->mirror[i].offset, data);
+			 ms->mirror[i].offset, ti->len, data);
 
 	return ret;
 }
drivers/md/dm-stripe.c (+4 -3)
@@ -320,10 +320,11 @@
 	int ret = 0;
 	unsigned i = 0;
 
-	do
+	do {
 		ret = fn(ti, sc->stripe[i].dev,
-			 sc->stripe[i].physical_start, data);
-	while (!ret && ++i < sc->stripes);
+			 sc->stripe[i].physical_start,
+			 sc->stripe_width, data);
+	} while (!ret && ++i < sc->stripes);
 
 	return ret;
 }
drivers/md/dm-table.c (+5 -10)
@@ -346,7 +346,7 @@
  * If possible, this checks an area of a destination device is valid.
  */
 static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
-				sector_t start, void *data)
+				sector_t start, sector_t len, void *data)
 {
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
@@ -359,7 +359,7 @@
 	if (!dev_size)
 		return 1;
 
-	if ((start >= dev_size) || (start + ti->len > dev_size)) {
+	if ((start >= dev_size) || (start + len > dev_size)) {
 		DMWARN("%s: %s too small for target",
 		       dm_device_name(ti->table->md), bdevname(bdev, b));
 		return 0;
@@ -377,11 +377,11 @@
 		return 0;
 	}
 
-	if (ti->len & (logical_block_size_sectors - 1)) {
+	if (len & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: len=%llu not aligned to h/w "
 		       "logical block size %hu of %s",
 		       dm_device_name(ti->table->md),
-		       (unsigned long long)ti->len,
+		       (unsigned long long)len,
 		       limits->logical_block_size, bdevname(bdev, b));
 		return 0;
 	}
@@ -482,7 +482,7 @@
 #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
 
 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
-			 sector_t start, void *data)
+			 sector_t start, sector_t len, void *data)
 {
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
@@ -828,11 +828,6 @@
 unsigned dm_table_get_type(struct dm_table *t)
 {
 	return t->type;
-}
-
-bool dm_table_bio_based(struct dm_table *t)
-{
-	return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
 }
 
 bool dm_table_request_based(struct dm_table *t)
drivers/md/dm.c (-10)
@@ -2203,16 +2203,6 @@
 		goto out;
 	}
 
-	/*
-	 * It is enought that blk_queue_ordered() is called only once when
-	 * the first bio-based table is bound.
-	 *
-	 * This setting should be moved to alloc_dev() when request-based dm
-	 * supports barrier.
-	 */
-	if (!md->map && dm_table_bio_based(table))
-		blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
-
 	__unbind(md);
 	r = __bind(md, table, &limits);
 
drivers/md/dm.h (-1)
@@ -61,7 +61,6 @@
 int dm_table_any_busy_target(struct dm_table *t);
 int dm_table_set_type(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
-bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
include/linux/device-mapper.h (+2 -2)
@@ -84,7 +84,7 @@
 
 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
 					   struct dm_dev *dev,
-					   sector_t physical_start,
+					   sector_t start, sector_t len,
 					   void *data);
 
 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
@@ -104,7 +104,7 @@
  * Combine device limits.
  */
 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
-			 sector_t start, void *data);
+			 sector_t start, sector_t len, void *data);
 
 struct dm_dev {
 	struct block_device *bdev;
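For target authors, the practical upshot of the interface change above is that an iterate_devices callout now receives the length of the device area explicitly, instead of every callee reading ti->len (which is wrong for targets such as dm-stripe, where each device only covers stripe_width sectors). A minimal sketch of a single-device target's iterate_devices method under the new four-argument callout; the example_c type, its fields, and the function name are hypothetical, following the dm-linear pattern shown above:

struct example_c {
	struct dm_dev *dev;	/* the one underlying device */
	sector_t start;		/* offset into that device */
};

static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn,
				   void *data)
{
	struct example_c *ec = ti->private;

	/*
	 * Pass the area length explicitly.  A whole-target mapping
	 * forwards ti->len; a target that fans out across several
	 * devices would instead pass each device's own span, as
	 * dm-stripe now does with sc->stripe_width.
	 */
	return fn(ti, ec->dev, ec->start, ti->len, data);
}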