Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
dm raid1: fix deadlock when suspending failed device
dm: eliminate some holes in data structures
dm ioctl: introduce flag indicating uevent was generated
dm: free dm_io before bio_endio not after
dm table: remove unused dm_get_device range parameters
dm ioctl: only issue uevent on resume if state changed
dm raid1: always return error if all legs fail
dm mpath: refactor pg_init
dm mpath: wait for pg_init completion when suspending
dm mpath: hold io until all pg_inits completed
dm mpath: avoid storing private suspended state
dm: document when snapshot has finished merging
dm table: remove dm_get from dm_table_get_md
dm mpath: skip activate_path for failed paths
dm mpath: pass struct pgpath to pg init done

+212 -140
+44 -0
Documentation/device-mapper/snapshot.txt
···
 brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real
 brw------- 1 root root 254, 12 29 ago 18:16 /dev/mapper/volumeGroup-base-cow
 brw------- 1 root root 254, 10 29 ago 18:16 /dev/mapper/volumeGroup-base
+
+
+How to determine when a merging is complete
+===========================================
+The snapshot-merge and snapshot status lines end with:
+  <sectors_allocated>/<total_sectors> <metadata_sectors>
+
+Both <sectors_allocated> and <total_sectors> include both data and metadata.
+During merging, the number of sectors allocated gets smaller and
+smaller.  Merging has finished when the number of sectors holding data
+is zero, in other words <sectors_allocated> == <metadata_sectors>.
+
+Here is a practical example (using a hybrid of lvm and dmsetup commands):
+
+# lvs
+  LV    VG          Attr   LSize Origin  Snap%  Move Log Copy%  Convert
+  base  volumeGroup owi-a- 4.00g
+  snap  volumeGroup swi-a- 1.00g base    18.97
+
+# dmsetup status volumeGroup-snap
+0 8388608 snapshot 397896/2097152 1560
+                                  ^^^^ metadata sectors
+
+# lvconvert --merge -b volumeGroup/snap
+  Merging of volume snap started.
+
+# lvs volumeGroup/snap
+  LV    VG          Attr   LSize Origin  Snap%  Move Log Copy%  Convert
+  base  volumeGroup Owi-a- 4.00g         17.23
+
+# dmsetup status volumeGroup-base
+0 8388608 snapshot-merge 281688/2097152 1104
+
+# dmsetup status volumeGroup-base
+0 8388608 snapshot-merge 180480/2097152 712
+
+# dmsetup status volumeGroup-base
+0 8388608 snapshot-merge 16/2097152 16
+
+Merging has finished.
+
+# lvs
+  LV    VG          Attr   LSize Origin  Snap%  Move Log Copy%  Convert
+  base  volumeGroup owi-a- 4.00g
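
The rule above ("merging is complete when <sectors_allocated> == <metadata_sectors>") can be applied mechanically to the last two fields of the status line. A minimal userspace sketch, not part of the patch, that checks one status line (the helper name and its use of sscanf are illustrative assumptions):

#include <stdio.h>

/*
 * Hypothetical helper: given one "dmsetup status" line such as
 * "0 8388608 snapshot-merge 16/2097152 16", return 1 if the merge has
 * finished (<sectors_allocated> == <metadata_sectors>), 0 if it is
 * still in progress, and -1 if the line cannot be parsed.
 */
static int merge_finished(const char *status_line)
{
        unsigned long long start, length, allocated, total, metadata;
        char target[32];

        if (sscanf(status_line, "%llu %llu %31s %llu/%llu %llu",
                   &start, &length, target, &allocated, &total, &metadata) != 6)
                return -1;

        return allocated == metadata;
}

int main(void)
{
        /* Mirrors the example above: still merging, then finished. */
        printf("%d\n", merge_finished("0 8388608 snapshot-merge 180480/2097152 712")); /* 0 */
        printf("%d\n", merge_finished("0 8388608 snapshot-merge 16/2097152 16"));      /* 1 */
        return 0;
}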
+1 -2
drivers/md/dm-crypt.c
···
         }
         cc->start = tmpll;
 
-        if (dm_get_device(ti, argv[3], cc->start, ti->len,
-                          dm_table_get_mode(ti->table), &cc->dev)) {
+        if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
                 ti->error = "Device lookup failed";
                 goto bad_device;
         }
+4 -4
drivers/md/dm-delay.c
···
                 goto bad;
         }
 
-        if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
-                          dm_table_get_mode(ti->table), &dc->dev_read)) {
+        if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+                          &dc->dev_read)) {
                 ti->error = "Device lookup failed";
                 goto bad;
         }
···
                 goto bad_dev_read;
         }
 
-        if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
-                          dm_table_get_mode(ti->table), &dc->dev_write)) {
+        if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+                          &dc->dev_write)) {
                 ti->error = "Write device lookup failed";
                 goto bad_dev_read;
         }
+15 -9
drivers/md/dm-ioctl.c
···
         up_write(&_hash_lock);
 }
 
-static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
+static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
+                          const char *new)
 {
         char *new_name, *old_name;
         struct hash_cell *hc;
···
                 dm_table_put(table);
         }
 
-        dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
+        if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
+                *flags |= DM_UEVENT_GENERATED_FLAG;
 
         dm_put(hc->md);
         up_write(&_hash_lock);
···
         __hash_remove(hc);
         up_write(&_hash_lock);
 
-        dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
+        if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
+                param->flags |= DM_UEVENT_GENERATED_FLAG;
 
         dm_put(md);
-        param->data_size = 0;
         return 0;
 }
 
···
                 return r;
 
         param->data_size = 0;
-        return dm_hash_rename(param->event_nr, param->name, new_name);
+
+        return dm_hash_rename(param->event_nr, &param->flags, param->name,
+                              new_name);
 }
 
 static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
···
                         set_disk_ro(dm_disk(md), 1);
         }
 
-        if (dm_suspended_md(md))
+        if (dm_suspended_md(md)) {
                 r = dm_resume(md);
+                if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
+                        param->flags |= DM_UEVENT_GENERATED_FLAG;
+        }
 
         if (old_map)
                 dm_table_destroy(old_map);
 
-        if (!r) {
-                dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
+        if (!r)
                 r = __dev_status(md, param);
-        }
 
         dm_put(md);
         return r;
···
 {
         /* Always clear this flag */
         param->flags &= ~DM_BUFFER_FULL_FLAG;
+        param->flags &= ~DM_UEVENT_GENERATED_FLAG;
 
         /* Ignores parameters */
         if (cmd == DM_REMOVE_ALL_CMD ||
+1 -2
drivers/md/dm-linear.c
···
         }
         lc->start = tmp;
 
-        if (dm_get_device(ti, argv[0], lc->start, ti->len,
-                          dm_table_get_mode(ti->table), &lc->dev)) {
+        if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) {
                 ti->error = "dm-linear: Device lookup failed";
                 goto bad;
         }
+1 -2
drivers/md/dm-log.c
···
                 return -EINVAL;
         }
 
-        r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
-                          FMODE_READ | FMODE_WRITE, &dev);
+        r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &dev);
         if (r)
                 return r;
 
+72 -39
drivers/md/dm-mpath.c
···
         struct list_head priority_groups;
         unsigned pg_init_required;      /* pg_init needs calling? */
         unsigned pg_init_in_progress;   /* Only one pg_init allowed at once */
+        wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
 
         unsigned nr_valid_paths;        /* Total number of usable paths */
         struct pgpath *current_pgpath;
···
         mempool_t *mpio_pool;
 
         struct mutex work_mutex;
-
-        unsigned suspended;     /* Don't create new I/O internally when set. */
 };
 
 /*
···
         m->queue_io = 1;
         INIT_WORK(&m->process_queued_ios, process_queued_ios);
         INIT_WORK(&m->trigger_event, trigger_event);
+        init_waitqueue_head(&m->pg_init_wait);
         mutex_init(&m->work_mutex);
         m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
         if (!m->mpio_pool) {
···
 /*-----------------------------------------------
  * Path selection
  *-----------------------------------------------*/
+
+static void __pg_init_all_paths(struct multipath *m)
+{
+        struct pgpath *pgpath;
+
+        m->pg_init_count++;
+        m->pg_init_required = 0;
+        list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
+                /* Skip failed paths */
+                if (!pgpath->is_active)
+                        continue;
+                if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+                        m->pg_init_in_progress++;
+        }
+}
 
 static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 {
···
 {
         struct multipath *m =
                 container_of(work, struct multipath, process_queued_ios);
-        struct pgpath *pgpath = NULL, *tmp;
+        struct pgpath *pgpath = NULL;
         unsigned must_queue = 1;
         unsigned long flags;
 
···
             (!pgpath && !m->queue_if_no_path))
                 must_queue = 0;
 
-        if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-                m->pg_init_count++;
-                m->pg_init_required = 0;
-                list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
-                        if (queue_work(kmpath_handlerd, &tmp->activate_path))
-                                m->pg_init_in_progress++;
-                }
-        }
+        if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+                __pg_init_all_paths(m);
+
 out:
         spin_unlock_irqrestore(&m->lock, flags);
         if (!must_queue)
···
         if (!p)
                 return ERR_PTR(-ENOMEM);
 
-        r = dm_get_device(ti, shift(as), ti->begin, ti->len,
-                          dm_table_get_mode(ti->table), &p->path.dev);
+        r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+                          &p->path.dev);
         if (r) {
                 ti->error = "error getting device";
                 goto bad;
···
         return r;
 }
 
-static void flush_multipath_work(void)
+static void multipath_wait_for_pg_init_completion(struct multipath *m)
+{
+        DECLARE_WAITQUEUE(wait, current);
+        unsigned long flags;
+
+        add_wait_queue(&m->pg_init_wait, &wait);
+
+        while (1) {
+                set_current_state(TASK_UNINTERRUPTIBLE);
+
+                spin_lock_irqsave(&m->lock, flags);
+                if (!m->pg_init_in_progress) {
+                        spin_unlock_irqrestore(&m->lock, flags);
+                        break;
+                }
+                spin_unlock_irqrestore(&m->lock, flags);
+
+                io_schedule();
+        }
+        set_current_state(TASK_RUNNING);
+
+        remove_wait_queue(&m->pg_init_wait, &wait);
+}
+
+static void flush_multipath_work(struct multipath *m)
 {
         flush_workqueue(kmpath_handlerd);
+        multipath_wait_for_pg_init_completion(m);
         flush_workqueue(kmultipathd);
         flush_scheduled_work();
 }
···
 {
         struct multipath *m = ti->private;
 
-        flush_multipath_work();
+        flush_multipath_work(m);
         free_multipath(m);
 }
 
···
 
 static void pg_init_done(void *data, int errors)
 {
-        struct dm_path *path = data;
-        struct pgpath *pgpath = path_to_pgpath(path);
+        struct pgpath *pgpath = data;
         struct priority_group *pg = pgpath->pg;
         struct multipath *m = pg->m;
         unsigned long flags;
···
                         errors = 0;
                         break;
                 }
-                DMERR("Cannot failover device because scsi_dh_%s was not "
-                      "loaded.", m->hw_handler_name);
+                DMERR("Could not failover the device: Handler scsi_dh_%s "
+                      "Error %d.", m->hw_handler_name, errors);
                 /*
                  * Fail path for now, so we do not ping pong
                  */
···
                         m->current_pgpath = NULL;
                         m->current_pg = NULL;
                 }
-        } else if (!m->pg_init_required) {
-                m->queue_io = 0;
+        } else if (!m->pg_init_required)
                 pg->bypassed = 0;
-        }
 
-        m->pg_init_in_progress--;
-        if (!m->pg_init_in_progress)
-                queue_work(kmultipathd, &m->process_queued_ios);
+        if (--m->pg_init_in_progress)
+                /* Activations of other paths are still on going */
+                goto out;
+
+        if (!m->pg_init_required)
+                m->queue_io = 0;
+
+        queue_work(kmultipathd, &m->process_queued_ios);
+
+        /*
+         * Wake up any thread waiting to suspend.
+         */
+        wake_up(&m->pg_init_wait);
+
+out:
         spin_unlock_irqrestore(&m->lock, flags);
 }
 
···
                 container_of(work, struct pgpath, activate_path);
 
         scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
-                                pg_init_done, &pgpath->path);
+                                pg_init_done, pgpath);
 }
 
 /*
···
         struct multipath *m = ti->private;
 
         mutex_lock(&m->work_mutex);
-        m->suspended = 1;
-        flush_multipath_work();
+        flush_multipath_work(m);
         mutex_unlock(&m->work_mutex);
 }
···
 {
         struct multipath *m = (struct multipath *) ti->private;
         unsigned long flags;
-
-        mutex_lock(&m->work_mutex);
-        m->suspended = 0;
-        mutex_unlock(&m->work_mutex);
 
         spin_lock_irqsave(&m->lock, flags);
         m->queue_if_no_path = m->saved_queue_if_no_path;
···
 
         mutex_lock(&m->work_mutex);
 
-        if (m->suspended) {
-                r = -EBUSY;
-                goto out;
-        }
-
         if (dm_suspended(ti)) {
                 r = -EBUSY;
                 goto out;
···
                 goto out;
         }
 
-        r = dm_get_device(ti, argv[1], ti->begin, ti->len,
-                          dm_table_get_mode(ti->table), &dev);
+        r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
         if (r) {
                 DMWARN("message: error getting device %s",
                        argv[1]);
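
The suspend-side synchronisation added above is an open-coded wait-queue loop paired with the wake_up() in pg_init_done(); it is open-coded because the condition must be sampled under m->lock. A reduced sketch of that pairing, with a hypothetical struct standing in for struct multipath (not part of the patch):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Illustrative stand-in for struct multipath. */
struct pginit_ctx {
        spinlock_t lock;
        unsigned in_progress;           /* outstanding path activations */
        wait_queue_head_t wait;
};

/* Suspend side: sleep until every outstanding activation has completed. */
static void wait_for_all_pg_inits(struct pginit_ctx *c)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;

        add_wait_queue(&c->wait, &wait);
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                spin_lock_irqsave(&c->lock, flags);
                if (!c->in_progress) {
                        spin_unlock_irqrestore(&c->lock, flags);
                        break;
                }
                spin_unlock_irqrestore(&c->lock, flags);
                io_schedule();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&c->wait, &wait);
}

/* Completion side, called with c->lock held, mirroring pg_init_done(). */
static void one_pg_init_done(struct pginit_ctx *c)
{
        if (!--c->in_progress)
                wake_up(&c->wait);
}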
+30 -23
drivers/md/dm-raid1.c
···
 static void hold_bio(struct mirror_set *ms, struct bio *bio)
 {
         /*
-         * If device is suspended, complete the bio.
+         * Lock is required to avoid race condition during suspend
+         * process.
          */
+        spin_lock_irq(&ms->lock);
+
         if (atomic_read(&ms->suspend)) {
+                spin_unlock_irq(&ms->lock);
+
+                /*
+                 * If device is suspended, complete the bio.
+                 */
                 if (dm_noflush_suspending(ms->ti))
                         bio_endio(bio, DM_ENDIO_REQUEUE);
                 else
···
         /*
          * Hold bio until the suspend is complete.
          */
-        spin_lock_irq(&ms->lock);
         bio_list_add(&ms->holds, bio);
         spin_unlock_irq(&ms->lock);
 }
···
                 dm_rh_delay(ms->rh, bio);
 
         while ((bio = bio_list_pop(&nosync))) {
-                if (unlikely(ms->leg_failure) && errors_handled(ms))
-                        hold_bio(ms, bio);
-                else {
+                if (unlikely(ms->leg_failure) && errors_handled(ms)) {
+                        spin_lock_irq(&ms->lock);
+                        bio_list_add(&ms->failures, bio);
+                        spin_unlock_irq(&ms->lock);
+                        wakeup_mirrord(ms);
+                } else {
                         map_bio(get_default_mirror(ms), bio);
                         generic_make_request(bio);
                 }
···
                 return -EINVAL;
         }
 
-        if (dm_get_device(ti, argv[0], offset, ti->len,
-                          dm_table_get_mode(ti->table),
+        if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                           &ms->mirror[mirror].dev)) {
                 ti->error = "Device lookup failure";
                 return -ENXIO;
···
         atomic_set(&ms->suspend, 1);
 
         /*
+         * Process bios in the hold list to start recovery waiting
+         * for bios in the hold list. After the process, no bio has
+         * a chance to be added in the hold list because ms->suspend
+         * is set.
+         */
+        spin_lock_irq(&ms->lock);
+        holds = ms->holds;
+        bio_list_init(&ms->holds);
+        spin_unlock_irq(&ms->lock);
+
+        while ((bio = bio_list_pop(&holds)))
+                hold_bio(ms, bio);
+
+        /*
          * We must finish up all the work that we've
          * generated (i.e. recovery work).
          */
···
          * we know that all of our I/O has been pushed.
          */
         flush_workqueue(ms->kmirrord_wq);
-
-        /*
-         * Now set ms->suspend is set and the workqueue flushed, no more
-         * entries can be added to ms->hold list, so process it.
-         *
-         * Bios can still arrive concurrently with or after this
-         * presuspend function, but they cannot join the hold list
-         * because ms->suspend is set.
-         */
-        spin_lock_irq(&ms->lock);
-        holds = ms->holds;
-        bio_list_init(&ms->holds);
-        spin_unlock_irq(&ms->lock);
-
-        while ((bio = bio_list_pop(&holds)))
-                hold_bio(ms, bio);
 }
 
 static void mirror_postsuspend(struct dm_target *ti)
+16 -18
drivers/md/dm-snap.c
···
         /* Whether or not owning mapped_device is suspended */
         int suspended;
 
-        mempool_t *pending_pool;
-
         atomic_t pending_exceptions_count;
+
+        mempool_t *pending_pool;
 
         struct dm_exception_table pending;
         struct dm_exception_table complete;
···
          */
         spinlock_t pe_lock;
 
+        /* Chunks with outstanding reads */
+        spinlock_t tracked_chunk_lock;
+        mempool_t *tracked_chunk_pool;
+        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
         /* The on disk metadata handler */
         struct dm_exception_store *store;
 
···
         struct bio_list queued_bios;
         struct work_struct queued_bios_work;
 
-        /* Chunks with outstanding reads */
-        mempool_t *tracked_chunk_pool;
-        spinlock_t tracked_chunk_lock;
-        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+        /* Wait for events based on state_bits */
+        unsigned long state_bits;
+
+        /* Range of chunks currently being merged. */
+        chunk_t first_merging_chunk;
+        int num_merging_chunks;
 
         /*
          * The merge operation failed if this flag is set.
···
          *   => stop merging; set merge_failed; process I/O normally.
          */
         int merge_failed;
-
-        /* Wait for events based on state_bits */
-        unsigned long state_bits;
-
-        /* Range of chunks currently being merged. */
-        chunk_t first_merging_chunk;
-        int num_merging_chunks;
 
         /*
          * Incoming bios that overlap with chunks being merged must wait
···
         argv++;
         argc--;
 
-        r = dm_get_device(ti, cow_path, 0, 0,
-                          FMODE_READ | FMODE_WRITE, &s->cow);
+        r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow);
         if (r) {
                 ti->error = "Cannot get COW device";
                 goto bad_cow;
···
         argv += args_used;
         argc -= args_used;
 
-        r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
+        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
         if (r) {
                 ti->error = "Cannot get origin device";
                 goto bad_origin;
···
                 return -EINVAL;
         }
 
-        r = dm_get_device(ti, argv[0], 0, ti->len,
-                          dm_table_get_mode(ti->table), &dev);
+        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
         if (r) {
                 ti->error = "Cannot get target device";
                 return r;
+1 -2
drivers/md/dm-stripe.c
···
         if (sscanf(argv[1], "%llu", &start) != 1)
                 return -EINVAL;
 
-        if (dm_get_device(ti, argv[0], start, sc->stripe_width,
-                          dm_table_get_mode(ti->table),
+        if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                           &sc->stripe[stripe].dev))
                 return -ENXIO;
 
+4 -8
drivers/md/dm-table.c
···
  * it's already present.
  */
 static int __table_get_device(struct dm_table *t, struct dm_target *ti,
-                              const char *path, sector_t start, sector_t len,
-                              fmode_t mode, struct dm_dev **result)
+                              const char *path, fmode_t mode, struct dm_dev **result)
 {
         int r;
         dev_t uninitialized_var(dev);
···
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
 
-int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
-                  sector_t len, fmode_t mode, struct dm_dev **result)
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+                  struct dm_dev **result)
 {
-        return __table_get_device(ti->table, ti, path,
-                                  start, len, mode, result);
+        return __table_get_device(ti->table, ti, path, mode, result);
 }
 
···
 
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
-        dm_get(t->md);
-
         return t->md;
 }
 
+2 -5
drivers/md/dm-uevent.c
···
 
         if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
                 DMERR("%s: Invalid event_type %d", __func__, event_type);
-                goto out;
+                return;
         }
 
         event = dm_build_path_uevent(md, ti,
···
                                      _dm_uevent_type_names[event_type].name,
                                      path, nr_valid_paths);
         if (IS_ERR(event))
-                goto out;
+                return;
 
         dm_uevent_add(md, &event->elist);
-
-out:
-        dm_put(md);
 }
 EXPORT_SYMBOL_GPL(dm_path_uevent);
 
+8 -17
drivers/md/dm.c
···
                         if (!md->barrier_error && io_error != -EOPNOTSUPP)
                                 md->barrier_error = io_error;
                         end_io_acct(io);
+                        free_io(md, io);
                 } else {
                         end_io_acct(io);
+                        free_io(md, io);
 
                         if (io_error != DM_ENDIO_REQUEUE) {
                                 trace_block_bio_complete(md->queue, bio);
···
                                 bio_endio(bio, io_error);
                         }
                 }
-
-                free_io(md, io);
         }
 }
 
···
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
                        unsigned cookie)
 {
         char udev_cookie[DM_COOKIE_LENGTH];
         char *envp[] = { udev_cookie, NULL };
 
         if (!cookie)
-                kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+                return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
         else {
                 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
                          DM_COOKIE_ENV_VAR_NAME, cookie);
-                kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
+                return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+                                          action, envp);
         }
 }
 
···
 
 int dm_suspended(struct dm_target *ti)
 {
-        struct mapped_device *md = dm_table_get_md(ti->table);
-        int r = dm_suspended_md(md);
-
-        dm_put(md);
-
-        return r;
+        return dm_suspended_md(dm_table_get_md(ti->table));
 }
 EXPORT_SYMBOL_GPL(dm_suspended);
 
 int dm_noflush_suspending(struct dm_target *ti)
 {
-        struct mapped_device *md = dm_table_get_md(ti->table);
-        int r = __noflush_suspending(md);
-
-        dm_put(md);
-
-        return r;
+        return __noflush_suspending(dm_table_get_md(ti->table));
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
+2 -2
drivers/md/dm.h
···
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
 
-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
-                       unsigned cookie);
+int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+                      unsigned cookie);
 
 int dm_io_init(void);
 void dm_io_exit(void);
+2 -3
include/linux/device-mapper.h
···
 /*
  * Constructors should call these functions to ensure destination devices
  * are opened/closed correctly.
- * FIXME: too many arguments.
  */
-int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
-                  sector_t len, fmode_t mode, struct dm_dev **result);
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+                  struct dm_dev **result);
 void dm_put_device(struct dm_target *ti, struct dm_dev *d);
 
 /*
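
With the trimmed prototype, a constructor passes only the path and the mode. For reference, a minimal sketch of a hypothetical target ctr modelled on dm-linear (the target name, private struct and argument layout are illustrative, not part of the series):

#include <linux/device-mapper.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustrative per-target context. */
struct example_c {
        struct dm_dev *dev;
        sector_t start;
};

/* Construct an example mapping: <dev_path> <offset> */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct example_c *ec;
        unsigned long long tmp;

        if (argc != 2) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        ec = kmalloc(sizeof(*ec), GFP_KERNEL);
        if (!ec) {
                ti->error = "Cannot allocate context";
                return -ENOMEM;
        }

        if (sscanf(argv[1], "%llu", &tmp) != 1) {
                ti->error = "Invalid device sector";
                goto bad;
        }
        ec->start = tmp;

        /* Offset and length are no longer passed; only the mode matters. */
        if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev)) {
                ti->error = "Device lookup failed";
                goto bad;
        }

        ti->private = ec;
        return 0;

bad:
        kfree(ec);
        return -EINVAL;
}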
+2 -2
include/linux/dm-io.h
···
 struct dm_io_memory {
         enum dm_io_mem_type type;
 
+        unsigned offset;
+
         union {
                 struct page_list *pl;
                 struct bio_vec *bvec;
                 void *vma;
                 void *addr;
         } ptr;
-
-        unsigned offset;
 };
 
 struct dm_io_notify {
+7 -2
include/linux/dm-ioctl.h
···
 #define DM_DEV_SET_GEOMETRY     _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR        4
-#define DM_VERSION_MINOR        16
+#define DM_VERSION_MINOR        17
 #define DM_VERSION_PATCHLEVEL   0
-#define DM_VERSION_EXTRA        "-ioctl (2009-11-05)"
+#define DM_VERSION_EXTRA        "-ioctl (2010-03-05)"
 
 /* Status bits */
 #define DM_READONLY_FLAG        (1 << 0) /* In/Out */
···
  * is set before using the data returned.
  */
 #define DM_QUERY_INACTIVE_TABLE_FLAG    (1 << 12) /* In */
+
+/*
+ * If set, a uevent was generated for which the caller may need to wait.
+ */
+#define DM_UEVENT_GENERATED_FLAG        (1 << 13) /* Out */
 
 #endif /* _LINUX_DM_IOCTL_H */
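
From userspace, the new out-flag lets an ioctl caller decide whether waiting for a udev-generated event is worthwhile. A hedged sketch (the helper name is hypothetical; error handling, version negotiation and the actual cookie wait are omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

/*
 * Remove a device-mapper device through the control fd (/dev/mapper/control).
 * Returns 1 if a uevent was generated (so waiting for udev makes sense),
 * 0 if not, -1 on ioctl failure.
 */
static int remove_device(int ctl_fd, const char *name)
{
        struct dm_ioctl dmi;

        memset(&dmi, 0, sizeof(dmi));
        dmi.version[0] = DM_VERSION_MAJOR;
        dmi.version[1] = DM_VERSION_MINOR;
        dmi.version[2] = DM_VERSION_PATCHLEVEL;
        dmi.data_size = sizeof(dmi);
        strncpy(dmi.name, name, sizeof(dmi.name) - 1);

        if (ioctl(ctl_fd, DM_DEV_REMOVE, &dmi) < 0)
                return -1;

        /* Only wait for the udev-generated event if one was emitted. */
        return (dmi.flags & DM_UEVENT_GENERATED_FLAG) ? 1 : 0;
}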