Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: allow remove to be deferred

This patch allows the removal of an open device to be deferred until
it is closed. (Previously such a removal attempt would fail.)

The deferred remove functionality is enabled by setting the
DM_DEFERRED_REMOVE flag in the ioctl structure passed to the
DM_DEV_REMOVE or DM_REMOVE_ALL ioctls.

On return from DM_DEV_REMOVE, the flag DM_DEFERRED_REMOVE indicates
whether the device was removed immediately or flagged to be removed on
close - if the flag is clear, the device was removed.

On return from DM_DEV_STATUS and other ioctls, the flag
DM_DEFERRED_REMOVE is set if the device is scheduled to be removed on
closure.

A device that is scheduled for deferred removal can be revived by
sending it the message "@cancel_deferred_remove". This message clears
the DMF_DEFERRED_REMOVE flag so that the device won't be deleted on
close.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

authored by

Mikulas Patocka and committed by
Mike Snitzer
2c140a24 7833b08e

+99 -12
+30 -6
drivers/md/dm-ioctl.c
··· 57 57 static struct list_head _name_buckets[NUM_BUCKETS]; 58 58 static struct list_head _uuid_buckets[NUM_BUCKETS]; 59 59 60 - static void dm_hash_remove_all(int keep_open_devices); 60 + static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred); 61 61 62 62 /* 63 63 * Guards access to both hash tables. ··· 86 86 87 87 static void dm_hash_exit(void) 88 88 { 89 - dm_hash_remove_all(0); 89 + dm_hash_remove_all(false, false, false); 90 90 } 91 91 92 92 /*----------------------------------------------------------------- ··· 276 276 return table; 277 277 } 278 278 279 - static void dm_hash_remove_all(int keep_open_devices) 279 + static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred) 280 280 { 281 281 int i, dev_skipped; 282 282 struct hash_cell *hc; ··· 293 293 md = hc->md; 294 294 dm_get(md); 295 295 296 - if (keep_open_devices && dm_lock_for_deletion(md)) { 296 + if (keep_open_devices && 297 + dm_lock_for_deletion(md, mark_deferred, only_deferred)) { 297 298 dm_put(md); 298 299 dev_skipped++; 299 300 continue; ··· 451 450 return md; 452 451 } 453 452 453 + void dm_deferred_remove(void) 454 + { 455 + dm_hash_remove_all(true, false, true); 456 + } 457 + 454 458 /*----------------------------------------------------------------- 455 459 * Implementation of the ioctl commands 456 460 *---------------------------------------------------------------*/ ··· 467 461 468 462 static int remove_all(struct dm_ioctl *param, size_t param_size) 469 463 { 470 - dm_hash_remove_all(1); 464 + dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false); 471 465 param->data_size = 0; 472 466 return 0; 473 467 } ··· 689 683 if (dm_suspended_md(md)) 690 684 param->flags |= DM_SUSPEND_FLAG; 691 685 686 + if (dm_test_deferred_remove_flag(md)) 687 + param->flags |= DM_DEFERRED_REMOVE; 688 + 692 689 param->dev = huge_encode_dev(disk_devt(disk)); 693 690 694 691 /* ··· 841 832 /* 842 833 * Ensure the device 
is not open and nothing further can open it. 843 834 */ 844 - r = dm_lock_for_deletion(md); 835 + r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false); 845 836 if (r) { 837 + if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) { 838 + up_write(&_hash_lock); 839 + dm_put(md); 840 + return 0; 841 + } 846 842 DMDEBUG_LIMIT("unable to remove open device %s", hc->name); 847 843 up_write(&_hash_lock); 848 844 dm_put(md); ··· 861 847 dm_sync_table(md); 862 848 dm_table_destroy(t); 863 849 } 850 + 851 + param->flags &= ~DM_DEFERRED_REMOVE; 864 852 865 853 if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr)) 866 854 param->flags |= DM_UEVENT_GENERATED_FLAG; ··· 1484 1468 1485 1469 if (**argv != '@') 1486 1470 return 2; /* no '@' prefix, deliver to target */ 1471 + 1472 + if (!strcasecmp(argv[0], "@cancel_deferred_remove")) { 1473 + if (argc != 1) { 1474 + DMERR("Invalid arguments for @cancel_deferred_remove"); 1475 + return -EINVAL; 1476 + } 1477 + return dm_cancel_deferred_remove(md); 1478 + } 1487 1479 1488 1480 r = dm_stats_message(md, argc, argv, result, maxlen); 1489 1481 if (r < 2)
+44 -3
drivers/md/dm.c
··· 49 49 static DEFINE_IDR(_minor_idr); 50 50 51 51 static DEFINE_SPINLOCK(_minor_lock); 52 + 53 + static void do_deferred_remove(struct work_struct *w); 54 + 55 + static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 56 + 52 57 /* 53 58 * For bio-based dm. 54 59 * One of these is allocated per bio. ··· 121 116 #define DMF_DELETING 4 122 117 #define DMF_NOFLUSH_SUSPENDING 5 123 118 #define DMF_MERGE_IS_OPTIONAL 6 119 + #define DMF_DEFERRED_REMOVE 7 124 120 125 121 /* 126 122 * A dummy definition to make RCU happy. ··· 305 299 306 300 static void local_exit(void) 307 301 { 302 + flush_scheduled_work(); 303 + 308 304 kmem_cache_destroy(_rq_tio_cache); 309 305 kmem_cache_destroy(_io_cache); 310 306 unregister_blkdev(_major, _name); ··· 412 404 413 405 spin_lock(&_minor_lock); 414 406 415 - atomic_dec(&md->open_count); 407 + if (atomic_dec_and_test(&md->open_count) && 408 + (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 409 + schedule_work(&deferred_remove_work); 410 + 416 411 dm_put(md); 417 412 418 413 spin_unlock(&_minor_lock); ··· 429 418 /* 430 419 * Guarantees nothing is using the device before it's deleted. 
431 420 */ 432 - int dm_lock_for_deletion(struct mapped_device *md) 421 + int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) 433 422 { 434 423 int r = 0; 435 424 436 425 spin_lock(&_minor_lock); 437 426 438 - if (dm_open_count(md)) 427 + if (dm_open_count(md)) { 439 428 r = -EBUSY; 429 + if (mark_deferred) 430 + set_bit(DMF_DEFERRED_REMOVE, &md->flags); 431 + } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) 432 + r = -EEXIST; 440 433 else 441 434 set_bit(DMF_DELETING, &md->flags); 442 435 443 436 spin_unlock(&_minor_lock); 444 437 445 438 return r; 439 + } 440 + 441 + int dm_cancel_deferred_remove(struct mapped_device *md) 442 + { 443 + int r = 0; 444 + 445 + spin_lock(&_minor_lock); 446 + 447 + if (test_bit(DMF_DELETING, &md->flags)) 448 + r = -EBUSY; 449 + else 450 + clear_bit(DMF_DEFERRED_REMOVE, &md->flags); 451 + 452 + spin_unlock(&_minor_lock); 453 + 454 + return r; 455 + } 456 + 457 + static void do_deferred_remove(struct work_struct *w) 458 + { 459 + dm_deferred_remove(); 446 460 } 447 461 448 462 sector_t dm_get_size(struct mapped_device *md) ··· 2928 2892 int dm_suspended_md(struct mapped_device *md) 2929 2893 { 2930 2894 return test_bit(DMF_SUSPENDED, &md->flags); 2895 + } 2896 + 2897 + int dm_test_deferred_remove_flag(struct mapped_device *md) 2898 + { 2899 + return test_bit(DMF_DEFERRED_REMOVE, &md->flags); 2931 2900 } 2932 2901 2933 2902 int dm_suspended(struct dm_target *ti)
+12 -1
drivers/md/dm.h
··· 129 129 int dm_suspended_md(struct mapped_device *md); 130 130 131 131 /* 132 + * Test if the device is scheduled for deferred remove. 133 + */ 134 + int dm_test_deferred_remove_flag(struct mapped_device *md); 135 + 136 + /* 137 + * Try to remove devices marked for deferred removal. 138 + */ 139 + void dm_deferred_remove(void); 140 + 141 + /* 132 142 * The device-mapper can be driven through one of two interfaces; 133 143 * ioctl or filesystem, depending which patch you have applied. 134 144 */ ··· 168 158 void dm_destroy(struct mapped_device *md); 169 159 void dm_destroy_immediate(struct mapped_device *md); 170 160 int dm_open_count(struct mapped_device *md); 171 - int dm_lock_for_deletion(struct mapped_device *md); 161 + int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred); 162 + int dm_cancel_deferred_remove(struct mapped_device *md); 172 163 int dm_request_based(struct mapped_device *md); 173 164 sector_t dm_get_size(struct mapped_device *md); 174 165 struct dm_stats *dm_get_stats(struct mapped_device *md);
+13 -2
include/uapi/linux/dm-ioctl.h
··· 267 267 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 268 268 269 269 #define DM_VERSION_MAJOR 4 270 - #define DM_VERSION_MINOR 26 270 + #define DM_VERSION_MINOR 27 271 271 #define DM_VERSION_PATCHLEVEL 0 272 - #define DM_VERSION_EXTRA "-ioctl (2013-08-15)" 272 + #define DM_VERSION_EXTRA "-ioctl (2013-10-30)" 273 273 274 274 /* Status bits */ 275 275 #define DM_READONLY_FLAG (1 << 0) /* In/Out */ ··· 340 340 * If set, a message generated output data. 341 341 */ 342 342 #define DM_DATA_OUT_FLAG (1 << 16) /* Out */ 343 + 344 + /* 345 + * If set with DM_DEV_REMOVE or DM_REMOVE_ALL this indicates that if 346 + * the device cannot be removed immediately because it is still in use 347 + * it should instead be scheduled for removal when it gets closed. 348 + * 349 + * On return from DM_DEV_REMOVE, DM_DEV_STATUS or other ioctls, this 350 + * flag indicates that the device is scheduled to be removed when it 351 + * gets closed. 352 + */ 353 + #define DM_DEFERRED_REMOVE (1 << 17) /* In/Out */ 343 354 344 355 #endif /* _LINUX_DM_IOCTL_H */