Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm thin: add read only and fail io modes

Add read-only and fail-io modes to thin provisioning.

If a transaction commit fails, the pool's metadata device will transition
to "read-only" mode. If a commit fails while the pool is already in
read-only mode, the pool transitions to "fail-io" mode.

Once in fail-io mode the pool and all associated thin devices will
report a status of "Fail".

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>

Authored by Joe Thornber; committed by Alasdair G Kergon
e49e5829 da105ed5

+347 -99
+23 -1
Documentation/device-mapper/thin-provisioning.txt
··· 231 231 no_discard_passdown: Don't pass discards down to the underlying 232 232 data device, but just remove the mapping. 233 233 234 + read_only: Don't allow any changes to be made to the pool 235 + metadata. 236 + 234 237 Data block size must be between 64KB (128 sectors) and 1GB 235 238 (2097152 sectors) inclusive. 236 239 ··· 242 239 243 240 <transaction id> <used metadata blocks>/<total metadata blocks> 244 241 <used data blocks>/<total data blocks> <held metadata root> 245 - 242 + [no_]discard_passdown ro|rw 246 243 247 244 transaction id: 248 245 A 64-bit number used by userspace to help synchronise with metadata ··· 259 256 'held' for userspace read access. '-' indicates there is no 260 257 held root. This feature is not yet implemented so '-' is 261 258 always returned. 259 + 260 + discard_passdown|no_discard_passdown 261 + Whether or not discards are actually being passed down to the 262 + underlying device. When this is enabled when loading the table, 263 + it can get disabled if the underlying device doesn't support it. 264 + 265 + ro|rw 266 + If the pool encounters certain types of device failures it will 267 + drop into a read-only metadata mode in which no changes to 268 + the pool metadata (like allocating new blocks) are permitted. 269 + 270 + In serious cases where even a read-only mode is deemed unsafe 271 + no further I/O will be permitted and the status will just 272 + contain the string 'Fail'. The userspace recovery tools 273 + should then be used. 262 274 263 275 iii) Messages 264 276 ··· 347 329 ii) Status 348 330 349 331 <nr mapped sectors> <highest mapped sector> 332 + 333 + If the pool has encountered device errors and failed, the status 334 + will just contain the string 'Fail'. The userspace recovery 335 + tools should then be used.
+324 -98
drivers/md/dm-thin.c
··· 1 1 /* 2 - * Copyright (C) 2011 Red Hat UK. 2 + * Copyright (C) 2011-2012 Red Hat UK. 3 3 * 4 4 * This file is released under the GPL. 5 5 */ ··· 496 496 */ 497 497 struct dm_thin_new_mapping; 498 498 499 + /* 500 + * The pool runs in 3 modes. Ordered in degraded order for comparisons. 501 + */ 502 + enum pool_mode { 503 + PM_WRITE, /* metadata may be changed */ 504 + PM_READ_ONLY, /* metadata may not be changed */ 505 + PM_FAIL, /* all I/O fails */ 506 + }; 507 + 499 508 struct pool_features { 509 + enum pool_mode mode; 510 + 500 511 unsigned zero_new_blocks:1; 501 512 unsigned discard_enabled:1; 502 513 unsigned discard_passdown:1; 503 514 }; 515 + 516 + struct thin_c; 517 + typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio); 518 + typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m); 504 519 505 520 struct pool { 506 521 struct list_head list; ··· 557 542 struct dm_thin_new_mapping *next_mapping; 558 543 mempool_t *mapping_pool; 559 544 mempool_t *endio_hook_pool; 545 + 546 + process_bio_fn process_bio; 547 + process_bio_fn process_discard; 548 + 549 + process_mapping_fn process_prepared_mapping; 550 + process_mapping_fn process_prepared_discard; 560 551 }; 552 + 553 + static enum pool_mode get_pool_mode(struct pool *pool); 554 + static void set_pool_mode(struct pool *pool, enum pool_mode mode); 561 555 562 556 /* 563 557 * Target context for a pool. ··· 742 718 struct pool *pool = tc->pool; 743 719 unsigned long flags; 744 720 745 - /* 746 - * Batch together any FUA/FLUSH bios we find and then issue 747 - * a single commit for them in process_deferred_bios(). 
748 - */ 749 - if (bio_triggers_commit(tc, bio)) { 750 - spin_lock_irqsave(&pool->lock, flags); 751 - bio_list_add(&pool->deferred_flush_bios, bio); 752 - spin_unlock_irqrestore(&pool->lock, flags); 753 - } else 721 + if (!bio_triggers_commit(tc, bio)) { 754 722 generic_make_request(bio); 723 + return; 724 + } 725 + 726 + /* 727 + * Complete bio with an error if earlier I/O caused changes to 728 + * the metadata that can't be committed e.g, due to I/O errors 729 + * on the metadata device. 730 + */ 731 + if (dm_thin_aborted_changes(tc->td)) { 732 + bio_io_error(bio); 733 + return; 734 + } 735 + 736 + /* 737 + * Batch together any bios that trigger commits and then issue a 738 + * single commit for them in process_deferred_bios(). 739 + */ 740 + spin_lock_irqsave(&pool->lock, flags); 741 + bio_list_add(&pool->deferred_flush_bios, bio); 742 + spin_unlock_irqrestore(&pool->lock, flags); 755 743 } 756 744 757 745 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) ··· 900 864 wake_worker(pool); 901 865 } 902 866 867 + static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) 868 + { 869 + if (m->bio) 870 + m->bio->bi_end_io = m->saved_bi_end_io; 871 + cell_error(m->cell); 872 + list_del(&m->list); 873 + mempool_free(m, m->tc->pool->mapping_pool); 874 + } 903 875 static void process_prepared_mapping(struct dm_thin_new_mapping *m) 904 876 { 905 877 struct thin_c *tc = m->tc; ··· 952 908 mempool_free(m, tc->pool->mapping_pool); 953 909 } 954 910 955 - static void process_prepared_discard(struct dm_thin_new_mapping *m) 911 + static void process_prepared_discard_fail(struct dm_thin_new_mapping *m) 956 912 { 957 - int r; 958 913 struct thin_c *tc = m->tc; 959 914 960 - r = dm_thin_remove_block(tc->td, m->virt_block); 961 - if (r) 962 - DMERR("dm_thin_remove_block() failed"); 915 + bio_io_error(m->bio); 916 + cell_defer_except(tc, m->cell); 917 + cell_defer_except(tc, m->cell2); 918 + mempool_free(m, tc->pool->mapping_pool); 919 + } 963 
920 964 - /* 965 - * Pass the discard down to the underlying device? 966 - */ 921 + static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m) 922 + { 923 + struct thin_c *tc = m->tc; 924 + 967 925 if (m->pass_discard) 968 926 remap_and_issue(tc, m->bio, m->data_block); 969 927 else ··· 976 930 mempool_free(m, tc->pool->mapping_pool); 977 931 } 978 932 933 + static void process_prepared_discard(struct dm_thin_new_mapping *m) 934 + { 935 + int r; 936 + struct thin_c *tc = m->tc; 937 + 938 + r = dm_thin_remove_block(tc->td, m->virt_block); 939 + if (r) 940 + DMERR("dm_thin_remove_block() failed"); 941 + 942 + process_prepared_discard_passdown(m); 943 + } 944 + 979 945 static void process_prepared(struct pool *pool, struct list_head *head, 980 - void (*fn)(struct dm_thin_new_mapping *)) 946 + process_mapping_fn *fn) 981 947 { 982 948 unsigned long flags; 983 949 struct list_head maps; ··· 1001 943 spin_unlock_irqrestore(&pool->lock, flags); 1002 944 1003 945 list_for_each_entry_safe(m, tmp, &maps, list) 1004 - fn(m); 946 + (*fn)(m); 1005 947 } 1006 948 1007 949 /* ··· 1167 1109 } 1168 1110 } 1169 1111 1112 + static int commit(struct pool *pool) 1113 + { 1114 + int r; 1115 + 1116 + r = dm_pool_commit_metadata(pool->pmd); 1117 + if (r) 1118 + DMERR("commit failed, error = %d", r); 1119 + 1120 + return r; 1121 + } 1122 + 1123 + /* 1124 + * A non-zero return indicates read_only or fail_io mode. 1125 + * Many callers don't care about the return value. 1126 + */ 1127 + static int commit_or_fallback(struct pool *pool) 1128 + { 1129 + int r; 1130 + 1131 + if (get_pool_mode(pool) != PM_WRITE) 1132 + return -EINVAL; 1133 + 1134 + r = commit(pool); 1135 + if (r) 1136 + set_pool_mode(pool, PM_READ_ONLY); 1137 + 1138 + return r; 1139 + } 1140 + 1170 1141 static int alloc_data_block(struct thin_c *tc, dm_block_t *result) 1171 1142 { 1172 1143 int r; ··· 1224 1137 * Try to commit to see if that will free up some 1225 1138 * more space. 
1226 1139 */ 1227 - r = dm_pool_commit_metadata(pool->pmd); 1228 - if (r) { 1229 - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", 1230 - __func__, r); 1231 - return r; 1232 - } 1140 + (void) commit_or_fallback(pool); 1233 1141 1234 1142 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); 1235 1143 if (r) ··· 1455 1373 1456 1374 default: 1457 1375 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); 1376 + set_pool_mode(tc->pool, PM_READ_ONLY); 1458 1377 cell_error(cell); 1459 1378 break; 1460 1379 } ··· 1513 1430 } 1514 1431 } 1515 1432 1433 + static void process_bio_read_only(struct thin_c *tc, struct bio *bio) 1434 + { 1435 + int r; 1436 + int rw = bio_data_dir(bio); 1437 + dm_block_t block = get_bio_block(tc, bio); 1438 + struct dm_thin_lookup_result lookup_result; 1439 + 1440 + r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1441 + switch (r) { 1442 + case 0: 1443 + if (lookup_result.shared && (rw == WRITE) && bio->bi_size) 1444 + bio_io_error(bio); 1445 + else 1446 + remap_and_issue(tc, bio, lookup_result.block); 1447 + break; 1448 + 1449 + case -ENODATA: 1450 + if (rw != READ) { 1451 + bio_io_error(bio); 1452 + break; 1453 + } 1454 + 1455 + if (tc->origin_dev) { 1456 + remap_to_origin_and_issue(tc, bio); 1457 + break; 1458 + } 1459 + 1460 + zero_fill_bio(bio); 1461 + bio_endio(bio, 0); 1462 + break; 1463 + 1464 + default: 1465 + DMERR("dm_thin_find_block() failed, error = %d", r); 1466 + bio_io_error(bio); 1467 + break; 1468 + } 1469 + } 1470 + 1471 + static void process_bio_fail(struct thin_c *tc, struct bio *bio) 1472 + { 1473 + bio_io_error(bio); 1474 + } 1475 + 1516 1476 static int need_commit_due_to_time(struct pool *pool) 1517 1477 { 1518 1478 return jiffies < pool->last_commit_jiffies || ··· 1567 1441 unsigned long flags; 1568 1442 struct bio *bio; 1569 1443 struct bio_list bios; 1570 - int r; 1571 1444 1572 1445 bio_list_init(&bios); 1573 1446 ··· 1593 1468 } 1594 1469 1595 1470 if (bio->bi_rw & REQ_DISCARD) 
1596 - process_discard(tc, bio); 1471 + pool->process_discard(tc, bio); 1597 1472 else 1598 - process_bio(tc, bio); 1473 + pool->process_bio(tc, bio); 1599 1474 } 1600 1475 1601 1476 /* ··· 1611 1486 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) 1612 1487 return; 1613 1488 1614 - r = dm_pool_commit_metadata(pool->pmd); 1615 - if (r) { 1616 - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", 1617 - __func__, r); 1489 + if (commit_or_fallback(pool)) { 1618 1490 while ((bio = bio_list_pop(&bios))) 1619 1491 bio_io_error(bio); 1620 1492 return; ··· 1626 1504 { 1627 1505 struct pool *pool = container_of(ws, struct pool, worker); 1628 1506 1629 - process_prepared(pool, &pool->prepared_mappings, process_prepared_mapping); 1630 - process_prepared(pool, &pool->prepared_discards, process_prepared_discard); 1507 + process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); 1508 + process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); 1631 1509 process_deferred_bios(pool); 1632 1510 } 1633 1511 ··· 1640 1518 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker); 1641 1519 wake_worker(pool); 1642 1520 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 1521 + } 1522 + 1523 + /*----------------------------------------------------------------*/ 1524 + 1525 + static enum pool_mode get_pool_mode(struct pool *pool) 1526 + { 1527 + return pool->pf.mode; 1528 + } 1529 + 1530 + static void set_pool_mode(struct pool *pool, enum pool_mode mode) 1531 + { 1532 + int r; 1533 + 1534 + pool->pf.mode = mode; 1535 + 1536 + switch (mode) { 1537 + case PM_FAIL: 1538 + DMERR("switching pool to failure mode"); 1539 + pool->process_bio = process_bio_fail; 1540 + pool->process_discard = process_bio_fail; 1541 + pool->process_prepared_mapping = process_prepared_mapping_fail; 1542 + pool->process_prepared_discard = process_prepared_discard_fail; 1543 + break; 1544 + 1545 + case PM_READ_ONLY: 1546 + 
DMERR("switching pool to read-only mode"); 1547 + r = dm_pool_abort_metadata(pool->pmd); 1548 + if (r) { 1549 + DMERR("aborting transaction failed"); 1550 + set_pool_mode(pool, PM_FAIL); 1551 + } else { 1552 + dm_pool_metadata_read_only(pool->pmd); 1553 + pool->process_bio = process_bio_read_only; 1554 + pool->process_discard = process_discard; 1555 + pool->process_prepared_mapping = process_prepared_mapping_fail; 1556 + pool->process_prepared_discard = process_prepared_discard_passdown; 1557 + } 1558 + break; 1559 + 1560 + case PM_WRITE: 1561 + pool->process_bio = process_bio; 1562 + pool->process_discard = process_discard; 1563 + pool->process_prepared_mapping = process_prepared_mapping; 1564 + pool->process_prepared_discard = process_prepared_discard; 1565 + break; 1566 + } 1643 1567 } 1644 1568 1645 1569 /*----------------------------------------------------------------*/ ··· 1735 1567 struct dm_thin_lookup_result result; 1736 1568 1737 1569 map_context->ptr = thin_hook_bio(tc, bio); 1570 + 1571 + if (get_pool_mode(tc->pool) == PM_FAIL) { 1572 + bio_io_error(bio); 1573 + return DM_MAPIO_SUBMITTED; 1574 + } 1575 + 1738 1576 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { 1739 1577 thin_defer_bio(tc, bio); 1740 1578 return DM_MAPIO_SUBMITTED; ··· 1777 1603 break; 1778 1604 1779 1605 case -ENODATA: 1606 + if (get_pool_mode(tc->pool) == PM_READ_ONLY) { 1607 + /* 1608 + * This block isn't provisioned, and we have no way 1609 + * of doing so. Just error it. 1610 + */ 1611 + bio_io_error(bio); 1612 + r = DM_MAPIO_SUBMITTED; 1613 + break; 1614 + } 1615 + /* fall through */ 1616 + 1617 + case -EWOULDBLOCK: 1780 1618 /* 1781 1619 * In future, the failed dm_thin_find_block above could 1782 1620 * provide the hint to load the metadata into cache. 1783 1621 */ 1784 - case -EWOULDBLOCK: 1785 1622 thin_defer_bio(tc, bio); 1623 + r = DM_MAPIO_SUBMITTED; 1624 + break; 1625 + 1626 + default: 1627 + /* 1628 + * Must always call bio_io_error on failure. 
1629 + * dm_thin_find_block can fail with -EINVAL if the 1630 + * pool is switched to fail-io mode. 1631 + */ 1632 + bio_io_error(bio); 1786 1633 r = DM_MAPIO_SUBMITTED; 1787 1634 break; 1788 1635 } ··· 1842 1647 { 1843 1648 struct pool_c *pt = ti->private; 1844 1649 1650 + /* 1651 + * We want to make sure that degraded pools are never upgraded. 1652 + */ 1653 + enum pool_mode old_mode = pool->pf.mode; 1654 + enum pool_mode new_mode = pt->pf.mode; 1655 + 1656 + if (old_mode > new_mode) 1657 + new_mode = old_mode; 1658 + 1845 1659 pool->ti = ti; 1846 1660 pool->low_water_blocks = pt->low_water_blocks; 1847 1661 pool->pf = pt->pf; 1662 + set_pool_mode(pool, new_mode); 1848 1663 1849 1664 /* 1850 1665 * If discard_passdown was enabled verify that the data device 1851 1666 * supports discards. Disable discard_passdown if not; otherwise 1852 1667 * -EOPNOTSUPP will be returned. 1853 1668 */ 1669 + /* FIXME: pull this out into a sep fn. */ 1854 1670 if (pt->pf.discard_passdown) { 1855 1671 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); 1856 1672 if (!q || !blk_queue_discard(q)) { ··· 1887 1681 /* Initialize pool features. */ 1888 1682 static void pool_features_init(struct pool_features *pf) 1889 1683 { 1684 + pf->mode = PM_WRITE; 1890 1685 pf->zero_new_blocks = 1; 1891 1686 pf->discard_enabled = 1; 1892 1687 pf->discard_passdown = 1; ··· 1918 1711 1919 1712 static struct pool *pool_create(struct mapped_device *pool_md, 1920 1713 struct block_device *metadata_dev, 1921 - unsigned long block_size, char **error) 1714 + unsigned long block_size, 1715 + int read_only, char **error) 1922 1716 { 1923 1717 int r; 1924 1718 void *err_p; 1925 1719 struct pool *pool; 1926 1720 struct dm_pool_metadata *pmd; 1721 + bool format_device = read_only ? 
false : true; 1927 1722 1928 - pmd = dm_pool_metadata_open(metadata_dev, block_size, true); 1723 + pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device); 1929 1724 if (IS_ERR(pmd)) { 1930 1725 *error = "Error creating metadata object"; 1931 1726 return (struct pool *)pmd; ··· 2044 1835 2045 1836 static struct pool *__pool_find(struct mapped_device *pool_md, 2046 1837 struct block_device *metadata_dev, 2047 - unsigned long block_size, char **error, 2048 - int *created) 1838 + unsigned long block_size, int read_only, 1839 + char **error, int *created) 2049 1840 { 2050 1841 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); 2051 1842 ··· 2066 1857 __pool_inc(pool); 2067 1858 2068 1859 } else { 2069 - pool = pool_create(pool_md, metadata_dev, block_size, error); 1860 + pool = pool_create(pool_md, metadata_dev, block_size, read_only, error); 2070 1861 *created = 1; 2071 1862 } 2072 1863 } ··· 2117 1908 arg_name = dm_shift_arg(as); 2118 1909 argc--; 2119 1910 2120 - if (!strcasecmp(arg_name, "skip_block_zeroing")) { 1911 + if (!strcasecmp(arg_name, "skip_block_zeroing")) 2121 1912 pf->zero_new_blocks = 0; 2122 - continue; 2123 - } else if (!strcasecmp(arg_name, "ignore_discard")) { 2124 - pf->discard_enabled = 0; 2125 - continue; 2126 - } else if (!strcasecmp(arg_name, "no_discard_passdown")) { 2127 - pf->discard_passdown = 0; 2128 - continue; 2129 - } 2130 1913 2131 - ti->error = "Unrecognised pool feature requested"; 2132 - r = -EINVAL; 1914 + else if (!strcasecmp(arg_name, "ignore_discard")) 1915 + pf->discard_enabled = 0; 1916 + 1917 + else if (!strcasecmp(arg_name, "no_discard_passdown")) 1918 + pf->discard_passdown = 0; 1919 + 1920 + else if (!strcasecmp(arg_name, "read_only")) 1921 + pf->mode = PM_READ_ONLY; 1922 + 1923 + else { 1924 + ti->error = "Unrecognised pool feature requested"; 1925 + r = -EINVAL; 1926 + break; 1927 + } 2133 1928 } 2134 1929 2135 1930 return r; ··· 2226 2013 } 2227 2014 2228 2015 pool = 
__pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, 2229 - block_size, &ti->error, &pool_created); 2016 + block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created); 2230 2017 if (IS_ERR(pool)) { 2231 2018 r = PTR_ERR(pool); 2232 2019 goto out_free_pt; ··· 2359 2146 r = dm_pool_resize_data_dev(pool->pmd, data_size); 2360 2147 if (r) { 2361 2148 DMERR("failed to resize data device"); 2149 + /* FIXME Stricter than necessary: Rollback transaction instead here */ 2150 + set_pool_mode(pool, PM_READ_ONLY); 2362 2151 return r; 2363 2152 } 2364 2153 2365 - r = dm_pool_commit_metadata(pool->pmd); 2366 - if (r) { 2367 - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", 2368 - __func__, r); 2369 - return r; 2370 - } 2154 + (void) commit_or_fallback(pool); 2371 2155 } 2372 2156 2373 2157 return 0; ··· 2387 2177 2388 2178 static void pool_postsuspend(struct dm_target *ti) 2389 2179 { 2390 - int r; 2391 2180 struct pool_c *pt = ti->private; 2392 2181 struct pool *pool = pt->pool; 2393 2182 2394 2183 cancel_delayed_work(&pool->waker); 2395 2184 flush_workqueue(pool->wq); 2396 - 2397 - r = dm_pool_commit_metadata(pool->pmd); 2398 - if (r < 0) { 2399 - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", 2400 - __func__, r); 2401 - /* FIXME: invalidate device? 
error the next FUA or FLUSH bio ?*/ 2402 - } 2185 + (void) commit_or_fallback(pool); 2403 2186 } 2404 2187 2405 2188 static int check_arg_count(unsigned argc, unsigned args_required) ··· 2526 2323 if (r) 2527 2324 return r; 2528 2325 2529 - r = dm_pool_commit_metadata(pool->pmd); 2530 - if (r) { 2531 - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", 2532 - __func__, r); 2533 - return r; 2534 - } 2326 + (void) commit_or_fallback(pool); 2535 2327 2536 2328 r = dm_pool_reserve_metadata_snap(pool->pmd); 2537 2329 if (r) ··· 2587 2389 else 2588 2390 DMWARN("Unrecognised thin pool target message received: %s", argv[0]); 2589 2391 2590 - if (!r) { 2591 - r = dm_pool_commit_metadata(pool->pmd); 2592 - if (r) 2593 - DMERR("%s message: dm_pool_commit_metadata() failed, error = %d", 2594 - argv[0], r); 2595 - } 2392 + if (!r) 2393 + (void) commit_or_fallback(pool); 2596 2394 2597 2395 return r; 2396 + } 2397 + 2398 + static void emit_flags(struct pool_features *pf, char *result, 2399 + unsigned sz, unsigned maxlen) 2400 + { 2401 + unsigned count = !pf->zero_new_blocks + !pf->discard_enabled + 2402 + !pf->discard_passdown + (pf->mode == PM_READ_ONLY); 2403 + DMEMIT("%u ", count); 2404 + 2405 + if (!pf->zero_new_blocks) 2406 + DMEMIT("skip_block_zeroing "); 2407 + 2408 + if (!pf->discard_enabled) 2409 + DMEMIT("ignore_discard "); 2410 + 2411 + if (!pf->discard_passdown) 2412 + DMEMIT("no_discard_passdown "); 2413 + 2414 + if (pf->mode == PM_READ_ONLY) 2415 + DMEMIT("read_only "); 2598 2416 } 2599 2417 2600 2418 /* ··· 2621 2407 static int pool_status(struct dm_target *ti, status_type_t type, 2622 2408 char *result, unsigned maxlen) 2623 2409 { 2624 - int r, count; 2410 + int r; 2625 2411 unsigned sz = 0; 2626 2412 uint64_t transaction_id; 2627 2413 dm_block_t nr_free_blocks_data; ··· 2636 2422 2637 2423 switch (type) { 2638 2424 case STATUSTYPE_INFO: 2425 + if (get_pool_mode(pool) == PM_FAIL) { 2426 + DMEMIT("Fail"); 2427 + break; 2428 + } 2429 + 2639 2430 r = 
dm_pool_get_metadata_transaction_id(pool->pmd, 2640 2431 &transaction_id); 2641 2432 if (r) ··· 2676 2457 (unsigned long long)nr_blocks_data); 2677 2458 2678 2459 if (held_root) 2679 - DMEMIT("%llu", held_root); 2460 + DMEMIT("%llu ", held_root); 2680 2461 else 2681 - DMEMIT("-"); 2462 + DMEMIT("- "); 2463 + 2464 + if (pool->pf.mode == PM_READ_ONLY) 2465 + DMEMIT("ro "); 2466 + else 2467 + DMEMIT("rw "); 2468 + 2469 + if (pool->pf.discard_enabled && pool->pf.discard_passdown) 2470 + DMEMIT("discard_passdown"); 2471 + else 2472 + DMEMIT("no_discard_passdown"); 2682 2473 2683 2474 break; 2684 2475 ··· 2698 2469 format_dev_t(buf2, pt->data_dev->bdev->bd_dev), 2699 2470 (unsigned long)pool->sectors_per_block, 2700 2471 (unsigned long long)pt->low_water_blocks); 2701 - 2702 - count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled + 2703 - !pt->pf.discard_passdown; 2704 - DMEMIT("%u ", count); 2705 - 2706 - if (!pool->pf.zero_new_blocks) 2707 - DMEMIT("skip_block_zeroing "); 2708 - 2709 - if (!pool->pf.discard_enabled) 2710 - DMEMIT("ignore_discard "); 2711 - 2712 - if (!pt->pf.discard_passdown) 2713 - DMEMIT("no_discard_passdown "); 2714 - 2472 + emit_flags(&pt->pf, result, sz, maxlen); 2715 2473 break; 2716 2474 } 2717 2475 ··· 2758 2542 .name = "thin-pool", 2759 2543 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2760 2544 DM_TARGET_IMMUTABLE, 2761 - .version = {1, 2, 0}, 2545 + .version = {1, 3, 0}, 2762 2546 .module = THIS_MODULE, 2763 2547 .ctr = pool_ctr, 2764 2548 .dtr = pool_dtr, ··· 2862 2646 goto bad_pool_lookup; 2863 2647 } 2864 2648 __pool_inc(tc->pool); 2649 + 2650 + if (get_pool_mode(tc->pool) == PM_FAIL) { 2651 + ti->error = "Couldn't open thin device, Pool is in fail mode"; 2652 + goto bad_thin_open; 2653 + } 2865 2654 2866 2655 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); 2867 2656 if (r) { ··· 2976 2755 char buf[BDEVNAME_SIZE]; 2977 2756 struct thin_c *tc = ti->private; 2978 2757 2758 + if 
(get_pool_mode(tc->pool) == PM_FAIL) { 2759 + DMEMIT("Fail"); 2760 + return 0; 2761 + } 2762 + 2979 2763 if (!tc->td) 2980 2764 DMEMIT("-"); 2981 2765 else { ··· 3049 2823 3050 2824 static struct target_type thin_target = { 3051 2825 .name = "thin", 3052 - .version = {1, 2, 0}, 2826 + .version = {1, 3, 0}, 3053 2827 .module = THIS_MODULE, 3054 2828 .ctr = thin_ctr, 3055 2829 .dtr = thin_dtr,