Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-4.12/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

- DM cache metadata fixes to short-circuit operations that require the
metadata not be in 'fail_io' mode. Otherwise crashes are possible.

- a DM cache fix to address the inability to adapt to continuous IO
that happened to also reflect a changing working set (which required
old blocks be demoted before the new working set could be promoted)

- a DM cache smq policy cleanup that fell out from reviewing the above

- fix the Kconfig help text for CONFIG_DM_INTEGRITY

* tag 'for-4.12/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm cache metadata: fail operations if fail_io mode has been established
dm integrity: improve the Kconfig help text for DM_INTEGRITY
dm cache policy smq: cleanup free_target_met() and clean_target_met()
dm cache policy smq: allow demotions to happen even during continuous IO

+39 -18
+13 -2
drivers/md/Kconfig
```diff
@@ -503,12 +503,23 @@ (drivers/md/Kconfig)
       If unsure, say N.
 
 config DM_INTEGRITY
-	tristate "Integrity target"
+	tristate "Integrity target support"
 	depends on BLK_DEV_DM
 	select BLK_DEV_INTEGRITY
 	select DM_BUFIO
 	select CRYPTO
 	select ASYNC_XOR
 	---help---
-	  This is the integrity target.
+	  This device-mapper target emulates a block device that has
+	  additional per-sector tags that can be used for storing
+	  integrity information.
+
+	  This integrity target is used with the dm-crypt target to
+	  provide authenticated disk encryption or it can be used
+	  standalone.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called dm-integrity.
+
+	  If unsure, say N.
 
 endif # MD
```
+8 -4
drivers/md/dm-cache-metadata.c
```diff
@@ -1624,17 +1624,19 @@ (drivers/md/dm-cache-metadata.c)
 
 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
 {
-	int r;
+	int r = -EINVAL;
 	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
 				 clear_clean_shutdown);
 
 	WRITE_LOCK(cmd);
+	if (cmd->fail_io)
+		goto out;
+
 	r = __commit_transaction(cmd, mutator);
 	if (r)
 		goto out;
 
 	r = __begin_transaction(cmd);
-
 out:
 	WRITE_UNLOCK(cmd);
 	return r;
@@ -1648,7 +1646,8 @@
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_free(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
@@ -1661,7 +1658,8 @@
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
```
+18 -12
drivers/md/dm-cache-policy-smq.c
```diff
@@ -1120,28 +1120,30 @@ (drivers/md/dm-cache-policy-smq.c)
 	 * Cache entries may not be populated. So we cannot rely on the
 	 * size of the clean queue.
 	 */
-	unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+	unsigned nr_clean;
 
-	if (idle)
+	if (idle) {
 		/*
 		 * We'd like to clean everything.
 		 */
 		return q_size(&mq->dirty) == 0u;
-	else
-		return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-		       percent_to_target(mq, CLEAN_TARGET);
+	}
+
+	nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+	return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
+	       percent_to_target(mq, CLEAN_TARGET);
 }
 
 static bool free_target_met(struct smq_policy *mq, bool idle)
 {
-	unsigned nr_free = from_cblock(mq->cache_size) -
-			   mq->cache_alloc.nr_allocated;
+	unsigned nr_free;
 
-	if (idle)
-		return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
-		       percent_to_target(mq, FREE_TARGET);
-	else
+	if (!idle)
 		return true;
+
+	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+	       percent_to_target(mq, FREE_TARGET);
 }
 
 /*----------------------------------------------------------------*/
@@ -1216,7 +1214,11 @@
 		return;
 
 	if (allocator_empty(&mq->cache_alloc)) {
-		if (!free_target_met(mq, false))
+		/*
+		 * We always claim to be 'idle' to ensure some demotions happen
+		 * with continuous loads.
+		 */
+		if (!free_target_met(mq, true))
 			queue_demotion(mq);
 		return;
 	}
```