Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-4.15/dm-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull more device mapper updates from Mike Snitzer:
"Given your expected travel I figured I'd get these fixes to you sooner
rather than later.

- a DM multipath stable@ fix to silence an annoying error message
that isn't _really_ an error

- a DM core stable@ fix for discard support that was enabled for an
entire DM device despite only having partial support for discards
due to a mix of discard capabilities across the underlying devices.

- a couple other DM core discard fixes.

- a DM bufio stable@ fix that resolves a 32-bit overflow"

* tag 'for-4.15/dm-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm bufio: fix integer overflow when limiting maximum cache size
dm: clear all discard attributes in queue_limits when discards are disabled
dm: do not set 'discards_supported' in targets that do not need it
dm: discard support requires all targets in a table support discards
dm mpath: remove annoying message of 'blk_get_request() returned -11'

+28 -39
+6 -9
drivers/md/dm-bufio.c
··· 974 974 buffers = c->minimum_buffers; 975 975 976 976 *limit_buffers = buffers; 977 - *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; 977 + *threshold_buffers = mult_frac(buffers, 978 + DM_BUFIO_WRITEBACK_PERCENT, 100); 978 979 } 979 980 980 981 /* ··· 1911 1910 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); 1912 1911 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); 1913 1912 1914 - mem = (__u64)((totalram_pages - totalhigh_pages) * 1915 - DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT; 1913 + mem = (__u64)mult_frac(totalram_pages - totalhigh_pages, 1914 + DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; 1916 1915 1917 1916 if (mem > ULONG_MAX) 1918 1917 mem = ULONG_MAX; 1919 1918 1920 1919 #ifdef CONFIG_MMU 1921 - /* 1922 - * Get the size of vmalloc space the same way as VMALLOC_TOTAL 1923 - * in fs/proc/internal.h 1924 - */ 1925 - if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100) 1926 - mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100; 1920 + if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) 1921 + mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); 1927 1922 #endif 1928 1923 1929 1924 dm_bufio_default_cache_size = mem;
-1
drivers/md/dm-era-target.c
··· 1513 1513 ti->flush_supported = true; 1514 1514 1515 1515 ti->num_discard_bios = 1; 1516 - ti->discards_supported = true; 1517 1516 era->callbacks.congested_fn = era_is_congested; 1518 1517 dm_table_add_target_callbacks(ti->table, &era->callbacks); 1519 1518
-2
drivers/md/dm-mpath.c
··· 499 499 if (IS_ERR(clone)) { 500 500 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ 501 501 bool queue_dying = blk_queue_dying(q); 502 - DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing", 503 - PTR_ERR(clone), queue_dying ? " (path offline)" : ""); 504 502 if (queue_dying) { 505 503 atomic_inc(&m->pg_init_in_progress); 506 504 activate_or_offline_path(pgpath);
-6
drivers/md/dm-raid.c
··· 2887 2887 bool raid456; 2888 2888 struct dm_target *ti = rs->ti; 2889 2889 2890 - /* Assume discards not supported until after checks below. */ 2891 - ti->discards_supported = false; 2892 - 2893 2890 /* 2894 2891 * XXX: RAID level 4,5,6 require zeroing for safety. 2895 2892 */ ··· 2910 2913 } 2911 2914 } 2912 2915 } 2913 - 2914 - /* All RAID members properly support discards */ 2915 - ti->discards_supported = true; 2916 2916 2917 2917 /* 2918 2918 * RAID1 and RAID10 personalities require bio splitting,
+22 -21
drivers/md/dm-table.c
··· 1758 1758 return true; 1759 1759 } 1760 1760 1761 - 1762 - static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, 1763 - sector_t start, sector_t len, void *data) 1761 + static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, 1762 + sector_t start, sector_t len, void *data) 1764 1763 { 1765 1764 struct request_queue *q = bdev_get_queue(dev->bdev); 1766 1765 1767 - return q && blk_queue_discard(q); 1766 + return q && !blk_queue_discard(q); 1768 1767 } 1769 1768 1770 1769 static bool dm_table_supports_discards(struct dm_table *t) ··· 1771 1772 struct dm_target *ti; 1772 1773 unsigned i; 1773 1774 1774 - /* 1775 - * Unless any target used by the table set discards_supported, 1776 - * require at least one underlying device to support discards. 1777 - * t->devices includes internal dm devices such as mirror logs 1778 - * so we need to use iterate_devices here, which targets 1779 - * supporting discard selectively must provide. 1780 - */ 1781 1775 for (i = 0; i < dm_table_get_num_targets(t); i++) { 1782 1776 ti = dm_table_get_target(t, i); 1783 1777 1784 1778 if (!ti->num_discard_bios) 1785 - continue; 1779 + return false; 1786 1780 1787 - if (ti->discards_supported) 1788 - return true; 1789 - 1790 - if (ti->type->iterate_devices && 1791 - ti->type->iterate_devices(ti, device_discard_capable, NULL)) 1792 - return true; 1781 + /* 1782 + * Either the target provides discard support (as implied by setting 1783 + * 'discards_supported') or it relies on _all_ data devices having 1784 + * discard support. 
1785 + */ 1786 + if (!ti->discards_supported && 1787 + (!ti->type->iterate_devices || 1788 + ti->type->iterate_devices(ti, device_not_discard_capable, NULL))) 1789 + return false; 1793 1790 } 1794 1791 1795 - return false; 1792 + return true; 1796 1793 } 1797 1794 1798 1795 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, ··· 1801 1806 */ 1802 1807 q->limits = *limits; 1803 1808 1804 - if (!dm_table_supports_discards(t)) 1809 + if (!dm_table_supports_discards(t)) { 1805 1810 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); 1806 - else 1811 + /* Must also clear discard limits... */ 1812 + q->limits.max_discard_sectors = 0; 1813 + q->limits.max_hw_discard_sectors = 0; 1814 + q->limits.discard_granularity = 0; 1815 + q->limits.discard_alignment = 0; 1816 + q->limits.discard_misaligned = 0; 1817 + } else 1807 1818 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 1808 1819 1809 1820 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {