Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: prepare for allocating blk-mq clone requests in target

For blk-mq request-based DM, the responsibility of allocating a cloned
request will be transferred from DM core to the target type.

To prepare for conditionally using this new model, the original
request's 'special' now points to the dm_rq_target_io because the
clone is allocated later in the block layer rather than in DM core.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

authored by

Keith Busch and committed by
Mike Snitzer
466d89a6 2eb6e1e3

+66 -68
+66 -68
drivers/md/dm.c
··· 1016 1016 * the md may be freed in dm_put() at the end of this function. 1017 1017 * Or do dm_get() before calling this function and dm_put() later. 1018 1018 */ 1019 - static void rq_completed(struct mapped_device *md, int rw, int run_queue) 1019 + static void rq_completed(struct mapped_device *md, int rw, bool run_queue) 1020 1020 { 1021 1021 atomic_dec(&md->pending[rw]); 1022 1022 ··· 1050 1050 1051 1051 /* 1052 1052 * Complete the clone and the original request. 1053 - * Must be called without queue lock. 1053 + * Must be called without clone's queue lock held, 1054 + * see end_clone_request() for more details. 1054 1055 */ 1055 1056 static void dm_end_request(struct request *clone, int error) 1056 1057 { ··· 1080 1079 1081 1080 static void dm_unprep_request(struct request *rq) 1082 1081 { 1083 - struct request *clone = rq->special; 1082 + struct dm_rq_target_io *tio = rq->special; 1083 + struct request *clone = tio->clone; 1084 1084 1085 1085 rq->special = NULL; 1086 1086 rq->cmd_flags &= ~REQ_DONTPREP; ··· 1092 1090 /* 1093 1091 * Requeue the original request of a clone. 
1094 1092 */ 1095 - static void dm_requeue_unmapped_request(struct request *clone) 1093 + static void dm_requeue_unmapped_original_request(struct mapped_device *md, 1094 + struct request *rq) 1096 1095 { 1097 - int rw = rq_data_dir(clone); 1098 - struct dm_rq_target_io *tio = clone->end_io_data; 1099 - struct mapped_device *md = tio->md; 1100 - struct request *rq = tio->orig; 1096 + int rw = rq_data_dir(rq); 1101 1097 struct request_queue *q = rq->q; 1102 1098 unsigned long flags; 1103 1099 ··· 1105 1105 blk_requeue_request(q, rq); 1106 1106 spin_unlock_irqrestore(q->queue_lock, flags); 1107 1107 1108 - rq_completed(md, rw, 0); 1108 + rq_completed(md, rw, false); 1109 + } 1110 + 1111 + static void dm_requeue_unmapped_request(struct request *clone) 1112 + { 1113 + struct dm_rq_target_io *tio = clone->end_io_data; 1114 + 1115 + dm_requeue_unmapped_original_request(tio->md, tio->orig); 1109 1116 } 1110 1117 1111 1118 static void __stop_queue(struct request_queue *q) ··· 1182 1175 static void dm_softirq_done(struct request *rq) 1183 1176 { 1184 1177 bool mapped = true; 1185 - struct request *clone = rq->completion_data; 1186 - struct dm_rq_target_io *tio = clone->end_io_data; 1178 + struct dm_rq_target_io *tio = rq->special; 1179 + struct request *clone = tio->clone; 1187 1180 1188 1181 if (rq->cmd_flags & REQ_FAILED) 1189 1182 mapped = false; ··· 1195 1188 * Complete the clone and the original request with the error status 1196 1189 * through softirq context. 1197 1190 */ 1198 - static void dm_complete_request(struct request *clone, int error) 1191 + static void dm_complete_request(struct request *rq, int error) 1199 1192 { 1200 - struct dm_rq_target_io *tio = clone->end_io_data; 1201 - struct request *rq = tio->orig; 1193 + struct dm_rq_target_io *tio = rq->special; 1202 1194 1203 1195 tio->error = error; 1204 - rq->completion_data = clone; 1205 1196 blk_complete_request(rq); 1206 1197 } 1207 1198 ··· 1209 1204 * Target's rq_end_io() function isn't called. 
1210 1205 * This may be used when the target's map_rq() function fails. 1211 1206 */ 1212 - static void dm_kill_unmapped_request(struct request *clone, int error) 1207 + static void dm_kill_unmapped_request(struct request *rq, int error) 1213 1208 { 1214 - struct dm_rq_target_io *tio = clone->end_io_data; 1215 - struct request *rq = tio->orig; 1216 - 1217 1209 rq->cmd_flags |= REQ_FAILED; 1218 - dm_complete_request(clone, error); 1210 + dm_complete_request(rq, error); 1219 1211 } 1220 1212 1221 1213 /* 1222 - * Called with the queue lock held 1214 + * Called with the clone's queue lock held 1223 1215 */ 1224 1216 static void end_clone_request(struct request *clone, int error) 1225 1217 { 1218 + struct dm_rq_target_io *tio = clone->end_io_data; 1219 + 1226 1220 /* 1227 1221 * For just cleaning up the information of the queue in which 1228 1222 * the clone was dispatched. ··· 1232 1228 1233 1229 /* 1234 1230 * Actual request completion is done in a softirq context which doesn't 1235 - * hold the queue lock. Otherwise, deadlock could occur because: 1231 + * hold the clone's queue lock. 
Otherwise, deadlock could occur because: 1236 1232 * - another request may be submitted by the upper level driver 1237 1233 * of the stacking during the completion 1238 1234 * - the submission which requires queue lock may be done 1239 - * against this queue 1235 + * against this clone's queue 1240 1236 */ 1241 - dm_complete_request(clone, error); 1237 + dm_complete_request(tio->orig, error); 1242 1238 } 1243 1239 1244 1240 /* ··· 1716 1712 _dm_request(q, bio); 1717 1713 } 1718 1714 1719 - static void dm_dispatch_request(struct request *rq) 1715 + static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1720 1716 { 1721 1717 int r; 1722 1718 1723 - if (blk_queue_io_stat(rq->q)) 1724 - rq->cmd_flags |= REQ_IO_STAT; 1719 + if (blk_queue_io_stat(clone->q)) 1720 + clone->cmd_flags |= REQ_IO_STAT; 1725 1721 1726 - rq->start_time = jiffies; 1727 - r = blk_insert_cloned_request(rq->q, rq); 1722 + clone->start_time = jiffies; 1723 + r = blk_insert_cloned_request(clone->q, clone); 1728 1724 if (r) 1725 + /* must complete clone in terms of original request */ 1729 1726 dm_complete_request(rq, r); 1730 1727 } 1731 1728 ··· 1765 1760 return 0; 1766 1761 } 1767 1762 1768 - static struct request *__clone_rq(struct request *rq, struct mapped_device *md, 1769 - struct dm_rq_target_io *tio, gfp_t gfp_mask) 1763 + static struct request *clone_rq(struct request *rq, struct mapped_device *md, 1764 + struct dm_rq_target_io *tio, gfp_t gfp_mask) 1770 1765 { 1771 1766 struct request *clone = alloc_clone_request(md, gfp_mask); 1772 1767 ··· 1785 1780 1786 1781 static void map_tio_request(struct kthread_work *work); 1787 1782 1788 - static struct request *clone_rq(struct request *rq, struct mapped_device *md, 1789 - gfp_t gfp_mask) 1783 + static struct dm_rq_target_io *prep_tio(struct request *rq, 1784 + struct mapped_device *md, gfp_t gfp_mask) 1790 1785 { 1791 - struct request *clone; 1792 1786 struct dm_rq_target_io *tio; 1793 1787 1794 1788 tio = 
alloc_rq_tio(md, gfp_mask); ··· 1802 1798 memset(&tio->info, 0, sizeof(tio->info)); 1803 1799 init_kthread_work(&tio->work, map_tio_request); 1804 1800 1805 - clone = __clone_rq(rq, md, tio, GFP_ATOMIC); 1806 - if (!clone) { 1801 + if (!clone_rq(rq, md, tio, gfp_mask)) { 1807 1802 free_rq_tio(tio); 1808 1803 return NULL; 1809 1804 } 1810 1805 1811 - return clone; 1806 + return tio; 1812 1807 } 1813 1808 1814 1809 /* ··· 1816 1813 static int dm_prep_fn(struct request_queue *q, struct request *rq) 1817 1814 { 1818 1815 struct mapped_device *md = q->queuedata; 1819 - struct request *clone; 1816 + struct dm_rq_target_io *tio; 1820 1817 1821 1818 if (unlikely(rq->special)) { 1822 1819 DMWARN("Already has something in rq->special."); 1823 1820 return BLKPREP_KILL; 1824 1821 } 1825 1822 1826 - clone = clone_rq(rq, md, GFP_ATOMIC); 1827 - if (!clone) 1823 + tio = prep_tio(rq, md, GFP_ATOMIC); 1824 + if (!tio) 1828 1825 return BLKPREP_DEFER; 1829 1826 1830 - rq->special = clone; 1827 + rq->special = tio; 1831 1828 rq->cmd_flags |= REQ_DONTPREP; 1832 1829 1833 1830 return BLKPREP_OK; ··· 1838 1835 * 0 : the request has been processed (not requeued) 1839 1836 * !0 : the request has been requeued 1840 1837 */ 1841 - static int map_request(struct dm_target *ti, struct request *clone, 1838 + static int map_request(struct dm_target *ti, struct request *rq, 1842 1839 struct mapped_device *md) 1843 1840 { 1844 1841 int r, requeued = 0; 1845 - struct dm_rq_target_io *tio = clone->end_io_data; 1842 + struct dm_rq_target_io *tio = rq->special; 1843 + struct request *clone = tio->clone; 1846 1844 1847 1845 r = ti->type->map_rq(ti, clone, &tio->info); 1848 1846 switch (r) { ··· 1853 1849 case DM_MAPIO_REMAPPED: 1854 1850 /* The target has remapped the I/O so dispatch it */ 1855 1851 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1856 - blk_rq_pos(tio->orig)); 1857 - dm_dispatch_request(clone); 1852 + blk_rq_pos(rq)); 1853 + dm_dispatch_clone_request(clone, rq); 1858 1854 
break; 1859 1855 case DM_MAPIO_REQUEUE: 1860 1856 /* The target wants to requeue the I/O */ ··· 1868 1864 } 1869 1865 1870 1866 /* The target wants to complete the I/O */ 1871 - dm_kill_unmapped_request(clone, r); 1867 + dm_kill_unmapped_request(rq, r); 1872 1868 break; 1873 1869 } 1874 1870 ··· 1879 1875 { 1880 1876 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 1881 1877 1882 - map_request(tio->ti, tio->clone, tio->md); 1878 + map_request(tio->ti, tio->orig, tio->md); 1883 1879 } 1884 1880 1885 - static struct request *dm_start_request(struct mapped_device *md, struct request *orig) 1881 + static void dm_start_request(struct mapped_device *md, struct request *orig) 1886 1882 { 1887 - struct request *clone; 1888 - 1889 1883 blk_start_request(orig); 1890 - clone = orig->special; 1891 - atomic_inc(&md->pending[rq_data_dir(clone)]); 1884 + atomic_inc(&md->pending[rq_data_dir(orig)]); 1892 1885 1893 1886 /* 1894 1887 * Hold the md reference here for the in-flight I/O. ··· 1895 1894 * See the comment in rq_completed() too. 
1896 1895 */ 1897 1896 dm_get(md); 1898 - 1899 - return clone; 1900 1897 } 1901 1898 1902 1899 /* ··· 1907 1908 int srcu_idx; 1908 1909 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 1909 1910 struct dm_target *ti; 1910 - struct request *rq, *clone; 1911 + struct request *rq; 1911 1912 struct dm_rq_target_io *tio; 1912 1913 sector_t pos; 1913 1914 ··· 1930 1931 ti = dm_table_find_target(map, pos); 1931 1932 if (!dm_target_is_valid(ti)) { 1932 1933 /* 1933 - * Must perform setup, that dm_done() requires, 1934 + * Must perform setup, that rq_completed() requires, 1934 1935 * before calling dm_kill_unmapped_request 1935 1936 */ 1936 1937 DMERR_LIMIT("request attempted access beyond the end of device"); 1937 - clone = dm_start_request(md, rq); 1938 - dm_kill_unmapped_request(clone, -EIO); 1938 + dm_start_request(md, rq); 1939 + dm_kill_unmapped_request(rq, -EIO); 1939 1940 continue; 1940 1941 } 1941 1942 1942 1943 if (ti->type->busy && ti->type->busy(ti)) 1943 1944 goto delay_and_out; 1944 1945 1945 - clone = dm_start_request(md, rq); 1946 + dm_start_request(md, rq); 1946 1947 1947 1948 tio = rq->special; 1948 1949 /* Establish tio->ti before queuing work (map_tio_request) */ ··· 2239 2240 bioset_free(md->bs); 2240 2241 md->bs = p->bs; 2241 2242 p->bs = NULL; 2242 - } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) { 2243 - /* 2244 - * There's no need to reload with request-based dm 2245 - * because the size of front_pad doesn't change. 2246 - * Note for future: If you are to reload bioset, 2247 - * prep-ed requests in the queue may refer 2248 - * to bio from the old bioset, so you must walk 2249 - * through the queue to unprep. 2250 - */ 2251 2243 } 2244 + /* 2245 + * There's no need to reload with request-based dm 2246 + * because the size of front_pad doesn't change. 
2247 + * Note for future: If you are to reload bioset, 2248 + * prep-ed requests in the queue may refer 2249 + * to bio from the old bioset, so you must walk 2250 + * through the queue to unprep. 2251 + */ 2252 2252 goto out; 2253 2253 } 2254 2254