Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

null_blk: single kmap per bio segment

Rather than kmap the request bio segment for each sector, do
the mapping just once.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Tested-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Keith Busch and committed by
Jens Axboe
262a3dd0 84592838

+14 -18
+14 -18
drivers/block/null_blk/main.c
··· 1129 1129 return 0; 1130 1130 } 1131 1131 1132 - static blk_status_t copy_to_nullb(struct nullb *nullb, struct page *source, 1133 - unsigned int off, sector_t sector, size_t n, bool is_fua) 1132 + static blk_status_t copy_to_nullb(struct nullb *nullb, void *source, 1133 + sector_t sector, size_t n, bool is_fua) 1134 1134 { 1135 1135 size_t temp, count = 0; 1136 1136 unsigned int offset; ··· 1148 1148 if (!t_page) 1149 1149 return BLK_STS_NOSPC; 1150 1150 1151 - memcpy_page(t_page->page, offset, source, off + count, temp); 1151 + memcpy_to_page(t_page->page, offset, source + count, temp); 1152 1152 1153 1153 __set_bit(sector & SECTOR_MASK, t_page->bitmap); 1154 1154 ··· 1161 1161 return BLK_STS_OK; 1162 1162 } 1163 1163 1164 - static void copy_from_nullb(struct nullb *nullb, struct page *dest, 1165 - unsigned int off, sector_t sector, size_t n) 1164 + static void copy_from_nullb(struct nullb *nullb, void *dest, sector_t sector, 1165 + size_t n) 1166 1166 { 1167 1167 size_t temp, count = 0; 1168 1168 unsigned int offset; ··· 1176 1176 !null_cache_active(nullb)); 1177 1177 1178 1178 if (t_page) 1179 - memcpy_page(dest, off + count, t_page->page, offset, 1180 - temp); 1179 + memcpy_from_page(dest + count, t_page->page, offset, 1180 + temp); 1181 1181 else 1182 - memzero_page(dest, off + count, temp); 1182 + memset(dest + count, 0, temp); 1183 1183 1184 1184 count += temp; 1185 1185 sector += temp >> SECTOR_SHIFT; 1186 1186 } 1187 - } 1188 - 1189 - static void nullb_fill_pattern(struct nullb *nullb, struct page *page, 1190 - unsigned int len, unsigned int off) 1191 - { 1192 - memset_page(page, off, 0xff, len); 1193 1187 } 1194 1188 1195 1189 blk_status_t null_handle_discard(struct nullb_device *dev, ··· 1234 1240 struct nullb_device *dev = nullb->dev; 1235 1241 blk_status_t err = BLK_STS_OK; 1236 1242 unsigned int valid_len = len; 1243 + void *p; 1237 1244 1245 + p = kmap_local_page(page) + off; 1238 1246 if (!is_write) { 1239 1247 if (dev->zoned) 1240 1248 
valid_len = null_zone_valid_read_len(nullb, 1241 1249 sector, len); 1242 1250 1243 1251 if (valid_len) { 1244 - copy_from_nullb(nullb, page, off, sector, 1245 - valid_len); 1252 + copy_from_nullb(nullb, p, sector, valid_len); 1246 1253 off += valid_len; 1247 1254 len -= valid_len; 1248 1255 } 1249 1256 1250 1257 if (len) 1251 - nullb_fill_pattern(nullb, page, len, off); 1258 + memset(p + valid_len, 0xff, len); 1252 1259 flush_dcache_page(page); 1253 1260 } else { 1254 1261 flush_dcache_page(page); 1255 - err = copy_to_nullb(nullb, page, off, sector, len, is_fua); 1262 + err = copy_to_nullb(nullb, p, sector, len, is_fua); 1256 1263 } 1257 1264 1265 + kunmap_local(p); 1258 1266 return err; 1259 1267 } 1260 1268