Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

null_blk: allow byte aligned memory offsets

Allowing byte aligned memory provides a nice testing ground for
direct-io.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Tested-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Keith Busch and committed by Jens Axboe.
3451cf34 262a3dd0

+25 -22
+24 -21
drivers/block/null_blk/main.c
··· 1130 1130 } 1131 1131 1132 1132 static blk_status_t copy_to_nullb(struct nullb *nullb, void *source, 1133 - sector_t sector, size_t n, bool is_fua) 1133 + loff_t pos, size_t n, bool is_fua) 1134 1134 { 1135 1135 size_t temp, count = 0; 1136 - unsigned int offset; 1137 1136 struct nullb_page *t_page; 1137 + sector_t sector; 1138 1138 1139 1139 while (count < n) { 1140 - temp = min_t(size_t, nullb->dev->blocksize, n - count); 1140 + temp = min3(nullb->dev->blocksize, n - count, 1141 + PAGE_SIZE - offset_in_page(pos)); 1142 + sector = pos >> SECTOR_SHIFT; 1141 1143 1142 1144 if (null_cache_active(nullb) && !is_fua) 1143 1145 null_make_cache_space(nullb, PAGE_SIZE); 1144 1146 1145 - offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; 1146 1147 t_page = null_insert_page(nullb, sector, 1147 1148 !null_cache_active(nullb) || is_fua); 1148 1149 if (!t_page) 1149 1150 return BLK_STS_NOSPC; 1150 1151 1151 - memcpy_to_page(t_page->page, offset, source + count, temp); 1152 + memcpy_to_page(t_page->page, offset_in_page(pos), 1153 + source + count, temp); 1152 1154 1153 1155 __set_bit(sector & SECTOR_MASK, t_page->bitmap); 1154 1156 ··· 1158 1156 null_free_sector(nullb, sector, true); 1159 1157 1160 1158 count += temp; 1161 - sector += temp >> SECTOR_SHIFT; 1159 + pos += temp; 1162 1160 } 1163 1161 return BLK_STS_OK; 1164 1162 } 1165 1163 1166 - static void copy_from_nullb(struct nullb *nullb, void *dest, sector_t sector, 1164 + static void copy_from_nullb(struct nullb *nullb, void *dest, loff_t pos, 1167 1165 size_t n) 1168 1166 { 1169 1167 size_t temp, count = 0; 1170 - unsigned int offset; 1171 1168 struct nullb_page *t_page; 1169 + sector_t sector; 1172 1170 1173 1171 while (count < n) { 1174 - temp = min_t(size_t, nullb->dev->blocksize, n - count); 1172 + temp = min3(nullb->dev->blocksize, n - count, 1173 + PAGE_SIZE - offset_in_page(pos)); 1174 + sector = pos >> SECTOR_SHIFT; 1175 1175 1176 - offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; 1177 1176 t_page = 
null_lookup_page(nullb, sector, false, 1178 1177 !null_cache_active(nullb)); 1179 - 1180 1178 if (t_page) 1181 - memcpy_from_page(dest + count, t_page->page, offset, 1182 - temp); 1179 + memcpy_from_page(dest + count, t_page->page, 1180 + offset_in_page(pos), temp); 1183 1181 else 1184 1182 memset(dest + count, 0, temp); 1185 1183 1186 1184 count += temp; 1187 - sector += temp >> SECTOR_SHIFT; 1185 + pos += temp; 1188 1186 } 1189 1187 } 1190 1188 ··· 1230 1228 } 1231 1229 1232 1230 static blk_status_t null_transfer(struct nullb *nullb, struct page *page, 1233 - unsigned int len, unsigned int off, bool is_write, sector_t sector, 1231 + unsigned int len, unsigned int off, bool is_write, loff_t pos, 1234 1232 bool is_fua) 1235 1233 { 1236 1234 struct nullb_device *dev = nullb->dev; ··· 1242 1240 if (!is_write) { 1243 1241 if (dev->zoned) 1244 1242 valid_len = null_zone_valid_read_len(nullb, 1245 - sector, len); 1243 + pos >> SECTOR_SHIFT, len); 1246 1244 1247 1245 if (valid_len) { 1248 - copy_from_nullb(nullb, p, sector, valid_len); 1246 + copy_from_nullb(nullb, p, pos, valid_len); 1249 1247 off += valid_len; 1250 1248 len -= valid_len; 1251 1249 } ··· 1255 1253 flush_dcache_page(page); 1256 1254 } else { 1257 1255 flush_dcache_page(page); 1258 - err = copy_to_nullb(nullb, p, sector, len, is_fua); 1256 + err = copy_to_nullb(nullb, p, pos, len, is_fua); 1259 1257 } 1260 1258 1261 1259 kunmap_local(p); ··· 1273 1271 struct nullb *nullb = cmd->nq->dev->nullb; 1274 1272 blk_status_t err = BLK_STS_OK; 1275 1273 unsigned int len; 1276 - sector_t sector = blk_rq_pos(rq); 1274 + loff_t pos = blk_rq_pos(rq) << SECTOR_SHIFT; 1277 1275 unsigned int max_bytes = nr_sectors << SECTOR_SHIFT; 1278 1276 unsigned int transferred_bytes = 0; 1279 1277 struct req_iterator iter; ··· 1285 1283 if (transferred_bytes + len > max_bytes) 1286 1284 len = max_bytes - transferred_bytes; 1287 1285 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, 1288 - op_is_write(req_op(rq)), sector, 
1286 + op_is_write(req_op(rq)), pos, 1289 1287 rq->cmd_flags & REQ_FUA); 1290 1288 if (err) 1291 1289 break; 1292 - sector += len >> SECTOR_SHIFT; 1290 + pos += len; 1293 1291 transferred_bytes += len; 1294 1292 if (transferred_bytes >= max_bytes) 1295 1293 break; ··· 1946 1944 .logical_block_size = dev->blocksize, 1947 1945 .physical_block_size = dev->blocksize, 1948 1946 .max_hw_sectors = dev->max_sectors, 1947 + .dma_alignment = 1, 1949 1948 }; 1950 1949 1951 1950 struct nullb *nullb;
+1 -1
drivers/block/null_blk/zoned.c
··· 242 242 { 243 243 struct nullb_device *dev = nullb->dev; 244 244 struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)]; 245 - unsigned int nr_sectors = len >> SECTOR_SHIFT; 245 + unsigned int nr_sectors = DIV_ROUND_UP(len, SECTOR_SIZE); 246 246 247 247 /* Read must be below the write pointer position */ 248 248 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||