Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

infiniband: sg chaining support

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

+47 -38
+6 -4
drivers/infiniband/hw/ipath/ipath_dma.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 + #include <linux/scatterlist.h> 33 34 #include <rdma/ib_verbs.h> 34 35 35 36 #include "ipath_verbs.h" ··· 97 96 BUG_ON(!valid_dma_direction(direction)); 98 97 } 99 98 100 - static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, 101 - enum dma_data_direction direction) 99 + static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl, 100 + int nents, enum dma_data_direction direction) 102 101 { 102 + struct scatterlist *sg; 103 103 u64 addr; 104 104 int i; 105 105 int ret = nents; 106 106 107 107 BUG_ON(!valid_dma_direction(direction)); 108 108 109 - for (i = 0; i < nents; i++) { 110 - addr = (u64) page_address(sg[i].page); 109 + for_each_sg(sgl, sg, nents, i) { 110 + addr = (u64) page_address(sg->page); 111 111 /* TODO: handle highmem pages */ 112 112 if (!addr) { 113 113 ret = 0;
+41 -34
drivers/infiniband/ulp/iser/iser_memory.c
··· 124 124 125 125 if (cmd_dir == ISER_DIR_OUT) { 126 126 /* copy the unaligned sg the buffer which is used for RDMA */ 127 - struct scatterlist *sg = (struct scatterlist *)data->buf; 127 + struct scatterlist *sgl = (struct scatterlist *)data->buf; 128 + struct scatterlist *sg; 128 129 int i; 129 130 char *p, *from; 130 131 131 - for (p = mem, i = 0; i < data->size; i++) { 132 - from = kmap_atomic(sg[i].page, KM_USER0); 132 + p = mem; 133 + for_each_sg(sgl, sg, data->size, i) { 134 + from = kmap_atomic(sg->page, KM_USER0); 133 135 memcpy(p, 134 - from + sg[i].offset, 135 - sg[i].length); 136 + from + sg->offset, 137 + sg->length); 136 138 kunmap_atomic(from, KM_USER0); 137 - p += sg[i].length; 139 + p += sg->length; 138 140 } 139 141 } 140 142 ··· 178 176 179 177 if (cmd_dir == ISER_DIR_IN) { 180 178 char *mem; 181 - struct scatterlist *sg; 179 + struct scatterlist *sgl, *sg; 182 180 unsigned char *p, *to; 183 181 unsigned int sg_size; 184 182 int i; ··· 186 184 /* copy back read RDMA to unaligned sg */ 187 185 mem = mem_copy->copy_buf; 188 186 189 - sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; 187 + sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf; 190 188 sg_size = iser_ctask->data[ISER_DIR_IN].size; 191 189 192 - for (p = mem, i = 0; i < sg_size; i++){ 193 - to = kmap_atomic(sg[i].page, KM_SOFTIRQ0); 194 - memcpy(to + sg[i].offset, 190 + p = mem; 191 + for_each_sg(sgl, sg, sg_size, i) { 192 + to = kmap_atomic(sg->page, KM_SOFTIRQ0); 193 + memcpy(to + sg->offset, 195 194 p, 196 - sg[i].length); 195 + sg->length); 197 196 kunmap_atomic(to, KM_SOFTIRQ0); 198 - p += sg[i].length; 197 + p += sg->length; 199 198 } 200 199 } 201 200 ··· 227 224 struct iser_page_vec *page_vec, 228 225 struct ib_device *ibdev) 229 226 { 230 - struct scatterlist *sg = (struct scatterlist *)data->buf; 227 + struct scatterlist *sgl = (struct scatterlist *)data->buf; 228 + struct scatterlist *sg; 231 229 u64 first_addr, last_addr, page; 232 230 int end_aligned; 
233 231 unsigned int cur_page = 0; ··· 236 232 int i; 237 233 238 234 /* compute the offset of first element */ 239 - page_vec->offset = (u64) sg[0].offset & ~MASK_4K; 235 + page_vec->offset = (u64) sgl[0].offset & ~MASK_4K; 240 236 241 - for (i = 0; i < data->dma_nents; i++) { 242 - unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]); 237 + for_each_sg(sgl, sg, data->dma_nents, i) { 238 + unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 243 239 244 240 total_sz += dma_len; 245 241 246 - first_addr = ib_sg_dma_address(ibdev, &sg[i]); 242 + first_addr = ib_sg_dma_address(ibdev, sg); 247 243 last_addr = first_addr + dma_len; 248 244 249 245 end_aligned = !(last_addr & ~MASK_4K); 250 246 251 247 /* continue to collect page fragments till aligned or SG ends */ 252 248 while (!end_aligned && (i + 1 < data->dma_nents)) { 249 + sg = sg_next(sg); 253 250 i++; 254 - dma_len = ib_sg_dma_len(ibdev, &sg[i]); 251 + dma_len = ib_sg_dma_len(ibdev, sg); 255 252 total_sz += dma_len; 256 - last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len; 253 + last_addr = ib_sg_dma_address(ibdev, sg) + dma_len; 257 254 end_aligned = !(last_addr & ~MASK_4K); 258 255 } 259 256 ··· 289 284 static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, 290 285 struct ib_device *ibdev) 291 286 { 292 - struct scatterlist *sg; 287 + struct scatterlist *sgl, *sg; 293 288 u64 end_addr, next_addr; 294 289 int i, cnt; 295 290 unsigned int ret_len = 0; 296 291 297 - sg = (struct scatterlist *)data->buf; 292 + sgl = (struct scatterlist *)data->buf; 298 293 299 - for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) { 294 + cnt = 0; 295 + for_each_sg(sgl, sg, data->dma_nents, i) { 300 296 /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX " 301 297 "offset: %ld sz: %ld\n", i, 302 - (unsigned long)page_to_phys(sg[i].page), 303 - (unsigned long)sg[i].offset, 304 - (unsigned long)sg[i].length); */ 305 - end_addr = ib_sg_dma_address(ibdev, &sg[i]) + 306 - ib_sg_dma_len(ibdev, &sg[i]); 298 + (unsigned long)page_to_phys(sg->page),
299 + (unsigned long)sg->offset, 300 + (unsigned long)sg->length); */ 301 + end_addr = ib_sg_dma_address(ibdev, sg) + 302 + ib_sg_dma_len(ibdev, sg); 307 303 /* iser_dbg("Checking sg iobuf end address " 308 304 "0x%08lX\n", end_addr); */ 309 305 if (i + 1 < data->dma_nents) { 310 - next_addr = ib_sg_dma_address(ibdev, &sg[i+1]); 306 + next_addr = ib_sg_dma_address(ibdev, sg_next(sg)); 311 307 /* are i, i+1 fragments of the same page? */ 312 308 if (end_addr == next_addr) 313 309 continue; ··· 328 322 static void iser_data_buf_dump(struct iser_data_buf *data, 329 323 struct ib_device *ibdev) 330 324 { 331 - struct scatterlist *sg = (struct scatterlist *)data->buf; 325 + struct scatterlist *sgl = (struct scatterlist *)data->buf; 326 + struct scatterlist *sg; 332 327 int i; 333 328 334 - for (i = 0; i < data->dma_nents; i++) 329 + for_each_sg(sgl, sg, data->dma_nents, i) 335 330 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 336 331 "off:0x%x sz:0x%x dma_len:0x%x\n", 337 - i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]), 338 - sg[i].page, sg[i].offset, 339 - sg[i].length, ib_sg_dma_len(ibdev, &sg[i])); 332 + i, (unsigned long)ib_sg_dma_address(ibdev, sg), 333 + sg->page, sg->offset, 334 + sg->length, ib_sg_dma_len(ibdev, sg)); 340 335 } 341 336 342 337 static void iser_dump_page_vec(struct iser_page_vec *page_vec)