IB/iser: Use the new verbs DMA mapping functions

Convert iSER to use the new verbs DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Ralph Campbell and committed by Roland Dreier (5180311f, 85507bcc)
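
For readers unfamiliar with the API being adopted here: the ib_dma_* routines are thin wrappers declared in include/rdma/ib_verbs.h. They take a struct ib_device * and normally forward to the corresponding dma_* call on the underlying struct device, while allowing a driver to interpose its own mapping operations. The sketch below is illustrative only; example_map_buffer() and its parameters are invented for this note and are not part of the patch. It simply shows the before/after calling convention that the hunks below apply throughout iSER.

#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch, not part of the patch: map a single buffer through
 * the ib_dma_* wrappers instead of calling dma_map_single() directly on
 * ib_device->dma_device.
 */
static int example_map_buffer(struct ib_device *ibdev, void *buf, size_t len)
{
	u64 dma_addr;

	/* was: dma_map_single(ibdev->dma_device, buf, len, DMA_TO_DEVICE) */
	dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, dma_addr))
		return -ENOMEM;

	/* ... post work requests that reference dma_addr ... */

	/* was: dma_unmap_single(ibdev->dma_device, dma_addr, len, DMA_TO_DEVICE) */
	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);
	return 0;
}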

2 files changed, 61 insertions(+), 63 deletions(-)

drivers/infiniband/ulp/iser/iscsi_iser.h (+1, -1)
···
 	struct iser_mem_reg reg; /* memory registration info */
 	void *virt_addr;
 	struct iser_device *device; /* device->device for dma_unmap */
-	dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */
+	u64 dma_addr; /* if non zero, addr for dma_unmap */
 	enum dma_data_direction direction; /* direction for dma_unmap */
 	unsigned int data_size;
 	atomic_t ref_count; /* refcount, freed when dec to 0 */

drivers/infiniband/ulp/iser/iser_memory.c (+60, -62)
···
  */
 int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 
 	if ((atomic_read(&regd_buf->ref_count) == 0) ||
 	    atomic_dec_and_test(&regd_buf->ref_count)) {
···
 		iser_unreg_mem(&regd_buf->reg);
 
 		if (regd_buf->dma_addr) {
-			dma_device = regd_buf->device->ib_device->dma_device;
-			dma_unmap_single(dma_device,
+			dev = regd_buf->device->ib_device;
+			ib_dma_unmap_single(dev,
 					 regd_buf->dma_addr,
 					 regd_buf->data_size,
 					 regd_buf->direction);
···
 			  struct iser_regd_buf *regd_buf,
 			  enum dma_data_direction direction)
 {
-	dma_addr_t dma_addr;
+	u64 dma_addr;
 
-	dma_addr = dma_map_single(device->ib_device->dma_device,
-				  regd_buf->virt_addr,
-				  regd_buf->data_size, direction);
-	BUG_ON(dma_mapping_error(dma_addr));
+	dma_addr = ib_dma_map_single(device->ib_device,
+				     regd_buf->virt_addr,
+				     regd_buf->data_size, direction);
+	BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
 
 	regd_buf->reg.lkey = device->mr->lkey;
 	regd_buf->reg.len = regd_buf->data_size;
···
 			      enum iser_data_dir cmd_dir)
 {
 	int dma_nents;
-	struct device *dma_device;
+	struct ib_device *dev;
 	char *mem = NULL;
 	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
 	unsigned long cmd_data_len = data->data_len;
···
 
 	iser_ctask->data_copy[cmd_dir].copy_buf = mem;
 
-	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-	if (cmd_dir == ISER_DIR_OUT)
-		dma_nents = dma_map_sg(dma_device,
-				       &iser_ctask->data_copy[cmd_dir].sg_single,
-				       1, DMA_TO_DEVICE);
-	else
-		dma_nents = dma_map_sg(dma_device,
-				       &iser_ctask->data_copy[cmd_dir].sg_single,
-				       1, DMA_FROM_DEVICE);
-
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dma_nents = ib_dma_map_sg(dev,
+				  &iser_ctask->data_copy[cmd_dir].sg_single,
+				  1,
+				  (cmd_dir == ISER_DIR_OUT) ?
+				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	BUG_ON(dma_nents == 0);
 
 	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
···
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 				     enum iser_data_dir cmd_dir)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 	struct iser_data_buf *mem_copy;
 	unsigned long cmd_data_len;
 
-	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-	mem_copy = &iser_ctask->data_copy[cmd_dir];
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	mem_copy = &iser_ctask->data_copy[cmd_dir];
 
-	if (cmd_dir == ISER_DIR_OUT)
-		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
-			     DMA_TO_DEVICE);
-	else
-		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
-			     DMA_FROM_DEVICE);
+	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+			(cmd_dir == ISER_DIR_OUT) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	if (cmd_dir == ISER_DIR_IN) {
 		char *mem;
···
  * consecutive elements. Also, it handles one entry SG.
  */
 static int iser_sg_to_page_vec(struct iser_data_buf *data,
-			       struct iser_page_vec *page_vec)
+			       struct iser_page_vec *page_vec,
+			       struct ib_device *ibdev)
 {
 	struct scatterlist *sg = (struct scatterlist *)data->buf;
-	dma_addr_t first_addr, last_addr, page;
+	u64 first_addr, last_addr, page;
 	int end_aligned;
 	unsigned int cur_page = 0;
 	unsigned long total_sz = 0;
···
 	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
 
 	for (i = 0; i < data->dma_nents; i++) {
-		total_sz += sg_dma_len(&sg[i]);
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
 
-		first_addr = sg_dma_address(&sg[i]);
-		last_addr = first_addr + sg_dma_len(&sg[i]);
+		total_sz += dma_len;
+
+		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+		last_addr = first_addr + dma_len;
 
 		end_aligned = !(last_addr & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
 			i++;
-			total_sz += sg_dma_len(&sg[i]);
-			last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
+			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+			total_sz += dma_len;
+			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
 			end_aligned = !(last_addr & ~MASK_4K);
 		}
···
  * the number of entries which are aligned correctly. Supports the case where
  * consecutive SG elements are actually fragments of the same physcial page.
  */
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
+					      struct ib_device *ibdev)
 {
 	struct scatterlist *sg;
-	dma_addr_t end_addr, next_addr;
+	u64 end_addr, next_addr;
 	int i, cnt;
 	unsigned int ret_len = 0;
···
 			   (unsigned long)page_to_phys(sg[i].page),
 			   (unsigned long)sg[i].offset,
 			   (unsigned long)sg[i].length); */
-		end_addr = sg_dma_address(&sg[i]) +
-			   sg_dma_len(&sg[i]);
+		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
+			   ib_sg_dma_len(ibdev, &sg[i]);
 		/* iser_dbg("Checking sg iobuf end address "
 		       "0x%08lX\n", end_addr); */
 		if (i + 1 < data->dma_nents) {
-			next_addr = sg_dma_address(&sg[i+1]);
+			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
 			/* are i, i+1 fragments of the same page? */
 			if (end_addr == next_addr)
 				continue;
···
 	return ret_len;
 }
 
-static void iser_data_buf_dump(struct iser_data_buf *data)
+static void iser_data_buf_dump(struct iser_data_buf *data,
+			       struct ib_device *ibdev)
 {
 	struct scatterlist *sg = (struct scatterlist *)data->buf;
 	int i;
···
 	for (i = 0; i < data->dma_nents; i++)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)sg_dma_address(&sg[i]),
+			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
 			 sg[i].page, sg[i].offset,
-			 sg[i].length,sg_dma_len(&sg[i]));
+			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
···
 }
 
 static void iser_page_vec_build(struct iser_data_buf *data,
-				struct iser_page_vec *page_vec)
+				struct iser_page_vec *page_vec,
+				struct ib_device *ibdev)
 {
 	int page_vec_len = 0;
···
 	page_vec->offset = 0;
 
 	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
-	page_vec_len = iser_sg_to_page_vec(data,page_vec);
+	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
 	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
 
 	page_vec->length = page_vec_len;
 
 	if (page_vec_len * SIZE_4K < page_vec->data_size) {
 		iser_err("page_vec too short to hold this SG\n");
-		iser_data_buf_dump(data);
+		iser_data_buf_dump(data, ibdev);
 		iser_dump_page_vec(page_vec);
 		BUG();
 	}
···
 			    enum iser_data_dir iser_dir,
 			    enum dma_data_direction dma_dir)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 
 	iser_ctask->dir[iser_dir] = 1;
-	dma_device =
-		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
 
-	data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
 		iser_err("dma_map_sg failed!!!\n");
 		return -EINVAL;
···
 
 void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 	struct iser_data_buf *data;
 
-	dma_device =
-		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
 
 	if (iser_ctask->dir[ISER_DIR_IN]) {
 		data = &iser_ctask->data[ISER_DIR_IN];
-		dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 	}
 
 	if (iser_ctask->dir[ISER_DIR_OUT]) {
 		data = &iser_ctask->data[ISER_DIR_OUT];
-		dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
 	}
 }
···
 {
 	struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
 	struct iser_device *device = ib_conn->device;
+	struct ib_device *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
 	int aligned_len;
···
 
 	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
 
-	aligned_len = iser_data_buf_aligned_len(mem);
+	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
 		iser_err("rdma alignment violation %d/%d aligned\n",
 			 aligned_len, mem->size);
-		iser_data_buf_dump(mem);
+		iser_data_buf_dump(mem, ibdev);
 
 		/* unmap the command data before accessing it */
 		iser_dma_unmap_task_data(iser_ctask);
···
 
 		regd_buf->reg.lkey = device->mr->lkey;
 		regd_buf->reg.rkey = device->mr->rkey;
-		regd_buf->reg.len = sg_dma_len(&sg[0]);
-		regd_buf->reg.va = sg_dma_address(&sg[0]);
+		regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
+		regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
 		regd_buf->reg.is_fmr = 0;
 
 		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
···
 			 (unsigned long)regd_buf->reg.va,
 			 (unsigned long)regd_buf->reg.len);
 	} else { /* use FMR for multiple dma entries */
-		iser_page_vec_build(mem, ib_conn->page_vec);
+		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
 		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
 		if (err) {
-			iser_data_buf_dump(mem);
+			iser_data_buf_dump(mem, ibdev);
 			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
 				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
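
On the scatter/gather side the pattern is the same: map with ib_dma_map_sg() and then read bus addresses and lengths back through ib_sg_dma_address() and ib_sg_dma_len() rather than sg_dma_address()/sg_dma_len(), which is why the iser_memory.c hunks above thread a struct ib_device pointer down into the helpers that walk the list. A minimal sketch of that access pattern follows; example_dump_sg() and its parameters are invented for this note and are not part of the patch.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch, not part of the patch: map a scatterlist and walk
 * the mapped entries with the ib_sg_dma_* accessors.
 */
static int example_dump_sg(struct ib_device *ibdev,
			   struct scatterlist *sg, int nents)
{
	int i, dma_nents;

	/* was: dma_map_sg(ibdev->dma_device, sg, nents, DMA_FROM_DEVICE) */
	dma_nents = ib_dma_map_sg(ibdev, sg, nents, DMA_FROM_DEVICE);
	if (dma_nents == 0)
		return -EINVAL;

	for (i = 0; i < dma_nents; i++)
		printk(KERN_DEBUG "sg[%d] dma_addr 0x%llx dma_len 0x%x\n", i,
		       (unsigned long long)ib_sg_dma_address(ibdev, &sg[i]),
		       ib_sg_dma_len(ibdev, &sg[i]));

	ib_dma_unmap_sg(ibdev, sg, nents, DMA_FROM_DEVICE);
	return 0;
}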