IPoIB: Use the new verbs DMA mapping functions

Convert IPoIB to use the new DMA mapping functions
for kernel verbs consumers.
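
The ib_dma_* wrappers take the struct ib_device itself rather than its dma_device, so a low-level driver that does not use the generic DMA API can interpose its own mapping routines. As a rough sketch only (the dma_ops indirection shown here is the mechanism these wrappers rely on, not something added by this IPoIB patch), the map and mapping-error wrappers behave like this:

/*
 * Illustrative sketch, not part of this patch: the ib_dma_* wrappers
 * dispatch through the ib_device so a device driver can supply its own
 * mapping routines; otherwise they fall back to the generic DMA API
 * of this kernel era.
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dma_addr);
}

This is also why the call sites below now pass priv->ca to the mapping-error check: the device is needed so the check can be interposed as well.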

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Ralph Campbell and committed by Roland Dreier (37ccf9df, 1527106f)

2 files changed: +38 -41

drivers/infiniband/ulp/ipoib/ipoib.h (+2 -2)
@@ -105,12 +105,12 @@
 
 struct ipoib_rx_buf {
         struct sk_buff *skb;
-        dma_addr_t mapping;
+        u64 mapping;
 };
 
 struct ipoib_tx_buf {
         struct sk_buff *skb;
-        DECLARE_PCI_UNMAP_ADDR(mapping)
+        u64 mapping;
 };
 
 /*
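
Both ring entries now carry the mapping as a plain u64: the ib_dma_* interface returns a 64-bit handle chosen by the device (it need not be a bus address, and dma_addr_t may be only 32 bits on some platforms), so neither dma_addr_t nor the DECLARE_PCI_UNMAP_ADDR/pci_unmap_addr machinery applies. A minimal sketch of how a consumer uses the field, with hypothetical helper names but the real ib_dma_map_single/ib_dma_mapping_error/ib_dma_unmap_single calls; the ipoib_ib.c hunks below follow the same pattern:

/*
 * Sketch with hypothetical helpers: map an skb for transmit, keep the
 * u64 handle in the new mapping field, and release it later.
 */
static int example_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
        u64 addr;

        addr = ib_dma_map_single(ca, tx_req->skb->data, tx_req->skb->len,
                                 DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(ca, addr)))
                return -EIO;

        tx_req->mapping = addr;         /* plain u64, no pci_unmap_addr_set() */
        return 0;
}

static void example_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
        ib_dma_unmap_single(ca, tx_req->mapping, tx_req->skb->len,
                            DMA_TO_DEVICE);
}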
drivers/infiniband/ulp/ipoib/ipoib_ib.c (+36 -39)
@@ -109,9 +109,8 @@
         ret = ib_post_recv(priv->qp, &param, &bad_wr);
         if (unlikely(ret)) {
                 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
-                dma_unmap_single(priv->ca->dma_device,
-                                 priv->rx_ring[id].mapping,
-                                 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+                ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
+                                    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                 dev_kfree_skb_any(priv->rx_ring[id].skb);
                 priv->rx_ring[id].skb = NULL;
         }
@@ -122,7 +123,7 @@
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct sk_buff *skb;
-        dma_addr_t addr;
+        u64 addr;
 
         skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
         if (!skb)
@@ -135,10 +136,9 @@
          */
         skb_reserve(skb, 4);
 
-        addr = dma_map_single(priv->ca->dma_device,
-                              skb->data, IPOIB_BUF_SIZE,
-                              DMA_FROM_DEVICE);
-        if (unlikely(dma_mapping_error(addr))) {
+        addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
+                                 DMA_FROM_DEVICE);
+        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
                 dev_kfree_skb_any(skb);
                 return -EIO;
         }
@@ -172,7 +174,7 @@
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
         struct sk_buff *skb;
-        dma_addr_t addr;
+        u64 addr;
 
         ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
                        wr_id, wc->opcode, wc->status);
@@ -191,8 +193,8 @@
                         ipoib_warn(priv, "failed recv event "
                                    "(status=%d, wrid=%d vend_err %x)\n",
                                    wc->status, wr_id, wc->vendor_err);
-                dma_unmap_single(priv->ca->dma_device, addr,
-                                 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+                ib_dma_unmap_single(priv->ca, addr,
+                                    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                 dev_kfree_skb_any(skb);
                 priv->rx_ring[wr_id].skb = NULL;
                 return;
@@ -210,8 +212,7 @@
         ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                        wc->byte_len, wc->slid);
 
-        dma_unmap_single(priv->ca->dma_device, addr,
-                         IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+        ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
 
         skb_put(skb, wc->byte_len);
         skb_pull(skb, IB_GRH_BYTES);
@@ -258,10 +261,8 @@
 
         tx_req = &priv->tx_ring[wr_id];
 
-        dma_unmap_single(priv->ca->dma_device,
-                         pci_unmap_addr(tx_req, mapping),
-                         tx_req->skb->len,
-                         DMA_TO_DEVICE);
+        ib_dma_unmap_single(priv->ca, tx_req->mapping,
+                            tx_req->skb->len, DMA_TO_DEVICE);
 
         ++priv->stats.tx_packets;
         priv->stats.tx_bytes += tx_req->skb->len;
@@ -306,7 +311,7 @@
 static inline int post_send(struct ipoib_dev_priv *priv,
                             unsigned int wr_id,
                             struct ib_ah *address, u32 qpn,
-                            dma_addr_t addr, int len)
+                            u64 addr, int len)
 {
         struct ib_send_wr *bad_wr;
 
@@ -325,7 +330,7 @@
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_tx_buf *tx_req;
-        dma_addr_t addr;
+        u64 addr;
 
         if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
                 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -348,21 +353,20 @@
          */
         tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
         tx_req->skb = skb;
-        addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
-                              DMA_TO_DEVICE);
-        if (unlikely(dma_mapping_error(addr))) {
+        addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
+                                 DMA_TO_DEVICE);
+        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
                 ++priv->stats.tx_errors;
                 dev_kfree_skb_any(skb);
                 return;
         }
-        pci_unmap_addr_set(tx_req, mapping, addr);
+        tx_req->mapping = addr;
 
         if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                                address->ah, qpn, addr, skb->len))) {
                 ipoib_warn(priv, "post_send failed\n");
                 ++priv->stats.tx_errors;
-                dma_unmap_single(priv->ca->dma_device, addr, skb->len,
-                                 DMA_TO_DEVICE);
+                ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
                 dev_kfree_skb_any(skb);
         } else {
                 dev->trans_start = jiffies;
@@ -532,24 +538,33 @@
                 while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                         tx_req = &priv->tx_ring[priv->tx_tail &
                                                 (ipoib_sendq_size - 1)];
-                        dma_unmap_single(priv->ca->dma_device,
-                                         pci_unmap_addr(tx_req, mapping),
-                                         tx_req->skb->len,
-                                         DMA_TO_DEVICE);
+                        ib_dma_unmap_single(priv->ca,
+                                            tx_req->mapping,
+                                            tx_req->skb->len,
+                                            DMA_TO_DEVICE);
                         dev_kfree_skb_any(tx_req->skb);
                         ++priv->tx_tail;
                 }
 
-                for (i = 0; i < ipoib_recvq_size; ++i)
-                        if (priv->rx_ring[i].skb) {
-                                dma_unmap_single(priv->ca->dma_device,
-                                                 pci_unmap_addr(&priv->rx_ring[i],
-                                                                mapping),
-                                                 IPOIB_BUF_SIZE,
-                                                 DMA_FROM_DEVICE);
-                                dev_kfree_skb_any(priv->rx_ring[i].skb);
-                                priv->rx_ring[i].skb = NULL;
-                        }
+                for (i = 0; i < ipoib_recvq_size; ++i) {
+                        struct ipoib_rx_buf *rx_req;
+
+                        rx_req = &priv->rx_ring[i];
+                        if (!rx_req->skb)
+                                continue;
+                        ib_dma_unmap_single(priv->ca,
+                                            rx_req->mapping,
+                                            IPOIB_BUF_SIZE,
+                                            DMA_FROM_DEVICE);
+                        dev_kfree_skb_any(rx_req->skb);
+                        rx_req->skb = NULL;
+                }
 
                 goto timeout;
         }