Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bna: use device model DMA API

Use the generic DMA API, as the PCI DMA API equivalents will be deprecated.

Signed-off-by: Ivan Vecera <ivecera@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Ivan Vecera and committed by David S. Miller
5ea74318 8fe73503

+57 -53
+56 -52
drivers/net/bna/bnad.c
··· 126 126 } 127 127 unmap_array[unmap_cons].skb = NULL; 128 128 129 - pci_unmap_single(bnad->pcidev, 130 - pci_unmap_addr(&unmap_array[unmap_cons], 129 + dma_unmap_single(&bnad->pcidev->dev, 130 + dma_unmap_addr(&unmap_array[unmap_cons], 131 131 dma_addr), skb_headlen(skb), 132 - PCI_DMA_TODEVICE); 132 + DMA_TO_DEVICE); 133 133 134 - pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 134 + dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 135 135 if (++unmap_cons >= unmap_q->q_depth) 136 136 break; 137 137 138 138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 139 - pci_unmap_page(bnad->pcidev, 140 - pci_unmap_addr(&unmap_array[unmap_cons], 139 + dma_unmap_page(&bnad->pcidev->dev, 140 + dma_unmap_addr(&unmap_array[unmap_cons], 141 141 dma_addr), 142 142 skb_shinfo(skb)->frags[i].size, 143 - PCI_DMA_TODEVICE); 144 - pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 143 + DMA_TO_DEVICE); 144 + dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 145 145 0); 146 146 if (++unmap_cons >= unmap_q->q_depth) 147 147 break; ··· 199 199 sent_bytes += skb->len; 200 200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags); 201 201 202 - pci_unmap_single(bnad->pcidev, 203 - pci_unmap_addr(&unmap_array[unmap_cons], 202 + dma_unmap_single(&bnad->pcidev->dev, 203 + dma_unmap_addr(&unmap_array[unmap_cons], 204 204 dma_addr), skb_headlen(skb), 205 - PCI_DMA_TODEVICE); 206 - pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 205 + DMA_TO_DEVICE); 206 + dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 207 207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); 208 208 209 209 prefetch(&unmap_array[unmap_cons + 1]); 210 210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 211 211 prefetch(&unmap_array[unmap_cons + 1]); 212 212 213 - pci_unmap_page(bnad->pcidev, 214 - pci_unmap_addr(&unmap_array[unmap_cons], 213 + dma_unmap_page(&bnad->pcidev->dev, 214 + dma_unmap_addr(&unmap_array[unmap_cons], 215 215 dma_addr), 216 216 
skb_shinfo(skb)->frags[i].size, 217 - PCI_DMA_TODEVICE); 218 - pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 217 + DMA_TO_DEVICE); 218 + dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 219 219 0); 220 220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); 221 221 } ··· 340 340 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 341 341 { 342 342 struct bnad_unmap_q *unmap_q; 343 + struct bnad_skb_unmap *unmap_array; 343 344 struct sk_buff *skb; 344 345 int unmap_cons; 345 346 346 347 unmap_q = rcb->unmap_q; 348 + unmap_array = unmap_q->unmap_array; 347 349 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { 348 - skb = unmap_q->unmap_array[unmap_cons].skb; 350 + skb = unmap_array[unmap_cons].skb; 349 351 if (!skb) 350 352 continue; 351 - unmap_q->unmap_array[unmap_cons].skb = NULL; 352 - pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> 353 - unmap_array[unmap_cons], 354 - dma_addr), rcb->rxq->buffer_size, 355 - PCI_DMA_FROMDEVICE); 353 + unmap_array[unmap_cons].skb = NULL; 354 + dma_unmap_single(&bnad->pcidev->dev, 355 + dma_unmap_addr(&unmap_array[unmap_cons], 356 + dma_addr), 357 + rcb->rxq->buffer_size, 358 + DMA_FROM_DEVICE); 356 359 dev_kfree_skb(skb); 357 360 } 358 361 bnad_reset_rcb(bnad, rcb); ··· 394 391 skb->dev = bnad->netdev; 395 392 skb_reserve(skb, NET_IP_ALIGN); 396 393 unmap_array[unmap_prod].skb = skb; 397 - dma_addr = pci_map_single(bnad->pcidev, skb->data, 398 - rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE); 399 - pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, 394 + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 395 + rcb->rxq->buffer_size, 396 + DMA_FROM_DEVICE); 397 + dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, 400 398 dma_addr); 401 399 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); 402 400 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); ··· 438 434 struct bna_rcb *rcb = NULL; 439 435 unsigned int wi_range, packets = 0, wis = 0; 440 436 struct bnad_unmap_q *unmap_q; 
437 + struct bnad_skb_unmap *unmap_array; 441 438 struct sk_buff *skb; 442 - u32 flags; 439 + u32 flags, unmap_cons; 443 440 u32 qid0 = ccb->rcb[0]->rxq->rxq_id; 444 441 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 445 442 ··· 461 456 rcb = ccb->rcb[1]; 462 457 463 458 unmap_q = rcb->unmap_q; 459 + unmap_array = unmap_q->unmap_array; 460 + unmap_cons = unmap_q->consumer_index; 464 461 465 - skb = unmap_q->unmap_array[unmap_q->consumer_index].skb; 462 + skb = unmap_array[unmap_cons].skb; 466 463 BUG_ON(!(skb)); 467 - unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL; 468 - pci_unmap_single(bnad->pcidev, 469 - pci_unmap_addr(&unmap_q-> 470 - unmap_array[unmap_q-> 471 - consumer_index], 464 + unmap_array[unmap_cons].skb = NULL; 465 + dma_unmap_single(&bnad->pcidev->dev, 466 + dma_unmap_addr(&unmap_array[unmap_cons], 472 467 dma_addr), 473 - rcb->rxq->buffer_size, 474 - PCI_DMA_FROMDEVICE); 468 + rcb->rxq->buffer_size, 469 + DMA_FROM_DEVICE); 475 470 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); 476 471 477 472 /* Should be more efficient ? Performance ? 
*/ ··· 1020 1015 if (mem_info->mem_type == BNA_MEM_T_DMA) { 1021 1016 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), 1022 1017 dma_pa); 1023 - pci_free_consistent(bnad->pcidev, 1024 - mem_info->mdl[i].len, 1025 - mem_info->mdl[i].kva, dma_pa); 1018 + dma_free_coherent(&bnad->pcidev->dev, 1019 + mem_info->mdl[i].len, 1020 + mem_info->mdl[i].kva, dma_pa); 1026 1021 } else 1027 1022 kfree(mem_info->mdl[i].kva); 1028 1023 } ··· 1052 1047 for (i = 0; i < mem_info->num; i++) { 1053 1048 mem_info->mdl[i].len = mem_info->len; 1054 1049 mem_info->mdl[i].kva = 1055 - pci_alloc_consistent(bnad->pcidev, 1056 - mem_info->len, &dma_pa); 1050 + dma_alloc_coherent(&bnad->pcidev->dev, 1051 + mem_info->len, &dma_pa, 1052 + GFP_KERNEL); 1057 1053 1058 1054 if (mem_info->mdl[i].kva == NULL) 1059 1055 goto err_return; ··· 2606 2600 unmap_q->unmap_array[unmap_prod].skb = skb; 2607 2601 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR)); 2608 2602 txqent->vector[vect_id].length = htons(skb_headlen(skb)); 2609 - dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb), 2610 - PCI_DMA_TODEVICE); 2611 - pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2603 + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 2604 + skb_headlen(skb), DMA_TO_DEVICE); 2605 + dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2612 2606 dma_addr); 2613 2607 2614 2608 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); ··· 2636 2630 2637 2631 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); 2638 2632 txqent->vector[vect_id].length = htons(size); 2639 - dma_addr = 2640 - pci_map_page(bnad->pcidev, frag->page, 2641 - frag->page_offset, size, 2642 - PCI_DMA_TODEVICE); 2643 - pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2633 + dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page, 2634 + frag->page_offset, size, DMA_TO_DEVICE); 2635 + dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2644 2636 dma_addr); 2645 2637 
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2646 2638 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); ··· 3026 3022 err = pci_request_regions(pdev, BNAD_NAME); 3027 3023 if (err) 3028 3024 goto disable_device; 3029 - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 3030 - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3025 + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3026 + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3031 3027 *using_dac = 1; 3032 3028 } else { 3033 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3029 + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3034 3030 if (err) { 3035 - err = pci_set_consistent_dma_mask(pdev, 3036 - DMA_BIT_MASK(32)); 3031 + err = dma_set_coherent_mask(&pdev->dev, 3032 + DMA_BIT_MASK(32)); 3037 3033 if (err) 3038 3034 goto release_regions; 3039 3035 }
+1 -1
drivers/net/bna/bnad.h
··· 181 181 /* Unmap queues for Tx / Rx cleanup */ 182 182 struct bnad_skb_unmap { 183 183 struct sk_buff *skb; 184 - DECLARE_PCI_UNMAP_ADDR(dma_addr) 184 + DEFINE_DMA_UNMAP_ADDR(dma_addr); 185 185 }; 186 186 187 187 struct bnad_unmap_q {