b43legacy: fix DMA mapping leakage

This fixes a DMA mapping leak in the case where a DMA buffer is rejected
because of its address: the buffer is now unmapped again before the error is
reported. The corresponding b43 patch by Michael Buesch has been ported to
b43legacy.

Signed-off-by: Stefano Brivio <stefano.brivio@polimi.it>
Cc: Christian Casteyde <casteyde.christian@free.fr>
Signed-off-by: John W. Linville <linville@tuxdriver.com>

Authored by Stefano Brivio and committed by John W. Linville (commits dc4ae1f4, b3fc9c6c).

+18 -11
+18 -11
drivers/net/wireless/b43legacy/dma.c
··· 585 585 586 586 /* Check if a DMA mapping address is invalid. */ 587 587 static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, 588 - dma_addr_t addr, 589 - size_t buffersize) 588 + dma_addr_t addr, 589 + size_t buffersize, 590 + bool dma_to_device) 590 591 { 591 592 if (unlikely(dma_mapping_error(addr))) 592 593 return 1; ··· 595 594 switch (ring->type) { 596 595 case B43legacy_DMA_30BIT: 597 596 if ((u64)addr + buffersize > (1ULL << 30)) 598 - return 1; 597 + goto address_error; 599 598 break; 600 599 case B43legacy_DMA_32BIT: 601 600 if ((u64)addr + buffersize > (1ULL << 32)) 602 - return 1; 601 + goto address_error; 603 602 break; 604 603 case B43legacy_DMA_64BIT: 605 604 /* Currently we can't have addresses beyond 64 bits in the kernel. */ ··· 608 607 609 608 /* The address is OK. */ 610 609 return 0; 610 + 611 + address_error: 612 + /* We can't support this address. Unmap it again. */ 613 + unmap_descbuffer(ring, addr, buffersize, dma_to_device); 614 + 615 + return 1; 611 616 } 612 617 613 618 static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, ··· 633 626 return -ENOMEM; 634 627 dmaaddr = map_descbuffer(ring, skb->data, 635 628 ring->rx_buffersize, 0); 636 - if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) { 629 + if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { 637 630 /* ugh. 
try to realloc in zone_dma */ 638 631 gfp_flags |= GFP_DMA; 639 632 ··· 646 639 ring->rx_buffersize, 0); 647 640 } 648 641 649 - if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) { 642 + if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { 650 643 dev_kfree_skb_any(skb); 651 644 return -EIO; 652 645 } ··· 898 891 DMA_TO_DEVICE); 899 892 900 893 if (b43legacy_dma_mapping_error(ring, dma_test, 901 - sizeof(struct b43legacy_txhdr_fw3))) { 894 + sizeof(struct b43legacy_txhdr_fw3), 1)) { 902 895 /* ugh realloc */ 903 896 kfree(ring->txhdr_cache); 904 897 ring->txhdr_cache = kcalloc(nr_slots, ··· 913 906 DMA_TO_DEVICE); 914 907 915 908 if (b43legacy_dma_mapping_error(ring, dma_test, 916 - sizeof(struct b43legacy_txhdr_fw3))) 909 + sizeof(struct b43legacy_txhdr_fw3), 1)) 917 910 goto err_kfree_txhdr_cache; 918 911 } 919 912 ··· 1242 1235 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, 1243 1236 sizeof(struct b43legacy_txhdr_fw3), 1); 1244 1237 if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr, 1245 - sizeof(struct b43legacy_txhdr_fw3))) { 1238 + sizeof(struct b43legacy_txhdr_fw3), 1)) { 1246 1239 ring->current_slot = old_top_slot; 1247 1240 ring->used_slots = old_used_slots; 1248 1241 return -EIO; ··· 1261 1254 1262 1255 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1263 1256 /* create a bounce buffer in zone_dma on mapping failure. 
*/ 1264 - if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) { 1257 + if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1265 1258 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); 1266 1259 if (!bounce_skb) { 1267 1260 ring->current_slot = old_top_slot; ··· 1275 1268 skb = bounce_skb; 1276 1269 meta->skb = skb; 1277 1270 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1278 - if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) { 1271 + if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1279 1272 ring->current_slot = old_top_slot; 1280 1273 ring->used_slots = old_used_slots; 1281 1274 err = -EIO;