Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

b43: Add PIO support for PCMCIA devices

This adds PIO support back (D'oh!) for PCMCIA devices.
This is a complete rewrite of the old PIO code. It does actually work
and we get reasonable performance out of it on a modern machine.
On a PowerBook G4 I get a few MBit for TX and a few more for RX.
So it doesn't work as well as DMA (of course), but it's a _lot_ faster
than the old PIO code (only got a few kBit with that).

The limiting factor is the host CPU speed, so the driver will generate 100%
CPU usage when the network interface is heavily loaded. A voluntary preemption
point in the RX path makes sure desktop latency isn't hurt.

PIO is needed for 16-bit PCMCIA devices, as we really don't want to deal with
the braindead DMA mechanisms on PCMCIA sockets. Additionally, not all
PCMCIA sockets actually support DMA in 16-bit mode (mine doesn't).

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>

authored by

Michael Buesch and committed by
John W. Linville
5100d5ac 3109ece1

+1265 -101
+17
drivers/net/wireless/b43/Kconfig
··· 62 62 63 63 If unsure, say N. 64 64 65 + # Data transfers to the device via PIO 66 + # This is only needed on PCMCIA devices. All others can do DMA properly. 67 + config B43_PIO 68 + bool 69 + depends on B43 && (B43_PCMCIA || B43_FORCE_PIO) 70 + default y 71 + 65 72 config B43_NPHY 66 73 bool "Pre IEEE 802.11n support (BROKEN)" 67 74 depends on B43 && EXPERIMENTAL && BROKEN ··· 101 94 102 95 Say Y, if you want to find out why the driver does not 103 96 work for you. 97 + 98 + config B43_FORCE_PIO 99 + bool "Force usage of PIO instead of DMA" 100 + depends on B43 && B43_DEBUG 101 + ---help--- 102 + This will disable DMA and always enable PIO instead. 103 + 104 + Say N! 105 + This is only for debugging the PIO engine code. You do 106 + _NOT_ want to enable this.
+1
drivers/net/wireless/b43/Makefile
··· 8 8 b43-y += lo.o 9 9 b43-y += wa.o 10 10 b43-y += dma.o 11 + b43-$(CONFIG_B43_PIO) += pio.o 11 12 b43-$(CONFIG_B43_RFKILL) += rfkill.o 12 13 b43-$(CONFIG_B43_LEDS) += leds.o 13 14 b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
+56 -3
drivers/net/wireless/b43/b43.h
··· 75 75 #define B43_MMIO_DMA64_BASE4 0x300 76 76 #define B43_MMIO_DMA64_BASE5 0x340 77 77 78 + /* PIO on core rev < 11 */ 79 + #define B43_MMIO_PIO_BASE0 0x300 80 + #define B43_MMIO_PIO_BASE1 0x310 81 + #define B43_MMIO_PIO_BASE2 0x320 82 + #define B43_MMIO_PIO_BASE3 0x330 83 + #define B43_MMIO_PIO_BASE4 0x340 84 + #define B43_MMIO_PIO_BASE5 0x350 85 + #define B43_MMIO_PIO_BASE6 0x360 86 + #define B43_MMIO_PIO_BASE7 0x370 87 + /* PIO on core rev >= 11 */ 88 + #define B43_MMIO_PIO11_BASE0 0x200 89 + #define B43_MMIO_PIO11_BASE1 0x240 90 + #define B43_MMIO_PIO11_BASE2 0x280 91 + #define B43_MMIO_PIO11_BASE3 0x2C0 92 + #define B43_MMIO_PIO11_BASE4 0x300 93 + #define B43_MMIO_PIO11_BASE5 0x340 94 + 78 95 #define B43_MMIO_PHY_VER 0x3E0 79 96 #define B43_MMIO_PHY_RADIO 0x3E2 80 97 #define B43_MMIO_PHY0 0x3E6 ··· 459 442 }; 460 443 461 444 struct b43_dmaring; 462 - struct b43_pioqueue; 463 445 464 446 /* The firmware file header */ 465 447 #define B43_FW_TYPE_UCODE 'u' ··· 612 596 struct b43_dmaring *tx_ring_mcast; /* Multicast */ 613 597 614 598 struct b43_dmaring *rx_ring; 599 + }; 600 + 601 + struct b43_pio_txqueue; 602 + struct b43_pio_rxqueue; 603 + 604 + /* Data structures for PIO transmission, per 80211 core. */ 605 + struct b43_pio { 606 + struct b43_pio_txqueue *tx_queue_AC_BK; /* Background */ 607 + struct b43_pio_txqueue *tx_queue_AC_BE; /* Best Effort */ 608 + struct b43_pio_txqueue *tx_queue_AC_VI; /* Video */ 609 + struct b43_pio_txqueue *tx_queue_AC_VO; /* Voice */ 610 + struct b43_pio_txqueue *tx_queue_mcast; /* Multicast */ 611 + 612 + struct b43_pio_rxqueue *rx_queue; 615 613 }; 616 614 617 615 /* Context information for a noise calculation (Link Quality). */ ··· 803 773 /* PHY/Radio device. */ 804 774 struct b43_phy phy; 805 775 806 - /* DMA engines. */ 807 - struct b43_dma dma; 776 + union { 777 + /* DMA engines. */ 778 + struct b43_dma dma; 779 + /* PIO engines. 
*/ 780 + struct b43_pio pio; 781 + }; 782 + /* Use b43_using_pio_transfers() to check whether we are using 783 + * DMA or PIO data transfers. */ 784 + bool __using_pio_transfers; 808 785 809 786 /* Various statistics about the physical device. */ 810 787 struct b43_stats stats; ··· 894 857 { 895 858 ssb_write32(dev->dev, offset, value); 896 859 } 860 + 861 + static inline bool b43_using_pio_transfers(struct b43_wldev *dev) 862 + { 863 + #ifdef CONFIG_B43_PIO 864 + return dev->__using_pio_transfers; 865 + #else 866 + return 0; 867 + #endif 868 + } 869 + 870 + #ifdef CONFIG_B43_FORCE_PIO 871 + # define B43_FORCE_PIO 1 872 + #else 873 + # define B43_FORCE_PIO 0 874 + #endif 875 + 897 876 898 877 /* Message printing */ 899 878 void b43info(struct b43_wl *wl, const char *fmt, ...)
+64 -56
drivers/net/wireless/b43/dma.c
··· 550 550 struct b43_dmadesc_meta *meta, gfp_t gfp_flags) 551 551 { 552 552 struct b43_rxhdr_fw4 *rxhdr; 553 - struct b43_hwtxstatus *txstat; 554 553 dma_addr_t dmaaddr; 555 554 struct sk_buff *skb; 556 555 ··· 585 586 586 587 rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); 587 588 rxhdr->frame_len = 0; 588 - txstat = (struct b43_hwtxstatus *)(skb->data); 589 - txstat->cookie = 0; 590 589 591 590 return 0; 592 591 } ··· 773 776 return DMA_30BIT_MASK; 774 777 } 775 778 779 + static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask) 780 + { 781 + if (dmamask == DMA_30BIT_MASK) 782 + return B43_DMA_30BIT; 783 + if (dmamask == DMA_32BIT_MASK) 784 + return B43_DMA_32BIT; 785 + if (dmamask == DMA_64BIT_MASK) 786 + return B43_DMA_64BIT; 787 + B43_WARN_ON(1); 788 + return B43_DMA_30BIT; 789 + } 790 + 776 791 /* Main initialization function. */ 777 792 static 778 793 struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, ··· 965 956 966 957 void b43_dma_free(struct b43_wldev *dev) 967 958 { 968 - struct b43_dma *dma = &dev->dma; 959 + struct b43_dma *dma; 960 + 961 + if (b43_using_pio_transfers(dev)) 962 + return; 963 + dma = &dev->dma; 969 964 970 965 destroy_ring(dma, rx_ring); 971 966 destroy_ring(dma, tx_ring_AC_BK); ··· 987 974 enum b43_dmatype type; 988 975 989 976 dmamask = supported_dma_mask(dev); 990 - switch (dmamask) { 991 - default: 992 - B43_WARN_ON(1); 993 - case DMA_30BIT_MASK: 994 - type = B43_DMA_30BIT; 995 - break; 996 - case DMA_32BIT_MASK: 997 - type = B43_DMA_32BIT; 998 - break; 999 - case DMA_64BIT_MASK: 1000 - type = B43_DMA_64BIT; 1001 - break; 1002 - } 977 + type = dma_mask_to_engine_type(dmamask); 1003 978 err = ssb_dma_set_mask(dev->dev, dmamask); 1004 979 if (err) { 1005 980 b43err(dev->wl, "The machine/kernel does not support " ··· 1114 1113 size_t hdrsize = b43_txhdr_size(ring->dev); 1115 1114 1116 1115 #define SLOTS_PER_PACKET 2 1117 - B43_WARN_ON(skb_shinfo(skb)->nr_frags); 1118 1116 1119 1117 old_top_slot = ring->current_slot; 1120 
1118 old_used_slots = ring->used_slots; ··· 1257 1257 int err = 0; 1258 1258 unsigned long flags; 1259 1259 1260 - if (unlikely(skb->len < 2 + 2 + 6)) { 1261 - /* Too short, this can't be a valid frame. */ 1262 - return -EINVAL; 1263 - } 1264 - 1265 1260 hdr = (struct ieee80211_hdr *)skb->data; 1266 1261 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 1267 1262 /* The multicast ring will be sent after the DTIM */ ··· 1314 1319 return err; 1315 1320 } 1316 1321 1317 - static void b43_fill_txstatus_report(struct b43_dmaring *ring, 1318 - struct ieee80211_tx_status *report, 1319 - const struct b43_txstatus *status) 1320 - { 1321 - bool frame_failed = 0; 1322 - 1323 - if (status->acked) { 1324 - /* The frame was ACKed. */ 1325 - report->flags |= IEEE80211_TX_STATUS_ACK; 1326 - } else { 1327 - /* The frame was not ACKed... */ 1328 - if (!(report->control.flags & IEEE80211_TXCTL_NO_ACK)) { 1329 - /* ...but we expected an ACK. */ 1330 - frame_failed = 1; 1331 - report->excessive_retries = 1; 1332 - } 1333 - } 1334 - if (status->frame_count == 0) { 1335 - /* The frame was not transmitted at all. */ 1336 - report->retry_count = 0; 1337 - } else { 1338 - report->retry_count = status->frame_count - 1; 1339 - #ifdef CONFIG_B43_DEBUG 1340 - if (frame_failed) 1341 - ring->nr_failed_tx_packets++; 1342 - else 1343 - ring->nr_succeed_tx_packets++; 1344 - ring->nr_total_packet_tries += status->frame_count; 1345 - #endif /* DEBUG */ 1346 - } 1347 - } 1348 - 1349 1322 /* Called with IRQs disabled. */ 1350 1323 void b43_dma_handle_txstatus(struct b43_wldev *dev, 1351 1324 const struct b43_txstatus *status) ··· 1323 1360 struct b43_dmadesc_generic *desc; 1324 1361 struct b43_dmadesc_meta *meta; 1325 1362 int slot; 1363 + bool frame_succeed; 1326 1364 1327 1365 ring = parse_cookie(dev, status->cookie, &slot); 1328 1366 if (unlikely(!ring)) ··· 1350 1386 * status of the transmission. 1351 1387 * Some fields of txstat are already filled in dma_tx(). 
1352 1388 */ 1353 - b43_fill_txstatus_report(ring, &(meta->txstat), status); 1389 + frame_succeed = b43_fill_txstatus_report( 1390 + &(meta->txstat), status); 1391 + #ifdef CONFIG_B43_DEBUG 1392 + if (frame_succeed) 1393 + ring->nr_succeed_tx_packets++; 1394 + else 1395 + ring->nr_failed_tx_packets++; 1396 + ring->nr_total_packet_tries += status->frame_count; 1397 + #endif /* DEBUG */ 1354 1398 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, 1355 1399 &(meta->txstat)); 1356 1400 /* skb is freed by ieee80211_tx_status_irqsafe() */ ··· 1545 1573 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK); 1546 1574 b43_power_saving_ctl_bits(dev, 0); 1547 1575 } 1576 + 1577 + #ifdef CONFIG_B43_PIO 1578 + static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, 1579 + u16 mmio_base, bool enable) 1580 + { 1581 + u32 ctl; 1582 + 1583 + if (type == B43_DMA_64BIT) { 1584 + ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL); 1585 + ctl &= ~B43_DMA64_RXDIRECTFIFO; 1586 + if (enable) 1587 + ctl |= B43_DMA64_RXDIRECTFIFO; 1588 + b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl); 1589 + } else { 1590 + ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL); 1591 + ctl &= ~B43_DMA32_RXDIRECTFIFO; 1592 + if (enable) 1593 + ctl |= B43_DMA32_RXDIRECTFIFO; 1594 + b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl); 1595 + } 1596 + } 1597 + 1598 + /* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine. 1599 + * This is called from PIO code, so DMA structures are not available. */ 1600 + void b43_dma_direct_fifo_rx(struct b43_wldev *dev, 1601 + unsigned int engine_index, bool enable) 1602 + { 1603 + enum b43_dmatype type; 1604 + u16 mmio_base; 1605 + 1606 + type = dma_mask_to_engine_type(supported_dma_mask(dev)); 1607 + 1608 + mmio_base = b43_dmacontroller_base(type, engine_index); 1609 + direct_fifo_rx(dev, type, mmio_base, enable); 1610 + } 1611 + #endif /* CONFIG_B43_PIO */
+3
drivers/net/wireless/b43/dma.h
··· 291 291 292 292 void b43_dma_rx(struct b43_dmaring *ring); 293 293 294 + void b43_dma_direct_fifo_rx(struct b43_wldev *dev, 295 + unsigned int engine_index, bool enable); 296 + 294 297 #endif /* B43_DMA_H_ */
+31 -6
drivers/net/wireless/b43/main.c
··· 47 47 #include "debugfs.h" 48 48 #include "phy.h" 49 49 #include "dma.h" 50 + #include "pio.h" 50 51 #include "sysfs.h" 51 52 #include "xmit.h" 52 53 #include "lo.h" ··· 1594 1593 handle_irq_noise(dev); 1595 1594 1596 1595 /* Check the DMA reason registers for received data. */ 1597 - if (dma_reason[0] & B43_DMAIRQ_RX_DONE) 1598 - b43_dma_rx(dev->dma.rx_ring); 1596 + if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { 1597 + if (b43_using_pio_transfers(dev)) 1598 + b43_pio_rx(dev->pio.rx_queue); 1599 + else 1600 + b43_dma_rx(dev->dma.rx_ring); 1601 + } 1599 1602 B43_WARN_ON(dma_reason[1] & B43_DMAIRQ_RX_DONE); 1600 1603 B43_WARN_ON(dma_reason[2] & B43_DMAIRQ_RX_DONE); 1601 1604 B43_WARN_ON(dma_reason[3] & B43_DMAIRQ_RX_DONE); ··· 2703 2698 struct b43_wldev *dev = wl->current_dev; 2704 2699 int err = -ENODEV; 2705 2700 2701 + if (unlikely(skb->len < 2 + 2 + 6)) { 2702 + /* Too short, this can't be a valid frame. */ 2703 + return -EINVAL; 2704 + } 2705 + B43_WARN_ON(skb_shinfo(skb)->nr_frags); 2706 + 2706 2707 if (unlikely(!dev)) 2707 2708 goto out; 2708 2709 if (unlikely(b43_status(dev) < B43_STAT_STARTED)) 2709 2710 goto out; 2710 - /* DMA-TX is done without a global lock. */ 2711 - err = b43_dma_tx(dev, skb, ctl); 2711 + /* TX is done without a global lock. 
*/ 2712 + if (b43_using_pio_transfers(dev)) 2713 + err = b43_pio_tx(dev, skb, ctl); 2714 + else 2715 + err = b43_dma_tx(dev, skb, ctl); 2712 2716 out: 2713 2717 if (unlikely(err)) 2714 2718 return NETDEV_TX_BUSY; ··· 2911 2897 goto out; 2912 2898 spin_lock_irqsave(&wl->irq_lock, flags); 2913 2899 if (likely(b43_status(dev) >= B43_STAT_STARTED)) { 2914 - b43_dma_get_tx_stats(dev, stats); 2900 + if (b43_using_pio_transfers(dev)) 2901 + b43_pio_get_tx_stats(dev, stats); 2902 + else 2903 + b43_dma_get_tx_stats(dev, stats); 2915 2904 err = 0; 2916 2905 } 2917 2906 spin_unlock_irqrestore(&wl->irq_lock, flags); ··· 3383 3366 3384 3367 b43_set_status(dev, B43_STAT_INITIALIZED); 3385 3368 3369 + b43_pio_stop(dev); 3386 3370 mutex_unlock(&wl->mutex); 3387 3371 /* Must unlock as it would otherwise deadlock. No races here. 3388 3372 * Cancel the possibly running self-rearming periodic work. */ ··· 3701 3683 b43_rng_exit(dev->wl, false); 3702 3684 } 3703 3685 b43_dma_free(dev); 3686 + b43_pio_free(dev); 3704 3687 b43_chip_exit(dev); 3705 3688 b43_radio_turn_off(dev, 1); 3706 3689 b43_switch_analog(dev, 0); ··· 3799 3780 /* Maximum Contention Window */ 3800 3781 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); 3801 3782 3802 - err = b43_dma_init(dev); 3783 + if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || B43_FORCE_PIO) { 3784 + dev->__using_pio_transfers = 1; 3785 + err = b43_pio_init(dev); 3786 + } else { 3787 + dev->__using_pio_transfers = 0; 3788 + err = b43_dma_init(dev); 3789 + } 3803 3790 if (err) 3804 3791 goto err_chip_exit; 3805 3792 b43_qos_init(dev);
+835
drivers/net/wireless/b43/pio.c
··· 1 + /* 2 + 3 + Broadcom B43 wireless driver 4 + 5 + PIO data transfer 6 + 7 + Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de> 8 + 9 + This program is free software; you can redistribute it and/or modify 10 + it under the terms of the GNU General Public License as published by 11 + the Free Software Foundation; either version 2 of the License, or 12 + (at your option) any later version. 13 + 14 + This program is distributed in the hope that it will be useful, 15 + but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 + GNU General Public License for more details. 18 + 19 + You should have received a copy of the GNU General Public License 20 + along with this program; see the file COPYING. If not, write to 21 + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, 22 + Boston, MA 02110-1301, USA. 23 + 24 + */ 25 + 26 + #include "b43.h" 27 + #include "pio.h" 28 + #include "dma.h" 29 + #include "main.h" 30 + #include "xmit.h" 31 + 32 + #include <linux/delay.h> 33 + 34 + 35 + static void b43_pio_rx_work(struct work_struct *work); 36 + 37 + 38 + static u16 generate_cookie(struct b43_pio_txqueue *q, 39 + struct b43_pio_txpacket *pack) 40 + { 41 + u16 cookie; 42 + 43 + /* Use the upper 4 bits of the cookie as 44 + * PIO controller ID and store the packet index number 45 + * in the lower 12 bits. 46 + * Note that the cookie must never be 0, as this 47 + * is a special value used in RX path. 48 + * It can also not be 0xFFFF because that is special 49 + * for multicast frames. 
50 + */ 51 + cookie = (((u16)q->index + 1) << 12); 52 + cookie |= pack->index; 53 + 54 + return cookie; 55 + } 56 + 57 + static 58 + struct b43_pio_txqueue * parse_cookie(struct b43_wldev *dev, 59 + u16 cookie, 60 + struct b43_pio_txpacket **pack) 61 + { 62 + struct b43_pio *pio = &dev->pio; 63 + struct b43_pio_txqueue *q = NULL; 64 + unsigned int pack_index; 65 + 66 + switch (cookie & 0xF000) { 67 + case 0x1000: 68 + q = pio->tx_queue_AC_BK; 69 + break; 70 + case 0x2000: 71 + q = pio->tx_queue_AC_BE; 72 + break; 73 + case 0x3000: 74 + q = pio->tx_queue_AC_VI; 75 + break; 76 + case 0x4000: 77 + q = pio->tx_queue_AC_VO; 78 + break; 79 + case 0x5000: 80 + q = pio->tx_queue_mcast; 81 + break; 82 + } 83 + if (B43_WARN_ON(!q)) 84 + return NULL; 85 + pack_index = (cookie & 0x0FFF); 86 + if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets))) 87 + return NULL; 88 + *pack = &q->packets[pack_index]; 89 + 90 + return q; 91 + } 92 + 93 + static u16 index_to_pioqueue_base(struct b43_wldev *dev, 94 + unsigned int index) 95 + { 96 + static const u16 bases[] = { 97 + B43_MMIO_PIO_BASE0, 98 + B43_MMIO_PIO_BASE1, 99 + B43_MMIO_PIO_BASE2, 100 + B43_MMIO_PIO_BASE3, 101 + B43_MMIO_PIO_BASE4, 102 + B43_MMIO_PIO_BASE5, 103 + B43_MMIO_PIO_BASE6, 104 + B43_MMIO_PIO_BASE7, 105 + }; 106 + static const u16 bases_rev11[] = { 107 + B43_MMIO_PIO11_BASE0, 108 + B43_MMIO_PIO11_BASE1, 109 + B43_MMIO_PIO11_BASE2, 110 + B43_MMIO_PIO11_BASE3, 111 + B43_MMIO_PIO11_BASE4, 112 + B43_MMIO_PIO11_BASE5, 113 + }; 114 + 115 + if (dev->dev->id.revision >= 11) { 116 + B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11)); 117 + return bases_rev11[index]; 118 + } 119 + B43_WARN_ON(index >= ARRAY_SIZE(bases)); 120 + return bases[index]; 121 + } 122 + 123 + static u16 pio_txqueue_offset(struct b43_wldev *dev) 124 + { 125 + if (dev->dev->id.revision >= 11) 126 + return 0x18; 127 + return 0; 128 + } 129 + 130 + static u16 pio_rxqueue_offset(struct b43_wldev *dev) 131 + { 132 + if (dev->dev->id.revision >= 11) 133 + return 
0x38; 134 + return 8; 135 + } 136 + 137 + static struct b43_pio_txqueue * b43_setup_pioqueue_tx(struct b43_wldev *dev, 138 + unsigned int index) 139 + { 140 + struct b43_pio_txqueue *q; 141 + struct b43_pio_txpacket *p; 142 + unsigned int i; 143 + 144 + q = kzalloc(sizeof(*q), GFP_KERNEL); 145 + if (!q) 146 + return NULL; 147 + spin_lock_init(&q->lock); 148 + q->dev = dev; 149 + q->rev = dev->dev->id.revision; 150 + q->mmio_base = index_to_pioqueue_base(dev, index) + 151 + pio_txqueue_offset(dev); 152 + q->index = index; 153 + 154 + q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS; 155 + if (q->rev >= 8) { 156 + q->buffer_size = 1920; //FIXME this constant is wrong. 157 + } else { 158 + q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE); 159 + q->buffer_size -= 80; 160 + } 161 + 162 + INIT_LIST_HEAD(&q->packets_list); 163 + for (i = 0; i < ARRAY_SIZE(q->packets); i++) { 164 + p = &(q->packets[i]); 165 + INIT_LIST_HEAD(&p->list); 166 + p->index = i; 167 + p->queue = q; 168 + list_add(&p->list, &q->packets_list); 169 + } 170 + 171 + return q; 172 + } 173 + 174 + static struct b43_pio_rxqueue * b43_setup_pioqueue_rx(struct b43_wldev *dev, 175 + unsigned int index) 176 + { 177 + struct b43_pio_rxqueue *q; 178 + 179 + q = kzalloc(sizeof(*q), GFP_KERNEL); 180 + if (!q) 181 + return NULL; 182 + spin_lock_init(&q->lock); 183 + q->dev = dev; 184 + q->rev = dev->dev->id.revision; 185 + q->mmio_base = index_to_pioqueue_base(dev, index) + 186 + pio_rxqueue_offset(dev); 187 + INIT_WORK(&q->rx_work, b43_pio_rx_work); 188 + 189 + /* Enable Direct FIFO RX (PIO) on the engine. 
*/ 190 + b43_dma_direct_fifo_rx(dev, index, 1); 191 + 192 + return q; 193 + } 194 + 195 + static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q) 196 + { 197 + struct b43_pio_txpacket *pack; 198 + unsigned int i; 199 + 200 + for (i = 0; i < ARRAY_SIZE(q->packets); i++) { 201 + pack = &(q->packets[i]); 202 + if (pack->skb) { 203 + dev_kfree_skb_any(pack->skb); 204 + pack->skb = NULL; 205 + } 206 + } 207 + } 208 + 209 + static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q, 210 + const char *name) 211 + { 212 + if (!q) 213 + return; 214 + b43_pio_cancel_tx_packets(q); 215 + kfree(q); 216 + } 217 + 218 + static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q, 219 + const char *name) 220 + { 221 + if (!q) 222 + return; 223 + kfree(q); 224 + } 225 + 226 + #define destroy_queue_tx(pio, queue) do { \ 227 + b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue)); \ 228 + (pio)->queue = NULL; \ 229 + } while (0) 230 + 231 + #define destroy_queue_rx(pio, queue) do { \ 232 + b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue)); \ 233 + (pio)->queue = NULL; \ 234 + } while (0) 235 + 236 + void b43_pio_free(struct b43_wldev *dev) 237 + { 238 + struct b43_pio *pio; 239 + 240 + if (!b43_using_pio_transfers(dev)) 241 + return; 242 + pio = &dev->pio; 243 + 244 + destroy_queue_rx(pio, rx_queue); 245 + destroy_queue_tx(pio, tx_queue_mcast); 246 + destroy_queue_tx(pio, tx_queue_AC_VO); 247 + destroy_queue_tx(pio, tx_queue_AC_VI); 248 + destroy_queue_tx(pio, tx_queue_AC_BE); 249 + destroy_queue_tx(pio, tx_queue_AC_BK); 250 + } 251 + 252 + void b43_pio_stop(struct b43_wldev *dev) 253 + { 254 + if (!b43_using_pio_transfers(dev)) 255 + return; 256 + cancel_work_sync(&dev->pio.rx_queue->rx_work); 257 + } 258 + 259 + int b43_pio_init(struct b43_wldev *dev) 260 + { 261 + struct b43_pio *pio = &dev->pio; 262 + int err = -ENOMEM; 263 + 264 + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) 265 + & ~B43_MACCTL_BE); 266 + b43_shm_write16(dev, 
B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0); 267 + 268 + pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0); 269 + if (!pio->tx_queue_AC_BK) 270 + goto out; 271 + 272 + pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1); 273 + if (!pio->tx_queue_AC_BE) 274 + goto err_destroy_bk; 275 + 276 + pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2); 277 + if (!pio->tx_queue_AC_VI) 278 + goto err_destroy_be; 279 + 280 + pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3); 281 + if (!pio->tx_queue_AC_VO) 282 + goto err_destroy_vi; 283 + 284 + pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4); 285 + if (!pio->tx_queue_mcast) 286 + goto err_destroy_vo; 287 + 288 + pio->rx_queue = b43_setup_pioqueue_rx(dev, 0); 289 + if (!pio->rx_queue) 290 + goto err_destroy_mcast; 291 + 292 + b43dbg(dev->wl, "PIO initialized\n"); 293 + err = 0; 294 + out: 295 + return err; 296 + 297 + err_destroy_mcast: 298 + destroy_queue_tx(pio, tx_queue_mcast); 299 + err_destroy_vo: 300 + destroy_queue_tx(pio, tx_queue_AC_VO); 301 + err_destroy_vi: 302 + destroy_queue_tx(pio, tx_queue_AC_VI); 303 + err_destroy_be: 304 + destroy_queue_tx(pio, tx_queue_AC_BE); 305 + err_destroy_bk: 306 + destroy_queue_tx(pio, tx_queue_AC_BK); 307 + return err; 308 + } 309 + 310 + /* Static mapping of mac80211's queues (priorities) to b43 PIO queues. 
*/ 311 + static struct b43_pio_txqueue * select_queue_by_priority(struct b43_wldev *dev, 312 + u8 queue_prio) 313 + { 314 + struct b43_pio_txqueue *q; 315 + 316 + if (b43_modparam_qos) { 317 + /* 0 = highest priority */ 318 + switch (queue_prio) { 319 + default: 320 + B43_WARN_ON(1); 321 + /* fallthrough */ 322 + case 0: 323 + q = dev->pio.tx_queue_AC_VO; 324 + break; 325 + case 1: 326 + q = dev->pio.tx_queue_AC_VI; 327 + break; 328 + case 2: 329 + q = dev->pio.tx_queue_AC_BE; 330 + break; 331 + case 3: 332 + q = dev->pio.tx_queue_AC_BK; 333 + break; 334 + } 335 + } else 336 + q = dev->pio.tx_queue_AC_BE; 337 + 338 + return q; 339 + } 340 + 341 + static inline void tx_write_2byte_queue(struct b43_pio_txqueue *q, 342 + u16 *ctl, 343 + const void *_data, 344 + unsigned int data_len) 345 + { 346 + const u8 *data = _data; 347 + unsigned int i; 348 + u16 value; 349 + 350 + *ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; 351 + b43_piotx_write16(q, B43_PIO_TXCTL, *ctl); 352 + for (i = 0; i < data_len; i += 2) { 353 + value = data[i]; 354 + if (i + 1 < data_len) { 355 + value |= (u16)(data[i + 1]) << 8; 356 + } else { 357 + *ctl &= ~B43_PIO_TXCTL_WRITEHI; 358 + b43_piotx_write16(q, B43_PIO_TXCTL, *ctl); 359 + } 360 + b43_piotx_write16(q, B43_PIO_TXDATA, value); 361 + } 362 + } 363 + 364 + static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack, 365 + const u8 *hdr, unsigned int hdrlen) 366 + { 367 + struct b43_pio_txqueue *q = pack->queue; 368 + const char *frame = pack->skb->data; 369 + unsigned int frame_len = pack->skb->len; 370 + u16 ctl; 371 + 372 + ctl = b43_piotx_read16(q, B43_PIO_TXCTL); 373 + ctl |= B43_PIO_TXCTL_FREADY; 374 + ctl &= ~B43_PIO_TXCTL_EOF; 375 + 376 + /* Transfer the header data. */ 377 + tx_write_2byte_queue(q, &ctl, hdr, hdrlen); 378 + /* Transfer the frame data. 
*/ 379 + tx_write_2byte_queue(q, &ctl, frame, frame_len); 380 + 381 + ctl |= B43_PIO_TXCTL_EOF; 382 + b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 383 + } 384 + 385 + static inline void tx_write_4byte_queue(struct b43_pio_txqueue *q, 386 + u32 *ctl, 387 + const void *_data, 388 + unsigned int data_len) 389 + { 390 + const u8 *data = _data; 391 + unsigned int i; 392 + u32 value; 393 + bool ctl_changed = 0; 394 + 395 + *ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 | 396 + B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31; 397 + b43_piotx_write32(q, B43_PIO8_TXCTL, *ctl); 398 + for (i = 0; i < data_len; i += 4) { 399 + value = data[i]; 400 + if (i + 1 < data_len) { 401 + value |= (u32)(data[i + 1]) << 8; 402 + } else { 403 + *ctl &= ~B43_PIO8_TXCTL_8_15; 404 + ctl_changed = 1; 405 + } 406 + if (i + 2 < data_len) { 407 + value |= (u32)(data[i + 2]) << 16; 408 + } else { 409 + *ctl &= ~B43_PIO8_TXCTL_16_23; 410 + ctl_changed = 1; 411 + } 412 + if (i + 3 < data_len) { 413 + value |= (u32)(data[i + 3]) << 24; 414 + } else { 415 + *ctl &= ~B43_PIO8_TXCTL_24_31; 416 + ctl_changed = 1; 417 + } 418 + if (ctl_changed) 419 + b43_piotx_write32(q, B43_PIO8_TXCTL, *ctl); 420 + b43_piotx_write32(q, B43_PIO8_TXDATA, value); 421 + } 422 + } 423 + 424 + static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack, 425 + const u8 *hdr, unsigned int hdrlen) 426 + { 427 + struct b43_pio_txqueue *q = pack->queue; 428 + const char *frame = pack->skb->data; 429 + unsigned int frame_len = pack->skb->len; 430 + u32 ctl; 431 + 432 + ctl = b43_piotx_read32(q, B43_PIO8_TXCTL); 433 + ctl |= B43_PIO8_TXCTL_FREADY; 434 + ctl &= ~B43_PIO8_TXCTL_EOF; 435 + 436 + /* Transfer the header data. */ 437 + tx_write_4byte_queue(q, &ctl, hdr, hdrlen); 438 + /* Transfer the frame data. 
*/ 439 + tx_write_4byte_queue(q, &ctl, frame, frame_len); 440 + 441 + ctl |= B43_PIO8_TXCTL_EOF; 442 + b43_piotx_write32(q, B43_PIO_TXCTL, ctl); 443 + } 444 + 445 + static int pio_tx_frame(struct b43_pio_txqueue *q, 446 + struct sk_buff *skb, 447 + struct ieee80211_tx_control *ctl) 448 + { 449 + struct b43_pio_txpacket *pack; 450 + struct b43_txhdr txhdr; 451 + u16 cookie; 452 + int err; 453 + unsigned int hdrlen; 454 + 455 + B43_WARN_ON(list_empty(&q->packets_list)); 456 + pack = list_entry(q->packets_list.next, 457 + struct b43_pio_txpacket, list); 458 + memset(&pack->txstat, 0, sizeof(pack->txstat)); 459 + memcpy(&pack->txstat.control, ctl, sizeof(*ctl)); 460 + 461 + cookie = generate_cookie(q, pack); 462 + hdrlen = b43_txhdr_size(q->dev); 463 + err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb->data, 464 + skb->len, ctl, cookie); 465 + if (err) 466 + return err; 467 + 468 + if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 469 + /* Tell the firmware about the cookie of the last 470 + * mcast frame, so it can clear the more-data bit in it. */ 471 + b43_shm_write16(q->dev, B43_SHM_SHARED, 472 + B43_SHM_SH_MCASTCOOKIE, cookie); 473 + } 474 + 475 + pack->skb = skb; 476 + if (q->rev >= 8) 477 + pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen); 478 + else 479 + pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen); 480 + 481 + /* Remove it from the list of available packet slots. 482 + * It will be put back when we receive the status report. */ 483 + list_del(&pack->list); 484 + 485 + /* Update the queue statistics. 
*/ 486 + q->buffer_used += roundup(skb->len + hdrlen, 4); 487 + q->free_packet_slots -= 1; 488 + 489 + return 0; 490 + } 491 + 492 + int b43_pio_tx(struct b43_wldev *dev, 493 + struct sk_buff *skb, struct ieee80211_tx_control *ctl) 494 + { 495 + struct b43_pio_txqueue *q; 496 + struct ieee80211_hdr *hdr; 497 + unsigned long flags; 498 + unsigned int hdrlen, total_len; 499 + int err = 0; 500 + 501 + hdr = (struct ieee80211_hdr *)skb->data; 502 + if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 503 + /* The multicast queue will be sent after the DTIM. */ 504 + q = dev->pio.tx_queue_mcast; 505 + /* Set the frame More-Data bit. Ucode will clear it 506 + * for us on the last frame. */ 507 + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 508 + } else { 509 + /* Decide by priority where to put this frame. */ 510 + q = select_queue_by_priority(dev, ctl->queue); 511 + } 512 + 513 + spin_lock_irqsave(&q->lock, flags); 514 + 515 + hdrlen = b43_txhdr_size(dev); 516 + total_len = roundup(skb->len + hdrlen, 4); 517 + 518 + if (unlikely(total_len > q->buffer_size)) { 519 + err = -ENOBUFS; 520 + b43dbg(dev->wl, "PIO: TX packet longer than queue.\n"); 521 + goto out_unlock; 522 + } 523 + if (unlikely(q->free_packet_slots == 0)) { 524 + err = -ENOBUFS; 525 + b43warn(dev->wl, "PIO: TX packet overflow.\n"); 526 + goto out_unlock; 527 + } 528 + B43_WARN_ON(q->buffer_used > q->buffer_size); 529 + 530 + if (total_len > (q->buffer_size - q->buffer_used)) { 531 + /* Not enough memory on the queue. */ 532 + err = -EBUSY; 533 + ieee80211_stop_queue(dev->wl->hw, ctl->queue); 534 + q->stopped = 1; 535 + goto out_unlock; 536 + } 537 + 538 + /* Assign the queue number to the ring (if not already done before) 539 + * so TX status handling can use it. The mac80211-queue to b43-queue 540 + * mapping is static, so we don't need to store it per frame. 
*/ 541 + q->queue_prio = ctl->queue; 542 + 543 + err = pio_tx_frame(q, skb, ctl); 544 + if (unlikely(err == -ENOKEY)) { 545 + /* Drop this packet, as we don't have the encryption key 546 + * anymore and must not transmit it unencrypted. */ 547 + dev_kfree_skb_any(skb); 548 + err = 0; 549 + goto out_unlock; 550 + } 551 + if (unlikely(err)) { 552 + b43err(dev->wl, "PIO transmission failure\n"); 553 + goto out_unlock; 554 + } 555 + q->nr_tx_packets++; 556 + 557 + B43_WARN_ON(q->buffer_used > q->buffer_size); 558 + if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) || 559 + (q->free_packet_slots == 0)) { 560 + /* The queue is full. */ 561 + ieee80211_stop_queue(dev->wl->hw, ctl->queue); 562 + q->stopped = 1; 563 + } 564 + 565 + out_unlock: 566 + spin_unlock_irqrestore(&q->lock, flags); 567 + 568 + return err; 569 + } 570 + 571 + /* Called with IRQs disabled. */ 572 + void b43_pio_handle_txstatus(struct b43_wldev *dev, 573 + const struct b43_txstatus *status) 574 + { 575 + struct b43_pio_txqueue *q; 576 + struct b43_pio_txpacket *pack = NULL; 577 + unsigned int total_len; 578 + 579 + q = parse_cookie(dev, status->cookie, &pack); 580 + if (unlikely(!q)) 581 + return; 582 + B43_WARN_ON(!pack); 583 + 584 + spin_lock(&q->lock); /* IRQs are already disabled. 
*/ 585 + 586 + b43_fill_txstatus_report(&(pack->txstat), status); 587 + 588 + total_len = pack->skb->len + b43_txhdr_size(dev); 589 + total_len = roundup(total_len, 4); 590 + q->buffer_used -= total_len; 591 + q->free_packet_slots += 1; 592 + 593 + ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb, 594 + &(pack->txstat)); 595 + pack->skb = NULL; 596 + list_add(&pack->list, &q->packets_list); 597 + 598 + if (q->stopped) { 599 + ieee80211_wake_queue(dev->wl->hw, q->queue_prio); 600 + q->stopped = 0; 601 + } 602 + 603 + spin_unlock(&q->lock); 604 + } 605 + 606 + void b43_pio_get_tx_stats(struct b43_wldev *dev, 607 + struct ieee80211_tx_queue_stats *stats) 608 + { 609 + const int nr_queues = dev->wl->hw->queues; 610 + struct b43_pio_txqueue *q; 611 + struct ieee80211_tx_queue_stats_data *data; 612 + unsigned long flags; 613 + int i; 614 + 615 + for (i = 0; i < nr_queues; i++) { 616 + data = &(stats->data[i]); 617 + q = select_queue_by_priority(dev, i); 618 + 619 + spin_lock_irqsave(&q->lock, flags); 620 + data->len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots; 621 + data->limit = B43_PIO_MAX_NR_TXPACKETS; 622 + data->count = q->nr_tx_packets; 623 + spin_unlock_irqrestore(&q->lock, flags); 624 + } 625 + } 626 + 627 + /* Returns whether we should fetch another frame. */ 628 + static bool pio_rx_frame(struct b43_pio_rxqueue *q) 629 + { 630 + struct b43_rxhdr_fw4 rxhdr; 631 + u16 len; 632 + u32 macstat; 633 + unsigned int i, padding; 634 + struct sk_buff *skb; 635 + const char *err_msg = NULL; 636 + 637 + memset(&rxhdr, 0, sizeof(rxhdr)); 638 + 639 + /* Check if we have data and wait for it to get ready. 
*/ 640 + if (q->rev >= 8) { 641 + u32 ctl; 642 + 643 + ctl = b43_piorx_read32(q, B43_PIO8_RXCTL); 644 + if (!(ctl & B43_PIO8_RXCTL_FRAMERDY)) 645 + return 0; 646 + b43_piorx_write32(q, B43_PIO8_RXCTL, 647 + B43_PIO8_RXCTL_FRAMERDY); 648 + for (i = 0; i < 10; i++) { 649 + ctl = b43_piorx_read32(q, B43_PIO8_RXCTL); 650 + if (ctl & B43_PIO8_RXCTL_DATARDY) 651 + goto data_ready; 652 + udelay(10); 653 + } 654 + } else { 655 + u16 ctl; 656 + 657 + ctl = b43_piorx_read16(q, B43_PIO_RXCTL); 658 + if (!(ctl & B43_PIO_RXCTL_FRAMERDY)) 659 + return 0; 660 + b43_piorx_write16(q, B43_PIO_RXCTL, 661 + B43_PIO_RXCTL_FRAMERDY); 662 + for (i = 0; i < 10; i++) { 663 + ctl = b43_piorx_read16(q, B43_PIO_RXCTL); 664 + if (ctl & B43_PIO_RXCTL_DATARDY) 665 + goto data_ready; 666 + udelay(10); 667 + } 668 + } 669 + b43dbg(q->dev->wl, "PIO RX timed out\n"); 670 + return 1; 671 + data_ready: 672 + 673 + /* Get the preamble (RX header) */ 674 + if (q->rev >= 8) { 675 + u32 *preamble = (u32 *)&rxhdr; 676 + u32 value; 677 + 678 + for (i = 0; i < sizeof(rxhdr); i += 4) { 679 + value = b43_piorx_read32(q, B43_PIO8_RXDATA); 680 + preamble[i / 4] = cpu_to_le32(value); 681 + } 682 + } else { 683 + u16 *preamble = (u16 *)&rxhdr; 684 + u16 value; 685 + 686 + for (i = 0; i < sizeof(rxhdr); i += 2) { 687 + value = b43_piorx_read16(q, B43_PIO_RXDATA); 688 + preamble[i / 2] = cpu_to_le16(value); 689 + } 690 + } 691 + /* Sanity checks. */ 692 + len = le16_to_cpu(rxhdr.frame_len); 693 + if (unlikely(len > 0x700)) { 694 + err_msg = "len > 0x700"; 695 + goto rx_error; 696 + } 697 + if (unlikely(len == 0)) { 698 + err_msg = "len == 0"; 699 + goto rx_error; 700 + } 701 + 702 + macstat = le32_to_cpu(rxhdr.mac_status); 703 + if (macstat & B43_RX_MAC_FCSERR) { 704 + if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { 705 + /* Drop frames with failed FCS. 
*/ 706 + err_msg = "Frame FCS error"; 707 + goto rx_error; 708 + } 709 + } 710 + 711 + /* We always pad 2 bytes, as that's what upstream code expects 712 + * due to the RX-header being 30 bytes. In case the frame is 713 + * unaligned, we pad another 2 bytes. */ 714 + padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0; 715 + skb = dev_alloc_skb(len + padding + 2); 716 + if (unlikely(!skb)) { 717 + err_msg = "Out of memory"; 718 + goto rx_error; 719 + } 720 + skb_reserve(skb, 2); 721 + skb_put(skb, len + padding); 722 + if (q->rev >= 8) { 723 + u32 value; 724 + 725 + for (i = padding; i < len + padding; i += 4) { 726 + value = b43_piorx_read32(q, B43_PIO8_RXDATA); 727 + skb->data[i] = value; 728 + if ((i + 1) < (len + padding)) 729 + skb->data[i + 1] = value >> 8; 730 + if ((i + 2) < (len + padding)) 731 + skb->data[i + 2] = value >> 16; 732 + if ((i + 3) < (len + padding)) 733 + skb->data[i + 3] = value >> 24; 734 + } 735 + } else { 736 + u16 value; 737 + 738 + for (i = padding; i < len + padding; i += 2) { 739 + value = b43_piorx_read16(q, B43_PIO_RXDATA); 740 + skb->data[i] = value; 741 + if ((i + 1) < (len + padding)) 742 + skb->data[i + 1] = value >> 8; 743 + } 744 + } 745 + 746 + b43_rx(q->dev, skb, &rxhdr); 747 + 748 + return 1; 749 + 750 + rx_error: 751 + if (err_msg) 752 + b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg); 753 + b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY); 754 + return 1; 755 + } 756 + 757 + /* RX workqueue. We can sleep, yay! */ 758 + static void b43_pio_rx_work(struct work_struct *work) 759 + { 760 + struct b43_pio_rxqueue *q = container_of(work, struct b43_pio_rxqueue, 761 + rx_work); 762 + unsigned int budget = 50; 763 + bool stop; 764 + 765 + do { 766 + spin_lock_irq(&q->lock); 767 + stop = (pio_rx_frame(q) == 0); 768 + spin_unlock_irq(&q->lock); 769 + cond_resched(); 770 + if (stop) 771 + break; 772 + } while (--budget); 773 + } 774 + 775 + /* Called with IRQs disabled. 
*/ 776 + void b43_pio_rx(struct b43_pio_rxqueue *q) 777 + { 778 + /* Due to latency issues we must run the RX path in 779 + * a workqueue to be able to schedule between packets. */ 780 + queue_work(q->dev->wl->hw->workqueue, &q->rx_work); 781 + } 782 + 783 + static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q) 784 + { 785 + unsigned long flags; 786 + 787 + spin_lock_irqsave(&q->lock, flags); 788 + if (q->rev >= 8) { 789 + b43_piotx_write32(q, B43_PIO8_TXCTL, 790 + b43_piotx_read32(q, B43_PIO8_TXCTL) 791 + | B43_PIO8_TXCTL_SUSPREQ); 792 + } else { 793 + b43_piotx_write16(q, B43_PIO_TXCTL, 794 + b43_piotx_read16(q, B43_PIO_TXCTL) 795 + | B43_PIO_TXCTL_SUSPREQ); 796 + } 797 + spin_unlock_irqrestore(&q->lock, flags); 798 + } 799 + 800 + static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q) 801 + { 802 + unsigned long flags; 803 + 804 + spin_lock_irqsave(&q->lock, flags); 805 + if (q->rev >= 8) { 806 + b43_piotx_write32(q, B43_PIO8_TXCTL, 807 + b43_piotx_read32(q, B43_PIO8_TXCTL) 808 + & ~B43_PIO8_TXCTL_SUSPREQ); 809 + } else { 810 + b43_piotx_write16(q, B43_PIO_TXCTL, 811 + b43_piotx_read16(q, B43_PIO_TXCTL) 812 + & ~B43_PIO_TXCTL_SUSPREQ); 813 + } 814 + spin_unlock_irqrestore(&q->lock, flags); 815 + } 816 + 817 + void b43_pio_tx_suspend(struct b43_wldev *dev) 818 + { 819 + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); 820 + b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK); 821 + b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE); 822 + b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI); 823 + b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO); 824 + b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast); 825 + } 826 + 827 + void b43_pio_tx_resume(struct b43_wldev *dev) 828 + { 829 + b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast); 830 + b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO); 831 + b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI); 832 + b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE); 833 + b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK); 834 
+ b43_power_saving_ctl_bits(dev, 0); 835 + }
+220
drivers/net/wireless/b43/pio.h
#ifndef B43_PIO_H_
#define B43_PIO_H_

#include "b43.h"

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/skbuff.h>


/*** Registers for PIO queues up to revision 7. ***/
/* TX queue. Offsets are relative to the queue's MMIO base. */
#define B43_PIO_TXCTL			0x00
#define  B43_PIO_TXCTL_WRITELO		0x0001
#define  B43_PIO_TXCTL_WRITEHI		0x0002
#define  B43_PIO_TXCTL_EOF		0x0004
#define  B43_PIO_TXCTL_FREADY		0x0008
#define  B43_PIO_TXCTL_FLUSHREQ		0x0020
#define  B43_PIO_TXCTL_FLUSHPEND	0x0040
#define  B43_PIO_TXCTL_SUSPREQ		0x0080
#define  B43_PIO_TXCTL_QSUSP		0x0100
#define  B43_PIO_TXCTL_COMMCNT		0xFC00
#define  B43_PIO_TXCTL_COMMCNT_SHIFT	10
#define B43_PIO_TXDATA			0x02
#define B43_PIO_TXQBUFSIZE		0x04
/* RX queue. */
#define B43_PIO_RXCTL			0x00
#define  B43_PIO_RXCTL_FRAMERDY		0x0001
#define  B43_PIO_RXCTL_DATARDY		0x0002
#define B43_PIO_RXDATA			0x02

/*** Registers for PIO queues revision 8 and later.
 *** These queues use 32bit register access. ***/
/* TX queue */
#define B43_PIO8_TXCTL			0x00
#define  B43_PIO8_TXCTL_0_7		0x00000001
#define  B43_PIO8_TXCTL_8_15		0x00000002
#define  B43_PIO8_TXCTL_16_23		0x00000004
#define  B43_PIO8_TXCTL_24_31		0x00000008
#define  B43_PIO8_TXCTL_EOF		0x00000010
#define  B43_PIO8_TXCTL_FREADY		0x00000080
#define  B43_PIO8_TXCTL_SUSPREQ		0x00000100
#define  B43_PIO8_TXCTL_QSUSP		0x00000200
#define  B43_PIO8_TXCTL_FLUSHREQ	0x00000400
#define  B43_PIO8_TXCTL_FLUSHPEND	0x00000800
#define B43_PIO8_TXDATA			0x04
/* RX queue */
#define B43_PIO8_RXCTL			0x00
#define  B43_PIO8_RXCTL_FRAMERDY	0x00000001
#define  B43_PIO8_RXCTL_DATARDY		0x00000002
#define B43_PIO8_RXDATA			0x04


/* The maximum number of TX-packets the HW can handle. */
#define B43_PIO_MAX_NR_TXPACKETS	32


#ifdef CONFIG_B43_PIO

/* Per-frame TX metadata kept while a frame is in flight. */
struct b43_pio_txpacket {
	/* Pointer to the TX queue we belong to. */
	struct b43_pio_txqueue *queue;
	/* The TX data packet. */
	struct sk_buff *skb;
	/* The status meta data. */
	struct ieee80211_tx_status txstat;
	/* Index in the (struct b43_pio_txqueue)->packets array. */
	u8 index;

	/* Entry in the queue's packets_list free list. */
	struct list_head list;
};

struct b43_pio_txqueue {
	struct b43_wldev *dev;
	spinlock_t lock;
	u16 mmio_base;

	/* The device queue buffer size in bytes. */
	u16 buffer_size;
	/* The number of used bytes in the device queue buffer. */
	u16 buffer_used;
	/* The number of packets that can still get queued.
	 * This is decremented on queueing a packet and incremented
	 * after receiving the transmit status. */
	u16 free_packet_slots;

	/* True, if the mac80211 queue was stopped due to overflow at TX. */
	bool stopped;
	/* Our b43 queue index number */
	u8 index;
	/* The mac80211 QoS queue priority. */
	u8 queue_prio;

	/* Buffer for TX packet meta data. */
	struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS];
	/* Free list of currently unused entries in packets[]. */
	struct list_head packets_list;

	/* Total number of transmitted packets. */
	unsigned int nr_tx_packets;

	/* Shortcut to the 802.11 core revision. This is to
	 * avoid horrible pointer dereferencing in the fastpaths. */
	u8 rev;
};

struct b43_pio_rxqueue {
	struct b43_wldev *dev;
	spinlock_t lock;
	u16 mmio_base;

	/* Work to reduce latency issues on RX. */
	struct work_struct rx_work;

	/* Shortcut to the 802.11 core revision. This is to
	 * avoid horrible pointer dereferencing in the fastpaths. */
	u8 rev;
};


/* MMIO accessors for TX queue registers.
 * "offset" is relative to the queue's mmio_base. */
static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset)
{
	return b43_read16(q->dev, q->mmio_base + offset);
}

static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset)
{
	return b43_read32(q->dev, q->mmio_base + offset);
}

static inline void b43_piotx_write16(struct b43_pio_txqueue *q,
				     u16 offset, u16 value)
{
	b43_write16(q->dev, q->mmio_base + offset, value);
}

static inline void b43_piotx_write32(struct b43_pio_txqueue *q,
				     u16 offset, u32 value)
{
	b43_write32(q->dev, q->mmio_base + offset, value);
}


/* MMIO accessors for RX queue registers. */
static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset)
{
	return b43_read16(q->dev, q->mmio_base + offset);
}

static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset)
{
	return b43_read32(q->dev, q->mmio_base + offset);
}

static inline void b43_piorx_write16(struct b43_pio_rxqueue *q,
				     u16 offset, u16 value)
{
	b43_write16(q->dev, q->mmio_base + offset, value);
}

static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
				     u16 offset, u32 value)
{
	b43_write32(q->dev, q->mmio_base + offset, value);
}


int b43_pio_init(struct b43_wldev *dev);
void b43_pio_stop(struct b43_wldev *dev);
void b43_pio_free(struct b43_wldev *dev);

int b43_pio_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl);
void b43_pio_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status);
void b43_pio_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats);
void b43_pio_rx(struct b43_pio_rxqueue *q);

void b43_pio_tx_suspend(struct b43_wldev *dev);
void b43_pio_tx_resume(struct b43_wldev *dev);


#else /* CONFIG_B43_PIO */

/* No-op stubs, so callers need no #ifdefs when PIO is compiled out. */

static inline int b43_pio_init(struct b43_wldev *dev)
{
	return 0;
}
static inline void b43_pio_free(struct b43_wldev *dev)
{
}
static inline void b43_pio_stop(struct b43_wldev *dev)
{
}
static inline int b43_pio_tx(struct b43_wldev *dev,
			     struct sk_buff *skb,
			     struct ieee80211_tx_control *ctl)
{
	return 0;
}
static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
					   const struct b43_txstatus *status)
{
}
static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
					struct ieee80211_tx_queue_stats *stats)
{
}
static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
{
}
static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
{
}
static inline void b43_pio_tx_resume(struct b43_wldev *dev)
{
}

#endif /* CONFIG_B43_PIO */
#endif /* B43_PIO_H_ */
+36 -21
drivers/net/wireless/b43/xmit.c
#include "xmit.h"
#include "phy.h"
#include "dma.h"
#include "pio.h"

/* ... (unchanged parts of xmit.c elided from this chunk) ... */

/* Extract the bitrate index out of a CCK PLCP header. */

/* ... (unchanged parts of xmit.c elided from this chunk) ... */

		dev->wl->ieee_stats.dot11RTSSuccessCount++;
	}

	/* Dispatch the TX status report to the active transfer engine. */
	if (b43_using_pio_transfers(dev))
		b43_pio_handle_txstatus(dev, status);
	else
		b43_dma_handle_txstatus(dev, status);
}

/* Fill out the mac80211 TXstatus report based on the b43-specific
 * txstatus report data. This returns a boolean whether the frame was
 * successfully transmitted. */
bool b43_fill_txstatus_report(struct ieee80211_tx_status *report,
			      const struct b43_txstatus *status)
{
	bool frame_success = 1;

	if (status->acked) {
		/* The frame was ACKed. */
		report->flags |= IEEE80211_TX_STATUS_ACK;
	} else {
		/* The frame was not ACKed... */
		if (!(report->control.flags & IEEE80211_TXCTL_NO_ACK)) {
			/* ...but we expected an ACK. */
			frame_success = 0;
			report->excessive_retries = 1;
		}
	}
	if (status->frame_count == 0) {
		/* The frame was not transmitted at all. */
		report->retry_count = 0;
	} else
		report->retry_count = status->frame_count - 1;

	return frame_success;
}

/* Stop any TX operation on the device (suspend the hardware queues) */
void b43_tx_suspend(struct b43_wldev *dev)
{
	if (b43_using_pio_transfers(dev))
		b43_pio_tx_suspend(dev);
	else
		b43_dma_tx_suspend(dev);
}

/* Resume any TX operation on the device (resume the hardware queues) */
void b43_tx_resume(struct b43_wldev *dev)
{
	if (b43_using_pio_transfers(dev))
		b43_pio_tx_resume(dev);
	else
		b43_dma_tx_resume(dev);
}
+2 -15
drivers/net/wireless/b43/xmit.h
	B43_TXST_SUPP_ABNACK,	/* Afterburner NACK */
};

/* Receive header for v4 firmware. */
struct b43_rxhdr_fw4 {
	__le16 frame_len;	/* Frame length */

/* ... (remainder of struct b43_rxhdr_fw4 and other declarations
 * elided from this chunk) ... */

void b43_handle_txstatus(struct b43_wldev *dev,
			 const struct b43_txstatus *status);
/* Translate a b43_txstatus into a mac80211 ieee80211_tx_status report.
 * Returns whether the frame was transmitted successfully. */
bool b43_fill_txstatus_report(struct ieee80211_tx_status *report,
			      const struct b43_txstatus *status);

void b43_tx_suspend(struct b43_wldev *dev);
void b43_tx_resume(struct b43_wldev *dev);