Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/wireless/b43/dma.c at v3.9-rc8 (1820 lines, 48 kB)

/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2

static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 uninitialized_var(addr);

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}
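
/* In short: the SSB "translation" (routing) bits occupy the
 * SSB_DMA_TRANSLATION_MASK portion of either the low or the high
 * address word, depending on the core (see
 * b43_dma_translation_in_low_word() below). The physical address bits
 * they displace are returned separately as B43_DMA_ADDR_EXT and end up
 * in the descriptor's addrext field.
 */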

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};
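
/* The 64bit flavour below mirrors these ops one-to-one. The rest of
 * the driver only ever calls through the b43_dma_ops vtable, so the
 * descriptor-layout differences stay contained to the op32_ and op64_
 * helpers. */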

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}
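
/* Note that next_slot() deliberately accepts -1, so a freshly
 * initialized TX ring (current_slot == -1, see b43_setup_dmaring())
 * hands out slot 0 first. */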

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		if (ring->tx)
			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
		else
			dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}
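
/* The helpers above are thin wrappers around the streaming DMA API.
 * A buffer must be unmapped and synced with the same size and
 * direction it was mapped with, which is why TX (DMA_TO_DEVICE) and
 * RX (DMA_FROM_DEVICE) are distinguished at every call site. */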

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of the hardware bug. If bit
	 * 0x00001000 is used in DMA address, some hardware (like BCM4331)
	 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
	 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
	 * more than 256 slots for ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, ring_mem_size);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
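
/* The reset protocol implemented above: wait for the TX engine to
 * drain into a disabled/idle-wait/stopped state, clear the control
 * register, then poll until the engine reports disabled. The loops
 * signal success by setting i to -1 before breaking out. */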

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset +
		    sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}
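
/* The poison scheme: b43_poison_rx_buffer() zeroes the length field in
 * the RX header and fills the start of the frame area with 0xFF bytes,
 * so a buffer that the device never wrote to is recognizable later via
 * b43_rx_buffer_is_poisoned(). */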

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
	}

	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}
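
/* DMA width detection, summarized: 64-bit capability is advertised by
 * the bus (BCMA_IOST_DMA64 / SSB_TMSHIGH_DMA64). Otherwise, writing
 * the addrext mask to the first 32-bit TX control register and reading
 * it back distinguishes a 32-bit engine from a 30-bit one. */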

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
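
/* Reminder on do_div() semantics: do_div(n, base) divides the 64-bit n
 * in place and returns the remainder. Hence divide() evaluates to the
 * quotient and modulo() to the remainder of the division. */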

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
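
/* The fallback order above (64 -> 32 -> 30 bit) matches
 * dma_mask_to_engine_type(): hardware that can address a wide mask can
 * always operate behind a narrower one. */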

/* Some hardware with 64-bit DMA seems to be bugged and looks for the
 * translation bit in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return 1;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
		return 1;
#endif
	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
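
/* Worked example: tx_ring_AC_VI is set up with controller_index 2, so
 * a frame whose header sits in slot 5 of that ring gets cookie 0x3005,
 * which parse_cookie() below resolves back to ring and slot. */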

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}
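
/* This is the usual mac80211 access-category ordering: queue 0 is
 * voice (AC_VO) down to queue 3 background (AC_BK). Without QoS,
 * everything funnels into the best-effort ring. */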

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}
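
/* TX status handling below: the firmware reports one status per frame.
 * Each frame occupies TX_SLOTS_PER_FRAME (2) slots: a header slot
 * (meta->skb == NULL) followed by the payload slot, which carries
 * is_last_fragment and the skb. */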

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged.
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and freed. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
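
/* Process a single received frame. The frame length is written into
 * the RX header by the device, so a zero length may just mean the
 * write has not landed yet; hence the brief udelay() retry loop
 * below. */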

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
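
/* Drain the RX ring: process every filled slot up to the hardware's
 * current slot pointer, then write the new index back so the device
 * can reuse the buffers. */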

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}