Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.0 · 1671 lines · 44 kB

/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2


/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

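/* Worked example of the address split above (values chosen for
 * illustration, with SSB_DMA_TRANSLATION_MASK assumed to be its usual
 * SSB value of 0xC0000000 and the shift 30):
 *
 *   dmaaddr                                  = 0x9ABC1000
 *   addr    = dmaaddr & ~MASK                = 0x1ABC1000
 *   addrext = (dmaaddr & MASK) >> 30         = 0x2
 *
 * The two address-extension bits travel in the descriptor control word,
 * and the core's translation bits are ORed back into the low address so
 * the device sees a bus address inside its DMA window. */
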
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ring->dev->dma.translation << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

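/* Wraparound example: on a ring with nr_slots == 256, next_slot(ring, 255)
 * returns 0 and prev_slot(ring, 0) returns 255. next_slot() also accepts
 * slot == -1 (yielding 0), because an empty TX ring initializes
 * current_slot to -1; the first request_slot() then hands out slot 0. */
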
#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->sdev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->sdev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->sdev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->sdev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

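/* Buffer mapping lifecycle in this file: TX buffers (cached device headers
 * and skb payloads) are mapped DMA_TO_DEVICE by the helpers above and
 * unmapped in b43_dma_handle_txstatus() once the device reports
 * completion. RX buffers are mapped DMA_FROM_DEVICE when attached to a
 * slot and stay mapped until the frame is handed to mac80211, with
 * sync_descbuffer_for_cpu()/_for_device() bracketing device accesses. */
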
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(ring->dev->sdev->dma_dev,
					    B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	dma_free_coherent(ring->dev->sdev->dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

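/* Note on the polling convention used by the RX reset above and the TX
 * reset below: the loop index doubles as a success flag. It is forced to
 * -1 when the engine reports the DISABLED state, so "i != -1" after the
 * loop means all ten 1 ms polls expired without reaching that state,
 * i.e. a timeout. */
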
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->sdev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

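/* Example of the range check above, for a B43_DMA_30BIT engine: a buffer
 * mapped at addr == 0x3FFFF000 with buffersize == 0x2000 ends at
 * 0x40001000, which is past the 1 GiB (1ULL << 30) window the engine can
 * address, so the mapping is rejected and immediately unmapped again. */
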
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

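/* The poison pattern written above covers sizeof(struct b43_plcp_hdr6)
 * plus 2 padding bytes at the frame offset. Assuming a 6-byte PLCP
 * header, that is exactly the 8 bytes that b43_rx_buffer_is_poisoned()
 * ANDs together: the AND stays 0xFF only if the device never wrote to
 * the buffer, since any received frame overwrites at least one byte. */
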
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

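/* After dmacontroller_setup() returns for an RX ring, the RX index
 * register has been programmed to nr_slots * descriptor size, i.e. just
 * past the final slot: every slot already has a buffer attached, so the
 * whole ring is handed to the device at once. The device fills slots and
 * advances RXSTATUS; the host follows it in b43_dma_rx() further below. */
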
/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

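/* The 32-bit capability probe in supported_dma_mask() above works by
 * writing the address-extension mask into TXCTL of the first controller
 * and reading it back: if the ADDREXT bits stick, the core implements
 * 32-bit descriptors; 30-bit engines do not have those bits. 64-bit
 * capability is reported directly via the SSB_TMSHIGH_DMA64 flag. */
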
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->sdev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->sdev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->sdev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

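/* do_div(x, y) divides the 64-bit lvalue x by the 32-bit y in place and
 * returns the remainder, hence the two wrappers above: divide() yields
 * the quotient, modulo() the remainder. Example: with a == 1234 and
 * b == 10, divide(a, b) == 123 and modulo(a, b) == 4. */
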
/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->sdev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->sdev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;
	dma->translation = ssb_dma_translation(dev->sdev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->sdev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

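/* Cookie layout example: for the AC_VI ring (ring->index == 2), slot 5
 * encodes as ((2 + 1) << 12) | 5 == 0x3005. parse_cookie() below inverts
 * this: the 0x3000 nibble selects tx_ring_AC_VI and the low 12 bits give
 * back the slot. */
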
/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

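/* Resulting slot layout for one frame (TX_SLOTS_PER_FRAME == 2):
 *
 *   slot k   : device TX header from txhdr_cache  (FRAMESTART)
 *   slot k+1 : skb payload                        (FRAMEEND | IRQ)
 *
 * Only the payload descriptor raises an interrupt, so TX status is
 * reported once per frame rather than once per descriptor. */
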
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}

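/* Example of the first-used-slot recovery in b43_dma_handle_txstatus()
 * below: with nr_slots == 256, current_slot == 2 and used_slots == 4,
 * firstused = 2 - 4 + 1 = -1, which wraps to 255. That is the oldest
 * in-flight slot, i.e. the one whose status the firmware must report
 * first if completions arrive in order. */
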
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		desc = ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			    b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}