Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ks8842: Support DMA when accessed via timberdale

This patch adds support for RX and TX DMA via the DMA API,
this is only supported when the KS8842 is accessed via timberdale.

There is no support for DMA on the generic bus interface itself;
a state machine inside the FPGA is handling RX and TX transfers to/from
buffers in the FPGA. The host CPU can do DMA to and from these buffers.

The FPGA has to handle the RX interrupts, so these must be enabled in
the ks8842 but not in the FPGA. The driver must not disable the RX interrupt;
that would mean that the data transfers into the FPGA buffers would stop.

The host shall not enable TX interrupts since TX is handled by the FPGA,
the host is notified by DMA callbacks when transfers are finished.

The DMA channels to use are specified as parameters in the platform data struct.

Signed-off-by: Richard Röjfors <richard.rojfors@pelagicore.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Richard Röjfors and committed by
David S. Miller
94fe8c68 3eeb2997

+447 -21
+443 -21
drivers/net/ks8842.c
··· 30 30 #include <linux/etherdevice.h> 31 31 #include <linux/ethtool.h> 32 32 #include <linux/ks8842.h> 33 + #include <linux/dmaengine.h> 34 + #include <linux/dma-mapping.h> 35 + #include <linux/scatterlist.h> 33 36 34 37 #define DRV_NAME "ks8842" 35 38 ··· 85 82 #define IRQ_RX_ERROR 0x0080 86 83 #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ 87 84 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) 85 + /* When running via timberdale in DMA mode, the RX interrupt should be 86 + enabled in the KS8842, but not in the FPGA IP, since the IP handles 87 + RX DMA internally. 88 + TX interrupts are not needed it is handled by the FPGA the driver is 89 + notified via DMA callbacks. 90 + */ 91 + #define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \ 92 + IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) 93 + #define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX) 88 94 #define REG_ISR 0x02 89 95 #define REG_RXSR 0x04 90 96 #define RXSR_VALID 0x8000 ··· 136 124 #define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ 137 125 #define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ 138 126 127 + #define DMA_BUFFER_SIZE 2048 128 + 129 + struct ks8842_tx_dma_ctl { 130 + struct dma_chan *chan; 131 + struct dma_async_tx_descriptor *adesc; 132 + void *buf; 133 + struct scatterlist sg; 134 + int channel; 135 + }; 136 + 137 + struct ks8842_rx_dma_ctl { 138 + struct dma_chan *chan; 139 + struct dma_async_tx_descriptor *adesc; 140 + struct sk_buff *skb; 141 + struct scatterlist sg; 142 + struct tasklet_struct tasklet; 143 + int channel; 144 + }; 145 + 146 + #define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \ 147 + ((adapter)->dma_rx.channel != -1)) 148 + 139 149 struct ks8842_adapter { 140 150 void __iomem *hw_addr; 141 151 int irq; ··· 166 132 spinlock_t lock; /* spinlock to be interrupt safe */ 167 133 struct work_struct timeout_work; 168 134 struct net_device *netdev; 135 + struct device *dev; 136 + struct ks8842_tx_dma_ctl 
dma_tx; 137 + struct ks8842_rx_dma_ctl dma_rx; 169 138 }; 139 + 140 + static void ks8842_dma_rx_cb(void *data); 141 + static void ks8842_dma_tx_cb(void *data); 142 + 143 + static inline void ks8842_resume_dma(struct ks8842_adapter *adapter) 144 + { 145 + iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); 146 + } 170 147 171 148 static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) 172 149 { ··· 342 297 ks8842_write16(adapter, 18, 0xffff, REG_ISR); 343 298 344 299 /* enable interrupts */ 345 - ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 346 - 300 + if (KS8842_USE_DMA(adapter)) { 301 + /* When running in DMA Mode the RX interrupt is not enabled in 302 + timberdale because RX data is received by DMA callbacks 303 + it must still be enabled in the KS8842 because it indicates 304 + to timberdale when there is RX data for it's DMA FIFOs */ 305 + iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); 306 + ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); 307 + } else { 308 + if (!(adapter->conf_flags & MICREL_KS884X)) 309 + iowrite16(ENABLED_IRQS, 310 + adapter->hw_addr + REG_TIMB_IER); 311 + ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 312 + } 347 313 /* enable the switch */ 348 314 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); 349 315 } ··· 427 371 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; 428 372 } 429 373 374 + static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) 375 + { 376 + struct ks8842_adapter *adapter = netdev_priv(netdev); 377 + struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; 378 + u8 *buf = ctl->buf; 379 + 380 + if (ctl->adesc) { 381 + netdev_dbg(netdev, "%s: TX ongoing\n", __func__); 382 + /* transfer ongoing */ 383 + return NETDEV_TX_BUSY; 384 + } 385 + 386 + sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); 387 + 388 + /* copy data to the TX buffer */ 389 + /* the control word, enable IRQ, port 1 and the length */ 390 + *buf++ = 0x00; 391 + *buf++ = 
0x01; /* Port 1 */ 392 + *buf++ = skb->len & 0xff; 393 + *buf++ = (skb->len >> 8) & 0xff; 394 + skb_copy_from_linear_data(skb, buf, skb->len); 395 + 396 + dma_sync_single_range_for_device(adapter->dev, 397 + sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), 398 + DMA_TO_DEVICE); 399 + 400 + /* make sure the length is a multiple of 4 */ 401 + if (sg_dma_len(&ctl->sg) % 4) 402 + sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; 403 + 404 + ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, 405 + &ctl->sg, 1, DMA_TO_DEVICE, 406 + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 407 + if (!ctl->adesc) 408 + return NETDEV_TX_BUSY; 409 + 410 + ctl->adesc->callback_param = netdev; 411 + ctl->adesc->callback = ks8842_dma_tx_cb; 412 + ctl->adesc->tx_submit(ctl->adesc); 413 + 414 + netdev->stats.tx_bytes += skb->len; 415 + 416 + dev_kfree_skb(skb); 417 + 418 + return NETDEV_TX_OK; 419 + } 420 + 430 421 static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) 431 422 { 432 423 struct ks8842_adapter *adapter = netdev_priv(netdev); ··· 525 422 return NETDEV_TX_OK; 526 423 } 527 424 425 + static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status) 426 + { 427 + netdev_dbg(netdev, "RX error, status: %x\n", status); 428 + 429 + netdev->stats.rx_errors++; 430 + if (status & RXSR_TOO_LONG) 431 + netdev->stats.rx_length_errors++; 432 + if (status & RXSR_CRC_ERROR) 433 + netdev->stats.rx_crc_errors++; 434 + if (status & RXSR_RUNT) 435 + netdev->stats.rx_frame_errors++; 436 + } 437 + 438 + static void ks8842_update_rx_counters(struct net_device *netdev, u32 status, 439 + int len) 440 + { 441 + netdev_dbg(netdev, "RX packet, len: %d\n", len); 442 + 443 + netdev->stats.rx_packets++; 444 + netdev->stats.rx_bytes += len; 445 + if (status & RXSR_MULTICAST) 446 + netdev->stats.multicast++; 447 + } 448 + 449 + static int __ks8842_start_new_rx_dma(struct net_device *netdev) 450 + { 451 + struct ks8842_adapter *adapter = 
netdev_priv(netdev); 452 + struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; 453 + struct scatterlist *sg = &ctl->sg; 454 + int err; 455 + 456 + ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); 457 + if (ctl->skb) { 458 + sg_init_table(sg, 1); 459 + sg_dma_address(sg) = dma_map_single(adapter->dev, 460 + ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); 461 + err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); 462 + if (unlikely(err)) { 463 + sg_dma_address(sg) = 0; 464 + goto out; 465 + } 466 + 467 + sg_dma_len(sg) = DMA_BUFFER_SIZE; 468 + 469 + ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, 470 + sg, 1, DMA_FROM_DEVICE, 471 + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 472 + 473 + if (!ctl->adesc) 474 + goto out; 475 + 476 + ctl->adesc->callback_param = netdev; 477 + ctl->adesc->callback = ks8842_dma_rx_cb; 478 + ctl->adesc->tx_submit(ctl->adesc); 479 + } else { 480 + err = -ENOMEM; 481 + sg_dma_address(sg) = 0; 482 + goto out; 483 + } 484 + 485 + return err; 486 + out: 487 + if (sg_dma_address(sg)) 488 + dma_unmap_single(adapter->dev, sg_dma_address(sg), 489 + DMA_BUFFER_SIZE, DMA_FROM_DEVICE); 490 + sg_dma_address(sg) = 0; 491 + if (ctl->skb) 492 + dev_kfree_skb(ctl->skb); 493 + 494 + ctl->skb = NULL; 495 + 496 + printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); 497 + return err; 498 + } 499 + 500 + static void ks8842_rx_frame_dma_tasklet(unsigned long arg) 501 + { 502 + struct net_device *netdev = (struct net_device *)arg; 503 + struct ks8842_adapter *adapter = netdev_priv(netdev); 504 + struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; 505 + struct sk_buff *skb = ctl->skb; 506 + dma_addr_t addr = sg_dma_address(&ctl->sg); 507 + u32 status; 508 + 509 + ctl->adesc = NULL; 510 + 511 + /* kick next transfer going */ 512 + __ks8842_start_new_rx_dma(netdev); 513 + 514 + /* now handle the data we got */ 515 + dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); 516 + 517 + status = *((u32 
*)skb->data); 518 + 519 + netdev_dbg(netdev, "%s - rx_data: status: %x\n", 520 + __func__, status & 0xffff); 521 + 522 + /* check the status */ 523 + if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 524 + int len = (status >> 16) & 0x7ff; 525 + 526 + ks8842_update_rx_counters(netdev, status, len); 527 + 528 + /* reserve 4 bytes which is the status word */ 529 + skb_reserve(skb, 4); 530 + skb_put(skb, len); 531 + 532 + skb->protocol = eth_type_trans(skb, netdev); 533 + netif_rx(skb); 534 + } else { 535 + ks8842_update_rx_err_counters(netdev, status); 536 + dev_kfree_skb(skb); 537 + } 538 + } 539 + 528 540 static void ks8842_rx_frame(struct net_device *netdev, 529 541 struct ks8842_adapter *adapter) 530 542 { ··· 663 445 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 664 446 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); 665 447 666 - netdev_dbg(netdev, "%s, got package, len: %d\n", __func__, len); 667 448 if (skb) { 668 449 669 - netdev->stats.rx_packets++; 670 - netdev->stats.rx_bytes += len; 671 - if (status & RXSR_MULTICAST) 672 - netdev->stats.multicast++; 450 + ks8842_update_rx_counters(netdev, status, len); 673 451 674 452 if (adapter->conf_flags & KS884X_16BIT) { 675 453 u16 *data16 = (u16 *)skb_put(skb, len); ··· 691 477 netif_rx(skb); 692 478 } else 693 479 netdev->stats.rx_dropped++; 694 - } else { 695 - netdev_dbg(netdev, "RX error, status: %x\n", status); 696 - netdev->stats.rx_errors++; 697 - if (status & RXSR_TOO_LONG) 698 - netdev->stats.rx_length_errors++; 699 - if (status & RXSR_CRC_ERROR) 700 - netdev->stats.rx_crc_errors++; 701 - if (status & RXSR_RUNT) 702 - netdev->stats.rx_frame_errors++; 703 - } 480 + } else 481 + ks8842_update_rx_err_counters(netdev, status); 704 482 705 483 /* set high watermark to 3K */ 706 484 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); ··· 747 541 isr = ks8842_read16(adapter, 18, REG_ISR); 748 542 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); 749 543 544 + /* when running in 
DMA mode, do not ack RX interrupts, it is handled 545 + internally by timberdale, otherwise it's DMA FIFO:s would stop 546 + */ 547 + if (KS8842_USE_DMA(adapter)) 548 + isr &= ~IRQ_RX; 549 + 750 550 /* Ack */ 751 551 ks8842_write16(adapter, 18, isr, REG_ISR); 752 552 ··· 766 554 if (isr & IRQ_LINK_CHANGE) 767 555 ks8842_update_link_status(netdev, adapter); 768 556 769 - if (isr & (IRQ_RX | IRQ_RX_ERROR)) 557 + /* should not get IRQ_RX when running DMA mode */ 558 + if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter)) 770 559 ks8842_handle_rx(netdev, adapter); 771 560 561 + /* should only happen when in PIO mode */ 772 562 if (isr & IRQ_TX) 773 563 ks8842_handle_tx(netdev, adapter); 774 564 ··· 789 575 790 576 /* re-enable interrupts, put back the bank selection register */ 791 577 spin_lock_irqsave(&adapter->lock, flags); 792 - ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 578 + if (KS8842_USE_DMA(adapter)) 579 + ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); 580 + else 581 + ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 793 582 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 583 + 584 + /* Make sure timberdale continues DMA operations, they are stopped while 585 + we are handling the ks8842 because we might change bank */ 586 + if (KS8842_USE_DMA(adapter)) 587 + ks8842_resume_dma(adapter); 588 + 794 589 spin_unlock_irqrestore(&adapter->lock, flags); 795 590 } 796 591 ··· 815 592 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); 816 593 817 594 if (isr) { 818 - /* disable IRQ */ 819 - ks8842_write16(adapter, 18, 0x00, REG_IER); 595 + if (KS8842_USE_DMA(adapter)) 596 + /* disable all but RX IRQ, since the FPGA relies on it*/ 597 + ks8842_write16(adapter, 18, IRQ_RX, REG_IER); 598 + else 599 + /* disable IRQ */ 600 + ks8842_write16(adapter, 18, 0x00, REG_IER); 820 601 821 602 /* schedule tasklet */ 822 603 tasklet_schedule(&adapter->tasklet); ··· 830 603 831 604 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 
832 605 606 + /* After an interrupt, tell timberdale to continue DMA operations. 607 + DMA is disabled while we are handling the ks8842 because we might 608 + change bank */ 609 + ks8842_resume_dma(adapter); 610 + 833 611 return ret; 834 612 } 835 613 614 + static void ks8842_dma_rx_cb(void *data) 615 + { 616 + struct net_device *netdev = data; 617 + struct ks8842_adapter *adapter = netdev_priv(netdev); 618 + 619 + netdev_dbg(netdev, "RX DMA finished\n"); 620 + /* schedule tasklet */ 621 + if (adapter->dma_rx.adesc) 622 + tasklet_schedule(&adapter->dma_rx.tasklet); 623 + } 624 + 625 + static void ks8842_dma_tx_cb(void *data) 626 + { 627 + struct net_device *netdev = data; 628 + struct ks8842_adapter *adapter = netdev_priv(netdev); 629 + struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; 630 + 631 + netdev_dbg(netdev, "TX DMA finished\n"); 632 + 633 + if (!ctl->adesc) 634 + return; 635 + 636 + netdev->stats.tx_packets++; 637 + ctl->adesc = NULL; 638 + 639 + if (netif_queue_stopped(netdev)) 640 + netif_wake_queue(netdev); 641 + } 642 + 643 + static void ks8842_stop_dma(struct ks8842_adapter *adapter) 644 + { 645 + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; 646 + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; 647 + 648 + tx_ctl->adesc = NULL; 649 + if (tx_ctl->chan) 650 + tx_ctl->chan->device->device_control(tx_ctl->chan, 651 + DMA_TERMINATE_ALL, 0); 652 + 653 + rx_ctl->adesc = NULL; 654 + if (rx_ctl->chan) 655 + rx_ctl->chan->device->device_control(rx_ctl->chan, 656 + DMA_TERMINATE_ALL, 0); 657 + 658 + if (sg_dma_address(&rx_ctl->sg)) 659 + dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), 660 + DMA_BUFFER_SIZE, DMA_FROM_DEVICE); 661 + sg_dma_address(&rx_ctl->sg) = 0; 662 + 663 + dev_kfree_skb(rx_ctl->skb); 664 + rx_ctl->skb = NULL; 665 + } 666 + 667 + static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter) 668 + { 669 + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; 670 + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; 
671 + 672 + ks8842_stop_dma(adapter); 673 + 674 + if (tx_ctl->chan) 675 + dma_release_channel(tx_ctl->chan); 676 + tx_ctl->chan = NULL; 677 + 678 + if (rx_ctl->chan) 679 + dma_release_channel(rx_ctl->chan); 680 + rx_ctl->chan = NULL; 681 + 682 + tasklet_kill(&rx_ctl->tasklet); 683 + 684 + if (sg_dma_address(&tx_ctl->sg)) 685 + dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), 686 + DMA_BUFFER_SIZE, DMA_TO_DEVICE); 687 + sg_dma_address(&tx_ctl->sg) = 0; 688 + 689 + kfree(tx_ctl->buf); 690 + tx_ctl->buf = NULL; 691 + } 692 + 693 + static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param) 694 + { 695 + return chan->chan_id == (int)filter_param; 696 + } 697 + 698 + static int ks8842_alloc_dma_bufs(struct net_device *netdev) 699 + { 700 + struct ks8842_adapter *adapter = netdev_priv(netdev); 701 + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; 702 + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; 703 + int err; 704 + 705 + dma_cap_mask_t mask; 706 + 707 + dma_cap_zero(mask); 708 + dma_cap_set(DMA_SLAVE, mask); 709 + dma_cap_set(DMA_PRIVATE, mask); 710 + 711 + sg_init_table(&tx_ctl->sg, 1); 712 + 713 + tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, 714 + (void *)tx_ctl->channel); 715 + if (!tx_ctl->chan) { 716 + err = -ENODEV; 717 + goto err; 718 + } 719 + 720 + /* allocate DMA buffer */ 721 + tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); 722 + if (!tx_ctl->buf) { 723 + err = -ENOMEM; 724 + goto err; 725 + } 726 + 727 + sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, 728 + tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); 729 + err = dma_mapping_error(adapter->dev, 730 + sg_dma_address(&tx_ctl->sg)); 731 + if (err) { 732 + sg_dma_address(&tx_ctl->sg) = 0; 733 + goto err; 734 + } 735 + 736 + rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, 737 + (void *)rx_ctl->channel); 738 + if (!rx_ctl->chan) { 739 + err = -ENODEV; 740 + goto err; 741 + } 742 + 743 + tasklet_init(&rx_ctl->tasklet, 
ks8842_rx_frame_dma_tasklet, 744 + (unsigned long)netdev); 745 + 746 + return 0; 747 + err: 748 + ks8842_dealloc_dma_bufs(adapter); 749 + return err; 750 + } 836 751 837 752 /* Netdevice operations */ 838 753 ··· 984 615 int err; 985 616 986 617 netdev_dbg(netdev, "%s - entry\n", __func__); 618 + 619 + if (KS8842_USE_DMA(adapter)) { 620 + err = ks8842_alloc_dma_bufs(netdev); 621 + 622 + if (!err) { 623 + /* start RX dma */ 624 + err = __ks8842_start_new_rx_dma(netdev); 625 + if (err) 626 + ks8842_dealloc_dma_bufs(adapter); 627 + } 628 + 629 + if (err) { 630 + printk(KERN_WARNING DRV_NAME 631 + ": Failed to initiate DMA, running PIO\n"); 632 + ks8842_dealloc_dma_bufs(adapter); 633 + adapter->dma_rx.channel = -1; 634 + adapter->dma_tx.channel = -1; 635 + } 636 + } 987 637 988 638 /* reset the HW */ 989 639 ks8842_reset_hw(adapter); ··· 1029 641 1030 642 cancel_work_sync(&adapter->timeout_work); 1031 643 644 + if (KS8842_USE_DMA(adapter)) 645 + ks8842_dealloc_dma_bufs(adapter); 646 + 1032 647 /* free the irq */ 1033 648 free_irq(adapter->irq, netdev); 1034 649 ··· 1048 657 struct ks8842_adapter *adapter = netdev_priv(netdev); 1049 658 1050 659 netdev_dbg(netdev, "%s: entry\n", __func__); 660 + 661 + if (KS8842_USE_DMA(adapter)) { 662 + unsigned long flags; 663 + ret = ks8842_tx_frame_dma(skb, netdev); 664 + /* for now only allow one transfer at the time */ 665 + spin_lock_irqsave(&adapter->lock, flags); 666 + if (adapter->dma_tx.adesc) 667 + netif_stop_queue(netdev); 668 + spin_unlock_irqrestore(&adapter->lock, flags); 669 + return ret; 670 + } 1051 671 1052 672 ret = ks8842_tx_frame(skb, netdev); 1053 673 ··· 1095 693 netdev_dbg(netdev, "%s: entry\n", __func__); 1096 694 1097 695 spin_lock_irqsave(&adapter->lock, flags); 696 + 697 + if (KS8842_USE_DMA(adapter)) 698 + ks8842_stop_dma(adapter); 699 + 1098 700 /* disable interrupts */ 1099 701 ks8842_write16(adapter, 18, 0, REG_IER); 1100 702 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); ··· 1112 706 
ks8842_write_mac_addr(adapter, netdev->dev_addr); 1113 707 1114 708 ks8842_update_link_status(netdev, adapter); 709 + 710 + if (KS8842_USE_DMA(adapter)) 711 + __ks8842_start_new_rx_dma(netdev); 1115 712 } 1116 713 1117 714 static void ks8842_tx_timeout(struct net_device *netdev) ··· 1172 763 if (adapter->irq < 0) { 1173 764 err = adapter->irq; 1174 765 goto err_get_irq; 766 + } 767 + 768 + adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; 769 + 770 + /* DMA is only supported when accessed via timberdale */ 771 + if (!(adapter->conf_flags & MICREL_KS884X) && pdata && 772 + (pdata->tx_dma_channel != -1) && 773 + (pdata->rx_dma_channel != -1)) { 774 + adapter->dma_rx.channel = pdata->rx_dma_channel; 775 + adapter->dma_tx.channel = pdata->tx_dma_channel; 776 + } else { 777 + adapter->dma_rx.channel = -1; 778 + adapter->dma_tx.channel = -1; 1175 779 } 1176 780 1177 781 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
+4
include/linux/ks8842.h
··· 25 25 * struct ks8842_platform_data - Platform data of the KS8842 network driver 26 26 * @macaddr: The MAC address of the device, set to all 0:s to use the on in 27 27 * the chip. 28 + * @rx_dma_channel: The DMA channel to use for RX, -1 for none. 29 + * @tx_dma_channel: The DMA channel to use for TX, -1 for none. 28 30 * 29 31 */ 30 32 struct ks8842_platform_data { 31 33 u8 macaddr[ETH_ALEN]; 34 + int rx_dma_channel; 35 + int tx_dma_channel; 32 36 }; 33 37 34 38 #endif