Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
fsldma: Fix compile warnings
fsldma: fix memory leak on error path in fsl_dma_prep_memcpy()
fsldma: snooping is not enabled for last entry in descriptor chain
fsldma: fix infinite loop on multi-descriptor DMA chain completion
fsldma: fix "DMA halt timeout!" errors
fsldma: fix check on potential fdev->chan[] overflow
fsldma: update mailing list address in MAINTAINERS

+48 -25
+1 -1
MAINTAINERS
··· 2251 2251 M: leoli@freescale.com 2252 2252 P: Zhang Wei 2253 2253 M: zw@zh-kernel.org 2254 - L: linuxppc-embedded@ozlabs.org 2254 + L: linuxppc-dev@ozlabs.org 2255 2255 L: linux-kernel@vger.kernel.org 2256 2256 S: Maintained 2257 2257 F: drivers/dma/fsldma.*
+47 -24
drivers/dma/fsldma.c
··· 179 179 static void set_ld_eol(struct fsl_dma_chan *fsl_chan, 180 180 struct fsl_desc_sw *desc) 181 181 { 182 + u64 snoop_bits; 183 + 184 + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 185 + ? FSL_DMA_SNEN : 0; 186 + 182 187 desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, 183 - DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, 184 - 64); 188 + DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL 189 + | snoop_bits, 64); 185 190 } 186 191 187 192 static void append_ld_queue(struct fsl_dma_chan *fsl_chan, ··· 318 313 319 314 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) 320 315 { 321 - struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); 322 316 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); 317 + struct fsl_desc_sw *desc; 323 318 unsigned long flags; 324 319 dma_cookie_t cookie; 325 320 ··· 327 322 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 328 323 329 324 cookie = fsl_chan->common.cookie; 330 - cookie++; 331 - if (cookie < 0) 332 - cookie = 1; 333 - desc->async_tx.cookie = cookie; 334 - fsl_chan->common.cookie = desc->async_tx.cookie; 325 + list_for_each_entry(desc, &tx->tx_list, node) { 326 + cookie++; 327 + if (cookie < 0) 328 + cookie = 1; 335 329 336 - append_ld_queue(fsl_chan, desc); 337 - list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); 330 + desc->async_tx.cookie = cookie; 331 + } 332 + 333 + fsl_chan->common.cookie = cookie; 334 + append_ld_queue(fsl_chan, tx_to_fsl_desc(tx)); 335 + list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev); 338 336 339 337 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 340 338 ··· 462 454 { 463 455 struct fsl_dma_chan *fsl_chan; 464 456 struct fsl_desc_sw *first = NULL, *prev = NULL, *new; 457 + struct list_head *list; 465 458 size_t copy; 466 - LIST_HEAD(link_chain); 467 459 468 460 if (!chan) 469 461 return NULL; ··· 480 472 if (!new) { 481 473 dev_err(fsl_chan->dev, 482 474 "No free memory for link descriptor\n"); 
483 - return NULL; 475 + goto fail; 484 476 } 485 477 #ifdef FSL_DMA_LD_DEBUG 486 478 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); ··· 515 507 /* Set End-of-link to the last link descriptor of new list*/ 516 508 set_ld_eol(fsl_chan, new); 517 509 518 - return first ? &first->async_tx : NULL; 510 + return &first->async_tx; 511 + 512 + fail: 513 + if (!first) 514 + return NULL; 515 + 516 + list = &first->async_tx.tx_list; 517 + list_for_each_entry_safe_reverse(new, prev, list, node) { 518 + list_del(&new->node); 519 + dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); 520 + } 521 + 522 + return NULL; 519 523 } 520 524 521 525 /** ··· 618 598 dma_addr_t next_dest_addr; 619 599 unsigned long flags; 620 600 601 + spin_lock_irqsave(&fsl_chan->desc_lock, flags); 602 + 621 603 if (!dma_is_idle(fsl_chan)) 622 - return; 604 + goto out_unlock; 623 605 624 606 dma_halt(fsl_chan); 625 607 626 608 /* If there are some link descriptors 627 609 * not transfered in queue. We need to start it. 
628 610 */ 629 - spin_lock_irqsave(&fsl_chan->desc_lock, flags); 630 611 631 612 /* Find the first un-transfer desciptor */ 632 613 for (ld_node = fsl_chan->ld_queue.next; ··· 638 617 fsl_chan->common.cookie) == DMA_SUCCESS); 639 618 ld_node = ld_node->next); 640 619 641 - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 642 - 643 620 if (ld_node != &fsl_chan->ld_queue) { 644 621 /* Get the ld start address from ld_queue */ 645 622 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; 646 - dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n", 647 - (void *)next_dest_addr); 623 + dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", 624 + (unsigned long long)next_dest_addr); 648 625 set_cdar(fsl_chan, next_dest_addr); 649 626 dma_start(fsl_chan); 650 627 } else { 651 628 set_cdar(fsl_chan, 0); 652 629 set_ndar(fsl_chan, 0); 653 630 } 631 + 632 + out_unlock: 633 + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 654 634 } 655 635 656 636 /** ··· 756 734 */ 757 735 if (stat & FSL_DMA_SR_EOSI) { 758 736 dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); 759 - dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n", 760 - (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan)); 737 + dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", 738 + (unsigned long long)get_cdar(fsl_chan), 739 + (unsigned long long)get_ndar(fsl_chan)); 761 740 stat &= ~FSL_DMA_SR_EOSI; 762 741 update_cookie = 1; 763 742 } ··· 853 830 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); 854 831 855 832 new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; 856 - if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) { 833 + if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { 857 834 dev_err(fdev->dev, "There is no %d channel!\n", 858 835 new_fsl_chan->id); 859 836 err = -EINVAL; ··· 948 925 } 949 926 950 927 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 951 - "controller at %p...\n", 952 - match->compatible, (void *)fdev->reg.start); 928 + 
"controller at 0x%llx...\n", 929 + match->compatible, (unsigned long long)fdev->reg.start); 953 930 fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end 954 931 - fdev->reg.start + 1); 955 932