Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

DMA: Freescale: move functions to avoid forward declarations

These functions will be modified in the next patch in the series. Moving the
functions in a separate patch, without any functional changes, makes the review easier.

Signed-off-by: Hongbo Zhang <hongbo.zhang@freescale.com>
Signed-off-by: Qiang Liu <qiang.liu@freescale.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Authored by Hongbo Zhang and committed by Vinod Koul.
2a5ecb79 86d19a54

+95 -95
drivers/dma/fsldma.c
··· 459 459 } 460 460 461 461 /** 462 + * fsl_chan_xfer_ld_queue - transfer any pending transactions 463 + * @chan : Freescale DMA channel 464 + * 465 + * HARDWARE STATE: idle 466 + * LOCKING: must hold chan->desc_lock 467 + */ 468 + static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) 469 + { 470 + struct fsl_desc_sw *desc; 471 + 472 + /* 473 + * If the list of pending descriptors is empty, then we 474 + * don't need to do any work at all 475 + */ 476 + if (list_empty(&chan->ld_pending)) { 477 + chan_dbg(chan, "no pending LDs\n"); 478 + return; 479 + } 480 + 481 + /* 482 + * The DMA controller is not idle, which means that the interrupt 483 + * handler will start any queued transactions when it runs after 484 + * this transaction finishes 485 + */ 486 + if (!chan->idle) { 487 + chan_dbg(chan, "DMA controller still busy\n"); 488 + return; 489 + } 490 + 491 + /* 492 + * If there are some link descriptors which have not been 493 + * transferred, we need to start the controller 494 + */ 495 + 496 + /* 497 + * Move all elements from the queue of pending transactions 498 + * onto the list of running transactions 499 + */ 500 + chan_dbg(chan, "idle, starting controller\n"); 501 + desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); 502 + list_splice_tail_init(&chan->ld_pending, &chan->ld_running); 503 + 504 + /* 505 + * The 85xx DMA controller doesn't clear the channel start bit 506 + * automatically at the end of a transfer. Therefore we must clear 507 + * it in software before starting the transfer. 
508 + */ 509 + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 510 + u32 mode; 511 + 512 + mode = get_mr(chan); 513 + mode &= ~FSL_DMA_MR_CS; 514 + set_mr(chan, mode); 515 + } 516 + 517 + /* 518 + * Program the descriptor's address into the DMA controller, 519 + * then start the DMA transaction 520 + */ 521 + set_cdar(chan, desc->async_tx.phys); 522 + get_cdar(chan); 523 + 524 + dma_start(chan); 525 + chan->idle = false; 526 + } 527 + 528 + /** 529 + * fsldma_cleanup_descriptor - cleanup and free a single link descriptor 530 + * @chan: Freescale DMA channel 531 + * @desc: descriptor to cleanup and free 532 + * 533 + * This function is used on a descriptor which has been executed by the DMA 534 + * controller. It will run any callbacks, submit any dependencies, and then 535 + * free the descriptor. 536 + */ 537 + static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, 538 + struct fsl_desc_sw *desc) 539 + { 540 + struct dma_async_tx_descriptor *txd = &desc->async_tx; 541 + 542 + /* Run the link descriptor callback function */ 543 + if (txd->callback) { 544 + chan_dbg(chan, "LD %p callback\n", desc); 545 + txd->callback(txd->callback_param); 546 + } 547 + 548 + /* Run any dependencies */ 549 + dma_run_dependencies(txd); 550 + 551 + dma_descriptor_unmap(txd); 552 + chan_dbg(chan, "LD %p free\n", desc); 553 + dma_pool_free(chan->desc_pool, desc, txd->phys); 554 + } 555 + 556 + /** 462 557 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. 463 558 * @chan : Freescale DMA channel 464 559 * ··· 895 800 } 896 801 897 802 return 0; 898 - } 899 - 900 - /** 901 - * fsldma_cleanup_descriptor - cleanup and free a single link descriptor 902 - * @chan: Freescale DMA channel 903 - * @desc: descriptor to cleanup and free 904 - * 905 - * This function is used on a descriptor which has been executed by the DMA 906 - * controller. It will run any callbacks, submit any dependencies, and then 907 - * free the descriptor. 
908 - */ 909 - static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, 910 - struct fsl_desc_sw *desc) 911 - { 912 - struct dma_async_tx_descriptor *txd = &desc->async_tx; 913 - 914 - /* Run the link descriptor callback function */ 915 - if (txd->callback) { 916 - chan_dbg(chan, "LD %p callback\n", desc); 917 - txd->callback(txd->callback_param); 918 - } 919 - 920 - /* Run any dependencies */ 921 - dma_run_dependencies(txd); 922 - 923 - dma_descriptor_unmap(txd); 924 - chan_dbg(chan, "LD %p free\n", desc); 925 - dma_pool_free(chan->desc_pool, desc, txd->phys); 926 - } 927 - 928 - /** 929 - * fsl_chan_xfer_ld_queue - transfer any pending transactions 930 - * @chan : Freescale DMA channel 931 - * 932 - * HARDWARE STATE: idle 933 - * LOCKING: must hold chan->desc_lock 934 - */ 935 - static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) 936 - { 937 - struct fsl_desc_sw *desc; 938 - 939 - /* 940 - * If the list of pending descriptors is empty, then we 941 - * don't need to do any work at all 942 - */ 943 - if (list_empty(&chan->ld_pending)) { 944 - chan_dbg(chan, "no pending LDs\n"); 945 - return; 946 - } 947 - 948 - /* 949 - * The DMA controller is not idle, which means that the interrupt 950 - * handler will start any queued transactions when it runs after 951 - * this transaction finishes 952 - */ 953 - if (!chan->idle) { 954 - chan_dbg(chan, "DMA controller still busy\n"); 955 - return; 956 - } 957 - 958 - /* 959 - * If there are some link descriptors which have not been 960 - * transferred, we need to start the controller 961 - */ 962 - 963 - /* 964 - * Move all elements from the queue of pending transactions 965 - * onto the list of running transactions 966 - */ 967 - chan_dbg(chan, "idle, starting controller\n"); 968 - desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); 969 - list_splice_tail_init(&chan->ld_pending, &chan->ld_running); 970 - 971 - /* 972 - * The 85xx DMA controller doesn't clear the channel start bit 973 - * 
automatically at the end of a transfer. Therefore we must clear 974 - * it in software before starting the transfer. 975 - */ 976 - if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 977 - u32 mode; 978 - 979 - mode = get_mr(chan); 980 - mode &= ~FSL_DMA_MR_CS; 981 - set_mr(chan, mode); 982 - } 983 - 984 - /* 985 - * Program the descriptor's address into the DMA controller, 986 - * then start the DMA transaction 987 - */ 988 - set_cdar(chan, desc->async_tx.phys); 989 - get_cdar(chan); 990 - 991 - dma_start(chan); 992 - chan->idle = false; 993 803 } 994 804 995 805 /**