Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM/dmaengine: edma: Public API to use private struct pointer

Instead of relying on indexes pointing to edma private data in the global
pointer array, pass the private data pointer via the public API.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

authored by

Peter Ujfalusi and committed by
Vinod Koul
ca304fa9 700c3719

+214 -208
+149 -156
arch/arm/common/edma.c
··· 130 130 131 131 struct edma_soc_info *info; 132 132 int id; 133 - 133 + bool unused_chan_list_done; 134 134 /* The edma_inuse bit for each PaRAM slot is clear unless the 135 135 * channel is in use ... by ARM or DSP, for QDMA, or whatever. 136 136 */ ··· 264 264 } 265 265 266 266 /*****************************************************************************/ 267 - static struct edma *edma_cc[EDMA_MAX_CC]; 268 267 static int arch_num_cc; 269 268 270 269 /* dummy param set used to (re)initialize parameter RAM slots */ ··· 489 490 static int prepare_unused_channel_list(struct device *dev, void *data) 490 491 { 491 492 struct platform_device *pdev = to_platform_device(dev); 492 - int i, count, ctlr; 493 + struct edma *cc = data; 494 + int i, count; 493 495 struct of_phandle_args dma_spec; 494 496 495 497 if (dev->of_node) { 498 + struct platform_device *dma_pdev; 499 + 496 500 count = of_property_count_strings(dev->of_node, "dma-names"); 497 501 if (count < 0) 498 502 return 0; 499 503 for (i = 0; i < count; i++) { 504 + 500 505 if (of_parse_phandle_with_args(dev->of_node, "dmas", 501 506 "#dma-cells", i, 502 507 &dma_spec)) ··· 511 508 continue; 512 509 } 513 510 511 + dma_pdev = of_find_device_by_node(dma_spec.np); 512 + if (&dma_pdev->dev != cc->dev) 513 + continue; 514 + 514 515 clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]), 515 - edma_cc[0]->edma_unused); 516 + cc->edma_unused); 516 517 of_node_put(dma_spec.np); 517 518 } 518 519 return 0; ··· 524 517 525 518 /* For non-OF case */ 526 519 for (i = 0; i < pdev->num_resources; i++) { 527 - if ((pdev->resource[i].flags & IORESOURCE_DMA) && 528 - (int)pdev->resource[i].start >= 0) { 529 - ctlr = EDMA_CTLR(pdev->resource[i].start); 520 + struct resource *res = &pdev->resource[i]; 521 + 522 + if ((res->flags & IORESOURCE_DMA) && (int)res->start >= 0) { 530 523 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start), 531 - edma_cc[ctlr]->edma_unused); 524 + cc->edma_unused); 532 525 } 533 526 } 534 527 ··· 536 529 } 537 530 
538 531 /*-----------------------------------------------------------------------*/ 539 - 540 - static bool unused_chan_list_done; 541 532 542 533 /* Resource alloc/free: dma channels, parameter RAM slots */ 543 534 ··· 569 564 * 570 565 * Returns the number of the channel, else negative errno. 571 566 */ 572 - int edma_alloc_channel(int channel, 567 + int edma_alloc_channel(struct edma *cc, int channel, 573 568 void (*callback)(unsigned channel, u16 ch_status, void *data), 574 569 void *data, 575 570 enum dma_event_q eventq_no) 576 571 { 577 - unsigned i, done = 0, ctlr = 0; 572 + unsigned done = 0; 578 573 int ret = 0; 579 574 580 - if (!unused_chan_list_done) { 575 + if (!cc->unused_chan_list_done) { 581 576 /* 582 577 * Scan all the platform devices to find out the EDMA channels 583 578 * used and clear them in the unused list, making the rest 584 579 * available for ARM usage. 585 580 */ 586 - ret = bus_for_each_dev(&platform_bus_type, NULL, NULL, 587 - prepare_unused_channel_list); 581 + ret = bus_for_each_dev(&platform_bus_type, NULL, cc, 582 + prepare_unused_channel_list); 588 583 if (ret < 0) 589 584 return ret; 590 585 591 - unused_chan_list_done = true; 586 + cc->unused_chan_list_done = true; 592 587 } 593 588 594 589 if (channel >= 0) { 595 - ctlr = EDMA_CTLR(channel); 590 + if (cc->id != EDMA_CTLR(channel)) { 591 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", 592 + __func__, cc->id, EDMA_CTLR(channel)); 593 + return -EINVAL; 594 + } 596 595 channel = EDMA_CHAN_SLOT(channel); 597 596 } 598 597 599 598 if (channel < 0) { 600 - for (i = 0; i < arch_num_cc; i++) { 601 - channel = 0; 602 - for (;;) { 603 - channel = find_next_bit(edma_cc[i]->edma_unused, 604 - edma_cc[i]->num_channels, 605 - channel); 606 - if (channel == edma_cc[i]->num_channels) 607 - break; 608 - if (!test_and_set_bit(channel, 609 - edma_cc[i]->edma_inuse)) { 610 - done = 1; 611 - ctlr = i; 612 - break; 613 - } 614 - channel++; 615 - } 616 - if (done) 599 + channel = 0; 600 + for 
(;;) { 601 + channel = find_next_bit(cc->edma_unused, 602 + cc->num_channels, channel); 603 + if (channel == cc->num_channels) 617 604 break; 605 + if (!test_and_set_bit(channel, cc->edma_inuse)) { 606 + done = 1; 607 + break; 608 + } 609 + channel++; 618 610 } 619 611 if (!done) 620 612 return -ENOMEM; 621 - } else if (channel >= edma_cc[ctlr]->num_channels) { 613 + } else if (channel >= cc->num_channels) { 622 614 return -EINVAL; 623 - } else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) { 615 + } else if (test_and_set_bit(channel, cc->edma_inuse)) { 624 616 return -EBUSY; 625 617 } 626 618 627 619 /* ensure access through shadow region 0 */ 628 - edma_or_array2(edma_cc[ctlr], EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); 620 + edma_or_array2(cc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); 629 621 630 622 /* ensure no events are pending */ 631 - edma_stop(EDMA_CTLR_CHAN(ctlr, channel)); 632 - memcpy_toio(edma_cc[ctlr]->base + PARM_OFFSET(channel), &dummy_paramset, 623 + edma_stop(cc, EDMA_CTLR_CHAN(cc->id, channel)); 624 + memcpy_toio(cc->base + PARM_OFFSET(channel), &dummy_paramset, 633 625 PARM_SIZE); 634 626 635 627 if (callback) 636 - setup_dma_interrupt(edma_cc[ctlr], 637 - EDMA_CTLR_CHAN(ctlr, channel), callback, 638 - data); 628 + setup_dma_interrupt(cc, EDMA_CTLR_CHAN(cc->id, channel), 629 + callback, data); 639 630 640 - map_dmach_queue(edma_cc[ctlr], channel, eventq_no); 631 + map_dmach_queue(cc, channel, eventq_no); 641 632 642 - return EDMA_CTLR_CHAN(ctlr, channel); 633 + return EDMA_CTLR_CHAN(cc->id, channel); 643 634 } 644 635 EXPORT_SYMBOL(edma_alloc_channel); 645 636 ··· 651 650 * will not be reactivated by linking, chaining, or software calls to 652 651 * edma_start(). 
653 652 */ 654 - void edma_free_channel(unsigned channel) 653 + void edma_free_channel(struct edma *cc, unsigned channel) 655 654 { 656 - unsigned ctlr; 657 655 658 - ctlr = EDMA_CTLR(channel); 656 + if (cc->id != EDMA_CTLR(channel)) { 657 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 658 + cc->id, EDMA_CTLR(channel)); 659 + return; 660 + } 659 661 channel = EDMA_CHAN_SLOT(channel); 660 662 661 - if (channel >= edma_cc[ctlr]->num_channels) 663 + if (channel >= cc->num_channels) 662 664 return; 663 665 664 - setup_dma_interrupt(edma_cc[ctlr], channel, NULL, NULL); 666 + setup_dma_interrupt(cc, channel, NULL, NULL); 665 667 /* REVISIT should probably take out of shadow region 0 */ 666 668 667 - memcpy_toio(edma_cc[ctlr]->base + PARM_OFFSET(channel), &dummy_paramset, 669 + memcpy_toio(cc->base + PARM_OFFSET(channel), &dummy_paramset, 668 670 PARM_SIZE); 669 - clear_bit(channel, edma_cc[ctlr]->edma_inuse); 671 + clear_bit(channel, cc->edma_inuse); 670 672 } 671 673 EXPORT_SYMBOL(edma_free_channel); 672 674 ··· 687 683 * 688 684 * Returns the number of the slot, else negative errno. 
689 685 */ 690 - int edma_alloc_slot(unsigned ctlr, int slot) 686 + int edma_alloc_slot(struct edma *cc, int slot) 691 687 { 692 - if (!edma_cc[ctlr]) 693 - return -EINVAL; 694 - 695 - if (slot >= 0) 688 + if (slot > 0) 696 689 slot = EDMA_CHAN_SLOT(slot); 697 - 698 690 if (slot < 0) { 699 - slot = edma_cc[ctlr]->num_channels; 691 + slot = cc->num_channels; 700 692 for (;;) { 701 - slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse, 702 - edma_cc[ctlr]->num_slots, slot); 703 - if (slot == edma_cc[ctlr]->num_slots) 693 + slot = find_next_zero_bit(cc->edma_inuse, cc->num_slots, 694 + slot); 695 + if (slot == cc->num_slots) 704 696 return -ENOMEM; 705 - if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) 697 + if (!test_and_set_bit(slot, cc->edma_inuse)) 706 698 break; 707 699 } 708 - } else if (slot < edma_cc[ctlr]->num_channels || 709 - slot >= edma_cc[ctlr]->num_slots) { 700 + } else if (slot < cc->num_channels || slot >= cc->num_slots) { 710 701 return -EINVAL; 711 - } else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) { 702 + } else if (test_and_set_bit(slot, cc->edma_inuse)) { 712 703 return -EBUSY; 713 704 } 714 705 715 - memcpy_toio(edma_cc[ctlr]->base + PARM_OFFSET(slot), &dummy_paramset, 716 - PARM_SIZE); 706 + memcpy_toio(cc->base + PARM_OFFSET(slot), &dummy_paramset, PARM_SIZE); 717 707 718 - return EDMA_CTLR_CHAN(ctlr, slot); 708 + return slot; 719 709 } 720 710 EXPORT_SYMBOL(edma_alloc_slot); 721 711 ··· 721 723 * Callers are responsible for ensuring the slot is inactive, and will 722 724 * not be activated. 
723 725 */ 724 - void edma_free_slot(unsigned slot) 726 + void edma_free_slot(struct edma *cc, unsigned slot) 725 727 { 726 - unsigned ctlr; 727 728 728 - ctlr = EDMA_CTLR(slot); 729 729 slot = EDMA_CHAN_SLOT(slot); 730 - 731 - if (slot < edma_cc[ctlr]->num_channels || 732 - slot >= edma_cc[ctlr]->num_slots) 730 + if (slot < cc->num_channels || slot >= cc->num_slots) 733 731 return; 734 732 735 - memcpy_toio(edma_cc[ctlr]->base + PARM_OFFSET(slot), &dummy_paramset, 736 - PARM_SIZE); 737 - clear_bit(slot, edma_cc[ctlr]->edma_inuse); 733 + memcpy_toio(cc->base + PARM_OFFSET(slot), &dummy_paramset, PARM_SIZE); 734 + clear_bit(slot, cc->edma_inuse); 738 735 } 739 736 EXPORT_SYMBOL(edma_free_slot); 740 737 ··· 744 751 * 745 752 * Returns the position of the current active slot 746 753 */ 747 - dma_addr_t edma_get_position(unsigned slot, bool dst) 754 + dma_addr_t edma_get_position(struct edma *cc, unsigned slot, bool dst) 748 755 { 749 - u32 offs, ctlr = EDMA_CTLR(slot); 756 + u32 offs; 750 757 751 758 slot = EDMA_CHAN_SLOT(slot); 752 - 753 759 offs = PARM_OFFSET(slot); 754 760 offs += dst ? PARM_DST : PARM_SRC; 755 761 756 - return edma_read(edma_cc[ctlr], offs); 762 + return edma_read(cc, offs); 757 763 } 758 764 759 765 /** ··· 762 770 * 763 771 * The originating slot should not be part of any active DMA transfer. 
764 772 */ 765 - void edma_link(unsigned from, unsigned to) 773 + void edma_link(struct edma *cc, unsigned from, unsigned to) 766 774 { 767 - unsigned ctlr_from, ctlr_to; 768 - 769 - ctlr_from = EDMA_CTLR(from); 770 775 from = EDMA_CHAN_SLOT(from); 771 - ctlr_to = EDMA_CTLR(to); 772 776 to = EDMA_CHAN_SLOT(to); 777 + if (from >= cc->num_slots || to >= cc->num_slots) 778 + return; 773 779 774 - if (from >= edma_cc[ctlr_from]->num_slots) 775 - return; 776 - if (to >= edma_cc[ctlr_to]->num_slots) 777 - return; 778 - edma_parm_modify(edma_cc[ctlr_from], PARM_LINK_BCNTRLD, from, 0xffff0000, 779 - PARM_OFFSET(to)); 780 + edma_parm_modify(cc, PARM_LINK_BCNTRLD, from, 0xffff0000, 781 + PARM_OFFSET(to)); 780 782 } 781 783 EXPORT_SYMBOL(edma_link); 782 784 ··· 788 802 * calls to set up those parameters in small pieces, and provides 789 803 * complete control over all transfer options. 790 804 */ 791 - void edma_write_slot(unsigned slot, const struct edmacc_param *param) 805 + void edma_write_slot(struct edma *cc, unsigned slot, 806 + const struct edmacc_param *param) 792 807 { 793 - unsigned ctlr; 794 - 795 - ctlr = EDMA_CTLR(slot); 796 808 slot = EDMA_CHAN_SLOT(slot); 797 - 798 - if (slot >= edma_cc[ctlr]->num_slots) 809 + if (slot >= cc->num_slots) 799 810 return; 800 - memcpy_toio(edma_cc[ctlr]->base + PARM_OFFSET(slot), param, PARM_SIZE); 811 + memcpy_toio(cc->base + PARM_OFFSET(slot), param, PARM_SIZE); 801 812 } 802 813 EXPORT_SYMBOL(edma_write_slot); 803 814 ··· 806 823 * Use this to read data from a parameter RAM slot, perhaps to 807 824 * save them as a template for later reuse. 
808 825 */ 809 - void edma_read_slot(unsigned slot, struct edmacc_param *param) 826 + void edma_read_slot(struct edma *cc, unsigned slot, struct edmacc_param *param) 810 827 { 811 - unsigned ctlr; 812 - 813 - ctlr = EDMA_CTLR(slot); 814 828 slot = EDMA_CHAN_SLOT(slot); 815 - 816 - if (slot >= edma_cc[ctlr]->num_slots) 829 + if (slot >= cc->num_slots) 817 830 return; 818 - memcpy_fromio(param, edma_cc[ctlr]->base + PARM_OFFSET(slot), 819 - PARM_SIZE); 831 + memcpy_fromio(param, cc->base + PARM_OFFSET(slot), PARM_SIZE); 820 832 } 821 833 EXPORT_SYMBOL(edma_read_slot); 822 834 ··· 826 848 * This temporarily disables EDMA hardware events on the specified channel, 827 849 * preventing them from triggering new transfers on its behalf 828 850 */ 829 - void edma_pause(unsigned channel) 851 + void edma_pause(struct edma *cc, unsigned channel) 830 852 { 831 - unsigned ctlr; 832 - 833 - ctlr = EDMA_CTLR(channel); 853 + if (cc->id != EDMA_CTLR(channel)) { 854 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 855 + cc->id, EDMA_CTLR(channel)); 856 + return; 857 + } 834 858 channel = EDMA_CHAN_SLOT(channel); 835 859 836 - if (channel < edma_cc[ctlr]->num_channels) { 860 + if (channel < cc->num_channels) { 837 861 unsigned int mask = BIT(channel & 0x1f); 838 862 839 - edma_shadow0_write_array(edma_cc[ctlr], SH_EECR, channel >> 5, 840 - mask); 863 + edma_shadow0_write_array(cc, SH_EECR, channel >> 5, mask); 841 864 } 842 865 } 843 866 EXPORT_SYMBOL(edma_pause); ··· 849 870 * 850 871 * This re-enables EDMA hardware events on the specified channel. 
851 872 */ 852 - void edma_resume(unsigned channel) 873 + void edma_resume(struct edma *cc, unsigned channel) 853 874 { 854 - unsigned ctlr; 855 - 856 - ctlr = EDMA_CTLR(channel); 875 + if (cc->id != EDMA_CTLR(channel)) { 876 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 877 + cc->id, EDMA_CTLR(channel)); 878 + return; 879 + } 857 880 channel = EDMA_CHAN_SLOT(channel); 858 881 859 - if (channel < edma_cc[ctlr]->num_channels) { 882 + if (channel < cc->num_channels) { 860 883 unsigned int mask = BIT(channel & 0x1f); 861 884 862 - edma_shadow0_write_array(edma_cc[ctlr], SH_EESR, channel >> 5, 863 - mask); 885 + edma_shadow0_write_array(cc, SH_EESR, channel >> 5, mask); 864 886 } 865 887 } 866 888 EXPORT_SYMBOL(edma_resume); 867 889 868 - int edma_trigger_channel(unsigned channel) 890 + int edma_trigger_channel(struct edma *cc, unsigned channel) 869 891 { 870 - unsigned ctlr; 871 892 unsigned int mask; 872 893 873 - ctlr = EDMA_CTLR(channel); 894 + if (cc->id != EDMA_CTLR(channel)) { 895 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 896 + cc->id, EDMA_CTLR(channel)); 897 + return -EINVAL; 898 + } 874 899 channel = EDMA_CHAN_SLOT(channel); 875 900 mask = BIT(channel & 0x1f); 876 901 877 - edma_shadow0_write_array(edma_cc[ctlr], SH_ESR, (channel >> 5), mask); 902 + edma_shadow0_write_array(cc, SH_ESR, (channel >> 5), mask); 878 903 879 904 pr_debug("EDMA: ESR%d %08x\n", (channel >> 5), 880 - edma_shadow0_read_array(edma_cc[ctlr], SH_ESR, 881 - (channel >> 5))); 905 + edma_shadow0_read_array(cc, SH_ESR, (channel >> 5))); 882 906 return 0; 883 907 } 884 908 EXPORT_SYMBOL(edma_trigger_channel); ··· 897 915 * 898 916 * Returns zero on success, else negative errno. 
899 917 */ 900 - int edma_start(unsigned channel) 918 + int edma_start(struct edma *cc, unsigned channel) 901 919 { 902 - unsigned ctlr; 903 - 904 - ctlr = EDMA_CTLR(channel); 920 + if (cc->id != EDMA_CTLR(channel)) { 921 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 922 + cc->id, EDMA_CTLR(channel)); 923 + return -EINVAL; 924 + } 905 925 channel = EDMA_CHAN_SLOT(channel); 906 926 907 - if (channel < edma_cc[ctlr]->num_channels) { 908 - struct edma *cc = edma_cc[ctlr]; 927 + if (channel < cc->num_channels) { 909 928 int j = channel >> 5; 910 929 unsigned int mask = BIT(channel & 0x1f); 911 930 ··· 945 962 * may not be resumed, and the channel's Parameter RAM should be 946 963 * reinitialized before being reused. 947 964 */ 948 - void edma_stop(unsigned channel) 965 + void edma_stop(struct edma *cc, unsigned channel) 949 966 { 950 - unsigned ctlr; 951 - 952 - ctlr = EDMA_CTLR(channel); 967 + if (cc->id != EDMA_CTLR(channel)) { 968 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 969 + cc->id, EDMA_CTLR(channel)); 970 + return; 971 + } 953 972 channel = EDMA_CHAN_SLOT(channel); 954 973 955 - if (channel < edma_cc[ctlr]->num_channels) { 956 - struct edma *cc = edma_cc[ctlr]; 974 + if (channel < cc->num_channels) { 957 975 int j = channel >> 5; 958 976 unsigned int mask = BIT(channel & 0x1f); 959 977 ··· 989 1005 * 990 1006 *****************************************************************************/ 991 1007 992 - void edma_clean_channel(unsigned channel) 1008 + void edma_clean_channel(struct edma *cc, unsigned channel) 993 1009 { 994 - unsigned ctlr; 995 - 996 - ctlr = EDMA_CTLR(channel); 1010 + if (cc->id != EDMA_CTLR(channel)) { 1011 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 1012 + cc->id, EDMA_CTLR(channel)); 1013 + return; 1014 + } 997 1015 channel = EDMA_CHAN_SLOT(channel); 998 1016 999 - if (channel < edma_cc[ctlr]->num_channels) { 1000 - struct edma *cc = edma_cc[ctlr]; 1017 + if (channel < 
cc->num_channels) { 1001 1018 int j = (channel >> 5); 1002 1019 unsigned int mask = BIT(channel & 0x1f); 1003 1020 ··· 1022 1037 * 1023 1038 * Can be used to move a channel to a selected event queue. 1024 1039 */ 1025 - void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no) 1040 + void edma_assign_channel_eventq(struct edma *cc, unsigned channel, 1041 + enum dma_event_q eventq_no) 1026 1042 { 1027 - unsigned ctlr; 1028 - 1029 - ctlr = EDMA_CTLR(channel); 1043 + if (cc->id != EDMA_CTLR(channel)) { 1044 + dev_err(cc->dev, "%s: ID mismatch for eDMA%d: %d\n", __func__, 1045 + cc->id, EDMA_CTLR(channel)); 1046 + return; 1047 + } 1030 1048 channel = EDMA_CHAN_SLOT(channel); 1031 1049 1032 - if (channel >= edma_cc[ctlr]->num_channels) 1050 + if (channel >= cc->num_channels) 1033 1051 return; 1034 1052 1035 1053 /* default to low priority queue */ 1036 1054 if (eventq_no == EVENTQ_DEFAULT) 1037 - eventq_no = edma_cc[ctlr]->default_queue; 1038 - if (eventq_no >= edma_cc[ctlr]->num_tc) 1055 + eventq_no = cc->default_queue; 1056 + if (eventq_no >= cc->num_tc) 1039 1057 return; 1040 1058 1041 - map_dmach_queue(edma_cc[ctlr], channel, eventq_no); 1059 + map_dmach_queue(cc, channel, eventq_no); 1042 1060 } 1043 1061 EXPORT_SYMBOL(edma_assign_channel_eventq); 1062 + 1063 + struct edma *edma_get_data(struct device *edma_dev) 1064 + { 1065 + return dev_get_drvdata(edma_dev); 1066 + } 1067 + 1044 1068 1045 1069 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, 1046 1070 struct edma *edma_cc, int cc_id) ··· 1272 1278 } 1273 1279 } 1274 1280 1275 - edma_cc[dev_id] = devm_kzalloc(dev, sizeof(struct edma), GFP_KERNEL); 1276 - if (!edma_cc[dev_id]) 1281 + cc = devm_kzalloc(dev, sizeof(struct edma), GFP_KERNEL); 1282 + if (!cc) 1277 1283 return -ENOMEM; 1278 1284 1279 - cc = edma_cc[dev_id]; 1280 1285 cc->dev = dev; 1281 1286 cc->id = dev_id; 1282 1287 dev_set_drvdata(dev, cc);
+43 -36
drivers/dma/edma.c
··· 119 119 }; 120 120 121 121 struct edma_cc { 122 + struct edma *cc; 122 123 int ctlr; 123 124 struct dma_device dma_slave; 124 125 struct edma_chan slave_chans[EDMA_CHANS]; ··· 151 150 /* Dispatch a queued descriptor to the controller (caller holds lock) */ 152 151 static void edma_execute(struct edma_chan *echan) 153 152 { 153 + struct edma *cc = echan->ecc->cc; 154 154 struct virt_dma_desc *vdesc; 155 155 struct edma_desc *edesc; 156 156 struct device *dev = echan->vchan.chan.device->dev; ··· 176 174 /* Write descriptor PaRAM set(s) */ 177 175 for (i = 0; i < nslots; i++) { 178 176 j = i + edesc->processed; 179 - edma_write_slot(echan->slot[i], &edesc->pset[j].param); 177 + edma_write_slot(cc, echan->slot[i], &edesc->pset[j].param); 180 178 edesc->sg_len += edesc->pset[j].len; 181 179 dev_vdbg(echan->vchan.chan.device->dev, 182 180 "\n pset[%d]:\n" ··· 201 199 edesc->pset[j].param.link_bcntrld); 202 200 /* Link to the previous slot if not the last set */ 203 201 if (i != (nslots - 1)) 204 - edma_link(echan->slot[i], echan->slot[i+1]); 202 + edma_link(cc, echan->slot[i], echan->slot[i+1]); 205 203 } 206 204 207 205 edesc->processed += nslots; ··· 213 211 */ 214 212 if (edesc->processed == edesc->pset_nr) { 215 213 if (edesc->cyclic) 216 - edma_link(echan->slot[nslots-1], echan->slot[1]); 214 + edma_link(cc, echan->slot[nslots-1], echan->slot[1]); 217 215 else 218 - edma_link(echan->slot[nslots-1], 216 + edma_link(cc, echan->slot[nslots-1], 219 217 echan->ecc->dummy_slot); 220 218 } 221 219 ··· 226 224 * transfers of MAX_NR_SG 227 225 */ 228 226 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); 229 - edma_clean_channel(echan->ch_num); 230 - edma_stop(echan->ch_num); 231 - edma_start(echan->ch_num); 232 - edma_trigger_channel(echan->ch_num); 227 + edma_clean_channel(cc, echan->ch_num); 228 + edma_stop(cc, echan->ch_num); 229 + edma_start(cc, echan->ch_num); 230 + edma_trigger_channel(cc, echan->ch_num); 233 231 echan->missed = 0; 234 232 } else if 
(edesc->processed <= MAX_NR_SG) { 235 233 dev_dbg(dev, "first transfer starting on channel %d\n", 236 234 echan->ch_num); 237 - edma_start(echan->ch_num); 235 + edma_start(cc, echan->ch_num); 238 236 } else { 239 237 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", 240 238 echan->ch_num, edesc->processed); 241 - edma_resume(echan->ch_num); 239 + edma_resume(cc, echan->ch_num); 242 240 } 243 241 } 244 242 ··· 256 254 * echan->edesc is NULL and exit.) 257 255 */ 258 256 if (echan->edesc) { 259 - edma_stop(echan->ch_num); 257 + edma_stop(echan->ecc->cc, echan->ch_num); 260 258 /* Move the cyclic channel back to default queue */ 261 259 if (echan->edesc->cyclic) 262 - edma_assign_channel_eventq(echan->ch_num, 260 + edma_assign_channel_eventq(echan->ecc->cc, 261 + echan->ch_num, 263 262 EVENTQ_DEFAULT); 264 263 /* 265 264 * free the running request descriptor ··· 298 295 if (!echan->edesc) 299 296 return -EINVAL; 300 297 301 - edma_pause(echan->ch_num); 298 + edma_pause(echan->ecc->cc, echan->ch_num); 302 299 return 0; 303 300 } 304 301 ··· 306 303 { 307 304 struct edma_chan *echan = to_edma_chan(chan); 308 305 309 - edma_resume(echan->ch_num); 306 + edma_resume(echan->ecc->cc, echan->ch_num); 310 307 return 0; 311 308 } 312 309 ··· 488 485 for (i = 0; i < nslots; i++) { 489 486 if (echan->slot[i] < 0) { 490 487 echan->slot[i] = 491 - edma_alloc_slot(EDMA_CTLR(echan->ch_num), 492 - EDMA_SLOT_ANY); 488 + edma_alloc_slot(echan->ecc->cc, EDMA_SLOT_ANY); 493 489 if (echan->slot[i] < 0) { 494 490 kfree(edesc); 495 491 dev_err(dev, "%s: Failed to allocate slot\n", ··· 643 641 /* Allocate a PaRAM slot, if needed */ 644 642 if (echan->slot[i] < 0) { 645 643 echan->slot[i] = 646 - edma_alloc_slot(EDMA_CTLR(echan->ch_num), 647 - EDMA_SLOT_ANY); 644 + edma_alloc_slot(echan->ecc->cc, EDMA_SLOT_ANY); 648 645 if (echan->slot[i] < 0) { 649 646 kfree(edesc); 650 647 dev_err(dev, "%s: Failed to allocate slot\n", ··· 704 703 } 705 704 706 705 /* Place the cyclic channel to 
highest priority queue */ 707 - edma_assign_channel_eventq(echan->ch_num, EVENTQ_0); 706 + edma_assign_channel_eventq(echan->ecc->cc, echan->ch_num, EVENTQ_0); 708 707 709 708 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 710 709 } ··· 712 711 static void edma_callback(unsigned ch_num, u16 ch_status, void *data) 713 712 { 714 713 struct edma_chan *echan = data; 714 + struct edma *cc = echan->ecc->cc; 715 715 struct device *dev = echan->vchan.chan.device->dev; 716 716 struct edma_desc *edesc; 717 717 struct edmacc_param p; ··· 729 727 } else if (edesc->processed == edesc->pset_nr) { 730 728 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); 731 729 edesc->residue = 0; 732 - edma_stop(echan->ch_num); 730 + edma_stop(cc, echan->ch_num); 733 731 vchan_cookie_complete(&edesc->vdesc); 734 732 echan->edesc = NULL; 735 733 } else { 736 734 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); 737 735 738 - edma_pause(echan->ch_num); 736 + edma_pause(cc, echan->ch_num); 739 737 740 738 /* Update statistics for tx_status */ 741 739 edesc->residue -= edesc->sg_len; ··· 746 744 } 747 745 break; 748 746 case EDMA_DMA_CC_ERROR: 749 - edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); 747 + edma_read_slot(cc, echan->slot[0], &p); 750 748 751 749 /* 752 750 * Issue later based on missed flag which will be sure ··· 769 767 * missed, so its safe to issue it here. 
770 768 */ 771 769 dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n"); 772 - edma_clean_channel(echan->ch_num); 773 - edma_stop(echan->ch_num); 774 - edma_start(echan->ch_num); 775 - edma_trigger_channel(echan->ch_num); 770 + edma_clean_channel(cc, echan->ch_num); 771 + edma_stop(cc, echan->ch_num); 772 + edma_start(cc, echan->ch_num); 773 + edma_trigger_channel(cc, echan->ch_num); 776 774 } 777 775 break; 778 776 default: ··· 791 789 int a_ch_num; 792 790 LIST_HEAD(descs); 793 791 794 - a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, 795 - echan, EVENTQ_DEFAULT); 792 + a_ch_num = edma_alloc_channel(echan->ecc->cc, echan->ch_num, 793 + edma_callback, echan, EVENTQ_DEFAULT); 796 794 797 795 if (a_ch_num < 0) { 798 796 ret = -ENODEV; ··· 816 814 return 0; 817 815 818 816 err_wrong_chan: 819 - edma_free_channel(a_ch_num); 817 + edma_free_channel(echan->ecc->cc, a_ch_num); 820 818 err_no_chan: 821 819 return ret; 822 820 } ··· 829 827 int i; 830 828 831 829 /* Terminate transfers */ 832 - edma_stop(echan->ch_num); 830 + edma_stop(echan->ecc->cc, echan->ch_num); 833 831 834 832 vchan_free_chan_resources(&echan->vchan); 835 833 836 834 /* Free EDMA PaRAM slots */ 837 835 for (i = 1; i < EDMA_MAX_SLOTS; i++) { 838 836 if (echan->slot[i] >= 0) { 839 - edma_free_slot(echan->slot[i]); 837 + edma_free_slot(echan->ecc->cc, echan->slot[i]); 840 838 echan->slot[i] = -1; 841 839 } 842 840 } 843 841 844 842 /* Free EDMA channel */ 845 843 if (echan->alloced) { 846 - edma_free_channel(echan->ch_num); 844 + edma_free_channel(echan->ecc->cc, echan->ch_num); 847 845 echan->alloced = false; 848 846 } 849 847 ··· 873 871 * We always read the dst/src position from the first RamPar 874 872 * pset. That's the one which is active now. 875 873 */ 876 - pos = edma_get_position(edesc->echan->slot[0], dst); 874 + pos = edma_get_position(edesc->echan->ecc->cc, edesc->echan->slot[0], 875 + dst); 877 876 878 877 /* 879 878 * Cyclic is simple. 
Just subtract pset[0].addr from pos. ··· 1011 1008 return -ENOMEM; 1012 1009 } 1013 1010 1011 + ecc->cc = edma_get_data(pdev->dev.parent); 1012 + if (!ecc->cc) 1013 + return -ENODEV; 1014 + 1014 1015 ecc->ctlr = pdev->id; 1015 - ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); 1016 + ecc->dummy_slot = edma_alloc_slot(ecc->cc, EDMA_SLOT_ANY); 1016 1017 if (ecc->dummy_slot < 0) { 1017 1018 dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); 1018 1019 return ecc->dummy_slot; ··· 1049 1042 return 0; 1050 1043 1051 1044 err_reg1: 1052 - edma_free_slot(ecc->dummy_slot); 1045 + edma_free_slot(ecc->cc, ecc->dummy_slot); 1053 1046 return ret; 1054 1047 } 1055 1048 ··· 1062 1055 if (parent_node) 1063 1056 of_dma_controller_free(parent_node); 1064 1057 dma_async_device_unregister(&ecc->dma_slave); 1065 - edma_free_slot(ecc->dummy_slot); 1058 + edma_free_slot(ecc->cc, ecc->dummy_slot); 1066 1059 1067 1060 return 0; 1068 1061 }
+22 -16
include/linux/platform_data/edma.h
··· 92 92 93 93 #define EDMA_MAX_CC 2 94 94 95 + struct edma; 96 + 97 + struct edma *edma_get_data(struct device *edma_dev); 98 + 95 99 /* alloc/free DMA channels and their dedicated parameter RAM slots */ 96 - int edma_alloc_channel(int channel, 100 + int edma_alloc_channel(struct edma *cc, int channel, 97 101 void (*callback)(unsigned channel, u16 ch_status, void *data), 98 102 void *data, enum dma_event_q); 99 - void edma_free_channel(unsigned channel); 103 + void edma_free_channel(struct edma *cc, unsigned channel); 100 104 101 105 /* alloc/free parameter RAM slots */ 102 - int edma_alloc_slot(unsigned ctlr, int slot); 103 - void edma_free_slot(unsigned slot); 106 + int edma_alloc_slot(struct edma *cc, int slot); 107 + void edma_free_slot(struct edma *cc, unsigned slot); 104 108 105 109 /* calls that operate on part of a parameter RAM slot */ 106 - dma_addr_t edma_get_position(unsigned slot, bool dst); 107 - void edma_link(unsigned from, unsigned to); 110 + dma_addr_t edma_get_position(struct edma *cc, unsigned slot, bool dst); 111 + void edma_link(struct edma *cc, unsigned from, unsigned to); 108 112 109 113 /* calls that operate on an entire parameter RAM slot */ 110 - void edma_write_slot(unsigned slot, const struct edmacc_param *params); 111 - void edma_read_slot(unsigned slot, struct edmacc_param *params); 114 + void edma_write_slot(struct edma *cc, unsigned slot, 115 + const struct edmacc_param *params); 116 + void edma_read_slot(struct edma *cc, unsigned slot, 117 + struct edmacc_param *params); 112 118 113 119 /* channel control operations */ 114 - int edma_start(unsigned channel); 115 - void edma_stop(unsigned channel); 116 - void edma_clean_channel(unsigned channel); 117 - void edma_pause(unsigned channel); 118 - void edma_resume(unsigned channel); 120 + int edma_start(struct edma *cc, unsigned channel); 121 + void edma_stop(struct edma *cc, unsigned channel); 122 + void edma_clean_channel(struct edma *cc, unsigned channel); 123 + void 
edma_pause(struct edma *cc, unsigned channel); 124 + void edma_resume(struct edma *cc, unsigned channel); 125 + int edma_trigger_channel(struct edma *cc, unsigned channel); 119 126 120 - void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no); 127 + void edma_assign_channel_eventq(struct edma *cc, unsigned channel, 128 + enum dma_event_q eventq_no); 121 129 122 130 struct edma_rsv_info { 123 131 ··· 148 140 s8 (*queue_priority_mapping)[2]; 149 141 const s16 (*xbar_chans)[2]; 150 142 }; 151 - 152 - int edma_trigger_channel(unsigned); 153 143 154 144 #endif