Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'devel-stable' into for-next

+218 -9
+24 -1
arch/arm/include/asm/dma-mapping.h
··· 58 58 #ifndef __arch_pfn_to_dma 59 59 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) 60 60 { 61 + if (dev) 62 + pfn -= dev->dma_pfn_offset; 61 63 return (dma_addr_t)__pfn_to_bus(pfn); 62 64 } 63 65 64 66 static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) 65 67 { 66 - return __bus_to_pfn(addr); 68 + unsigned long pfn = __bus_to_pfn(addr); 69 + 70 + if (dev) 71 + pfn += dev->dma_pfn_offset; 72 + 73 + return pfn; 67 74 } 68 75 69 76 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) 70 77 { 78 + if (dev) { 79 + unsigned long pfn = dma_to_pfn(dev, addr); 80 + 81 + return phys_to_virt(__pfn_to_phys(pfn)); 82 + } 83 + 71 84 return (void *)__bus_to_virt((unsigned long)addr); 72 85 } 73 86 74 87 static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) 75 88 { 89 + if (dev) 90 + return pfn_to_dma(dev, virt_to_pfn(addr)); 91 + 76 92 return (dma_addr_t)__virt_to_bus((unsigned long)(addr)); 77 93 } 78 94 ··· 120 104 return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask); 121 105 } 122 106 #define dma_max_pfn(dev) dma_max_pfn(dev) 107 + 108 + static inline int set_arch_dma_coherent_ops(struct device *dev) 109 + { 110 + set_dma_ops(dev, &arm_coherent_dma_ops); 111 + return 0; 112 + } 113 + #define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) 123 114 124 115 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 125 116 {
+2 -2
arch/arm/mm/dma-mapping.c
··· 885 885 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, 886 886 size_t size, enum dma_data_direction dir) 887 887 { 888 - unsigned long paddr; 888 + phys_addr_t paddr; 889 889 890 890 dma_cache_maint_page(page, off, size, dir, dmac_map_area); 891 891 ··· 901 901 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, 902 902 size_t size, enum dma_data_direction dir) 903 903 { 904 - unsigned long paddr = page_to_phys(page) + off; 904 + phys_addr_t paddr = page_to_phys(page) + off; 905 905 906 906 /* FIXME: non-speculating: not required */ 907 907 /* in any case, don't bother invalidating if DMA to device */
+110
drivers/of/address.c
··· 721 721 return ioremap(res.start, resource_size(&res)); 722 722 } 723 723 EXPORT_SYMBOL(of_iomap); 724 + 725 + /** 726 + * of_dma_get_range - Get DMA range info 727 + * @np: device node to get DMA range info 728 + * @dma_addr: pointer to store initial DMA address of DMA range 729 + * @paddr: pointer to store initial CPU address of DMA range 730 + * @size: pointer to store size of DMA range 731 + * 732 + * Look in bottom up direction for the first "dma-ranges" property 733 + * and parse it. 734 + * dma-ranges format: 735 + * DMA addr (dma_addr) : naddr cells 736 + * CPU addr (phys_addr_t) : pna cells 737 + * size : nsize cells 738 + * 739 + * It returns -ENODEV if "dma-ranges" property was not found 740 + * for this device in DT. 741 + */ 742 + int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size) 743 + { 744 + struct device_node *node = of_node_get(np); 745 + const __be32 *ranges = NULL; 746 + int len, naddr, nsize, pna; 747 + int ret = 0; 748 + u64 dmaaddr; 749 + 750 + if (!node) 751 + return -EINVAL; 752 + 753 + while (1) { 754 + naddr = of_n_addr_cells(node); 755 + nsize = of_n_size_cells(node); 756 + node = of_get_next_parent(node); 757 + if (!node) 758 + break; 759 + 760 + ranges = of_get_property(node, "dma-ranges", &len); 761 + 762 + /* Ignore empty ranges, they imply no translation required */ 763 + if (ranges && len > 0) 764 + break; 765 + 766 + /* 767 + * At least an empty ranges property has to be defined for the 768 + * parent node if DMA is supported 769 + */ 770 + if (!ranges) 771 + break; 772 + } 773 + 774 + if (!ranges) { 775 + pr_debug("%s: no dma-ranges found for node(%s)\n", 776 + __func__, np->full_name); 777 + ret = -ENODEV; 778 + goto out; 779 + } 780 + 781 + len /= sizeof(u32); 782 + 783 + pna = of_n_addr_cells(node); 784 + 785 + /* dma-ranges format: 786 + * DMA addr : naddr cells 787 + * CPU addr : pna cells 788 + * size : nsize cells 789 + */ 790 + dmaaddr = of_read_number(ranges, naddr); 791 + *paddr = of_translate_dma_address(np, ranges); 792 + if (*paddr == OF_BAD_ADDR) { 793 + pr_err("%s: translation of DMA address(%pad) to CPU address failed node(%s)\n", 794 + __func__, dma_addr, np->full_name); 795 + ret = -EINVAL; 796 + goto out; 797 + } 798 + *dma_addr = dmaaddr; 799 + 800 + *size = of_read_number(ranges + naddr + pna, nsize); 801 + 802 + pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n", 803 + *dma_addr, *paddr, *size); 804 + 805 + out: 806 + of_node_put(node); 807 + 808 + return ret; 809 + } 810 + EXPORT_SYMBOL_GPL(of_dma_get_range); 811 + 812 + /** 813 + * of_dma_is_coherent - Check if device is coherent 814 + * @np: device node 815 + * 816 + * It returns true if "dma-coherent" property was found 817 + * for this device in DT. 818 + */ 819 + bool of_dma_is_coherent(struct device_node *np) 820 + { 821 + struct device_node *node = of_node_get(np); 822 + 823 + while (node) { 824 + if (of_property_read_bool(node, "dma-coherent")) { 825 + of_node_put(node); 826 + return true; 827 + } 828 + node = of_get_next_parent(node); 829 + } 830 + of_node_put(node); 831 + return false; 832 + } 833 + EXPORT_SYMBOL_GPL(of_dma_is_coherent);
+59 -6
drivers/of/platform.c
··· 189 189 EXPORT_SYMBOL(of_device_alloc); 190 190 191 191 /** 192 + * of_dma_configure - Setup DMA configuration 193 + * @dev: Device to apply DMA configuration 194 + * 195 + * Try to get the device's DMA configuration from DT and update it 196 + * accordingly. 197 + * 198 + * In case platform code needs to use its own special DMA configuration, it 199 + * can use the platform bus notifier and handle the BUS_NOTIFY_ADD_DEVICE event 200 + * to fix up the DMA configuration. 201 + */ 202 + static void of_dma_configure(struct platform_device *pdev) 203 + { 204 + u64 dma_addr, paddr, size; 205 + int ret; 206 + struct device *dev = &pdev->dev; 207 + 208 + #if defined(CONFIG_MICROBLAZE) 209 + pdev->archdata.dma_mask = 0xffffffffUL; 210 + #endif 211 + 212 + /* 213 + * Set default dma-mask to 32 bit. Drivers are expected to setup 214 + * the correct supported dma_mask. 215 + */ 216 + dev->coherent_dma_mask = DMA_BIT_MASK(32); 217 + 218 + /* 219 + * Set it to coherent_dma_mask by default if the architecture 220 + * code has not set it. 221 + */ 222 + if (!dev->dma_mask) 223 + dev->dma_mask = &dev->coherent_dma_mask; 224 + 225 + /* 226 + * if dma-coherent property exists, call arch hook to setup 227 + * dma coherent operations. 228 + */ 229 + if (of_dma_is_coherent(dev->of_node)) { 230 + set_arch_dma_coherent_ops(dev); 231 + dev_dbg(dev, "device is dma coherent\n"); 232 + } 233 + 234 + /* 235 + * if dma-ranges property doesn't exist - just return else 236 + * setup the dma offset 237 + */ 238 + ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size); 239 + if (ret < 0) { 240 + dev_dbg(dev, "no dma range information to setup\n"); 241 + return; 242 + } 243 + 244 + /* DMA ranges found. Calculate and set dma_pfn_offset */ 245 + dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr); 246 + dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); 247 + } 248 + 249 + /** 192 250 * of_platform_device_create_pdata - Alloc, initialize and register an of_device 193 251 * @np: pointer to node to create device for 194 252 * @bus_id: name to assign device ··· 271 213 if (!dev) 272 214 return NULL; 273 215 274 - #if defined(CONFIG_MICROBLAZE) 275 - dev->archdata.dma_mask = 0xffffffffUL; 276 - #endif 277 - dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 278 - if (!dev->dev.dma_mask) 279 - dev->dev.dma_mask = &dev->dev.coherent_dma_mask; 216 + of_dma_configure(dev); 280 217 dev->dev.bus = &platform_bus_type; 281 218 dev->dev.platform_data = platform_data; 282 219
+2
include/linux/device.h
··· 685 685 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all 686 686 * hardware supports 64-bit addresses for consistent allocations 687 687 * such descriptors. 688 + * @dma_pfn_offset: offset of DMA memory range relatively of RAM 688 689 * @dma_parms: A low level driver may set these to teach IOMMU code about 689 690 * segment limitations. 690 691 * @dma_pools: Dma pools (if dma'ble device). ··· 751 750 not all hardware supports 752 751 64 bit addresses for consistent 753 752 allocations such descriptors. */ 753 + unsigned long dma_pfn_offset; 754 754 755 755 struct device_dma_parameters *dma_parms; 756 756
+7
include/linux/dma-mapping.h
··· 123 123 124 124 extern u64 dma_get_required_mask(struct device *dev); 125 125 126 + #ifndef set_arch_dma_coherent_ops 127 + static inline int set_arch_dma_coherent_ops(struct device *dev) 128 + { 129 + return 0; 130 + } 131 + #endif 132 + 126 133 static inline unsigned int dma_get_max_seg_size(struct device *dev) 127 134 { 128 135 return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
+14
include/linux/of_address.h
··· 63 63 extern struct of_pci_range *of_pci_range_parser_one( 64 64 struct of_pci_range_parser *parser, 65 65 struct of_pci_range *range); 66 + extern int of_dma_get_range(struct device_node *np, u64 *dma_addr, 67 + u64 *paddr, u64 *size); 68 + extern bool of_dma_is_coherent(struct device_node *np); 66 69 #else /* CONFIG_OF_ADDRESS */ 67 70 static inline struct device_node *of_find_matching_node_by_address( 68 71 struct device_node *from, ··· 92 89 struct of_pci_range *range) 93 90 { 94 91 return NULL; 92 + } 93 + 94 + static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr, 95 + u64 *paddr, u64 *size) 96 + { 97 + return -ENODEV; 98 + } 99 + 100 + static inline bool of_dma_is_coherent(struct device_node *np) 101 + { 102 + return false; 95 103 } 96 104 #endif /* CONFIG_OF_ADDRESS */ 97 105