Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] Refactor 64 bits DMA operations

This patch completely refactors DMA operations for 64-bit powerpc. 32-bit
is untouched for now.

We use the new dev_archdata structure to add the dma operations pointer
and associated data to struct device. While at it, we also add the OF node
pointer and numa node. In the future, we might want to look into merging
that with pci_dn as well.

The old vio, pci-iommu and pci-direct DMA ops are gone. They are now replaced
by a set of generic iommu and direct DMA ops (non-PCI-specific) that can be
used by bus types. The top-level implementation is now inline.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Benjamin Herrenschmidt and committed by
Paul Mackerras
12d04eef 7c719871

+457 -617
+1 -2
arch/powerpc/kernel/Makefile
··· 62 62 module-$(CONFIG_PPC64) += module_64.o 63 63 obj-$(CONFIG_MODULES) += $(module-y) 64 64 65 - pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \ 66 - pci_direct_iommu.o iomap.o 65 + pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o iomap.o 67 66 pci32-$(CONFIG_PPC32) := pci_32.o 68 67 obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y) 69 68 kexec-$(CONFIG_PPC64) := machine_kexec_64.o
+143 -109
arch/powerpc/kernel/dma_64.c
··· 1 1 /* 2 - * Copyright (C) 2004 IBM Corporation 2 + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation 3 3 * 4 - * Implements the generic device dma API for ppc64. Handles 5 - * the pci and vio busses 4 + * Provide default implementations of the DMA mapping callbacks for 5 + * directly mapped busses and busses using the iommu infrastructure 6 6 */ 7 7 8 8 #include <linux/device.h> 9 9 #include <linux/dma-mapping.h> 10 - /* Include the busses we support */ 11 - #include <linux/pci.h> 12 - #include <asm/vio.h> 13 - #include <asm/ibmebus.h> 14 - #include <asm/scatterlist.h> 15 10 #include <asm/bug.h> 11 + #include <asm/iommu.h> 12 + #include <asm/abs_addr.h> 16 13 17 - static struct dma_mapping_ops *get_dma_ops(struct device *dev) 14 + /* 15 + * Generic iommu implementation 16 + */ 17 + 18 + static inline unsigned long device_to_mask(struct device *dev) 18 19 { 19 - #ifdef CONFIG_PCI 20 - if (dev->bus == &pci_bus_type) 21 - return &pci_dma_ops; 22 - #endif 23 - #ifdef CONFIG_IBMVIO 24 - if (dev->bus == &vio_bus_type) 25 - return &vio_dma_ops; 26 - #endif 27 - #ifdef CONFIG_IBMEBUS 28 - if (dev->bus == &ibmebus_bus_type) 29 - return &ibmebus_dma_ops; 30 - #endif 31 - return NULL; 20 + if (dev->dma_mask && *dev->dma_mask) 21 + return *dev->dma_mask; 22 + /* Assume devices without mask can take 32 bit addresses */ 23 + return 0xfffffffful; 32 24 } 33 25 34 - int dma_supported(struct device *dev, u64 mask) 26 + 27 + /* Allocates a contiguous real buffer and creates mappings over it. 28 + * Returns the virtual address of the buffer and sets dma_handle 29 + * to the dma address (mapping) of the first page. 
30 + */ 31 + static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, 32 + dma_addr_t *dma_handle, gfp_t flag) 35 33 { 36 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 37 - 38 - BUG_ON(!dma_ops); 39 - 40 - return dma_ops->dma_supported(dev, mask); 34 + return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle, 35 + device_to_mask(dev), flag, 36 + dev->archdata.numa_node); 41 37 } 42 - EXPORT_SYMBOL(dma_supported); 43 38 44 - int dma_set_mask(struct device *dev, u64 dma_mask) 39 + static void dma_iommu_free_coherent(struct device *dev, size_t size, 40 + void *vaddr, dma_addr_t dma_handle) 45 41 { 46 - #ifdef CONFIG_PCI 47 - if (dev->bus == &pci_bus_type) 48 - return pci_set_dma_mask(to_pci_dev(dev), dma_mask); 49 - #endif 50 - #ifdef CONFIG_IBMVIO 51 - if (dev->bus == &vio_bus_type) 52 - return -EIO; 53 - #endif /* CONFIG_IBMVIO */ 54 - #ifdef CONFIG_IBMEBUS 55 - if (dev->bus == &ibmebus_bus_type) 56 - return -EIO; 57 - #endif 58 - BUG(); 59 - return 0; 42 + iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle); 60 43 } 61 - EXPORT_SYMBOL(dma_set_mask); 62 44 63 - void *dma_alloc_coherent(struct device *dev, size_t size, 64 - dma_addr_t *dma_handle, gfp_t flag) 45 + /* Creates TCEs for a user provided buffer. The user buffer must be 46 + * contiguous real kernel storage (not vmalloc). The address of the buffer 47 + * passed here is the kernel (virtual) address of the buffer. The buffer 48 + * need not be page aligned, the dma_addr_t returned will point to the same 49 + * byte within the page as vaddr. 
50 + */ 51 + static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr, 52 + size_t size, 53 + enum dma_data_direction direction) 65 54 { 66 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 67 - 68 - BUG_ON(!dma_ops); 69 - 70 - return dma_ops->alloc_coherent(dev, size, dma_handle, flag); 55 + return iommu_map_single(dev->archdata.dma_data, vaddr, size, 56 + device_to_mask(dev), direction); 71 57 } 72 - EXPORT_SYMBOL(dma_alloc_coherent); 73 58 74 - void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 75 - dma_addr_t dma_handle) 59 + 60 + static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle, 61 + size_t size, 62 + enum dma_data_direction direction) 76 63 { 77 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 78 - 79 - BUG_ON(!dma_ops); 80 - 81 - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 64 + iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction); 82 65 } 83 - EXPORT_SYMBOL(dma_free_coherent); 84 66 85 - dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, 86 - enum dma_data_direction direction) 67 + 68 + static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 69 + int nelems, enum dma_data_direction direction) 87 70 { 88 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 89 - 90 - BUG_ON(!dma_ops); 91 - 92 - return dma_ops->map_single(dev, cpu_addr, size, direction); 71 + return iommu_map_sg(dev->archdata.dma_data, sglist, nelems, 72 + device_to_mask(dev), direction); 93 73 } 94 - EXPORT_SYMBOL(dma_map_single); 95 74 96 - void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, 97 - enum dma_data_direction direction) 75 + static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, 76 + int nelems, enum dma_data_direction direction) 98 77 { 99 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 100 - 101 - BUG_ON(!dma_ops); 102 - 103 - dma_ops->unmap_single(dev, dma_addr, size, direction); 
78 + iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction); 104 79 } 105 - EXPORT_SYMBOL(dma_unmap_single); 106 80 107 - dma_addr_t dma_map_page(struct device *dev, struct page *page, 108 - unsigned long offset, size_t size, 109 - enum dma_data_direction direction) 81 + /* We support DMA to/from any memory page via the iommu */ 82 + static int dma_iommu_dma_supported(struct device *dev, u64 mask) 110 83 { 111 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 84 + struct iommu_table *tbl = dev->archdata.dma_data; 112 85 113 - BUG_ON(!dma_ops); 114 - 115 - return dma_ops->map_single(dev, page_address(page) + offset, size, 116 - direction); 86 + if (!tbl || tbl->it_offset > mask) { 87 + printk(KERN_INFO 88 + "Warning: IOMMU offset too big for device mask\n"); 89 + if (tbl) 90 + printk(KERN_INFO 91 + "mask: 0x%08lx, table offset: 0x%08lx\n", 92 + mask, tbl->it_offset); 93 + else 94 + printk(KERN_INFO "mask: 0x%08lx, table unavailable\n", 95 + mask); 96 + return 0; 97 + } else 98 + return 1; 117 99 } 118 - EXPORT_SYMBOL(dma_map_page); 119 100 120 - void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, 121 - enum dma_data_direction direction) 101 + struct dma_mapping_ops dma_iommu_ops = { 102 + .alloc_coherent = dma_iommu_alloc_coherent, 103 + .free_coherent = dma_iommu_free_coherent, 104 + .map_single = dma_iommu_map_single, 105 + .unmap_single = dma_iommu_unmap_single, 106 + .map_sg = dma_iommu_map_sg, 107 + .unmap_sg = dma_iommu_unmap_sg, 108 + .dma_supported = dma_iommu_dma_supported, 109 + }; 110 + EXPORT_SYMBOL(dma_iommu_ops); 111 + 112 + /* 113 + * Generic direct DMA implementation 114 + */ 115 + 116 + static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 117 + dma_addr_t *dma_handle, gfp_t flag) 122 118 { 123 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 119 + void *ret; 124 120 125 - BUG_ON(!dma_ops); 126 - 127 - dma_ops->unmap_single(dev, dma_address, size, direction); 121 + /* TODO: Maybe use the 
numa node here too ? */ 122 + ret = (void *)__get_free_pages(flag, get_order(size)); 123 + if (ret != NULL) { 124 + memset(ret, 0, size); 125 + *dma_handle = virt_to_abs(ret); 126 + } 127 + return ret; 128 128 } 129 - EXPORT_SYMBOL(dma_unmap_page); 130 129 131 - int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 132 - enum dma_data_direction direction) 130 + static void dma_direct_free_coherent(struct device *dev, size_t size, 131 + void *vaddr, dma_addr_t dma_handle) 133 132 { 134 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 135 - 136 - BUG_ON(!dma_ops); 137 - 138 - return dma_ops->map_sg(dev, sg, nents, direction); 133 + free_pages((unsigned long)vaddr, get_order(size)); 139 134 } 140 - EXPORT_SYMBOL(dma_map_sg); 141 135 142 - void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, 143 - enum dma_data_direction direction) 136 + static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr, 137 + size_t size, 138 + enum dma_data_direction direction) 144 139 { 145 - struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 146 - 147 - BUG_ON(!dma_ops); 148 - 149 - dma_ops->unmap_sg(dev, sg, nhwentries, direction); 140 + return virt_to_abs(ptr); 150 141 } 151 - EXPORT_SYMBOL(dma_unmap_sg); 142 + 143 + static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr, 144 + size_t size, 145 + enum dma_data_direction direction) 146 + { 147 + } 148 + 149 + static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg, 150 + int nents, enum dma_data_direction direction) 151 + { 152 + int i; 153 + 154 + for (i = 0; i < nents; i++, sg++) { 155 + sg->dma_address = page_to_phys(sg->page) + sg->offset; 156 + sg->dma_length = sg->length; 157 + } 158 + 159 + return nents; 160 + } 161 + 162 + static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, 163 + int nents, enum dma_data_direction direction) 164 + { 165 + } 166 + 167 + static int dma_direct_dma_supported(struct device *dev, u64 
mask) 168 + { 169 + /* Could be improved to check for memory though it better be 170 + * done via some global so platforms can set the limit in case 171 + * they have limited DMA windows 172 + */ 173 + return mask >= DMA_32BIT_MASK; 174 + } 175 + 176 + struct dma_mapping_ops dma_direct_ops = { 177 + .alloc_coherent = dma_direct_alloc_coherent, 178 + .free_coherent = dma_direct_free_coherent, 179 + .map_single = dma_direct_map_single, 180 + .unmap_single = dma_direct_unmap_single, 181 + .map_sg = dma_direct_map_sg, 182 + .unmap_sg = dma_direct_unmap_sg, 183 + .dma_supported = dma_direct_dma_supported, 184 + }; 185 + EXPORT_SYMBOL(dma_direct_ops);
+5 -1
arch/powerpc/kernel/ibmebus.c
··· 112 112 return 1; 113 113 } 114 114 115 - struct dma_mapping_ops ibmebus_dma_ops = { 115 + static struct dma_mapping_ops ibmebus_dma_ops = { 116 116 .alloc_coherent = ibmebus_alloc_coherent, 117 117 .free_coherent = ibmebus_free_coherent, 118 118 .map_single = ibmebus_map_single, ··· 175 175 dev->ofdev.dev.parent = &ibmebus_bus_device.ofdev.dev; 176 176 dev->ofdev.dev.bus = &ibmebus_bus_type; 177 177 dev->ofdev.dev.release = ibmebus_dev_release; 178 + 179 + dev->ofdev.dev.archdata.of_node = dev->ofdev.node; 180 + dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops; 181 + dev->ofdev.dev.archdata.numa_node = of_node_to_nid(dev->ofdev.node); 178 182 179 183 /* An ibmebusdev is based on a of_device. We have to change the 180 184 * bus type to use our own DMA mapping operations.
+3 -3
arch/powerpc/kernel/iommu.c
··· 258 258 spin_unlock_irqrestore(&(tbl->it_lock), flags); 259 259 } 260 260 261 - int iommu_map_sg(struct device *dev, struct iommu_table *tbl, 262 - struct scatterlist *sglist, int nelems, 263 - unsigned long mask, enum dma_data_direction direction) 261 + int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, 262 + int nelems, unsigned long mask, 263 + enum dma_data_direction direction) 264 264 { 265 265 dma_addr_t dma_next = 0, dma_addr; 266 266 unsigned long flags;
+8 -1
arch/powerpc/kernel/of_platform.c
··· 22 22 #include <asm/dcr.h> 23 23 #include <asm/of_device.h> 24 24 #include <asm/of_platform.h> 25 - 25 + #include <asm/topology.h> 26 26 27 27 /* 28 28 * The list of OF IDs below is used for matching bus types in the ··· 221 221 dev->dev.parent = parent; 222 222 dev->dev.bus = &of_platform_bus_type; 223 223 dev->dev.release = of_release_dev; 224 + dev->dev.archdata.of_node = np; 225 + dev->dev.archdata.numa_node = of_node_to_nid(np); 226 + 227 + /* We do not fill the DMA ops for platform devices by default. 228 + * This is currently the responsibility of the platform code 229 + * to do such, possibly using a device notifier 230 + */ 224 231 225 232 if (bus_id) 226 233 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+23 -3
arch/powerpc/kernel/pci_64.c
··· 61 61 62 62 LIST_HEAD(hose_list); 63 63 64 - struct dma_mapping_ops pci_dma_ops; 64 + struct dma_mapping_ops *pci_dma_ops; 65 65 EXPORT_SYMBOL(pci_dma_ops); 66 66 67 67 int global_phb_number; /* Global phb counter */ ··· 1205 1205 } 1206 1206 EXPORT_SYMBOL(pcibios_fixup_device_resources); 1207 1207 1208 + void __devinit pcibios_setup_new_device(struct pci_dev *dev) 1209 + { 1210 + struct dev_archdata *sd = &dev->dev.archdata; 1211 + 1212 + sd->of_node = pci_device_to_OF_node(dev); 1213 + 1214 + DBG("PCI device %s OF node: %s\n", pci_name(dev), 1215 + sd->of_node ? sd->of_node->full_name : "<none>"); 1216 + 1217 + sd->dma_ops = pci_dma_ops; 1218 + #ifdef CONFIG_NUMA 1219 + sd->numa_node = pcibus_to_node(dev->bus); 1220 + #else 1221 + sd->numa_node = -1; 1222 + #endif 1223 + if (ppc_md.pci_dma_dev_setup) 1224 + ppc_md.pci_dma_dev_setup(dev); 1225 + } 1226 + EXPORT_SYMBOL(pcibios_setup_new_device); 1208 1227 1209 1228 static void __devinit do_bus_setup(struct pci_bus *bus) 1210 1229 { 1211 1230 struct pci_dev *dev; 1212 1231 1213 - ppc_md.iommu_bus_setup(bus); 1232 + if (ppc_md.pci_dma_bus_setup) 1233 + ppc_md.pci_dma_bus_setup(bus); 1214 1234 1215 1235 list_for_each_entry(dev, &bus->devices, bus_list) 1216 - ppc_md.iommu_dev_setup(dev); 1236 + pcibios_setup_new_device(dev); 1217 1237 1218 1238 /* Read default IRQs and fixup if necessary */ 1219 1239 list_for_each_entry(dev, &bus->devices, bus_list) {
-98
arch/powerpc/kernel/pci_direct_iommu.c
··· 1 - /* 2 - * Support for DMA from PCI devices to main memory on 3 - * machines without an iommu or with directly addressable 4 - * RAM (typically a pmac with 2Gb of RAM or less) 5 - * 6 - * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License 10 - * as published by the Free Software Foundation; either version 11 - * 2 of the License, or (at your option) any later version. 12 - */ 13 - 14 - #include <linux/kernel.h> 15 - #include <linux/pci.h> 16 - #include <linux/delay.h> 17 - #include <linux/string.h> 18 - #include <linux/init.h> 19 - #include <linux/bootmem.h> 20 - #include <linux/mm.h> 21 - #include <linux/dma-mapping.h> 22 - 23 - #include <asm/sections.h> 24 - #include <asm/io.h> 25 - #include <asm/prom.h> 26 - #include <asm/pci-bridge.h> 27 - #include <asm/machdep.h> 28 - #include <asm/pmac_feature.h> 29 - #include <asm/abs_addr.h> 30 - #include <asm/ppc-pci.h> 31 - 32 - static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 33 - dma_addr_t *dma_handle, gfp_t flag) 34 - { 35 - void *ret; 36 - 37 - ret = (void *)__get_free_pages(flag, get_order(size)); 38 - if (ret != NULL) { 39 - memset(ret, 0, size); 40 - *dma_handle = virt_to_abs(ret); 41 - } 42 - return ret; 43 - } 44 - 45 - static void pci_direct_free_coherent(struct device *hwdev, size_t size, 46 - void *vaddr, dma_addr_t dma_handle) 47 - { 48 - free_pages((unsigned long)vaddr, get_order(size)); 49 - } 50 - 51 - static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr, 52 - size_t size, enum dma_data_direction direction) 53 - { 54 - return virt_to_abs(ptr); 55 - } 56 - 57 - static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr, 58 - size_t size, enum dma_data_direction direction) 59 - { 60 - } 61 - 62 - static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg, 63 - int 
nents, enum dma_data_direction direction) 64 - { 65 - int i; 66 - 67 - for (i = 0; i < nents; i++, sg++) { 68 - sg->dma_address = page_to_phys(sg->page) + sg->offset; 69 - sg->dma_length = sg->length; 70 - } 71 - 72 - return nents; 73 - } 74 - 75 - static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg, 76 - int nents, enum dma_data_direction direction) 77 - { 78 - } 79 - 80 - static int pci_direct_dma_supported(struct device *dev, u64 mask) 81 - { 82 - return mask < 0x100000000ull; 83 - } 84 - 85 - static struct dma_mapping_ops pci_direct_ops = { 86 - .alloc_coherent = pci_direct_alloc_coherent, 87 - .free_coherent = pci_direct_free_coherent, 88 - .map_single = pci_direct_map_single, 89 - .unmap_single = pci_direct_unmap_single, 90 - .map_sg = pci_direct_map_sg, 91 - .unmap_sg = pci_direct_unmap_sg, 92 - .dma_supported = pci_direct_dma_supported, 93 - }; 94 - 95 - void __init pci_direct_iommu_init(void) 96 - { 97 - pci_dma_ops = pci_direct_ops; 98 - }
-164
arch/powerpc/kernel/pci_iommu.c
··· 1 - /* 2 - * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 3 - * 4 - * Rewrite, cleanup, new allocation schemes: 5 - * Copyright (C) 2004 Olof Johansson, IBM Corporation 6 - * 7 - * Dynamic DMA mapping support, platform-independent parts. 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of the GNU General Public License as published by 11 - * the Free Software Foundation; either version 2 of the License, or 12 - * (at your option) any later version. 13 - * 14 - * This program is distributed in the hope that it will be useful, 15 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 - * GNU General Public License for more details. 18 - * 19 - * You should have received a copy of the GNU General Public License 20 - * along with this program; if not, write to the Free Software 21 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 - */ 23 - 24 - 25 - #include <linux/init.h> 26 - #include <linux/types.h> 27 - #include <linux/slab.h> 28 - #include <linux/mm.h> 29 - #include <linux/spinlock.h> 30 - #include <linux/string.h> 31 - #include <linux/pci.h> 32 - #include <linux/dma-mapping.h> 33 - #include <asm/io.h> 34 - #include <asm/prom.h> 35 - #include <asm/iommu.h> 36 - #include <asm/pci-bridge.h> 37 - #include <asm/machdep.h> 38 - #include <asm/ppc-pci.h> 39 - 40 - /* 41 - * We can use ->sysdata directly and avoid the extra work in 42 - * pci_device_to_OF_node since ->sysdata will have been initialised 43 - * in the iommu init code for all devices. 
44 - */ 45 - #define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata)) 46 - 47 - static inline struct iommu_table *device_to_table(struct device *hwdev) 48 - { 49 - struct pci_dev *pdev; 50 - 51 - if (!hwdev) { 52 - pdev = ppc64_isabridge_dev; 53 - if (!pdev) 54 - return NULL; 55 - } else 56 - pdev = to_pci_dev(hwdev); 57 - 58 - return PCI_DN(PCI_GET_DN(pdev))->iommu_table; 59 - } 60 - 61 - 62 - static inline unsigned long device_to_mask(struct device *hwdev) 63 - { 64 - struct pci_dev *pdev; 65 - 66 - if (!hwdev) { 67 - pdev = ppc64_isabridge_dev; 68 - if (!pdev) /* This is the best guess we can do */ 69 - return 0xfffffffful; 70 - } else 71 - pdev = to_pci_dev(hwdev); 72 - 73 - if (pdev->dma_mask) 74 - return pdev->dma_mask; 75 - 76 - /* Assume devices without mask can take 32 bit addresses */ 77 - return 0xfffffffful; 78 - } 79 - 80 - 81 - /* Allocates a contiguous real buffer and creates mappings over it. 82 - * Returns the virtual address of the buffer and sets dma_handle 83 - * to the dma address (mapping) of the first page. 84 - */ 85 - static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size, 86 - dma_addr_t *dma_handle, gfp_t flag) 87 - { 88 - return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle, 89 - device_to_mask(hwdev), flag, 90 - pcibus_to_node(to_pci_dev(hwdev)->bus)); 91 - } 92 - 93 - static void pci_iommu_free_coherent(struct device *hwdev, size_t size, 94 - void *vaddr, dma_addr_t dma_handle) 95 - { 96 - iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle); 97 - } 98 - 99 - /* Creates TCEs for a user provided buffer. The user buffer must be 100 - * contiguous real kernel storage (not vmalloc). The address of the buffer 101 - * passed here is the kernel (virtual) address of the buffer. The buffer 102 - * need not be page aligned, the dma_addr_t returned will point to the same 103 - * byte within the page as vaddr. 
104 - */ 105 - static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr, 106 - size_t size, enum dma_data_direction direction) 107 - { 108 - return iommu_map_single(device_to_table(hwdev), vaddr, size, 109 - device_to_mask(hwdev), direction); 110 - } 111 - 112 - 113 - static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle, 114 - size_t size, enum dma_data_direction direction) 115 - { 116 - iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction); 117 - } 118 - 119 - 120 - static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist, 121 - int nelems, enum dma_data_direction direction) 122 - { 123 - return iommu_map_sg(pdev, device_to_table(pdev), sglist, 124 - nelems, device_to_mask(pdev), direction); 125 - } 126 - 127 - static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist, 128 - int nelems, enum dma_data_direction direction) 129 - { 130 - iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction); 131 - } 132 - 133 - /* We support DMA to/from any memory page via the iommu */ 134 - static int pci_iommu_dma_supported(struct device *dev, u64 mask) 135 - { 136 - struct iommu_table *tbl = device_to_table(dev); 137 - 138 - if (!tbl || tbl->it_offset > mask) { 139 - printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n"); 140 - if (tbl) 141 - printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n", 142 - mask, tbl->it_offset); 143 - else 144 - printk(KERN_INFO "mask: 0x%08lx, table unavailable\n", 145 - mask); 146 - return 0; 147 - } else 148 - return 1; 149 - } 150 - 151 - struct dma_mapping_ops pci_iommu_ops = { 152 - .alloc_coherent = pci_iommu_alloc_coherent, 153 - .free_coherent = pci_iommu_free_coherent, 154 - .map_single = pci_iommu_map_single, 155 - .unmap_single = pci_iommu_unmap_single, 156 - .map_sg = pci_iommu_map_sg, 157 - .unmap_sg = pci_iommu_unmap_sg, 158 - .dma_supported = pci_iommu_dma_supported, 159 - }; 160 - 161 - void 
pci_iommu_init(void) 162 - { 163 - pci_dma_ops = pci_iommu_ops; 164 - }
+1
arch/powerpc/kernel/setup_64.c
··· 33 33 #include <linux/serial.h> 34 34 #include <linux/serial_8250.h> 35 35 #include <linux/bootmem.h> 36 + #include <linux/pci.h> 36 37 #include <asm/io.h> 37 38 #include <asm/kdump.h> 38 39 #include <asm/prom.h>
+21 -73
arch/powerpc/kernel/vio.c
··· 81 81 struct iommu_table *tbl; 82 82 unsigned long offset, size; 83 83 84 - dma_window = get_property(dev->dev.platform_data, 85 - "ibm,my-dma-window", NULL); 84 + dma_window = get_property(dev->dev.archdata.of_node, 85 + "ibm,my-dma-window", NULL); 86 86 if (!dma_window) 87 87 return NULL; 88 88 89 89 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); 90 90 91 - of_parse_dma_window(dev->dev.platform_data, dma_window, 92 - &tbl->it_index, &offset, &size); 91 + of_parse_dma_window(dev->dev.archdata.of_node, dma_window, 92 + &tbl->it_index, &offset, &size); 93 93 94 94 /* TCE table size - measured in tce entries */ 95 95 tbl->it_size = size >> IOMMU_PAGE_SHIFT; ··· 117 117 { 118 118 while (ids->type[0] != '\0') { 119 119 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && 120 - device_is_compatible(dev->dev.platform_data, ids->compat)) 120 + device_is_compatible(dev->dev.archdata.of_node, 121 + ids->compat)) 121 122 return ids; 122 123 ids++; 123 124 } ··· 199 198 /* vio_dev refcount hit 0 */ 200 199 static void __devinit vio_dev_release(struct device *dev) 201 200 { 202 - if (dev->platform_data) { 203 - /* XXX free TCE table */ 204 - of_node_put(dev->platform_data); 201 + if (dev->archdata.of_node) { 202 + /* XXX should free TCE table */ 203 + of_node_put(dev->archdata.of_node); 205 204 } 206 205 kfree(to_vio_dev(dev)); 207 206 } ··· 211 210 * @of_node: The OF node for this device. 212 211 * 213 212 * Creates and initializes a vio_dev structure from the data in 214 - * of_node (dev.platform_data) and adds it to the list of virtual devices. 213 + * of_node and adds it to the list of virtual devices. 215 214 * Returns a pointer to the created vio_dev or NULL if node has 216 215 * NULL device_type or compatible fields. 
217 216 */ ··· 241 240 if (viodev == NULL) 242 241 return NULL; 243 242 244 - viodev->dev.platform_data = of_node_get(of_node); 245 - 246 243 viodev->irq = irq_of_parse_and_map(of_node, 0); 247 244 248 245 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); ··· 253 254 if (unit_address != NULL) 254 255 viodev->unit_address = *unit_address; 255 256 } 256 - viodev->iommu_table = vio_build_iommu_table(viodev); 257 + viodev->dev.archdata.of_node = of_node_get(of_node); 258 + viodev->dev.archdata.dma_ops = &dma_iommu_ops; 259 + viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev); 260 + viodev->dev.archdata.numa_node = of_node_to_nid(of_node); 257 261 258 262 /* init generic 'struct device' fields: */ 259 263 viodev->dev.parent = &vio_bus_device.dev; ··· 287 285 #ifdef CONFIG_PPC_ISERIES 288 286 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 289 287 iommu_vio_init(); 290 - vio_bus_device.iommu_table = &vio_iommu_table; 288 + vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops; 289 + vio_bus_device.dev.archdata.dma_data = &vio_iommu_table; 291 290 iSeries_vio_dev = &vio_bus_device.dev; 292 291 } 293 - #endif 292 + #endif /* CONFIG_PPC_ISERIES */ 294 293 295 294 err = bus_register(&vio_bus_type); 296 295 if (err) { ··· 339 336 static ssize_t devspec_show(struct device *dev, 340 337 struct device_attribute *attr, char *buf) 341 338 { 342 - struct device_node *of_node = dev->platform_data; 339 + struct device_node *of_node = dev->archdata.of_node; 343 340 344 341 return sprintf(buf, "%s\n", of_node ? 
of_node->full_name : "none"); 345 342 } ··· 356 353 } 357 354 EXPORT_SYMBOL(vio_unregister_device); 358 355 359 - static dma_addr_t vio_map_single(struct device *dev, void *vaddr, 360 - size_t size, enum dma_data_direction direction) 361 - { 362 - return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size, 363 - ~0ul, direction); 364 - } 365 - 366 - static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle, 367 - size_t size, enum dma_data_direction direction) 368 - { 369 - iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size, 370 - direction); 371 - } 372 - 373 - static int vio_map_sg(struct device *dev, struct scatterlist *sglist, 374 - int nelems, enum dma_data_direction direction) 375 - { 376 - return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist, 377 - nelems, ~0ul, direction); 378 - } 379 - 380 - static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist, 381 - int nelems, enum dma_data_direction direction) 382 - { 383 - iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction); 384 - } 385 - 386 - static void *vio_alloc_coherent(struct device *dev, size_t size, 387 - dma_addr_t *dma_handle, gfp_t flag) 388 - { 389 - return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size, 390 - dma_handle, ~0ul, flag, -1); 391 - } 392 - 393 - static void vio_free_coherent(struct device *dev, size_t size, 394 - void *vaddr, dma_addr_t dma_handle) 395 - { 396 - iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr, 397 - dma_handle); 398 - } 399 - 400 - static int vio_dma_supported(struct device *dev, u64 mask) 401 - { 402 - return 1; 403 - } 404 - 405 - struct dma_mapping_ops vio_dma_ops = { 406 - .alloc_coherent = vio_alloc_coherent, 407 - .free_coherent = vio_free_coherent, 408 - .map_single = vio_map_single, 409 - .unmap_single = vio_unmap_single, 410 - .map_sg = vio_map_sg, 411 - .unmap_sg = vio_unmap_sg, 412 - .dma_supported = vio_dma_supported, 413 - }; 414 - 415 356 static int 
vio_bus_match(struct device *dev, struct device_driver *drv) 416 357 { 417 358 const struct vio_dev *vio_dev = to_vio_dev(dev); ··· 369 422 char *buffer, int buffer_size) 370 423 { 371 424 const struct vio_dev *vio_dev = to_vio_dev(dev); 372 - struct device_node *dn = dev->platform_data; 425 + struct device_node *dn; 373 426 const char *cp; 374 427 int length; 375 428 376 429 if (!num_envp) 377 430 return -ENOMEM; 378 431 432 + dn = dev->archdata.of_node; 379 433 if (!dn) 380 434 return -ENODEV; 381 435 cp = get_property(dn, "compatible", &length); ··· 413 465 */ 414 466 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) 415 467 { 416 - return get_property(vdev->dev.platform_data, which, length); 468 + return get_property(vdev->dev.archdata.of_node, which, length); 417 469 } 418 470 EXPORT_SYMBOL(vio_get_attribute); 419 471
+6 -15
arch/powerpc/platforms/cell/iommu.c
··· 255 255 set_iost_origin(mmio_base); 256 256 } 257 257 258 - static void iommu_dev_setup_null(struct pci_dev *d) { } 259 - static void iommu_bus_setup_null(struct pci_bus *b) { } 260 - 261 258 struct cell_iommu { 262 259 unsigned long base; 263 260 unsigned long mmio_base; ··· 303 306 } 304 307 } 305 308 306 - static void iommu_devnode_setup(struct device_node *d) 309 + static void pci_dma_cell_bus_setup(struct pci_bus *b) 307 310 { 308 311 const unsigned int *ioid; 309 312 unsigned long map_start, map_size, token; 310 313 const unsigned long *dma_window; 311 314 struct cell_iommu *iommu; 315 + struct device_node *d; 316 + 317 + d = pci_bus_to_OF_node(b); 312 318 313 319 ioid = get_property(d, "ioid", NULL); 314 320 if (!ioid) ··· 328 328 iommu = &cell_iommus[token]; 329 329 330 330 cell_do_map_iommu(iommu, *ioid, map_start, map_size); 331 - } 332 - 333 - static void iommu_bus_setup(struct pci_bus *b) 334 - { 335 - struct device_node *d = (struct device_node *)b->sysdata; 336 - iommu_devnode_setup(d); 337 331 } 338 332 339 333 ··· 493 499 494 500 if (setup_bus) { 495 501 pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__); 496 - ppc_md.iommu_dev_setup = iommu_dev_setup_null; 497 - ppc_md.iommu_bus_setup = iommu_bus_setup; 502 + ppc_md.pci_dma_bus_setup = pci_dma_cell_bus_setup; 498 503 } else { 499 504 pr_debug("%s: IOMMU mapping activated, " 500 505 "no device action necessary\n", __FUNCTION__); 501 506 /* Direct I/O, IOMMU off */ 502 - ppc_md.iommu_dev_setup = iommu_dev_setup_null; 503 - ppc_md.iommu_bus_setup = iommu_bus_setup_null; 504 507 } 505 508 } 506 509 507 - pci_dma_ops = cell_iommu_ops; 510 + pci_dma_ops = &cell_iommu_ops; 508 511 }
+4 -8
arch/powerpc/platforms/iseries/iommu.c
··· 27 27 #include <linux/types.h> 28 28 #include <linux/dma-mapping.h> 29 29 #include <linux/list.h> 30 + #include <linux/pci.h> 30 31 31 32 #include <asm/iommu.h> 32 33 #include <asm/tce.h> ··· 169 168 } 170 169 171 170 172 - void iommu_devnode_init_iSeries(struct device_node *dn) 171 + void iommu_devnode_init_iSeries(struct pci_dev *pdev, struct device_node *dn) 173 172 { 174 173 struct iommu_table *tbl; 175 174 struct pci_dn *pdn = PCI_DN(dn); ··· 187 186 pdn->iommu_table = iommu_init_table(tbl, -1); 188 187 else 189 188 kfree(tbl); 189 + pdev->dev.archdata.dma_data = pdn->iommu_table; 190 190 } 191 191 #endif 192 - 193 - static void iommu_dev_setup_iSeries(struct pci_dev *dev) { } 194 - static void iommu_bus_setup_iSeries(struct pci_bus *bus) { } 195 192 196 193 void iommu_init_early_iSeries(void) 197 194 { 198 195 ppc_md.tce_build = tce_build_iSeries; 199 196 ppc_md.tce_free = tce_free_iSeries; 200 197 201 - ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries; 202 - ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries; 203 - 204 - pci_iommu_init(); 198 + pci_dma_ops = &dma_iommu_ops; 205 199 }
+1 -1
arch/powerpc/platforms/iseries/pci.c
··· 253 253 PCI_DN(node)->pcidev = pdev; 254 254 allocate_device_bars(pdev); 255 255 iSeries_Device_Information(pdev, DeviceCount); 256 - iommu_devnode_init_iSeries(node); 256 + iommu_devnode_init_iSeries(pdev, node); 257 257 } else 258 258 printk("PCI: Device Tree not found for 0x%016lX\n", 259 259 (unsigned long)pdev);
+4 -12
arch/powerpc/platforms/pasemi/setup.c
··· 26 26 #include <linux/kernel.h> 27 27 #include <linux/delay.h> 28 28 #include <linux/console.h> 29 + #include <linux/pci.h> 29 30 30 31 #include <asm/prom.h> 31 32 #include <asm/system.h> ··· 72 71 /* Setup SMP callback */ 73 72 smp_ops = &pas_smp_ops; 74 73 #endif 74 + /* no iommu yet */ 75 + pci_dma_ops = &dma_direct_ops; 76 + 75 77 /* Lookup PCI hosts */ 76 78 pas_pci_init(); 77 79 ··· 83 79 #endif 84 80 85 81 printk(KERN_DEBUG "Using default idle loop\n"); 86 - } 87 - 88 - static void iommu_dev_setup_null(struct pci_dev *dev) { } 89 - static void iommu_bus_setup_null(struct pci_bus *bus) { } 90 - 91 - static void __init pas_init_early(void) 92 - { 93 - /* No iommu code yet */ 94 - ppc_md.iommu_dev_setup = iommu_dev_setup_null; 95 - ppc_md.iommu_bus_setup = iommu_bus_setup_null; 96 - pci_direct_iommu_init(); 97 82 } 98 83 99 84 /* No legacy IO on our parts */ ··· 166 173 .name = "PA Semi PA6T-1682M", 167 174 .probe = pas_probe, 168 175 .setup_arch = pas_setup_arch, 169 - .init_early = pas_init_early, 170 176 .init_IRQ = pas_init_IRQ, 171 177 .get_irq = mpic_get_irq, 172 178 .restart = pas_restart,
+46 -44
arch/powerpc/platforms/pseries/iommu.c
··· 309 309 tbl->it_size = size >> IOMMU_PAGE_SHIFT; 310 310 } 311 311 312 - static void iommu_bus_setup_pSeries(struct pci_bus *bus) 312 + static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) 313 313 { 314 314 struct device_node *dn; 315 315 struct iommu_table *tbl; ··· 318 318 struct pci_dn *pci; 319 319 int children; 320 320 321 - DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self); 322 - 323 321 dn = pci_bus_to_OF_node(bus); 324 - pci = PCI_DN(dn); 322 + 323 + DBG("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name); 325 324 326 325 if (bus->self) { 327 326 /* This is not a root bus, any setup will be done for the ··· 328 329 */ 329 330 return; 330 331 } 332 + pci = PCI_DN(dn); 331 333 332 334 /* Check if the ISA bus on the system is under 333 335 * this PHB. ··· 390 390 } 391 391 392 392 393 - static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus) 393 + static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) 394 394 { 395 395 struct iommu_table *tbl; 396 396 struct device_node *dn, *pdn; 397 397 struct pci_dn *ppci; 398 398 const void *dma_window = NULL; 399 399 400 - DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self); 401 - 402 400 dn = pci_bus_to_OF_node(bus); 401 + 402 + DBG("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n", dn->full_name); 403 403 404 404 /* Find nearest ibm,dma-window, walking up the device tree */ 405 405 for (pdn = dn; pdn != NULL; pdn = pdn->parent) { ··· 409 409 } 410 410 411 411 if (dma_window == NULL) { 412 - DBG("iommu_bus_setup_pSeriesLP: bus %s seems to have no ibm,dma-window property\n", dn->full_name); 412 + DBG(" no ibm,dma-window property !\n"); 413 413 return; 414 414 } 415 415 416 416 ppci = PCI_DN(pdn); 417 + 418 + DBG(" parent is %s, iommu_table: 0x%p\n", 419 + pdn->full_name, ppci->iommu_table); 420 + 417 421 if (!ppci->iommu_table) { 418 422 /* Bussubno hasn't been copied yet. 419 423 * Do it now because iommu_table_setparms_lpar needs it. 
··· 431 427 iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window); 432 428 433 429 ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); 430 + DBG(" created table: %p\n", ppci->iommu_table); 434 431 } 435 432 436 433 if (pdn != dn) ··· 439 434 } 440 435 441 436 442 - static void iommu_dev_setup_pSeries(struct pci_dev *dev) 437 + static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) 443 438 { 444 - struct device_node *dn, *mydn; 439 + struct device_node *dn; 445 440 struct iommu_table *tbl; 446 441 447 - DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev)); 442 + DBG("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev)); 448 443 449 - mydn = dn = pci_device_to_OF_node(dev); 444 + dn = dev->dev.archdata.of_node; 450 445 451 446 /* If we're the direct child of a root bus, then we need to allocate 452 447 * an iommu table ourselves. The bus setup code should have setup 453 448 * the window sizes already. 454 449 */ 455 450 if (!dev->bus->self) { 451 + struct pci_controller *phb = PCI_DN(dn)->phb; 452 + 456 453 DBG(" --> first child, no bridge. 
Allocating iommu table.\n"); 457 454 tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL, 458 - PCI_DN(dn)->phb->node); 459 - iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl); 460 - PCI_DN(dn)->iommu_table = iommu_init_table(tbl, 461 - PCI_DN(dn)->phb->node); 462 - 455 + phb->node); 456 + iommu_table_setparms(phb, dn, tbl); 457 + dev->dev.archdata.dma_data = iommu_init_table(tbl, phb->node); 463 458 return; 464 459 } 465 460 ··· 470 465 while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL) 471 466 dn = dn->parent; 472 467 473 - if (dn && PCI_DN(dn)) { 474 - PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table; 475 - } else { 476 - DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev)); 477 - } 468 + if (dn && PCI_DN(dn)) 469 + dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table; 470 + else 471 + printk(KERN_WARNING "iommu: Device %s has no iommu table\n", 472 + pci_name(dev)); 478 473 } 479 474 480 475 static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) ··· 500 495 .notifier_call = iommu_reconfig_notifier, 501 496 }; 502 497 503 - static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) 498 + static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) 504 499 { 505 500 struct device_node *pdn, *dn; 506 501 struct iommu_table *tbl; 507 502 const void *dma_window = NULL; 508 503 struct pci_dn *pci; 504 + 505 + DBG("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); 509 506 510 507 /* dev setup for LPAR is a little tricky, since the device tree might 511 508 * contain the dma-window properties per-device and not neccesarily ··· 516 509 * already allocated. 
517 510 */ 518 511 dn = pci_device_to_OF_node(dev); 519 - 520 - DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n", 521 - dev, pci_name(dev), dn->full_name); 512 + DBG(" node is %s\n", dn->full_name); 522 513 523 514 for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; 524 515 pdn = pdn->parent) { ··· 525 520 break; 526 521 } 527 522 523 + DBG(" parent is %s\n", pdn->full_name); 524 + 528 525 /* Check for parent == NULL so we don't try to setup the empty EADS 529 526 * slots on POWER4 machines. 530 527 */ 531 528 if (dma_window == NULL || pdn->parent == NULL) { 532 - DBG("No dma window for device, linking to parent\n"); 533 - PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table; 529 + DBG(" no dma window for device, linking to parent\n"); 530 + dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table; 534 531 return; 535 - } else { 536 - DBG("Found DMA window, allocating table\n"); 537 532 } 533 + DBG(" found DMA window, table: %p\n", pci->iommu_table); 538 534 539 535 pci = PCI_DN(pdn); 540 536 if (!pci->iommu_table) { ··· 548 542 iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window); 549 543 550 544 pci->iommu_table = iommu_init_table(tbl, pci->phb->node); 545 + DBG(" created table: %p\n", pci->iommu_table); 551 546 } 552 547 553 - if (pdn != dn) 554 - PCI_DN(dn)->iommu_table = pci->iommu_table; 548 + dev->dev.archdata.dma_data = pci->iommu_table; 555 549 } 556 - 557 - static void iommu_bus_setup_null(struct pci_bus *b) { } 558 - static void iommu_dev_setup_null(struct pci_dev *d) { } 559 550 560 551 /* These are called very early. 
*/ 561 552 void iommu_init_early_pSeries(void) 562 553 { 563 554 if (of_chosen && get_property(of_chosen, "linux,iommu-off", NULL)) { 564 555 /* Direct I/O, IOMMU off */ 565 - ppc_md.iommu_dev_setup = iommu_dev_setup_null; 566 - ppc_md.iommu_bus_setup = iommu_bus_setup_null; 567 - pci_direct_iommu_init(); 568 - 556 + ppc_md.pci_dma_dev_setup = NULL; 557 + ppc_md.pci_dma_bus_setup = NULL; 558 + pci_dma_ops = &dma_direct_ops; 569 559 return; 570 560 } 571 561 ··· 574 572 ppc_md.tce_free = tce_free_pSeriesLP; 575 573 } 576 574 ppc_md.tce_get = tce_get_pSeriesLP; 577 - ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP; 578 - ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP; 575 + ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; 576 + ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; 579 577 } else { 580 578 ppc_md.tce_build = tce_build_pSeries; 581 579 ppc_md.tce_free = tce_free_pSeries; 582 580 ppc_md.tce_get = tce_get_pseries; 583 - ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries; 584 - ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries; 581 + ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries; 582 + ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries; 585 583 } 586 584 587 585 588 586 pSeries_reconfig_notifier_register(&iommu_reconfig_nb); 589 587 590 - pci_iommu_init(); 588 + pci_dma_ops = &dma_iommu_ops; 591 589 } 592 590
+2 -2
arch/powerpc/platforms/pseries/pci_dlpar.c
··· 93 93 if (list_empty(&dev->global_list)) { 94 94 int i; 95 95 96 - /* Need to setup IOMMU tables */ 97 - ppc_md.iommu_dev_setup(dev); 96 + /* Fill device archdata and setup iommu table */ 97 + pcibios_setup_new_device(dev); 98 98 99 99 if(fix_bus) 100 100 pcibios_fixup_device_resources(dev, bus);
+9 -22
arch/powerpc/sysdev/dart_iommu.c
··· 289 289 set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map); 290 290 } 291 291 292 - static void iommu_dev_setup_dart(struct pci_dev *dev) 292 + static void pci_dma_dev_setup_dart(struct pci_dev *dev) 293 293 { 294 - struct device_node *dn; 295 - 296 294 /* We only have one iommu table on the mac for now, which makes 297 295 * things simple. Setup all PCI devices to point to this table 298 - * 299 - * We must use pci_device_to_OF_node() to make sure that 300 - * we get the real "final" pointer to the device in the 301 - * pci_dev sysdata and not the temporary PHB one 302 296 */ 303 - dn = pci_device_to_OF_node(dev); 304 - 305 - if (dn) 306 - PCI_DN(dn)->iommu_table = &iommu_table_dart; 297 + dev->dev.archdata.dma_data = &iommu_table_dart; 307 298 } 308 299 309 - static void iommu_bus_setup_dart(struct pci_bus *bus) 300 + static void pci_dma_bus_setup_dart(struct pci_bus *bus) 310 301 { 311 302 struct device_node *dn; 312 303 ··· 311 320 if (dn) 312 321 PCI_DN(dn)->iommu_table = &iommu_table_dart; 313 322 } 314 - 315 - static void iommu_dev_setup_null(struct pci_dev *dev) { } 316 - static void iommu_bus_setup_null(struct pci_bus *bus) { } 317 323 318 324 void iommu_init_early_dart(void) 319 325 { ··· 332 344 333 345 /* Initialize the DART HW */ 334 346 if (dart_init(dn) == 0) { 335 - ppc_md.iommu_dev_setup = iommu_dev_setup_dart; 336 - ppc_md.iommu_bus_setup = iommu_bus_setup_dart; 347 + ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart; 348 + ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart; 337 349 338 350 /* Setup pci_dma ops */ 339 - pci_iommu_init(); 340 - 351 + pci_dma_ops = &dma_iommu_ops; 341 352 return; 342 353 } 343 354 344 355 bail: 345 356 /* If init failed, use direct iommu and null setup functions */ 346 - ppc_md.iommu_dev_setup = iommu_dev_setup_null; 347 - ppc_md.iommu_bus_setup = iommu_bus_setup_null; 357 + ppc_md.pci_dma_dev_setup = NULL; 358 + ppc_md.pci_dma_bus_setup = NULL; 348 359 349 360 /* Setup pci_dma ops */ 350 - 
pci_direct_iommu_init(); 361 + pci_dma_ops = &dma_direct_ops; 351 362 } 352 363 353 364
+18 -1
include/asm-powerpc/device.h
··· 3 3 * 4 4 * This file is released under the GPLv2 5 5 */ 6 - #include <asm-generic/device.h> 6 + #ifndef _ASM_POWERPC_DEVICE_H 7 + #define _ASM_POWERPC_DEVICE_H 7 8 9 + struct dma_mapping_ops; 10 + struct device_node; 11 + 12 + struct dev_archdata { 13 + /* Optional pointer to an OF device node */ 14 + struct device_node *of_node; 15 + 16 + /* DMA operations on that device */ 17 + struct dma_mapping_ops *dma_ops; 18 + void *dma_data; 19 + 20 + /* NUMA node if applicable */ 21 + int numa_node; 22 + }; 23 + 24 + #endif /* _ASM_POWERPC_DEVICE_H */
+141 -39
include/asm-powerpc/dma-mapping.h
··· 44 44 #endif /* ! CONFIG_NOT_COHERENT_CACHE */ 45 45 46 46 #ifdef CONFIG_PPC64 47 + /* 48 + * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO 49 + */ 50 + struct dma_mapping_ops { 51 + void * (*alloc_coherent)(struct device *dev, size_t size, 52 + dma_addr_t *dma_handle, gfp_t flag); 53 + void (*free_coherent)(struct device *dev, size_t size, 54 + void *vaddr, dma_addr_t dma_handle); 55 + dma_addr_t (*map_single)(struct device *dev, void *ptr, 56 + size_t size, enum dma_data_direction direction); 57 + void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, 58 + size_t size, enum dma_data_direction direction); 59 + int (*map_sg)(struct device *dev, struct scatterlist *sg, 60 + int nents, enum dma_data_direction direction); 61 + void (*unmap_sg)(struct device *dev, struct scatterlist *sg, 62 + int nents, enum dma_data_direction direction); 63 + int (*dma_supported)(struct device *dev, u64 mask); 64 + int (*dac_dma_supported)(struct device *dev, u64 mask); 65 + int (*set_dma_mask)(struct device *dev, u64 dma_mask); 66 + }; 47 67 48 - extern int dma_supported(struct device *dev, u64 mask); 49 - extern int dma_set_mask(struct device *dev, u64 dma_mask); 50 - extern void *dma_alloc_coherent(struct device *dev, size_t size, 51 - dma_addr_t *dma_handle, gfp_t flag); 52 - extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 53 - dma_addr_t dma_handle); 54 - extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, 55 - size_t size, enum dma_data_direction direction); 56 - extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, 57 - size_t size, enum dma_data_direction direction); 58 - extern dma_addr_t dma_map_page(struct device *dev, struct page *page, 59 - unsigned long offset, size_t size, 60 - enum dma_data_direction direction); 61 - extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, 62 - size_t size, enum dma_data_direction direction); 63 - extern int dma_map_sg(struct 
device *dev, struct scatterlist *sg, int nents, 64 - enum dma_data_direction direction); 65 - extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, 66 - int nhwentries, enum dma_data_direction direction); 68 + static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) 69 + { 70 + /* We don't handle the NULL dev case for ISA for now. We could 71 + * do it via an out of line call but it is not needed for now. The 72 + * only ISA DMA device we support is the floppy and we have a hack 73 + * in the floppy driver directly to get a device for us. 74 + */ 75 + if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL)) 76 + return NULL; 77 + return dev->archdata.dma_ops; 78 + } 79 + 80 + static inline int dma_supported(struct device *dev, u64 mask) 81 + { 82 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 83 + 84 + if (unlikely(dma_ops == NULL)) 85 + return 0; 86 + if (dma_ops->dma_supported == NULL) 87 + return 1; 88 + return dma_ops->dma_supported(dev, mask); 89 + } 90 + 91 + static inline int dma_set_mask(struct device *dev, u64 dma_mask) 92 + { 93 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 94 + 95 + if (unlikely(dma_ops == NULL)) 96 + return -EIO; 97 + if (dma_ops->set_dma_mask != NULL) 98 + return dma_ops->set_dma_mask(dev, dma_mask); 99 + if (!dev->dma_mask || !dma_supported(dev, *dev->dma_mask)) 100 + return -EIO; 101 + *dev->dma_mask = dma_mask; 102 + return 0; 103 + } 104 + 105 + static inline void *dma_alloc_coherent(struct device *dev, size_t size, 106 + dma_addr_t *dma_handle, gfp_t flag) 107 + { 108 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 109 + 110 + BUG_ON(!dma_ops); 111 + return dma_ops->alloc_coherent(dev, size, dma_handle, flag); 112 + } 113 + 114 + static inline void dma_free_coherent(struct device *dev, size_t size, 115 + void *cpu_addr, dma_addr_t dma_handle) 116 + { 117 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 118 + 119 + BUG_ON(!dma_ops); 120 + dma_ops->free_coherent(dev, size, 
cpu_addr, dma_handle); 121 + } 122 + 123 + static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, 124 + size_t size, 125 + enum dma_data_direction direction) 126 + { 127 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 128 + 129 + BUG_ON(!dma_ops); 130 + return dma_ops->map_single(dev, cpu_addr, size, direction); 131 + } 132 + 133 + static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, 134 + size_t size, 135 + enum dma_data_direction direction) 136 + { 137 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 138 + 139 + BUG_ON(!dma_ops); 140 + dma_ops->unmap_single(dev, dma_addr, size, direction); 141 + } 142 + 143 + static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, 144 + unsigned long offset, size_t size, 145 + enum dma_data_direction direction) 146 + { 147 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 148 + 149 + BUG_ON(!dma_ops); 150 + return dma_ops->map_single(dev, page_address(page) + offset, size, 151 + direction); 152 + } 153 + 154 + static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, 155 + size_t size, 156 + enum dma_data_direction direction) 157 + { 158 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 159 + 160 + BUG_ON(!dma_ops); 161 + dma_ops->unmap_single(dev, dma_address, size, direction); 162 + } 163 + 164 + static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, 165 + int nents, enum dma_data_direction direction) 166 + { 167 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 168 + 169 + BUG_ON(!dma_ops); 170 + return dma_ops->map_sg(dev, sg, nents, direction); 171 + } 172 + 173 + static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, 174 + int nhwentries, 175 + enum dma_data_direction direction) 176 + { 177 + struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 178 + 179 + BUG_ON(!dma_ops); 180 + dma_ops->unmap_sg(dev, sg, nhwentries, direction); 181 + } 182 + 183 + 184 + /* 185 + * Available 
generic sets of operations 186 + */ 187 + extern struct dma_mapping_ops dma_iommu_ops; 188 + extern struct dma_mapping_ops dma_direct_ops; 67 189 68 190 #else /* CONFIG_PPC64 */ 69 191 ··· 382 260 BUG_ON(direction == DMA_NONE); 383 261 __dma_sync(vaddr, size, (int)direction); 384 262 } 385 - 386 - /* 387 - * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO 388 - */ 389 - struct dma_mapping_ops { 390 - void * (*alloc_coherent)(struct device *dev, size_t size, 391 - dma_addr_t *dma_handle, gfp_t flag); 392 - void (*free_coherent)(struct device *dev, size_t size, 393 - void *vaddr, dma_addr_t dma_handle); 394 - dma_addr_t (*map_single)(struct device *dev, void *ptr, 395 - size_t size, enum dma_data_direction direction); 396 - void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, 397 - size_t size, enum dma_data_direction direction); 398 - int (*map_sg)(struct device *dev, struct scatterlist *sg, 399 - int nents, enum dma_data_direction direction); 400 - void (*unmap_sg)(struct device *dev, struct scatterlist *sg, 401 - int nents, enum dma_data_direction direction); 402 - int (*dma_supported)(struct device *dev, u64 mask); 403 - int (*dac_dma_supported)(struct device *dev, u64 mask); 404 - }; 405 263 406 264 #endif /* __KERNEL__ */ 407 265 #endif /* _ASM_DMA_MAPPING_H */
-1
include/asm-powerpc/ibmebus.h
··· 44 44 #include <linux/mod_devicetable.h> 45 45 #include <asm/of_device.h> 46 46 47 - extern struct dma_mapping_ops ibmebus_dma_ops; 48 47 extern struct bus_type ibmebus_bus_type; 49 48 50 49 struct ibmebus_dev {
+10 -10
include/asm-powerpc/iommu.h
··· 79 79 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, 80 80 int nid); 81 81 82 - extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl, 83 - struct scatterlist *sglist, int nelems, unsigned long mask, 84 - enum dma_data_direction direction); 82 + extern int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, 83 + int nelems, unsigned long mask, 84 + enum dma_data_direction direction); 85 85 extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, 86 - int nelems, enum dma_data_direction direction); 86 + int nelems, enum dma_data_direction direction); 87 87 88 88 extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, 89 - dma_addr_t *dma_handle, unsigned long mask, 90 - gfp_t flag, int node); 89 + dma_addr_t *dma_handle, unsigned long mask, 90 + gfp_t flag, int node); 91 91 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, 92 - void *vaddr, dma_addr_t dma_handle); 92 + void *vaddr, dma_addr_t dma_handle); 93 93 extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, 94 - size_t size, unsigned long mask, 95 - enum dma_data_direction direction); 94 + size_t size, unsigned long mask, 95 + enum dma_data_direction direction); 96 96 extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, 97 - size_t size, enum dma_data_direction direction); 97 + size_t size, enum dma_data_direction direction); 98 98 99 99 extern void iommu_init_early_pSeries(void); 100 100 extern void iommu_init_early_iSeries(void);
+3 -1
include/asm-powerpc/iseries/iommu.h
··· 21 21 * Boston, MA 02111-1307 USA 22 22 */ 23 23 24 + struct pci_dev; 24 25 struct device_node; 25 26 struct iommu_table; 26 27 27 28 /* Creates table for an individual device node */ 28 - extern void iommu_devnode_init_iSeries(struct device_node *dn); 29 + extern void iommu_devnode_init_iSeries(struct pci_dev *pdev, 30 + struct device_node *dn); 29 31 30 32 /* Get table parameters from HV */ 31 33 extern void iommu_table_getparms_iSeries(unsigned long busno,
+2 -2
include/asm-powerpc/machdep.h
··· 84 84 unsigned long (*tce_get)(struct iommu_table *tbl, 85 85 long index); 86 86 void (*tce_flush)(struct iommu_table *tbl); 87 - void (*iommu_dev_setup)(struct pci_dev *dev); 88 - void (*iommu_bus_setup)(struct pci_bus *bus); 87 + void (*pci_dma_dev_setup)(struct pci_dev *dev); 88 + void (*pci_dma_bus_setup)(struct pci_bus *bus); 89 89 #endif /* CONFIG_PPC64 */ 90 90 91 91 int (*probe)(void);
+1 -1
include/asm-powerpc/of_device.h
··· 14 14 */ 15 15 struct of_device 16 16 { 17 - struct device_node *node; /* OF device node */ 17 + struct device_node *node; /* to be obsoleted */ 18 18 u64 dma_mask; /* DMA mask */ 19 19 struct device dev; /* Generic device interface */ 20 20 };
+5 -3
include/asm-powerpc/pci.h
··· 70 70 */ 71 71 #define PCI_DISABLE_MWI 72 72 73 - extern struct dma_mapping_ops pci_dma_ops; 73 + extern struct dma_mapping_ops *pci_dma_ops; 74 74 75 75 /* For DAC DMA, we currently don't support it by default, but 76 76 * we let 64-bit platforms override this. 77 77 */ 78 78 static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask) 79 79 { 80 - if (pci_dma_ops.dac_dma_supported) 81 - return pci_dma_ops.dac_dma_supported(&hwdev->dev, mask); 80 + if (pci_dma_ops && pci_dma_ops->dac_dma_supported) 81 + return pci_dma_ops->dac_dma_supported(&hwdev->dev, mask); 82 82 return 0; 83 83 } 84 84 ··· 209 209 210 210 extern void pcibios_fixup_device_resources(struct pci_dev *dev, 211 211 struct pci_bus *bus); 212 + 213 + extern void pcibios_setup_new_device(struct pci_dev *dev); 212 214 213 215 extern void pcibios_claim_one_bus(struct pci_bus *b); 214 216
-1
include/asm-powerpc/vio.h
··· 45 45 * The vio_dev structure is used to describe virtual I/O devices. 46 46 */ 47 47 struct vio_dev { 48 - struct iommu_table *iommu_table; /* vio_map_* uses this */ 49 48 const char *name; 50 49 const char *type; 51 50 uint32_t unit_address;