Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (27 commits)
microblaze: entry.S use delay slot for return handlers
microblaze: Save current task directly
microblaze: Simplify entry.S - save/restore r3/r4 - ret_from_trap
microblaze: PCI early support for noMMU system
microblaze: Fix dma alloc and free coherent dma functions
microblaze: Add consistent code
microblaze: pgtable.h: move consistent functions
microblaze: Remove ancient Kconfig option for consistent mapping
microblaze: Remove VMALLOC_VMADDR
microblaze: Add define for ASM_LOOP
microblaze: Preliminary support for dma drivers
microblaze: remove trailing space in messages
microblaze: Use generic show_mem()
microblaze: Change temp register for cmdline
microblaze: Preliminary support for dma drivers
microblaze: Move cache function to cache.c
microblaze: Add support for PREEMPT
microblaze: Add support for Xilinx PCI host bridge
microblaze: Enable PCI, missing files
microblaze: Add core PCI files
...

+3841 -196
+32 -34
arch/microblaze/Kconfig
··· 14 select USB_ARCH_HAS_EHCI 15 select ARCH_WANT_OPTIONAL_GPIOLIB 16 select HAVE_OPROFILE 17 select TRACING_SUPPORT 18 19 config SWAP ··· 77 78 config PCI 79 def_bool n 80 - 81 - config NO_DMA 82 - def_bool y 83 84 config DTC 85 def_bool y ··· 145 146 config ADVANCED_OPTIONS 147 bool "Prompt for advanced kernel configuration options" 148 - depends on MMU 149 help 150 This option will enable prompting for a variety of advanced kernel 151 configuration options. These options can cause the kernel to not ··· 155 156 comment "Default settings for advanced configuration options are used" 157 depends on !ADVANCED_OPTIONS 158 159 config HIGHMEM_START_BOOL 160 bool "Set high memory pool address" ··· 182 183 config LOWMEM_SIZE_BOOL 184 bool "Set maximum low memory" 185 - depends on ADVANCED_OPTIONS 186 help 187 This option allows you to set the maximum amount of memory which 188 will be used as "low memory", that is, memory which the kernel can ··· 194 195 config LOWMEM_SIZE 196 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL 197 - depends on MMU 198 default "0x30000000" 199 200 config KERNEL_START_BOOL ··· 214 215 config TASK_SIZE_BOOL 216 bool "Set custom user task size" 217 - depends on ADVANCED_OPTIONS 218 help 219 This option allows you to set the amount of virtual address space 220 allocated to user tasks. This can be useful in optimizing the ··· 224 225 config TASK_SIZE 226 hex "Size of user task space" if TASK_SIZE_BOOL 227 - depends on MMU 228 default "0x80000000" 229 - 230 - config CONSISTENT_START_BOOL 231 - bool "Set custom consistent memory pool address" 232 - depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE 233 - help 234 - This option allows you to set the base virtual address 235 - of the the consistent memory pool. This pool of virtual 236 - memory is used to make consistent memory allocations. 237 - 238 - config CONSISTENT_START 239 - hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL 240 - depends on MMU 241 - default "0xff100000" if NOT_COHERENT_CACHE 242 - 243 - config CONSISTENT_SIZE_BOOL 244 - bool "Set custom consistent memory pool size" 245 - depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE 246 - help 247 - This option allows you to set the size of the the 248 - consistent memory pool. This pool of virtual memory 249 - is used to make consistent memory allocations. 250 - 251 - config CONSISTENT_SIZE 252 - hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL 253 - depends on MMU 254 - default "0x00200000" if NOT_COHERENT_CACHE 255 256 endmenu 257 ··· 233 menu "Exectuable file formats" 234 235 source "fs/Kconfig.binfmt" 236 237 endmenu 238
··· 14 select USB_ARCH_HAS_EHCI 15 select ARCH_WANT_OPTIONAL_GPIOLIB 16 select HAVE_OPROFILE 17 + select HAVE_DMA_ATTRS 18 + select HAVE_DMA_API_DEBUG 19 select TRACING_SUPPORT 20 21 config SWAP ··· 75 76 config PCI 77 def_bool n 78 79 config DTC 80 def_bool y ··· 146 147 config ADVANCED_OPTIONS 148 bool "Prompt for advanced kernel configuration options" 149 help 150 This option will enable prompting for a variety of advanced kernel 151 configuration options. These options can cause the kernel to not ··· 157 158 comment "Default settings for advanced configuration options are used" 159 depends on !ADVANCED_OPTIONS 160 + 161 + config XILINX_UNCACHED_SHADOW 162 + bool "Are you using uncached shadow for RAM ?" 163 + depends on ADVANCED_OPTIONS && !MMU 164 + default n 165 + help 166 + This is needed to be able to allocate uncachable memory regions. 167 + The feature requires the design to define the RAM memory controller 168 + window to be twice as large as the actual physical memory. 169 170 config HIGHMEM_START_BOOL 171 bool "Set high memory pool address" ··· 175 176 config LOWMEM_SIZE_BOOL 177 bool "Set maximum low memory" 178 + depends on ADVANCED_OPTIONS && MMU 179 help 180 This option allows you to set the maximum amount of memory which 181 will be used as "low memory", that is, memory which the kernel can ··· 187 188 config LOWMEM_SIZE 189 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL 190 default "0x30000000" 191 192 config KERNEL_START_BOOL ··· 208 209 config TASK_SIZE_BOOL 210 bool "Set custom user task size" 211 + depends on ADVANCED_OPTIONS && MMU 212 help 213 This option allows you to set the amount of virtual address space 214 allocated to user tasks. This can be useful in optimizing the ··· 218 219 config TASK_SIZE 220 hex "Size of user task space" if TASK_SIZE_BOOL 221 default "0x80000000" 222 223 endmenu 224 ··· 254 menu "Exectuable file formats" 255 256 source "fs/Kconfig.binfmt" 257 + 258 + endmenu 259 + 260 + menu "Bus Options" 261 + 262 + config PCI 263 + bool "PCI support" 264 + 265 + config PCI_DOMAINS 266 + def_bool PCI 267 + 268 + config PCI_SYSCALL 269 + def_bool PCI 270 + 271 + config PCI_XILINX 272 + bool "Xilinx PCI host bridge support" 273 + depends on PCI 274 + 275 + source "drivers/pci/Kconfig" 276 277 endmenu 278
+1
arch/microblaze/Makefile
··· 50 core-y += arch/microblaze/kernel/ 51 core-y += arch/microblaze/mm/ 52 core-y += arch/microblaze/platform/ 53 54 drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/ 55
··· 50 core-y += arch/microblaze/kernel/ 51 core-y += arch/microblaze/mm/ 52 core-y += arch/microblaze/platform/ 53 + core-$(CONFIG_PCI) += arch/microblaze/pci/ 54 55 drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/ 56
+4
arch/microblaze/include/asm/device.h
··· 14 struct dev_archdata { 15 /* Optional pointer to an OF device node */ 16 struct device_node *of_node; 17 }; 18 19 struct pdev_archdata {
··· 14 struct dev_archdata { 15 /* Optional pointer to an OF device node */ 16 struct device_node *of_node; 17 + 18 + /* DMA operations on that device */ 19 + struct dma_map_ops *dma_ops; 20 + void *dma_data; 21 }; 22 23 struct pdev_archdata {
+153 -1
arch/microblaze/include/asm/dma-mapping.h
··· 1 - #include <asm-generic/dma-mapping-broken.h>
··· 1 + /* 2 + * Implements the generic device dma API for microblaze and the pci 3 + * 4 + * Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu> 5 + * Copyright (C) 2009-2010 PetaLogix 6 + * 7 + * This file is subject to the terms and conditions of the GNU General 8 + * Public License. See the file COPYING in the main directory of this 9 + * archive for more details. 10 + * 11 + * This file is base on powerpc and x86 dma-mapping.h versions 12 + * Copyright (C) 2004 IBM 13 + */ 14 + 15 + #ifndef _ASM_MICROBLAZE_DMA_MAPPING_H 16 + #define _ASM_MICROBLAZE_DMA_MAPPING_H 17 + 18 + /* 19 + * See Documentation/PCI/PCI-DMA-mapping.txt and 20 + * Documentation/DMA-API.txt for documentation. 21 + */ 22 + 23 + #include <linux/types.h> 24 + #include <linux/cache.h> 25 + #include <linux/mm.h> 26 + #include <linux/scatterlist.h> 27 + #include <linux/dma-debug.h> 28 + #include <linux/dma-attrs.h> 29 + #include <asm/io.h> 30 + #include <asm-generic/dma-coherent.h> 31 + 32 + #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 33 + 34 + #define __dma_alloc_coherent(dev, gfp, size, handle) NULL 35 + #define __dma_free_coherent(size, addr) ((void)0) 36 + #define __dma_sync(addr, size, rw) ((void)0) 37 + 38 + static inline unsigned long device_to_mask(struct device *dev) 39 + { 40 + if (dev->dma_mask && *dev->dma_mask) 41 + return *dev->dma_mask; 42 + /* Assume devices without mask can take 32 bit addresses */ 43 + return 0xfffffffful; 44 + } 45 + 46 + extern struct dma_map_ops *dma_ops; 47 + 48 + /* 49 + * Available generic sets of operations 50 + */ 51 + extern struct dma_map_ops dma_direct_ops; 52 + 53 + static inline struct dma_map_ops *get_dma_ops(struct device *dev) 54 + { 55 + /* We don't handle the NULL dev case for ISA for now. We could 56 + * do it via an out of line call but it is not needed for now. The 57 + * only ISA DMA device we support is the floppy and we have a hack 58 + * in the floppy driver directly to get a device for us. 
59 + */ 60 + if (unlikely(!dev) || !dev->archdata.dma_ops) 61 + return NULL; 62 + 63 + return dev->archdata.dma_ops; 64 + } 65 + 66 + static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) 67 + { 68 + dev->archdata.dma_ops = ops; 69 + } 70 + 71 + static inline int dma_supported(struct device *dev, u64 mask) 72 + { 73 + struct dma_map_ops *ops = get_dma_ops(dev); 74 + 75 + if (unlikely(!ops)) 76 + return 0; 77 + if (!ops->dma_supported) 78 + return 1; 79 + return ops->dma_supported(dev, mask); 80 + } 81 + 82 + #ifdef CONFIG_PCI 83 + /* We have our own implementation of pci_set_dma_mask() */ 84 + #define HAVE_ARCH_PCI_SET_DMA_MASK 85 + 86 + #endif 87 + 88 + static inline int dma_set_mask(struct device *dev, u64 dma_mask) 89 + { 90 + struct dma_map_ops *ops = get_dma_ops(dev); 91 + 92 + if (unlikely(ops == NULL)) 93 + return -EIO; 94 + if (ops->set_dma_mask) 95 + return ops->set_dma_mask(dev, dma_mask); 96 + if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 97 + return -EIO; 98 + *dev->dma_mask = dma_mask; 99 + return 0; 100 + } 101 + 102 + #include <asm-generic/dma-mapping-common.h> 103 + 104 + static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 105 + { 106 + struct dma_map_ops *ops = get_dma_ops(dev); 107 + if (ops->mapping_error) 108 + return ops->mapping_error(dev, dma_addr); 109 + 110 + return (dma_addr == DMA_ERROR_CODE); 111 + } 112 + 113 + #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 114 + #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 115 + #define dma_is_consistent(d, h) (1) 116 + 117 + static inline void *dma_alloc_coherent(struct device *dev, size_t size, 118 + dma_addr_t *dma_handle, gfp_t flag) 119 + { 120 + struct dma_map_ops *ops = get_dma_ops(dev); 121 + void *memory; 122 + 123 + BUG_ON(!ops); 124 + 125 + memory = ops->alloc_coherent(dev, size, dma_handle, flag); 126 + 127 + debug_dma_alloc_coherent(dev, size, *dma_handle, memory); 128 + return memory; 129 + } 130 + 131 + static inline void dma_free_coherent(struct device *dev, size_t size, 132 + void *cpu_addr, dma_addr_t dma_handle) 133 + { 134 + struct dma_map_ops *ops = get_dma_ops(dev); 135 + 136 + BUG_ON(!ops); 137 + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 138 + ops->free_coherent(dev, size, cpu_addr, dma_handle); 139 + } 140 + 141 + static inline int dma_get_cache_alignment(void) 142 + { 143 + return L1_CACHE_BYTES; 144 + } 145 + 146 + static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 147 + enum dma_data_direction direction) 148 + { 149 + BUG_ON(direction == DMA_NONE); 150 + __dma_sync(vaddr, size, (int)direction); 151 + } 152 + 153 + #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
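
For context only (not part of the merge): with dma-mapping-broken.h replaced by a real implementation, a driver sitting on top of this header would allocate coherent memory roughly as in the sketch below. The function name and the caller-supplied struct device are hypothetical; the calls shown (dma_set_mask, dma_alloc_coherent) are the ones declared in the new header.

    /* Hypothetical driver fragment using the new microblaze DMA API. */
    #include <linux/dma-mapping.h>

    static void *example_alloc_ring(struct device *dev, size_t size,
    				dma_addr_t *bus_addr)
    {
    	/* Goes through the dma_map_ops installed in dev->archdata.dma_ops;
    	 * devices without a mask are assumed to take 32-bit addresses. */
    	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
    		return NULL;

    	/* Ends up in ops->alloc_coherent(), i.e. dma_direct_alloc_coherent()
    	 * for directly mapped buses. */
    	return dma_alloc_coherent(dev, size, bus_addr, GFP_KERNEL);
    }
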
+18 -13
arch/microblaze/include/asm/io.h
··· 15 #include <asm/page.h> 16 #include <linux/types.h> 17 #include <linux/mm.h> /* Get struct page {...} */ 18 19 20 #define IO_SPACE_LIMIT (0xFFFFFFFF) 21 ··· 140 #define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) 141 #define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) 142 143 - #define __page_address(page) \ 144 - (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) 145 - #define page_to_phys(page) virt_to_phys((void *)__page_address(page)) 146 #define page_to_bus(page) (page_to_phys(page)) 147 #define bus_to_virt(addr) (phys_to_virt(addr)) 148 ··· 240 #define out_8(a, v) __raw_writeb((v), (a)) 241 #define in_8(a) __raw_readb(a) 242 243 - /* FIXME */ 244 - static inline void __iomem *ioport_map(unsigned long port, unsigned int len) 245 - { 246 - return (void __iomem *) (port); 247 - } 248 - 249 - static inline void ioport_unmap(void __iomem *addr) 250 - { 251 - /* Nothing to do */ 252 - } 253 254 #endif /* _ASM_MICROBLAZE_IO_H */
··· 15 #include <asm/page.h> 16 #include <linux/types.h> 17 #include <linux/mm.h> /* Get struct page {...} */ 18 + #include <asm-generic/iomap.h> 19 20 + #ifndef CONFIG_PCI 21 + #define _IO_BASE 0 22 + #define _ISA_MEM_BASE 0 23 + #define PCI_DRAM_OFFSET 0 24 + #else 25 + #define _IO_BASE isa_io_base 26 + #define _ISA_MEM_BASE isa_mem_base 27 + #define PCI_DRAM_OFFSET pci_dram_offset 28 + #endif 29 + 30 + extern unsigned long isa_io_base; 31 + extern unsigned long pci_io_base; 32 + extern unsigned long pci_dram_offset; 33 + 34 + extern resource_size_t isa_mem_base; 35 36 #define IO_SPACE_LIMIT (0xFFFFFFFF) 37 ··· 124 #define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) 125 #define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) 126 127 #define page_to_bus(page) (page_to_phys(page)) 128 #define bus_to_virt(addr) (phys_to_virt(addr)) 129 ··· 227 #define out_8(a, v) __raw_writeb((v), (a)) 228 #define in_8(a) __raw_readb(a) 229 230 + #define ioport_map(port, nr) ((void __iomem *)(port)) 231 + #define ioport_unmap(addr) 232 233 #endif /* _ASM_MICROBLAZE_IO_H */
+36 -1
arch/microblaze/include/asm/irq.h
··· 14 15 #include <linux/interrupt.h> 16 17 extern unsigned int nr_irq; 18 19 #define NO_IRQ (-1) ··· 27 struct pt_regs; 28 extern void do_IRQ(struct pt_regs *regs); 29 30 - /* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space 31 * @device: Device node of the device whose interrupt is to be mapped 32 * @index: Index of the interrupt to map 33 * ··· 46 { 47 return; 48 } 49 50 #endif /* _ASM_MICROBLAZE_IRQ_H */
··· 14 15 #include <linux/interrupt.h> 16 17 + /* This type is the placeholder for a hardware interrupt number. It has to 18 + * be big enough to enclose whatever representation is used by a given 19 + * platform. 20 + */ 21 + typedef unsigned long irq_hw_number_t; 22 + 23 extern unsigned int nr_irq; 24 25 #define NO_IRQ (-1) ··· 21 struct pt_regs; 22 extern void do_IRQ(struct pt_regs *regs); 23 24 + /** 25 + * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space 26 * @device: Device node of the device whose interrupt is to be mapped 27 * @index: Index of the interrupt to map 28 * ··· 39 { 40 return; 41 } 42 + 43 + struct irq_host; 44 + 45 + /** 46 + * irq_create_mapping - Map a hardware interrupt into linux virq space 47 + * @host: host owning this hardware interrupt or NULL for default host 48 + * @hwirq: hardware irq number in that host space 49 + * 50 + * Only one mapping per hardware interrupt is permitted. Returns a linux 51 + * virq number. 52 + * If the sense/trigger is to be specified, set_irq_type() should be called 53 + * on the number returned from that call. 54 + */ 55 + extern unsigned int irq_create_mapping(struct irq_host *host, 56 + irq_hw_number_t hwirq); 57 + 58 + /** 59 + * irq_create_of_mapping - Map a hardware interrupt into linux virq space 60 + * @controller: Device node of the interrupt controller 61 + * @inspec: Interrupt specifier from the device-tree 62 + * @intsize: Size of the interrupt specifier from the device-tree 63 + * 64 + * This function is identical to irq_create_mapping except that it takes 65 + * as input informations straight from the device-tree (typically the results 66 + * of the of_irq_map_*() functions. 67 + */ 68 + extern unsigned int irq_create_of_mapping(struct device_node *controller, 69 + u32 *intspec, unsigned int intsize); 70 71 #endif /* _ASM_MICROBLAZE_IRQ_H */
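
As a hedged illustration of how a device-tree driver would consume these helpers (the function, handler and node pointer are placeholders; the mapping goes through irq_of_parse_and_map() documented above):

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/of.h>
    #include <asm/irq.h>

    static int example_setup_irq(struct device_node *np,
    			     irq_handler_t handler, void *data)
    {
    	unsigned int virq = irq_of_parse_and_map(np, 0);

    	if (virq == NO_IRQ)
    		return -ENODEV;

    	/* If a specific trigger is needed, set_irq_type(virq, ...) would be
    	 * called here, as the irq_create_mapping() comment notes. */
    	return request_irq(virq, handler, 0, np->name, data);
    }
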
+5 -7
arch/microblaze/include/asm/page.h
··· 62 #define PAGE_OFFSET CONFIG_KERNEL_START 63 64 /* 65 - * MAP_NR -- given an address, calculate the index of the page struct which 66 - * points to the address's page. 67 - */ 68 - #define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT) 69 - 70 - /* 71 * The basic type of a PTE - 32 bit physical addressing. 72 */ 73 typedef unsigned long pte_basic_t; ··· 148 # define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) 149 150 # ifdef CONFIG_MMU 151 - # define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr)) 152 # else /* CONFIG_MMU */ 153 # define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) 154 # define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
··· 62 #define PAGE_OFFSET CONFIG_KERNEL_START 63 64 /* 65 * The basic type of a PTE - 32 bit physical addressing. 66 */ 67 typedef unsigned long pte_basic_t; ··· 154 # define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) 155 156 # ifdef CONFIG_MMU 157 + 158 + # define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) 159 + # define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) 160 + # define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 161 + 162 # else /* CONFIG_MMU */ 163 # define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) 164 # define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+195
arch/microblaze/include/asm/pci-bridge.h
··· 1 #include <linux/pci.h>
··· 1 + #ifndef _ASM_MICROBLAZE_PCI_BRIDGE_H 2 + #define _ASM_MICROBLAZE_PCI_BRIDGE_H 3 + #ifdef __KERNEL__ 4 + /* 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License 7 + * as published by the Free Software Foundation; either version 8 + * 2 of the License, or (at your option) any later version. 9 + */ 10 #include <linux/pci.h> 11 + #include <linux/list.h> 12 + #include <linux/ioport.h> 13 + 14 + struct device_node; 15 + 16 + enum { 17 + /* Force re-assigning all resources (ignore firmware 18 + * setup completely) 19 + */ 20 + PCI_REASSIGN_ALL_RSRC = 0x00000001, 21 + 22 + /* Re-assign all bus numbers */ 23 + PCI_REASSIGN_ALL_BUS = 0x00000002, 24 + 25 + /* Do not try to assign, just use existing setup */ 26 + PCI_PROBE_ONLY = 0x00000004, 27 + 28 + /* Don't bother with ISA alignment unless the bridge has 29 + * ISA forwarding enabled 30 + */ 31 + PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, 32 + 33 + /* Enable domain numbers in /proc */ 34 + PCI_ENABLE_PROC_DOMAINS = 0x00000010, 35 + /* ... except for domain 0 */ 36 + PCI_COMPAT_DOMAIN_0 = 0x00000020, 37 + }; 38 + 39 + /* 40 + * Structure of a PCI controller (host bridge) 41 + */ 42 + struct pci_controller { 43 + struct pci_bus *bus; 44 + char is_dynamic; 45 + struct device_node *dn; 46 + struct list_head list_node; 47 + struct device *parent; 48 + 49 + int first_busno; 50 + int last_busno; 51 + 52 + int self_busno; 53 + 54 + void __iomem *io_base_virt; 55 + resource_size_t io_base_phys; 56 + 57 + resource_size_t pci_io_size; 58 + 59 + /* Some machines (PReP) have a non 1:1 mapping of 60 + * the PCI memory space in the CPU bus space 61 + */ 62 + resource_size_t pci_mem_offset; 63 + 64 + /* Some machines have a special region to forward the ISA 65 + * "memory" cycles such as VGA memory regions. Left to 0 66 + * if unsupported 67 + */ 68 + resource_size_t isa_mem_phys; 69 + resource_size_t isa_mem_size; 70 + 71 + struct pci_ops *ops; 72 + unsigned int __iomem *cfg_addr; 73 + void __iomem *cfg_data; 74 + 75 + /* 76 + * Used for variants of PCI indirect handling and possible quirks: 77 + * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1 78 + * EXT_REG - provides access to PCI-e extended registers 79 + * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS 80 + * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS 81 + * to determine which bus number to match on when generating type0 82 + * config cycles 83 + * NO_PCIE_LINK - the Freescale PCI-e controllers have issues with 84 + * hanging if we don't have link and try to do config cycles to 85 + * anything but the PHB. Only allow talking to the PHB if this is 86 + * set. 87 + * BIG_ENDIAN - cfg_addr is a big endian register 88 + * BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs 89 + * on the PLB4. Effectively disable MRM commands by setting this. 
90 + */ 91 + #define INDIRECT_TYPE_SET_CFG_TYPE 0x00000001 92 + #define INDIRECT_TYPE_EXT_REG 0x00000002 93 + #define INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004 94 + #define INDIRECT_TYPE_NO_PCIE_LINK 0x00000008 95 + #define INDIRECT_TYPE_BIG_ENDIAN 0x00000010 96 + #define INDIRECT_TYPE_BROKEN_MRM 0x00000020 97 + u32 indirect_type; 98 + 99 + /* Currently, we limit ourselves to 1 IO range and 3 mem 100 + * ranges since the common pci_bus structure can't handle more 101 + */ 102 + struct resource io_resource; 103 + struct resource mem_resources[3]; 104 + int global_number; /* PCI domain number */ 105 + }; 106 + 107 + static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) 108 + { 109 + return bus->sysdata; 110 + } 111 + 112 + static inline int isa_vaddr_is_ioport(void __iomem *address) 113 + { 114 + /* No specific ISA handling on ppc32 at this stage, it 115 + * all goes through PCI 116 + */ 117 + return 0; 118 + } 119 + 120 + /* These are used for config access before all the PCI probing 121 + has been done. */ 122 + extern int early_read_config_byte(struct pci_controller *hose, int bus, 123 + int dev_fn, int where, u8 *val); 124 + extern int early_read_config_word(struct pci_controller *hose, int bus, 125 + int dev_fn, int where, u16 *val); 126 + extern int early_read_config_dword(struct pci_controller *hose, int bus, 127 + int dev_fn, int where, u32 *val); 128 + extern int early_write_config_byte(struct pci_controller *hose, int bus, 129 + int dev_fn, int where, u8 val); 130 + extern int early_write_config_word(struct pci_controller *hose, int bus, 131 + int dev_fn, int where, u16 val); 132 + extern int early_write_config_dword(struct pci_controller *hose, int bus, 133 + int dev_fn, int where, u32 val); 134 + 135 + extern int early_find_capability(struct pci_controller *hose, int bus, 136 + int dev_fn, int cap); 137 + 138 + extern void setup_indirect_pci(struct pci_controller *hose, 139 + resource_size_t cfg_addr, 140 + resource_size_t cfg_data, u32 flags); 141 + 142 + /* Get the PCI host controller for an OF device */ 143 + extern struct pci_controller *pci_find_hose_for_OF_device( 144 + struct device_node *node); 145 + 146 + /* Fill up host controller resources from the OF node */ 147 + extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, 148 + struct device_node *dev, int primary); 149 + 150 + /* Allocate & free a PCI host bridge structure */ 151 + extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev); 152 + extern void pcibios_free_controller(struct pci_controller *phb); 153 + extern void pcibios_setup_phb_resources(struct pci_controller *hose); 154 + 155 + #ifdef CONFIG_PCI 156 + extern unsigned int pci_flags; 157 + 158 + static inline void pci_set_flags(int flags) 159 + { 160 + pci_flags = flags; 161 + } 162 + 163 + static inline void pci_add_flags(int flags) 164 + { 165 + pci_flags |= flags; 166 + } 167 + 168 + static inline int pci_has_flag(int flag) 169 + { 170 + return pci_flags & flag; 171 + } 172 + 173 + extern struct list_head hose_list; 174 + 175 + extern unsigned long pci_address_to_pio(phys_addr_t address); 176 + extern int pcibios_vaddr_is_ioport(void __iomem *address); 177 + #else 178 + static inline unsigned long pci_address_to_pio(phys_addr_t address) 179 + { 180 + return (unsigned long)-1; 181 + } 182 + static inline int pcibios_vaddr_is_ioport(void __iomem *address) 183 + { 184 + return 0; 185 + } 186 + 187 + static inline void pci_set_flags(int flags) { } 188 + static inline void pci_add_flags(int 
flags) { } 189 + static inline int pci_has_flag(int flag) 190 + { 191 + return 0; 192 + } 193 + #endif /* CONFIG_PCI */ 194 + 195 + #endif /* __KERNEL__ */ 196 + #endif /* _ASM_MICROBLAZE_PCI_BRIDGE_H */
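
A rough illustration of the early config accessors declared in this header (the function name and the devfn-0 probe heuristic are made up, not taken from the merge); host-bridge setup code could use them before the generic PCI probe has run:

    #include <linux/pci.h>
    #include <asm/pci-bridge.h>

    static int example_check_devfn0(struct pci_controller *hose)
    {
    	u32 id;

    	/* Read the vendor/device ID of devfn 0 on the hose's primary bus. */
    	if (early_read_config_dword(hose, hose->first_busno, 0,
    				    PCI_VENDOR_ID, &id))
    		return -EIO;

    	return (id == 0xffffffff) ? -ENODEV : 0;
    }
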
+177 -1
arch/microblaze/include/asm/pci.h
··· 1 - #include <asm-generic/pci.h>
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or 3 + * modify it under the terms of the GNU General Public License 4 + * as published by the Free Software Foundation; either version 5 + * 2 of the License, or (at your option) any later version. 6 + * 7 + * Based on powerpc version 8 + */ 9 + 10 + #ifndef __ASM_MICROBLAZE_PCI_H 11 + #define __ASM_MICROBLAZE_PCI_H 12 + #ifdef __KERNEL__ 13 + 14 + #include <linux/types.h> 15 + #include <linux/slab.h> 16 + #include <linux/string.h> 17 + #include <linux/dma-mapping.h> 18 + #include <linux/pci.h> 19 + 20 + #include <asm/scatterlist.h> 21 + #include <asm/io.h> 22 + #include <asm/prom.h> 23 + #include <asm/pci-bridge.h> 24 + 25 + #define PCIBIOS_MIN_IO 0x1000 26 + #define PCIBIOS_MIN_MEM 0x10000000 27 + 28 + struct pci_dev; 29 + 30 + /* Values for the `which' argument to sys_pciconfig_iobase syscall. */ 31 + #define IOBASE_BRIDGE_NUMBER 0 32 + #define IOBASE_MEMORY 1 33 + #define IOBASE_IO 2 34 + #define IOBASE_ISA_IO 3 35 + #define IOBASE_ISA_MEM 4 36 + 37 + #define pcibios_scan_all_fns(a, b) 0 38 + 39 + /* 40 + * Set this to 1 if you want the kernel to re-assign all PCI 41 + * bus numbers (don't do that on ppc64 yet !) 42 + */ 43 + #define pcibios_assign_all_busses() \ 44 + (pci_has_flag(PCI_REASSIGN_ALL_BUS)) 45 + 46 + static inline void pcibios_set_master(struct pci_dev *dev) 47 + { 48 + /* No special bus mastering setup handling */ 49 + } 50 + 51 + static inline void pcibios_penalize_isa_irq(int irq, int active) 52 + { 53 + /* We don't do dynamic PCI IRQ allocation */ 54 + } 55 + 56 + #ifdef CONFIG_PCI 57 + extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); 58 + extern struct dma_map_ops *get_pci_dma_ops(void); 59 + #else /* CONFIG_PCI */ 60 + #define set_pci_dma_ops(d) 61 + #define get_pci_dma_ops() NULL 62 + #endif 63 + 64 + #ifdef CONFIG_PCI 65 + static inline void pci_dma_burst_advice(struct pci_dev *pdev, 66 + enum pci_dma_burst_strategy *strat, 67 + unsigned long *strategy_parameter) 68 + { 69 + *strat = PCI_DMA_BURST_INFINITY; 70 + *strategy_parameter = ~0UL; 71 + } 72 + #endif 73 + 74 + extern int pci_domain_nr(struct pci_bus *bus); 75 + 76 + /* Decide whether to display the domain number in /proc */ 77 + extern int pci_proc_domain(struct pci_bus *bus); 78 + 79 + struct vm_area_struct; 80 + /* Map a range of PCI memory or I/O space for a device into user space */ 81 + int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, 82 + enum pci_mmap_state mmap_state, int write_combine); 83 + 84 + /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ 85 + #define HAVE_PCI_MMAP 1 86 + 87 + extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, 88 + size_t count); 89 + extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, 90 + size_t count); 91 + extern int pci_mmap_legacy_page_range(struct pci_bus *bus, 92 + struct vm_area_struct *vma, 93 + enum pci_mmap_state mmap_state); 94 + 95 + #define HAVE_PCI_LEGACY 1 96 + 97 + /* pci_unmap_{page,single} is a nop so... */ 98 + #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) 99 + #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) 100 + #define pci_unmap_addr(PTR, ADDR_NAME) (0) 101 + #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) 102 + #define pci_unmap_len(PTR, LEN_NAME) (0) 103 + #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) 104 + 105 + /* The PCI address space does equal the physical memory 106 + * address space (no IOMMU). 
The IDE and SCSI device layers use 107 + * this boolean for bounce buffer decisions. 108 + */ 109 + #define PCI_DMA_BUS_IS_PHYS (1) 110 + 111 + extern void pcibios_resource_to_bus(struct pci_dev *dev, 112 + struct pci_bus_region *region, 113 + struct resource *res); 114 + 115 + extern void pcibios_bus_to_resource(struct pci_dev *dev, 116 + struct resource *res, 117 + struct pci_bus_region *region); 118 + 119 + static inline struct resource *pcibios_select_root(struct pci_dev *pdev, 120 + struct resource *res) 121 + { 122 + struct resource *root = NULL; 123 + 124 + if (res->flags & IORESOURCE_IO) 125 + root = &ioport_resource; 126 + if (res->flags & IORESOURCE_MEM) 127 + root = &iomem_resource; 128 + 129 + return root; 130 + } 131 + 132 + extern void pcibios_claim_one_bus(struct pci_bus *b); 133 + 134 + extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); 135 + 136 + extern void pcibios_resource_survey(void); 137 + 138 + extern struct pci_controller *init_phb_dynamic(struct device_node *dn); 139 + extern int remove_phb_dynamic(struct pci_controller *phb); 140 + 141 + extern struct pci_dev *of_create_pci_dev(struct device_node *node, 142 + struct pci_bus *bus, int devfn); 143 + 144 + extern void of_scan_pci_bridge(struct device_node *node, 145 + struct pci_dev *dev); 146 + 147 + extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); 148 + extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); 149 + 150 + extern int pci_read_irq_line(struct pci_dev *dev); 151 + 152 + extern int pci_bus_find_capability(struct pci_bus *bus, 153 + unsigned int devfn, int cap); 154 + 155 + struct file; 156 + extern pgprot_t pci_phys_mem_access_prot(struct file *file, 157 + unsigned long pfn, 158 + unsigned long size, 159 + pgprot_t prot); 160 + 161 + #define HAVE_ARCH_PCI_RESOURCE_TO_USER 162 + extern void pci_resource_to_user(const struct pci_dev *dev, int bar, 163 + const struct resource *rsrc, 164 + resource_size_t *start, resource_size_t *end); 165 + 166 + extern void pcibios_setup_bus_devices(struct pci_bus *bus); 167 + extern void pcibios_setup_bus_self(struct pci_bus *bus); 168 + 169 + /* This part of code was originaly in xilinx-pci.h */ 170 + #ifdef CONFIG_PCI_XILINX 171 + extern void __init xilinx_pci_init(void); 172 + #else 173 + static inline void __init xilinx_pci_init(void) { return; } 174 + #endif 175 + 176 + #endif /* __KERNEL__ */ 177 + #endif /* __ASM_MICROBLAZE_PCI_H */
+1 -1
arch/microblaze/include/asm/pgalloc.h
··· 19 #include <asm/io.h> 20 #include <asm/page.h> 21 #include <asm/cache.h> 22 23 #define PGDIR_ORDER 0 24 ··· 112 unsigned long address) 113 { 114 pte_t *pte; 115 - extern int mem_init_done; 116 extern void *early_get_page(void); 117 if (mem_init_done) { 118 pte = (pte_t *)__get_free_page(GFP_KERNEL |
··· 19 #include <asm/io.h> 20 #include <asm/page.h> 21 #include <asm/cache.h> 22 + #include <asm/pgtable.h> 23 24 #define PGDIR_ORDER 0 25 ··· 111 unsigned long address) 112 { 113 pte_t *pte; 114 extern void *early_get_page(void); 115 if (mem_init_done) { 116 pte = (pte_t *)__get_free_page(GFP_KERNEL |
+30 -10
arch/microblaze/include/asm/pgtable.h
··· 16 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 17 remap_pfn_range(vma, vaddr, pfn, size, prot) 18 19 #ifndef CONFIG_MMU 20 21 #define pgd_present(pgd) (1) /* pages are always present on non MMU */ ··· 55 56 #define arch_enter_lazy_cpu_mode() do {} while (0) 57 58 #else /* CONFIG_MMU */ 59 60 #include <asm-generic/4level-fixup.h> ··· 74 75 extern unsigned long va_to_phys(unsigned long address); 76 extern pte_t *va_to_pte(unsigned long address); 77 - extern unsigned long ioremap_bot, ioremap_base; 78 79 /* 80 * The following only work if pte_present() is true. ··· 90 #define VMALLOC_START (CONFIG_KERNEL_START + \ 91 max(32 * 1024 * 1024UL, memory_size)) 92 #define VMALLOC_END ioremap_bot 93 - #define VMALLOC_VMADDR(x) ((unsigned long)(x)) 94 95 #endif /* __ASSEMBLY__ */ 96 97 /* 98 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash ··· 416 mts rmsr, %2\n\ 417 nop" 418 : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p) 419 - : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p) 420 : "cc"); 421 422 return old; ··· 585 int map_page(unsigned long va, phys_addr_t pa, int flags); 586 587 extern int mem_init_done; 588 - extern unsigned long ioremap_base; 589 - extern unsigned long ioremap_bot; 590 591 asmlinkage void __init mmu_init(void); 592 593 void __init *early_get_page(void); 594 595 - void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); 596 - void consistent_free(void *vaddr); 597 - void consistent_sync(void *vaddr, size_t size, int direction); 598 - void consistent_sync_page(struct page *page, unsigned long offset, 599 - size_t size, int direction); 600 #endif /* __ASSEMBLY__ */ 601 #endif /* __KERNEL__ */ 602 ··· 597 598 #ifndef __ASSEMBLY__ 599 #include <asm-generic/pgtable.h> 600 601 void setup_memory(void); 602 #endif /* __ASSEMBLY__ */
··· 16 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 17 remap_pfn_range(vma, vaddr, pfn, size, prot) 18 19 + #ifndef __ASSEMBLY__ 20 + extern int mem_init_done; 21 + #endif 22 + 23 #ifndef CONFIG_MMU 24 25 #define pgd_present(pgd) (1) /* pages are always present on non MMU */ ··· 51 52 #define arch_enter_lazy_cpu_mode() do {} while (0) 53 54 + #define pgprot_noncached_wc(prot) prot 55 + 56 #else /* CONFIG_MMU */ 57 58 #include <asm-generic/4level-fixup.h> ··· 68 69 extern unsigned long va_to_phys(unsigned long address); 70 extern pte_t *va_to_pte(unsigned long address); 71 72 /* 73 * The following only work if pte_present() is true. ··· 85 #define VMALLOC_START (CONFIG_KERNEL_START + \ 86 max(32 * 1024 * 1024UL, memory_size)) 87 #define VMALLOC_END ioremap_bot 88 89 #endif /* __ASSEMBLY__ */ 90 + 91 + /* 92 + * Macro to mark a page protection value as "uncacheable". 93 + */ 94 + 95 + #define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \ 96 + _PAGE_WRITETHRU) 97 + 98 + #define pgprot_noncached(prot) \ 99 + (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ 100 + _PAGE_NO_CACHE | _PAGE_GUARDED)) 101 + 102 + #define pgprot_noncached_wc(prot) \ 103 + (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ 104 + _PAGE_NO_CACHE)) 105 106 /* 107 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash ··· 397 mts rmsr, %2\n\ 398 nop" 399 : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p) 400 + : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p) 401 : "cc"); 402 403 return old; ··· 566 int map_page(unsigned long va, phys_addr_t pa, int flags); 567 568 extern int mem_init_done; 569 570 asmlinkage void __init mmu_init(void); 571 572 void __init *early_get_page(void); 573 574 #endif /* __ASSEMBLY__ */ 575 #endif /* __KERNEL__ */ 576 ··· 585 586 #ifndef __ASSEMBLY__ 587 #include <asm-generic/pgtable.h> 588 + 589 + extern unsigned long ioremap_bot, ioremap_base; 590 + 591 + void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); 592 + void consistent_free(void *vaddr); 593 + void consistent_sync(void *vaddr, size_t size, int direction); 594 + void consistent_sync_page(struct page *page, unsigned long offset, 595 + size_t size, int direction); 596 597 void setup_memory(void); 598 #endif /* __ASSEMBLY__ */
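
The pgprot_noncached()/pgprot_noncached_wc() macros added above are the kind of thing a driver mmap() handler consumes, together with the io_remap_pfn_range() define at the top of this header. A minimal sketch, assuming a hypothetical buffer pfn and handler name:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* example_buf_pfn stands in for the page frame number of some device or
     * reserved buffer; the handler itself is only illustrative. */
    static unsigned long example_buf_pfn;

    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	size_t size = vma->vm_end - vma->vm_start;

    	/* Mark the user mapping uncacheable before inserting the pages. */
    	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    	return io_remap_pfn_range(vma, vma->vm_start, example_buf_pfn,
    				  size, vma->vm_page_prot);
    }
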
+15
arch/microblaze/include/asm/prom.h
··· 31 /* Other Prototypes */ 32 extern int early_uartlite_console(void); 33 34 /* 35 * OF address retreival & translation 36 */
··· 31 /* Other Prototypes */ 32 extern int early_uartlite_console(void); 33 34 + #ifdef CONFIG_PCI 35 + /* 36 + * PCI <-> OF matching functions 37 + * (XXX should these be here?) 38 + */ 39 + struct pci_bus; 40 + struct pci_dev; 41 + extern int pci_device_from_OF_node(struct device_node *node, 42 + u8 *bus, u8 *devfn); 43 + extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus, 44 + int devfn); 45 + extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev); 46 + extern void pci_create_OF_bus_map(void); 47 + #endif 48 + 49 /* 50 * OF address retreival & translation 51 */
+3
arch/microblaze/include/asm/system.h
··· 87 extern char *klimit; 88 extern void ret_from_fork(void); 89 90 #ifdef CONFIG_DEBUG_FS 91 extern struct dentry *of_debugfs_root; 92 #endif
··· 87 extern char *klimit; 88 extern void ret_from_fork(void); 89 90 + extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); 91 + extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); 92 + 93 #ifdef CONFIG_DEBUG_FS 94 extern struct dentry *of_debugfs_root; 95 #endif
+1 -1
arch/microblaze/include/asm/tlbflush.h
··· 23 extern void _tlbie(unsigned long address); 24 extern void _tlbia(void); 25 26 - #define __tlbia() _tlbia() 27 28 static inline void local_flush_tlb_all(void) 29 { __tlbia(); }
··· 23 extern void _tlbie(unsigned long address); 24 extern void _tlbia(void); 25 26 + #define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); } 27 28 static inline void local_flush_tlb_all(void) 29 { __tlbia(); }
+1 -1
arch/microblaze/kernel/Makefile
··· 14 15 extra-y := head.o vmlinux.lds 16 17 - obj-y += exceptions.o \ 18 hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ 19 of_platform.o process.o prom.o prom_parse.o ptrace.o \ 20 setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
··· 14 15 extra-y := head.o vmlinux.lds 16 17 + obj-y += dma.o exceptions.o \ 18 hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ 19 of_platform.o process.o prom.o prom_parse.o ptrace.o \ 20 setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
+1
arch/microblaze/kernel/asm-offsets.c
··· 90 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 91 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); 92 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); 93 BLANK(); 94 95 /* struct cpu_context */
··· 90 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 91 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); 92 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); 93 + DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count)); 94 BLANK(); 95 96 /* struct cpu_context */
+167 -44
arch/microblaze/kernel/cpu/cache.c
··· 15 #include <asm/cpuinfo.h> 16 #include <asm/pvr.h> 17 18 - static inline void __invalidate_flush_icache(unsigned int addr) 19 - { 20 - __asm__ __volatile__ ("wic %0, r0;" \ 21 - : : "r" (addr)); 22 - } 23 - 24 - static inline void __flush_dcache(unsigned int addr) 25 - { 26 - __asm__ __volatile__ ("wdc.flush %0, r0;" \ 27 - : : "r" (addr)); 28 - } 29 - 30 - static inline void __invalidate_dcache(unsigned int baseaddr, 31 - unsigned int offset) 32 - { 33 - __asm__ __volatile__ ("wdc.clear %0, %1;" \ 34 - : : "r" (baseaddr), "r" (offset)); 35 - } 36 - 37 static inline void __enable_icache_msr(void) 38 { 39 __asm__ __volatile__ (" msrset r0, %0; \ ··· 129 int step = -line_length; \ 130 BUG_ON(step >= 0); \ 131 \ 132 - __asm__ __volatile__ (" 1: " #op " r0, %0; \ 133 - bgtid %0, 1b; \ 134 - addk %0, %0, %1; \ 135 " : : "r" (len), "r" (step) \ 136 : "memory"); \ 137 } while (0); ··· 143 int count = end - start; \ 144 BUG_ON(count <= 0); \ 145 \ 146 - __asm__ __volatile__ (" 1: " #op " %0, %1; \ 147 - bgtid %1, 1b; \ 148 - addk %1, %1, %2; \ 149 " : : "r" (start), "r" (count), \ 150 "r" (step) : "memory"); \ 151 } while (0); ··· 156 int volatile temp; \ 157 BUG_ON(end - start <= 0); \ 158 \ 159 - __asm__ __volatile__ (" 1: " #op " %1, r0; \ 160 cmpu %0, %1, %2; \ 161 bgtid %0, 1b; \ 162 addk %1, %1, %3; \ ··· 164 "r" (line_length) : "memory"); \ 165 } while (0); 166 167 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 168 { 169 unsigned long flags; 170 - 171 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 172 (unsigned int)start, (unsigned int) end); 173 ··· 181 local_irq_save(flags); 182 __disable_icache_msr(); 183 184 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 185 - 186 __enable_icache_msr(); 187 local_irq_restore(flags); 188 } ··· 196 unsigned long end) 197 { 198 unsigned long flags; 199 - 200 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 201 (unsigned int)start, (unsigned int) end); 202 ··· 208 local_irq_save(flags); 209 __disable_icache_nomsr(); 210 211 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 212 213 __enable_icache_nomsr(); 214 local_irq_restore(flags); ··· 223 static void __flush_icache_range_noirq(unsigned long start, 224 unsigned long end) 225 { 226 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 227 (unsigned int)start, (unsigned int) end); 228 229 CACHE_LOOP_LIMITS(start, end, 230 cpuinfo.icache_line_length, cpuinfo.icache_size); 231 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 232 } 233 234 static void __flush_icache_all_msr_irq(void) 235 { 236 unsigned long flags; 237 - 238 pr_debug("%s\n", __func__); 239 240 local_irq_save(flags); 241 __disable_icache_msr(); 242 - 243 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 244 - 245 __enable_icache_msr(); 246 local_irq_restore(flags); 247 } ··· 265 static void __flush_icache_all_nomsr_irq(void) 266 { 267 unsigned long flags; 268 - 269 pr_debug("%s\n", __func__); 270 271 local_irq_save(flags); 272 __disable_icache_nomsr(); 273 - 274 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 275 - 276 __enable_icache_nomsr(); 277 local_irq_restore(flags); 278 } 279 280 static void __flush_icache_all_noirq(void) 281 { 282 pr_debug("%s\n", __func__); 283 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 284 } 285 286 static void __invalidate_dcache_all_msr_irq(void) 287 { 288 unsigned long flags; 289 - 290 pr_debug("%s\n", __func__); 291 292 local_irq_save(flags); 293 
__disable_dcache_msr(); 294 - 295 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 296 - 297 __enable_dcache_msr(); 298 local_irq_restore(flags); 299 } ··· 325 static void __invalidate_dcache_all_nomsr_irq(void) 326 { 327 unsigned long flags; 328 - 329 pr_debug("%s\n", __func__); 330 331 local_irq_save(flags); 332 __disable_dcache_nomsr(); 333 - 334 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 335 - 336 __enable_dcache_nomsr(); 337 local_irq_restore(flags); 338 } 339 340 static void __invalidate_dcache_all_noirq_wt(void) 341 { 342 pr_debug("%s\n", __func__); 343 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) 344 } 345 346 /* FIXME this is weird - should be only wdc but not work 347 * MS: I am getting bus errors and other weird things */ 348 static void __invalidate_dcache_all_wb(void) 349 { 350 pr_debug("%s\n", __func__); 351 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 352 wdc.clear) 353 } 354 355 static void __invalidate_dcache_range_wb(unsigned long start, 356 unsigned long end) 357 { 358 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 359 (unsigned int)start, (unsigned int) end); 360 361 CACHE_LOOP_LIMITS(start, end, 362 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 363 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); 364 } 365 366 static void __invalidate_dcache_range_nomsr_wt(unsigned long start, 367 unsigned long end) 368 { 369 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 370 (unsigned int)start, (unsigned int) end); 371 CACHE_LOOP_LIMITS(start, end, 372 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 373 374 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 375 } 376 377 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, 378 unsigned long end) 379 { 380 unsigned long flags; 381 - 382 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 383 (unsigned int)start, (unsigned int) end); 384 CACHE_LOOP_LIMITS(start, end, ··· 434 local_irq_save(flags); 435 __disable_dcache_msr(); 436 437 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 438 439 __enable_dcache_msr(); 440 local_irq_restore(flags); ··· 450 unsigned long end) 451 { 452 unsigned long flags; 453 - 454 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 455 (unsigned int)start, (unsigned int) end); 456 ··· 462 local_irq_save(flags); 463 __disable_dcache_nomsr(); 464 465 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 466 467 __enable_dcache_nomsr(); 468 local_irq_restore(flags); ··· 476 477 static void __flush_dcache_all_wb(void) 478 { 479 pr_debug("%s\n", __func__); 480 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 481 wdc.flush); 482 } 483 484 static void __flush_dcache_range_wb(unsigned long start, unsigned long end) 485 { 486 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 487 (unsigned int)start, (unsigned int) end); 488 489 CACHE_LOOP_LIMITS(start, end, 490 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 491 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); 492 } 493 494 /* struct for wb caches and for wt caches */ ··· 611 #define CPUVER_7_20_A 0x0c 612 #define CPUVER_7_20_D 0x0f 613 614 - #define INFO(s) printk(KERN_INFO "cache: " s " \n"); 615 616 void microblaze_cache_init(void) 617 { ··· 650 } 651 } 652 } 653 }
··· 15 #include <asm/cpuinfo.h> 16 #include <asm/pvr.h> 17 18 static inline void __enable_icache_msr(void) 19 { 20 __asm__ __volatile__ (" msrset r0, %0; \ ··· 148 int step = -line_length; \ 149 BUG_ON(step >= 0); \ 150 \ 151 + __asm__ __volatile__ (" 1: " #op " r0, %0; \ 152 + bgtid %0, 1b; \ 153 + addk %0, %0, %1; \ 154 " : : "r" (len), "r" (step) \ 155 : "memory"); \ 156 } while (0); ··· 162 int count = end - start; \ 163 BUG_ON(count <= 0); \ 164 \ 165 + __asm__ __volatile__ (" 1: " #op " %0, %1; \ 166 + bgtid %1, 1b; \ 167 + addk %1, %1, %2; \ 168 " : : "r" (start), "r" (count), \ 169 "r" (step) : "memory"); \ 170 } while (0); ··· 175 int volatile temp; \ 176 BUG_ON(end - start <= 0); \ 177 \ 178 + __asm__ __volatile__ (" 1: " #op " %1, r0; \ 179 cmpu %0, %1, %2; \ 180 bgtid %0, 1b; \ 181 addk %1, %1, %3; \ ··· 183 "r" (line_length) : "memory"); \ 184 } while (0); 185 186 + #define ASM_LOOP 187 + 188 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 189 { 190 unsigned long flags; 191 + #ifndef ASM_LOOP 192 + int i; 193 + #endif 194 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 195 (unsigned int)start, (unsigned int) end); 196 ··· 196 local_irq_save(flags); 197 __disable_icache_msr(); 198 199 + #ifdef ASM_LOOP 200 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 201 + #else 202 + for (i = start; i < end; i += cpuinfo.icache_line_length) 203 + __asm__ __volatile__ ("wic %0, r0;" \ 204 + : : "r" (i)); 205 + #endif 206 __enable_icache_msr(); 207 local_irq_restore(flags); 208 } ··· 206 unsigned long end) 207 { 208 unsigned long flags; 209 + #ifndef ASM_LOOP 210 + int i; 211 + #endif 212 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 213 (unsigned int)start, (unsigned int) end); 214 ··· 216 local_irq_save(flags); 217 __disable_icache_nomsr(); 218 219 + #ifdef ASM_LOOP 220 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 221 + #else 222 + for (i = start; i < end; i += cpuinfo.icache_line_length) 223 + __asm__ __volatile__ ("wic %0, r0;" \ 224 + : : "r" (i)); 225 + #endif 226 227 __enable_icache_nomsr(); 228 local_irq_restore(flags); ··· 225 static void __flush_icache_range_noirq(unsigned long start, 226 unsigned long end) 227 { 228 + #ifndef ASM_LOOP 229 + int i; 230 + #endif 231 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 232 (unsigned int)start, (unsigned int) end); 233 234 CACHE_LOOP_LIMITS(start, end, 235 cpuinfo.icache_line_length, cpuinfo.icache_size); 236 + #ifdef ASM_LOOP 237 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 238 + #else 239 + for (i = start; i < end; i += cpuinfo.icache_line_length) 240 + __asm__ __volatile__ ("wic %0, r0;" \ 241 + : : "r" (i)); 242 + #endif 243 } 244 245 static void __flush_icache_all_msr_irq(void) 246 { 247 unsigned long flags; 248 + #ifndef ASM_LOOP 249 + int i; 250 + #endif 251 pr_debug("%s\n", __func__); 252 253 local_irq_save(flags); 254 __disable_icache_msr(); 255 + #ifdef ASM_LOOP 256 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 257 + #else 258 + for (i = 0; i < cpuinfo.icache_size; 259 + i += cpuinfo.icache_line_length) 260 + __asm__ __volatile__ ("wic %0, r0;" \ 261 + : : "r" (i)); 262 + #endif 263 __enable_icache_msr(); 264 local_irq_restore(flags); 265 } ··· 251 static void __flush_icache_all_nomsr_irq(void) 252 { 253 unsigned long flags; 254 + #ifndef ASM_LOOP 255 + int i; 256 + #endif 257 pr_debug("%s\n", __func__); 258 259 local_irq_save(flags); 260 __disable_icache_nomsr(); 261 + #ifdef ASM_LOOP 262 
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 263 + #else 264 + for (i = 0; i < cpuinfo.icache_size; 265 + i += cpuinfo.icache_line_length) 266 + __asm__ __volatile__ ("wic %0, r0;" \ 267 + : : "r" (i)); 268 + #endif 269 __enable_icache_nomsr(); 270 local_irq_restore(flags); 271 } 272 273 static void __flush_icache_all_noirq(void) 274 { 275 + #ifndef ASM_LOOP 276 + int i; 277 + #endif 278 pr_debug("%s\n", __func__); 279 + #ifdef ASM_LOOP 280 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 281 + #else 282 + for (i = 0; i < cpuinfo.icache_size; 283 + i += cpuinfo.icache_line_length) 284 + __asm__ __volatile__ ("wic %0, r0;" \ 285 + : : "r" (i)); 286 + #endif 287 } 288 289 static void __invalidate_dcache_all_msr_irq(void) 290 { 291 unsigned long flags; 292 + #ifndef ASM_LOOP 293 + int i; 294 + #endif 295 pr_debug("%s\n", __func__); 296 297 local_irq_save(flags); 298 __disable_dcache_msr(); 299 + #ifdef ASM_LOOP 300 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 301 + #else 302 + for (i = 0; i < cpuinfo.dcache_size; 303 + i += cpuinfo.dcache_line_length) 304 + __asm__ __volatile__ ("wdc %0, r0;" \ 305 + : : "r" (i)); 306 + #endif 307 __enable_dcache_msr(); 308 local_irq_restore(flags); 309 } ··· 287 static void __invalidate_dcache_all_nomsr_irq(void) 288 { 289 unsigned long flags; 290 + #ifndef ASM_LOOP 291 + int i; 292 + #endif 293 pr_debug("%s\n", __func__); 294 295 local_irq_save(flags); 296 __disable_dcache_nomsr(); 297 + #ifdef ASM_LOOP 298 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 299 + #else 300 + for (i = 0; i < cpuinfo.dcache_size; 301 + i += cpuinfo.dcache_line_length) 302 + __asm__ __volatile__ ("wdc %0, r0;" \ 303 + : : "r" (i)); 304 + #endif 305 __enable_dcache_nomsr(); 306 local_irq_restore(flags); 307 } 308 309 static void __invalidate_dcache_all_noirq_wt(void) 310 { 311 + #ifndef ASM_LOOP 312 + int i; 313 + #endif 314 pr_debug("%s\n", __func__); 315 + #ifdef ASM_LOOP 316 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) 317 + #else 318 + for (i = 0; i < cpuinfo.dcache_size; 319 + i += cpuinfo.dcache_line_length) 320 + __asm__ __volatile__ ("wdc %0, r0;" \ 321 + : : "r" (i)); 322 + #endif 323 } 324 325 /* FIXME this is weird - should be only wdc but not work 326 * MS: I am getting bus errors and other weird things */ 327 static void __invalidate_dcache_all_wb(void) 328 { 329 + #ifndef ASM_LOOP 330 + int i; 331 + #endif 332 pr_debug("%s\n", __func__); 333 + #ifdef ASM_LOOP 334 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 335 wdc.clear) 336 + #else 337 + for (i = 0; i < cpuinfo.dcache_size; 338 + i += cpuinfo.dcache_line_length) 339 + __asm__ __volatile__ ("wdc.clear %0, r0;" \ 340 + : : "r" (i)); 341 + #endif 342 } 343 344 static void __invalidate_dcache_range_wb(unsigned long start, 345 unsigned long end) 346 { 347 + #ifndef ASM_LOOP 348 + int i; 349 + #endif 350 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 351 (unsigned int)start, (unsigned int) end); 352 353 CACHE_LOOP_LIMITS(start, end, 354 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 355 + #ifdef ASM_LOOP 356 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); 357 + #else 358 + for (i = start; i < end; i += cpuinfo.icache_line_length) 359 + __asm__ __volatile__ ("wdc.clear %0, r0;" \ 360 + : : "r" (i)); 361 + #endif 362 } 363 364 static void __invalidate_dcache_range_nomsr_wt(unsigned long start, 365 unsigned long end) 366 { 367 + #ifndef ASM_LOOP 368 + int i; 369 
+ #endif 370 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 371 (unsigned int)start, (unsigned int) end); 372 CACHE_LOOP_LIMITS(start, end, 373 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 374 375 + #ifdef ASM_LOOP 376 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 377 + #else 378 + for (i = start; i < end; i += cpuinfo.icache_line_length) 379 + __asm__ __volatile__ ("wdc %0, r0;" \ 380 + : : "r" (i)); 381 + #endif 382 } 383 384 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, 385 unsigned long end) 386 { 387 unsigned long flags; 388 + #ifndef ASM_LOOP 389 + int i; 390 + #endif 391 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 392 (unsigned int)start, (unsigned int) end); 393 CACHE_LOOP_LIMITS(start, end, ··· 349 local_irq_save(flags); 350 __disable_dcache_msr(); 351 352 + #ifdef ASM_LOOP 353 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 354 + #else 355 + for (i = start; i < end; i += cpuinfo.icache_line_length) 356 + __asm__ __volatile__ ("wdc %0, r0;" \ 357 + : : "r" (i)); 358 + #endif 359 360 __enable_dcache_msr(); 361 local_irq_restore(flags); ··· 359 unsigned long end) 360 { 361 unsigned long flags; 362 + #ifndef ASM_LOOP 363 + int i; 364 + #endif 365 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 366 (unsigned int)start, (unsigned int) end); 367 ··· 369 local_irq_save(flags); 370 __disable_dcache_nomsr(); 371 372 + #ifdef ASM_LOOP 373 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 374 + #else 375 + for (i = start; i < end; i += cpuinfo.icache_line_length) 376 + __asm__ __volatile__ ("wdc %0, r0;" \ 377 + : : "r" (i)); 378 + #endif 379 380 __enable_dcache_nomsr(); 381 local_irq_restore(flags); ··· 377 378 static void __flush_dcache_all_wb(void) 379 { 380 + #ifndef ASM_LOOP 381 + int i; 382 + #endif 383 pr_debug("%s\n", __func__); 384 + #ifdef ASM_LOOP 385 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 386 wdc.flush); 387 + #else 388 + for (i = 0; i < cpuinfo.dcache_size; 389 + i += cpuinfo.dcache_line_length) 390 + __asm__ __volatile__ ("wdc.flush %0, r0;" \ 391 + : : "r" (i)); 392 + #endif 393 } 394 395 static void __flush_dcache_range_wb(unsigned long start, unsigned long end) 396 { 397 + #ifndef ASM_LOOP 398 + int i; 399 + #endif 400 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 401 (unsigned int)start, (unsigned int) end); 402 403 CACHE_LOOP_LIMITS(start, end, 404 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 405 + #ifdef ASM_LOOP 406 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); 407 + #else 408 + for (i = start; i < end; i += cpuinfo.icache_line_length) 409 + __asm__ __volatile__ ("wdc.flush %0, r0;" \ 410 + : : "r" (i)); 411 + #endif 412 } 413 414 /* struct for wb caches and for wt caches */ ··· 493 #define CPUVER_7_20_A 0x0c 494 #define CPUVER_7_20_D 0x0f 495 496 + #define INFO(s) printk(KERN_INFO "cache: " s "\n"); 497 498 void microblaze_cache_init(void) 499 { ··· 532 } 533 } 534 } 535 + invalidate_dcache(); 536 + enable_dcache(); 537 + 538 + invalidate_icache(); 539 + enable_icache(); 540 }
+156
arch/microblaze/kernel/dma.c
···
··· 1 + /* 2 + * Copyright (C) 2009-2010 PetaLogix 3 + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation 4 + * 5 + * Provide default implementations of the DMA mapping callbacks for 6 + * directly mapped busses. 7 + */ 8 + 9 + #include <linux/device.h> 10 + #include <linux/dma-mapping.h> 11 + #include <linux/dma-debug.h> 12 + #include <asm/bug.h> 13 + #include <asm/cacheflush.h> 14 + 15 + /* 16 + * Generic direct DMA implementation 17 + * 18 + * This implementation supports a per-device offset that can be applied if 19 + * the address at which memory is visible to devices is not 0. Platform code 20 + * can set archdata.dma_data to an unsigned long holding the offset. By 21 + * default the offset is PCI_DRAM_OFFSET. 22 + */ 23 + static inline void __dma_sync_page(unsigned long paddr, unsigned long offset, 24 + size_t size, enum dma_data_direction direction) 25 + { 26 + switch (direction) { 27 + case DMA_TO_DEVICE: 28 + flush_dcache_range(paddr + offset, paddr + offset + size); 29 + break; 30 + case DMA_FROM_DEVICE: 31 + invalidate_dcache_range(paddr + offset, paddr + offset + size); 32 + break; 33 + default: 34 + BUG(); 35 + } 36 + } 37 + 38 + static unsigned long get_dma_direct_offset(struct device *dev) 39 + { 40 + if (dev) 41 + return (unsigned long)dev->archdata.dma_data; 42 + 43 + return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ 44 + } 45 + 46 + #define NOT_COHERENT_CACHE 47 + 48 + static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 49 + dma_addr_t *dma_handle, gfp_t flag) 50 + { 51 + #ifdef NOT_COHERENT_CACHE 52 + return consistent_alloc(flag, size, dma_handle); 53 + #else 54 + void *ret; 55 + struct page *page; 56 + int node = dev_to_node(dev); 57 + 58 + /* ignore region specifiers */ 59 + flag &= ~(__GFP_HIGHMEM); 60 + 61 + page = alloc_pages_node(node, flag, get_order(size)); 62 + if (page == NULL) 63 + return NULL; 64 + ret = page_address(page); 65 + memset(ret, 0, size); 66 + *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev); 67 + 68 + return ret; 69 + #endif 70 + } 71 + 72 + static void dma_direct_free_coherent(struct device *dev, size_t size, 73 + void *vaddr, dma_addr_t dma_handle) 74 + { 75 + #ifdef NOT_COHERENT_CACHE 76 + consistent_free(vaddr); 77 + #else 78 + free_pages((unsigned long)vaddr, get_order(size)); 79 + #endif 80 + } 81 + 82 + static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 83 + int nents, enum dma_data_direction direction, 84 + struct dma_attrs *attrs) 85 + { 86 + struct scatterlist *sg; 87 + int i; 88 + 89 + /* FIXME this part of code is untested */ 90 + for_each_sg(sgl, sg, nents, i) { 91 + sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); 92 + sg->dma_length = sg->length; 93 + __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset, 94 + sg->length, direction); 95 + } 96 + 97 + return nents; 98 + } 99 + 100 + static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, 101 + int nents, enum dma_data_direction direction, 102 + struct dma_attrs *attrs) 103 + { 104 + } 105 + 106 + static int dma_direct_dma_supported(struct device *dev, u64 mask) 107 + { 108 + return 1; 109 + } 110 + 111 + static inline dma_addr_t dma_direct_map_page(struct device *dev, 112 + struct page *page, 113 + unsigned long offset, 114 + size_t size, 115 + enum dma_data_direction direction, 116 + struct dma_attrs *attrs) 117 + { 118 + __dma_sync_page(page_to_phys(page), offset, size, direction); 119 + return page_to_phys(page) + offset + get_dma_direct_offset(dev); 120 + } 121 + 122 + 
static inline void dma_direct_unmap_page(struct device *dev, 123 + dma_addr_t dma_address, 124 + size_t size, 125 + enum dma_data_direction direction, 126 + struct dma_attrs *attrs) 127 + { 128 + /* No extra cache cleanup is necessary here. 129 + * 130 + * dma_address is already a physical address, which is what 131 + * __dma_sync_page expects, so it is passed through unchanged. 132 + */ 133 + __dma_sync_page(dma_address, 0, size, direction); 134 + } 135 + 136 + struct dma_map_ops dma_direct_ops = { 137 + .alloc_coherent = dma_direct_alloc_coherent, 138 + .free_coherent = dma_direct_free_coherent, 139 + .map_sg = dma_direct_map_sg, 140 + .unmap_sg = dma_direct_unmap_sg, 141 + .dma_supported = dma_direct_dma_supported, 142 + .map_page = dma_direct_map_page, 143 + .unmap_page = dma_direct_unmap_page, 144 + }; 145 + EXPORT_SYMBOL(dma_direct_ops); 146 + 147 + /* Number of entries preallocated for DMA-API debugging */ 148 + #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) 149 + 150 + static int __init dma_init(void) 151 + { 152 + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 153 + 154 + return 0; 155 + } 156 + fs_initcall(dma_init);
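The dma_direct_ops structure above plugs into the generic struct dma_map_ops dispatch, so drivers keep using the ordinary DMA API and never call these helpers directly. A minimal sketch of that driver-side view; my_setup_dma() and the buffer size are invented for illustration, and the device is assumed to keep the default archdata offset:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative sketch only; my_setup_dma() is not part of this series. */
static int my_setup_dma(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* Dispatches to dma_direct_alloc_coherent(); with a non-coherent
	 * cache this ends up in consistent_alloc() (see mm/consistent.c
	 * below). */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program "handle" into the device's DMA registers ... */

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}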
+57 -59
arch/microblaze/kernel/entry.S
··· 305 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 306 addi r11, r0, 1; 307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 308 - 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 309 /* Save away the syscall number. */ 310 swi r12, r1, PTO+PT_R0; 311 tovirt(r1,r1) ··· 322 rtid r11, 0 323 nop 324 3: 325 - add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */ 326 - lwi r11, r11, TS_THREAD_INFO /* get thread info */ 327 lwi r11, r11, TI_FLAGS /* get flags in thread info */ 328 andi r11, r11, _TIF_WORK_SYSCALL_MASK 329 beqi r11, 4f ··· 381 /* See if returning to kernel mode, if so, skip resched &c. */ 382 bnei r11, 2f; 383 384 /* We're returning to user mode, so check for various conditions that 385 * trigger rescheduling. */ 386 - # FIXME: Restructure all these flag checks. 387 - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 388 - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 389 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 390 andi r11, r11, _TIF_WORK_SYSCALL_MASK 391 beqi r11, 1f 392 393 - swi r3, r1, PTO + PT_R3 394 - swi r4, r1, PTO + PT_R4 395 brlid r15, do_syscall_trace_leave 396 addik r5, r1, PTO + PT_R0 397 - lwi r3, r1, PTO + PT_R3 398 - lwi r4, r1, PTO + PT_R4 399 1: 400 - 401 /* We're returning to user mode, so check for various conditions that 402 * trigger rescheduling. */ 403 - /* Get current task ptr into r11 */ 404 - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 405 - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 406 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 407 andi r11, r11, _TIF_NEED_RESCHED; 408 beqi r11, 5f; 409 410 - swi r3, r1, PTO + PT_R3; /* store syscall result */ 411 - swi r4, r1, PTO + PT_R4; 412 bralid r15, schedule; /* Call scheduler */ 413 nop; /* delay slot */ 414 - lwi r3, r1, PTO + PT_R3; /* restore syscall result */ 415 - lwi r4, r1, PTO + PT_R4; 416 417 /* Maybe handle a signal */ 418 - 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 419 - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 420 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 421 andi r11, r11, _TIF_SIGPENDING; 422 beqi r11, 1f; /* Signals to handle, handle them */ 423 424 - swi r3, r1, PTO + PT_R3; /* store syscall result */ 425 - swi r4, r1, PTO + PT_R4; 426 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 427 - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ 428 addi r7, r0, 1; /* Arg 3: int in_syscall */ 429 bralid r15, do_signal; /* Handle any signals */ 430 - nop; 431 lwi r3, r1, PTO + PT_R3; /* restore syscall result */ 432 lwi r4, r1, PTO + PT_R4; 433 434 - /* Finally, return to user state. */ 435 - 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 436 - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 437 - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ 438 VM_OFF; 439 tophys(r1,r1); 440 RESTORE_REGS; ··· 554 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ 555 addi r11, r0, 1; \ 556 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ 557 - 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ 558 /* Save away the syscall number. */ \ 559 swi r0, r1, PTO+PT_R0; \ 560 tovirt(r1,r1) ··· 662 663 /* We're returning to user mode, so check for various conditions that 664 trigger rescheduling. 
*/ 665 - /* Get current task ptr into r11 */ 666 - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 667 - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 668 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 669 andi r11, r11, _TIF_NEED_RESCHED; 670 beqi r11, 5f; ··· 672 nop; /* delay slot */ 673 674 /* Maybe handle a signal */ 675 - 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 676 - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 677 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 678 andi r11, r11, _TIF_SIGPENDING; 679 beqi r11, 1f; /* Signals to handle, handle them */ ··· 691 * store return registers separately because this macros is use 692 * for others exceptions */ 693 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 694 - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ 695 addi r7, r0, 0; /* Arg 3: int in_syscall */ 696 bralid r15, do_signal; /* Handle any signals */ 697 - nop; 698 699 /* Finally, return to user state. */ 700 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 701 - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 702 - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ 703 VM_OFF; 704 tophys(r1,r1); 705 ··· 786 swi r11, r0, TOPHYS(PER_CPU(KM)); 787 788 2: 789 - lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 790 swi r0, r1, PTO + PT_R0; 791 tovirt(r1,r1) 792 la r5, r1, PTO; ··· 801 lwi r11, r1, PTO + PT_MODE; 802 bnei r11, 2f; 803 804 - add r11, r0, CURRENT_TASK; 805 - lwi r11, r11, TS_THREAD_INFO; 806 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ 807 andi r11, r11, _TIF_NEED_RESCHED; 808 beqi r11, 5f ··· 809 nop; /* delay slot */ 810 811 /* Maybe handle a signal */ 812 - 5: add r11, r0, CURRENT_TASK; 813 - lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */ 814 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 815 andi r11, r11, _TIF_SIGPENDING; 816 beqid r11, no_intr_resched ··· 824 /* Disable interrupts, we are now committed to the state restore */ 825 disable_irq 826 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ 827 - add r11, r0, CURRENT_TASK; 828 - swi r11, r0, PER_CPU(CURRENT_SAVE); 829 VM_OFF; 830 tophys(r1,r1); 831 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ ··· 834 lwi r1, r1, PT_R1 - PT_SIZE; 835 bri 6f; 836 /* MS: Return to kernel state. */ 837 - 2: VM_OFF /* MS: turn off MMU */ 838 tophys(r1,r1) 839 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 840 lwi r4, r1, PTO + PT_R4; ··· 917 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 918 addi r11, r0, 1; 919 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 920 - 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 921 /* Save away the syscall number. */ 922 swi r0, r1, PTO+PT_R0; 923 tovirt(r1,r1) ··· 937 bnei r11, 2f; 938 939 /* Get current task ptr into r11 */ 940 - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 941 - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 942 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 943 andi r11, r11, _TIF_NEED_RESCHED; 944 beqi r11, 5f; ··· 950 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. 
*/ 951 952 /* Maybe handle a signal */ 953 - 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 954 - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 955 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 956 andi r11, r11, _TIF_SIGPENDING; 957 beqi r11, 1f; /* Signals to handle, handle them */ ··· 966 (in a possibly modified form) after do_signal returns. */ 967 968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 969 - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ 970 addi r7, r0, 0; /* Arg 3: int in_syscall */ 971 bralid r15, do_signal; /* Handle any signals */ 972 - nop; 973 974 975 /* Finally, return to user state. */ 976 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 977 - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 978 - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ 979 VM_OFF; 980 tophys(r1,r1); 981 ··· 1005 1006 ENTRY(_switch_to) 1007 /* prepare return value */ 1008 - addk r3, r0, r31 1009 1010 /* save registers in cpu_context */ 1011 /* use r11 and r12, volatile registers, as temp register */ ··· 1049 nop 1050 swi r12, r11, CC_FSR 1051 1052 - /* update r31, the current */ 1053 - lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ 1054 /* stored it to current_save too */ 1055 - swi r31, r0, PER_CPU(CURRENT_SAVE) 1056 1057 /* get new process' cpu context and restore */ 1058 /* give me start where start context of next task */
··· 305 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 306 addi r11, r0, 1; 307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 308 + 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 309 /* Save away the syscall number. */ 310 swi r12, r1, PTO+PT_R0; 311 tovirt(r1,r1) ··· 322 rtid r11, 0 323 nop 324 3: 325 + lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ 326 lwi r11, r11, TI_FLAGS /* get flags in thread info */ 327 andi r11, r11, _TIF_WORK_SYSCALL_MASK 328 beqi r11, 4f ··· 382 /* See if returning to kernel mode, if so, skip resched &c. */ 383 bnei r11, 2f; 384 385 + swi r3, r1, PTO + PT_R3 386 + swi r4, r1, PTO + PT_R4 387 + 388 /* We're returning to user mode, so check for various conditions that 389 * trigger rescheduling. */ 390 + /* FIXME: Restructure all these flag checks. */ 391 + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ 392 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 393 andi r11, r11, _TIF_WORK_SYSCALL_MASK 394 beqi r11, 1f 395 396 brlid r15, do_syscall_trace_leave 397 addik r5, r1, PTO + PT_R0 398 1: 399 /* We're returning to user mode, so check for various conditions that 400 * trigger rescheduling. */ 401 + /* get thread info from current task */ 402 + lwi r11, CURRENT_TASK, TS_THREAD_INFO; 403 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 404 andi r11, r11, _TIF_NEED_RESCHED; 405 beqi r11, 5f; 406 407 bralid r15, schedule; /* Call scheduler */ 408 nop; /* delay slot */ 409 410 /* Maybe handle a signal */ 411 + 5: /* get thread info from current task*/ 412 + lwi r11, CURRENT_TASK, TS_THREAD_INFO; 413 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 414 andi r11, r11, _TIF_SIGPENDING; 415 beqi r11, 1f; /* Signals to handle, handle them */ 416 417 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 418 addi r7, r0, 1; /* Arg 3: int in_syscall */ 419 bralid r15, do_signal; /* Handle any signals */ 420 + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ 421 + 422 + /* Finally, return to user state. */ 423 + 1: 424 lwi r3, r1, PTO + PT_R3; /* restore syscall result */ 425 lwi r4, r1, PTO + PT_R4; 426 427 + swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 428 + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ 429 VM_OFF; 430 tophys(r1,r1); 431 RESTORE_REGS; ··· 565 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ 566 addi r11, r0, 1; \ 567 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ 568 + 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \ 569 /* Save away the syscall number. */ \ 570 swi r0, r1, PTO+PT_R0; \ 571 tovirt(r1,r1) ··· 673 674 /* We're returning to user mode, so check for various conditions that 675 trigger rescheduling. */ 676 + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ 677 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 678 andi r11, r11, _TIF_NEED_RESCHED; 679 beqi r11, 5f; ··· 685 nop; /* delay slot */ 686 687 /* Maybe handle a signal */ 688 + 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ 689 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 690 andi r11, r11, _TIF_SIGPENDING; 691 beqi r11, 1f; /* Signals to handle, handle them */ ··· 705 * store return registers separately because this macros is use 706 * for others exceptions */ 707 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 708 addi r7, r0, 0; /* Arg 3: int in_syscall */ 709 bralid r15, do_signal; /* Handle any signals */ 710 + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ 711 712 /* Finally, return to user state. 
*/ 713 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 714 + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ 715 VM_OFF; 716 tophys(r1,r1); 717 ··· 802 swi r11, r0, TOPHYS(PER_CPU(KM)); 803 804 2: 805 + lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 806 swi r0, r1, PTO + PT_R0; 807 tovirt(r1,r1) 808 la r5, r1, PTO; ··· 817 lwi r11, r1, PTO + PT_MODE; 818 bnei r11, 2f; 819 820 + lwi r11, CURRENT_TASK, TS_THREAD_INFO; 821 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ 822 andi r11, r11, _TIF_NEED_RESCHED; 823 beqi r11, 5f ··· 826 nop; /* delay slot */ 827 828 /* Maybe handle a signal */ 829 + 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */ 830 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 831 andi r11, r11, _TIF_SIGPENDING; 832 beqid r11, no_intr_resched ··· 842 /* Disable interrupts, we are now committed to the state restore */ 843 disable_irq 844 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ 845 + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); 846 VM_OFF; 847 tophys(r1,r1); 848 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ ··· 853 lwi r1, r1, PT_R1 - PT_SIZE; 854 bri 6f; 855 /* MS: Return to kernel state. */ 856 + 2: 857 + #ifdef CONFIG_PREEMPT 858 + lwi r11, CURRENT_TASK, TS_THREAD_INFO; 859 + /* MS: get preempt_count from thread info */ 860 + lwi r5, r11, TI_PREEMPT_COUNT; 861 + bgti r5, restore; 862 + 863 + lwi r5, r11, TI_FLAGS; /* get flags in thread info */ 864 + andi r5, r5, _TIF_NEED_RESCHED; 865 + beqi r5, restore /* if zero jump over */ 866 + 867 + preempt: 868 + /* interrupts are off that's why I am calling preempt_chedule_irq */ 869 + bralid r15, preempt_schedule_irq 870 + nop 871 + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ 872 + lwi r5, r11, TI_FLAGS; /* get flags in thread info */ 873 + andi r5, r5, _TIF_NEED_RESCHED; 874 + bnei r5, preempt /* if non zero jump to resched */ 875 + restore: 876 + #endif 877 + VM_OFF /* MS: turn off MMU */ 878 tophys(r1,r1) 879 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 880 lwi r4, r1, PTO + PT_R4; ··· 915 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 916 addi r11, r0, 1; 917 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 918 + 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 919 /* Save away the syscall number. */ 920 swi r0, r1, PTO+PT_R0; 921 tovirt(r1,r1) ··· 935 bnei r11, 2f; 936 937 /* Get current task ptr into r11 */ 938 + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ 939 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 940 andi r11, r11, _TIF_NEED_RESCHED; 941 beqi r11, 5f; ··· 949 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ 950 951 /* Maybe handle a signal */ 952 + 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ 953 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 954 andi r11, r11, _TIF_SIGPENDING; 955 beqi r11, 1f; /* Signals to handle, handle them */ ··· 966 (in a possibly modified form) after do_signal returns. */ 967 968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 969 addi r7, r0, 0; /* Arg 3: int in_syscall */ 970 bralid r15, do_signal; /* Handle any signals */ 971 + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ 972 973 974 /* Finally, return to user state. */ 975 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. 
*/ 976 + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ 977 VM_OFF; 978 tophys(r1,r1); 979 ··· 1007 1008 ENTRY(_switch_to) 1009 /* prepare return value */ 1010 + addk r3, r0, CURRENT_TASK 1011 1012 /* save registers in cpu_context */ 1013 /* use r11 and r12, volatile registers, as temp register */ ··· 1051 nop 1052 swi r12, r11, CC_FSR 1053 1054 + /* update r31, the current-give me pointer to task which will be next */ 1055 + lwi CURRENT_TASK, r6, TI_TASK 1056 /* stored it to current_save too */ 1057 + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE) 1058 1059 /* get new process' cpu context and restore */ 1060 /* give me start where start context of next task */
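The CONFIG_PREEMPT block added to the interrupt return path follows the pattern other architectures use: skip preemption while preempt_count is non-zero, otherwise loop on preempt_schedule_irq() for as long as TIF_NEED_RESCHED stays set. Roughly the following C sketch, which only illustrates the assembly above and is not code from the patch (header locations assumed):

#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

/* Hypothetical helper mirroring the assembly; interrupts are still off. */
static void irq_return_preempt_check(void)
{
	struct thread_info *ti = current_thread_info();

	if (ti->preempt_count)
		return;			/* preemption currently disabled */

	while (test_ti_thread_flag(ti, TIF_NEED_RESCHED))
		preempt_schedule_irq();	/* NEED_RESCHED is re-checked on return */
}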
+7 -6
arch/microblaze/kernel/head.S
··· 99 tophys(r4,r4) /* convert to phys address */ 100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 101 _copy_command_line: 102 - lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ 103 - sb r7, r4, r6 /* addr[r4+r6]= r7*/ 104 addik r6, r6, 1 /* increment counting */ 105 bgtid r3, _copy_command_line /* loop for all entries */ 106 addik r3, r3, -1 /* descrement loop */ ··· 136 addik r3, r3, -1 137 /* sync */ 138 139 /* 140 * We should still be executing code at physical address area 141 * RAM_BASEADDR at this point. However, kernel code is at ··· 150 151 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ 152 tophys(r4,r3) /* Load the kernel physical address */ 153 - 154 - mts rpid,r0 /* Load the kernel PID */ 155 - nop 156 - bri 4 157 158 /* 159 * Configure and load two entries into TLB slots 0 and 1.
··· 99 tophys(r4,r4) /* convert to phys address */ 100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 101 _copy_command_line: 102 + lbu r2, r5, r6 /* r2=*(r5+r6) - r5 contains pointer to command line */ 103 + sb r2, r4, r6 /* addr[r4+r6]= r2 */ 104 addik r6, r6, 1 /* increment counting */ 105 bgtid r3, _copy_command_line /* loop for all entries */ 106 addik r3, r3, -1 /* descrement loop */ ··· 136 addik r3, r3, -1 137 /* sync */ 138 139 + /* Setup the kernel PID */ 140 + mts rpid,r0 /* Load the kernel PID */ 141 + nop 142 + bri 4 143 + 144 /* 145 * We should still be executing code at physical address area 146 * RAM_BASEADDR at this point. However, kernel code is at ··· 145 146 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ 147 tophys(r4,r3) /* Load the kernel physical address */ 148 149 /* 150 * Configure and load two entries into TLB slots 0 and 1.
+15
arch/microblaze/kernel/irq.c
··· 93 } 94 return 0; 95 }
··· 93 } 94 return 0; 95 } 96 + 97 + /* MS: There is no advanced mapping mechanism. We are using a simple 32-bit 98 + intc without any cascades or other connections, so the mapping is 1:1 */ 99 + unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq) 100 + { 101 + return hwirq; 102 + } 103 + EXPORT_SYMBOL_GPL(irq_create_mapping); 104 + 105 + unsigned int irq_create_of_mapping(struct device_node *controller, 106 + u32 *intspec, unsigned int intsize) 107 + { 108 + return intspec[0]; 109 + } 110 + EXPORT_SYMBOL_GPL(irq_create_of_mapping);
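Because the controller is one flat 32-bit intc with no cascades, both helpers simply hand back the hardware number, so virtual and hardware IRQ numbers coincide. The PCI code added later in this series calls irq_create_mapping(NULL, line) and passes the result straight to request_irq(). A hedged sketch of the same pattern; the handler, device name and hwirq number are invented, and the irq_create_mapping() prototype is assumed to come from asm/irq.h:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq.h>	/* assumed to declare irq_create_mapping() */

/* Illustration only; my_handler and hwirq 3 are made up. */
static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_request(void *dev_id)
{
	unsigned int virq = irq_create_mapping(NULL, 3); /* 1:1, so virq == 3 */

	return request_irq(virq, my_handler, 0, "mydev", dev_id);
}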
+39 -6
arch/microblaze/kernel/setup.c
··· 22 #include <linux/io.h> 23 #include <linux/bug.h> 24 #include <linux/param.h> 25 #include <linux/cache.h> 26 #include <asm/cacheflush.h> 27 #include <asm/entry.h> 28 #include <asm/cpuinfo.h> ··· 57 58 microblaze_cache_init(); 59 60 - invalidate_dcache(); 61 - enable_dcache(); 62 - 63 - invalidate_icache(); 64 - enable_icache(); 65 - 66 setup_memory(); 67 68 #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) 69 printk(KERN_NOTICE "Self modified code enable\n"); ··· 187 } 188 arch_initcall(microblaze_debugfs_init); 189 #endif
··· 22 #include <linux/io.h> 23 #include <linux/bug.h> 24 #include <linux/param.h> 25 + #include <linux/pci.h> 26 #include <linux/cache.h> 27 + #include <linux/of_platform.h> 28 + #include <linux/dma-mapping.h> 29 #include <asm/cacheflush.h> 30 #include <asm/entry.h> 31 #include <asm/cpuinfo.h> ··· 54 55 microblaze_cache_init(); 56 57 setup_memory(); 58 + 59 + xilinx_pci_init(); 60 61 #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) 62 printk(KERN_NOTICE "Self modified code enable\n"); ··· 188 } 189 arch_initcall(microblaze_debugfs_init); 190 #endif 191 + 192 + static int dflt_bus_notify(struct notifier_block *nb, 193 + unsigned long action, void *data) 194 + { 195 + struct device *dev = data; 196 + 197 + /* We are only interested in device addition */ 198 + if (action != BUS_NOTIFY_ADD_DEVICE) 199 + return 0; 200 + 201 + set_dma_ops(dev, &dma_direct_ops); 202 + 203 + return NOTIFY_DONE; 204 + } 205 + 206 + static struct notifier_block dflt_plat_bus_notifier = { 207 + .notifier_call = dflt_bus_notify, 208 + .priority = INT_MAX, 209 + }; 210 + 211 + static struct notifier_block dflt_of_bus_notifier = { 212 + .notifier_call = dflt_bus_notify, 213 + .priority = INT_MAX, 214 + }; 215 + 216 + static int __init setup_bus_notifier(void) 217 + { 218 + bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier); 219 + bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier); 220 + 221 + return 0; 222 + } 223 + 224 + arch_initcall(setup_bus_notifier);
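With these notifiers in place, every platform and OF device receives dma_direct_ops at BUS_NOTIFY_ADD_DEVICE time, before any driver probe runs, so streaming DMA mappings work without per-driver setup. A hedged illustration of what a driver can then rely on; the probe function and buffer size are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Illustration only; my_probe() is not part of the patch. */
static int my_probe(struct device *dev)
{
	void *buf = kmalloc(512, GFP_KERNEL);
	dma_addr_t dma;

	if (!buf)
		return -ENOMEM;

	/* dev already carries dma_direct_ops thanks to the notifier above,
	 * so this ends up in dma_direct_map_page()/__dma_sync_page(). */
	dma = dma_map_single(dev, buf, 512, DMA_TO_DEVICE);

	/* ... start the transfer and wait for completion ... */

	dma_unmap_single(dev, dma, 512, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}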
+1 -1
arch/microblaze/mm/Makefile
··· 2 # Makefile 3 # 4 5 - obj-y := init.o 6 7 obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
··· 2 # Makefile 3 # 4 5 + obj-y := consistent.o init.o 6 7 obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
+246
arch/microblaze/mm/consistent.c
···
··· 1 + /* 2 + * Microblaze support for cache consistent memory. 3 + * Copyright (C) 2010 Michal Simek <monstr@monstr.eu> 4 + * Copyright (C) 2010 PetaLogix 5 + * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> 6 + * 7 + * Based on PowerPC version derived from arch/arm/mm/consistent.c 8 + * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) 9 + * Copyright (C) 2000 Russell King 10 + * 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License version 2 as 13 + * published by the Free Software Foundation. 14 + */ 15 + 16 + #include <linux/module.h> 17 + #include <linux/signal.h> 18 + #include <linux/sched.h> 19 + #include <linux/kernel.h> 20 + #include <linux/errno.h> 21 + #include <linux/string.h> 22 + #include <linux/types.h> 23 + #include <linux/ptrace.h> 24 + #include <linux/mman.h> 25 + #include <linux/mm.h> 26 + #include <linux/swap.h> 27 + #include <linux/stddef.h> 28 + #include <linux/vmalloc.h> 29 + #include <linux/init.h> 30 + #include <linux/delay.h> 31 + #include <linux/bootmem.h> 32 + #include <linux/highmem.h> 33 + #include <linux/pci.h> 34 + #include <linux/interrupt.h> 35 + 36 + #include <asm/pgalloc.h> 37 + #include <linux/io.h> 38 + #include <linux/hardirq.h> 39 + #include <asm/mmu_context.h> 40 + #include <asm/mmu.h> 41 + #include <linux/uaccess.h> 42 + #include <asm/pgtable.h> 43 + #include <asm/cpuinfo.h> 44 + 45 + #ifndef CONFIG_MMU 46 + 47 + /* I have to use dcache values because I can't relate on ram size */ 48 + #define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) 49 + 50 + /* 51 + * Consistent memory allocators. Used for DMA devices that want to 52 + * share uncached memory with the processor core. 53 + * My crufty no-MMU approach is simple. In the HW platform we can optionally 54 + * mirror the DDR up above the processor cacheable region. So, memory accessed 55 + * in this mirror region will not be cached. It's alloced from the same 56 + * pool as normal memory, but the handle we return is shifted up into the 57 + * uncached region. This will no doubt cause big problems if memory allocated 58 + * here is not also freed properly. -- JW 59 + */ 60 + void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) 61 + { 62 + struct page *page, *end, *free; 63 + unsigned long order; 64 + void *ret, *virt; 65 + 66 + if (in_interrupt()) 67 + BUG(); 68 + 69 + size = PAGE_ALIGN(size); 70 + order = get_order(size); 71 + 72 + page = alloc_pages(gfp, order); 73 + if (!page) 74 + goto no_page; 75 + 76 + /* We could do with a page_to_phys and page_to_bus here. */ 77 + virt = page_address(page); 78 + ret = ioremap(virt_to_phys(virt), size); 79 + if (!ret) 80 + goto no_remap; 81 + 82 + /* 83 + * Here's the magic! Note if the uncached shadow is not implemented, 84 + * it's up to the calling code to also test that condition and make 85 + * other arranegments, such as manually flushing the cache and so on. 86 + */ 87 + #ifdef CONFIG_XILINX_UNCACHED_SHADOW 88 + ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); 89 + #endif 90 + /* dma_handle is same as physical (shadowed) address */ 91 + *dma_handle = (dma_addr_t)ret; 92 + 93 + /* 94 + * free wasted pages. We skip the first page since we know 95 + * that it will have count = 1 and won't require freeing. 96 + * We also mark the pages in use as reserved so that 97 + * remap_page_range works. 
98 + */ 99 + page = virt_to_page(virt); 100 + free = page + (size >> PAGE_SHIFT); 101 + end = page + (1 << order); 102 + 103 + for (; page < end; page++) { 104 + init_page_count(page); 105 + if (page >= free) 106 + __free_page(page); 107 + else 108 + SetPageReserved(page); 109 + } 110 + 111 + return ret; 112 + no_remap: 113 + __free_pages(page, order); 114 + no_page: 115 + return NULL; 116 + } 117 + 118 + #else 119 + 120 + void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) 121 + { 122 + int order, err, i; 123 + unsigned long page, va, flags; 124 + phys_addr_t pa; 125 + struct vm_struct *area; 126 + void *ret; 127 + 128 + if (in_interrupt()) 129 + BUG(); 130 + 131 + /* Only allocate page size areas. */ 132 + size = PAGE_ALIGN(size); 133 + order = get_order(size); 134 + 135 + page = __get_free_pages(gfp, order); 136 + if (!page) { 137 + BUG(); 138 + return NULL; 139 + } 140 + 141 + /* 142 + * we need to ensure that there are no cachelines in use, 143 + * or worse dirty in this area. 144 + */ 145 + flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size); 146 + 147 + /* Allocate some common virtual space to map the new pages. */ 148 + area = get_vm_area(size, VM_ALLOC); 149 + if (area == NULL) { 150 + free_pages(page, order); 151 + return NULL; 152 + } 153 + va = (unsigned long) area->addr; 154 + ret = (void *)va; 155 + 156 + /* This gives us the real physical address of the first page. */ 157 + *dma_handle = pa = virt_to_bus((void *)page); 158 + 159 + /* MS: This is the whole magic - use cache inhibit pages */ 160 + flags = _PAGE_KERNEL | _PAGE_NO_CACHE; 161 + 162 + /* 163 + * Set refcount=1 on all pages in an order>0 164 + * allocation so that vfree() will actually 165 + * free all pages that were allocated. 166 + */ 167 + if (order > 0) { 168 + struct page *rpage = virt_to_page(page); 169 + for (i = 1; i < (1 << order); i++) 170 + init_page_count(rpage+i); 171 + } 172 + 173 + err = 0; 174 + for (i = 0; i < size && err == 0; i += PAGE_SIZE) 175 + err = map_page(va+i, pa+i, flags); 176 + 177 + if (err) { 178 + vfree((void *)va); 179 + return NULL; 180 + } 181 + 182 + return ret; 183 + } 184 + #endif /* CONFIG_MMU */ 185 + EXPORT_SYMBOL(consistent_alloc); 186 + 187 + /* 188 + * free page(s) as defined by the above mapping. 189 + */ 190 + void consistent_free(void *vaddr) 191 + { 192 + if (in_interrupt()) 193 + BUG(); 194 + 195 + /* Clear SHADOW_MASK bit in address, and free as per usual */ 196 + #ifdef CONFIG_XILINX_UNCACHED_SHADOW 197 + vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); 198 + #endif 199 + vfree(vaddr); 200 + } 201 + EXPORT_SYMBOL(consistent_free); 202 + 203 + /* 204 + * make an area consistent. 
205 + */ 206 + void consistent_sync(void *vaddr, size_t size, int direction) 207 + { 208 + unsigned long start; 209 + unsigned long end; 210 + 211 + start = (unsigned long)vaddr; 212 + 213 + /* Convert start address back down to unshadowed memory region */ 214 + #ifdef CONFIG_XILINX_UNCACHED_SHADOW 215 + start &= ~UNCACHED_SHADOW_MASK; 216 + #endif 217 + end = start + size; 218 + 219 + switch (direction) { 220 + case PCI_DMA_NONE: 221 + BUG(); 222 + case PCI_DMA_FROMDEVICE: /* invalidate only */ 223 + flush_dcache_range(start, end); 224 + break; 225 + case PCI_DMA_TODEVICE: /* writeback only */ 226 + flush_dcache_range(start, end); 227 + break; 228 + case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ 229 + flush_dcache_range(start, end); 230 + break; 231 + } 232 + } 233 + EXPORT_SYMBOL(consistent_sync); 234 + 235 + /* 236 + * consistent_sync_page makes memory consistent. identical 237 + * to consistent_sync, but takes a struct page instead of a 238 + * virtual address 239 + */ 240 + void consistent_sync_page(struct page *page, unsigned long offset, 241 + size_t size, int direction) 242 + { 243 + unsigned long start = (unsigned long)page_address(page) + offset; 244 + consistent_sync((void *)start, size, direction); 245 + } 246 + EXPORT_SYMBOL(consistent_sync_page);
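consistent_alloc()/consistent_free() are what dma_direct_alloc_coherent() above falls back to when the cache is not coherent; drivers normally reach them through dma_alloc_coherent(). Direct use would look roughly like this hedged sketch; the ring helpers are invented, and the prototypes are assumed to live in asm/pgtable.h per the "move consistent functions" change in this series:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <asm/pgtable.h>	/* assumed home of the consistent_* prototypes */

/* Illustration only: a descriptor ring for an invented device. */
static void *alloc_desc_ring(size_t size, dma_addr_t *dma)
{
	/* Returns an uncached kernel mapping; *dma is the address the
	 * device should be programmed with. */
	return consistent_alloc(GFP_KERNEL, size, dma);
}

static void free_desc_ring(void *ring)
{
	consistent_free(ring);
}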
+30 -9
arch/microblaze/mm/init.c
··· 23 #include <asm/sections.h> 24 #include <asm/tlb.h> 25 26 #ifndef CONFIG_MMU 27 unsigned int __page_offset; 28 EXPORT_SYMBOL(__page_offset); ··· 33 #else 34 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 35 36 - int mem_init_done; 37 static int init_bootmem_done; 38 #endif /* CONFIG_MMU */ 39 ··· 195 (unsigned long)(&__init_end)); 196 } 197 198 - /* FIXME from arch/powerpc/mm/mem.c*/ 199 - void show_mem(void) 200 - { 201 - printk(KERN_NOTICE "%s\n", __func__); 202 - } 203 - 204 void __init mem_init(void) 205 { 206 high_memory = (void *)__va(memory_end); ··· 204 printk(KERN_INFO "Memory: %luk/%luk available\n", 205 nr_free_pages() << (PAGE_SHIFT-10), 206 num_physpages << (PAGE_SHIFT-10)); 207 - #ifdef CONFIG_MMU 208 mem_init_done = 1; 209 - #endif 210 } 211 212 #ifndef CONFIG_MMU ··· 216 } 217 EXPORT_SYMBOL(___range_ok); 218 219 #else 220 int page_is_ram(unsigned long pfn) 221 { ··· 347 } 348 return p; 349 } 350 #endif /* CONFIG_MMU */
··· 23 #include <asm/sections.h> 24 #include <asm/tlb.h> 25 26 + /* Use for MMU and noMMU because of PCI generic code */ 27 + int mem_init_done; 28 + 29 #ifndef CONFIG_MMU 30 unsigned int __page_offset; 31 EXPORT_SYMBOL(__page_offset); ··· 30 #else 31 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 32 33 static int init_bootmem_done; 34 #endif /* CONFIG_MMU */ 35 ··· 193 (unsigned long)(&__init_end)); 194 } 195 196 void __init mem_init(void) 197 { 198 high_memory = (void *)__va(memory_end); ··· 208 printk(KERN_INFO "Memory: %luk/%luk available\n", 209 nr_free_pages() << (PAGE_SHIFT-10), 210 num_physpages << (PAGE_SHIFT-10)); 211 mem_init_done = 1; 212 } 213 214 #ifndef CONFIG_MMU ··· 222 } 223 EXPORT_SYMBOL(___range_ok); 224 225 + int page_is_ram(unsigned long pfn) 226 + { 227 + return __range_ok(pfn, 0); 228 + } 229 #else 230 int page_is_ram(unsigned long pfn) 231 { ··· 349 } 350 return p; 351 } 352 + 353 #endif /* CONFIG_MMU */ 354 + 355 + void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) 356 + { 357 + if (mem_init_done) 358 + return kmalloc(size, mask); 359 + else 360 + return alloc_bootmem(size); 361 + } 362 + 363 + void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) 364 + { 365 + void *p; 366 + 367 + if (mem_init_done) 368 + p = kzalloc(size, mask); 369 + else { 370 + p = alloc_bootmem(size); 371 + if (p) 372 + memset(p, 0, size); 373 + } 374 + return p; 375 + }
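alloc_maybe_bootmem()/zalloc_maybe_bootmem() let the PCI code allocate before the page allocator is initialised; pcibios_alloc_controller() in pci-common.c below uses the zeroing variant exactly this way. A small hedged sketch; the structure is invented and the prototype is assumed to be exported from an asm header not shown in this hunk:

#include <linux/gfp.h>

/* Illustration only; struct my_ctrl and my_ctrl_alloc() are made up. */
struct my_ctrl {
	int id;
};

static struct my_ctrl *my_ctrl_alloc(void)
{
	/* Comes from bootmem before mem_init_done is set, from kzalloc()
	 * afterwards; in both cases the memory is zeroed. */
	return zalloc_maybe_bootmem(sizeof(struct my_ctrl), GFP_KERNEL);
}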
+1 -1
arch/microblaze/mm/pgtable.c
··· 103 area = get_vm_area(size, VM_IOREMAP); 104 if (area == NULL) 105 return NULL; 106 - v = VMALLOC_VMADDR(area->addr); 107 } else { 108 v = (ioremap_bot -= size); 109 }
··· 103 area = get_vm_area(size, VM_IOREMAP); 104 if (area == NULL) 105 return NULL; 106 + v = (unsigned long) area->addr; 107 } else { 108 v = (ioremap_bot -= size); 109 }
+6
arch/microblaze/pci/Makefile
···
··· 1 + # 2 + # Makefile 3 + # 4 + 5 + obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o 6 + obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o
+163
arch/microblaze/pci/indirect_pci.c
···
··· 1 + /* 2 + * Support for indirect PCI bridges. 3 + * 4 + * Copyright (C) 1998 Gabriel Paubert. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the License, or (at your option) any later version. 10 + */ 11 + 12 + #include <linux/kernel.h> 13 + #include <linux/pci.h> 14 + #include <linux/delay.h> 15 + #include <linux/string.h> 16 + #include <linux/init.h> 17 + 18 + #include <asm/io.h> 19 + #include <asm/prom.h> 20 + #include <asm/pci-bridge.h> 21 + 22 + static int 23 + indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 24 + int len, u32 *val) 25 + { 26 + struct pci_controller *hose = pci_bus_to_host(bus); 27 + volatile void __iomem *cfg_data; 28 + u8 cfg_type = 0; 29 + u32 bus_no, reg; 30 + 31 + if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) { 32 + if (bus->number != hose->first_busno) 33 + return PCIBIOS_DEVICE_NOT_FOUND; 34 + if (devfn != 0) 35 + return PCIBIOS_DEVICE_NOT_FOUND; 36 + } 37 + 38 + if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE) 39 + if (bus->number != hose->first_busno) 40 + cfg_type = 1; 41 + 42 + bus_no = (bus->number == hose->first_busno) ? 43 + hose->self_busno : bus->number; 44 + 45 + if (hose->indirect_type & INDIRECT_TYPE_EXT_REG) 46 + reg = ((offset & 0xf00) << 16) | (offset & 0xfc); 47 + else 48 + reg = offset & 0xfc; /* Only 3 bits for function */ 49 + 50 + if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) 51 + out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 52 + (devfn << 8) | reg | cfg_type)); 53 + else 54 + out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 55 + (devfn << 8) | reg | cfg_type)); 56 + 57 + /* 58 + * Note: the caller has already checked that offset is 59 + * suitably aligned and that len is 1, 2 or 4. 60 + */ 61 + cfg_data = hose->cfg_data + (offset & 3); /* Only 3 bits for function */ 62 + switch (len) { 63 + case 1: 64 + *val = in_8(cfg_data); 65 + break; 66 + case 2: 67 + *val = in_le16(cfg_data); 68 + break; 69 + default: 70 + *val = in_le32(cfg_data); 71 + break; 72 + } 73 + return PCIBIOS_SUCCESSFUL; 74 + } 75 + 76 + static int 77 + indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 78 + int len, u32 val) 79 + { 80 + struct pci_controller *hose = pci_bus_to_host(bus); 81 + volatile void __iomem *cfg_data; 82 + u8 cfg_type = 0; 83 + u32 bus_no, reg; 84 + 85 + if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) { 86 + if (bus->number != hose->first_busno) 87 + return PCIBIOS_DEVICE_NOT_FOUND; 88 + if (devfn != 0) 89 + return PCIBIOS_DEVICE_NOT_FOUND; 90 + } 91 + 92 + if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE) 93 + if (bus->number != hose->first_busno) 94 + cfg_type = 1; 95 + 96 + bus_no = (bus->number == hose->first_busno) ? 
97 + hose->self_busno : bus->number; 98 + 99 + if (hose->indirect_type & INDIRECT_TYPE_EXT_REG) 100 + reg = ((offset & 0xf00) << 16) | (offset & 0xfc); 101 + else 102 + reg = offset & 0xfc; 103 + 104 + if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) 105 + out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 106 + (devfn << 8) | reg | cfg_type)); 107 + else 108 + out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | 109 + (devfn << 8) | reg | cfg_type)); 110 + 111 + /* surpress setting of PCI_PRIMARY_BUS */ 112 + if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) 113 + if ((offset == PCI_PRIMARY_BUS) && 114 + (bus->number == hose->first_busno)) 115 + val &= 0xffffff00; 116 + 117 + /* Workaround for PCI_28 Errata in 440EPx/GRx */ 118 + if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) && 119 + offset == PCI_CACHE_LINE_SIZE) { 120 + val = 0; 121 + } 122 + 123 + /* 124 + * Note: the caller has already checked that offset is 125 + * suitably aligned and that len is 1, 2 or 4. 126 + */ 127 + cfg_data = hose->cfg_data + (offset & 3); 128 + switch (len) { 129 + case 1: 130 + out_8(cfg_data, val); 131 + break; 132 + case 2: 133 + out_le16(cfg_data, val); 134 + break; 135 + default: 136 + out_le32(cfg_data, val); 137 + break; 138 + } 139 + 140 + return PCIBIOS_SUCCESSFUL; 141 + } 142 + 143 + static struct pci_ops indirect_pci_ops = { 144 + .read = indirect_read_config, 145 + .write = indirect_write_config, 146 + }; 147 + 148 + void __init 149 + setup_indirect_pci(struct pci_controller *hose, 150 + resource_size_t cfg_addr, 151 + resource_size_t cfg_data, u32 flags) 152 + { 153 + resource_size_t base = cfg_addr & PAGE_MASK; 154 + void __iomem *mbase; 155 + 156 + mbase = ioremap(base, PAGE_SIZE); 157 + hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK); 158 + if ((cfg_data & PAGE_MASK) != base) 159 + mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE); 160 + hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK); 161 + hose->ops = &indirect_pci_ops; 162 + hose->indirect_type = flags; 163 + }
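setup_indirect_pci() is the glue a host-bridge setup path calls: it maps the bridge's config-address/config-data register pair and points hose->ops at indirect_pci_ops, after which the generic PCI config accessors work. A hedged sketch of such a call; the register addresses and flag choice are invented, not taken from this series:

#include <linux/init.h>
#include <asm/pci-bridge.h>

/* Illustration only; the addresses below are invented. */
static void __init my_host_bridge_setup(struct pci_controller *hose)
{
	/* cfg_addr / cfg_data are the physical addresses of the bridge's
	 * configuration address and data registers. */
	setup_indirect_pci(hose, 0xe0000000, 0xe0000004,
			   INDIRECT_TYPE_BIG_ENDIAN);

	/* From here on, pci_bus_read_config_dword() and friends go through
	 * indirect_read_config()/indirect_write_config(). */
}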
+39
arch/microblaze/pci/iomap.c
···
··· 1 + /* 2 + * ppc64 "iomap" interface implementation. 3 + * 4 + * (C) Copyright 2004 Linus Torvalds 5 + */ 6 + #include <linux/init.h> 7 + #include <linux/pci.h> 8 + #include <linux/mm.h> 9 + #include <asm/io.h> 10 + #include <asm/pci-bridge.h> 11 + 12 + void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) 13 + { 14 + resource_size_t start = pci_resource_start(dev, bar); 15 + resource_size_t len = pci_resource_len(dev, bar); 16 + unsigned long flags = pci_resource_flags(dev, bar); 17 + 18 + if (!len) 19 + return NULL; 20 + if (max && len > max) 21 + len = max; 22 + if (flags & IORESOURCE_IO) 23 + return ioport_map(start, len); 24 + if (flags & IORESOURCE_MEM) 25 + return ioremap(start, len); 26 + /* What? */ 27 + return NULL; 28 + } 29 + EXPORT_SYMBOL(pci_iomap); 30 + 31 + void pci_iounmap(struct pci_dev *dev, void __iomem *addr) 32 + { 33 + if (isa_vaddr_is_ioport(addr)) 34 + return; 35 + if (pcibios_vaddr_is_ioport(addr)) 36 + return; 37 + iounmap(addr); 38 + } 39 + EXPORT_SYMBOL(pci_iounmap);
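pci_iomap()/pci_iounmap() let a driver map a BAR without caring whether it is I/O or memory space; ioport_map() or ioremap() is chosen from the resource flags. Typical use from a PCI driver probe, shown as a hedged sketch with invented driver specifics:

#include <linux/pci.h>

/* Illustration only; BAR 0 and the register offsets are made up. */
static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;

	if (pci_enable_device(pdev))
		return -ENODEV;

	regs = pci_iomap(pdev, 0, 0);	/* 0 = map the whole BAR */
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	/* ... ioread32(regs + 0x0), iowrite32(val, regs + 0x4), ... */

	pci_iounmap(pdev, regs);
	pci_disable_device(pdev);
	return 0;
}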
+1642
arch/microblaze/pci/pci-common.c
···
··· 1 + /* 2 + * Contains common pci routines for ALL ppc platform 3 + * (based on pci_32.c and pci_64.c) 4 + * 5 + * Port for PPC64 David Engebretsen, IBM Corp. 6 + * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. 7 + * 8 + * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM 9 + * Rework, based on alpha PCI code. 10 + * 11 + * Common pmac/prep/chrp pci routines. -- Cort 12 + * 13 + * This program is free software; you can redistribute it and/or 14 + * modify it under the terms of the GNU General Public License 15 + * as published by the Free Software Foundation; either version 16 + * 2 of the License, or (at your option) any later version. 17 + */ 18 + 19 + #include <linux/kernel.h> 20 + #include <linux/pci.h> 21 + #include <linux/string.h> 22 + #include <linux/init.h> 23 + #include <linux/bootmem.h> 24 + #include <linux/mm.h> 25 + #include <linux/list.h> 26 + #include <linux/syscalls.h> 27 + #include <linux/irq.h> 28 + #include <linux/vmalloc.h> 29 + 30 + #include <asm/processor.h> 31 + #include <asm/io.h> 32 + #include <asm/prom.h> 33 + #include <asm/pci-bridge.h> 34 + #include <asm/byteorder.h> 35 + 36 + static DEFINE_SPINLOCK(hose_spinlock); 37 + LIST_HEAD(hose_list); 38 + 39 + /* XXX kill that some day ... */ 40 + static int global_phb_number; /* Global phb counter */ 41 + 42 + /* ISA Memory physical address */ 43 + resource_size_t isa_mem_base; 44 + 45 + /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */ 46 + unsigned int pci_flags; 47 + 48 + static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; 49 + 50 + void set_pci_dma_ops(struct dma_map_ops *dma_ops) 51 + { 52 + pci_dma_ops = dma_ops; 53 + } 54 + 55 + struct dma_map_ops *get_pci_dma_ops(void) 56 + { 57 + return pci_dma_ops; 58 + } 59 + EXPORT_SYMBOL(get_pci_dma_ops); 60 + 61 + int pci_set_dma_mask(struct pci_dev *dev, u64 mask) 62 + { 63 + return dma_set_mask(&dev->dev, mask); 64 + } 65 + 66 + int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) 67 + { 68 + int rc; 69 + 70 + rc = dma_set_mask(&dev->dev, mask); 71 + dev->dev.coherent_dma_mask = dev->dma_mask; 72 + 73 + return rc; 74 + } 75 + 76 + struct pci_controller *pcibios_alloc_controller(struct device_node *dev) 77 + { 78 + struct pci_controller *phb; 79 + 80 + phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); 81 + if (!phb) 82 + return NULL; 83 + spin_lock(&hose_spinlock); 84 + phb->global_number = global_phb_number++; 85 + list_add_tail(&phb->list_node, &hose_list); 86 + spin_unlock(&hose_spinlock); 87 + phb->dn = dev; 88 + phb->is_dynamic = mem_init_done; 89 + return phb; 90 + } 91 + 92 + void pcibios_free_controller(struct pci_controller *phb) 93 + { 94 + spin_lock(&hose_spinlock); 95 + list_del(&phb->list_node); 96 + spin_unlock(&hose_spinlock); 97 + 98 + if (phb->is_dynamic) 99 + kfree(phb); 100 + } 101 + 102 + static resource_size_t pcibios_io_size(const struct pci_controller *hose) 103 + { 104 + return hose->io_resource.end - hose->io_resource.start + 1; 105 + } 106 + 107 + int pcibios_vaddr_is_ioport(void __iomem *address) 108 + { 109 + int ret = 0; 110 + struct pci_controller *hose; 111 + resource_size_t size; 112 + 113 + spin_lock(&hose_spinlock); 114 + list_for_each_entry(hose, &hose_list, list_node) { 115 + size = pcibios_io_size(hose); 116 + if (address >= hose->io_base_virt && 117 + address < (hose->io_base_virt + size)) { 118 + ret = 1; 119 + break; 120 + } 121 + } 122 + spin_unlock(&hose_spinlock); 123 + return ret; 124 + } 125 + 126 + unsigned long 
pci_address_to_pio(phys_addr_t address) 127 + { 128 + struct pci_controller *hose; 129 + resource_size_t size; 130 + unsigned long ret = ~0; 131 + 132 + spin_lock(&hose_spinlock); 133 + list_for_each_entry(hose, &hose_list, list_node) { 134 + size = pcibios_io_size(hose); 135 + if (address >= hose->io_base_phys && 136 + address < (hose->io_base_phys + size)) { 137 + unsigned long base = 138 + (unsigned long)hose->io_base_virt - _IO_BASE; 139 + ret = base + (address - hose->io_base_phys); 140 + break; 141 + } 142 + } 143 + spin_unlock(&hose_spinlock); 144 + 145 + return ret; 146 + } 147 + EXPORT_SYMBOL_GPL(pci_address_to_pio); 148 + 149 + /* 150 + * Return the domain number for this bus. 151 + */ 152 + int pci_domain_nr(struct pci_bus *bus) 153 + { 154 + struct pci_controller *hose = pci_bus_to_host(bus); 155 + 156 + return hose->global_number; 157 + } 158 + EXPORT_SYMBOL(pci_domain_nr); 159 + 160 + /* This routine is meant to be used early during boot, when the 161 + * PCI bus numbers have not yet been assigned, and you need to 162 + * issue PCI config cycles to an OF device. 163 + * It could also be used to "fix" RTAS config cycles if you want 164 + * to set pci_assign_all_buses to 1 and still use RTAS for PCI 165 + * config cycles. 166 + */ 167 + struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node) 168 + { 169 + while (node) { 170 + struct pci_controller *hose, *tmp; 171 + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 172 + if (hose->dn == node) 173 + return hose; 174 + node = node->parent; 175 + } 176 + return NULL; 177 + } 178 + 179 + static ssize_t pci_show_devspec(struct device *dev, 180 + struct device_attribute *attr, char *buf) 181 + { 182 + struct pci_dev *pdev; 183 + struct device_node *np; 184 + 185 + pdev = to_pci_dev(dev); 186 + np = pci_device_to_OF_node(pdev); 187 + if (np == NULL || np->full_name == NULL) 188 + return 0; 189 + return sprintf(buf, "%s", np->full_name); 190 + } 191 + static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); 192 + 193 + /* Add sysfs properties */ 194 + int pcibios_add_platform_entries(struct pci_dev *pdev) 195 + { 196 + return device_create_file(&pdev->dev, &dev_attr_devspec); 197 + } 198 + 199 + char __devinit *pcibios_setup(char *str) 200 + { 201 + return str; 202 + } 203 + 204 + /* 205 + * Reads the interrupt pin to determine if interrupt is use by card. 206 + * If the interrupt is used, then gets the interrupt line from the 207 + * openfirmware and sets it in the pci_dev and pci_config line. 208 + */ 209 + int pci_read_irq_line(struct pci_dev *pci_dev) 210 + { 211 + struct of_irq oirq; 212 + unsigned int virq; 213 + 214 + /* The current device-tree that iSeries generates from the HV 215 + * PCI informations doesn't contain proper interrupt routing, 216 + * and all the fallback would do is print out crap, so we 217 + * don't attempt to resolve the interrupts here at all, some 218 + * iSeries specific fixup does it. 219 + * 220 + * In the long run, we will hopefully fix the generated device-tree 221 + * instead. 222 + */ 223 + pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); 224 + 225 + #ifdef DEBUG 226 + memset(&oirq, 0xff, sizeof(oirq)); 227 + #endif 228 + /* Try to get a mapping from the device-tree */ 229 + if (of_irq_map_pci(pci_dev, &oirq)) { 230 + u8 line, pin; 231 + 232 + /* If that fails, lets fallback to what is in the config 233 + * space and map that through the default controller. We 234 + * also set the type to level low since that's what PCI 235 + * interrupts are. 
If your platform does differently, then 236 + * either provide a proper interrupt tree or don't use this 237 + * function. 238 + */ 239 + if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) 240 + return -1; 241 + if (pin == 0) 242 + return -1; 243 + if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || 244 + line == 0xff || line == 0) { 245 + return -1; 246 + } 247 + pr_debug(" No map ! Using line %d (pin %d) from PCI config\n", 248 + line, pin); 249 + 250 + virq = irq_create_mapping(NULL, line); 251 + if (virq != NO_IRQ) 252 + set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 253 + } else { 254 + pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 255 + oirq.size, oirq.specifier[0], oirq.specifier[1], 256 + oirq.controller ? oirq.controller->full_name : 257 + "<default>"); 258 + 259 + virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 260 + oirq.size); 261 + } 262 + if (virq == NO_IRQ) { 263 + pr_debug(" Failed to map !\n"); 264 + return -1; 265 + } 266 + 267 + pr_debug(" Mapped to linux irq %d\n", virq); 268 + 269 + pci_dev->irq = virq; 270 + 271 + return 0; 272 + } 273 + EXPORT_SYMBOL(pci_read_irq_line); 274 + 275 + /* 276 + * Platform support for /proc/bus/pci/X/Y mmap()s, 277 + * modelled on the sparc64 implementation by Dave Miller. 278 + * -- paulus. 279 + */ 280 + 281 + /* 282 + * Adjust vm_pgoff of VMA such that it is the physical page offset 283 + * corresponding to the 32-bit pci bus offset for DEV requested by the user. 284 + * 285 + * Basically, the user finds the base address for his device which he wishes 286 + * to mmap. They read the 32-bit value from the config space base register, 287 + * add whatever PAGE_SIZE multiple offset they wish, and feed this into the 288 + * offset parameter of mmap on /proc/bus/pci/XXX for that device. 289 + * 290 + * Returns negative error code on failure, zero on success. 291 + */ 292 + static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, 293 + resource_size_t *offset, 294 + enum pci_mmap_state mmap_state) 295 + { 296 + struct pci_controller *hose = pci_bus_to_host(dev->bus); 297 + unsigned long io_offset = 0; 298 + int i, res_bit; 299 + 300 + if (hose == 0) 301 + return NULL; /* should never happen */ 302 + 303 + /* If memory, add on the PCI bridge address offset */ 304 + if (mmap_state == pci_mmap_mem) { 305 + #if 0 /* See comment in pci_resource_to_user() for why this is disabled */ 306 + *offset += hose->pci_mem_offset; 307 + #endif 308 + res_bit = IORESOURCE_MEM; 309 + } else { 310 + io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; 311 + *offset += io_offset; 312 + res_bit = IORESOURCE_IO; 313 + } 314 + 315 + /* 316 + * Check that the offset requested corresponds to one of the 317 + * resources of the device. 318 + */ 319 + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 320 + struct resource *rp = &dev->resource[i]; 321 + int flags = rp->flags; 322 + 323 + /* treat ROM as memory (should be already) */ 324 + if (i == PCI_ROM_RESOURCE) 325 + flags |= IORESOURCE_MEM; 326 + 327 + /* Active and same type? */ 328 + if ((flags & res_bit) == 0) 329 + continue; 330 + 331 + /* In the range of this resource? */ 332 + if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) 333 + continue; 334 + 335 + /* found it! 
construct the final physical address */ 336 + if (mmap_state == pci_mmap_io) 337 + *offset += hose->io_base_phys - io_offset; 338 + return rp; 339 + } 340 + 341 + return NULL; 342 + } 343 + 344 + /* 345 + * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci 346 + * device mapping. 347 + */ 348 + static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, 349 + pgprot_t protection, 350 + enum pci_mmap_state mmap_state, 351 + int write_combine) 352 + { 353 + pgprot_t prot = protection; 354 + 355 + /* Write combine is always 0 on non-memory space mappings. On 356 + * memory space, if the user didn't pass 1, we check for a 357 + * "prefetchable" resource. This is a bit hackish, but we use 358 + * this to workaround the inability of /sysfs to provide a write 359 + * combine bit 360 + */ 361 + if (mmap_state != pci_mmap_mem) 362 + write_combine = 0; 363 + else if (write_combine == 0) { 364 + if (rp->flags & IORESOURCE_PREFETCH) 365 + write_combine = 1; 366 + } 367 + 368 + return pgprot_noncached(prot); 369 + } 370 + 371 + /* 372 + * This one is used by /dev/mem and fbdev who have no clue about the 373 + * PCI device, it tries to find the PCI device first and calls the 374 + * above routine 375 + */ 376 + pgprot_t pci_phys_mem_access_prot(struct file *file, 377 + unsigned long pfn, 378 + unsigned long size, 379 + pgprot_t prot) 380 + { 381 + struct pci_dev *pdev = NULL; 382 + struct resource *found = NULL; 383 + resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; 384 + int i; 385 + 386 + if (page_is_ram(pfn)) 387 + return prot; 388 + 389 + prot = pgprot_noncached(prot); 390 + for_each_pci_dev(pdev) { 391 + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 392 + struct resource *rp = &pdev->resource[i]; 393 + int flags = rp->flags; 394 + 395 + /* Active and same type? */ 396 + if ((flags & IORESOURCE_MEM) == 0) 397 + continue; 398 + /* In the range of this resource? */ 399 + if (offset < (rp->start & PAGE_MASK) || 400 + offset > rp->end) 401 + continue; 402 + found = rp; 403 + break; 404 + } 405 + if (found) 406 + break; 407 + } 408 + if (found) { 409 + if (found->flags & IORESOURCE_PREFETCH) 410 + prot = pgprot_noncached_wc(prot); 411 + pci_dev_put(pdev); 412 + } 413 + 414 + pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", 415 + (unsigned long long)offset, pgprot_val(prot)); 416 + 417 + return prot; 418 + } 419 + 420 + /* 421 + * Perform the actual remap of the pages for a PCI device mapping, as 422 + * appropriate for this architecture. The region in the process to map 423 + * is described by vm_start and vm_end members of VMA, the base physical 424 + * address is found in vm_pgoff. 425 + * The pci device structure is provided so that architectures may make mapping 426 + * decisions on a per-device or per-bus basis. 427 + * 428 + * Returns a negative error code on failure, zero on success. 
429 + */ 430 + int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 431 + enum pci_mmap_state mmap_state, int write_combine) 432 + { 433 + resource_size_t offset = 434 + ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; 435 + struct resource *rp; 436 + int ret; 437 + 438 + rp = __pci_mmap_make_offset(dev, &offset, mmap_state); 439 + if (rp == NULL) 440 + return -EINVAL; 441 + 442 + vma->vm_pgoff = offset >> PAGE_SHIFT; 443 + vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, 444 + vma->vm_page_prot, 445 + mmap_state, write_combine); 446 + 447 + ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 448 + vma->vm_end - vma->vm_start, vma->vm_page_prot); 449 + 450 + return ret; 451 + } 452 + 453 + /* This provides legacy IO read access on a bus */ 454 + int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) 455 + { 456 + unsigned long offset; 457 + struct pci_controller *hose = pci_bus_to_host(bus); 458 + struct resource *rp = &hose->io_resource; 459 + void __iomem *addr; 460 + 461 + /* Check if port can be supported by that bus. We only check 462 + * the ranges of the PHB though, not the bus itself as the rules 463 + * for forwarding legacy cycles down bridges are not our problem 464 + * here. So if the host bridge supports it, we do it. 465 + */ 466 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 467 + offset += port; 468 + 469 + if (!(rp->flags & IORESOURCE_IO)) 470 + return -ENXIO; 471 + if (offset < rp->start || (offset + size) > rp->end) 472 + return -ENXIO; 473 + addr = hose->io_base_virt + port; 474 + 475 + switch (size) { 476 + case 1: 477 + *((u8 *)val) = in_8(addr); 478 + return 1; 479 + case 2: 480 + if (port & 1) 481 + return -EINVAL; 482 + *((u16 *)val) = in_le16(addr); 483 + return 2; 484 + case 4: 485 + if (port & 3) 486 + return -EINVAL; 487 + *((u32 *)val) = in_le32(addr); 488 + return 4; 489 + } 490 + return -EINVAL; 491 + } 492 + 493 + /* This provides legacy IO write access on a bus */ 494 + int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) 495 + { 496 + unsigned long offset; 497 + struct pci_controller *hose = pci_bus_to_host(bus); 498 + struct resource *rp = &hose->io_resource; 499 + void __iomem *addr; 500 + 501 + /* Check if port can be supported by that bus. We only check 502 + * the ranges of the PHB though, not the bus itself as the rules 503 + * for forwarding legacy cycles down bridges are not our problem 504 + * here. So if the host bridge supports it, we do it. 505 + */ 506 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 507 + offset += port; 508 + 509 + if (!(rp->flags & IORESOURCE_IO)) 510 + return -ENXIO; 511 + if (offset < rp->start || (offset + size) > rp->end) 512 + return -ENXIO; 513 + addr = hose->io_base_virt + port; 514 + 515 + /* WARNING: The generic code is idiotic. 
It gets passed a pointer 516 + * to what can be a 1, 2 or 4 byte quantity and always reads that 517 + * as a u32, which means that we have to correct the location of 518 + * the data read within those 32 bits for size 1 and 2 519 + */ 520 + switch (size) { 521 + case 1: 522 + out_8(addr, val >> 24); 523 + return 1; 524 + case 2: 525 + if (port & 1) 526 + return -EINVAL; 527 + out_le16(addr, val >> 16); 528 + return 2; 529 + case 4: 530 + if (port & 3) 531 + return -EINVAL; 532 + out_le32(addr, val); 533 + return 4; 534 + } 535 + return -EINVAL; 536 + } 537 + 538 + /* This provides legacy IO or memory mmap access on a bus */ 539 + int pci_mmap_legacy_page_range(struct pci_bus *bus, 540 + struct vm_area_struct *vma, 541 + enum pci_mmap_state mmap_state) 542 + { 543 + struct pci_controller *hose = pci_bus_to_host(bus); 544 + resource_size_t offset = 545 + ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; 546 + resource_size_t size = vma->vm_end - vma->vm_start; 547 + struct resource *rp; 548 + 549 + pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", 550 + pci_domain_nr(bus), bus->number, 551 + mmap_state == pci_mmap_mem ? "MEM" : "IO", 552 + (unsigned long long)offset, 553 + (unsigned long long)(offset + size - 1)); 554 + 555 + if (mmap_state == pci_mmap_mem) { 556 + /* Hack alert ! 557 + * 558 + * Because X is lame and can fail starting if it gets an error 559 + * trying to mmap legacy_mem (instead of just moving on without 560 + * legacy memory access) we fake it here by giving it anonymous 561 + * memory, effectively behaving just like /dev/zero 562 + */ 563 + if ((offset + size) > hose->isa_mem_size) { 564 + #ifdef CONFIG_MMU 565 + printk(KERN_DEBUG 566 + "Process %s (pid:%d) mapped non-existing PCI" 567 + "legacy memory for 0%04x:%02x\n", 568 + current->comm, current->pid, pci_domain_nr(bus), 569 + bus->number); 570 + #endif 571 + if (vma->vm_flags & VM_SHARED) 572 + return shmem_zero_setup(vma); 573 + return 0; 574 + } 575 + offset += hose->isa_mem_phys; 576 + } else { 577 + unsigned long io_offset = (unsigned long)hose->io_base_virt - \ 578 + _IO_BASE; 579 + unsigned long roffset = offset + io_offset; 580 + rp = &hose->io_resource; 581 + if (!(rp->flags & IORESOURCE_IO)) 582 + return -ENXIO; 583 + if (roffset < rp->start || (roffset + size) > rp->end) 584 + return -ENXIO; 585 + offset += hose->io_base_phys; 586 + } 587 + pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); 588 + 589 + vma->vm_pgoff = offset >> PAGE_SHIFT; 590 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 591 + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 592 + vma->vm_end - vma->vm_start, 593 + vma->vm_page_prot); 594 + } 595 + 596 + void pci_resource_to_user(const struct pci_dev *dev, int bar, 597 + const struct resource *rsrc, 598 + resource_size_t *start, resource_size_t *end) 599 + { 600 + struct pci_controller *hose = pci_bus_to_host(dev->bus); 601 + resource_size_t offset = 0; 602 + 603 + if (hose == NULL) 604 + return; 605 + 606 + if (rsrc->flags & IORESOURCE_IO) 607 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 608 + 609 + /* We pass a fully fixed up address to userland for MMIO instead of 610 + * a BAR value because X is lame and expects to be able to use that 611 + * to pass to /dev/mem ! 
612 + * 613 + * That means that we'll have potentially 64 bits values where some 614 + * userland apps only expect 32 (like X itself since it thinks only 615 + * Sparc has 64 bits MMIO) but if we don't do that, we break it on 616 + * 32 bits CHRPs :-( 617 + * 618 + * Hopefully, the sysfs insterface is immune to that gunk. Once X 619 + * has been fixed (and the fix spread enough), we can re-enable the 620 + * 2 lines below and pass down a BAR value to userland. In that case 621 + * we'll also have to re-enable the matching code in 622 + * __pci_mmap_make_offset(). 623 + * 624 + * BenH. 625 + */ 626 + #if 0 627 + else if (rsrc->flags & IORESOURCE_MEM) 628 + offset = hose->pci_mem_offset; 629 + #endif 630 + 631 + *start = rsrc->start - offset; 632 + *end = rsrc->end - offset; 633 + } 634 + 635 + /** 636 + * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree 637 + * @hose: newly allocated pci_controller to be setup 638 + * @dev: device node of the host bridge 639 + * @primary: set if primary bus (32 bits only, soon to be deprecated) 640 + * 641 + * This function will parse the "ranges" property of a PCI host bridge device 642 + * node and setup the resource mapping of a pci controller based on its 643 + * content. 644 + * 645 + * Life would be boring if it wasn't for a few issues that we have to deal 646 + * with here: 647 + * 648 + * - We can only cope with one IO space range and up to 3 Memory space 649 + * ranges. However, some machines (thanks Apple !) tend to split their 650 + * space into lots of small contiguous ranges. So we have to coalesce. 651 + * 652 + * - We can only cope with all memory ranges having the same offset 653 + * between CPU addresses and PCI addresses. Unfortunately, some bridges 654 + * are setup for a large 1:1 mapping along with a small "window" which 655 + * maps PCI address 0 to some arbitrary high address of the CPU space in 656 + * order to give access to the ISA memory hole. 657 + * The way out of here that I've chosen for now is to always set the 658 + * offset based on the first resource found, then override it if we 659 + * have a different offset and the previous was set by an ISA hole. 660 + * 661 + * - Some busses have IO space not starting at 0, which causes trouble with 662 + * the way we do our IO resource renumbering. The code somewhat deals with 663 + * it for 64 bits but I would expect problems on 32 bits. 664 + * 665 + * - Some 32 bits platforms such as 4xx can have physical space larger than 666 + * 32 bits so we need to use 64 bits values for the parsing 667 + */ 668 + void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 669 + struct device_node *dev, 670 + int primary) 671 + { 672 + const u32 *ranges; 673 + int rlen; 674 + int pna = of_n_addr_cells(dev); 675 + int np = pna + 5; 676 + int memno = 0, isa_hole = -1; 677 + u32 pci_space; 678 + unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; 679 + unsigned long long isa_mb = 0; 680 + struct resource *res; 681 + 682 + printk(KERN_INFO "PCI host bridge %s %s ranges:\n", 683 + dev->full_name, primary ? 
"(primary)" : ""); 684 + 685 + /* Get ranges property */ 686 + ranges = of_get_property(dev, "ranges", &rlen); 687 + if (ranges == NULL) 688 + return; 689 + 690 + /* Parse it */ 691 + pr_debug("Parsing ranges property...\n"); 692 + while ((rlen -= np * 4) >= 0) { 693 + /* Read next ranges element */ 694 + pci_space = ranges[0]; 695 + pci_addr = of_read_number(ranges + 1, 2); 696 + cpu_addr = of_translate_address(dev, ranges + 3); 697 + size = of_read_number(ranges + pna + 3, 2); 698 + 699 + pr_debug("pci_space: 0x%08x pci_addr:0x%016llx " 700 + "cpu_addr:0x%016llx size:0x%016llx\n", 701 + pci_space, pci_addr, cpu_addr, size); 702 + 703 + ranges += np; 704 + 705 + /* If we failed translation or got a zero-sized region 706 + * (some FW try to feed us with non sensical zero sized regions 707 + * such as power3 which look like some kind of attempt 708 + * at exposing the VGA memory hole) 709 + */ 710 + if (cpu_addr == OF_BAD_ADDR || size == 0) 711 + continue; 712 + 713 + /* Now consume following elements while they are contiguous */ 714 + for (; rlen >= np * sizeof(u32); 715 + ranges += np, rlen -= np * 4) { 716 + if (ranges[0] != pci_space) 717 + break; 718 + pci_next = of_read_number(ranges + 1, 2); 719 + cpu_next = of_translate_address(dev, ranges + 3); 720 + if (pci_next != pci_addr + size || 721 + cpu_next != cpu_addr + size) 722 + break; 723 + size += of_read_number(ranges + pna + 3, 2); 724 + } 725 + 726 + /* Act based on address space type */ 727 + res = NULL; 728 + switch ((pci_space >> 24) & 0x3) { 729 + case 1: /* PCI IO space */ 730 + printk(KERN_INFO 731 + " IO 0x%016llx..0x%016llx -> 0x%016llx\n", 732 + cpu_addr, cpu_addr + size - 1, pci_addr); 733 + 734 + /* We support only one IO range */ 735 + if (hose->pci_io_size) { 736 + printk(KERN_INFO 737 + " \\--> Skipped (too many) !\n"); 738 + continue; 739 + } 740 + /* On 32 bits, limit I/O space to 16MB */ 741 + if (size > 0x01000000) 742 + size = 0x01000000; 743 + 744 + /* 32 bits needs to map IOs here */ 745 + hose->io_base_virt = ioremap(cpu_addr, size); 746 + 747 + /* Expect trouble if pci_addr is not 0 */ 748 + if (primary) 749 + isa_io_base = 750 + (unsigned long)hose->io_base_virt; 751 + /* pci_io_size and io_base_phys always represent IO 752 + * space starting at 0 so we factor in pci_addr 753 + */ 754 + hose->pci_io_size = pci_addr + size; 755 + hose->io_base_phys = cpu_addr - pci_addr; 756 + 757 + /* Build resource */ 758 + res = &hose->io_resource; 759 + res->flags = IORESOURCE_IO; 760 + res->start = pci_addr; 761 + break; 762 + case 2: /* PCI Memory space */ 763 + case 3: /* PCI 64 bits Memory space */ 764 + printk(KERN_INFO 765 + " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", 766 + cpu_addr, cpu_addr + size - 1, pci_addr, 767 + (pci_space & 0x40000000) ? "Prefetch" : ""); 768 + 769 + /* We support only 3 memory ranges */ 770 + if (memno >= 3) { 771 + printk(KERN_INFO 772 + " \\--> Skipped (too many) !\n"); 773 + continue; 774 + } 775 + /* Handles ISA memory hole space here */ 776 + if (pci_addr == 0) { 777 + isa_mb = cpu_addr; 778 + isa_hole = memno; 779 + if (primary || isa_mem_base == 0) 780 + isa_mem_base = cpu_addr; 781 + hose->isa_mem_phys = cpu_addr; 782 + hose->isa_mem_size = size; 783 + } 784 + 785 + /* We get the PCI/Mem offset from the first range or 786 + * the, current one if the offset came from an ISA 787 + * hole. If they don't match, bugger. 
788 + */ 789 + if (memno == 0 || 790 + (isa_hole >= 0 && pci_addr != 0 && 791 + hose->pci_mem_offset == isa_mb)) 792 + hose->pci_mem_offset = cpu_addr - pci_addr; 793 + else if (pci_addr != 0 && 794 + hose->pci_mem_offset != cpu_addr - pci_addr) { 795 + printk(KERN_INFO 796 + " \\--> Skipped (offset mismatch) !\n"); 797 + continue; 798 + } 799 + 800 + /* Build resource */ 801 + res = &hose->mem_resources[memno++]; 802 + res->flags = IORESOURCE_MEM; 803 + if (pci_space & 0x40000000) 804 + res->flags |= IORESOURCE_PREFETCH; 805 + res->start = cpu_addr; 806 + break; 807 + } 808 + if (res != NULL) { 809 + res->name = dev->full_name; 810 + res->end = res->start + size - 1; 811 + res->parent = NULL; 812 + res->sibling = NULL; 813 + res->child = NULL; 814 + } 815 + } 816 + 817 + /* If there's an ISA hole and the pci_mem_offset is -not- matching 818 + * the ISA hole offset, then we need to remove the ISA hole from 819 + * the resource list for that brige 820 + */ 821 + if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) { 822 + unsigned int next = isa_hole + 1; 823 + printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb); 824 + if (next < memno) 825 + memmove(&hose->mem_resources[isa_hole], 826 + &hose->mem_resources[next], 827 + sizeof(struct resource) * (memno - next)); 828 + hose->mem_resources[--memno].flags = 0; 829 + } 830 + } 831 + 832 + /* Decide whether to display the domain number in /proc */ 833 + int pci_proc_domain(struct pci_bus *bus) 834 + { 835 + struct pci_controller *hose = pci_bus_to_host(bus); 836 + 837 + if (!(pci_flags & PCI_ENABLE_PROC_DOMAINS)) 838 + return 0; 839 + if (pci_flags & PCI_COMPAT_DOMAIN_0) 840 + return hose->global_number != 0; 841 + return 1; 842 + } 843 + 844 + void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, 845 + struct resource *res) 846 + { 847 + resource_size_t offset = 0, mask = (resource_size_t)-1; 848 + struct pci_controller *hose = pci_bus_to_host(dev->bus); 849 + 850 + if (!hose) 851 + return; 852 + if (res->flags & IORESOURCE_IO) { 853 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 854 + mask = 0xffffffffu; 855 + } else if (res->flags & IORESOURCE_MEM) 856 + offset = hose->pci_mem_offset; 857 + 858 + region->start = (res->start - offset) & mask; 859 + region->end = (res->end - offset) & mask; 860 + } 861 + EXPORT_SYMBOL(pcibios_resource_to_bus); 862 + 863 + void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, 864 + struct pci_bus_region *region) 865 + { 866 + resource_size_t offset = 0, mask = (resource_size_t)-1; 867 + struct pci_controller *hose = pci_bus_to_host(dev->bus); 868 + 869 + if (!hose) 870 + return; 871 + if (res->flags & IORESOURCE_IO) { 872 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 873 + mask = 0xffffffffu; 874 + } else if (res->flags & IORESOURCE_MEM) 875 + offset = hose->pci_mem_offset; 876 + res->start = (region->start + offset) & mask; 877 + res->end = (region->end + offset) & mask; 878 + } 879 + EXPORT_SYMBOL(pcibios_bus_to_resource); 880 + 881 + /* Fixup a bus resource into a linux resource */ 882 + static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) 883 + { 884 + struct pci_controller *hose = pci_bus_to_host(dev->bus); 885 + resource_size_t offset = 0, mask = (resource_size_t)-1; 886 + 887 + if (res->flags & IORESOURCE_IO) { 888 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 889 + mask = 0xffffffffu; 890 + } else if (res->flags & IORESOURCE_MEM) 891 + offset = hose->pci_mem_offset; 892 + 893 + res->start = 
(res->start + offset) & mask; 894 + res->end = (res->end + offset) & mask; 895 + } 896 + 897 + /* This header fixup will do the resource fixup for all devices as they are 898 + * probed, but not for bridge ranges 899 + */ 900 + static void __devinit pcibios_fixup_resources(struct pci_dev *dev) 901 + { 902 + struct pci_controller *hose = pci_bus_to_host(dev->bus); 903 + int i; 904 + 905 + if (!hose) { 906 + printk(KERN_ERR "No host bridge for PCI dev %s !\n", 907 + pci_name(dev)); 908 + return; 909 + } 910 + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 911 + struct resource *res = dev->resource + i; 912 + if (!res->flags) 913 + continue; 914 + /* On platforms that have PCI_PROBE_ONLY set, we don't 915 + * consider 0 as an unassigned BAR value. It's technically 916 + * a valid value, but linux doesn't like it... so when we can 917 + * re-assign things, we do so, but if we can't, we keep it 918 + * around and hope for the best... 919 + */ 920 + if (res->start == 0 && !(pci_flags & PCI_PROBE_ONLY)) { 921 + pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \ 922 + "is unassigned\n", 923 + pci_name(dev), i, 924 + (unsigned long long)res->start, 925 + (unsigned long long)res->end, 926 + (unsigned int)res->flags); 927 + res->end -= res->start; 928 + res->start = 0; 929 + res->flags |= IORESOURCE_UNSET; 930 + continue; 931 + } 932 + 933 + pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n", 934 + pci_name(dev), i, 935 + (unsigned long long)res->start,\ 936 + (unsigned long long)res->end, 937 + (unsigned int)res->flags); 938 + 939 + fixup_resource(res, dev); 940 + 941 + pr_debug("PCI:%s %016llx-%016llx\n", 942 + pci_name(dev), 943 + (unsigned long long)res->start, 944 + (unsigned long long)res->end); 945 + } 946 + } 947 + DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); 948 + 949 + /* This function tries to figure out if a bridge resource has been initialized 950 + * by the firmware or not. It doesn't have to be absolutely bullet proof, but 951 + * things go more smoothly when it gets it right. It should covers cases such 952 + * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges 953 + */ 954 + static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus, 955 + struct resource *res) 956 + { 957 + struct pci_controller *hose = pci_bus_to_host(bus); 958 + struct pci_dev *dev = bus->self; 959 + resource_size_t offset; 960 + u16 command; 961 + int i; 962 + 963 + /* We don't do anything if PCI_PROBE_ONLY is set */ 964 + if (pci_flags & PCI_PROBE_ONLY) 965 + return 0; 966 + 967 + /* Job is a bit different between memory and IO */ 968 + if (res->flags & IORESOURCE_MEM) { 969 + /* If the BAR is non-0 (res != pci_mem_offset) then it's 970 + * probably been initialized by somebody 971 + */ 972 + if (res->start != hose->pci_mem_offset) 973 + return 0; 974 + 975 + /* The BAR is 0, let's check if memory decoding is enabled on 976 + * the bridge. If not, we consider it unassigned 977 + */ 978 + pci_read_config_word(dev, PCI_COMMAND, &command); 979 + if ((command & PCI_COMMAND_MEMORY) == 0) 980 + return 1; 981 + 982 + /* Memory decoding is enabled and the BAR is 0. 
If any of 983 + * the bridge resources covers that starting address (0 then 984 + * it's good enough for us for memory 985 + */ 986 + for (i = 0; i < 3; i++) { 987 + if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && 988 + hose->mem_resources[i].start == hose->pci_mem_offset) 989 + return 0; 990 + } 991 + 992 + /* Well, it starts at 0 and we know it will collide so we may as 993 + * well consider it as unassigned. That covers the Apple case. 994 + */ 995 + return 1; 996 + } else { 997 + /* If the BAR is non-0, then we consider it assigned */ 998 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 999 + if (((res->start - offset) & 0xfffffffful) != 0) 1000 + return 0; 1001 + 1002 + /* Here, we are a bit different than memory as typically IO 1003 + * space starting at low addresses -is- valid. What we do 1004 + * instead if that we consider as unassigned anything that 1005 + * doesn't have IO enabled in the PCI command register, 1006 + * and that's it. 1007 + */ 1008 + pci_read_config_word(dev, PCI_COMMAND, &command); 1009 + if (command & PCI_COMMAND_IO) 1010 + return 0; 1011 + 1012 + /* It's starting at 0 and IO is disabled in the bridge, consider 1013 + * it unassigned 1014 + */ 1015 + return 1; 1016 + } 1017 + } 1018 + 1019 + /* Fixup resources of a PCI<->PCI bridge */ 1020 + static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) 1021 + { 1022 + struct resource *res; 1023 + int i; 1024 + 1025 + struct pci_dev *dev = bus->self; 1026 + 1027 + for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { 1028 + res = bus->resource[i]; 1029 + if (!res) 1030 + continue; 1031 + if (!res->flags) 1032 + continue; 1033 + if (i >= 3 && bus->self->transparent) 1034 + continue; 1035 + 1036 + pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", 1037 + pci_name(dev), i, 1038 + (unsigned long long)res->start,\ 1039 + (unsigned long long)res->end, 1040 + (unsigned int)res->flags); 1041 + 1042 + /* Perform fixup */ 1043 + fixup_resource(res, dev); 1044 + 1045 + /* Try to detect uninitialized P2P bridge resources, 1046 + * and clear them out so they get re-assigned later 1047 + */ 1048 + if (pcibios_uninitialized_bridge_resource(bus, res)) { 1049 + res->flags = 0; 1050 + pr_debug("PCI:%s (unassigned)\n", 1051 + pci_name(dev)); 1052 + } else { 1053 + pr_debug("PCI:%s %016llx-%016llx\n", 1054 + pci_name(dev), 1055 + (unsigned long long)res->start, 1056 + (unsigned long long)res->end); 1057 + } 1058 + } 1059 + } 1060 + 1061 + void __devinit pcibios_setup_bus_self(struct pci_bus *bus) 1062 + { 1063 + /* Fix up the bus resources for P2P bridges */ 1064 + if (bus->self != NULL) 1065 + pcibios_fixup_bridge(bus); 1066 + } 1067 + 1068 + void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) 1069 + { 1070 + struct pci_dev *dev; 1071 + 1072 + pr_debug("PCI: Fixup bus devices %d (%s)\n", 1073 + bus->number, bus->self ? 
pci_name(bus->self) : "PHB"); 1074 + 1075 + list_for_each_entry(dev, &bus->devices, bus_list) { 1076 + struct dev_archdata *sd = &dev->dev.archdata; 1077 + 1078 + /* Setup OF node pointer in archdata */ 1079 + sd->of_node = pci_device_to_OF_node(dev); 1080 + 1081 + /* Fixup NUMA node as it may not be setup yet by the generic 1082 + * code and is needed by the DMA init 1083 + */ 1084 + set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); 1085 + 1086 + /* Hook up default DMA ops */ 1087 + sd->dma_ops = pci_dma_ops; 1088 + sd->dma_data = (void *)PCI_DRAM_OFFSET; 1089 + 1090 + /* Read default IRQs and fixup if necessary */ 1091 + pci_read_irq_line(dev); 1092 + } 1093 + } 1094 + 1095 + void __devinit pcibios_fixup_bus(struct pci_bus *bus) 1096 + { 1097 + /* When called from the generic PCI probe, read PCI<->PCI bridge 1098 + * bases. This is -not- called when generating the PCI tree from 1099 + * the OF device-tree. 1100 + */ 1101 + if (bus->self != NULL) 1102 + pci_read_bridge_bases(bus); 1103 + 1104 + /* Now fixup the bus bus */ 1105 + pcibios_setup_bus_self(bus); 1106 + 1107 + /* Now fixup devices on that bus */ 1108 + pcibios_setup_bus_devices(bus); 1109 + } 1110 + EXPORT_SYMBOL(pcibios_fixup_bus); 1111 + 1112 + static int skip_isa_ioresource_align(struct pci_dev *dev) 1113 + { 1114 + if ((pci_flags & PCI_CAN_SKIP_ISA_ALIGN) && 1115 + !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA)) 1116 + return 1; 1117 + return 0; 1118 + } 1119 + 1120 + /* 1121 + * We need to avoid collisions with `mirrored' VGA ports 1122 + * and other strange ISA hardware, so we always want the 1123 + * addresses to be allocated in the 0x000-0x0ff region 1124 + * modulo 0x400. 1125 + * 1126 + * Why? Because some silly external IO cards only decode 1127 + * the low 10 bits of the IO address. The 0x00-0xff region 1128 + * is reserved for motherboard devices that decode all 16 1129 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff, 1130 + * but we want to try to avoid allocating at 0x2900-0x2bff 1131 + * which might have be mirrored at 0x0100-0x03ff.. 1132 + */ 1133 + void pcibios_align_resource(void *data, struct resource *res, 1134 + resource_size_t size, resource_size_t align) 1135 + { 1136 + struct pci_dev *dev = data; 1137 + 1138 + if (res->flags & IORESOURCE_IO) { 1139 + resource_size_t start = res->start; 1140 + 1141 + if (skip_isa_ioresource_align(dev)) 1142 + return; 1143 + if (start & 0x300) { 1144 + start = (start + 0x3ff) & ~0x3ff; 1145 + res->start = start; 1146 + } 1147 + } 1148 + } 1149 + EXPORT_SYMBOL(pcibios_align_resource); 1150 + 1151 + /* 1152 + * Reparent resource children of pr that conflict with res 1153 + * under res, and make res replace those children. 1154 + */ 1155 + static int __init reparent_resources(struct resource *parent, 1156 + struct resource *res) 1157 + { 1158 + struct resource *p, **pp; 1159 + struct resource **firstpp = NULL; 1160 + 1161 + for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { 1162 + if (p->end < res->start) 1163 + continue; 1164 + if (res->end < p->start) 1165 + break; 1166 + if (p->start < res->start || p->end > res->end) 1167 + return -1; /* not completely contained */ 1168 + if (firstpp == NULL) 1169 + firstpp = pp; 1170 + } 1171 + if (firstpp == NULL) 1172 + return -1; /* didn't find any conflicting entries? 
*/ 1173 + res->parent = parent; 1174 + res->child = *firstpp; 1175 + res->sibling = *pp; 1176 + *firstpp = res; 1177 + *pp = NULL; 1178 + for (p = res->child; p != NULL; p = p->sibling) { 1179 + p->parent = res; 1180 + pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", 1181 + p->name, 1182 + (unsigned long long)p->start, 1183 + (unsigned long long)p->end, res->name); 1184 + } 1185 + return 0; 1186 + } 1187 + 1188 + /* 1189 + * Handle resources of PCI devices. If the world were perfect, we could 1190 + * just allocate all the resource regions and do nothing more. It isn't. 1191 + * On the other hand, we cannot just re-allocate all devices, as it would 1192 + * require us to know lots of host bridge internals. So we attempt to 1193 + * keep as much of the original configuration as possible, but tweak it 1194 + * when it's found to be wrong. 1195 + * 1196 + * Known BIOS problems we have to work around: 1197 + * - I/O or memory regions not configured 1198 + * - regions configured, but not enabled in the command register 1199 + * - bogus I/O addresses above 64K used 1200 + * - expansion ROMs left enabled (this may sound harmless, but given 1201 + * the fact the PCI specs explicitly allow address decoders to be 1202 + * shared between expansion ROMs and other resource regions, it's 1203 + * at least dangerous) 1204 + * 1205 + * Our solution: 1206 + * (1) Allocate resources for all buses behind PCI-to-PCI bridges. 1207 + * This gives us fixed barriers on where we can allocate. 1208 + * (2) Allocate resources for all enabled devices. If there is 1209 + * a collision, just mark the resource as unallocated. Also 1210 + * disable expansion ROMs during this step. 1211 + * (3) Try to allocate resources for disabled devices. If the 1212 + * resources were assigned correctly, everything goes well, 1213 + * if they weren't, they won't disturb allocation of other 1214 + * resources. 1215 + * (4) Assign new addresses to resources which were either 1216 + * not configured at all or misconfigured. If explicitly 1217 + * requested by the user, configure expansion ROM address 1218 + * as well. 1219 + */ 1220 + 1221 + void pcibios_allocate_bus_resources(struct pci_bus *bus) 1222 + { 1223 + struct pci_bus *b; 1224 + int i; 1225 + struct resource *res, *pr; 1226 + 1227 + pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", 1228 + pci_domain_nr(bus), bus->number); 1229 + 1230 + for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { 1231 + res = bus->resource[i]; 1232 + if (!res || !res->flags 1233 + || res->start > res->end || res->parent) 1234 + continue; 1235 + if (bus->parent == NULL) 1236 + pr = (res->flags & IORESOURCE_IO) ? 1237 + &ioport_resource : &iomem_resource; 1238 + else { 1239 + /* Don't bother with non-root busses when 1240 + * re-assigning all resources. We clear the 1241 + * resource flags as if they were colliding 1242 + * and as such ensure proper re-allocation 1243 + * later. 1244 + */ 1245 + if (pci_flags & PCI_REASSIGN_ALL_RSRC) 1246 + goto clear_resource; 1247 + pr = pci_find_parent_resource(bus->self, res); 1248 + if (pr == res) { 1249 + /* this happens when the generic PCI 1250 + * code (wrongly) decides that this 1251 + * bridge is transparent -- paulus 1252 + */ 1253 + continue; 1254 + } 1255 + } 1256 + 1257 + pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " 1258 + "[0x%x], parent %p (%s)\n", 1259 + bus->self ? 
pci_name(bus->self) : "PHB", 1260 + bus->number, i, 1261 + (unsigned long long)res->start, 1262 + (unsigned long long)res->end, 1263 + (unsigned int)res->flags, 1264 + pr, (pr && pr->name) ? pr->name : "nil"); 1265 + 1266 + if (pr && !(pr->flags & IORESOURCE_UNSET)) { 1267 + if (request_resource(pr, res) == 0) 1268 + continue; 1269 + /* 1270 + * Must be a conflict with an existing entry. 1271 + * Move that entry (or entries) under the 1272 + * bridge resource and try again. 1273 + */ 1274 + if (reparent_resources(pr, res) == 0) 1275 + continue; 1276 + } 1277 + printk(KERN_WARNING "PCI: Cannot allocate resource region " 1278 + "%d of PCI bridge %d, will remap\n", i, bus->number); 1279 + clear_resource: 1280 + res->flags = 0; 1281 + } 1282 + 1283 + list_for_each_entry(b, &bus->children, node) 1284 + pcibios_allocate_bus_resources(b); 1285 + } 1286 + 1287 + static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) 1288 + { 1289 + struct resource *pr, *r = &dev->resource[idx]; 1290 + 1291 + pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", 1292 + pci_name(dev), idx, 1293 + (unsigned long long)r->start, 1294 + (unsigned long long)r->end, 1295 + (unsigned int)r->flags); 1296 + 1297 + pr = pci_find_parent_resource(dev, r); 1298 + if (!pr || (pr->flags & IORESOURCE_UNSET) || 1299 + request_resource(pr, r) < 0) { 1300 + printk(KERN_WARNING "PCI: Cannot allocate resource region %d" 1301 + " of device %s, will remap\n", idx, pci_name(dev)); 1302 + if (pr) 1303 + pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", 1304 + pr, 1305 + (unsigned long long)pr->start, 1306 + (unsigned long long)pr->end, 1307 + (unsigned int)pr->flags); 1308 + /* We'll assign a new address later */ 1309 + r->flags |= IORESOURCE_UNSET; 1310 + r->end -= r->start; 1311 + r->start = 0; 1312 + } 1313 + } 1314 + 1315 + static void __init pcibios_allocate_resources(int pass) 1316 + { 1317 + struct pci_dev *dev = NULL; 1318 + int idx, disabled; 1319 + u16 command; 1320 + struct resource *r; 1321 + 1322 + for_each_pci_dev(dev) { 1323 + pci_read_config_word(dev, PCI_COMMAND, &command); 1324 + for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 1325 + r = &dev->resource[idx]; 1326 + if (r->parent) /* Already allocated */ 1327 + continue; 1328 + if (!r->flags || (r->flags & IORESOURCE_UNSET)) 1329 + continue; /* Not assigned at all */ 1330 + /* We only allocate ROMs on pass 1 just in case they 1331 + * have been screwed up by firmware 1332 + */ 1333 + if (idx == PCI_ROM_RESOURCE) 1334 + disabled = 1; 1335 + if (r->flags & IORESOURCE_IO) 1336 + disabled = !(command & PCI_COMMAND_IO); 1337 + else 1338 + disabled = !(command & PCI_COMMAND_MEMORY); 1339 + if (pass == disabled) 1340 + alloc_resource(dev, idx); 1341 + } 1342 + if (pass) 1343 + continue; 1344 + r = &dev->resource[PCI_ROM_RESOURCE]; 1345 + if (r->flags) { 1346 + /* Turn the ROM off, leave the resource region, 1347 + * but keep it unregistered. 
1348 + */ 1349 + u32 reg; 1350 + pci_read_config_dword(dev, dev->rom_base_reg, &reg); 1351 + if (reg & PCI_ROM_ADDRESS_ENABLE) { 1352 + pr_debug("PCI: Switching off ROM of %s\n", 1353 + pci_name(dev)); 1354 + r->flags &= ~IORESOURCE_ROM_ENABLE; 1355 + pci_write_config_dword(dev, dev->rom_base_reg, 1356 + reg & ~PCI_ROM_ADDRESS_ENABLE); 1357 + } 1358 + } 1359 + } 1360 + } 1361 + 1362 + static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) 1363 + { 1364 + struct pci_controller *hose = pci_bus_to_host(bus); 1365 + resource_size_t offset; 1366 + struct resource *res, *pres; 1367 + int i; 1368 + 1369 + pr_debug("Reserving legacy ranges for domain %04x\n", 1370 + pci_domain_nr(bus)); 1371 + 1372 + /* Check for IO */ 1373 + if (!(hose->io_resource.flags & IORESOURCE_IO)) 1374 + goto no_io; 1375 + offset = (unsigned long)hose->io_base_virt - _IO_BASE; 1376 + res = kzalloc(sizeof(struct resource), GFP_KERNEL); 1377 + BUG_ON(res == NULL); 1378 + res->name = "Legacy IO"; 1379 + res->flags = IORESOURCE_IO; 1380 + res->start = offset; 1381 + res->end = (offset + 0xfff) & 0xfffffffful; 1382 + pr_debug("Candidate legacy IO: %pR\n", res); 1383 + if (request_resource(&hose->io_resource, res)) { 1384 + printk(KERN_DEBUG 1385 + "PCI %04x:%02x Cannot reserve Legacy IO %pR\n", 1386 + pci_domain_nr(bus), bus->number, res); 1387 + kfree(res); 1388 + } 1389 + 1390 + no_io: 1391 + /* Check for memory */ 1392 + offset = hose->pci_mem_offset; 1393 + pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); 1394 + for (i = 0; i < 3; i++) { 1395 + pres = &hose->mem_resources[i]; 1396 + if (!(pres->flags & IORESOURCE_MEM)) 1397 + continue; 1398 + pr_debug("hose mem res: %pR\n", pres); 1399 + if ((pres->start - offset) <= 0xa0000 && 1400 + (pres->end - offset) >= 0xbffff) 1401 + break; 1402 + } 1403 + if (i >= 3) 1404 + return; 1405 + res = kzalloc(sizeof(struct resource), GFP_KERNEL); 1406 + BUG_ON(res == NULL); 1407 + res->name = "Legacy VGA memory"; 1408 + res->flags = IORESOURCE_MEM; 1409 + res->start = 0xa0000 + offset; 1410 + res->end = 0xbffff + offset; 1411 + pr_debug("Candidate VGA memory: %pR\n", res); 1412 + if (request_resource(pres, res)) { 1413 + printk(KERN_DEBUG 1414 + "PCI %04x:%02x Cannot reserve VGA memory %pR\n", 1415 + pci_domain_nr(bus), bus->number, res); 1416 + kfree(res); 1417 + } 1418 + } 1419 + 1420 + void __init pcibios_resource_survey(void) 1421 + { 1422 + struct pci_bus *b; 1423 + 1424 + /* Allocate and assign resources. 
If we re-assign everything, then 1425 + * we skip the allocate phase 1426 + */ 1427 + list_for_each_entry(b, &pci_root_buses, node) 1428 + pcibios_allocate_bus_resources(b); 1429 + 1430 + if (!(pci_flags & PCI_REASSIGN_ALL_RSRC)) { 1431 + pcibios_allocate_resources(0); 1432 + pcibios_allocate_resources(1); 1433 + } 1434 + 1435 + /* Before we start assigning unassigned resource, we try to reserve 1436 + * the low IO area and the VGA memory area if they intersect the 1437 + * bus available resources to avoid allocating things on top of them 1438 + */ 1439 + if (!(pci_flags & PCI_PROBE_ONLY)) { 1440 + list_for_each_entry(b, &pci_root_buses, node) 1441 + pcibios_reserve_legacy_regions(b); 1442 + } 1443 + 1444 + /* Now, if the platform didn't decide to blindly trust the firmware, 1445 + * we proceed to assigning things that were left unassigned 1446 + */ 1447 + if (!(pci_flags & PCI_PROBE_ONLY)) { 1448 + pr_debug("PCI: Assigning unassigned resources...\n"); 1449 + pci_assign_unassigned_resources(); 1450 + } 1451 + } 1452 + 1453 + #ifdef CONFIG_HOTPLUG 1454 + 1455 + /* This is used by the PCI hotplug driver to allocate resource 1456 + * of newly plugged busses. We can try to consolidate with the 1457 + * rest of the code later, for now, keep it as-is as our main 1458 + * resource allocation function doesn't deal with sub-trees yet. 1459 + */ 1460 + void __devinit pcibios_claim_one_bus(struct pci_bus *bus) 1461 + { 1462 + struct pci_dev *dev; 1463 + struct pci_bus *child_bus; 1464 + 1465 + list_for_each_entry(dev, &bus->devices, bus_list) { 1466 + int i; 1467 + 1468 + for (i = 0; i < PCI_NUM_RESOURCES; i++) { 1469 + struct resource *r = &dev->resource[i]; 1470 + 1471 + if (r->parent || !r->start || !r->flags) 1472 + continue; 1473 + 1474 + pr_debug("PCI: Claiming %s: " 1475 + "Resource %d: %016llx..%016llx [%x]\n", 1476 + pci_name(dev), i, 1477 + (unsigned long long)r->start, 1478 + (unsigned long long)r->end, 1479 + (unsigned int)r->flags); 1480 + 1481 + pci_claim_resource(dev, i); 1482 + } 1483 + } 1484 + 1485 + list_for_each_entry(child_bus, &bus->children, node) 1486 + pcibios_claim_one_bus(child_bus); 1487 + } 1488 + EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); 1489 + 1490 + 1491 + /* pcibios_finish_adding_to_bus 1492 + * 1493 + * This is to be called by the hotplug code after devices have been 1494 + * added to a bus, this include calling it for a PHB that is just 1495 + * being added 1496 + */ 1497 + void pcibios_finish_adding_to_bus(struct pci_bus *bus) 1498 + { 1499 + pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", 1500 + pci_domain_nr(bus), bus->number); 1501 + 1502 + /* Allocate bus and devices resources */ 1503 + pcibios_allocate_bus_resources(bus); 1504 + pcibios_claim_one_bus(bus); 1505 + 1506 + /* Add new devices to global lists. Register in proc, sysfs. 
*/ 1507 + pci_bus_add_devices(bus); 1508 + 1509 + /* Fixup EEH */ 1510 + eeh_add_device_tree_late(bus); 1511 + } 1512 + EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); 1513 + 1514 + #endif /* CONFIG_HOTPLUG */ 1515 + 1516 + int pcibios_enable_device(struct pci_dev *dev, int mask) 1517 + { 1518 + return pci_enable_resources(dev, mask); 1519 + } 1520 + 1521 + void __devinit pcibios_setup_phb_resources(struct pci_controller *hose) 1522 + { 1523 + struct pci_bus *bus = hose->bus; 1524 + struct resource *res; 1525 + int i; 1526 + 1527 + /* Hookup PHB IO resource */ 1528 + bus->resource[0] = res = &hose->io_resource; 1529 + 1530 + if (!res->flags) { 1531 + printk(KERN_WARNING "PCI: I/O resource not set for host" 1532 + " bridge %s (domain %d)\n", 1533 + hose->dn->full_name, hose->global_number); 1534 + /* Workaround for lack of IO resource only on 32-bit */ 1535 + res->start = (unsigned long)hose->io_base_virt - isa_io_base; 1536 + res->end = res->start + IO_SPACE_LIMIT; 1537 + res->flags = IORESOURCE_IO; 1538 + } 1539 + 1540 + pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", 1541 + (unsigned long long)res->start, 1542 + (unsigned long long)res->end, 1543 + (unsigned long)res->flags); 1544 + 1545 + /* Hookup PHB Memory resources */ 1546 + for (i = 0; i < 3; ++i) { 1547 + res = &hose->mem_resources[i]; 1548 + if (!res->flags) { 1549 + if (i > 0) 1550 + continue; 1551 + printk(KERN_ERR "PCI: Memory resource 0 not set for " 1552 + "host bridge %s (domain %d)\n", 1553 + hose->dn->full_name, hose->global_number); 1554 + 1555 + /* Workaround for lack of MEM resource only on 32-bit */ 1556 + res->start = hose->pci_mem_offset; 1557 + res->end = (resource_size_t)-1LL; 1558 + res->flags = IORESOURCE_MEM; 1559 + 1560 + } 1561 + bus->resource[i+1] = res; 1562 + 1563 + pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", 1564 + i, (unsigned long long)res->start, 1565 + (unsigned long long)res->end, 1566 + (unsigned long)res->flags); 1567 + } 1568 + 1569 + pr_debug("PCI: PHB MEM offset = %016llx\n", 1570 + (unsigned long long)hose->pci_mem_offset); 1571 + pr_debug("PCI: PHB IO offset = %08lx\n", 1572 + (unsigned long)hose->io_base_virt - _IO_BASE); 1573 + } 1574 + 1575 + /* 1576 + * Null PCI config access functions, for the case when we can't 1577 + * find a hose. 1578 + */ 1579 + #define NULL_PCI_OP(rw, size, type) \ 1580 + static int \ 1581 + null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ 1582 + { \ 1583 + return PCIBIOS_DEVICE_NOT_FOUND; \ 1584 + } 1585 + 1586 + static int 1587 + null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 1588 + int len, u32 *val) 1589 + { 1590 + return PCIBIOS_DEVICE_NOT_FOUND; 1591 + } 1592 + 1593 + static int 1594 + null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 1595 + int len, u32 val) 1596 + { 1597 + return PCIBIOS_DEVICE_NOT_FOUND; 1598 + } 1599 + 1600 + static struct pci_ops null_pci_ops = { 1601 + .read = null_read_config, 1602 + .write = null_write_config, 1603 + }; 1604 + 1605 + /* 1606 + * These functions are used early on before PCI scanning is done 1607 + * and all of the pci_dev and pci_bus structures have been created. 1608 + */ 1609 + static struct pci_bus * 1610 + fake_pci_bus(struct pci_controller *hose, int busnr) 1611 + { 1612 + static struct pci_bus bus; 1613 + 1614 + if (!hose) 1615 + printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); 1616 + 1617 + bus.number = busnr; 1618 + bus.sysdata = hose; 1619 + bus.ops = hose ? 
hose->ops : &null_pci_ops; 1620 + return &bus; 1621 + } 1622 + 1623 + #define EARLY_PCI_OP(rw, size, type) \ 1624 + int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ 1625 + int devfn, int offset, type value) \ 1626 + { \ 1627 + return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ 1628 + devfn, offset, value); \ 1629 + } 1630 + 1631 + EARLY_PCI_OP(read, byte, u8 *) 1632 + EARLY_PCI_OP(read, word, u16 *) 1633 + EARLY_PCI_OP(read, dword, u32 *) 1634 + EARLY_PCI_OP(write, byte, u8) 1635 + EARLY_PCI_OP(write, word, u16) 1636 + EARLY_PCI_OP(write, dword, u32) 1637 + 1638 + int early_find_capability(struct pci_controller *hose, int bus, int devfn, 1639 + int cap) 1640 + { 1641 + return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); 1642 + }
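The EARLY_PCI_OP() wrappers above generate early_{read,write}_config_{byte,word,dword}(), which route config cycles through fake_pci_bus() and hose->ops before any struct pci_bus exists; xilinx_pci.c further down relies on exactly this to poke its host bridge. A minimal usage sketch, assuming an already-initialised pci_controller; the function name example_early_probe is made up for illustration:

/*
 * Illustrative sketch only: a hypothetical platform setup hook using the
 * early config accessors defined above, before the bus has been scanned.
 */
#include <linux/pci.h>
#include <asm/pci-bridge.h>

static void __init example_early_probe(struct pci_controller *hose)
{
	u32 id;
	u16 cmd;

	/* Read the vendor/device ID of devfn 0 on the root bus. */
	early_read_config_dword(hose, hose->first_busno, PCI_DEVFN(0, 0),
				PCI_VENDOR_ID, &id);
	if (id == 0xffffffff)
		return;		/* nothing there */

	/* Enable memory decoding and bus mastering on the host bridge. */
	early_read_config_word(hose, hose->first_busno, PCI_DEVFN(0, 0),
			       PCI_COMMAND, &cmd);
	early_write_config_word(hose, hose->first_busno, PCI_DEVFN(0, 0),
				PCI_COMMAND,
				cmd | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
}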
+430
arch/microblaze/pci/pci_32.c
···
··· 1 + /* 2 + * Common pmac/prep/chrp pci routines. -- Cort 3 + */ 4 + 5 + #include <linux/kernel.h> 6 + #include <linux/pci.h> 7 + #include <linux/delay.h> 8 + #include <linux/string.h> 9 + #include <linux/init.h> 10 + #include <linux/capability.h> 11 + #include <linux/sched.h> 12 + #include <linux/errno.h> 13 + #include <linux/bootmem.h> 14 + #include <linux/irq.h> 15 + #include <linux/list.h> 16 + #include <linux/of.h> 17 + 18 + #include <asm/processor.h> 19 + #include <asm/io.h> 20 + #include <asm/prom.h> 21 + #include <asm/sections.h> 22 + #include <asm/pci-bridge.h> 23 + #include <asm/byteorder.h> 24 + #include <asm/uaccess.h> 25 + 26 + #undef DEBUG 27 + 28 + unsigned long isa_io_base; 29 + unsigned long pci_dram_offset; 30 + int pcibios_assign_bus_offset = 1; 31 + 32 + static u8 *pci_to_OF_bus_map; 33 + 34 + /* By default, we don't re-assign bus numbers. We do this only on 35 + * some pmacs 36 + */ 37 + static int pci_assign_all_buses; 38 + 39 + static int pci_bus_count; 40 + 41 + /* 42 + * Functions below are used on OpenFirmware machines. 43 + */ 44 + static void 45 + make_one_node_map(struct device_node *node, u8 pci_bus) 46 + { 47 + const int *bus_range; 48 + int len; 49 + 50 + if (pci_bus >= pci_bus_count) 51 + return; 52 + bus_range = of_get_property(node, "bus-range", &len); 53 + if (bus_range == NULL || len < 2 * sizeof(int)) { 54 + printk(KERN_WARNING "Can't get bus-range for %s, " 55 + "assuming it starts at 0\n", node->full_name); 56 + pci_to_OF_bus_map[pci_bus] = 0; 57 + } else 58 + pci_to_OF_bus_map[pci_bus] = bus_range[0]; 59 + 60 + for_each_child_of_node(node, node) { 61 + struct pci_dev *dev; 62 + const unsigned int *class_code, *reg; 63 + 64 + class_code = of_get_property(node, "class-code", NULL); 65 + if (!class_code || 66 + ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && 67 + (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) 68 + continue; 69 + reg = of_get_property(node, "reg", NULL); 70 + if (!reg) 71 + continue; 72 + dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff)); 73 + if (!dev || !dev->subordinate) { 74 + pci_dev_put(dev); 75 + continue; 76 + } 77 + make_one_node_map(node, dev->subordinate->number); 78 + pci_dev_put(dev); 79 + } 80 + } 81 + 82 + void 83 + pcibios_make_OF_bus_map(void) 84 + { 85 + int i; 86 + struct pci_controller *hose, *tmp; 87 + struct property *map_prop; 88 + struct device_node *dn; 89 + 90 + pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL); 91 + if (!pci_to_OF_bus_map) { 92 + printk(KERN_ERR "Can't allocate OF bus map !\n"); 93 + return; 94 + } 95 + 96 + /* We fill the bus map with invalid values, that helps 97 + * debugging. 
98 + */ 99 + for (i = 0; i < pci_bus_count; i++) 100 + pci_to_OF_bus_map[i] = 0xff; 101 + 102 + /* For each hose, we begin searching bridges */ 103 + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 104 + struct device_node *node = hose->dn; 105 + 106 + if (!node) 107 + continue; 108 + make_one_node_map(node, hose->first_busno); 109 + } 110 + dn = of_find_node_by_path("/"); 111 + map_prop = of_find_property(dn, "pci-OF-bus-map", NULL); 112 + if (map_prop) { 113 + BUG_ON(pci_bus_count > map_prop->length); 114 + memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count); 115 + } 116 + of_node_put(dn); 117 + #ifdef DEBUG 118 + printk(KERN_INFO "PCI->OF bus map:\n"); 119 + for (i = 0; i < pci_bus_count; i++) { 120 + if (pci_to_OF_bus_map[i] == 0xff) 121 + continue; 122 + printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]); 123 + } 124 + #endif 125 + } 126 + 127 + typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data); 128 + 129 + static struct device_node *scan_OF_pci_childs(struct device_node *parent, 130 + pci_OF_scan_iterator filter, void *data) 131 + { 132 + struct device_node *node; 133 + struct device_node *sub_node; 134 + 135 + for_each_child_of_node(parent, node) { 136 + const unsigned int *class_code; 137 + 138 + if (filter(node, data)) { 139 + of_node_put(node); 140 + return node; 141 + } 142 + 143 + /* For PCI<->PCI bridges or CardBus bridges, we go down 144 + * Note: some OFs create a parent node "multifunc-device" as 145 + * a fake root for all functions of a multi-function device, 146 + * we go down them as well. 147 + */ 148 + class_code = of_get_property(node, "class-code", NULL); 149 + if ((!class_code || 150 + ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && 151 + (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && 152 + strcmp(node->name, "multifunc-device")) 153 + continue; 154 + sub_node = scan_OF_pci_childs(node, filter, data); 155 + if (sub_node) { 156 + of_node_put(node); 157 + return sub_node; 158 + } 159 + } 160 + return NULL; 161 + } 162 + 163 + static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, 164 + unsigned int devfn) 165 + { 166 + struct device_node *np, *cnp; 167 + const u32 *reg; 168 + unsigned int psize; 169 + 170 + for_each_child_of_node(parent, np) { 171 + reg = of_get_property(np, "reg", &psize); 172 + if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn) 173 + return np; 174 + 175 + /* Note: some OFs create a parent node "multifunc-device" as 176 + * a fake root for all functions of a multi-function device, 177 + * we go down them as well. */ 178 + if (!strcmp(np->name, "multifunc-device")) { 179 + cnp = scan_OF_for_pci_dev(np, devfn); 180 + if (cnp) 181 + return cnp; 182 + } 183 + } 184 + return NULL; 185 + } 186 + 187 + 188 + static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus) 189 + { 190 + struct device_node *parent, *np; 191 + 192 + /* Are we a root bus ? 
*/ 193 + if (bus->self == NULL || bus->parent == NULL) { 194 + struct pci_controller *hose = pci_bus_to_host(bus); 195 + if (hose == NULL) 196 + return NULL; 197 + return of_node_get(hose->dn); 198 + } 199 + 200 + /* not a root bus, we need to get our parent */ 201 + parent = scan_OF_for_pci_bus(bus->parent); 202 + if (parent == NULL) 203 + return NULL; 204 + 205 + /* now iterate for children for a match */ 206 + np = scan_OF_for_pci_dev(parent, bus->self->devfn); 207 + of_node_put(parent); 208 + 209 + return np; 210 + } 211 + 212 + /* 213 + * Scans the OF tree for a device node matching a PCI device 214 + */ 215 + struct device_node * 216 + pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) 217 + { 218 + struct device_node *parent, *np; 219 + 220 + pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); 221 + parent = scan_OF_for_pci_bus(bus); 222 + if (parent == NULL) 223 + return NULL; 224 + pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); 225 + np = scan_OF_for_pci_dev(parent, devfn); 226 + of_node_put(parent); 227 + pr_debug(" result is %s\n", np ? np->full_name : "<NULL>"); 228 + 229 + /* XXX most callers don't release the returned node 230 + * mostly because ppc64 doesn't increase the refcount, 231 + * we need to fix that. 232 + */ 233 + return np; 234 + } 235 + EXPORT_SYMBOL(pci_busdev_to_OF_node); 236 + 237 + struct device_node* 238 + pci_device_to_OF_node(struct pci_dev *dev) 239 + { 240 + return pci_busdev_to_OF_node(dev->bus, dev->devfn); 241 + } 242 + EXPORT_SYMBOL(pci_device_to_OF_node); 243 + 244 + static int 245 + find_OF_pci_device_filter(struct device_node *node, void *data) 246 + { 247 + return ((void *)node == data); 248 + } 249 + 250 + /* 251 + * Returns the PCI device matching a given OF node 252 + */ 253 + int 254 + pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn) 255 + { 256 + const unsigned int *reg; 257 + struct pci_controller *hose; 258 + struct pci_dev *dev = NULL; 259 + 260 + /* Make sure it's really a PCI device */ 261 + hose = pci_find_hose_for_OF_device(node); 262 + if (!hose || !hose->dn) 263 + return -ENODEV; 264 + if (!scan_OF_pci_childs(hose->dn, 265 + find_OF_pci_device_filter, (void *)node)) 266 + return -ENODEV; 267 + reg = of_get_property(node, "reg", NULL); 268 + if (!reg) 269 + return -ENODEV; 270 + *bus = (reg[0] >> 16) & 0xff; 271 + *devfn = ((reg[0] >> 8) & 0xff); 272 + 273 + /* Ok, here we need some tweak. If we have already renumbered 274 + * all busses, we can't rely on the OF bus number any more. 275 + * the pci_to_OF_bus_map is not enough as several PCI busses 276 + * may match the same OF bus number. 
277 + */ 278 + if (!pci_to_OF_bus_map) 279 + return 0; 280 + 281 + for_each_pci_dev(dev) 282 + if (pci_to_OF_bus_map[dev->bus->number] == *bus && 283 + dev->devfn == *devfn) { 284 + *bus = dev->bus->number; 285 + pci_dev_put(dev); 286 + return 0; 287 + } 288 + 289 + return -ENODEV; 290 + } 291 + EXPORT_SYMBOL(pci_device_from_OF_node); 292 + 293 + /* We create the "pci-OF-bus-map" property now so it appears in the 294 + * /proc device tree 295 + */ 296 + void __init 297 + pci_create_OF_bus_map(void) 298 + { 299 + struct property *of_prop; 300 + struct device_node *dn; 301 + 302 + of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \ 303 + 256); 304 + if (!of_prop) 305 + return; 306 + dn = of_find_node_by_path("/"); 307 + if (dn) { 308 + memset(of_prop, -1, sizeof(struct property) + 256); 309 + of_prop->name = "pci-OF-bus-map"; 310 + of_prop->length = 256; 311 + of_prop->value = &of_prop[1]; 312 + prom_add_property(dn, of_prop); 313 + of_node_put(dn); 314 + } 315 + } 316 + 317 + static void __devinit pcibios_scan_phb(struct pci_controller *hose) 318 + { 319 + struct pci_bus *bus; 320 + struct device_node *node = hose->dn; 321 + unsigned long io_offset; 322 + struct resource *res = &hose->io_resource; 323 + 324 + pr_debug("PCI: Scanning PHB %s\n", 325 + node ? node->full_name : "<NO NAME>"); 326 + 327 + /* Create an empty bus for the toplevel */ 328 + bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); 329 + if (bus == NULL) { 330 + printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", 331 + hose->global_number); 332 + return; 333 + } 334 + bus->secondary = hose->first_busno; 335 + hose->bus = bus; 336 + 337 + /* Fixup IO space offset */ 338 + io_offset = (unsigned long)hose->io_base_virt - isa_io_base; 339 + res->start = (res->start + io_offset) & 0xffffffffu; 340 + res->end = (res->end + io_offset) & 0xffffffffu; 341 + 342 + /* Wire up PHB bus resources */ 343 + pcibios_setup_phb_resources(hose); 344 + 345 + /* Scan children */ 346 + hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); 347 + } 348 + 349 + static int __init pcibios_init(void) 350 + { 351 + struct pci_controller *hose, *tmp; 352 + int next_busno = 0; 353 + 354 + printk(KERN_INFO "PCI: Probing PCI hardware\n"); 355 + 356 + if (pci_flags & PCI_REASSIGN_ALL_BUS) { 357 + printk(KERN_INFO "setting pci_asign_all_busses\n"); 358 + pci_assign_all_buses = 1; 359 + } 360 + 361 + /* Scan all of the recorded PCI controllers. */ 362 + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 363 + if (pci_assign_all_buses) 364 + hose->first_busno = next_busno; 365 + hose->last_busno = 0xff; 366 + pcibios_scan_phb(hose); 367 + printk(KERN_INFO "calling pci_bus_add_devices()\n"); 368 + pci_bus_add_devices(hose->bus); 369 + if (pci_assign_all_buses || next_busno <= hose->last_busno) 370 + next_busno = hose->last_busno + \ 371 + pcibios_assign_bus_offset; 372 + } 373 + pci_bus_count = next_busno; 374 + 375 + /* OpenFirmware based machines need a map of OF bus 376 + * numbers vs. kernel bus numbers since we may have to 377 + * remap them. 
378 + */ 379 + if (pci_assign_all_buses) 380 + pcibios_make_OF_bus_map(); 381 + 382 + /* Call common code to handle resource allocation */ 383 + pcibios_resource_survey(); 384 + 385 + return 0; 386 + } 387 + 388 + subsys_initcall(pcibios_init); 389 + 390 + static struct pci_controller* 391 + pci_bus_to_hose(int bus) 392 + { 393 + struct pci_controller *hose, *tmp; 394 + 395 + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 396 + if (bus >= hose->first_busno && bus <= hose->last_busno) 397 + return hose; 398 + return NULL; 399 + } 400 + 401 + /* Provide information on locations of various I/O regions in physical 402 + * memory. Do this on a per-card basis so that we choose the right 403 + * root bridge. 404 + * Note that the returned IO or memory base is a physical address 405 + */ 406 + 407 + long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) 408 + { 409 + struct pci_controller *hose; 410 + long result = -EOPNOTSUPP; 411 + 412 + hose = pci_bus_to_hose(bus); 413 + if (!hose) 414 + return -ENODEV; 415 + 416 + switch (which) { 417 + case IOBASE_BRIDGE_NUMBER: 418 + return (long)hose->first_busno; 419 + case IOBASE_MEMORY: 420 + return (long)hose->pci_mem_offset; 421 + case IOBASE_IO: 422 + return (long)hose->io_base_phys; 423 + case IOBASE_ISA_IO: 424 + return (long)isa_io_base; 425 + case IOBASE_ISA_MEM: 426 + return (long)isa_mem_base; 427 + } 428 + 429 + return result; 430 + }
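pci_device_from_OF_node() above resolves a device-tree node to the bus/devfn pair it describes, consulting pci_to_OF_bus_map when the buses have been renumbered. A minimal caller sketch, assuming PCI domain 0; example_pdev_from_node and my_node are hypothetical names used only for illustration:

/*
 * Illustrative sketch only: map a device-tree node to the pci_dev it
 * describes, using the helpers exported by pci_32.c above.
 */
#include <linux/pci.h>
#include <linux/of.h>

static struct pci_dev *example_pdev_from_node(struct device_node *my_node)
{
	u8 bus, devfn;

	if (pci_device_from_OF_node(my_node, &bus, &devfn))
		return NULL;	/* node does not describe a PCI device */

	/* Look up (and take a reference on) the matching pci_dev. */
	return pci_get_bus_and_slot(bus, devfn);
}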
+168
arch/microblaze/pci/xilinx_pci.c
···
··· 1 + /* 2 + * PCI support for Xilinx plbv46_pci soft-core which can be used on 3 + * Xilinx Virtex ML410 / ML510 boards. 4 + * 5 + * Copyright 2009 Roderick Colenbrander 6 + * Copyright 2009 Secret Lab Technologies Ltd. 7 + * 8 + * The pci bridge fixup code was copied from ppc4xx_pci.c and was written 9 + * by Benjamin Herrenschmidt. 10 + * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. 11 + * 12 + * This file is licensed under the terms of the GNU General Public License 13 + * version 2. This program is licensed "as is" without any warranty of any 14 + * kind, whether express or implied. 15 + */ 16 + 17 + #include <linux/ioport.h> 18 + #include <linux/of.h> 19 + #include <linux/pci.h> 20 + #include <asm/io.h> 21 + 22 + #define XPLB_PCI_ADDR 0x10c 23 + #define XPLB_PCI_DATA 0x110 24 + #define XPLB_PCI_BUS 0x114 25 + 26 + #define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \ 27 + PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY) 28 + 29 + static struct of_device_id xilinx_pci_match[] = { 30 + { .compatible = "xlnx,plbv46-pci-1.03.a", }, 31 + {} 32 + }; 33 + 34 + /** 35 + * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration. 36 + */ 37 + static void xilinx_pci_fixup_bridge(struct pci_dev *dev) 38 + { 39 + struct pci_controller *hose; 40 + int i; 41 + 42 + if (dev->devfn || dev->bus->self) 43 + return; 44 + 45 + hose = pci_bus_to_host(dev->bus); 46 + if (!hose) 47 + return; 48 + 49 + if (!of_match_node(xilinx_pci_match, hose->dn)) 50 + return; 51 + 52 + /* Hide the PCI host BARs from the kernel as their content doesn't 53 + * fit well in the resource management 54 + */ 55 + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 56 + dev->resource[i].start = 0; 57 + dev->resource[i].end = 0; 58 + dev->resource[i].flags = 0; 59 + } 60 + 61 + dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n", 62 + pci_name(dev)); 63 + } 64 + DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge); 65 + 66 + #ifdef DEBUG 67 + /** 68 + * xilinx_pci_exclude_device - Don't do config access for non-root bus 69 + * 70 + * This is a hack. Config access to any bus other than bus 0 does not 71 + * currently work on the ML510 so we prevent it here. 72 + */ 73 + static int 74 + xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn) 75 + { 76 + return (bus != 0); 77 + } 78 + 79 + /** 80 + * xilinx_early_pci_scan - List pci config space for available devices 81 + * 82 + * List pci devices in very early phase. 
83 + */ 84 + void __init xilinx_early_pci_scan(struct pci_controller *hose) 85 + { 86 + u32 bus = 0; 87 + u32 val, dev, func, offset; 88 + 89 + /* Currently we have only 2 device connected - up-to 32 devices */ 90 + for (dev = 0; dev < 2; dev++) { 91 + /* List only first function number - up-to 8 functions */ 92 + for (func = 0; func < 1; func++) { 93 + printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func); 94 + /* read the first 64 standardized bytes */ 95 + /* Up-to 192 bytes can be list of capabilities */ 96 + for (offset = 0; offset < 64; offset += 4) { 97 + early_read_config_dword(hose, bus, 98 + PCI_DEVFN(dev, func), offset, &val); 99 + if (offset == 0 && val == 0xFFFFFFFF) { 100 + printk(KERN_CONT "\nABSENT"); 101 + break; 102 + } 103 + if (!(offset % 0x10)) 104 + printk(KERN_CONT "\n%04x: ", offset); 105 + 106 + printk(KERN_CONT "%08x ", val); 107 + } 108 + printk(KERN_INFO "\n"); 109 + } 110 + } 111 + } 112 + #else 113 + void __init xilinx_early_pci_scan(struct pci_controller *hose) 114 + { 115 + } 116 + #endif 117 + 118 + /** 119 + * xilinx_pci_init - Find and register a Xilinx PCI host bridge 120 + */ 121 + void __init xilinx_pci_init(void) 122 + { 123 + struct pci_controller *hose; 124 + struct resource r; 125 + void __iomem *pci_reg; 126 + struct device_node *pci_node; 127 + 128 + pci_node = of_find_matching_node(NULL, xilinx_pci_match); 129 + if (!pci_node) 130 + return; 131 + 132 + if (of_address_to_resource(pci_node, 0, &r)) { 133 + pr_err("xilinx-pci: cannot resolve base address\n"); 134 + return; 135 + } 136 + 137 + hose = pcibios_alloc_controller(pci_node); 138 + if (!hose) { 139 + pr_err("xilinx-pci: pcibios_alloc_controller() failed\n"); 140 + return; 141 + } 142 + 143 + /* Setup config space */ 144 + setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, 145 + r.start + XPLB_PCI_DATA, 146 + INDIRECT_TYPE_SET_CFG_TYPE); 147 + 148 + /* According to the xilinx plbv46_pci documentation the soft-core starts 149 + * a self-init when the bus master enable bit is set. Without this bit 150 + * set the pci bus can't be scanned. 151 + */ 152 + early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD); 153 + 154 + /* Set the max latency timer to 255 */ 155 + early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff); 156 + 157 + /* Set the max bus number to 255, and bus/subbus no's to 0 */ 158 + pci_reg = of_iomap(pci_node, 0); 159 + out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff); 160 + iounmap(pci_reg); 161 + 162 + /* Register the host bridge with the linux kernel! */ 163 + pci_process_bridge_OF_ranges(hose, pci_node, 164 + INDIRECT_TYPE_SET_CFG_TYPE); 165 + 166 + pr_info("xilinx-pci: Registered PCI host bridge\n"); 167 + xilinx_early_pci_scan(hose); 168 + }
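xilinx_pci_init() above hands the bridge node to pci_process_bridge_OF_ranges(), which walks the "ranges" property in records of pna + 5 cells, where pna = of_n_addr_cells(dev), i.e. the number of CPU-side address cells. A sketch of what two such records look like to that parser, assuming pna = 1 so each record is 6 u32 cells; all addresses below are made-up example values, not taken from any real board:

/*
 * Illustrative sketch only: one PCI IO record and one 32-bit MEM record
 * as consumed by pci_process_bridge_OF_ranges() above (pna = 1, np = 6).
 */
#include <linux/types.h>

static const u32 example_ranges[] = {
	/* pci_space   PCI addr hi  PCI addr lo  CPU addr     size hi     size lo */
	0x01000000, 0x00000000, 0x00000000, 0xa0000000, 0x00000000, 0x00010000,
	/* (pci_space >> 24) & 0x3 == 1 -> PCI IO space,
	 * 64 KiB of IO at CPU address 0xa0000000, PCI address 0 */
	0x02000000, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x10000000,
	/* type 2 -> 32-bit MEM space; bit 30 clear -> non-prefetchable;
	 * 256 MiB mapped 1:1, so hose->pci_mem_offset ends up 0 */
};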
+1
drivers/pci/Makefile
··· 48 obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o 49 obj-$(CONFIG_X86_VISWS) += setup-irq.o 50 obj-$(CONFIG_MN10300) += setup-bus.o 51 52 # 53 # ACPI Related PCI FW Functions
··· 48 obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o 49 obj-$(CONFIG_X86_VISWS) += setup-irq.o 50 obj-$(CONFIG_MN10300) += setup-bus.o 51 + obj-$(CONFIG_MICROBLAZE) += setup-bus.o 52 53 # 54 # ACPI Related PCI FW Functions