Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[IA64] more robust zx1/sx1000 machvec support

Machine vector selection has always been a bit of a hack given how
early in system boot it needs to be done. Services like ACPI namespace
are not available and there are non-trivial problems with moving them to
early boot. However, there's no reason we can't change to a different
machvec later in boot when the services we need are available. By
adding an entry point for later initialization of the swiotlb, we can add
an error path for the hpzx1 machvec initialization and fall back to the
DIG machine vector if IOMMU hardware isn't found in the system. Since
ia64 uses 4GB for zone DMA (no ISA support), it's trivial to allocate a
contiguous range from the slab for bounce buffer usage.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

authored by

Alex Williamson and committed by
Tony Luck
0b9afede 1619cca2

+157 -29
+11 -2
arch/ia64/hp/common/hwsw_iommu.c
··· 17 17 #include <asm/machvec.h> 18 18 19 19 /* swiotlb declarations & definitions: */ 20 - extern void swiotlb_init_with_default_size (size_t size); 20 + extern int swiotlb_late_init_with_default_size (size_t size); 21 21 extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; 22 22 extern ia64_mv_dma_free_coherent swiotlb_free_coherent; 23 23 extern ia64_mv_dma_map_single swiotlb_map_single; ··· 67 67 hwsw_init (void) 68 68 { 69 69 /* default to a smallish 2MB sw I/O TLB */ 70 - swiotlb_init_with_default_size (2 * (1<<20)); 70 + if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) { 71 + #ifdef CONFIG_IA64_GENERIC 72 + /* Better to have normal DMA than panic */ 73 + printk(KERN_WARNING "%s: Failed to initialize software I/O TLB," 74 + " reverting to hpzx1 platform vector\n", __FUNCTION__); 75 + machvec_init("hpzx1"); 76 + #else 77 + panic("Unable to initialize software I/O TLB services"); 78 + #endif 79 + } 71 80 } 72 81 73 82 void *
+33 -14
arch/ia64/hp/common/sba_iommu.c
··· 2028 2028 static int __init 2029 2029 sba_init(void) 2030 2030 { 2031 - acpi_bus_register_driver(&acpi_sba_ioc_driver); 2032 - if (!ioc_list) 2031 + if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb")) 2033 2032 return 0; 2033 + 2034 + acpi_bus_register_driver(&acpi_sba_ioc_driver); 2035 + if (!ioc_list) { 2036 + #ifdef CONFIG_IA64_GENERIC 2037 + extern int swiotlb_late_init_with_default_size (size_t size); 2038 + 2039 + /* 2040 + * If we didn't find something sba_iommu can claim, we 2041 + * need to setup the swiotlb and switch to the dig machvec. 2042 + */ 2043 + if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) 2044 + panic("Unable to find SBA IOMMU or initialize " 2045 + "software I/O TLB: Try machvec=dig boot option"); 2046 + machvec_init("dig"); 2047 + #else 2048 + panic("Unable to find SBA IOMMU: Try a generic or DIG kernel"); 2049 + #endif 2050 + return 0; 2051 + } 2052 + 2053 + #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB) 2054 + /* 2055 + * hpzx1_swiotlb needs to have a fairly small swiotlb bounce 2056 + * buffer setup to support devices with smaller DMA masks than 2057 + * sba_iommu can handle. 2058 + */ 2059 + if (ia64_platform_is("hpzx1_swiotlb")) { 2060 + extern void hwsw_init(void); 2061 + 2062 + hwsw_init(); 2063 + } 2064 + #endif 2034 2065 2035 2066 #ifdef CONFIG_PCI 2036 2067 { ··· 2078 2047 } 2079 2048 2080 2049 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */ 2081 - 2082 - extern void dig_setup(char**); 2083 - /* 2084 - * MAX_DMA_ADDRESS needs to be setup prior to paging_init to do any good, 2085 - * so we use the platform_setup hook to fix it up. 2086 - */ 2087 - void __init 2088 - sba_setup(char **cmdline_p) 2089 - { 2090 - MAX_DMA_ADDRESS = ~0UL; 2091 - dig_setup(cmdline_p); 2092 - } 2093 2050 2094 2051 static int __init 2095 2052 nosbagart(char *str)
+102
arch/ia64/lib/swiotlb.c
··· 49 49 */ 50 50 #define IO_TLB_SHIFT 11 51 51 52 + #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) 53 + 54 + /* 55 + * Minimum IO TLB size to bother booting with. Systems with mainly 56 + * 64bit capable cards will only lightly use the swiotlb. If we can't 57 + * allocate a contiguous 1MB, we're probably in trouble anyway. 58 + */ 59 + #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) 60 + 52 61 int swiotlb_force; 53 62 54 63 /* ··· 161 152 swiotlb_init (void) 162 153 { 163 154 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 155 + } 156 + 157 + /* 158 + * Systems with larger DMA zones (those that don't support ISA) can 159 + * initialize the swiotlb later using the slab allocator if needed. 160 + * This should be just like above, but with some error catching. 161 + */ 162 + int 163 + swiotlb_late_init_with_default_size (size_t default_size) 164 + { 165 + unsigned long i, req_nslabs = io_tlb_nslabs; 166 + unsigned int order; 167 + 168 + if (!io_tlb_nslabs) { 169 + io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); 170 + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); 171 + } 172 + 173 + /* 174 + * Get IO TLB memory from the low pages 175 + */ 176 + order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 177 + io_tlb_nslabs = SLABS_PER_PAGE << order; 178 + 179 + while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 180 + io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, 181 + order); 182 + if (io_tlb_start) 183 + break; 184 + order--; 185 + } 186 + 187 + if (!io_tlb_start) 188 + goto cleanup1; 189 + 190 + if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) { 191 + printk(KERN_WARNING "Warning: only able to allocate %ld MB " 192 + "for software IO TLB\n", (PAGE_SIZE << order) >> 20); 193 + io_tlb_nslabs = SLABS_PER_PAGE << order; 194 + } 195 + io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 196 + memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 197 + 198 + /* 199 + * Allocate 
and initialize the free list array. This array is used 200 + * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE 201 + * between io_tlb_start and io_tlb_end. 202 + */ 203 + io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, 204 + get_order(io_tlb_nslabs * sizeof(int))); 205 + if (!io_tlb_list) 206 + goto cleanup2; 207 + 208 + for (i = 0; i < io_tlb_nslabs; i++) 209 + io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 210 + io_tlb_index = 0; 211 + 212 + io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, 213 + get_order(io_tlb_nslabs * sizeof(char *))); 214 + if (!io_tlb_orig_addr) 215 + goto cleanup3; 216 + 217 + memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); 218 + 219 + /* 220 + * Get the overflow emergency buffer 221 + */ 222 + io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA, 223 + get_order(io_tlb_overflow)); 224 + if (!io_tlb_overflow_buffer) 225 + goto cleanup4; 226 + 227 + printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - " 228 + "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20, 229 + virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); 230 + 231 + return 0; 232 + 233 + cleanup4: 234 + free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * 235 + sizeof(char *))); 236 + io_tlb_orig_addr = NULL; 237 + cleanup3: 238 + free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * 239 + sizeof(int))); 240 + io_tlb_list = NULL; 241 + io_tlb_end = NULL; 242 + cleanup2: 243 + free_pages((unsigned long)io_tlb_start, order); 244 + io_tlb_start = NULL; 245 + cleanup1: 246 + io_tlb_nslabs = req_nslabs; 247 + return -ENOMEM; 164 248 } 165 249 166 250 static inline int
+10 -11
include/asm-ia64/machvec_hpzx1.h
··· 1 1 #ifndef _ASM_IA64_MACHVEC_HPZX1_h 2 2 #define _ASM_IA64_MACHVEC_HPZX1_h 3 3 4 - extern ia64_mv_setup_t dig_setup; 5 - extern ia64_mv_setup_t sba_setup; 4 + extern ia64_mv_setup_t dig_setup; 6 5 extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; 7 6 extern ia64_mv_dma_free_coherent sba_free_coherent; 8 7 extern ia64_mv_dma_map_single sba_map_single; ··· 18 19 * platform's machvec structure. When compiling a non-generic kernel, 19 20 * the macros are used directly. 20 21 */ 21 - #define platform_name "hpzx1" 22 - #define platform_setup sba_setup 23 - #define platform_dma_init machvec_noop 24 - #define platform_dma_alloc_coherent sba_alloc_coherent 25 - #define platform_dma_free_coherent sba_free_coherent 26 - #define platform_dma_map_single sba_map_single 27 - #define platform_dma_unmap_single sba_unmap_single 28 - #define platform_dma_map_sg sba_map_sg 29 - #define platform_dma_unmap_sg sba_unmap_sg 22 + #define platform_name "hpzx1" 23 + #define platform_setup dig_setup 24 + #define platform_dma_init machvec_noop 25 + #define platform_dma_alloc_coherent sba_alloc_coherent 26 + #define platform_dma_free_coherent sba_free_coherent 27 + #define platform_dma_map_single sba_map_single 28 + #define platform_dma_unmap_single sba_unmap_single 29 + #define platform_dma_map_sg sba_map_sg 30 + #define platform_dma_unmap_sg sba_unmap_sg 30 31 #define platform_dma_sync_single_for_cpu machvec_dma_sync_single 31 32 #define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg 32 33 #define platform_dma_sync_single_for_device machvec_dma_sync_single
+1 -2
include/asm-ia64/machvec_hpzx1_swiotlb.h
··· 2 2 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h 3 3 4 4 extern ia64_mv_setup_t dig_setup; 5 - extern ia64_mv_dma_init hwsw_init; 6 5 extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent; 7 6 extern ia64_mv_dma_free_coherent hwsw_free_coherent; 8 7 extern ia64_mv_dma_map_single hwsw_map_single; ··· 25 26 #define platform_name "hpzx1_swiotlb" 26 27 27 28 #define platform_setup dig_setup 28 - #define platform_dma_init hwsw_init 29 + #define platform_dma_init machvec_noop 29 30 #define platform_dma_alloc_coherent hwsw_alloc_coherent 30 31 #define platform_dma_free_coherent hwsw_free_coherent 31 32 #define platform_dma_map_single hwsw_map_single