Merge branch 'x86-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-iommu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, iommu: Update header comments with appropriate naming
ia64, iommu: Add a dummy iommu_table.h file in IA64.
x86, iommu: Fix IOMMU_INIT alignment rules
x86, doc: Adding comments about .iommu_table and its neighbors.
x86, iommu: Utilize the IOMMU_INIT macros functionality.
x86, VT-d: Make Intel VT-d IOMMU use IOMMU_INIT_* macros.
x86, GART/AMD-VI: Make AMD GART and IOMMU use IOMMU_INIT_* macros.
x86, calgary: Make Calgary IOMMU use IOMMU_INIT_* macros.
x86, xen-swiotlb: Make Xen-SWIOTLB use IOMMU_INIT_* macros.
x86, swiotlb: Make SWIOTLB use IOMMU_INIT_* macros.
x86, swiotlb: Simplify SWIOTLB pci_swiotlb_detect routine.
x86, iommu: Add proper dependency sort routine (and sanity check).
x86, iommu: Make all IOMMU's detection routines return a value.
x86, iommu: Add IOMMU_INIT macros, .iommu_table section, and iommu_table_entry structure
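The net effect of the series: instead of pci-dma.c hard-coding the order in which every IOMMU detection routine runs, each IOMMU now registers a struct iommu_table_entry in the new .iommu_table section and names the detection routine it must run after; pci_iommu_alloc() sorts the table by dependency at boot and walks it. A converted driver ends up with roughly this shape (the names below are hypothetical; the real conversions are in the hunks that follow):

    /* Hypothetical driver, for illustration only. */
    int  __init my_iommu_detect(void);      /* returns > 0 when hardware is found */
    void __init my_iommu_early_init(void);  /* runs before a memory allocator exists */
    void __init my_iommu_late_init(void);   /* runs from pci_iommu_init() */

    IOMMU_INIT_FINISH(my_iommu_detect,
                      gart_iommu_hole_init,   /* our detect runs after this one */
                      my_iommu_early_init,
                      my_iommu_late_init);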

+345 -58
+6
arch/ia64/include/asm/iommu_table.h
··· 1 + #ifndef _ASM_IA64_IOMMU_TABLE_H 2 + #define _ASM_IA64_IOMMU_TABLE_H 3 + 4 + #define IOMMU_INIT_POST(_detect) 5 + 6 + #endif /* _ASM_IA64_IOMMU_TABLE_H */
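The stub above exists because drivers/pci/dmar.c is shared between x86 and IA64 and now ends with an IOMMU_INIT_POST registration (see the dmar.c hunk below). On x86 that line drops an entry into the .iommu_table section; on IA64 the macro must simply expand to nothing so the shared file keeps compiling:

    /* In drivers/pci/dmar.c; a table entry on x86, a no-op on IA64. */
    IOMMU_INIT_POST(detect_intel_iommu);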
+2 -2
arch/x86/include/asm/amd_iommu.h
··· 24 24 25 25 #ifdef CONFIG_AMD_IOMMU 26 26 27 - extern void amd_iommu_detect(void); 27 + extern int amd_iommu_detect(void); 28 28 29 29 #else 30 30 31 - static inline void amd_iommu_detect(void) { } 31 + static inline int amd_iommu_detect(void) { return -ENODEV; } 32 32 33 33 #endif 34 34
+2 -2
arch/x86/include/asm/calgary.h
··· 62 62 extern int use_calgary; 63 63 64 64 #ifdef CONFIG_CALGARY_IOMMU 65 - extern void detect_calgary(void); 65 + extern int detect_calgary(void); 66 66 #else 67 - static inline void detect_calgary(void) { return; } 67 + static inline int detect_calgary(void) { return -ENODEV; } 68 68 #endif 69 69 70 70 #endif /* _ASM_X86_CALGARY_H */
+3 -2
arch/x86/include/asm/gart.h
··· 37 37 extern void early_gart_iommu_check(void); 38 38 extern int gart_iommu_init(void); 39 39 extern void __init gart_parse_options(char *); 40 - extern void gart_iommu_hole_init(void); 40 + extern int gart_iommu_hole_init(void); 41 41 42 42 #else 43 43 #define gart_iommu_aperture 0 ··· 50 50 static inline void gart_parse_options(char *options) 51 51 { 52 52 } 53 - static inline void gart_iommu_hole_init(void) 53 + static inline int gart_iommu_hole_init(void) 54 54 { 55 + return -ENODEV; 55 56 } 56 57 #endif 57 58
+100
arch/x86/include/asm/iommu_table.h
··· 1 + #ifndef _ASM_X86_IOMMU_TABLE_H 2 + #define _ASM_X86_IOMMU_TABLE_H 3 + 4 + #include <asm/swiotlb.h> 5 + 6 + /* 7 + * History lesson: 8 + * The execution chain of IOMMUs in 2.6.36 looks as so: 9 + * 10 + * [xen-swiotlb] 11 + * | 12 + * +----[swiotlb *]--+ 13 + * / | \ 14 + * / | \ 15 + * [GART] [Calgary] [Intel VT-d] 16 + * / 17 + * / 18 + * [AMD-Vi] 19 + * 20 + * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip 21 + * over the rest of IOMMUs and unconditionally initialize the SWIOTLB. 22 + * Also it would surreptitiously set swiotlb=1 if there were 23 + * more than 4GB and if the user did not pass in 'iommu=off'. The swiotlb 24 + * flag would be turned off by all IOMMUs except the Calgary one. 25 + * 26 + * The IOMMU_INIT* macros allow a similar tree (or more complex if desired) 27 + * to be built by defining who we depend on. 28 + * 29 + * And all that needs to be done is to use one of the macros in the IOMMU 30 + * and the pci-dma.c will take care of the rest. 31 + */ 32 + 33 + struct iommu_table_entry { 34 + initcall_t detect; 35 + initcall_t depend; 36 + void (*early_init)(void); /* No memory allocator available. */ 37 + void (*late_init)(void); /* Yes, can allocate memory. */ 38 + #define IOMMU_FINISH_IF_DETECTED (1<<0) 39 + #define IOMMU_DETECTED (1<<1) 40 + int flags; 41 + }; 42 + /* 43 + * This macro fills out an entry in the .iommu_table that is equivalent 44 + * to the fields that 'struct iommu_table_entry' has. The entries 45 + * that are put in the .iommu_table section are not put in any order 46 + * hence during boot-time we will have to sort them based on 47 + * dependency. */ 48 + 49 + 50 + #define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\ 51 + static const struct iommu_table_entry \ 52 + __iommu_entry_##_detect __used \ 53 + __attribute__ ((unused, __section__(".iommu_table"), \ 54 + aligned((sizeof(void *))))) \ 55 + = {_detect, _depend, _early_init, _late_init, \ 56 + _finish ? IOMMU_FINISH_IF_DETECTED : 0} 57 + /* 58 + * The simplest IOMMU definition. Provide the detection routine 59 + * and it will be run after the SWIOTLB and the other IOMMUs 60 + * that utilize this macro. If the IOMMU is detected (i.e., the 61 + * detect routine returns a positive value), the other IOMMUs 62 + * are also checked. You can use IOMMU_INIT_POST_FINISH if you prefer 63 + * to stop detecting the other IOMMUs after yours has been detected. 64 + */ 65 + #define IOMMU_INIT_POST(_detect) \ 66 + __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 0) 67 + 68 + #define IOMMU_INIT_POST_FINISH(_detect) \ 69 + __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 1) 70 + 71 + /* 72 + * A more sophisticated version of IOMMU_INIT. This variant requires: 73 + * a). A detection routine function. 74 + * b). The name of the detection routine we depend on to get called 75 + * before us. 76 + * c). The init routine, which gets called (from pci_iommu_alloc) if the 77 + * detection routine returns a positive value. At this point there is 78 + * no memory allocator available yet. 79 + * d). Similar to the 'init', except that this gets called from pci_iommu_init 80 + * where we do have a memory allocator. 81 + * 82 + * The standard variant differs from the _FINISH one in that the standard 83 + * variant will continue detecting other IOMMUs in the call list after 84 + * the detection routine returns a positive number, while the _FINISH will 85 + * stop the execution chain. Both will still call the 'init' and 86 + * 'late_init' functions if they are set.
87 + */ 88 + #define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init) \ 89 + __IOMMU_INIT(_detect, _depend, _init, _late_init, 1) 90 + 91 + #define IOMMU_INIT(_detect, _depend, _init, _late_init) \ 92 + __IOMMU_INIT(_detect, _depend, _init, _late_init, 0) 93 + 94 + void sort_iommu_table(struct iommu_table_entry *start, 95 + struct iommu_table_entry *finish); 96 + 97 + void check_iommu_entries(struct iommu_table_entry *start, 98 + struct iommu_table_entry *finish); 99 + 100 + #endif /* _ASM_X86_IOMMU_TABLE_H */
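For reference, IOMMU_INIT_POST(detect_calgary) from the Calgary hunk below expands to roughly the following object in .iommu_table (designated initializers are used here only for readability; the real macro uses positional ones):

    static const struct iommu_table_entry __iommu_entry_detect_calgary __used
    __attribute__ ((__section__(".iommu_table"), aligned(sizeof(void *)))) = {
            .detect = detect_calgary,
            .depend = pci_swiotlb_detect_4gb,  /* run after the 4GB swiotlb check */
            /* no early_init/late_init; flags == 0, so other IOMMUs keep being probed */
    };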
+11 -2
arch/x86/include/asm/swiotlb.h
··· 5 5 6 6 #ifdef CONFIG_SWIOTLB 7 7 extern int swiotlb; 8 - extern int __init pci_swiotlb_detect(void); 8 + extern int __init pci_swiotlb_detect_override(void); 9 + extern int __init pci_swiotlb_detect_4gb(void); 9 10 extern void __init pci_swiotlb_init(void); 11 + extern void __init pci_swiotlb_late_init(void); 10 12 #else 11 13 #define swiotlb 0 12 - static inline int pci_swiotlb_detect(void) 14 + static inline int pci_swiotlb_detect_override(void) 15 + { 16 + return 0; 17 + } 18 + static inline int pci_swiotlb_detect_4gb(void) 13 19 { 14 20 return 0; 15 21 } 16 22 static inline void pci_swiotlb_init(void) 23 + { 24 + } 25 + static inline void pci_swiotlb_late_init(void) 17 26 { 18 27 } 19 28 #endif
+1
arch/x86/kernel/Makefile
··· 45 45 obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o 46 46 obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o 47 47 obj-y += tsc.o io_delay.o rtc.o 48 + obj-y += pci-iommu_table.o 48 49 49 50 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o 50 51 obj-y += process.o
+11 -4
arch/x86/kernel/amd_iommu_init.c
··· 31 31 #include <asm/iommu.h> 32 32 #include <asm/gart.h> 33 33 #include <asm/x86_init.h> 34 - 34 + #include <asm/iommu_table.h> 35 35 /* 36 36 * definitions for the ACPI scanning code 37 37 */ ··· 1499 1499 return 0; 1500 1500 } 1501 1501 1502 - void __init amd_iommu_detect(void) 1502 + int __init amd_iommu_detect(void) 1503 1503 { 1504 1504 if (no_iommu || (iommu_detected && !gart_iommu_aperture)) 1505 - return; 1505 + return -ENODEV; 1506 1506 1507 1507 if (amd_iommu_disabled) 1508 - return; 1508 + return -ENODEV; 1509 1509 1510 1510 if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { 1511 1511 iommu_detected = 1; ··· 1514 1514 1515 1515 /* Make sure ACS will be enabled */ 1516 1516 pci_request_acs(); 1517 + return 1; 1517 1518 } 1519 + return -ENODEV; 1518 1520 } 1519 1521 1520 1522 /**************************************************************************** ··· 1547 1545 1548 1546 __setup("amd_iommu_dump", parse_amd_iommu_dump); 1549 1547 __setup("amd_iommu=", parse_amd_iommu_options); 1548 + 1549 + IOMMU_INIT_FINISH(amd_iommu_detect, 1550 + gart_iommu_hole_init, 1551 + 0, 1552 + 0);
+7 -4
arch/x86/kernel/aperture_64.c
··· 371 371 372 372 static int __initdata printed_gart_size_msg; 373 373 374 - void __init gart_iommu_hole_init(void) 374 + int __init gart_iommu_hole_init(void) 375 375 { 376 376 u32 agp_aper_base = 0, agp_aper_order = 0; 377 377 u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0; ··· 381 381 382 382 if (gart_iommu_aperture_disabled || !fix_aperture || 383 383 !early_pci_allowed()) 384 - return; 384 + return -ENODEV; 385 385 386 386 printk(KERN_INFO "Checking aperture...\n"); 387 387 ··· 463 463 unsigned long n = (32 * 1024 * 1024) << last_aper_order; 464 464 465 465 insert_aperture_resource((u32)last_aper_base, n); 466 + return 1; 466 467 } 467 - return; 468 + return 0; 468 469 } 469 470 470 471 if (!fallback_aper_force) { ··· 501 500 panic("Not enough memory for aperture"); 502 501 } 503 502 } else { 504 - return; 503 + return 0; 505 504 } 506 505 507 506 /* Fix up the north bridges */ ··· 527 526 } 528 527 529 528 set_up_gart_resume(aper_order, aper_alloc); 529 + 530 + return 1; 530 531 }
+11 -7
arch/x86/kernel/pci-calgary_64.c
··· 47 47 #include <asm/rio.h> 48 48 #include <asm/bios_ebda.h> 49 49 #include <asm/x86_init.h> 50 + #include <asm/iommu_table.h> 50 51 51 52 #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT 52 53 int use_calgary __read_mostly = 1; ··· 1365 1364 return 0; 1366 1365 } 1367 1366 1368 - void __init detect_calgary(void) 1367 + int __init detect_calgary(void) 1369 1368 { 1370 1369 int bus; 1371 1370 void *tbl; ··· 1379 1378 * another HW IOMMU already, bail out. 1380 1379 */ 1381 1380 if (no_iommu || iommu_detected) 1382 - return; 1381 + return -ENODEV; 1383 1382 1384 1383 if (!use_calgary) 1385 - return; 1384 + return -ENODEV; 1386 1385 1387 1386 if (!early_pci_allowed()) 1388 - return; 1387 + return -ENODEV; 1389 1388 1390 1389 printk(KERN_DEBUG "Calgary: detecting Calgary via BIOS EBDA area\n"); 1391 1390 ··· 1411 1410 if (!rio_table_hdr) { 1412 1411 printk(KERN_DEBUG "Calgary: Unable to locate Rio Grande table " 1413 1412 "in EBDA - bailing!\n"); 1414 - return; 1413 + return -ENODEV; 1415 1414 } 1416 1415 1417 1416 ret = build_detail_arrays(); 1418 1417 if (ret) { 1419 1418 printk(KERN_DEBUG "Calgary: build_detail_arrays ret %d\n", ret); 1420 - return; 1419 + return -ENOMEM; 1421 1420 } 1422 1421 1423 1422 specified_table_size = determine_tce_table_size((is_kdump_kernel() ? ··· 1465 1464 1466 1465 x86_init.iommu.iommu_init = calgary_iommu_init; 1467 1466 } 1468 - return; 1467 + return calgary_found; 1469 1468 1470 1469 cleanup: 1471 1470 for (--bus; bus >= 0; --bus) { ··· 1474 1473 if (info->tce_space) 1475 1474 free_tce_table(info->tce_space); 1476 1475 } 1476 + return -ENOMEM; 1477 1477 } 1478 1478 1479 1479 static int __init calgary_parse_options(char *p) ··· 1596 1594 * and before device_initcall. 1597 1595 */ 1598 1596 rootfs_initcall(calgary_fixup_tce_spaces); 1597 + 1598 + IOMMU_INIT_POST(detect_calgary);
+21 -23
arch/x86/kernel/pci-dma.c
··· 11 11 #include <asm/iommu.h> 12 12 #include <asm/gart.h> 13 13 #include <asm/calgary.h> 14 - #include <asm/amd_iommu.h> 15 14 #include <asm/x86_init.h> 16 - #include <asm/xen/swiotlb-xen.h> 15 + #include <asm/iommu_table.h> 17 16 18 17 static int forbid_dac __read_mostly; 19 18 ··· 43 44 * guests and not for driver dma translation. 44 45 */ 45 46 int iommu_pass_through __read_mostly; 47 + 48 + extern struct iommu_table_entry __iommu_table[], __iommu_table_end[]; 46 49 47 50 /* Dummy device used for NULL arguments (normally ISA). */ 48 51 struct device x86_dma_fallback_dev = { ··· 131 130 132 131 void __init pci_iommu_alloc(void) 133 132 { 133 + struct iommu_table_entry *p; 134 + 134 135 /* free the range so iommu could get some range less than 4G */ 135 136 dma32_free_bootmem(); 136 137 137 - if (pci_xen_swiotlb_detect() || pci_swiotlb_detect()) 138 - goto out; 138 + sort_iommu_table(__iommu_table, __iommu_table_end); 139 + check_iommu_entries(__iommu_table, __iommu_table_end); 139 140 140 - gart_iommu_hole_init(); 141 - 142 - detect_calgary(); 143 - 144 - detect_intel_iommu(); 145 - 146 - /* needs to be called after gart_iommu_hole_init */ 147 - amd_iommu_detect(); 148 - out: 149 - pci_xen_swiotlb_init(); 150 - 151 - pci_swiotlb_init(); 141 + for (p = __iommu_table; p < __iommu_table_end; p++) { 142 + if (p && p->detect && p->detect() > 0) { 143 + p->flags |= IOMMU_DETECTED; 144 + if (p->early_init) 145 + p->early_init(); 146 + if (p->flags & IOMMU_FINISH_IF_DETECTED) 147 + break; 148 + } 149 + } 152 150 } 153 - 154 151 void *dma_generic_alloc_coherent(struct device *dev, size_t size, 155 152 dma_addr_t *dma_addr, gfp_t flag) 156 153 { ··· 291 292 292 293 static int __init pci_iommu_init(void) 293 294 { 295 + struct iommu_table_entry *p; 294 296 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 295 297 296 298 #ifdef CONFIG_PCI ··· 299 299 #endif 300 300 x86_init.iommu.iommu_init(); 301 301 302 - if (swiotlb || xen_swiotlb) { 303 - printk(KERN_INFO "PCI-DMA: " 304 - "Using software bounce buffering for IO (SWIOTLB)\n"); 305 - swiotlb_print_info(); 306 - } else 307 - swiotlb_free(); 302 + for (p = __iommu_table; p < __iommu_table_end; p++) { 303 + if (p && (p->flags & IOMMU_DETECTED) && p->late_init) 304 + p->late_init(); 305 + } 308 306 309 307 return 0; 310 308 }
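The new pci_iommu_alloc() loop treats a positive return from ->detect as "hardware found": the entry's early_init runs immediately (no memory allocator is up yet), and if the entry was registered with one of the _FINISH macros the walk stops there. pci_iommu_init() later calls late_init for every entry that was flagged IOMMU_DETECTED. A minimal sketch of a detection routine written against that convention (the my_iommu_* names are made up):

    /* Hypothetical helpers, for illustration only. */
    static bool my_iommu_hw_present(void);
    static int  my_iommu_init(void);

    int __init my_iommu_detect(void)
    {
            if (no_iommu || iommu_detected)
                    return -ENODEV;         /* another IOMMU already claimed DMA */

            if (!my_iommu_hw_present())     /* hypothetical hardware probe */
                    return -ENODEV;

            iommu_detected = 1;
            x86_init.iommu.iommu_init = my_iommu_init;
            return 1;                       /* > 0: flag IOMMU_DETECTED, run early_init */
    }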
+2
arch/x86/kernel/pci-gart_64.c
··· 41 41 #include <asm/dma.h> 42 42 #include <asm/amd_nb.h> 43 43 #include <asm/x86_init.h> 44 + #include <asm/iommu_table.h> 44 45 45 46 static unsigned long iommu_bus_base; /* GART remapping area (physical) */ 46 47 static unsigned long iommu_size; /* size of remapping area bytes */ ··· 906 905 } 907 906 } 908 907 } 908 + IOMMU_INIT_POST(gart_iommu_hole_init);
+89
arch/x86/kernel/pci-iommu_table.c
··· 1 + #include <linux/dma-mapping.h> 2 + #include <asm/iommu_table.h> 3 + #include <linux/string.h> 4 + #include <linux/kallsyms.h> 5 + 6 + 7 + #define DEBUG 1 8 + 9 + static struct iommu_table_entry * __init 10 + find_dependents_of(struct iommu_table_entry *start, 11 + struct iommu_table_entry *finish, 12 + struct iommu_table_entry *q) 13 + { 14 + struct iommu_table_entry *p; 15 + 16 + if (!q) 17 + return NULL; 18 + 19 + for (p = start; p < finish; p++) 20 + if (p->detect == q->depend) 21 + return p; 22 + 23 + return NULL; 24 + } 25 + 26 + 27 + void __init sort_iommu_table(struct iommu_table_entry *start, 28 + struct iommu_table_entry *finish) { 29 + 30 + struct iommu_table_entry *p, *q, tmp; 31 + 32 + for (p = start; p < finish; p++) { 33 + again: 34 + q = find_dependents_of(start, finish, p); 35 + /* We are a bit sneaky here. We use the memory address to figure 36 + * out if the node we depend on is past our point, if so, swap. 37 + */ 38 + if (q > p) { 39 + tmp = *p; 40 + memmove(p, q, sizeof(*p)); 41 + *q = tmp; 42 + goto again; 43 + } 44 + } 45 + 46 + } 47 + 48 + #ifdef DEBUG 49 + void __init check_iommu_entries(struct iommu_table_entry *start, 50 + struct iommu_table_entry *finish) 51 + { 52 + struct iommu_table_entry *p, *q, *x; 53 + char sym_p[KSYM_SYMBOL_LEN]; 54 + char sym_q[KSYM_SYMBOL_LEN]; 55 + 56 + /* Simple cyclic dependency checker. */ 57 + for (p = start; p < finish; p++) { 58 + q = find_dependents_of(start, finish, p); 59 + x = find_dependents_of(start, finish, q); 60 + if (p == x) { 61 + sprint_symbol(sym_p, (unsigned long)p->detect); 62 + sprint_symbol(sym_q, (unsigned long)q->detect); 63 + 64 + printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \ 65 + " on %s and vice-versa. BREAKING IT.\n", 66 + sym_p, sym_q); 67 + /* Heavy-handed way. */ 68 + x->depend = 0; 69 + } 70 + } 71 + 72 + for (p = start; p < finish; p++) { 73 + q = find_dependents_of(p, finish, p); 74 + if (q && q > p) { 75 + sprint_symbol(sym_p, (unsigned long)p->detect); 76 + sprint_symbol(sym_q, (unsigned long)q->detect); 77 + 78 + printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\ 79 + "should be called before %s!\n", 80 + sym_p, sym_q); 81 + } 82 + } 83 + } 84 + #else 85 + inline void check_iommu_entries(struct iommu_table_entry *start, 86 + struct iommu_table_entry *finish) 87 + { 88 + } 89 + #endif
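sort_iommu_table() is a simple swap-until-stable pass: for each entry, look up the entry it depends on; if that dependency sits later in the array, swap the two and re-check. A stand-alone userspace model of the same pass, with string names standing in for the detect/depend function pointers (all names made up):

    #include <stdio.h>
    #include <string.h>

    struct entry {
            const char *name;       /* stands in for ->detect */
            const char *depend;     /* stands in for ->depend */
    };

    /* Return the entry that 'q' depends on, or NULL if it has none. */
    static struct entry *find_dependency(struct entry *start, struct entry *finish,
                                         struct entry *q)
    {
            struct entry *p;

            if (!q || !q->depend)
                    return NULL;
            for (p = start; p < finish; p++)
                    if (strcmp(p->name, q->depend) == 0)
                            return p;
            return NULL;
    }

    static void sort_table(struct entry *start, struct entry *finish)
    {
            struct entry *p, *q, tmp;

            for (p = start; p < finish; p++) {
    again:
                    q = find_dependency(start, finish, p);
                    if (q > p) {            /* our dependency is behind us: swap */
                            tmp = *p;
                            *p = *q;
                            *q = tmp;
                            goto again;
                    }
            }
    }

    int main(void)
    {
            struct entry table[] = {
                    { "amd",     "gart"    },
                    { "gart",    "swiotlb" },
                    { "swiotlb", NULL      },
            };
            int i, n = sizeof(table) / sizeof(table[0]);

            sort_table(table, table + n);
            for (i = 0; i < n; i++)
                    printf("%s\n", table[i].name);  /* swiotlb, gart, amd */
            return 0;
    }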
+38 -8
arch/x86/kernel/pci-swiotlb.c
··· 10 10 #include <asm/iommu.h> 11 11 #include <asm/swiotlb.h> 12 12 #include <asm/dma.h> 13 - 13 + #include <asm/xen/swiotlb-xen.h> 14 + #include <asm/iommu_table.h> 14 15 int swiotlb __read_mostly; 15 16 16 17 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, ··· 42 41 }; 43 42 44 43 /* 45 - * pci_swiotlb_detect - set swiotlb to 1 if necessary 44 + * pci_swiotlb_detect_override - set swiotlb to 1 if necessary 46 45 * 47 46 * This returns non-zero if we are forced to use swiotlb (by the boot 48 47 * option). 49 48 */ 50 - int __init pci_swiotlb_detect(void) 49 + int __init pci_swiotlb_detect_override(void) 51 50 { 52 51 int use_swiotlb = swiotlb | swiotlb_force; 53 52 54 - /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 55 - #ifdef CONFIG_X86_64 56 - if (!no_iommu && max_pfn > MAX_DMA32_PFN) 57 - swiotlb = 1; 58 - #endif 59 53 if (swiotlb_force) 60 54 swiotlb = 1; 61 55 62 56 return use_swiotlb; 63 57 } 58 + IOMMU_INIT_FINISH(pci_swiotlb_detect_override, 59 + pci_xen_swiotlb_detect, 60 + pci_swiotlb_init, 61 + pci_swiotlb_late_init); 62 + 63 + /* 64 + * if 4GB or more detected (and iommu=off not set) return 1 65 + * and set swiotlb to 1. 66 + */ 67 + int __init pci_swiotlb_detect_4gb(void) 68 + { 69 + /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 70 + #ifdef CONFIG_X86_64 71 + if (!no_iommu && max_pfn > MAX_DMA32_PFN) 72 + swiotlb = 1; 73 + #endif 74 + return swiotlb; 75 + } 76 + IOMMU_INIT(pci_swiotlb_detect_4gb, 77 + pci_swiotlb_detect_override, 78 + pci_swiotlb_init, 79 + pci_swiotlb_late_init); 64 80 65 81 void __init pci_swiotlb_init(void) 66 82 { 67 83 if (swiotlb) { 68 84 swiotlb_init(0); 69 85 dma_ops = &swiotlb_dma_ops; 86 + } 87 + } 88 + 89 + void __init pci_swiotlb_late_init(void) 90 + { 91 + /* An IOMMU turned us off. */ 92 + if (!swiotlb) 93 + swiotlb_free(); 94 + else { 95 + printk(KERN_INFO "PCI-DMA: " 96 + "Using software bounce buffering for IO (SWIOTLB)\n"); 97 + swiotlb_print_info(); 70 98 } 71 99 }
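Taken together with the Xen, GART, Calgary, VT-d and AMD-Vi registrations elsewhere in this merge, the dependency tree that sort_iommu_table() reconstructs at boot ends up looking roughly like the old hard-coded chain:

    [pci_xen_swiotlb_detect]
               |
    [pci_swiotlb_detect_override]
               |
    [pci_swiotlb_detect_4gb]
        /      |        \
    [GART] [Calgary] [Intel VT-d]
      /
    [AMD-Vi]

The Xen and the 'override' entries are _FINISH registrations, so a Xen PV guest or a forced swiotlb ('swiotlb=force'/'iommu=soft') stops the walk before any hardware IOMMU is probed.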
+28
arch/x86/kernel/vmlinux.lds.S
··· 242 242 __x86_cpu_dev_end = .; 243 243 } 244 244 245 + /* 246 + * start address and size of operations which during runtime 247 + * can be patched with virtualization friendly instructions or 248 + * baremetal native ones. Think page table operations. 249 + * Details in paravirt_types.h 250 + */ 245 251 . = ALIGN(8); 246 252 .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { 247 253 __parainstructions = .; ··· 255 249 __parainstructions_end = .; 256 250 } 257 251 252 + /* 253 + * struct alt_instr entries. From the header (alternative.h): 254 + * "Alternative instructions for different CPU types or capabilities" 255 + * Think locking instructions on spinlocks. 256 + */ 258 257 . = ALIGN(8); 259 258 .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { 260 259 __alt_instructions = .; ··· 267 256 __alt_instructions_end = .; 268 257 } 269 258 259 + /* 260 + * And here are the replacement instructions. The linker sticks 261 + * them in as binary blobs. The .altinstructions section has enough data 262 + * to get the address and the length of them to patch the kernel safely. 263 + */ 270 264 .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { 271 265 *(.altinstr_replacement) 272 266 } 273 267 268 + /* 269 + * struct iommu_table_entry entries are injected in this section. 270 + * It is an array of IOMMUs which during run time gets sorted depending 271 + * on its dependency order. After rootfs_initcall is complete 272 + * this section can be safely removed. 273 + */ 274 + .iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) { 275 + __iommu_table = .; 276 + *(.iommu_table) 277 + __iommu_table_end = .; 278 + } 279 + . = ALIGN(8); 274 280 /* 275 281 * .exit.text is discard at runtime, not link time, to deal with 276 282 * references from .altinstructions and .eh_frame
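The .iommu_table output section is the usual "array built by the linker" pattern: every translation unit contributes entries via __attribute__((section(...))), and the bracketing symbols let C code walk them as a plain array. A stand-alone userspace sketch of the same trick; here GNU ld's automatically generated __start_/__stop_ symbols take the place of the __iommu_table/__iommu_table_end symbols the kernel defines explicitly above (all names made up):

    #include <stdio.h>

    struct entry {
            const char *name;
    };

    #define REGISTER(n)                                                   \
            static const struct entry __entry_##n                         \
            __attribute__((used, section("mytable"),                      \
                           aligned(sizeof(void *)))) = { #n }

    REGISTER(foo);
    REGISTER(bar);

    /* Provided by GNU ld for any section whose name is a valid C identifier. */
    extern const struct entry __start_mytable[], __stop_mytable[];

    int main(void)
    {
            const struct entry *p;

            for (p = __start_mytable; p < __stop_mytable; p++)
                    printf("%s\n", p->name);
            return 0;
    }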
+5
arch/x86/xen/pci-swiotlb-xen.c
··· 5 5 6 6 #include <asm/xen/hypervisor.h> 7 7 #include <xen/xen.h> 8 + #include <asm/iommu_table.h> 8 9 9 10 int xen_swiotlb __read_mostly; 10 11 ··· 57 56 dma_ops = &xen_swiotlb_dma_ops; 58 57 } 59 58 } 59 + IOMMU_INIT_FINISH(pci_xen_swiotlb_detect, 60 + 0, 61 + pci_xen_swiotlb_init, 62 + 0);
+5 -1
drivers/pci/dmar.c
··· 36 36 #include <linux/tboot.h> 37 37 #include <linux/dmi.h> 38 38 #include <linux/slab.h> 39 + #include <asm/iommu_table.h> 39 40 40 41 #define PREFIX "DMAR: " 41 42 ··· 688 687 return 0; 689 688 } 690 689 691 - void __init detect_intel_iommu(void) 690 + int __init detect_intel_iommu(void) 692 691 { 693 692 int ret; 694 693 ··· 724 723 } 725 724 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); 726 725 dmar_tbl = NULL; 726 + 727 + return ret ? 1 : -ENODEV; 727 728 } 728 729 729 730 ··· 1458 1455 return 0; 1459 1456 return dmar->flags & 0x1; 1460 1457 } 1458 + IOMMU_INIT_POST(detect_intel_iommu);
+3 -3
include/linux/dmar.h
··· 57 57 extern int dmar_dev_scope_init(void); 58 58 59 59 /* Intel IOMMU detection */ 60 - extern void detect_intel_iommu(void); 60 + extern int detect_intel_iommu(void); 61 61 extern int enable_drhd_fault_handling(void); 62 62 63 63 extern int parse_ioapics_under_ir(void); 64 64 extern int alloc_iommu(struct dmar_drhd_unit *); 65 65 #else 66 - static inline void detect_intel_iommu(void) 66 + static inline int detect_intel_iommu(void) 67 67 { 68 - return; 68 + return -ENODEV; 69 69 } 70 70 71 71 static inline int dmar_table_init(void)