Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86-irq-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 irq updates from Thomas Gleixner:
"Surgery of the MSI interrupt handling to prepare the support of
upcoming devices which require non-PCI based MSI handling:

- Cleanup historical leftovers all over the place

- Rework the code to utilize more core functionality

- Wrap XEN PCI/MSI interrupts into an irqdomain to make irqdomain
assignment to PCI devices possible.

- Assign irqdomains to PCI devices at initialization time which
allows to utilize the full functionality of hierarchical
irqdomains.

- Remove arch_.*_msi_irq() functions from X86 and utilize the
irqdomain which is assigned to the device for interrupt management.

- Make the arch_.*_msi_irq() support conditional on a config switch
and let the last few users select it"

* tag 'x86-irq-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
PCI: MSI: Fix Kconfig dependencies for PCI_MSI_ARCH_FALLBACKS
x86/apic/msi: Unbreak DMAR and HPET MSI
iommu/amd: Remove domain search for PCI/MSI
iommu/vt-d: Remove domain search for PCI/MSI[X]
x86/irq: Make most MSI ops XEN private
x86/irq: Cleanup the arch_*_msi_irqs() leftovers
PCI/MSI: Make arch_.*_msi_irq[s] fallbacks selectable
x86/pci: Set default irq domain in pcibios_add_device()
iommu/amd: Store irq domain in struct device
iommu/vt-d: Store irq domain in struct device
x86/xen: Wrap XEN MSI management into irqdomain
irqdomain/msi: Allow to override msi_domain_alloc/free_irqs()
x86/xen: Consolidate XEN-MSI init
x86/xen: Rework MSI teardown
x86/xen: Make xen_msi_init() static and rename it to xen_hvm_msi_init()
PCI/MSI: Provide pci_dev_has_special_msi_domain() helper
PCI: vmd: Mark VMD irqdomain with DOMAIN_BUS_VMD_MSI
irqdomain/msi: Provide DOMAIN_BUS_VMD_MSI
x86/irq: Initialize PCI/MSI domain at PCI init time
x86/pci: Reduce #ifdeffery in PCI init code
...

+603 -529
+1
arch/ia64/Kconfig
··· 56 56 select NEED_DMA_MAP_STATE 57 57 select NEED_SG_DMA_LENGTH 58 58 select NUMA if !FLATMEM 59 + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI 59 60 default y 60 61 help 61 62 The Itanium Processor Family is Intel's 64-bit successor to
+1
arch/mips/Kconfig
··· 86 86 select MODULES_USE_ELF_REL if MODULES 87 87 select MODULES_USE_ELF_RELA if MODULES && 64BIT 88 88 select PERF_USE_VMALLOC 89 + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI 89 90 select RTC_LIB 90 91 select SYSCTL_EXCEPTION_TRACE 91 92 select VIRT_TO_BUS
+1
arch/powerpc/Kconfig
··· 245 245 select OLD_SIGACTION if PPC32 246 246 select OLD_SIGSUSPEND 247 247 select PCI_DOMAINS if PCI 248 + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI 248 249 select PCI_SYSCALL if PCI 249 250 select PPC_DAWR if PPC64 250 251 select RTC_LIB
+1
arch/s390/Kconfig
··· 185 185 select OLD_SIGSUSPEND3 186 186 select PCI_DOMAINS if PCI 187 187 select PCI_MSI if PCI 188 + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI 188 189 select SPARSE_IRQ 189 190 select SYSCTL_EXCEPTION_TRACE 190 191 select THREAD_INFO_IN_TASK
+1
arch/sparc/Kconfig
··· 43 43 select GENERIC_STRNLEN_USER 44 44 select MODULES_USE_ELF_RELA 45 45 select PCI_SYSCALL if PCI 46 + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI 46 47 select ODD_RT_SIGACTION 47 48 select OLD_SIGSUSPEND 48 49 select CPU_NO_EFFICIENT_FFS
+8
arch/x86/include/asm/apic.h
··· 519 519 static inline void apic_smt_update(void) { } 520 520 #endif 521 521 522 + struct msi_msg; 523 + 524 + #ifdef CONFIG_PCI_MSI 525 + void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg); 526 + #else 527 + # define x86_vector_msi_compose_msg NULL 528 + #endif 529 + 522 530 extern void ioapic_zap_locks(void); 523 531 524 532 #endif /* _ASM_X86_APIC_H */
+42 -47
arch/x86/include/asm/hw_irq.h
··· 36 36 enum irq_alloc_type { 37 37 X86_IRQ_ALLOC_TYPE_IOAPIC = 1, 38 38 X86_IRQ_ALLOC_TYPE_HPET, 39 - X86_IRQ_ALLOC_TYPE_MSI, 40 - X86_IRQ_ALLOC_TYPE_MSIX, 39 + X86_IRQ_ALLOC_TYPE_PCI_MSI, 40 + X86_IRQ_ALLOC_TYPE_PCI_MSIX, 41 41 X86_IRQ_ALLOC_TYPE_DMAR, 42 42 X86_IRQ_ALLOC_TYPE_UV, 43 + X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT, 44 + X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT, 43 45 }; 44 46 47 + struct ioapic_alloc_info { 48 + int pin; 49 + int node; 50 + u32 trigger : 1; 51 + u32 polarity : 1; 52 + u32 valid : 1; 53 + struct IO_APIC_route_entry *entry; 54 + }; 55 + 56 + struct uv_alloc_info { 57 + int limit; 58 + int blade; 59 + unsigned long offset; 60 + char *name; 61 + 62 + }; 63 + 64 + /** 65 + * irq_alloc_info - X86 specific interrupt allocation info 66 + * @type: X86 specific allocation type 67 + * @flags: Flags for allocation tweaks 68 + * @devid: Device ID for allocations 69 + * @hwirq: Associated hw interrupt number in the domain 70 + * @mask: CPU mask for vector allocation 71 + * @desc: Pointer to msi descriptor 72 + * @data: Allocation specific data 73 + * 74 + * @ioapic: IOAPIC specific allocation data 75 + * @uv: UV specific allocation data 76 + */ 45 77 struct irq_alloc_info { 46 78 enum irq_alloc_type type; 47 79 u32 flags; 48 - const struct cpumask *mask; /* CPU mask for vector allocation */ 80 + u32 devid; 81 + irq_hw_number_t hwirq; 82 + const struct cpumask *mask; 83 + struct msi_desc *desc; 84 + void *data; 85 + 49 86 union { 50 - int unused; 51 - #ifdef CONFIG_HPET_TIMER 52 - struct { 53 - int hpet_id; 54 - int hpet_index; 55 - void *hpet_data; 56 - }; 57 - #endif 58 - #ifdef CONFIG_PCI_MSI 59 - struct { 60 - struct pci_dev *msi_dev; 61 - irq_hw_number_t msi_hwirq; 62 - }; 63 - #endif 64 - #ifdef CONFIG_X86_IO_APIC 65 - struct { 66 - int ioapic_id; 67 - int ioapic_pin; 68 - int ioapic_node; 69 - u32 ioapic_trigger : 1; 70 - u32 ioapic_polarity : 1; 71 - u32 ioapic_valid : 1; 72 - struct IO_APIC_route_entry *ioapic_entry; 73 - }; 74 - #endif 75 - #ifdef 
CONFIG_DMAR_TABLE 76 - struct { 77 - int dmar_id; 78 - void *dmar_data; 79 - }; 80 - #endif 81 - #ifdef CONFIG_X86_UV 82 - struct { 83 - int uv_limit; 84 - int uv_blade; 85 - unsigned long uv_offset; 86 - char *uv_name; 87 - }; 88 - #endif 89 - #if IS_ENABLED(CONFIG_VMD) 90 - struct { 91 - struct msi_desc *desc; 92 - }; 93 - #endif 87 + struct ioapic_alloc_info ioapic; 88 + struct uv_alloc_info uv; 94 89 }; 95 90 }; 96 91
-8
arch/x86/include/asm/irq_remapping.h
··· 45 45 extern void panic_if_irq_remap(const char *msg); 46 46 47 47 extern struct irq_domain * 48 - irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info); 49 - extern struct irq_domain * 50 48 irq_remapping_get_irq_domain(struct irq_alloc_info *info); 51 49 52 50 /* Create PCI MSI/MSIx irqdomain, use @parent as the parent irqdomain. */ ··· 69 71 70 72 static inline void panic_if_irq_remap(const char *msg) 71 73 { 72 - } 73 - 74 - static inline struct irq_domain * 75 - irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info) 76 - { 77 - return NULL; 78 74 } 79 75 80 76 static inline struct irq_domain *
+6 -2
arch/x86/include/asm/irqdomain.h
··· 51 51 #endif /* CONFIG_X86_IO_APIC */ 52 52 53 53 #ifdef CONFIG_PCI_MSI 54 - extern void arch_init_msi_domain(struct irq_domain *domain); 54 + void x86_create_pci_msi_domain(void); 55 + struct irq_domain *native_create_pci_msi_domain(void); 56 + extern struct irq_domain *x86_pci_msi_default_domain; 55 57 #else 56 - static inline void arch_init_msi_domain(struct irq_domain *domain) { } 58 + static inline void x86_create_pci_msi_domain(void) { } 59 + #define native_create_pci_msi_domain NULL 60 + #define x86_pci_msi_default_domain NULL 57 61 #endif 58 62 59 63 #endif
-10
arch/x86/include/asm/mpspec.h
··· 67 67 #ifdef CONFIG_X86_MPPARSE 68 68 extern void e820__memblock_alloc_reserved_mpc_new(void); 69 69 extern int enable_update_mptable; 70 - extern int default_mpc_apic_id(struct mpc_cpu *m); 71 - extern void default_smp_read_mpc_oem(struct mpc_table *mpc); 72 - # ifdef CONFIG_X86_IO_APIC 73 - extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str); 74 - # else 75 - # define default_mpc_oem_bus_info NULL 76 - # endif 77 70 extern void default_find_smp_config(void); 78 71 extern void default_get_smp_config(unsigned int early); 79 72 #else 80 73 static inline void e820__memblock_alloc_reserved_mpc_new(void) { } 81 74 #define enable_update_mptable 0 82 - #define default_mpc_apic_id NULL 83 - #define default_smp_read_mpc_oem NULL 84 - #define default_mpc_oem_bus_info NULL 85 75 #define default_find_smp_config x86_init_noop 86 76 #define default_get_smp_config x86_init_uint_noop 87 77 #endif
-2
arch/x86/include/asm/msi.h
··· 9 9 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, 10 10 msi_alloc_info_t *arg); 11 11 12 - void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc); 13 - 14 12 #endif /* _ASM_X86_MSI_H */
-11
arch/x86/include/asm/pci.h
··· 105 105 106 106 extern void pci_iommu_alloc(void); 107 107 108 - #ifdef CONFIG_PCI_MSI 109 - /* implemented in arch/x86/kernel/apic/io_apic. */ 110 - struct msi_desc; 111 - int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 112 - void native_teardown_msi_irq(unsigned int irq); 113 - void native_restore_msi_irqs(struct pci_dev *dev); 114 - #else 115 - #define native_setup_msi_irqs NULL 116 - #define native_teardown_msi_irq NULL 117 - #endif 118 - 119 108 /* generic pci stuff */ 120 109 #include <asm-generic/pci.h> 121 110
+11
arch/x86/include/asm/pci_x86.h
··· 114 114 extern bool port_cf9_safe; 115 115 116 116 /* arch_initcall level */ 117 + #ifdef CONFIG_PCI_DIRECT 117 118 extern int pci_direct_probe(void); 118 119 extern void pci_direct_init(int type); 120 + #else 121 + static inline int pci_direct_probe(void) { return -1; } 122 + static inline void pci_direct_init(int type) { } 123 + #endif 124 + 125 + #ifdef CONFIG_PCI_BIOS 119 126 extern void pci_pcbios_init(void); 127 + #else 128 + static inline void pci_pcbios_init(void) { } 129 + #endif 130 + 120 131 extern void __init dmi_check_pciprobe(void); 121 132 extern void __init dmi_check_skip_isa_align(void); 122 133
+3 -13
arch/x86/include/asm/x86_init.h
··· 8 8 struct mpc_cpu; 9 9 struct mpc_table; 10 10 struct cpuinfo_x86; 11 + struct irq_domain; 11 12 12 13 /** 13 14 * struct x86_init_mpparse - platform specific mpparse ops 14 - * @mpc_record: platform specific mpc record accounting 15 15 * @setup_ioapic_ids: platform specific ioapic id override 16 - * @mpc_apic_id: platform specific mpc apic id assignment 17 - * @smp_read_mpc_oem: platform specific oem mpc table setup 18 - * @mpc_oem_pci_bus: platform specific pci bus setup (default NULL) 19 - * @mpc_oem_bus_info: platform specific mpc bus info 20 16 * @find_smp_config: find the smp configuration 21 17 * @get_smp_config: get the smp configuration 22 18 */ 23 19 struct x86_init_mpparse { 24 - void (*mpc_record)(unsigned int mode); 25 20 void (*setup_ioapic_ids)(void); 26 - int (*mpc_apic_id)(struct mpc_cpu *m); 27 - void (*smp_read_mpc_oem)(struct mpc_table *mpc); 28 - void (*mpc_oem_pci_bus)(struct mpc_bus *m); 29 - void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); 30 21 void (*find_smp_config)(void); 31 22 void (*get_smp_config)(unsigned int early); 32 23 }; ··· 43 52 * @intr_init: interrupt init code 44 53 * @intr_mode_select: interrupt delivery mode selection 45 54 * @intr_mode_init: interrupt delivery mode setup 55 + * @create_pci_msi_domain: Create the PCI/MSI interrupt domain 46 56 */ 47 57 struct x86_init_irqs { 48 58 void (*pre_vector_init)(void); 49 59 void (*intr_init)(void); 50 60 void (*intr_mode_select)(void); 51 61 void (*intr_mode_init)(void); 62 + struct irq_domain *(*create_pci_msi_domain)(void); 52 63 }; 53 64 54 65 /** ··· 276 283 struct pci_dev; 277 284 278 285 struct x86_msi_ops { 279 - int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); 280 - void (*teardown_msi_irq)(unsigned int irq); 281 - void (*teardown_msi_irqs)(struct pci_dev *dev); 282 286 void (*restore_msi_irqs)(struct pci_dev *dev); 283 287 }; 284 288
+3
arch/x86/kernel/apic/apic.c
··· 1429 1429 break; 1430 1430 } 1431 1431 1432 + if (x86_platform.apic_post_init) 1433 + x86_platform.apic_post_init(); 1434 + 1432 1435 apic_bsp_setup(upmode); 1433 1436 } 1434 1437
+37 -37
arch/x86/kernel/apic/io_apic.c
··· 860 860 { 861 861 init_irq_alloc_info(info, NULL); 862 862 info->type = X86_IRQ_ALLOC_TYPE_IOAPIC; 863 - info->ioapic_node = node; 864 - info->ioapic_trigger = trigger; 865 - info->ioapic_polarity = polarity; 866 - info->ioapic_valid = 1; 863 + info->ioapic.node = node; 864 + info->ioapic.trigger = trigger; 865 + info->ioapic.polarity = polarity; 866 + info->ioapic.valid = 1; 867 867 } 868 868 869 869 #ifndef CONFIG_ACPI ··· 878 878 879 879 copy_irq_alloc_info(dst, src); 880 880 dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC; 881 - dst->ioapic_id = mpc_ioapic_id(ioapic_idx); 882 - dst->ioapic_pin = pin; 883 - dst->ioapic_valid = 1; 884 - if (src && src->ioapic_valid) { 885 - dst->ioapic_node = src->ioapic_node; 886 - dst->ioapic_trigger = src->ioapic_trigger; 887 - dst->ioapic_polarity = src->ioapic_polarity; 881 + dst->devid = mpc_ioapic_id(ioapic_idx); 882 + dst->ioapic.pin = pin; 883 + dst->ioapic.valid = 1; 884 + if (src && src->ioapic.valid) { 885 + dst->ioapic.node = src->ioapic.node; 886 + dst->ioapic.trigger = src->ioapic.trigger; 887 + dst->ioapic.polarity = src->ioapic.polarity; 888 888 } else { 889 - dst->ioapic_node = NUMA_NO_NODE; 889 + dst->ioapic.node = NUMA_NO_NODE; 890 890 if (acpi_get_override_irq(gsi, &trigger, &polarity) >= 0) { 891 - dst->ioapic_trigger = trigger; 892 - dst->ioapic_polarity = polarity; 891 + dst->ioapic.trigger = trigger; 892 + dst->ioapic.polarity = polarity; 893 893 } else { 894 894 /* 895 895 * PCI interrupts are always active low level 896 896 * triggered. 897 897 */ 898 - dst->ioapic_trigger = IOAPIC_LEVEL; 899 - dst->ioapic_polarity = IOAPIC_POL_LOW; 898 + dst->ioapic.trigger = IOAPIC_LEVEL; 899 + dst->ioapic.polarity = IOAPIC_POL_LOW; 900 900 } 901 901 } 902 902 } 903 903 904 904 static int ioapic_alloc_attr_node(struct irq_alloc_info *info) 905 905 { 906 - return (info && info->ioapic_valid) ? info->ioapic_node : NUMA_NO_NODE; 906 + return (info && info->ioapic.valid) ? 
info->ioapic.node : NUMA_NO_NODE; 907 907 } 908 908 909 909 static void mp_register_handler(unsigned int irq, unsigned long trigger) ··· 933 933 * pin with real trigger and polarity attributes. 934 934 */ 935 935 if (irq < nr_legacy_irqs() && data->count == 1) { 936 - if (info->ioapic_trigger != data->trigger) 937 - mp_register_handler(irq, info->ioapic_trigger); 938 - data->entry.trigger = data->trigger = info->ioapic_trigger; 939 - data->entry.polarity = data->polarity = info->ioapic_polarity; 936 + if (info->ioapic.trigger != data->trigger) 937 + mp_register_handler(irq, info->ioapic.trigger); 938 + data->entry.trigger = data->trigger = info->ioapic.trigger; 939 + data->entry.polarity = data->polarity = info->ioapic.polarity; 940 940 } 941 941 942 - return data->trigger == info->ioapic_trigger && 943 - data->polarity == info->ioapic_polarity; 942 + return data->trigger == info->ioapic.trigger && 943 + data->polarity == info->ioapic.polarity; 944 944 } 945 945 946 946 static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi, ··· 1002 1002 if (!mp_check_pin_attr(irq, info)) 1003 1003 return -EBUSY; 1004 1004 if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic, 1005 - info->ioapic_pin)) 1005 + info->ioapic.pin)) 1006 1006 return -ENOMEM; 1007 1007 } else { 1008 1008 info->flags |= X86_IRQ_ALLOC_LEGACY; ··· 2092 2092 struct irq_alloc_info info; 2093 2093 2094 2094 ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0); 2095 - info.ioapic_id = mpc_ioapic_id(ioapic); 2096 - info.ioapic_pin = pin; 2095 + info.devid = mpc_ioapic_id(ioapic); 2096 + info.ioapic.pin = pin; 2097 2097 mutex_lock(&ioapic_mutex); 2098 2098 irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info); 2099 2099 mutex_unlock(&ioapic_mutex); ··· 2297 2297 return 0; 2298 2298 2299 2299 init_irq_alloc_info(&info, NULL); 2300 - info.type = X86_IRQ_ALLOC_TYPE_IOAPIC; 2301 - info.ioapic_id = mpc_ioapic_id(ioapic); 2302 - parent = irq_remapping_get_ir_irq_domain(&info); 2300 
+ info.type = X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT; 2301 + info.devid = mpc_ioapic_id(ioapic); 2302 + parent = irq_remapping_get_irq_domain(&info); 2303 2303 if (!parent) 2304 2304 parent = x86_vector_domain; 2305 2305 else ··· 2933 2933 static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data, 2934 2934 struct irq_alloc_info *info) 2935 2935 { 2936 - if (info && info->ioapic_valid) { 2937 - data->trigger = info->ioapic_trigger; 2938 - data->polarity = info->ioapic_polarity; 2936 + if (info && info->ioapic.valid) { 2937 + data->trigger = info->ioapic.trigger; 2938 + data->polarity = info->ioapic.polarity; 2939 2939 } else if (acpi_get_override_irq(gsi, &data->trigger, 2940 2940 &data->polarity) < 0) { 2941 2941 /* PCI interrupts are always active low level triggered. */ ··· 2981 2981 return -EINVAL; 2982 2982 2983 2983 ioapic = mp_irqdomain_ioapic_idx(domain); 2984 - pin = info->ioapic_pin; 2984 + pin = info->ioapic.pin; 2985 2985 if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0) 2986 2986 return -EEXIST; 2987 2987 ··· 2989 2989 if (!data) 2990 2990 return -ENOMEM; 2991 2991 2992 - info->ioapic_entry = &data->entry; 2992 + info->ioapic.entry = &data->entry; 2993 2993 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info); 2994 2994 if (ret < 0) { 2995 2995 kfree(data); ··· 2997 2997 } 2998 2998 2999 2999 INIT_LIST_HEAD(&data->irq_2_pin); 3000 - irq_data->hwirq = info->ioapic_pin; 3000 + irq_data->hwirq = info->ioapic.pin; 3001 3001 irq_data->chip = (domain->parent == x86_vector_domain) ? 
3002 3002 &ioapic_chip : &ioapic_ir_chip; 3003 3003 irq_data->chip_data = data; ··· 3007 3007 add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); 3008 3008 3009 3009 local_irq_save(flags); 3010 - if (info->ioapic_entry) 3011 - mp_setup_entry(cfg, data, info->ioapic_entry); 3010 + if (info->ioapic.entry) 3011 + mp_setup_entry(cfg, data, info->ioapic.entry); 3012 3012 mp_register_handler(virq, data->trigger); 3013 3013 if (virq < nr_legacy_irqs()) 3014 3014 legacy_pic->mask(virq);
+36 -82
arch/x86/kernel/apic/msi.c
··· 21 21 #include <asm/apic.h> 22 22 #include <asm/irq_remapping.h> 23 23 24 - static struct irq_domain *msi_default_domain; 24 + struct irq_domain *x86_pci_msi_default_domain __ro_after_init; 25 25 26 26 static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg) 27 27 { ··· 45 45 MSI_DATA_VECTOR(cfg->vector); 46 46 } 47 47 48 - static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) 48 + void x86_vector_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) 49 49 { 50 50 __irq_msi_compose_msg(irqd_cfg(data), msg); 51 51 } ··· 177 177 .irq_mask = pci_msi_mask_irq, 178 178 .irq_ack = irq_chip_ack_parent, 179 179 .irq_retrigger = irq_chip_retrigger_hierarchy, 180 - .irq_compose_msi_msg = irq_msi_compose_msg, 181 180 .irq_set_affinity = msi_set_affinity, 182 181 .flags = IRQCHIP_SKIP_SET_WAKE, 183 182 }; 184 - 185 - int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 186 - { 187 - struct irq_domain *domain; 188 - struct irq_alloc_info info; 189 - 190 - init_irq_alloc_info(&info, NULL); 191 - info.type = X86_IRQ_ALLOC_TYPE_MSI; 192 - info.msi_dev = dev; 193 - 194 - domain = irq_remapping_get_irq_domain(&info); 195 - if (domain == NULL) 196 - domain = msi_default_domain; 197 - if (domain == NULL) 198 - return -ENOSYS; 199 - 200 - return msi_domain_alloc_irqs(domain, &dev->dev, nvec); 201 - } 202 - 203 - void native_teardown_msi_irq(unsigned int irq) 204 - { 205 - irq_domain_free_irqs(irq, 1); 206 - } 207 - 208 - static irq_hw_number_t pci_msi_get_hwirq(struct msi_domain_info *info, 209 - msi_alloc_info_t *arg) 210 - { 211 - return arg->msi_hwirq; 212 - } 213 183 214 184 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, 215 185 msi_alloc_info_t *arg) ··· 188 218 struct msi_desc *desc = first_pci_msi_entry(pdev); 189 219 190 220 init_irq_alloc_info(arg, NULL); 191 - arg->msi_dev = pdev; 192 221 if (desc->msi_attrib.is_msix) { 193 - arg->type = X86_IRQ_ALLOC_TYPE_MSIX; 222 + arg->type = 
X86_IRQ_ALLOC_TYPE_PCI_MSIX; 194 223 } else { 195 - arg->type = X86_IRQ_ALLOC_TYPE_MSI; 224 + arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI; 196 225 arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; 197 226 } 198 227 ··· 199 230 } 200 231 EXPORT_SYMBOL_GPL(pci_msi_prepare); 201 232 202 - void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) 203 - { 204 - arg->msi_hwirq = pci_msi_domain_calc_hwirq(arg->msi_dev, desc); 205 - } 206 - EXPORT_SYMBOL_GPL(pci_msi_set_desc); 207 - 208 233 static struct msi_domain_ops pci_msi_domain_ops = { 209 - .get_hwirq = pci_msi_get_hwirq, 210 234 .msi_prepare = pci_msi_prepare, 211 - .set_desc = pci_msi_set_desc, 212 235 }; 213 236 214 237 static struct msi_domain_info pci_msi_domain_info = { ··· 212 251 .handler_name = "edge", 213 252 }; 214 253 215 - void __init arch_init_msi_domain(struct irq_domain *parent) 254 + struct irq_domain * __init native_create_pci_msi_domain(void) 216 255 { 217 256 struct fwnode_handle *fn; 257 + struct irq_domain *d; 218 258 219 259 if (disable_apic) 220 - return; 260 + return NULL; 221 261 222 262 fn = irq_domain_alloc_named_fwnode("PCI-MSI"); 223 - if (fn) { 224 - msi_default_domain = 225 - pci_msi_create_irq_domain(fn, &pci_msi_domain_info, 226 - parent); 227 - } 228 - if (!msi_default_domain) { 263 + if (!fn) 264 + return NULL; 265 + 266 + d = pci_msi_create_irq_domain(fn, &pci_msi_domain_info, 267 + x86_vector_domain); 268 + if (!d) { 229 269 irq_domain_free_fwnode(fn); 230 - pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); 270 + pr_warn("Failed to initialize PCI-MSI irqdomain.\n"); 231 271 } else { 232 - msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; 272 + d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; 233 273 } 274 + return d; 275 + } 276 + 277 + void __init x86_create_pci_msi_domain(void) 278 + { 279 + x86_pci_msi_default_domain = x86_init.irqs.create_pci_msi_domain(); 234 280 } 235 281 236 282 #ifdef CONFIG_IRQ_REMAP ··· 247 279 .irq_mask = pci_msi_mask_irq, 248 280 
.irq_ack = irq_chip_ack_parent, 249 281 .irq_retrigger = irq_chip_retrigger_hierarchy, 250 - .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent, 251 282 .flags = IRQCHIP_SKIP_SET_WAKE, 252 283 }; 253 284 ··· 288 321 .irq_ack = irq_chip_ack_parent, 289 322 .irq_set_affinity = msi_domain_set_affinity, 290 323 .irq_retrigger = irq_chip_retrigger_hierarchy, 291 - .irq_compose_msi_msg = irq_msi_compose_msg, 292 324 .irq_write_msi_msg = dmar_msi_write_msg, 293 325 .flags = IRQCHIP_SKIP_SET_WAKE, 294 326 }; 295 - 296 - static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info, 297 - msi_alloc_info_t *arg) 298 - { 299 - return arg->dmar_id; 300 - } 301 327 302 328 static int dmar_msi_init(struct irq_domain *domain, 303 329 struct msi_domain_info *info, unsigned int virq, 304 330 irq_hw_number_t hwirq, msi_alloc_info_t *arg) 305 331 { 306 - irq_domain_set_info(domain, virq, arg->dmar_id, info->chip, NULL, 307 - handle_edge_irq, arg->dmar_data, "edge"); 332 + irq_domain_set_info(domain, virq, arg->devid, info->chip, NULL, 333 + handle_edge_irq, arg->data, "edge"); 308 334 309 335 return 0; 310 336 } 311 337 312 338 static struct msi_domain_ops dmar_msi_domain_ops = { 313 - .get_hwirq = dmar_msi_get_hwirq, 314 339 .msi_init = dmar_msi_init, 315 340 }; 316 341 317 342 static struct msi_domain_info dmar_msi_domain_info = { 318 343 .ops = &dmar_msi_domain_ops, 319 344 .chip = &dmar_msi_controller, 345 + .flags = MSI_FLAG_USE_DEF_DOM_OPS, 320 346 }; 321 347 322 348 static struct irq_domain *dmar_get_irq_domain(void) ··· 344 384 345 385 init_irq_alloc_info(&info, NULL); 346 386 info.type = X86_IRQ_ALLOC_TYPE_DMAR; 347 - info.dmar_id = id; 348 - info.dmar_data = arg; 387 + info.devid = id; 388 + info.hwirq = id; 389 + info.data = arg; 349 390 350 391 return irq_domain_alloc_irqs(domain, 1, node, &info); 351 392 } ··· 380 419 .irq_ack = irq_chip_ack_parent, 381 420 .irq_set_affinity = msi_domain_set_affinity, 382 421 .irq_retrigger = 
irq_chip_retrigger_hierarchy, 383 - .irq_compose_msi_msg = irq_msi_compose_msg, 384 422 .irq_write_msi_msg = hpet_msi_write_msg, 385 423 .flags = IRQCHIP_SKIP_SET_WAKE, 386 424 }; 387 - 388 - static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info, 389 - msi_alloc_info_t *arg) 390 - { 391 - return arg->hpet_index; 392 - } 393 425 394 426 static int hpet_msi_init(struct irq_domain *domain, 395 427 struct msi_domain_info *info, unsigned int virq, 396 428 irq_hw_number_t hwirq, msi_alloc_info_t *arg) 397 429 { 398 430 irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); 399 - irq_domain_set_info(domain, virq, arg->hpet_index, info->chip, NULL, 400 - handle_edge_irq, arg->hpet_data, "edge"); 431 + irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL, 432 + handle_edge_irq, arg->data, "edge"); 401 433 402 434 return 0; 403 435 } ··· 402 448 } 403 449 404 450 static struct msi_domain_ops hpet_msi_domain_ops = { 405 - .get_hwirq = hpet_msi_get_hwirq, 406 451 .msi_init = hpet_msi_init, 407 452 .msi_free = hpet_msi_free, 408 453 }; ··· 409 456 static struct msi_domain_info hpet_msi_domain_info = { 410 457 .ops = &hpet_msi_domain_ops, 411 458 .chip = &hpet_msi_controller, 459 + .flags = MSI_FLAG_USE_DEF_DOM_OPS, 412 460 }; 413 461 414 462 struct irq_domain *hpet_create_irq_domain(int hpet_id) ··· 430 476 domain_info->data = (void *)(long)hpet_id; 431 477 432 478 init_irq_alloc_info(&info, NULL); 433 - info.type = X86_IRQ_ALLOC_TYPE_HPET; 434 - info.hpet_id = hpet_id; 435 - parent = irq_remapping_get_ir_irq_domain(&info); 479 + info.type = X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT; 480 + info.devid = hpet_id; 481 + parent = irq_remapping_get_irq_domain(&info); 436 482 if (parent == NULL) 437 483 parent = x86_vector_domain; 438 484 else ··· 460 506 461 507 init_irq_alloc_info(&info, NULL); 462 508 info.type = X86_IRQ_ALLOC_TYPE_HPET; 463 - info.hpet_data = hc; 464 - info.hpet_id = hpet_dev_id(domain); 465 - info.hpet_index = dev_num; 509 + info.data = hc; 510 + 
info.devid = hpet_dev_id(domain); 511 + info.hwirq = dev_num; 466 512 467 513 return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); 468 514 }
-3
arch/x86/kernel/apic/probe_32.c
··· 170 170 171 171 if (apic->setup_apic_routing) 172 172 apic->setup_apic_routing(); 173 - 174 - if (x86_platform.apic_post_init) 175 - x86_platform.apic_post_init(); 176 173 } 177 174 178 175 void __init generic_apic_probe(void)
-3
arch/x86/kernel/apic/probe_64.c
··· 32 32 break; 33 33 } 34 34 } 35 - 36 - if (x86_platform.apic_post_init) 37 - x86_platform.apic_post_init(); 38 35 } 39 36 40 37 int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+1 -2
arch/x86/kernel/apic/vector.c
··· 714 714 BUG_ON(x86_vector_domain == NULL); 715 715 irq_set_default_host(x86_vector_domain); 716 716 717 - arch_init_msi_domain(x86_vector_domain); 718 - 719 717 BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL)); 720 718 721 719 /* ··· 822 824 .name = "APIC", 823 825 .irq_ack = apic_ack_edge, 824 826 .irq_set_affinity = apic_set_affinity, 827 + .irq_compose_msi_msg = x86_vector_msi_compose_msg, 825 828 .irq_retrigger = apic_retrigger_irq, 826 829 }; 827 830
+2 -2
arch/x86/kernel/devicetree.c
··· 229 229 230 230 it = &of_ioapic_type[type_index]; 231 231 ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity); 232 - tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); 233 - tmp.ioapic_pin = fwspec->param[0]; 232 + tmp.devid = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); 233 + tmp.ioapic.pin = fwspec->param[0]; 234 234 235 235 return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp); 236 236 }
+4 -22
arch/x86/kernel/mpparse.c
··· 45 45 return sum & 0xFF; 46 46 } 47 47 48 - int __init default_mpc_apic_id(struct mpc_cpu *m) 49 - { 50 - return m->apicid; 51 - } 52 - 53 48 static void __init MP_processor_info(struct mpc_cpu *m) 54 49 { 55 50 int apicid; ··· 55 60 return; 56 61 } 57 62 58 - apicid = x86_init.mpparse.mpc_apic_id(m); 63 + apicid = m->apicid; 59 64 60 65 if (m->cpuflag & CPU_BOOTPROCESSOR) { 61 66 bootup_cpu = " (Bootup-CPU)"; ··· 67 72 } 68 73 69 74 #ifdef CONFIG_X86_IO_APIC 70 - void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str) 75 + static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str) 71 76 { 72 77 memcpy(str, m->bustype, 6); 73 78 str[6] = 0; ··· 78 83 { 79 84 char str[7]; 80 85 81 - x86_init.mpparse.mpc_oem_bus_info(m, str); 86 + mpc_oem_bus_info(m, str); 82 87 83 88 #if MAX_MP_BUSSES < 256 84 89 if (m->busid >= MAX_MP_BUSSES) { ··· 94 99 mp_bus_id_to_type[m->busid] = MP_BUS_ISA; 95 100 #endif 96 101 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { 97 - if (x86_init.mpparse.mpc_oem_pci_bus) 98 - x86_init.mpparse.mpc_oem_pci_bus(m); 99 - 100 102 clear_bit(m->busid, mp_bus_not_pci); 101 103 #ifdef CONFIG_EISA 102 104 mp_bus_id_to_type[m->busid] = MP_BUS_PCI; ··· 189 197 1, mpc, mpc->length, 1); 190 198 } 191 199 192 - void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { } 193 - 194 200 static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) 195 201 { 196 202 char str[16]; ··· 207 217 if (early) 208 218 return 1; 209 219 210 - if (mpc->oemptr) 211 - x86_init.mpparse.smp_read_mpc_oem(mpc); 212 - 213 - /* 214 - * Now process the configuration blocks. 215 - */ 216 - x86_init.mpparse.mpc_record(0); 217 - 220 + /* Now process the configuration blocks. */ 218 221 while (count < mpc->length) { 219 222 switch (*mpt) { 220 223 case MP_PROCESSOR: ··· 238 255 count = mpc->length; 239 256 break; 240 257 } 241 - x86_init.mpparse.mpc_record(1); 242 258 } 243 259 244 260 if (!num_processors)
+3 -23
arch/x86/kernel/x86_init.c
··· 24 24 #include <asm/tsc.h> 25 25 #include <asm/iommu.h> 26 26 #include <asm/mach_traps.h> 27 + #include <asm/irqdomain.h> 27 28 28 29 void x86_init_noop(void) { } 29 30 void __init x86_init_uint_noop(unsigned int unused) { } ··· 68 67 }, 69 68 70 69 .mpparse = { 71 - .mpc_record = x86_init_uint_noop, 72 70 .setup_ioapic_ids = x86_init_noop, 73 - .mpc_apic_id = default_mpc_apic_id, 74 - .smp_read_mpc_oem = default_smp_read_mpc_oem, 75 - .mpc_oem_bus_info = default_mpc_oem_bus_info, 76 71 .find_smp_config = default_find_smp_config, 77 72 .get_smp_config = default_get_smp_config, 78 73 }, ··· 77 80 .pre_vector_init = init_ISA_irqs, 78 81 .intr_init = native_init_IRQ, 79 82 .intr_mode_select = apic_intr_mode_select, 80 - .intr_mode_init = apic_intr_mode_init 83 + .intr_mode_init = apic_intr_mode_init, 84 + .create_pci_msi_domain = native_create_pci_msi_domain, 81 85 }, 82 86 83 87 .oem = { ··· 146 148 147 149 #if defined(CONFIG_PCI_MSI) 148 150 struct x86_msi_ops x86_msi __ro_after_init = { 149 - .setup_msi_irqs = native_setup_msi_irqs, 150 - .teardown_msi_irq = native_teardown_msi_irq, 151 - .teardown_msi_irqs = default_teardown_msi_irqs, 152 151 .restore_msi_irqs = default_restore_msi_irqs, 153 152 }; 154 153 155 154 /* MSI arch specific hooks */ 156 - int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 157 - { 158 - return x86_msi.setup_msi_irqs(dev, nvec, type); 159 - } 160 - 161 - void arch_teardown_msi_irqs(struct pci_dev *dev) 162 - { 163 - x86_msi.teardown_msi_irqs(dev); 164 - } 165 - 166 - void arch_teardown_msi_irq(unsigned int irq) 167 - { 168 - x86_msi.teardown_msi_irq(irq); 169 - } 170 - 171 155 void arch_restore_msi_irqs(struct pci_dev *dev) 172 156 { 173 157 x86_msi.restore_msi_irqs(dev);
+17 -1
arch/x86/pci/common.c
··· 19 19 #include <asm/smp.h> 20 20 #include <asm/pci_x86.h> 21 21 #include <asm/setup.h> 22 + #include <asm/irqdomain.h> 22 23 23 24 unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | 24 25 PCI_PROBE_MMCONF; ··· 634 633 635 634 int pcibios_add_device(struct pci_dev *dev) 636 635 { 637 - struct setup_data *data; 638 636 struct pci_setup_rom *rom; 637 + struct irq_domain *msidom; 638 + struct setup_data *data; 639 639 u64 pa_data; 640 640 641 641 pa_data = boot_params.hdr.setup_data; ··· 663 661 memunmap(data); 664 662 } 665 663 set_dev_domain_options(dev); 664 + 665 + /* 666 + * Setup the initial MSI domain of the device. If the underlying 667 + * bus has a PCI/MSI irqdomain associated use the bus domain, 668 + * otherwise set the default domain. This ensures that special irq 669 + * domains e.g. VMD are preserved. The default ensures initial 670 + * operation if irq remapping is not active. If irq remapping is 671 + * active it will overwrite the domain pointer when the device is 672 + * associated to a remapping domain. 673 + */ 674 + msidom = dev_get_msi_domain(&dev->bus->dev); 675 + if (!msidom) 676 + msidom = x86_pci_msi_default_domain; 677 + dev_set_msi_domain(&dev->dev, msidom); 666 678 return 0; 667 679 } 668 680
+6 -7
arch/x86/pci/init.c
··· 3 3 #include <linux/init.h> 4 4 #include <asm/pci_x86.h> 5 5 #include <asm/x86_init.h> 6 + #include <asm/irqdomain.h> 6 7 7 8 /* arch_initcall has too random ordering, so call the initializers 8 9 in the right sequence from here. */ 9 10 static __init int pci_arch_init(void) 10 11 { 11 - #ifdef CONFIG_PCI_DIRECT 12 - int type = 0; 12 + int type; 13 + 14 + x86_create_pci_msi_domain(); 13 15 14 16 type = pci_direct_probe(); 15 - #endif 16 17 17 18 if (!(pci_probe & PCI_PROBE_NOEARLY)) 18 19 pci_mmcfg_early_init(); ··· 21 20 if (x86_init.pci.arch_init && !x86_init.pci.arch_init()) 22 21 return 0; 23 22 24 - #ifdef CONFIG_PCI_BIOS 25 23 pci_pcbios_init(); 26 - #endif 24 + 27 25 /* 28 26 * don't check for raw_pci_ops here because we want pcbios as last 29 27 * fallback, yet it's needed to run first to set pcibios_last_bus 30 28 * in case legacy PCI probing is used. otherwise detecting peer busses 31 29 * fails. 32 30 */ 33 - #ifdef CONFIG_PCI_DIRECT 34 31 pci_direct_init(type); 35 - #endif 32 + 36 33 if (!raw_pci_ops && !raw_pci_ext_ops) 37 34 printk(KERN_ERR 38 35 "PCI: Fatal: No config space access function found\n");
+113 -24
arch/x86/pci/xen.c
··· 157 157 struct xen_pci_frontend_ops *xen_pci_frontend; 158 158 EXPORT_SYMBOL_GPL(xen_pci_frontend); 159 159 160 + struct xen_msi_ops { 161 + int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); 162 + void (*teardown_msi_irqs)(struct pci_dev *dev); 163 + }; 164 + 165 + static struct xen_msi_ops xen_msi_ops __ro_after_init; 166 + 160 167 static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 161 168 { 162 169 int irq, ret, i; ··· 379 372 WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret); 380 373 } 381 374 } 382 - #endif 375 + #else /* CONFIG_XEN_DOM0 */ 376 + #define xen_initdom_setup_msi_irqs NULL 377 + #define xen_initdom_restore_msi_irqs NULL 378 + #endif /* !CONFIG_XEN_DOM0 */ 383 379 384 380 static void xen_teardown_msi_irqs(struct pci_dev *dev) 385 381 { 386 382 struct msi_desc *msidesc; 383 + int i; 387 384 388 - msidesc = first_pci_msi_entry(dev); 385 + for_each_pci_msi_entry(msidesc, dev) { 386 + if (msidesc->irq) { 387 + for (i = 0; i < msidesc->nvec_used; i++) 388 + xen_destroy_irq(msidesc->irq + i); 389 + } 390 + } 391 + } 392 + 393 + static void xen_pv_teardown_msi_irqs(struct pci_dev *dev) 394 + { 395 + struct msi_desc *msidesc = first_pci_msi_entry(dev); 396 + 389 397 if (msidesc->msi_attrib.is_msix) 390 398 xen_pci_frontend_disable_msix(dev); 391 399 else 392 400 xen_pci_frontend_disable_msi(dev); 393 401 394 - /* Free the IRQ's and the msidesc using the generic code. 
*/ 395 - default_teardown_msi_irqs(dev); 402 + xen_teardown_msi_irqs(dev); 396 403 } 397 404 398 - static void xen_teardown_msi_irq(unsigned int irq) 405 + static int xen_msi_domain_alloc_irqs(struct irq_domain *domain, 406 + struct device *dev, int nvec) 399 407 { 400 - xen_destroy_irq(irq); 408 + int type; 409 + 410 + if (WARN_ON_ONCE(!dev_is_pci(dev))) 411 + return -EINVAL; 412 + 413 + if (first_msi_entry(dev)->msi_attrib.is_msix) 414 + type = PCI_CAP_ID_MSIX; 415 + else 416 + type = PCI_CAP_ID_MSI; 417 + 418 + return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type); 401 419 } 402 420 403 - #endif 421 + static void xen_msi_domain_free_irqs(struct irq_domain *domain, 422 + struct device *dev) 423 + { 424 + if (WARN_ON_ONCE(!dev_is_pci(dev))) 425 + return; 426 + 427 + xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev)); 428 + } 429 + 430 + static struct msi_domain_ops xen_pci_msi_domain_ops = { 431 + .domain_alloc_irqs = xen_msi_domain_alloc_irqs, 432 + .domain_free_irqs = xen_msi_domain_free_irqs, 433 + }; 434 + 435 + static struct msi_domain_info xen_pci_msi_domain_info = { 436 + .ops = &xen_pci_msi_domain_ops, 437 + }; 438 + 439 + /* 440 + * This irq domain is a blatant violation of the irq domain design, but 441 + * distangling XEN into real irq domains is not a job for mere mortals with 442 + * limited XENology. But it's the least dangerous way for a mere mortal to 443 + * get rid of the arch_*_msi_irqs() hackery in order to store the irq 444 + * domain pointer in struct device. This irq domain wrappery allows to do 445 + * that without breaking XEN terminally. 
446 + */ 447 + static __init struct irq_domain *xen_create_pci_msi_domain(void) 448 + { 449 + struct irq_domain *d = NULL; 450 + struct fwnode_handle *fn; 451 + 452 + fn = irq_domain_alloc_named_fwnode("XEN-MSI"); 453 + if (fn) 454 + d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL); 455 + 456 + /* FIXME: No idea how to survive if this fails */ 457 + BUG_ON(!d); 458 + 459 + return d; 460 + } 461 + 462 + static __init void xen_setup_pci_msi(void) 463 + { 464 + if (xen_pv_domain()) { 465 + if (xen_initial_domain()) { 466 + xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs; 467 + x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; 468 + } else { 469 + xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs; 470 + } 471 + xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs; 472 + pci_msi_ignore_mask = 1; 473 + } else if (xen_hvm_domain()) { 474 + xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs; 475 + xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs; 476 + } else { 477 + WARN_ON_ONCE(1); 478 + return; 479 + } 480 + 481 + /* 482 + * Override the PCI/MSI irq domain init function. No point 483 + * in allocating the native domain and never use it. 
484 + */ 485 + x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain; 486 + } 487 + 488 + #else /* CONFIG_PCI_MSI */ 489 + static inline void xen_setup_pci_msi(void) { } 490 + #endif /* CONFIG_PCI_MSI */ 404 491 405 492 int __init pci_xen_init(void) 406 493 { ··· 511 410 /* Keep ACPI out of the picture */ 512 411 acpi_noirq_set(); 513 412 514 - #ifdef CONFIG_PCI_MSI 515 - x86_msi.setup_msi_irqs = xen_setup_msi_irqs; 516 - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; 517 - x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; 518 - pci_msi_ignore_mask = 1; 519 - #endif 413 + xen_setup_pci_msi(); 520 414 return 0; 521 415 } 522 416 523 417 #ifdef CONFIG_PCI_MSI 524 - void __init xen_msi_init(void) 418 + static void __init xen_hvm_msi_init(void) 525 419 { 526 420 if (!disable_apic) { 527 421 /* ··· 531 435 ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC))) 532 436 return; 533 437 } 534 - 535 - x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; 536 - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; 438 + xen_setup_pci_msi(); 537 439 } 538 440 #endif 539 441 ··· 554 460 * We need to wait until after x2apic is initialized 555 461 * before we can set MSI IRQ ops. 556 462 */ 557 - x86_platform.apic_post_init = xen_msi_init; 463 + x86_platform.apic_post_init = xen_hvm_msi_init; 558 464 #endif 559 465 return 0; 560 466 } ··· 564 470 { 565 471 int irq; 566 472 567 - #ifdef CONFIG_PCI_MSI 568 - x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; 569 - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; 570 - x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; 571 - pci_msi_ignore_mask = 1; 572 - #endif 473 + xen_setup_pci_msi(); 573 474 __acpi_register_gsi = acpi_register_gsi_xen; 574 475 __acpi_unregister_gsi = NULL; 575 476 /*
+8 -8
arch/x86/platform/uv/uv_irq.c
··· 90 90 91 91 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); 92 92 if (ret >= 0) { 93 - if (info->uv_limit == UV_AFFINITY_CPU) 93 + if (info->uv.limit == UV_AFFINITY_CPU) 94 94 irq_set_status_flags(virq, IRQ_NO_BALANCING); 95 95 else 96 96 irq_set_status_flags(virq, IRQ_MOVE_PCNTXT); 97 97 98 - chip_data->pnode = uv_blade_to_pnode(info->uv_blade); 99 - chip_data->offset = info->uv_offset; 98 + chip_data->pnode = uv_blade_to_pnode(info->uv.blade); 99 + chip_data->offset = info->uv.offset; 100 100 irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data, 101 - handle_percpu_irq, NULL, info->uv_name); 101 + handle_percpu_irq, NULL, info->uv.name); 102 102 } else { 103 103 kfree(chip_data); 104 104 } ··· 193 193 194 194 init_irq_alloc_info(&info, cpumask_of(cpu)); 195 195 info.type = X86_IRQ_ALLOC_TYPE_UV; 196 - info.uv_limit = limit; 197 - info.uv_blade = mmr_blade; 198 - info.uv_offset = mmr_offset; 199 - info.uv_name = irq_name; 196 + info.uv.limit = limit; 197 + info.uv.blade = mmr_blade; 198 + info.uv.offset = mmr_offset; 199 + info.uv.name = irq_name; 200 200 201 201 return irq_domain_alloc_irqs(domain, 1, 202 202 uv_blade_to_memory_nid(mmr_blade), &info);
+64 -67
drivers/iommu/amd/iommu.c
··· 730 730 } 731 731 } 732 732 } 733 - #endif /* CONFIG_IRQ_REMAP */ 733 + 734 + static void 735 + amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) 736 + { 737 + if (!irq_remapping_enabled || !dev_is_pci(dev) || 738 + pci_dev_has_special_msi_domain(to_pci_dev(dev))) 739 + return; 740 + 741 + dev_set_msi_domain(dev, iommu->msi_domain); 742 + } 743 + 744 + #else /* CONFIG_IRQ_REMAP */ 745 + static inline void 746 + amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } 747 + #endif /* !CONFIG_IRQ_REMAP */ 734 748 735 749 #define AMD_IOMMU_INT_MASK \ 736 750 (MMIO_STATUS_EVT_INT_MASK | \ ··· 2172 2158 iommu_dev = ERR_PTR(ret); 2173 2159 iommu_ignore_device(dev); 2174 2160 } else { 2161 + amd_iommu_set_pci_msi_domain(dev, iommu); 2175 2162 iommu_dev = &iommu->iommu; 2176 2163 } 2177 2164 ··· 3535 3520 3536 3521 static int get_devid(struct irq_alloc_info *info) 3537 3522 { 3538 - int devid = -1; 3539 - 3540 3523 switch (info->type) { 3541 3524 case X86_IRQ_ALLOC_TYPE_IOAPIC: 3542 - devid = get_ioapic_devid(info->ioapic_id); 3543 - break; 3525 + case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT: 3526 + return get_ioapic_devid(info->devid); 3544 3527 case X86_IRQ_ALLOC_TYPE_HPET: 3545 - devid = get_hpet_devid(info->hpet_id); 3546 - break; 3547 - case X86_IRQ_ALLOC_TYPE_MSI: 3548 - case X86_IRQ_ALLOC_TYPE_MSIX: 3549 - devid = get_device_id(&info->msi_dev->dev); 3550 - break; 3528 + case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT: 3529 + return get_hpet_devid(info->devid); 3530 + case X86_IRQ_ALLOC_TYPE_PCI_MSI: 3531 + case X86_IRQ_ALLOC_TYPE_PCI_MSIX: 3532 + return get_device_id(msi_desc_to_dev(info->desc)); 3551 3533 default: 3552 - BUG_ON(1); 3553 - break; 3534 + WARN_ON_ONCE(1); 3535 + return -1; 3554 3536 } 3555 - 3556 - return devid; 3557 3537 } 3558 3538 3559 - static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info) 3539 + static struct irq_domain *get_irq_domain_for_devid(struct irq_alloc_info *info, 3540 + int 
devid) 3560 3541 { 3561 - struct amd_iommu *iommu; 3542 + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; 3543 + 3544 + if (!iommu) 3545 + return NULL; 3546 + 3547 + switch (info->type) { 3548 + case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT: 3549 + case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT: 3550 + return iommu->ir_domain; 3551 + default: 3552 + WARN_ON_ONCE(1); 3553 + return NULL; 3554 + } 3555 + } 3556 + 3557 + static struct irq_domain *get_irq_domain(struct irq_alloc_info *info) 3558 + { 3562 3559 int devid; 3563 3560 3564 3561 if (!info) 3565 3562 return NULL; 3566 3563 3567 3564 devid = get_devid(info); 3568 - if (devid >= 0) { 3569 - iommu = amd_iommu_rlookup_table[devid]; 3570 - if (iommu) 3571 - return iommu->ir_domain; 3572 - } 3573 - 3574 - return NULL; 3575 - } 3576 - 3577 - static struct irq_domain *get_irq_domain(struct irq_alloc_info *info) 3578 - { 3579 - struct amd_iommu *iommu; 3580 - int devid; 3581 - 3582 - if (!info) 3565 + if (devid < 0) 3583 3566 return NULL; 3584 - 3585 - switch (info->type) { 3586 - case X86_IRQ_ALLOC_TYPE_MSI: 3587 - case X86_IRQ_ALLOC_TYPE_MSIX: 3588 - devid = get_device_id(&info->msi_dev->dev); 3589 - if (devid < 0) 3590 - return NULL; 3591 - 3592 - iommu = amd_iommu_rlookup_table[devid]; 3593 - if (iommu) 3594 - return iommu->msi_domain; 3595 - break; 3596 - default: 3597 - break; 3598 - } 3599 - 3600 - return NULL; 3567 + return get_irq_domain_for_devid(info, devid); 3601 3568 } 3602 3569 3603 3570 struct irq_remap_ops amd_iommu_irq_ops = { ··· 3588 3591 .disable = amd_iommu_disable, 3589 3592 .reenable = amd_iommu_reenable, 3590 3593 .enable_faulting = amd_iommu_enable_faulting, 3591 - .get_ir_irq_domain = get_ir_irq_domain, 3592 3594 .get_irq_domain = get_irq_domain, 3593 3595 }; 3594 3596 ··· 3613 3617 switch (info->type) { 3614 3618 case X86_IRQ_ALLOC_TYPE_IOAPIC: 3615 3619 /* Setup IOAPIC entry */ 3616 - entry = info->ioapic_entry; 3617 - info->ioapic_entry = NULL; 3620 + entry = info->ioapic.entry; 3621 + 
info->ioapic.entry = NULL; 3618 3622 memset(entry, 0, sizeof(*entry)); 3619 3623 entry->vector = index; 3620 3624 entry->mask = 0; 3621 - entry->trigger = info->ioapic_trigger; 3622 - entry->polarity = info->ioapic_polarity; 3625 + entry->trigger = info->ioapic.trigger; 3626 + entry->polarity = info->ioapic.polarity; 3623 3627 /* Mask level triggered irqs. */ 3624 - if (info->ioapic_trigger) 3628 + if (info->ioapic.trigger) 3625 3629 entry->mask = 1; 3626 3630 break; 3627 3631 3628 3632 case X86_IRQ_ALLOC_TYPE_HPET: 3629 - case X86_IRQ_ALLOC_TYPE_MSI: 3630 - case X86_IRQ_ALLOC_TYPE_MSIX: 3633 + case X86_IRQ_ALLOC_TYPE_PCI_MSI: 3634 + case X86_IRQ_ALLOC_TYPE_PCI_MSIX: 3631 3635 msg->address_hi = MSI_ADDR_BASE_HI; 3632 3636 msg->address_lo = MSI_ADDR_BASE_LO; 3633 3637 msg->data = irte_info->index; ··· 3671 3675 3672 3676 if (!info) 3673 3677 return -EINVAL; 3674 - if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI && 3675 - info->type != X86_IRQ_ALLOC_TYPE_MSIX) 3678 + if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI && 3679 + info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX) 3676 3680 return -EINVAL; 3677 3681 3678 3682 /* 3679 3683 * With IRQ remapping enabled, don't need contiguous CPU vectors 3680 3684 * to support multiple MSI interrupts. 
3681 3685 */ 3682 - if (info->type == X86_IRQ_ALLOC_TYPE_MSI) 3686 + if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) 3683 3687 info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; 3684 3688 3685 3689 devid = get_devid(info); ··· 3707 3711 iommu->irte_ops->set_allocated(table, i); 3708 3712 } 3709 3713 WARN_ON(table->min_index != 32); 3710 - index = info->ioapic_pin; 3714 + index = info->ioapic.pin; 3711 3715 } else { 3712 3716 index = -ENOMEM; 3713 3717 } 3714 - } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI || 3715 - info->type == X86_IRQ_ALLOC_TYPE_MSIX) { 3716 - bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI); 3718 + } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI || 3719 + info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) { 3720 + bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI); 3717 3721 3718 - index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev); 3722 + index = alloc_irq_index(devid, nr_irqs, align, 3723 + msi_desc_to_pci_dev(info->desc)); 3719 3724 } else { 3720 3725 index = alloc_irq_index(devid, nr_irqs, false, NULL); 3721 3726 } ··· 3729 3732 3730 3733 for (i = 0; i < nr_irqs; i++) { 3731 3734 irq_data = irq_domain_get_irq_data(domain, virq + i); 3732 - cfg = irqd_cfg(irq_data); 3733 - if (!irq_data || !cfg) { 3735 + cfg = irq_data ? irqd_cfg(irq_data) : NULL; 3736 + if (!cfg) { 3734 3737 ret = -EINVAL; 3735 3738 goto out_free_data; 3736 3739 }
+4 -4
drivers/iommu/hyperv-iommu.c
··· 101 101 * in the chip_data and hyperv_irq_remapping_activate()/hyperv_ir_set_ 102 102 * affinity() set vector and dest_apicid directly into IO-APIC entry. 103 103 */ 104 - irq_data->chip_data = info->ioapic_entry; 104 + irq_data->chip_data = info->ioapic.entry; 105 105 106 106 /* 107 107 * Hypver-V IO APIC irq affinity should be in the scope of ··· 182 182 return IRQ_REMAP_X2APIC_MODE; 183 183 } 184 184 185 - static struct irq_domain *hyperv_get_ir_irq_domain(struct irq_alloc_info *info) 185 + static struct irq_domain *hyperv_get_irq_domain(struct irq_alloc_info *info) 186 186 { 187 - if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) 187 + if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT) 188 188 return ioapic_ir_domain; 189 189 else 190 190 return NULL; ··· 193 193 struct irq_remap_ops hyperv_irq_remap_ops = { 194 194 .prepare = hyperv_prepare_irq_remapping, 195 195 .enable = hyperv_enable_irq_remapping, 196 - .get_ir_irq_domain = hyperv_get_ir_irq_domain, 196 + .get_irq_domain = hyperv_get_irq_domain, 197 197 }; 198 198 199 199 #endif
+3
drivers/iommu/intel/dmar.c
··· 316 316 if (ret < 0 && dmar_dev_scope_status == 0) 317 317 dmar_dev_scope_status = ret; 318 318 319 + if (ret >= 0) 320 + intel_irq_remap_add_device(info); 321 + 319 322 return ret; 320 323 } 321 324
+54 -65
drivers/iommu/intel/irq_remapping.c
··· 204 204 return rc; 205 205 } 206 206 207 - static struct intel_iommu *map_hpet_to_ir(u8 hpet_id) 207 + static struct irq_domain *map_hpet_to_ir(u8 hpet_id) 208 208 { 209 209 int i; 210 210 211 - for (i = 0; i < MAX_HPET_TBS; i++) 211 + for (i = 0; i < MAX_HPET_TBS; i++) { 212 212 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) 213 - return ir_hpet[i].iommu; 213 + return ir_hpet[i].iommu->ir_domain; 214 + } 214 215 return NULL; 215 216 } 216 217 217 - static struct intel_iommu *map_ioapic_to_ir(int apic) 218 + static struct intel_iommu *map_ioapic_to_iommu(int apic) 218 219 { 219 220 int i; 220 221 221 - for (i = 0; i < MAX_IO_APICS; i++) 222 + for (i = 0; i < MAX_IO_APICS; i++) { 222 223 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) 223 224 return ir_ioapic[i].iommu; 225 + } 224 226 return NULL; 225 227 } 226 228 227 - static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) 229 + static struct irq_domain *map_ioapic_to_ir(int apic) 228 230 { 229 - struct dmar_drhd_unit *drhd; 231 + struct intel_iommu *iommu = map_ioapic_to_iommu(apic); 230 232 231 - drhd = dmar_find_matched_drhd_unit(dev); 232 - if (!drhd) 233 - return NULL; 233 + return iommu ? iommu->ir_domain : NULL; 234 + } 234 235 235 - return drhd->iommu; 236 + static struct irq_domain *map_dev_to_ir(struct pci_dev *dev) 237 + { 238 + struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(dev); 239 + 240 + return drhd ? 
drhd->iommu->ir_msi_domain : NULL; 236 241 } 237 242 238 243 static int clear_entries(struct irq_2_iommu *irq_iommu) ··· 1007 1002 1008 1003 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { 1009 1004 int ioapic_id = mpc_ioapic_id(ioapic_idx); 1010 - if (!map_ioapic_to_ir(ioapic_id)) { 1005 + if (!map_ioapic_to_iommu(ioapic_id)) { 1011 1006 pr_err(FW_BUG "ioapic %d has no mapping iommu, " 1012 1007 "interrupt remapping will be disabled\n", 1013 1008 ioapic_id); ··· 1092 1087 return -1; 1093 1088 } 1094 1089 1090 + /* 1091 + * Store the MSI remapping domain pointer in the device if enabled. 1092 + * 1093 + * This is called from dmar_pci_bus_add_dev() so it works even when DMA 1094 + * remapping is disabled. Only update the pointer if the device is not 1095 + * already handled by a non default PCI/MSI interrupt domain. This protects 1096 + * e.g. VMD devices. 1097 + */ 1098 + void intel_irq_remap_add_device(struct dmar_pci_notify_info *info) 1099 + { 1100 + if (!irq_remapping_enabled || pci_dev_has_special_msi_domain(info->dev)) 1101 + return; 1102 + 1103 + dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev)); 1104 + } 1105 + 1095 1106 static void prepare_irte(struct irte *irte, int vector, unsigned int dest) 1096 1107 { 1097 1108 memset(irte, 0, sizeof(*irte)); ··· 1128 1107 irte->redir_hint = 1; 1129 1108 } 1130 1109 1131 - static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info) 1132 - { 1133 - struct intel_iommu *iommu = NULL; 1134 - 1135 - if (!info) 1136 - return NULL; 1137 - 1138 - switch (info->type) { 1139 - case X86_IRQ_ALLOC_TYPE_IOAPIC: 1140 - iommu = map_ioapic_to_ir(info->ioapic_id); 1141 - break; 1142 - case X86_IRQ_ALLOC_TYPE_HPET: 1143 - iommu = map_hpet_to_ir(info->hpet_id); 1144 - break; 1145 - case X86_IRQ_ALLOC_TYPE_MSI: 1146 - case X86_IRQ_ALLOC_TYPE_MSIX: 1147 - iommu = map_dev_to_ir(info->msi_dev); 1148 - break; 1149 - default: 1150 - BUG_ON(1); 1151 - break; 1152 - } 1153 - 1154 - return iommu ? 
iommu->ir_domain : NULL; 1155 - } 1156 - 1157 1110 static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info) 1158 1111 { 1159 - struct intel_iommu *iommu; 1160 - 1161 1112 if (!info) 1162 1113 return NULL; 1163 1114 1164 1115 switch (info->type) { 1165 - case X86_IRQ_ALLOC_TYPE_MSI: 1166 - case X86_IRQ_ALLOC_TYPE_MSIX: 1167 - iommu = map_dev_to_ir(info->msi_dev); 1168 - if (iommu) 1169 - return iommu->ir_msi_domain; 1170 - break; 1116 + case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT: 1117 + return map_ioapic_to_ir(info->devid); 1118 + case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT: 1119 + return map_hpet_to_ir(info->devid); 1171 1120 default: 1172 - break; 1121 + WARN_ON_ONCE(1); 1122 + return NULL; 1173 1123 } 1174 - 1175 - return NULL; 1176 1124 } 1177 1125 1178 1126 struct irq_remap_ops intel_irq_remap_ops = { ··· 1150 1160 .disable = disable_irq_remapping, 1151 1161 .reenable = reenable_irq_remapping, 1152 1162 .enable_faulting = enable_drhd_fault_handling, 1153 - .get_ir_irq_domain = intel_get_ir_irq_domain, 1154 1163 .get_irq_domain = intel_get_irq_domain, 1155 1164 }; 1156 1165 ··· 1273 1284 switch (info->type) { 1274 1285 case X86_IRQ_ALLOC_TYPE_IOAPIC: 1275 1286 /* Set source-id of interrupt request */ 1276 - set_ioapic_sid(irte, info->ioapic_id); 1287 + set_ioapic_sid(irte, info->devid); 1277 1288 apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n", 1278 - info->ioapic_id, irte->present, irte->fpd, 1289 + info->devid, irte->present, irte->fpd, 1279 1290 irte->dst_mode, irte->redir_hint, 1280 1291 irte->trigger_mode, irte->dlvry_mode, 1281 1292 irte->avail, irte->vector, irte->dest_id, 1282 1293 irte->sid, irte->sq, irte->svt); 1283 1294 1284 - entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry; 1285 - info->ioapic_entry = NULL; 1295 + entry = (struct IR_IO_APIC_route_entry *)info->ioapic.entry; 1296 + 
info->ioapic.entry = NULL; 1286 1297 memset(entry, 0, sizeof(*entry)); 1287 1298 entry->index2 = (index >> 15) & 0x1; 1288 1299 entry->zero = 0; ··· 1292 1303 * IO-APIC RTE will be configured with virtual vector. 1293 1304 * irq handler will do the explicit EOI to the io-apic. 1294 1305 */ 1295 - entry->vector = info->ioapic_pin; 1306 + entry->vector = info->ioapic.pin; 1296 1307 entry->mask = 0; /* enable IRQ */ 1297 - entry->trigger = info->ioapic_trigger; 1298 - entry->polarity = info->ioapic_polarity; 1299 - if (info->ioapic_trigger) 1308 + entry->trigger = info->ioapic.trigger; 1309 + entry->polarity = info->ioapic.polarity; 1310 + if (info->ioapic.trigger) 1300 1311 entry->mask = 1; /* Mask level triggered irqs. */ 1301 1312 break; 1302 1313 1303 1314 case X86_IRQ_ALLOC_TYPE_HPET: 1304 - case X86_IRQ_ALLOC_TYPE_MSI: 1305 - case X86_IRQ_ALLOC_TYPE_MSIX: 1315 + case X86_IRQ_ALLOC_TYPE_PCI_MSI: 1316 + case X86_IRQ_ALLOC_TYPE_PCI_MSIX: 1306 1317 if (info->type == X86_IRQ_ALLOC_TYPE_HPET) 1307 - set_hpet_sid(irte, info->hpet_id); 1318 + set_hpet_sid(irte, info->devid); 1308 1319 else 1309 - set_msi_sid(irte, info->msi_dev); 1320 + set_msi_sid(irte, msi_desc_to_pci_dev(info->desc)); 1310 1321 1311 1322 msg->address_hi = MSI_ADDR_BASE_HI; 1312 1323 msg->data = sub_handle; ··· 1357 1368 1358 1369 if (!info || !iommu) 1359 1370 return -EINVAL; 1360 - if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI && 1361 - info->type != X86_IRQ_ALLOC_TYPE_MSIX) 1371 + if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI && 1372 + info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX) 1362 1373 return -EINVAL; 1363 1374 1364 1375 /* 1365 1376 * With IRQ remapping enabled, don't need contiguous CPU vectors 1366 1377 * to support multiple MSI interrupts. 
1367 1378 */ 1368 - if (info->type == X86_IRQ_ALLOC_TYPE_MSI) 1379 + if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) 1369 1380 info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; 1370 1381 1371 1382 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+1 -22
drivers/iommu/irq_remapping.c
··· 160 160 } 161 161 162 162 /** 163 - * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU 164 - * device serving request @info 165 - * @info: interrupt allocation information, used to identify the IOMMU device 166 - * 167 - * It's used to get parent irqdomain for HPET and IOAPIC irqdomains. 168 - * Returns pointer to IRQ domain, or NULL on failure. 169 - */ 170 - struct irq_domain * 171 - irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info) 172 - { 173 - if (!remap_ops || !remap_ops->get_ir_irq_domain) 174 - return NULL; 175 - 176 - return remap_ops->get_ir_irq_domain(info); 177 - } 178 - 179 - /** 180 163 * irq_remapping_get_irq_domain - Get the irqdomain serving the request @info 181 164 * @info: interrupt allocation information, used to identify the IOMMU device 182 165 * 183 - * There will be one PCI MSI/MSIX irqdomain associated with each interrupt 184 - * remapping device, so this interface is used to retrieve the PCI MSI/MSIX 185 - * irqdomain serving request @info. 186 166 * Returns pointer to IRQ domain, or NULL on failure. 187 167 */ 188 - struct irq_domain * 189 - irq_remapping_get_irq_domain(struct irq_alloc_info *info) 168 + struct irq_domain *irq_remapping_get_irq_domain(struct irq_alloc_info *info) 190 169 { 191 170 if (!remap_ops || !remap_ops->get_irq_domain) 192 171 return NULL;
+1 -4
drivers/iommu/irq_remapping.h
··· 43 43 /* Enable fault handling */ 44 44 int (*enable_faulting)(void); 45 45 46 - /* Get the irqdomain associated the IOMMU device */ 47 - struct irq_domain *(*get_ir_irq_domain)(struct irq_alloc_info *); 48 - 49 - /* Get the MSI irqdomain associated with the IOMMU device */ 46 + /* Get the irqdomain associated to IOMMU device */ 50 47 struct irq_domain *(*get_irq_domain)(struct irq_alloc_info *); 51 48 }; 52 49
+3
drivers/pci/Kconfig
··· 56 56 depends on PCI_MSI 57 57 select GENERIC_MSI_IRQ_DOMAIN 58 58 59 + config PCI_MSI_ARCH_FALLBACKS 60 + bool 61 + 59 62 config PCI_QUIRKS 60 63 default y 61 64 bool "Enable PCI quirk workarounds" if EXPERT
+3
drivers/pci/controller/Kconfig
··· 41 41 bool "NVIDIA Tegra PCIe controller" 42 42 depends on ARCH_TEGRA || COMPILE_TEST 43 43 depends on PCI_MSI_IRQ_DOMAIN 44 + select PCI_MSI_ARCH_FALLBACKS 44 45 help 45 46 Say Y here if you want support for the PCIe host controller found 46 47 on NVIDIA Tegra SoCs. ··· 68 67 bool "Renesas R-Car PCIe host controller" 69 68 depends on ARCH_RENESAS || COMPILE_TEST 70 69 depends on PCI_MSI_IRQ_DOMAIN 70 + select PCI_MSI_ARCH_FALLBACKS 71 71 help 72 72 Say Y here if you want PCIe controller support on R-Car SoCs in host 73 73 mode. ··· 97 95 config PCIE_XILINX 98 96 bool "Xilinx AXI PCIe host bridge support" 99 97 depends on OF || COMPILE_TEST 98 + select PCI_MSI_ARCH_FALLBACKS 100 99 help 101 100 Say 'Y' here if you want kernel to support the Xilinx AXI PCIe 102 101 Host Bridge driver.
-8
drivers/pci/controller/pci-hyperv.c
··· 1531 1531 .irq_unmask = hv_irq_unmask, 1532 1532 }; 1533 1533 1534 - static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info, 1535 - msi_alloc_info_t *arg) 1536 - { 1537 - return arg->msi_hwirq; 1538 - } 1539 - 1540 1534 static struct msi_domain_ops hv_msi_ops = { 1541 - .get_hwirq = hv_msi_domain_ops_get_hwirq, 1542 1535 .msi_prepare = pci_msi_prepare, 1543 - .set_desc = pci_msi_set_desc, 1544 1536 .msi_free = hv_msi_free, 1545 1537 }; 1546 1538
+8 -1
drivers/pci/controller/vmd.c
··· 573 573 return -ENODEV; 574 574 575 575 vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, 576 - x86_vector_domain); 576 + NULL); 577 + 577 578 if (!vmd->irq_domain) { 578 579 irq_domain_free_fwnode(fn); 579 580 return -ENODEV; 580 581 } 582 + 583 + /* 584 + * Override the irq domain bus token so the domain can be distinguished 585 + * from a regular PCI/MSI domain. 586 + */ 587 + irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI); 581 588 582 589 pci_add_resource(&resources, &vmd->resources[0]); 583 590 pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+28 -10
drivers/pci/msi.c
··· 58 58 #define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs 59 59 #endif 60 60 61 + #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS 61 62 /* Arch hooks */ 62 - 63 63 int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) 64 64 { 65 65 struct msi_controller *chip = dev->bus->msi; ··· 132 132 { 133 133 return default_teardown_msi_irqs(dev); 134 134 } 135 + #endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */ 135 136 136 137 static void default_restore_msi_irq(struct pci_dev *dev, int irq) 137 138 { ··· 1347 1346 1348 1347 /** 1349 1348 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source 1350 - * @dev: Pointer to the PCI device 1351 1349 * @desc: Pointer to the MSI descriptor 1352 1350 * 1353 1351 * The ID number is only used within the irqdomain. 1354 1352 */ 1355 - irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, 1356 - struct msi_desc *desc) 1353 + static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc) 1357 1354 { 1355 + struct pci_dev *dev = msi_desc_to_pci_dev(desc); 1356 + 1358 1357 return (irq_hw_number_t)desc->msi_attrib.entry_nr | 1359 1358 pci_dev_id(dev) << 11 | 1360 1359 (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; ··· 1402 1401 return error; 1403 1402 } 1404 1403 1405 - #ifdef GENERIC_MSI_DOMAIN_OPS 1406 1404 static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, 1407 1405 struct msi_desc *desc) 1408 1406 { 1409 1407 arg->desc = desc; 1410 - arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc), 1411 - desc); 1408 + arg->hwirq = pci_msi_domain_calc_hwirq(desc); 1412 1409 } 1413 - #else 1414 - #define pci_msi_domain_set_desc NULL 1415 - #endif 1416 1410 1417 1411 static struct msi_domain_ops pci_msi_domain_ops_default = { 1418 1412 .set_desc = pci_msi_domain_set_desc, ··· 1554 1558 DOMAIN_BUS_PCI_MSI); 1555 1559 return dom; 1556 1560 } 1561 + 1562 + /** 1563 + * pci_dev_has_special_msi_domain - Check whether the device is handled by 1564 + * a non-standard PCI-MSI domain 1565 + * @pdev: 
The PCI device to check. 1566 + * 1567 + * Returns: True if the device irqdomain or the bus irqdomain is 1568 + * non-standard PCI/MSI. 1569 + */ 1570 + bool pci_dev_has_special_msi_domain(struct pci_dev *pdev) 1571 + { 1572 + struct irq_domain *dom = dev_get_msi_domain(&pdev->dev); 1573 + 1574 + if (!dom) 1575 + dom = dev_get_msi_domain(&pdev->bus->dev); 1576 + 1577 + if (!dom) 1578 + return true; 1579 + 1580 + return dom->bus_token != DOMAIN_BUS_PCI_MSI; 1581 + } 1582 + 1557 1583 #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+7
include/linux/intel-iommu.h
··· 425 425 int free_cnt; 426 426 }; 427 427 428 + struct dmar_pci_notify_info; 429 + 428 430 #ifdef CONFIG_IRQ_REMAP 429 431 /* 1MB - maximum possible interrupt remapping table size */ 430 432 #define INTR_REMAP_PAGE_ORDER 8 ··· 441 439 struct irte *base; 442 440 unsigned long *bitmap; 443 441 }; 442 + 443 + void intel_irq_remap_add_device(struct dmar_pci_notify_info *info); 444 + #else 445 + static inline void 446 + intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { } 444 447 #endif 445 448 446 449 struct iommu_flush {
+1
include/linux/irqdomain.h
··· 84 84 DOMAIN_BUS_FSL_MC_MSI, 85 85 DOMAIN_BUS_TI_SCI_INTA_MSI, 86 86 DOMAIN_BUS_WAKEUP, 87 + DOMAIN_BUS_VMD_MSI, 87 88 }; 88 89 89 90 /**
+54 -7
include/linux/msi.h
··· 193 193 void pci_msi_unmask_irq(struct irq_data *data); 194 194 195 195 /* 196 - * The arch hooks to setup up msi irqs. Those functions are 197 - * implemented as weak symbols so that they /can/ be overriden by 198 - * architecture specific code if needed. 196 + * The arch hooks to setup up msi irqs. Default functions are implemented 197 + * as weak symbols so that they /can/ be overriden by architecture specific 198 + * code if needed. These hooks must be enabled by the architecture or by 199 + * drivers which depend on them via msi_controller based MSI handling. 200 + * 201 + * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by 202 + * stubs with warnings. 199 203 */ 204 + #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS 200 205 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); 201 206 void arch_teardown_msi_irq(unsigned int irq); 202 207 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 203 208 void arch_teardown_msi_irqs(struct pci_dev *dev); 204 - void arch_restore_msi_irqs(struct pci_dev *dev); 205 - 206 209 void default_teardown_msi_irqs(struct pci_dev *dev); 210 + #else 211 + static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 212 + { 213 + WARN_ON_ONCE(1); 214 + return -ENODEV; 215 + } 216 + 217 + static inline void arch_teardown_msi_irqs(struct pci_dev *dev) 218 + { 219 + WARN_ON_ONCE(1); 220 + } 221 + #endif 222 + 223 + /* 224 + * The restore hooks are still available as they are useful even 225 + * for fully irq domain based setups. Courtesy to XEN/X86. 
226 + */ 227 + void arch_restore_msi_irqs(struct pci_dev *dev); 207 228 void default_restore_msi_irqs(struct pci_dev *dev); 208 229 209 230 struct msi_controller { ··· 262 241 * @msi_finish: Optional callback to finalize the allocation 263 242 * @set_desc: Set the msi descriptor for an interrupt 264 243 * @handle_error: Optional error handler if the allocation fails 244 + * @domain_alloc_irqs: Optional function to override the default allocation 245 + * function. 246 + * @domain_free_irqs: Optional function to override the default free 247 + * function. 265 248 * 266 249 * @get_hwirq, @msi_init and @msi_free are callbacks used by 267 250 * msi_create_irq_domain() and related interfaces ··· 273 248 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error 274 249 * are callbacks used by msi_domain_alloc_irqs() and related 275 250 * interfaces which are based on msi_desc. 251 + * 252 + * @domain_alloc_irqs, @domain_free_irqs can be used to override the 253 + * default allocation/free functions (__msi_domain_alloc/free_irqs). This 254 + * is initially for a wrapper around XEN's separate MSI universe which can't 255 + * be wrapped into the regular irq domains concepts by mere mortals. This 256 + * allows to universally use msi_domain_alloc/free_irqs without having to 257 + * special case XEN all over the place. 258 + * 259 + * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs 260 + * are set to the default implementation if NULL and even when 261 + * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and 262 + * because these callbacks are obviously mandatory. 263 + * 264 + * This is NOT meant to be abused, but it can be useful to build wrappers 265 + * for specialized MSI irq domains which need extra work before and after 266 + * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs(). 
276 267 */ 277 268 struct msi_domain_ops { 278 269 irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, ··· 311 270 struct msi_desc *desc); 312 271 int (*handle_error)(struct irq_domain *domain, 313 272 struct msi_desc *desc, int error); 273 + int (*domain_alloc_irqs)(struct irq_domain *domain, 274 + struct device *dev, int nvec); 275 + void (*domain_free_irqs)(struct irq_domain *domain, 276 + struct device *dev); 314 277 }; 315 278 316 279 /** ··· 372 327 struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, 373 328 struct msi_domain_info *info, 374 329 struct irq_domain *parent); 330 + int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, 331 + int nvec); 375 332 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, 376 333 int nvec); 334 + void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); 377 335 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); 378 336 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); 379 337 ··· 417 369 struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, 418 370 struct msi_domain_info *info, 419 371 struct irq_domain *parent); 420 - irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, 421 - struct msi_desc *desc); 422 372 int pci_msi_domain_check_cap(struct irq_domain *domain, 423 373 struct msi_domain_info *info, struct device *dev); 424 374 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); 425 375 struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); 376 + bool pci_dev_has_special_msi_domain(struct pci_dev *pdev); 426 377 #else 427 378 static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) 428 379 {
+4 -5
kernel/irq/chip.c
··· 1568 1568 */ 1569 1569 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) 1570 1570 { 1571 - struct irq_data *pos = NULL; 1571 + struct irq_data *pos; 1572 1572 1573 - #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1574 - for (; data; data = data->parent_data) 1575 - #endif 1573 + for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) { 1576 1574 if (data->chip && data->chip->irq_compose_msi_msg) 1577 1575 pos = data; 1576 + } 1577 + 1578 1578 if (!pos) 1579 1579 return -ENOSYS; 1580 1580 1581 1581 pos->chip->irq_compose_msi_msg(pos, msg); 1582 - 1583 1582 return 0; 1584 1583 } 1585 1584
+9
kernel/irq/internals.h
··· 473 473 } 474 474 #endif 475 475 476 + static inline struct irq_data *irqd_get_parent_data(struct irq_data *irqd) 477 + { 478 + #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 479 + return irqd->parent_data; 480 + #else 481 + return NULL; 482 + #endif 483 + } 484 + 476 485 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS 477 486 #include <linux/debugfs.h> 478 487
+54 -29
kernel/irq/msi.c
··· 187 187 .deactivate = msi_domain_deactivate, 188 188 }; 189 189 190 - #ifdef GENERIC_MSI_DOMAIN_OPS 191 190 static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info, 192 191 msi_alloc_info_t *arg) 193 192 { ··· 205 206 { 206 207 arg->desc = desc; 207 208 } 208 - #else 209 - #define msi_domain_ops_get_hwirq NULL 210 - #define msi_domain_ops_prepare NULL 211 - #define msi_domain_ops_set_desc NULL 212 - #endif /* !GENERIC_MSI_DOMAIN_OPS */ 213 209 214 210 static int msi_domain_ops_init(struct irq_domain *domain, 215 211 struct msi_domain_info *info, ··· 229 235 } 230 236 231 237 static struct msi_domain_ops msi_domain_ops_default = { 232 - .get_hwirq = msi_domain_ops_get_hwirq, 233 - .msi_init = msi_domain_ops_init, 234 - .msi_check = msi_domain_ops_check, 235 - .msi_prepare = msi_domain_ops_prepare, 236 - .set_desc = msi_domain_ops_set_desc, 238 + .get_hwirq = msi_domain_ops_get_hwirq, 239 + .msi_init = msi_domain_ops_init, 240 + .msi_check = msi_domain_ops_check, 241 + .msi_prepare = msi_domain_ops_prepare, 242 + .set_desc = msi_domain_ops_set_desc, 243 + .domain_alloc_irqs = __msi_domain_alloc_irqs, 244 + .domain_free_irqs = __msi_domain_free_irqs, 237 245 }; 238 246 239 247 static void msi_domain_update_dom_ops(struct msi_domain_info *info) ··· 246 250 info->ops = &msi_domain_ops_default; 247 251 return; 248 252 } 253 + 254 + if (ops->domain_alloc_irqs == NULL) 255 + ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs; 256 + if (ops->domain_free_irqs == NULL) 257 + ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs; 258 + 259 + if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS)) 260 + return; 249 261 250 262 if (ops->get_hwirq == NULL) 251 263 ops->get_hwirq = msi_domain_ops_default.get_hwirq; ··· 288 284 { 289 285 struct irq_domain *domain; 290 286 291 - if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) 292 - msi_domain_update_dom_ops(info); 287 + msi_domain_update_dom_ops(info); 293 288 if (info->flags & 
MSI_FLAG_USE_DEF_CHIP_OPS) 294 289 msi_domain_update_chip_ops(info); 295 290 ··· 373 370 { 374 371 struct msi_desc *desc; 375 372 376 - if (domain->bus_token != DOMAIN_BUS_PCI_MSI) 373 + switch(domain->bus_token) { 374 + case DOMAIN_BUS_PCI_MSI: 375 + case DOMAIN_BUS_VMD_MSI: 376 + break; 377 + default: 377 378 return false; 379 + } 378 380 379 381 if (!(info->flags & MSI_FLAG_MUST_REACTIVATE)) 380 382 return false; ··· 395 387 return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit; 396 388 } 397 389 398 - /** 399 - * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain 400 - * @domain: The domain to allocate from 401 - * @dev: Pointer to device struct of the device for which the interrupts 402 - * are allocated 403 - * @nvec: The number of interrupts to allocate 404 - * 405 - * Returns 0 on success or an error code. 406 - */ 407 - int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, 408 - int nvec) 390 + int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, 391 + int nvec) 409 392 { 410 393 struct msi_domain_info *info = domain->host_data; 411 394 struct msi_domain_ops *ops = info->ops; ··· 490 491 } 491 492 492 493 /** 493 - * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated tp @dev 494 - * @domain: The domain to managing the interrupts 494 + * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain 495 + * @domain: The domain to allocate from 495 496 * @dev: Pointer to device struct of the device for which the interrupts 496 - * are free 497 + * are allocated 498 + * @nvec: The number of interrupts to allocate 499 + * 500 + * Returns 0 on success or an error code. 
497 501 */ 498 - void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) 502 + int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, 503 + int nvec) 504 + { 505 + struct msi_domain_info *info = domain->host_data; 506 + struct msi_domain_ops *ops = info->ops; 507 + 508 + return ops->domain_alloc_irqs(domain, dev, nvec); 509 + } 510 + 511 + void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) 499 512 { 500 513 struct msi_desc *desc; 501 514 ··· 522 511 desc->irq = 0; 523 512 } 524 513 } 514 + } 515 + 516 + /** 517 + * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev 518 + * @domain: The domain managing the interrupts 519 + * @dev: Pointer to device struct of the device for which the interrupts 520 + * are free 521 + */ 522 + void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) 523 + { 524 + struct msi_domain_info *info = domain->host_data; 525 + struct msi_domain_ops *ops = info->ops; 526 + 527 + return ops->domain_free_irqs(domain, dev); 525 528 } 526 529 527 530 /**