Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-6.15-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

- cleanup: remove an unused function

- add support for a XenServer-specific virtual PCI device

- fix the handling of a sparse Xen hypervisor symbol table

- avoid warnings when building the kernel with gcc 15

- fix use of devices behind a VMD bridge when running as a Xen PV dom0

* tag 'for-linus-6.15-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
PCI/MSI: Convert pci_msi_ignore_mask to per MSI domain flag
PCI: vmd: Disable MSI remapping bypass under Xen
xen/pci: Do not register devices with segments >= 0x10000
xen/pciback: Remove unused pcistub_get_pci_dev
xenfs/xensyms: respect hypervisor's "next" indication
xen/mcelog: Add __nonstring annotations for unterminated strings
xen: Add support for XenServer 6.1 platform device

+85 -49
+2 -6
arch/x86/pci/xen.c
··· 436 436 }; 437 437 438 438 static struct msi_domain_info xen_pci_msi_domain_info = { 439 - .flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS, 439 + .flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | 440 + MSI_FLAG_DEV_SYSFS | MSI_FLAG_NO_MASK, 440 441 .ops = &xen_pci_msi_domain_ops, 441 442 }; 442 443 ··· 485 484 * in allocating the native domain and never use it. 486 485 */ 487 486 x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain; 488 - /* 489 - * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely 490 - * controlled by the hypervisor. 491 - */ 492 - pci_msi_ignore_mask = 1; 493 487 } 494 488 495 489 #else /* CONFIG_PCI_MSI */
+20
drivers/pci/controller/vmd.c
··· 17 17 #include <linux/rculist.h> 18 18 #include <linux/rcupdate.h> 19 19 20 + #include <xen/xen.h> 21 + 20 22 #include <asm/irqdomain.h> 21 23 22 24 #define VMD_CFGBAR 0 ··· 971 969 unsigned long features = (unsigned long) id->driver_data; 972 970 struct vmd_dev *vmd; 973 971 int err; 972 + 973 + if (xen_domain()) { 974 + /* 975 + * Xen doesn't have knowledge about devices in the VMD bus 976 + * because the config space of devices behind the VMD bridge is 977 + * not known to Xen, and hence Xen cannot discover or configure 978 + * them in any way. 979 + * 980 + * Bypass of MSI remapping won't work in that case as direct 981 + * write by Linux to the MSI entries won't result in functional 982 + * interrupts, as Xen is the entity that manages the host 983 + * interrupt controller and must configure interrupts. However 984 + * multiplexing of interrupts by the VMD bridge will work under 985 + * Xen, so force the usage of that mode which must always be 986 + * supported by VMD bridges. 987 + */ 988 + features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP; 989 + } 974 990 975 991 if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) 976 992 return -ENOMEM;
+21 -16
drivers/pci/msi/msi.c
··· 10 10 #include <linux/err.h> 11 11 #include <linux/export.h> 12 12 #include <linux/irq.h> 13 + #include <linux/irqdomain.h> 13 14 14 15 #include "../pci.h" 15 16 #include "msi.h" 16 17 17 18 int pci_msi_enable = 1; 18 - int pci_msi_ignore_mask; 19 19 20 20 /** 21 21 * pci_msi_supported - check whether MSI may be enabled on a device ··· 285 285 static int msi_setup_msi_desc(struct pci_dev *dev, int nvec, 286 286 struct irq_affinity_desc *masks) 287 287 { 288 + const struct irq_domain *d = dev_get_msi_domain(&dev->dev); 289 + const struct msi_domain_info *info = d->host_data; 288 290 struct msi_desc desc; 289 291 u16 control; 290 292 ··· 297 295 /* Lies, damned lies, and MSIs */ 298 296 if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING) 299 297 control |= PCI_MSI_FLAGS_MASKBIT; 300 - /* Respect XEN's mask disabling */ 301 - if (pci_msi_ignore_mask) 298 + if (info->flags & MSI_FLAG_NO_MASK) 302 299 control &= ~PCI_MSI_FLAGS_MASKBIT; 303 300 304 301 desc.nvec_used = nvec; ··· 604 603 */ 605 604 void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc) 606 605 { 606 + const struct irq_domain *d = dev_get_msi_domain(&dev->dev); 607 + const struct msi_domain_info *info = d->host_data; 608 + 607 609 desc->nvec_used = 1; 608 610 desc->pci.msi_attrib.is_msix = 1; 609 611 desc->pci.msi_attrib.is_64 = 1; 610 612 desc->pci.msi_attrib.default_irq = dev->irq; 611 613 desc->pci.mask_base = dev->msix_base; 612 - desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask && 614 + desc->pci.msi_attrib.can_mask = !(info->flags & MSI_FLAG_NO_MASK) && 613 615 !desc->pci.msi_attrib.is_virtual; 614 616 615 617 if (desc->pci.msi_attrib.can_mask) { ··· 661 657 { 662 658 u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; 663 659 int i; 664 - 665 - if (pci_msi_ignore_mask) 666 - return; 667 660 668 661 for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) 669 662 writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); ··· 715 714 static int msix_capability_init(struct pci_dev *dev, struct 
msix_entry *entries, 716 715 int nvec, struct irq_affinity *affd) 717 716 { 717 + const struct irq_domain *d = dev_get_msi_domain(&dev->dev); 718 + const struct msi_domain_info *info = d->host_data; 718 719 int ret, tsize; 719 720 u16 control; 720 721 ··· 747 744 /* Disable INTX */ 748 745 pci_intx_for_msi(dev, 0); 749 746 750 - /* 751 - * Ensure that all table entries are masked to prevent 752 - * stale entries from firing in a crash kernel. 753 - * 754 - * Done late to deal with a broken Marvell NVME device 755 - * which takes the MSI-X mask bits into account even 756 - * when MSI-X is disabled, which prevents MSI delivery. 757 - */ 758 - msix_mask_all(dev->msix_base, tsize); 747 + if (!(info->flags & MSI_FLAG_NO_MASK)) { 748 + /* 749 + * Ensure that all table entries are masked to prevent 750 + * stale entries from firing in a crash kernel. 751 + * 752 + * Done late to deal with a broken Marvell NVME device 753 + * which takes the MSI-X mask bits into account even 754 + * when MSI-X is disabled, which prevents MSI delivery. 755 + */ 756 + msix_mask_all(dev->msix_base, tsize); 757 + } 759 758 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); 760 759 761 760 pcibios_free_irq(dev);
+32
drivers/xen/pci.c
··· 43 43 pci_mcfg_reserved = true; 44 44 } 45 45 #endif 46 + 47 + if (pci_domain_nr(pci_dev->bus) >> 16) { 48 + /* 49 + * The hypercall interface is limited to 16bit PCI segment 50 + * values, do not attempt to register devices with Xen in 51 + * segments greater or equal than 0x10000. 52 + */ 53 + dev_info(dev, 54 + "not registering with Xen: invalid PCI segment\n"); 55 + return 0; 56 + } 57 + 46 58 if (pci_seg_supported) { 47 59 DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1); 48 60 ··· 161 149 int r; 162 150 struct pci_dev *pci_dev = to_pci_dev(dev); 163 151 152 + if (pci_domain_nr(pci_dev->bus) >> 16) { 153 + /* 154 + * The hypercall interface is limited to 16bit PCI segment 155 + * values. 156 + */ 157 + dev_info(dev, 158 + "not unregistering with Xen: invalid PCI segment\n"); 159 + return 0; 160 + } 161 + 164 162 if (pci_seg_supported) { 165 163 struct physdev_pci_device device = { 166 164 .seg = pci_domain_nr(pci_dev->bus), ··· 203 181 .dev.devfn = dev->devfn, 204 182 .flags = PCI_DEVICE_RESET_FLR, 205 183 }; 184 + 185 + if (pci_domain_nr(dev->bus) >> 16) { 186 + /* 187 + * The hypercall interface is limited to 16bit PCI segment 188 + * values. 189 + */ 190 + dev_info(&dev->dev, 191 + "unable to notify Xen of device reset: invalid PCI segment\n"); 192 + return 0; 193 + } 206 194 207 195 return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device); 208 196 }
+4
drivers/xen/platform-pci.c
··· 26 26 27 27 #define DRV_NAME "xen-platform-pci" 28 28 29 + #define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002 30 + 29 31 static unsigned long platform_mmio; 30 32 static unsigned long platform_mmio_alloc; 31 33 static unsigned long platform_mmiolen; ··· 175 173 176 174 static const struct pci_device_id platform_pci_tbl[] = { 177 175 {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM, 176 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 177 + {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61, 178 178 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 179 179 {0,} 180 180 };
-20
drivers/xen/xen-pciback/pci_stub.c
··· 262 262 return found_dev; 263 263 } 264 264 265 - struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev, 266 - struct pci_dev *dev) 267 - { 268 - struct pcistub_device *psdev; 269 - struct pci_dev *found_dev = NULL; 270 - unsigned long flags; 271 - 272 - spin_lock_irqsave(&pcistub_devices_lock, flags); 273 - 274 - list_for_each_entry(psdev, &pcistub_devices, dev_list) { 275 - if (psdev->dev == dev) { 276 - found_dev = pcistub_device_get_pci_dev(pdev, psdev); 277 - break; 278 - } 279 - } 280 - 281 - spin_unlock_irqrestore(&pcistub_devices_lock, flags); 282 - return found_dev; 283 - } 284 - 285 265 /* 286 266 * Called when: 287 267 * - XenBus state has been reconfigure (pci unplug). See xen_pcibk_remove_device
-2
drivers/xen/xen-pciback/pciback.h
··· 67 67 struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev, 68 68 int domain, int bus, 69 69 int slot, int func); 70 - struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev, 71 - struct pci_dev *dev); 72 70 void pcistub_put_pci_dev(struct pci_dev *dev); 73 71 74 72 static inline bool xen_pcibk_pv_support(void)
+2 -2
drivers/xen/xenfs/xensyms.c
··· 48 48 return -ENOMEM; 49 49 50 50 set_xen_guest_handle(symdata->name, xs->name); 51 - symdata->symnum--; /* Rewind */ 51 + symdata->symnum = symnum; /* Rewind */ 52 52 53 53 ret = HYPERVISOR_platform_op(&xs->op); 54 54 if (ret < 0) ··· 78 78 { 79 79 struct xensyms *xs = m->private; 80 80 81 - xs->op.u.symdata.symnum = ++(*pos); 81 + *pos = xs->op.u.symdata.symnum; 82 82 83 83 if (xensyms_next_sym(xs)) 84 84 return NULL;
+2 -1
include/linux/msi.h
··· 73 73 }; 74 74 }; 75 75 76 - extern int pci_msi_ignore_mask; 77 76 /* Helper functions */ 78 77 struct msi_desc; 79 78 struct pci_dev; ··· 557 558 MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20), 558 559 /* PCI MSIs cannot be steered separately to CPU cores */ 559 560 MSI_FLAG_NO_AFFINITY = (1 << 21), 561 + /* Inhibit usage of entry masking */ 562 + MSI_FLAG_NO_MASK = (1 << 22), 560 563 }; 561 564 562 565 /*
+1 -1
include/xen/interface/xen-mca.h
··· 372 372 #define XEN_MCE_LOG_LEN 32 373 373 374 374 struct xen_mce_log { 375 - char signature[12]; /* "MACHINECHECK" */ 375 + char signature[12] __nonstring; /* "MACHINECHECK" */ 376 376 unsigned len; /* = XEN_MCE_LOG_LEN */ 377 377 unsigned next; 378 378 unsigned flags;
+1 -1
kernel/irq/msi.c
··· 1144 1144 if (!(info->flags & MSI_FLAG_MUST_REACTIVATE)) 1145 1145 return false; 1146 1146 1147 - if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask) 1147 + if (info->flags & MSI_FLAG_NO_MASK) 1148 1148 return false; 1149 1149 1150 1150 /*