Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: add pci_iomap_range

Virtio drivers should map the part of the range they need, not
necessarily all of it.
To this end, support mapping ranges within BAR on s390.
Since multiple ranges can now be mapped within a BAR, we keep track of
the number of mappings created, and only clear out the mapping for a BAR
when this number reaches 0.

Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: linux-pci@vger.kernel.org
Tested-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

Authored by Michael S. Tsirkin; committed by Rusty Russell.
Commit 8cfc99b5 (parent eb29d8d2)

+28 -7
+1
arch/s390/include/asm/pci_io.h
@@ -16,6 +16,7 @@
 struct zpci_iomap_entry {
 	u32 fh;
 	u8 bar;
+	u16 count;
 };
 
 extern struct zpci_iomap_entry *zpci_iomap_start;
+27 -7
arch/s390/pci/pci.c
@@ -259,22 +259,32 @@
 }
 
 /* Create a virtual mapping cookie for a PCI BAR */
-void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
+void __iomem *pci_iomap_range(struct pci_dev *pdev,
+			      int bar,
+			      unsigned long offset,
+			      unsigned long max)
 {
 	struct zpci_dev *zdev = get_zdev(pdev);
 	u64 addr;
···
 	idx = zdev->bars[bar].map_idx;
 	spin_lock(&zpci_iomap_lock);
-	zpci_iomap_start[idx].fh = zdev->fh;
-	zpci_iomap_start[idx].bar = bar;
+	if (zpci_iomap_start[idx].count++) {
+		BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
+		       zpci_iomap_start[idx].bar != bar);
+	} else {
+		zpci_iomap_start[idx].fh = zdev->fh;
+		zpci_iomap_start[idx].bar = bar;
+	}
+	/* Detect overrun */
+	BUG_ON(!zpci_iomap_start[idx].count);
 	spin_unlock(&zpci_iomap_lock);
 
 	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
-	return (void __iomem *) addr;
+	return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap);
+EXPORT_SYMBOL_GPL(pci_iomap_range);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
 
 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 {
···
 	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
 	spin_lock(&zpci_iomap_lock);
-	zpci_iomap_start[idx].fh = 0;
-	zpci_iomap_start[idx].bar = 0;
+	/* Detect underrun */
+	BUG_ON(!zpci_iomap_start[idx].count);
+	if (!--zpci_iomap_start[idx].count) {
+		zpci_iomap_start[idx].fh = 0;
+		zpci_iomap_start[idx].bar = 0;
+	}
 	spin_unlock(&zpci_iomap_lock);
 }
 EXPORT_SYMBOL_GPL(pci_iounmap);