Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] sort the devres mess out

* Split the implementation-agnostic stuff in separate files.
* Make sure that targets using non-default request_irq() pull
kernel/irq/devres.o
* Introduce new symbols (HAS_IOPORT and HAS_IOMEM) defaulting to positive;
allow architectures to turn them off (we needed these symbols anyway for
dependencies of quite a few drivers).
* protect the ioport-related parts of lib/devres.o with CONFIG_HAS_IOPORT.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Al Viro and committed by Linus Torvalds
5ea81769 2835fdfa

+442 -397
+5
arch/arm/Kconfig
··· 29 29 bool 30 30 default y 31 31 32 + config NO_IOPORT 33 + bool 34 + default n 35 + 32 36 config EISA 33 37 bool 34 38 ---help--- ··· 302 298 select TIMER_ACORN 303 299 select ARCH_MAY_HAVE_PC_FDC 304 300 select ISA_DMA_API 301 + select NO_IOPORT 305 302 help 306 303 On the Acorn Risc-PC, Linux can support the internal IDE disk and 307 304 CD-ROM interface, serial and parallel port, and the floppy drive.
+3
arch/cris/Kconfig
··· 44 44 bool 45 45 default y 46 46 47 + config NO_IOPORT 48 + def_bool y 49 + 47 50 config CRIS 48 51 bool 49 52 default y
+3
arch/h8300/Kconfig
··· 57 57 bool 58 58 default y 59 59 60 + config NO_IOPORT 61 + def_bool y 62 + 60 63 config ISA 61 64 bool 62 65 default y
+3 -1
arch/h8300/kernel/Makefile
··· 6 6 7 7 obj-y := process.o traps.o ptrace.o ints.o \ 8 8 sys_h8300.o time.o semaphore.o signal.o \ 9 - setup.o gpio.o init_task.o syscalls.o 9 + setup.o gpio.o init_task.o syscalls.o devres.o 10 + 11 + devres-y = ../../../kernel/irq/devres.o 10 12 11 13 obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o
+3
arch/m32r/Kconfig
··· 28 28 bool 29 29 default y 30 30 31 + config NO_IOPORT 32 + def_bool y 33 + 31 34 source "init/Kconfig" 32 35 33 36
+3
arch/m68k/Kconfig
··· 42 42 depends on Q40 || (BROKEN && SUN3X) 43 43 default y 44 44 45 + config NO_IOPORT 46 + def_bool y 47 + 45 48 mainmenu "Linux/68k Kernel Configuration" 46 49 47 50 source "init/Kconfig"
+3 -1
arch/m68k/kernel/Makefile
··· 10 10 extra-y += vmlinux.lds 11 11 12 12 obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o \ 13 - sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o 13 + sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o 14 + 15 + devres-y = ../../../kernel/irq/devres.o 14 16 15 17 obj-$(CONFIG_PCI) += bios32.o 16 18 obj-$(CONFIG_MODULES) += module.o
+3
arch/m68knommu/Kconfig
··· 53 53 bool 54 54 default y 55 55 56 + config NO_IOPORT 57 + def_bool y 58 + 56 59 source "init/Kconfig" 57 60 58 61 menu "Processor type and features"
+3
arch/s390/Kconfig
··· 41 41 config GENERIC_TIME 42 42 def_bool y 43 43 44 + config NO_IOPORT 45 + def_bool y 46 + 44 47 mainmenu "Linux Kernel Configuration" 45 48 46 49 config S390
+3 -1
arch/sparc/kernel/Makefile
··· 12 12 sys_sparc.o sunos_asm.o systbls.o \ 13 13 time.o windows.o cpu.o devices.o sclow.o \ 14 14 tadpole.o tick14.o ptrace.o sys_solaris.o \ 15 - unaligned.o muldiv.o semaphore.o prom.o of_device.o 15 + unaligned.o muldiv.o semaphore.o prom.o of_device.o devres.o 16 + 17 + devres-y = ../../../kernel/irq/devres.o 16 18 17 19 obj-$(CONFIG_PCI) += pcic.o 18 20 obj-$(CONFIG_SUN4) += sun4setup.o
+3
arch/um/Kconfig
··· 16 16 bool 17 17 default y 18 18 19 + config NO_IOMEM 20 + def_bool y 21 + 19 22 mainmenu "Linux/Usermode Kernel Configuration" 20 23 21 24 config ISA
+3
arch/xtensa/Kconfig
··· 46 46 bool 47 47 default n 48 48 49 + config NO_IOPORT 50 + def_bool y 51 + 49 52 source "init/Kconfig" 50 53 51 54 menu "Processor type and features"
-6
include/linux/io.h
··· 43 43 unsigned long size); 44 44 void devm_iounmap(struct device *dev, void __iomem *addr); 45 45 46 - void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); 47 - void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); 48 - void __iomem * const * pcim_iomap_table(struct pci_dev *pdev); 49 - 50 - int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name); 51 - 52 46 /** 53 47 * check_signature - find BIOS signatures 54 48 * @io_addr: mmio address to check
+5
include/linux/pci.h
··· 840 840 841 841 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 842 842 843 + void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); 844 + void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); 845 + void __iomem * const * pcim_iomap_table(struct pci_dev *pdev); 846 + int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name); 847 + 843 848 extern int pci_pci_problems; 844 849 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ 845 850 #define PCIPCI_TRITON 2
+1 -1
kernel/irq/Makefile
··· 1 1 2 - obj-y := handle.o manage.o spurious.o resend.o chip.o 2 + obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o 3 3 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o 4 4 obj-$(CONFIG_PROC_FS) += proc.o 5 5 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+88
kernel/irq/devres.c
··· 1 + #include <linux/module.h> 2 + #include <linux/interrupt.h> 3 + 4 + /* 5 + * Device resource management aware IRQ request/free implementation. 6 + */ 7 + struct irq_devres { 8 + unsigned int irq; 9 + void *dev_id; 10 + }; 11 + 12 + static void devm_irq_release(struct device *dev, void *res) 13 + { 14 + struct irq_devres *this = res; 15 + 16 + free_irq(this->irq, this->dev_id); 17 + } 18 + 19 + static int devm_irq_match(struct device *dev, void *res, void *data) 20 + { 21 + struct irq_devres *this = res, *match = data; 22 + 23 + return this->irq == match->irq && this->dev_id == match->dev_id; 24 + } 25 + 26 + /** 27 + * devm_request_irq - allocate an interrupt line for a managed device 28 + * @dev: device to request interrupt for 29 + * @irq: Interrupt line to allocate 30 + * @handler: Function to be called when the IRQ occurs 31 + * @irqflags: Interrupt type flags 32 + * @devname: An ascii name for the claiming device 33 + * @dev_id: A cookie passed back to the handler function 34 + * 35 + * Except for the extra @dev argument, this function takes the 36 + * same arguments and performs the same function as 37 + * request_irq(). IRQs requested with this function will be 38 + * automatically freed on driver detach. 39 + * 40 + * If an IRQ allocated with this function needs to be freed 41 + * separately, dev_free_irq() must be used. 
42 + */ 43 + int devm_request_irq(struct device *dev, unsigned int irq, 44 + irq_handler_t handler, unsigned long irqflags, 45 + const char *devname, void *dev_id) 46 + { 47 + struct irq_devres *dr; 48 + int rc; 49 + 50 + dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres), 51 + GFP_KERNEL); 52 + if (!dr) 53 + return -ENOMEM; 54 + 55 + rc = request_irq(irq, handler, irqflags, devname, dev_id); 56 + if (rc) { 57 + kfree(dr); 58 + return rc; 59 + } 60 + 61 + dr->irq = irq; 62 + dr->dev_id = dev_id; 63 + devres_add(dev, dr); 64 + 65 + return 0; 66 + } 67 + EXPORT_SYMBOL(devm_request_irq); 68 + 69 + /** 70 + * devm_free_irq - free an interrupt 71 + * @dev: device to free interrupt for 72 + * @irq: Interrupt line to free 73 + * @dev_id: Device identity to free 74 + * 75 + * Except for the extra @dev argument, this function takes the 76 + * same arguments and performs the same function as free_irq(). 77 + * This function instead of free_irq() should be used to manually 78 + * free IRQs allocated with dev_request_irq(). 79 + */ 80 + void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id) 81 + { 82 + struct irq_devres match_data = { irq, dev_id }; 83 + 84 + free_irq(irq, dev_id); 85 + WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match, 86 + &match_data)); 87 + } 88 + EXPORT_SYMBOL(devm_free_irq);
-86
kernel/irq/manage.c
··· 482 482 return retval; 483 483 } 484 484 EXPORT_SYMBOL(request_irq); 485 - 486 - /* 487 - * Device resource management aware IRQ request/free implementation. 488 - */ 489 - struct irq_devres { 490 - unsigned int irq; 491 - void *dev_id; 492 - }; 493 - 494 - static void devm_irq_release(struct device *dev, void *res) 495 - { 496 - struct irq_devres *this = res; 497 - 498 - free_irq(this->irq, this->dev_id); 499 - } 500 - 501 - static int devm_irq_match(struct device *dev, void *res, void *data) 502 - { 503 - struct irq_devres *this = res, *match = data; 504 - 505 - return this->irq == match->irq && this->dev_id == match->dev_id; 506 - } 507 - 508 - /** 509 - * devm_request_irq - allocate an interrupt line for a managed device 510 - * @dev: device to request interrupt for 511 - * @irq: Interrupt line to allocate 512 - * @handler: Function to be called when the IRQ occurs 513 - * @irqflags: Interrupt type flags 514 - * @devname: An ascii name for the claiming device 515 - * @dev_id: A cookie passed back to the handler function 516 - * 517 - * Except for the extra @dev argument, this function takes the 518 - * same arguments and performs the same function as 519 - * request_irq(). IRQs requested with this function will be 520 - * automatically freed on driver detach. 521 - * 522 - * If an IRQ allocated with this function needs to be freed 523 - * separately, dev_free_irq() must be used. 
524 - */ 525 - int devm_request_irq(struct device *dev, unsigned int irq, 526 - irq_handler_t handler, unsigned long irqflags, 527 - const char *devname, void *dev_id) 528 - { 529 - struct irq_devres *dr; 530 - int rc; 531 - 532 - dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres), 533 - GFP_KERNEL); 534 - if (!dr) 535 - return -ENOMEM; 536 - 537 - rc = request_irq(irq, handler, irqflags, devname, dev_id); 538 - if (rc) { 539 - kfree(dr); 540 - return rc; 541 - } 542 - 543 - dr->irq = irq; 544 - dr->dev_id = dev_id; 545 - devres_add(dev, dr); 546 - 547 - return 0; 548 - } 549 - EXPORT_SYMBOL(devm_request_irq); 550 - 551 - /** 552 - * devm_free_irq - free an interrupt 553 - * @dev: device to free interrupt for 554 - * @irq: Interrupt line to free 555 - * @dev_id: Device identity to free 556 - * 557 - * Except for the extra @dev argument, this function takes the 558 - * same arguments and performs the same function as free_irq(). 559 - * This function instead of free_irq() should be used to manually 560 - * free IRQs allocated with dev_request_irq(). 561 - */ 562 - void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id) 563 - { 564 - struct irq_devres match_data = { irq, dev_id }; 565 - 566 - free_irq(irq, dev_id); 567 - WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match, 568 - &match_data)); 569 - } 570 - EXPORT_SYMBOL(devm_free_irq);
+7 -2
lib/Kconfig
··· 101 101 config PLIST 102 102 boolean 103 103 104 - config IOMAP_COPY 104 + config HAS_IOMEM 105 105 boolean 106 - depends on !UML 106 + depends on !NO_IOMEM 107 + default y 108 + 109 + config HAS_IOPORT 110 + boolean 111 + depends on HAS_IOMEM && !NO_IOPORT 107 112 default y 108 113 109 114 endmenu
+3 -3
lib/Makefile
··· 12 12 13 13 lib-y += kobject.o kref.o kobject_uevent.o klist.o 14 14 15 - obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o iomap.o \ 16 - bust_spinlocks.o 15 + obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o 17 16 18 17 ifeq ($(CONFIG_DEBUG_KOBJECT),y) 19 18 CFLAGS_kobject.o += -DDEBUG 20 19 CFLAGS_kobject_uevent.o += -DDEBUG 21 20 endif 22 21 23 - obj-$(CONFIG_IOMAP_COPY) += iomap_copy.o 22 + obj-$(CONFIG_GENERIC_IOMAP) += iomap.o 23 + obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o 24 24 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o 25 25 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o 26 26 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+300
lib/devres.c
··· 1 + #include <linux/pci.h> 2 + #include <linux/io.h> 3 + #include <linux/module.h> 4 + 5 + static void devm_ioremap_release(struct device *dev, void *res) 6 + { 7 + iounmap(*(void __iomem **)res); 8 + } 9 + 10 + static int devm_ioremap_match(struct device *dev, void *res, void *match_data) 11 + { 12 + return *(void **)res == match_data; 13 + } 14 + 15 + /** 16 + * devm_ioremap - Managed ioremap() 17 + * @dev: Generic device to remap IO address for 18 + * @offset: BUS offset to map 19 + * @size: Size of map 20 + * 21 + * Managed ioremap(). Map is automatically unmapped on driver detach. 22 + */ 23 + void __iomem *devm_ioremap(struct device *dev, unsigned long offset, 24 + unsigned long size) 25 + { 26 + void __iomem **ptr, *addr; 27 + 28 + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); 29 + if (!ptr) 30 + return NULL; 31 + 32 + addr = ioremap(offset, size); 33 + if (addr) { 34 + *ptr = addr; 35 + devres_add(dev, ptr); 36 + } else 37 + devres_free(ptr); 38 + 39 + return addr; 40 + } 41 + EXPORT_SYMBOL(devm_ioremap); 42 + 43 + /** 44 + * devm_ioremap_nocache - Managed ioremap_nocache() 45 + * @dev: Generic device to remap IO address for 46 + * @offset: BUS offset to map 47 + * @size: Size of map 48 + * 49 + * Managed ioremap_nocache(). Map is automatically unmapped on driver 50 + * detach. 51 + */ 52 + void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset, 53 + unsigned long size) 54 + { 55 + void __iomem **ptr, *addr; 56 + 57 + ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); 58 + if (!ptr) 59 + return NULL; 60 + 61 + addr = ioremap_nocache(offset, size); 62 + if (addr) { 63 + *ptr = addr; 64 + devres_add(dev, ptr); 65 + } else 66 + devres_free(ptr); 67 + 68 + return addr; 69 + } 70 + EXPORT_SYMBOL(devm_ioremap_nocache); 71 + 72 + /** 73 + * devm_iounmap - Managed iounmap() 74 + * @dev: Generic device to unmap for 75 + * @addr: Address to unmap 76 + * 77 + * Managed iounmap(). 
@addr must have been mapped using devm_ioremap*(). 78 + */ 79 + void devm_iounmap(struct device *dev, void __iomem *addr) 80 + { 81 + iounmap(addr); 82 + WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, 83 + (void *)addr)); 84 + } 85 + EXPORT_SYMBOL(devm_iounmap); 86 + 87 + #ifdef CONFIG_HAS_IOPORT 88 + /* 89 + * Generic iomap devres 90 + */ 91 + static void devm_ioport_map_release(struct device *dev, void *res) 92 + { 93 + ioport_unmap(*(void __iomem **)res); 94 + } 95 + 96 + static int devm_ioport_map_match(struct device *dev, void *res, 97 + void *match_data) 98 + { 99 + return *(void **)res == match_data; 100 + } 101 + 102 + /** 103 + * devm_ioport_map - Managed ioport_map() 104 + * @dev: Generic device to map ioport for 105 + * @port: Port to map 106 + * @nr: Number of ports to map 107 + * 108 + * Managed ioport_map(). Map is automatically unmapped on driver 109 + * detach. 110 + */ 111 + void __iomem * devm_ioport_map(struct device *dev, unsigned long port, 112 + unsigned int nr) 113 + { 114 + void __iomem **ptr, *addr; 115 + 116 + ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL); 117 + if (!ptr) 118 + return NULL; 119 + 120 + addr = ioport_map(port, nr); 121 + if (addr) { 122 + *ptr = addr; 123 + devres_add(dev, ptr); 124 + } else 125 + devres_free(ptr); 126 + 127 + return addr; 128 + } 129 + EXPORT_SYMBOL(devm_ioport_map); 130 + 131 + /** 132 + * devm_ioport_unmap - Managed ioport_unmap() 133 + * @dev: Generic device to unmap for 134 + * @addr: Address to unmap 135 + * 136 + * Managed ioport_unmap(). @addr must have been mapped using 137 + * devm_ioport_map(). 
138 + */ 139 + void devm_ioport_unmap(struct device *dev, void __iomem *addr) 140 + { 141 + ioport_unmap(addr); 142 + WARN_ON(devres_destroy(dev, devm_ioport_map_release, 143 + devm_ioport_map_match, (void *)addr)); 144 + } 145 + EXPORT_SYMBOL(devm_ioport_unmap); 146 + 147 + #ifdef CONFIG_PCI 148 + /* 149 + * PCI iomap devres 150 + */ 151 + #define PCIM_IOMAP_MAX PCI_ROM_RESOURCE 152 + 153 + struct pcim_iomap_devres { 154 + void __iomem *table[PCIM_IOMAP_MAX]; 155 + }; 156 + 157 + static void pcim_iomap_release(struct device *gendev, void *res) 158 + { 159 + struct pci_dev *dev = container_of(gendev, struct pci_dev, dev); 160 + struct pcim_iomap_devres *this = res; 161 + int i; 162 + 163 + for (i = 0; i < PCIM_IOMAP_MAX; i++) 164 + if (this->table[i]) 165 + pci_iounmap(dev, this->table[i]); 166 + } 167 + 168 + /** 169 + * pcim_iomap_table - access iomap allocation table 170 + * @pdev: PCI device to access iomap table for 171 + * 172 + * Access iomap allocation table for @dev. If iomap table doesn't 173 + * exist and @pdev is managed, it will be allocated. All iomaps 174 + * recorded in the iomap table are automatically unmapped on driver 175 + * detach. 176 + * 177 + * This function might sleep when the table is first allocated but can 178 + * be safely called without context and guaranteed to succed once 179 + * allocated. 
180 + */ 181 + void __iomem * const * pcim_iomap_table(struct pci_dev *pdev) 182 + { 183 + struct pcim_iomap_devres *dr, *new_dr; 184 + 185 + dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL); 186 + if (dr) 187 + return dr->table; 188 + 189 + new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL); 190 + if (!new_dr) 191 + return NULL; 192 + dr = devres_get(&pdev->dev, new_dr, NULL, NULL); 193 + return dr->table; 194 + } 195 + EXPORT_SYMBOL(pcim_iomap_table); 196 + 197 + /** 198 + * pcim_iomap - Managed pcim_iomap() 199 + * @pdev: PCI device to iomap for 200 + * @bar: BAR to iomap 201 + * @maxlen: Maximum length of iomap 202 + * 203 + * Managed pci_iomap(). Map is automatically unmapped on driver 204 + * detach. 205 + */ 206 + void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) 207 + { 208 + void __iomem **tbl; 209 + 210 + BUG_ON(bar >= PCIM_IOMAP_MAX); 211 + 212 + tbl = (void __iomem **)pcim_iomap_table(pdev); 213 + if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ 214 + return NULL; 215 + 216 + tbl[bar] = pci_iomap(pdev, bar, maxlen); 217 + return tbl[bar]; 218 + } 219 + EXPORT_SYMBOL(pcim_iomap); 220 + 221 + /** 222 + * pcim_iounmap - Managed pci_iounmap() 223 + * @pdev: PCI device to iounmap for 224 + * @addr: Address to unmap 225 + * 226 + * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap(). 
227 + */ 228 + void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr) 229 + { 230 + void __iomem **tbl; 231 + int i; 232 + 233 + pci_iounmap(pdev, addr); 234 + 235 + tbl = (void __iomem **)pcim_iomap_table(pdev); 236 + BUG_ON(!tbl); 237 + 238 + for (i = 0; i < PCIM_IOMAP_MAX; i++) 239 + if (tbl[i] == addr) { 240 + tbl[i] = NULL; 241 + return; 242 + } 243 + WARN_ON(1); 244 + } 245 + EXPORT_SYMBOL(pcim_iounmap); 246 + 247 + /** 248 + * pcim_iomap_regions - Request and iomap PCI BARs 249 + * @pdev: PCI device to map IO resources for 250 + * @mask: Mask of BARs to request and iomap 251 + * @name: Name used when requesting regions 252 + * 253 + * Request and iomap regions specified by @mask. 254 + */ 255 + int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name) 256 + { 257 + void __iomem * const *iomap; 258 + int i, rc; 259 + 260 + iomap = pcim_iomap_table(pdev); 261 + if (!iomap) 262 + return -ENOMEM; 263 + 264 + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 265 + unsigned long len; 266 + 267 + if (!(mask & (1 << i))) 268 + continue; 269 + 270 + rc = -EINVAL; 271 + len = pci_resource_len(pdev, i); 272 + if (!len) 273 + goto err_inval; 274 + 275 + rc = pci_request_region(pdev, i, name); 276 + if (rc) 277 + goto err_region; 278 + 279 + rc = -ENOMEM; 280 + if (!pcim_iomap(pdev, i, 0)) 281 + goto err_iomap; 282 + } 283 + 284 + return 0; 285 + 286 + err_iomap: 287 + pcim_iounmap(pdev, iomap[i]); 288 + err_region: 289 + pci_release_region(pdev, i); 290 + err_inval: 291 + while (--i >= 0) { 292 + pcim_iounmap(pdev, iomap[i]); 293 + pci_release_region(pdev, i); 294 + } 295 + 296 + return rc; 297 + } 298 + EXPORT_SYMBOL(pcim_iomap_regions); 299 + #endif 300 + #endif
-296
lib/iomap.c
··· 6 6 #include <linux/pci.h> 7 7 #include <linux/io.h> 8 8 9 - #ifdef CONFIG_GENERIC_IOMAP 10 9 #include <linux/module.h> 11 10 12 11 /* ··· 255 256 } 256 257 EXPORT_SYMBOL(pci_iomap); 257 258 EXPORT_SYMBOL(pci_iounmap); 258 - 259 - #endif /* CONFIG_GENERIC_IOMAP */ 260 - 261 - /* 262 - * Generic iomap devres 263 - */ 264 - static void devm_ioport_map_release(struct device *dev, void *res) 265 - { 266 - ioport_unmap(*(void __iomem **)res); 267 - } 268 - 269 - static int devm_ioport_map_match(struct device *dev, void *res, 270 - void *match_data) 271 - { 272 - return *(void **)res == match_data; 273 - } 274 - 275 - /** 276 - * devm_ioport_map - Managed ioport_map() 277 - * @dev: Generic device to map ioport for 278 - * @port: Port to map 279 - * @nr: Number of ports to map 280 - * 281 - * Managed ioport_map(). Map is automatically unmapped on driver 282 - * detach. 283 - */ 284 - void __iomem * devm_ioport_map(struct device *dev, unsigned long port, 285 - unsigned int nr) 286 - { 287 - void __iomem **ptr, *addr; 288 - 289 - ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL); 290 - if (!ptr) 291 - return NULL; 292 - 293 - addr = ioport_map(port, nr); 294 - if (addr) { 295 - *ptr = addr; 296 - devres_add(dev, ptr); 297 - } else 298 - devres_free(ptr); 299 - 300 - return addr; 301 - } 302 - EXPORT_SYMBOL(devm_ioport_map); 303 - 304 - /** 305 - * devm_ioport_unmap - Managed ioport_unmap() 306 - * @dev: Generic device to unmap for 307 - * @addr: Address to unmap 308 - * 309 - * Managed ioport_unmap(). @addr must have been mapped using 310 - * devm_ioport_map(). 
311 - */ 312 - void devm_ioport_unmap(struct device *dev, void __iomem *addr) 313 - { 314 - ioport_unmap(addr); 315 - WARN_ON(devres_destroy(dev, devm_ioport_map_release, 316 - devm_ioport_map_match, (void *)addr)); 317 - } 318 - EXPORT_SYMBOL(devm_ioport_unmap); 319 - 320 - static void devm_ioremap_release(struct device *dev, void *res) 321 - { 322 - iounmap(*(void __iomem **)res); 323 - } 324 - 325 - static int devm_ioremap_match(struct device *dev, void *res, void *match_data) 326 - { 327 - return *(void **)res == match_data; 328 - } 329 - 330 - /** 331 - * devm_ioremap - Managed ioremap() 332 - * @dev: Generic device to remap IO address for 333 - * @offset: BUS offset to map 334 - * @size: Size of map 335 - * 336 - * Managed ioremap(). Map is automatically unmapped on driver detach. 337 - */ 338 - void __iomem *devm_ioremap(struct device *dev, unsigned long offset, 339 - unsigned long size) 340 - { 341 - void __iomem **ptr, *addr; 342 - 343 - ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); 344 - if (!ptr) 345 - return NULL; 346 - 347 - addr = ioremap(offset, size); 348 - if (addr) { 349 - *ptr = addr; 350 - devres_add(dev, ptr); 351 - } else 352 - devres_free(ptr); 353 - 354 - return addr; 355 - } 356 - EXPORT_SYMBOL(devm_ioremap); 357 - 358 - /** 359 - * devm_ioremap_nocache - Managed ioremap_nocache() 360 - * @dev: Generic device to remap IO address for 361 - * @offset: BUS offset to map 362 - * @size: Size of map 363 - * 364 - * Managed ioremap_nocache(). Map is automatically unmapped on driver 365 - * detach. 
366 - */ 367 - void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset, 368 - unsigned long size) 369 - { 370 - void __iomem **ptr, *addr; 371 - 372 - ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); 373 - if (!ptr) 374 - return NULL; 375 - 376 - addr = ioremap_nocache(offset, size); 377 - if (addr) { 378 - *ptr = addr; 379 - devres_add(dev, ptr); 380 - } else 381 - devres_free(ptr); 382 - 383 - return addr; 384 - } 385 - EXPORT_SYMBOL(devm_ioremap_nocache); 386 - 387 - /** 388 - * devm_iounmap - Managed iounmap() 389 - * @dev: Generic device to unmap for 390 - * @addr: Address to unmap 391 - * 392 - * Managed iounmap(). @addr must have been mapped using devm_ioremap*(). 393 - */ 394 - void devm_iounmap(struct device *dev, void __iomem *addr) 395 - { 396 - iounmap(addr); 397 - WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, 398 - (void *)addr)); 399 - } 400 - EXPORT_SYMBOL(devm_iounmap); 401 - 402 - /* 403 - * PCI iomap devres 404 - */ 405 - #define PCIM_IOMAP_MAX PCI_ROM_RESOURCE 406 - 407 - struct pcim_iomap_devres { 408 - void __iomem *table[PCIM_IOMAP_MAX]; 409 - }; 410 - 411 - static void pcim_iomap_release(struct device *gendev, void *res) 412 - { 413 - struct pci_dev *dev = container_of(gendev, struct pci_dev, dev); 414 - struct pcim_iomap_devres *this = res; 415 - int i; 416 - 417 - for (i = 0; i < PCIM_IOMAP_MAX; i++) 418 - if (this->table[i]) 419 - pci_iounmap(dev, this->table[i]); 420 - } 421 - 422 - /** 423 - * pcim_iomap_table - access iomap allocation table 424 - * @pdev: PCI device to access iomap table for 425 - * 426 - * Access iomap allocation table for @dev. If iomap table doesn't 427 - * exist and @pdev is managed, it will be allocated. All iomaps 428 - * recorded in the iomap table are automatically unmapped on driver 429 - * detach. 
430 - * 431 - * This function might sleep when the table is first allocated but can 432 - * be safely called without context and guaranteed to succed once 433 - * allocated. 434 - */ 435 - void __iomem * const * pcim_iomap_table(struct pci_dev *pdev) 436 - { 437 - struct pcim_iomap_devres *dr, *new_dr; 438 - 439 - dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL); 440 - if (dr) 441 - return dr->table; 442 - 443 - new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL); 444 - if (!new_dr) 445 - return NULL; 446 - dr = devres_get(&pdev->dev, new_dr, NULL, NULL); 447 - return dr->table; 448 - } 449 - EXPORT_SYMBOL(pcim_iomap_table); 450 - 451 - /** 452 - * pcim_iomap - Managed pcim_iomap() 453 - * @pdev: PCI device to iomap for 454 - * @bar: BAR to iomap 455 - * @maxlen: Maximum length of iomap 456 - * 457 - * Managed pci_iomap(). Map is automatically unmapped on driver 458 - * detach. 459 - */ 460 - void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) 461 - { 462 - void __iomem **tbl; 463 - 464 - BUG_ON(bar >= PCIM_IOMAP_MAX); 465 - 466 - tbl = (void __iomem **)pcim_iomap_table(pdev); 467 - if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ 468 - return NULL; 469 - 470 - tbl[bar] = pci_iomap(pdev, bar, maxlen); 471 - return tbl[bar]; 472 - } 473 - EXPORT_SYMBOL(pcim_iomap); 474 - 475 - /** 476 - * pcim_iounmap - Managed pci_iounmap() 477 - * @pdev: PCI device to iounmap for 478 - * @addr: Address to unmap 479 - * 480 - * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap(). 
481 - */ 482 - void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr) 483 - { 484 - void __iomem **tbl; 485 - int i; 486 - 487 - pci_iounmap(pdev, addr); 488 - 489 - tbl = (void __iomem **)pcim_iomap_table(pdev); 490 - BUG_ON(!tbl); 491 - 492 - for (i = 0; i < PCIM_IOMAP_MAX; i++) 493 - if (tbl[i] == addr) { 494 - tbl[i] = NULL; 495 - return; 496 - } 497 - WARN_ON(1); 498 - } 499 - EXPORT_SYMBOL(pcim_iounmap); 500 - 501 - /** 502 - * pcim_iomap_regions - Request and iomap PCI BARs 503 - * @pdev: PCI device to map IO resources for 504 - * @mask: Mask of BARs to request and iomap 505 - * @name: Name used when requesting regions 506 - * 507 - * Request and iomap regions specified by @mask. 508 - */ 509 - int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name) 510 - { 511 - void __iomem * const *iomap; 512 - int i, rc; 513 - 514 - iomap = pcim_iomap_table(pdev); 515 - if (!iomap) 516 - return -ENOMEM; 517 - 518 - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 519 - unsigned long len; 520 - 521 - if (!(mask & (1 << i))) 522 - continue; 523 - 524 - rc = -EINVAL; 525 - len = pci_resource_len(pdev, i); 526 - if (!len) 527 - goto err_inval; 528 - 529 - rc = pci_request_region(pdev, i, name); 530 - if (rc) 531 - goto err_region; 532 - 533 - rc = -ENOMEM; 534 - if (!pcim_iomap(pdev, i, 0)) 535 - goto err_iomap; 536 - } 537 - 538 - return 0; 539 - 540 - err_iomap: 541 - pcim_iounmap(pdev, iomap[i]); 542 - err_region: 543 - pci_release_region(pdev, i); 544 - err_inval: 545 - while (--i >= 0) { 546 - pcim_iounmap(pdev, iomap[i]); 547 - pci_release_region(pdev, i); 548 - } 549 - 550 - return rc; 551 - } 552 - EXPORT_SYMBOL(pcim_iomap_regions);