Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

irq_domain/powerpc: Use common irq_domain structure instead of irq_host

This patch drops the powerpc-specific irq_host structures and uses the common
irq_domain structures defined in linux/irqdomain.h. It also fixes all
the users to use the new structure names.

Renaming irq_host to irq_domain has been discussed for a long time, and this
patch is a step in the process of generalizing the powerpc virq code to be
usable by all architectures.

An astute reader will notice that this patch actually removes the irq_host
structure instead of renaming it. This is because the irq_domain structure
already exists in include/linux/irqdomain.h and has the needed data members.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Milton Miller <miltonm@bga.com>
Tested-by: Olof Johansson <olof@lixom.net>

+277 -348
+1 -1
arch/powerpc/include/asm/ehv_pic.h
··· 25 25 26 26 struct ehv_pic { 27 27 /* The remapper for this EHV_PIC */ 28 - struct irq_host *irqhost; 28 + struct irq_domain *irqhost; 29 29 30 30 /* The "linux" controller struct */ 31 31 struct irq_chip hc_irq;
+1 -1
arch/powerpc/include/asm/i8259.h
··· 6 6 7 7 extern void i8259_init(struct device_node *node, unsigned long intack_addr); 8 8 extern unsigned int i8259_irq(void); 9 - extern struct irq_host *i8259_get_host(void); 9 + extern struct irq_domain *i8259_get_host(void); 10 10 11 11 #endif /* __KERNEL__ */ 12 12 #endif /* _ASM_POWERPC_I8259_H */
+20 -90
arch/powerpc/include/asm/irq.h
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 12 + #include <linux/irqdomain.h> 12 13 #include <linux/threads.h> 13 14 #include <linux/list.h> 14 15 #include <linux/radix-tree.h> ··· 42 41 /* Same thing, used by the generic IRQ code */ 43 42 #define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS 44 43 45 - /* This type is the placeholder for a hardware interrupt number. It has to 46 - * be big enough to enclose whatever representation is used by a given 47 - * platform. 48 - */ 49 - typedef unsigned long irq_hw_number_t; 50 - 51 - /* Interrupt controller "host" data structure. This could be defined as a 52 - * irq domain controller. That is, it handles the mapping between hardware 53 - * and virtual interrupt numbers for a given interrupt domain. The host 54 - * structure is generally created by the PIC code for a given PIC instance 55 - * (though a host can cover more than one PIC if they have a flat number 56 - * model). It's the host callbacks that are responsible for setting the 57 - * irq_chip on a given irq_desc after it's been mapped. 58 - * 44 + /* 59 45 * The host code and data structures are fairly agnostic to the fact that 60 46 * we use an open firmware device-tree. We do have references to struct 61 47 * device_node in two places: in irq_find_host() to find the host matching ··· 54 66 * by some sort of arch-specific void * "token" used to identify interrupt 55 67 * controllers. 56 68 */ 57 - struct irq_host; 58 - struct radix_tree_root; 59 - 60 - /* Functions below are provided by the host and called whenever a new mapping 61 - * is created or an old mapping is disposed. The host can then proceed to 62 - * whatever internal data structures management is required. It also needs 63 - * to setup the irq_desc when returning from map(). 
64 - */ 65 - struct irq_host_ops { 66 - /* Match an interrupt controller device node to a host, returns 67 - * 1 on a match 68 - */ 69 - int (*match)(struct irq_host *h, struct device_node *node); 70 - 71 - /* Create or update a mapping between a virtual irq number and a hw 72 - * irq number. This is called only once for a given mapping. 73 - */ 74 - int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw); 75 - 76 - /* Dispose of such a mapping */ 77 - void (*unmap)(struct irq_host *h, unsigned int virq); 78 - 79 - /* Translate device-tree interrupt specifier from raw format coming 80 - * from the firmware to a irq_hw_number_t (interrupt line number) and 81 - * type (sense) that can be passed to set_irq_type(). In the absence 82 - * of this callback, irq_create_of_mapping() and irq_of_parse_and_map() 83 - * will return the hw number in the first cell and IRQ_TYPE_NONE for 84 - * the type (which amount to keeping whatever default value the 85 - * interrupt controller has for that line) 86 - */ 87 - int (*xlate)(struct irq_host *h, struct device_node *ctrler, 88 - const u32 *intspec, unsigned int intsize, 89 - irq_hw_number_t *out_hwirq, unsigned int *out_type); 90 - }; 91 - 92 - struct irq_host { 93 - struct list_head link; 94 - 95 - /* type of reverse mapping technique */ 96 - unsigned int revmap_type; 97 - #define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */ 98 - #define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */ 99 - #define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */ 100 - #define IRQ_HOST_MAP_TREE 3 /* radix tree */ 101 - union { 102 - struct { 103 - unsigned int size; 104 - unsigned int *revmap; 105 - } linear; 106 - struct radix_tree_root tree; 107 - } revmap_data; 108 - struct irq_host_ops *ops; 109 - void *host_data; 110 - irq_hw_number_t inval_irq; 111 - 112 - /* Optional device node pointer */ 113 - struct device_node *of_node; 114 - }; 115 69 116 70 struct irq_data; 117 71 extern irq_hw_number_t 
irqd_to_hwirq(struct irq_data *d); 118 72 extern irq_hw_number_t virq_to_hw(unsigned int virq); 119 - extern bool virq_is_host(unsigned int virq, struct irq_host *host); 73 + extern bool virq_is_host(unsigned int virq, struct irq_domain *host); 120 74 121 75 /** 122 - * irq_alloc_host - Allocate a new irq_host data structure 76 + * irq_alloc_host - Allocate a new irq_domain data structure 123 77 * @of_node: optional device-tree node of the interrupt controller 124 78 * @revmap_type: type of reverse mapping to use 125 - * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map 79 + * @revmap_arg: for IRQ_DOMAIN_MAP_LINEAR linear only: size of the map 126 80 * @ops: map/unmap host callbacks 127 81 * @inval_irq: provide a hw number in that host space that is always invalid 128 82 * 129 - * Allocates and initialize and irq_host structure. Note that in the case of 130 - * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns 83 + * Allocates and initialize and irq_domain structure. Note that in the case of 84 + * IRQ_DOMAIN_MAP_LEGACY, the map() callback will be called before this returns 131 85 * for all legacy interrupts except 0 (which is always the invalid irq for 132 - * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by 133 - * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated 86 + * a legacy controller). For a IRQ_DOMAIN_MAP_LINEAR, the map is allocated by 87 + * this call as well. For a IRQ_DOMAIN_MAP_TREE, the radix tree will be allocated 134 88 * later during boot automatically (the reverse mapping will use the slow path 135 89 * until that happens). 
136 90 */ 137 - extern struct irq_host *irq_alloc_host(struct device_node *of_node, 91 + extern struct irq_domain *irq_alloc_host(struct device_node *of_node, 138 92 unsigned int revmap_type, 139 93 unsigned int revmap_arg, 140 - struct irq_host_ops *ops, 94 + struct irq_domain_ops *ops, 141 95 irq_hw_number_t inval_irq); 142 96 143 97 ··· 87 157 * irq_find_host - Locates a host for a given device node 88 158 * @node: device-tree node of the interrupt controller 89 159 */ 90 - extern struct irq_host *irq_find_host(struct device_node *node); 160 + extern struct irq_domain *irq_find_host(struct device_node *node); 91 161 92 162 93 163 /** ··· 99 169 * platforms that want to manipulate a few hard coded interrupt numbers that 100 170 * aren't properly represented in the device-tree. 101 171 */ 102 - extern void irq_set_default_host(struct irq_host *host); 172 + extern void irq_set_default_host(struct irq_domain *host); 103 173 104 174 105 175 /** ··· 122 192 * If the sense/trigger is to be specified, set_irq_type() should be called 123 193 * on the number returned from that call. 124 194 */ 125 - extern unsigned int irq_create_mapping(struct irq_host *host, 195 + extern unsigned int irq_create_mapping(struct irq_domain *host, 126 196 irq_hw_number_t hwirq); 127 197 128 198 ··· 141 211 * irq controller implementation directly calls the appropriate low level 142 212 * mapping function. 143 213 */ 144 - extern unsigned int irq_find_mapping(struct irq_host *host, 214 + extern unsigned int irq_find_mapping(struct irq_domain *host, 145 215 irq_hw_number_t hwirq); 146 216 147 217 /** ··· 152 222 * interrupt numbers they generate. In such a case it's simplest to use 153 223 * the linux virq as the hardware interrupt number. 
154 224 */ 155 - extern unsigned int irq_create_direct_mapping(struct irq_host *host); 225 + extern unsigned int irq_create_direct_mapping(struct irq_domain *host); 156 226 157 227 /** 158 228 * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping. ··· 163 233 * This is for use by irq controllers that use a radix tree reverse 164 234 * mapping for fast lookup. 165 235 */ 166 - extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, 236 + extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq, 167 237 irq_hw_number_t hwirq); 168 238 169 239 /** ··· 174 244 * This is a fast path, for use by irq controller code that uses radix tree 175 245 * revmaps 176 246 */ 177 - extern unsigned int irq_radix_revmap_lookup(struct irq_host *host, 247 + extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host, 178 248 irq_hw_number_t hwirq); 179 249 180 250 /** ··· 187 257 * yet and will create the revmap entry with appropriate locking 188 258 */ 189 259 190 - extern unsigned int irq_linear_revmap(struct irq_host *host, 260 + extern unsigned int irq_linear_revmap(struct irq_domain *host, 191 261 irq_hw_number_t hwirq); 192 262 193 263 ··· 202 272 * and that can be used by some irq controllers implementations for things 203 273 * like allocating ranges of numbers for MSIs. The revmaps are left untouched. 204 274 */ 205 - extern unsigned int irq_alloc_virt(struct irq_host *host, 275 + extern unsigned int irq_alloc_virt(struct irq_domain *host, 206 276 unsigned int count, 207 277 unsigned int hint); 208 278
+1 -1
arch/powerpc/include/asm/mpic.h
··· 255 255 struct device_node *node; 256 256 257 257 /* The remapper for this MPIC */ 258 - struct irq_host *irqhost; 258 + struct irq_domain *irqhost; 259 259 260 260 /* The "linux" controller struct */ 261 261 struct irq_chip hc_irq;
+1 -1
arch/powerpc/include/asm/xics.h
··· 86 86 extern unsigned int xics_default_server; 87 87 extern unsigned int xics_default_distrib_server; 88 88 extern unsigned int xics_interrupt_server_size; 89 - extern struct irq_host *xics_host; 89 + extern struct irq_domain *xics_host; 90 90 91 91 struct xics_cppr { 92 92 unsigned char stack[MAX_NUM_PRIORITIES];
+39 -39
arch/powerpc/kernel/irq.c
··· 498 498 */ 499 499 struct irq_map_entry { 500 500 irq_hw_number_t hwirq; 501 - struct irq_host *host; 501 + struct irq_domain *host; 502 502 }; 503 503 504 - static LIST_HEAD(irq_hosts); 504 + static LIST_HEAD(irq_domain_list); 505 505 static DEFINE_RAW_SPINLOCK(irq_big_lock); 506 506 static DEFINE_MUTEX(revmap_trees_mutex); 507 507 static struct irq_map_entry irq_map[NR_IRQS]; 508 508 static unsigned int irq_virq_count = NR_IRQS; 509 - static struct irq_host *irq_default_host; 509 + static struct irq_domain *irq_default_host; 510 510 511 511 irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 512 512 { ··· 520 520 } 521 521 EXPORT_SYMBOL_GPL(virq_to_hw); 522 522 523 - bool virq_is_host(unsigned int virq, struct irq_host *host) 523 + bool virq_is_host(unsigned int virq, struct irq_domain *host) 524 524 { 525 525 return irq_map[virq].host == host; 526 526 } 527 527 EXPORT_SYMBOL_GPL(virq_is_host); 528 528 529 - static int default_irq_host_match(struct irq_host *h, struct device_node *np) 529 + static int default_irq_host_match(struct irq_domain *h, struct device_node *np) 530 530 { 531 531 return h->of_node != NULL && h->of_node == np; 532 532 } 533 533 534 - struct irq_host *irq_alloc_host(struct device_node *of_node, 534 + struct irq_domain *irq_alloc_host(struct device_node *of_node, 535 535 unsigned int revmap_type, 536 536 unsigned int revmap_arg, 537 - struct irq_host_ops *ops, 537 + struct irq_domain_ops *ops, 538 538 irq_hw_number_t inval_irq) 539 539 { 540 - struct irq_host *host; 541 - unsigned int size = sizeof(struct irq_host); 540 + struct irq_domain *host; 541 + unsigned int size = sizeof(struct irq_domain); 542 542 unsigned int i; 543 543 unsigned int *rmap; 544 544 unsigned long flags; 545 545 546 546 /* Allocate structure and revmap table if using linear mapping */ 547 - if (revmap_type == IRQ_HOST_MAP_LINEAR) 547 + if (revmap_type == IRQ_DOMAIN_MAP_LINEAR) 548 548 size += revmap_arg * sizeof(unsigned int); 549 549 host = kzalloc(size, 
GFP_KERNEL); 550 550 if (host == NULL) ··· 564 564 /* If it's a legacy controller, check for duplicates and 565 565 * mark it as allocated (we use irq 0 host pointer for that 566 566 */ 567 - if (revmap_type == IRQ_HOST_MAP_LEGACY) { 567 + if (revmap_type == IRQ_DOMAIN_MAP_LEGACY) { 568 568 if (irq_map[0].host != NULL) { 569 569 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 570 570 of_node_put(host->of_node); ··· 574 574 irq_map[0].host = host; 575 575 } 576 576 577 - list_add(&host->link, &irq_hosts); 577 + list_add(&host->link, &irq_domain_list); 578 578 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 579 579 580 580 /* Additional setups per revmap type */ 581 581 switch(revmap_type) { 582 - case IRQ_HOST_MAP_LEGACY: 582 + case IRQ_DOMAIN_MAP_LEGACY: 583 583 /* 0 is always the invalid number for legacy */ 584 584 host->inval_irq = 0; 585 585 /* setup us as the host for all legacy interrupts */ ··· 599 599 irq_clear_status_flags(i, IRQ_NOREQUEST); 600 600 } 601 601 break; 602 - case IRQ_HOST_MAP_LINEAR: 602 + case IRQ_DOMAIN_MAP_LINEAR: 603 603 rmap = (unsigned int *)(host + 1); 604 604 for (i = 0; i < revmap_arg; i++) 605 605 rmap[i] = NO_IRQ; ··· 607 607 smp_wmb(); 608 608 host->revmap_data.linear.revmap = rmap; 609 609 break; 610 - case IRQ_HOST_MAP_TREE: 610 + case IRQ_DOMAIN_MAP_TREE: 611 611 INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL); 612 612 break; 613 613 default: ··· 619 619 return host; 620 620 } 621 621 622 - struct irq_host *irq_find_host(struct device_node *node) 622 + struct irq_domain *irq_find_host(struct device_node *node) 623 623 { 624 - struct irq_host *h, *found = NULL; 624 + struct irq_domain *h, *found = NULL; 625 625 unsigned long flags; 626 626 627 627 /* We might want to match the legacy controller last since ··· 630 630 * yet though... 
631 631 */ 632 632 raw_spin_lock_irqsave(&irq_big_lock, flags); 633 - list_for_each_entry(h, &irq_hosts, link) 633 + list_for_each_entry(h, &irq_domain_list, link) 634 634 if (h->ops->match(h, node)) { 635 635 found = h; 636 636 break; ··· 640 640 } 641 641 EXPORT_SYMBOL_GPL(irq_find_host); 642 642 643 - void irq_set_default_host(struct irq_host *host) 643 + void irq_set_default_host(struct irq_domain *host) 644 644 { 645 645 pr_debug("irq: Default host set to @0x%p\n", host); 646 646 ··· 656 656 irq_virq_count = count; 657 657 } 658 658 659 - static int irq_setup_virq(struct irq_host *host, unsigned int virq, 659 + static int irq_setup_virq(struct irq_domain *host, unsigned int virq, 660 660 irq_hw_number_t hwirq) 661 661 { 662 662 int res; ··· 688 688 return -1; 689 689 } 690 690 691 - unsigned int irq_create_direct_mapping(struct irq_host *host) 691 + unsigned int irq_create_direct_mapping(struct irq_domain *host) 692 692 { 693 693 unsigned int virq; 694 694 ··· 696 696 host = irq_default_host; 697 697 698 698 BUG_ON(host == NULL); 699 - WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); 699 + WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_NOMAP); 700 700 701 701 virq = irq_alloc_virt(host, 1, 0); 702 702 if (virq == NO_IRQ) { ··· 712 712 return virq; 713 713 } 714 714 715 - unsigned int irq_create_mapping(struct irq_host *host, 715 + unsigned int irq_create_mapping(struct irq_domain *host, 716 716 irq_hw_number_t hwirq) 717 717 { 718 718 unsigned int virq, hint; ··· 738 738 } 739 739 740 740 /* Get a virtual interrupt number */ 741 - if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { 741 + if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY) { 742 742 /* Handle legacy */ 743 743 virq = (unsigned int)hwirq; 744 744 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) ··· 767 767 unsigned int irq_create_of_mapping(struct device_node *controller, 768 768 const u32 *intspec, unsigned int intsize) 769 769 { 770 - struct irq_host *host; 770 + struct irq_domain *host; 771 771 
irq_hw_number_t hwirq; 772 772 unsigned int type = IRQ_TYPE_NONE; 773 773 unsigned int virq; ··· 806 806 807 807 void irq_dispose_mapping(unsigned int virq) 808 808 { 809 - struct irq_host *host; 809 + struct irq_domain *host; 810 810 irq_hw_number_t hwirq; 811 811 812 812 if (virq == NO_IRQ) ··· 817 817 return; 818 818 819 819 /* Never unmap legacy interrupts */ 820 - if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 820 + if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY) 821 821 return; 822 822 823 823 irq_set_status_flags(virq, IRQ_NOREQUEST); ··· 836 836 /* Clear reverse map */ 837 837 hwirq = irq_map[virq].hwirq; 838 838 switch(host->revmap_type) { 839 - case IRQ_HOST_MAP_LINEAR: 839 + case IRQ_DOMAIN_MAP_LINEAR: 840 840 if (hwirq < host->revmap_data.linear.size) 841 841 host->revmap_data.linear.revmap[hwirq] = NO_IRQ; 842 842 break; 843 - case IRQ_HOST_MAP_TREE: 843 + case IRQ_DOMAIN_MAP_TREE: 844 844 mutex_lock(&revmap_trees_mutex); 845 845 radix_tree_delete(&host->revmap_data.tree, hwirq); 846 846 mutex_unlock(&revmap_trees_mutex); ··· 857 857 } 858 858 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 859 859 860 - unsigned int irq_find_mapping(struct irq_host *host, 860 + unsigned int irq_find_mapping(struct irq_domain *host, 861 861 irq_hw_number_t hwirq) 862 862 { 863 863 unsigned int i; ··· 870 870 return NO_IRQ; 871 871 872 872 /* legacy -> bail early */ 873 - if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 873 + if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY) 874 874 return hwirq; 875 875 876 876 /* Slow path does a linear search of the map */ ··· 925 925 } 926 926 #endif 927 927 928 - unsigned int irq_radix_revmap_lookup(struct irq_host *host, 928 + unsigned int irq_radix_revmap_lookup(struct irq_domain *host, 929 929 irq_hw_number_t hwirq) 930 930 { 931 931 struct irq_map_entry *ptr; 932 932 unsigned int virq; 933 933 934 - if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE)) 934 + if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_TREE)) 935 935 return 
irq_find_mapping(host, hwirq); 936 936 937 937 /* ··· 956 956 return virq; 957 957 } 958 958 959 - void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, 959 + void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq, 960 960 irq_hw_number_t hwirq) 961 961 { 962 - if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE)) 962 + if (WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_TREE)) 963 963 return; 964 964 965 965 if (virq != NO_IRQ) { ··· 970 970 } 971 971 } 972 972 973 - unsigned int irq_linear_revmap(struct irq_host *host, 973 + unsigned int irq_linear_revmap(struct irq_domain *host, 974 974 irq_hw_number_t hwirq) 975 975 { 976 976 unsigned int *revmap; 977 977 978 - if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR)) 978 + if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_LINEAR)) 979 979 return irq_find_mapping(host, hwirq); 980 980 981 981 /* Check revmap bounds */ ··· 994 994 return revmap[hwirq]; 995 995 } 996 996 997 - unsigned int irq_alloc_virt(struct irq_host *host, 997 + unsigned int irq_alloc_virt(struct irq_domain *host, 998 998 unsigned int count, 999 999 unsigned int hint) 1000 1000 { ··· 1064 1064 1065 1065 raw_spin_lock_irqsave(&irq_big_lock, flags); 1066 1066 for (i = virq; i < (virq + count); i++) { 1067 - struct irq_host *host; 1067 + struct irq_domain *host; 1068 1068 1069 1069 host = irq_map[i].host; 1070 1070 irq_map[i].hwirq = host->inval_irq;
+5 -6
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
··· 21 21 #include <asm/prom.h> 22 22 23 23 static struct device_node *cpld_pic_node; 24 - static struct irq_host *cpld_pic_host; 24 + static struct irq_domain *cpld_pic_host; 25 25 26 26 /* 27 27 * Bits to ignore in the misc_status register ··· 123 123 } 124 124 125 125 static int 126 - cpld_pic_host_match(struct irq_host *h, struct device_node *node) 126 + cpld_pic_host_match(struct irq_domain *h, struct device_node *node) 127 127 { 128 128 return cpld_pic_node == node; 129 129 } 130 130 131 131 static int 132 - cpld_pic_host_map(struct irq_host *h, unsigned int virq, 132 + cpld_pic_host_map(struct irq_domain *h, unsigned int virq, 133 133 irq_hw_number_t hw) 134 134 { 135 135 irq_set_status_flags(virq, IRQ_LEVEL); ··· 137 137 return 0; 138 138 } 139 139 140 - static struct 141 - irq_host_ops cpld_pic_host_ops = { 140 + static struct irq_domain_ops cpld_pic_host_ops = { 142 141 .match = cpld_pic_host_match, 143 142 .map = cpld_pic_host_map, 144 143 }; ··· 191 192 cpld_pic_node = of_node_get(np); 192 193 193 194 cpld_pic_host = 194 - irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 16, &cpld_pic_host_ops, 16); 195 + irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 16, &cpld_pic_host_ops, 16); 195 196 if (!cpld_pic_host) { 196 197 printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); 197 198 goto end;
+5 -5
arch/powerpc/platforms/52xx/media5200.c
··· 45 45 struct media5200_irq { 46 46 void __iomem *regs; 47 47 spinlock_t lock; 48 - struct irq_host *irqhost; 48 + struct irq_domain *irqhost; 49 49 }; 50 50 struct media5200_irq media5200_irq; 51 51 ··· 112 112 raw_spin_unlock(&desc->lock); 113 113 } 114 114 115 - static int media5200_irq_map(struct irq_host *h, unsigned int virq, 115 + static int media5200_irq_map(struct irq_domain *h, unsigned int virq, 116 116 irq_hw_number_t hw) 117 117 { 118 118 pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); ··· 122 122 return 0; 123 123 } 124 124 125 - static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct, 125 + static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct, 126 126 const u32 *intspec, unsigned int intsize, 127 127 irq_hw_number_t *out_hwirq, 128 128 unsigned int *out_flags) ··· 136 136 return 0; 137 137 } 138 138 139 - static struct irq_host_ops media5200_irq_ops = { 139 + static struct irq_domain_ops media5200_irq_ops = { 140 140 .map = media5200_irq_map, 141 141 .xlate = media5200_irq_xlate, 142 142 }; ··· 173 173 174 174 spin_lock_init(&media5200_irq.lock); 175 175 176 - media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_HOST_MAP_LINEAR, 176 + media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_DOMAIN_MAP_LINEAR, 177 177 MEDIA5200_NUM_IRQS, 178 178 &media5200_irq_ops, -1); 179 179 if (!media5200_irq.irqhost)
+6 -6
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
··· 81 81 * @regs: virtual address of GPT registers 82 82 * @lock: spinlock to coordinate between different functions. 83 83 * @gc: gpio_chip instance structure; used when GPIO is enabled 84 - * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported 84 + * @irqhost: Pointer to irq_domain instance; used when IRQ mode is supported 85 85 * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates 86 86 * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates 87 87 * if the timer is actively used as wdt which blocks gpt functions ··· 91 91 struct device *dev; 92 92 struct mpc52xx_gpt __iomem *regs; 93 93 spinlock_t lock; 94 - struct irq_host *irqhost; 94 + struct irq_domain *irqhost; 95 95 u32 ipb_freq; 96 96 u8 wdt_mode; 97 97 ··· 204 204 } 205 205 } 206 206 207 - static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, 207 + static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq, 208 208 irq_hw_number_t hw) 209 209 { 210 210 struct mpc52xx_gpt_priv *gpt = h->host_data; ··· 216 216 return 0; 217 217 } 218 218 219 - static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct, 219 + static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct, 220 220 const u32 *intspec, unsigned int intsize, 221 221 irq_hw_number_t *out_hwirq, 222 222 unsigned int *out_flags) ··· 236 236 return 0; 237 237 } 238 238 239 - static struct irq_host_ops mpc52xx_gpt_irq_ops = { 239 + static struct irq_domain_ops mpc52xx_gpt_irq_ops = { 240 240 .map = mpc52xx_gpt_irq_map, 241 241 .xlate = mpc52xx_gpt_irq_xlate, 242 242 }; ··· 252 252 if (!cascade_virq) 253 253 return; 254 254 255 - gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1, 255 + gpt->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 1, 256 256 &mpc52xx_gpt_irq_ops, -1); 257 257 if (!gpt->irqhost) { 258 258 dev_err(gpt->dev, "irq_alloc_host() failed\n");
+5 -5
arch/powerpc/platforms/52xx/mpc52xx_pic.c
··· 132 132 133 133 static struct mpc52xx_intr __iomem *intr; 134 134 static struct mpc52xx_sdma __iomem *sdma; 135 - static struct irq_host *mpc52xx_irqhost = NULL; 135 + static struct irq_domain *mpc52xx_irqhost = NULL; 136 136 137 137 static unsigned char mpc52xx_map_senses[4] = { 138 138 IRQ_TYPE_LEVEL_HIGH, ··· 301 301 /** 302 302 * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property 303 303 */ 304 - static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, 304 + static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct, 305 305 const u32 *intspec, unsigned int intsize, 306 306 irq_hw_number_t *out_hwirq, 307 307 unsigned int *out_flags) ··· 335 335 /** 336 336 * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure 337 337 */ 338 - static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, 338 + static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, 339 339 irq_hw_number_t irq) 340 340 { 341 341 int l1irq; ··· 384 384 return 0; 385 385 } 386 386 387 - static struct irq_host_ops mpc52xx_irqhost_ops = { 387 + static struct irq_domain_ops mpc52xx_irqhost_ops = { 388 388 .xlate = mpc52xx_irqhost_xlate, 389 389 .map = mpc52xx_irqhost_map, 390 390 }; ··· 444 444 * As last step, add an irq host to translate the real 445 445 * hw irq information provided by the ofw to linux virq 446 446 */ 447 - mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR, 447 + mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_DOMAIN_MAP_LINEAR, 448 448 MPC52xx_IRQ_HIGHTESTHWIRQ, 449 449 &mpc52xx_irqhost_ops, -1); 450 450
+5 -5
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
··· 29 29 30 30 struct pq2ads_pci_pic { 31 31 struct device_node *node; 32 - struct irq_host *host; 32 + struct irq_domain *host; 33 33 34 34 struct { 35 35 u32 stat; ··· 103 103 } 104 104 } 105 105 106 - static int pci_pic_host_map(struct irq_host *h, unsigned int virq, 106 + static int pci_pic_host_map(struct irq_domain *h, unsigned int virq, 107 107 irq_hw_number_t hw) 108 108 { 109 109 irq_set_status_flags(virq, IRQ_LEVEL); ··· 112 112 return 0; 113 113 } 114 114 115 - static struct irq_host_ops pci_pic_host_ops = { 115 + static struct irq_domain_ops pci_pic_host_ops = { 116 116 .map = pci_pic_host_map, 117 117 }; 118 118 119 119 int __init pq2ads_pci_init_irq(void) 120 120 { 121 121 struct pq2ads_pci_pic *priv; 122 - struct irq_host *host; 122 + struct irq_domain *host; 123 123 struct device_node *np; 124 124 int ret = -ENODEV; 125 125 int irq; ··· 156 156 out_be32(&priv->regs->mask, ~0); 157 157 mb(); 158 158 159 - host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS, 159 + host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, NUM_IRQS, 160 160 &pci_pic_host_ops, NUM_IRQS); 161 161 if (!host) { 162 162 ret = -ENOMEM;
+6 -6
arch/powerpc/platforms/85xx/socrates_fpga_pic.c
··· 51 51 static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); 52 52 53 53 static void __iomem *socrates_fpga_pic_iobase; 54 - static struct irq_host *socrates_fpga_pic_irq_host; 54 + static struct irq_domain *socrates_fpga_pic_irq_host; 55 55 static unsigned int socrates_fpga_irqs[3]; 56 56 57 57 static inline uint32_t socrates_fpga_pic_read(int reg) ··· 227 227 .irq_set_type = socrates_fpga_pic_set_type, 228 228 }; 229 229 230 - static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, 230 + static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq, 231 231 irq_hw_number_t hwirq) 232 232 { 233 233 /* All interrupts are LEVEL sensitive */ ··· 238 238 return 0; 239 239 } 240 240 241 - static int socrates_fpga_pic_host_xlate(struct irq_host *h, 241 + static int socrates_fpga_pic_host_xlate(struct irq_domain *h, 242 242 struct device_node *ct, const u32 *intspec, unsigned int intsize, 243 243 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 244 244 { ··· 269 269 return 0; 270 270 } 271 271 272 - static struct irq_host_ops socrates_fpga_pic_host_ops = { 272 + static struct irq_domain_ops socrates_fpga_pic_host_ops = { 273 273 .map = socrates_fpga_pic_host_map, 274 274 .xlate = socrates_fpga_pic_host_xlate, 275 275 }; ··· 279 279 unsigned long flags; 280 280 int i; 281 281 282 - /* Setup an irq_host structure */ 283 - socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_HOST_MAP_LINEAR, 282 + /* Setup an irq_domain structure */ 283 + socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_DOMAIN_MAP_LINEAR, 284 284 SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, 285 285 SOCRATES_FPGA_NUM_IRQS); 286 286 if (socrates_fpga_pic_irq_host == NULL) {
+6 -6
arch/powerpc/platforms/86xx/gef_pic.c
··· 50 50 static DEFINE_RAW_SPINLOCK(gef_pic_lock); 51 51 52 52 static void __iomem *gef_pic_irq_reg_base; 53 - static struct irq_host *gef_pic_irq_host; 53 + static struct irq_domain *gef_pic_irq_host; 54 54 static int gef_pic_cascade_irq; 55 55 56 56 /* ··· 153 153 /* When an interrupt is being configured, this call allows some flexibilty 154 154 * in deciding which irq_chip structure is used 155 155 */ 156 - static int gef_pic_host_map(struct irq_host *h, unsigned int virq, 156 + static int gef_pic_host_map(struct irq_domain *h, unsigned int virq, 157 157 irq_hw_number_t hwirq) 158 158 { 159 159 /* All interrupts are LEVEL sensitive */ ··· 163 163 return 0; 164 164 } 165 165 166 - static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct, 166 + static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct, 167 167 const u32 *intspec, unsigned int intsize, 168 168 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 169 169 { ··· 177 177 return 0; 178 178 } 179 179 180 - static struct irq_host_ops gef_pic_host_ops = { 180 + static struct irq_domain_ops gef_pic_host_ops = { 181 181 .map = gef_pic_host_map, 182 182 .xlate = gef_pic_host_xlate, 183 183 }; ··· 211 211 return; 212 212 } 213 213 214 - /* Setup an irq_host structure */ 215 - gef_pic_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 214 + /* Setup an irq_domain structure */ 215 + gef_pic_irq_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 216 216 GEF_PIC_NUM_IRQS, 217 217 &gef_pic_host_ops, NO_IRQ); 218 218 if (gef_pic_irq_host == NULL)
+14 -14
arch/powerpc/platforms/cell/axon_msi.c
··· 67 67 68 68 69 69 struct axon_msic { 70 - struct irq_host *irq_host; 70 + struct irq_domain *irq_domain; 71 71 __le32 *fifo_virt; 72 72 dma_addr_t fifo_phys; 73 73 dcr_host_t dcr_host; ··· 152 152 153 153 static struct axon_msic *find_msi_translator(struct pci_dev *dev) 154 154 { 155 - struct irq_host *irq_host; 155 + struct irq_domain *irq_domain; 156 156 struct device_node *dn, *tmp; 157 157 const phandle *ph; 158 158 struct axon_msic *msic = NULL; ··· 184 184 goto out_error; 185 185 } 186 186 187 - irq_host = irq_find_host(dn); 188 - if (!irq_host) { 189 - dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n", 187 + irq_domain = irq_find_host(dn); 188 + if (!irq_domain) { 189 + dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n", 190 190 dn->full_name); 191 191 goto out_error; 192 192 } 193 193 194 - msic = irq_host->host_data; 194 + msic = irq_domain->host_data; 195 195 196 196 out_error: 197 197 of_node_put(dn); ··· 280 280 BUILD_BUG_ON(NR_IRQS > 65536); 281 281 282 282 list_for_each_entry(entry, &dev->msi_list, list) { 283 - virq = irq_create_direct_mapping(msic->irq_host); 283 + virq = irq_create_direct_mapping(msic->irq_domain); 284 284 if (virq == NO_IRQ) { 285 285 dev_warn(&dev->dev, 286 286 "axon_msi: virq allocation failed!\n"); ··· 318 318 .name = "AXON-MSI", 319 319 }; 320 320 321 - static int msic_host_map(struct irq_host *h, unsigned int virq, 321 + static int msic_host_map(struct irq_domain *h, unsigned int virq, 322 322 irq_hw_number_t hw) 323 323 { 324 324 irq_set_chip_data(virq, h->host_data); ··· 327 327 return 0; 328 328 } 329 329 330 - static struct irq_host_ops msic_host_ops = { 330 + static struct irq_domain_ops msic_host_ops = { 331 331 .map = msic_host_map, 332 332 }; 333 333 ··· 337 337 u32 tmp; 338 338 339 339 pr_devel("axon_msi: disabling %s\n", 340 - msic->irq_host->of_node->full_name); 340 + msic->irq_domain->of_node->full_name); 341 341 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); 342 342 tmp &= 
~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; 343 343 msic_dcr_write(msic, MSIC_CTRL_REG, tmp); ··· 392 392 } 393 393 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); 394 394 395 - msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP, 395 + msic->irq_domain = irq_alloc_host(dn, IRQ_DOMAIN_MAP_NOMAP, 396 396 NR_IRQS, &msic_host_ops, 0); 397 - if (!msic->irq_host) { 398 - printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n", 397 + if (!msic->irq_domain) { 398 + printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", 399 399 dn->full_name); 400 400 goto out_free_fifo; 401 401 } 402 402 403 - msic->irq_host->host_data = msic; 403 + msic->irq_domain->host_data = msic; 404 404 405 405 irq_set_handler_data(virq, msic); 406 406 irq_set_chained_handler(virq, axon_msi_cascade);
+7 -7
arch/powerpc/platforms/cell/beat_interrupt.c
··· 34 34 static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; 35 35 static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; 36 36 37 - static struct irq_host *beatic_host; 37 + static struct irq_domain *beatic_host; 38 38 39 39 /* 40 40 * In this implementation, "virq" == "IRQ plug number", ··· 122 122 * 123 123 * Note that the number (virq) is already assigned at upper layer. 124 124 */ 125 - static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) 125 + static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq) 126 126 { 127 127 beat_destruct_irq_plug(virq); 128 128 } ··· 133 133 * 134 134 * Note that the number (virq) is already assigned at upper layer. 135 135 */ 136 - static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, 136 + static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq, 137 137 irq_hw_number_t hw) 138 138 { 139 139 int64_t err; ··· 154 154 * Called from irq_create_of_mapping() only. 155 155 * Note: We have only 1 entry to translate. 
156 156 */ 157 - static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, 157 + static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct, 158 158 const u32 *intspec, unsigned int intsize, 159 159 irq_hw_number_t *out_hwirq, 160 160 unsigned int *out_flags) ··· 166 166 return 0; 167 167 } 168 168 169 - static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) 169 + static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np) 170 170 { 171 171 /* Match all */ 172 172 return 1; 173 173 } 174 174 175 - static struct irq_host_ops beatic_pic_host_ops = { 175 + static struct irq_domain_ops beatic_pic_host_ops = { 176 176 .map = beatic_pic_host_map, 177 177 .unmap = beatic_pic_host_unmap, 178 178 .xlate = beatic_pic_host_xlate, ··· 239 239 ppc_md.get_irq = beatic_get_irq; 240 240 241 241 /* Allocate an irq host */ 242 - beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, 242 + beatic_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, 243 243 &beatic_pic_host_ops, 244 244 0); 245 245 BUG_ON(beatic_host == NULL);
+7 -7
arch/powerpc/platforms/cell/interrupt.c
··· 56 56 57 57 static DEFINE_PER_CPU(struct iic, cpu_iic); 58 58 #define IIC_NODE_COUNT 2 59 - static struct irq_host *iic_host; 59 + static struct irq_domain *iic_host; 60 60 61 61 /* Convert between "pending" bits and hw irq number */ 62 62 static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) ··· 186 186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); 187 187 } 188 188 189 - struct irq_host *iic_get_irq_host(int node) 189 + struct irq_domain *iic_get_irq_host(int node) 190 190 { 191 191 return iic_host; 192 192 } ··· 222 222 #endif /* CONFIG_SMP */ 223 223 224 224 225 - static int iic_host_match(struct irq_host *h, struct device_node *node) 225 + static int iic_host_match(struct irq_domain *h, struct device_node *node) 226 226 { 227 227 return of_device_is_compatible(node, 228 228 "IBM,CBEA-Internal-Interrupt-Controller"); 229 229 } 230 230 231 - static int iic_host_map(struct irq_host *h, unsigned int virq, 231 + static int iic_host_map(struct irq_domain *h, unsigned int virq, 232 232 irq_hw_number_t hw) 233 233 { 234 234 switch (hw & IIC_IRQ_TYPE_MASK) { ··· 245 245 return 0; 246 246 } 247 247 248 - static int iic_host_xlate(struct irq_host *h, struct device_node *ct, 248 + static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, 249 249 const u32 *intspec, unsigned int intsize, 250 250 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 251 251 ··· 285 285 return 0; 286 286 } 287 287 288 - static struct irq_host_ops iic_host_ops = { 288 + static struct irq_domain_ops iic_host_ops = { 289 289 .match = iic_host_match, 290 290 .map = iic_host_map, 291 291 .xlate = iic_host_xlate, ··· 378 378 void __init iic_init_IRQ(void) 379 379 { 380 380 /* Setup an irq host data structure */ 381 - iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT, 381 + iic_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_LINEAR, IIC_SOURCE_COUNT, 382 382 &iic_host_ops, IIC_IRQ_INVALID); 383 383 BUG_ON(iic_host == 
NULL); 384 384 irq_set_default_host(iic_host);
+5 -5
arch/powerpc/platforms/cell/spider-pic.c
··· 62 62 #define SPIDER_IRQ_INVALID 63 63 63 64 64 struct spider_pic { 65 - struct irq_host *host; 65 + struct irq_domain *host; 66 66 void __iomem *regs; 67 67 unsigned int node_id; 68 68 }; ··· 168 168 .irq_set_type = spider_set_irq_type, 169 169 }; 170 170 171 - static int spider_host_map(struct irq_host *h, unsigned int virq, 171 + static int spider_host_map(struct irq_domain *h, unsigned int virq, 172 172 irq_hw_number_t hw) 173 173 { 174 174 irq_set_chip_data(virq, h->host_data); ··· 180 180 return 0; 181 181 } 182 182 183 - static int spider_host_xlate(struct irq_host *h, struct device_node *ct, 183 + static int spider_host_xlate(struct irq_domain *h, struct device_node *ct, 184 184 const u32 *intspec, unsigned int intsize, 185 185 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 186 186 ··· 194 194 return 0; 195 195 } 196 196 197 - static struct irq_host_ops spider_host_ops = { 197 + static struct irq_domain_ops spider_host_ops = { 198 198 .map = spider_host_map, 199 199 .xlate = spider_host_xlate, 200 200 }; ··· 299 299 panic("spider_pic: can't map registers !"); 300 300 301 301 /* Allocate a host */ 302 - pic->host = irq_alloc_host(of_node, IRQ_HOST_MAP_LINEAR, 302 + pic->host = irq_alloc_host(of_node, IRQ_DOMAIN_MAP_LINEAR, 303 303 SPIDER_SRC_COUNT, &spider_host_ops, 304 304 SPIDER_IRQ_INVALID); 305 305 if (pic->host == NULL)
+15 -15
arch/powerpc/platforms/embedded6xx/flipper-pic.c
··· 96 96 * 97 97 */ 98 98 99 - static struct irq_host *flipper_irq_host; 99 + static struct irq_domain *flipper_irq_host; 100 100 101 - static int flipper_pic_map(struct irq_host *h, unsigned int virq, 101 + static int flipper_pic_map(struct irq_domain *h, unsigned int virq, 102 102 irq_hw_number_t hwirq) 103 103 { 104 104 irq_set_chip_data(virq, h->host_data); ··· 107 107 return 0; 108 108 } 109 109 110 - static int flipper_pic_match(struct irq_host *h, struct device_node *np) 110 + static int flipper_pic_match(struct irq_domain *h, struct device_node *np) 111 111 { 112 112 return 1; 113 113 } 114 114 115 115 116 - static struct irq_host_ops flipper_irq_host_ops = { 116 + static struct irq_domain_ops flipper_irq_domain_ops = { 117 117 .map = flipper_pic_map, 118 118 .match = flipper_pic_match, 119 119 }; ··· 130 130 out_be32(io_base + FLIPPER_ICR, 0xffffffff); 131 131 } 132 132 133 - struct irq_host * __init flipper_pic_init(struct device_node *np) 133 + struct irq_domain * __init flipper_pic_init(struct device_node *np) 134 134 { 135 135 struct device_node *pi; 136 - struct irq_host *irq_host = NULL; 136 + struct irq_domain *irq_domain = NULL; 137 137 struct resource res; 138 138 void __iomem *io_base; 139 139 int retval; ··· 159 159 160 160 __flipper_quiesce(io_base); 161 161 162 - irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS, 163 - &flipper_irq_host_ops, -1); 164 - if (!irq_host) { 165 - pr_err("failed to allocate irq_host\n"); 162 + irq_domain = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, FLIPPER_NR_IRQS, 163 + &flipper_irq_domain_ops, -1); 164 + if (!irq_domain) { 165 + pr_err("failed to allocate irq_domain\n"); 166 166 return NULL; 167 167 } 168 168 169 - irq_host->host_data = io_base; 169 + irq_domain->host_data = io_base; 170 170 171 171 out: 172 - return irq_host; 172 + return irq_domain; 173 173 } 174 174 175 175 unsigned int flipper_pic_get_irq(void) 176 176 { 177 - void __iomem *io_base = flipper_irq_host->host_data; 177 + void 
__iomem *io_base = flipper_irq_domain->host_data; 178 178 int irq; 179 179 u32 irq_status; 180 180 ··· 184 184 return NO_IRQ; /* no more IRQs pending */ 185 185 186 186 irq = __ffs(irq_status); 187 - return irq_linear_revmap(flipper_irq_host, irq); 187 + return irq_linear_revmap(flipper_irq_domain, irq); 188 188 } 189 189 190 190 /* ··· 199 199 np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-pic"); 200 200 BUG_ON(!np); 201 201 202 - flipper_irq_host = flipper_pic_init(np); 202 + flipper_irq_domain = flipper_pic_init(np); 203 203 BUG_ON(!flipper_irq_host); 204 204 205 205 irq_set_default_host(flipper_irq_host);
+18 -18
arch/powerpc/platforms/embedded6xx/hlwd-pic.c
··· 89 89 * 90 90 */ 91 91 92 - static struct irq_host *hlwd_irq_host; 92 + static struct irq_domain *hlwd_irq_host; 93 93 94 - static int hlwd_pic_map(struct irq_host *h, unsigned int virq, 94 + static int hlwd_pic_map(struct irq_domain *h, unsigned int virq, 95 95 irq_hw_number_t hwirq) 96 96 { 97 97 irq_set_chip_data(virq, h->host_data); ··· 100 100 return 0; 101 101 } 102 102 103 - static struct irq_host_ops hlwd_irq_host_ops = { 103 + static struct irq_domain_ops hlwd_irq_domain_ops = { 104 104 .map = hlwd_pic_map, 105 105 }; 106 106 107 - static unsigned int __hlwd_pic_get_irq(struct irq_host *h) 107 + static unsigned int __hlwd_pic_get_irq(struct irq_domain *h) 108 108 { 109 109 void __iomem *io_base = h->host_data; 110 110 int irq; ··· 123 123 struct irq_desc *desc) 124 124 { 125 125 struct irq_chip *chip = irq_desc_get_chip(desc); 126 - struct irq_host *irq_host = irq_get_handler_data(cascade_virq); 126 + struct irq_domain *irq_domain = irq_get_handler_data(cascade_virq); 127 127 unsigned int virq; 128 128 129 129 raw_spin_lock(&desc->lock); 130 130 chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */ 131 131 raw_spin_unlock(&desc->lock); 132 132 133 - virq = __hlwd_pic_get_irq(irq_host); 133 + virq = __hlwd_pic_get_irq(irq_domain); 134 134 if (virq != NO_IRQ) 135 135 generic_handle_irq(virq); 136 136 else ··· 155 155 out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff); 156 156 } 157 157 158 - struct irq_host *hlwd_pic_init(struct device_node *np) 158 + struct irq_domain *hlwd_pic_init(struct device_node *np) 159 159 { 160 - struct irq_host *irq_host; 160 + struct irq_domain *irq_domain; 161 161 struct resource res; 162 162 void __iomem *io_base; 163 163 int retval; ··· 177 177 178 178 __hlwd_quiesce(io_base); 179 179 180 - irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, HLWD_NR_IRQS, 181 - &hlwd_irq_host_ops, -1); 182 - if (!irq_host) { 183 - pr_err("failed to allocate irq_host\n"); 180 + irq_domain = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, HLWD_NR_IRQS, 181 
+ &hlwd_irq_domain_ops, -1); 182 + if (!irq_domain) { 183 + pr_err("failed to allocate irq_domain\n"); 184 184 return NULL; 185 185 } 186 - irq_host->host_data = io_base; 186 + irq_domain->host_data = io_base; 187 187 188 - return irq_host; 188 + return irq_domain; 189 189 } 190 190 191 191 unsigned int hlwd_pic_get_irq(void) 192 192 { 193 - return __hlwd_pic_get_irq(hlwd_irq_host); 193 + return __hlwd_pic_get_irq(hlwd_irq_domain); 194 194 } 195 195 196 196 /* ··· 200 200 201 201 void hlwd_pic_probe(void) 202 202 { 203 - struct irq_host *host; 203 + struct irq_domain *host; 204 204 struct device_node *np; 205 205 const u32 *interrupts; 206 206 int cascade_virq; ··· 214 214 irq_set_handler_data(cascade_virq, host); 215 215 irq_set_chained_handler(cascade_virq, 216 216 hlwd_pic_irq_cascade); 217 - hlwd_irq_host = host; 217 + hlwd_irq_domain = host; 218 218 break; 219 219 } 220 220 } ··· 228 228 */ 229 229 void hlwd_quiesce(void) 230 230 { 231 - void __iomem *io_base = hlwd_irq_host->host_data; 231 + void __iomem *io_base = hlwd_irq_domain->host_data; 232 232 233 233 __hlwd_quiesce(io_base); 234 234 }
+6 -6
arch/powerpc/platforms/iseries/irq.c
··· 342 342 343 343 #ifdef CONFIG_PCI 344 344 345 - static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, 345 + static int iseries_irq_host_map(struct irq_domain *h, unsigned int virq, 346 346 irq_hw_number_t hw) 347 347 { 348 348 irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); ··· 350 350 return 0; 351 351 } 352 352 353 - static int iseries_irq_host_match(struct irq_host *h, struct device_node *np) 353 + static int iseries_irq_host_match(struct irq_domain *h, struct device_node *np) 354 354 { 355 355 /* Match all */ 356 356 return 1; 357 357 } 358 358 359 - static struct irq_host_ops iseries_irq_host_ops = { 359 + static struct irq_domain_ops iseries_irq_domain_ops = { 360 360 .map = iseries_irq_host_map, 361 361 .match = iseries_irq_host_match, 362 362 }; ··· 368 368 void __init iSeries_init_IRQ(void) 369 369 { 370 370 /* Register PCI event handler and open an event path */ 371 - struct irq_host *host; 371 + struct irq_domain *host; 372 372 int ret; 373 373 374 374 /* ··· 380 380 /* Create irq host. No need for a revmap since HV will give us 381 381 * back our virtual irq number 382 382 */ 383 - host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, 384 - &iseries_irq_host_ops, 0); 383 + host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, 384 + &iseries_irq_domain_ops, 0); 385 385 BUG_ON(host == NULL); 386 386 irq_set_default_host(host); 387 387
+6 -6
arch/powerpc/platforms/powermac/pic.c
··· 61 61 static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; 62 62 static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 63 63 static int pmac_irq_cascade = -1; 64 - static struct irq_host *pmac_pic_host; 64 + static struct irq_domain *pmac_pic_host; 65 65 66 66 static void __pmac_retrigger(unsigned int irq_nr) 67 67 { ··· 268 268 .name = "cascade", 269 269 }; 270 270 271 - static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) 271 + static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node) 272 272 { 273 273 /* We match all, we don't always have a node anyway */ 274 274 return 1; 275 275 } 276 276 277 - static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, 277 + static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq, 278 278 irq_hw_number_t hw) 279 279 { 280 280 if (hw >= max_irqs) ··· 288 288 return 0; 289 289 } 290 290 291 - static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, 291 + static int pmac_pic_host_xlate(struct irq_domain *h, struct device_node *ct, 292 292 const u32 *intspec, unsigned int intsize, 293 293 irq_hw_number_t *out_hwirq, 294 294 unsigned int *out_flags) ··· 299 299 return 0; 300 300 } 301 301 302 - static struct irq_host_ops pmac_pic_host_ops = { 302 + static struct irq_domain_ops pmac_pic_host_ops = { 303 303 .match = pmac_pic_host_match, 304 304 .map = pmac_pic_host_map, 305 305 .xlate = pmac_pic_host_xlate, ··· 352 352 /* 353 353 * Allocate an irq host 354 354 */ 355 - pmac_pic_host = irq_alloc_host(master, IRQ_HOST_MAP_LINEAR, max_irqs, 355 + pmac_pic_host = irq_alloc_host(master, IRQ_DOMAIN_MAP_LINEAR, max_irqs, 356 356 &pmac_pic_host_ops, 357 357 max_irqs); 358 358 BUG_ON(pmac_pic_host == NULL);
+4 -4
arch/powerpc/platforms/powermac/smp.c
··· 125 125 static int psurge_type = PSURGE_NONE; 126 126 127 127 /* irq for secondary cpus to report */ 128 - static struct irq_host *psurge_host; 128 + static struct irq_domain *psurge_host; 129 129 int psurge_secondary_virq; 130 130 131 131 /* ··· 176 176 psurge_set_ipi(cpu); 177 177 } 178 178 179 - static int psurge_host_map(struct irq_host *h, unsigned int virq, 179 + static int psurge_host_map(struct irq_domain *h, unsigned int virq, 180 180 irq_hw_number_t hw) 181 181 { 182 182 irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq); ··· 184 184 return 0; 185 185 } 186 186 187 - struct irq_host_ops psurge_host_ops = { 187 + struct irq_domain_ops psurge_host_ops = { 188 188 .map = psurge_host_map, 189 189 }; 190 190 ··· 192 192 { 193 193 int rc = -ENOMEM; 194 194 195 - psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, 195 + psurge_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, 196 196 &psurge_host_ops, 0); 197 197 198 198 if (psurge_host)
+5 -5
arch/powerpc/platforms/ps3/interrupt.c
··· 667 667 static void dump_bmp(struct ps3_private* pd) {}; 668 668 #endif /* defined(DEBUG) */ 669 669 670 - static int ps3_host_map(struct irq_host *h, unsigned int virq, 670 + static int ps3_host_map(struct irq_domain *h, unsigned int virq, 671 671 irq_hw_number_t hwirq) 672 672 { 673 673 DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, ··· 678 678 return 0; 679 679 } 680 680 681 - static int ps3_host_match(struct irq_host *h, struct device_node *np) 681 + static int ps3_host_match(struct irq_domain *h, struct device_node *np) 682 682 { 683 683 /* Match all */ 684 684 return 1; 685 685 } 686 686 687 - static struct irq_host_ops ps3_host_ops = { 687 + static struct irq_domain_ops ps3_host_ops = { 688 688 .map = ps3_host_map, 689 689 .match = ps3_host_match, 690 690 }; ··· 751 751 { 752 752 int result; 753 753 unsigned cpu; 754 - struct irq_host *host; 754 + struct irq_domain *host; 755 755 756 - host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &ps3_host_ops, 756 + host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, &ps3_host_ops, 757 757 PS3_INVALID_OUTLET); 758 758 irq_set_default_host(host); 759 759 irq_set_virq_count(PS3_PLUG_MAX + 1);
+5 -5
arch/powerpc/platforms/wsp/opb_pic.c
··· 30 30 static int opb_index = 0; 31 31 32 32 struct opb_pic { 33 - struct irq_host *host; 33 + struct irq_domain *host; 34 34 void *regs; 35 35 int index; 36 36 spinlock_t lock; ··· 179 179 .irq_set_type = opb_set_irq_type 180 180 }; 181 181 182 - static int opb_host_map(struct irq_host *host, unsigned int virq, 182 + static int opb_host_map(struct irq_domain *host, unsigned int virq, 183 183 irq_hw_number_t hwirq) 184 184 { 185 185 struct opb_pic *opb; ··· 196 196 return 0; 197 197 } 198 198 199 - static int opb_host_xlate(struct irq_host *host, struct device_node *dn, 199 + static int opb_host_xlate(struct irq_domain *host, struct device_node *dn, 200 200 const u32 *intspec, unsigned int intsize, 201 201 irq_hw_number_t *out_hwirq, unsigned int *out_type) 202 202 { ··· 207 207 return 0; 208 208 } 209 209 210 - static struct irq_host_ops opb_host_ops = { 210 + static struct irq_domain_ops opb_host_ops = { 211 211 .map = opb_host_map, 212 212 .xlate = opb_host_xlate, 213 213 }; ··· 267 267 * having one interrupt to issue, we're the controller for multiple 268 268 * hardware IRQs, so later we can lookup their virtual IRQs. */ 269 269 270 - opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR, 270 + opb->host = irq_alloc_host(dn, IRQ_DOMAIN_MAP_LINEAR, 271 271 OPB_NR_IRQS, &opb_host_ops, -1); 272 272 273 273 if (!opb->host) {
+4 -4
arch/powerpc/sysdev/cpm1.c
··· 54 54 immap_t __iomem *mpc8xx_immr; 55 55 static cpic8xx_t __iomem *cpic_reg; 56 56 57 - static struct irq_host *cpm_pic_host; 57 + static struct irq_domain *cpm_pic_host; 58 58 59 59 static void cpm_mask_irq(struct irq_data *d) 60 60 { ··· 98 98 return irq_linear_revmap(cpm_pic_host, cpm_vec); 99 99 } 100 100 101 - static int cpm_pic_host_map(struct irq_host *h, unsigned int virq, 101 + static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq, 102 102 irq_hw_number_t hw) 103 103 { 104 104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); ··· 123 123 .name = "error", 124 124 }; 125 125 126 - static struct irq_host_ops cpm_pic_host_ops = { 126 + static struct irq_domain_ops cpm_pic_host_ops = { 127 127 .map = cpm_pic_host_map, 128 128 }; 129 129 ··· 164 164 165 165 out_be32(&cpic_reg->cpic_cimr, 0); 166 166 167 - cpm_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 167 + cpm_pic_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 168 168 64, &cpm_pic_host_ops, 64); 169 169 if (cpm_pic_host == NULL) { 170 170 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
+5 -5
arch/powerpc/sysdev/cpm2_pic.c
··· 50 50 51 51 static intctl_cpm2_t __iomem *cpm2_intctl; 52 52 53 - static struct irq_host *cpm2_pic_host; 53 + static struct irq_domain *cpm2_pic_host; 54 54 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 55 55 static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 56 56 ··· 214 214 return irq_linear_revmap(cpm2_pic_host, irq); 215 215 } 216 216 217 - static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq, 217 + static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq, 218 218 irq_hw_number_t hw) 219 219 { 220 220 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); ··· 224 224 return 0; 225 225 } 226 226 227 - static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct, 227 + static int cpm2_pic_host_xlate(struct irq_domain *h, struct device_node *ct, 228 228 const u32 *intspec, unsigned int intsize, 229 229 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 230 230 { ··· 236 236 return 0; 237 237 } 238 238 239 - static struct irq_host_ops cpm2_pic_host_ops = { 239 + static struct irq_domain_ops cpm2_pic_host_ops = { 240 240 .map = cpm2_pic_host_map, 241 241 .xlate = cpm2_pic_host_xlate, 242 242 }; ··· 275 275 out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); 276 276 277 277 /* create a legacy host */ 278 - cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 278 + cpm2_pic_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 279 279 64, &cpm2_pic_host_ops, 64); 280 280 if (cpm2_pic_host == NULL) { 281 281 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
+5 -5
arch/powerpc/sysdev/ehv_pic.c
··· 182 182 return irq_linear_revmap(global_ehv_pic->irqhost, irq); 183 183 } 184 184 185 - static int ehv_pic_host_match(struct irq_host *h, struct device_node *node) 185 + static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node) 186 186 { 187 187 /* Exact match, unless ehv_pic node is NULL */ 188 188 return h->of_node == NULL || h->of_node == node; 189 189 } 190 190 191 - static int ehv_pic_host_map(struct irq_host *h, unsigned int virq, 191 + static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq, 192 192 irq_hw_number_t hw) 193 193 { 194 194 struct ehv_pic *ehv_pic = h->host_data; ··· 217 217 return 0; 218 218 } 219 219 220 - static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct, 220 + static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct, 221 221 const u32 *intspec, unsigned int intsize, 222 222 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 223 223 ··· 248 248 return 0; 249 249 } 250 250 251 - static struct irq_host_ops ehv_pic_host_ops = { 251 + static struct irq_domain_ops ehv_pic_host_ops = { 252 252 .match = ehv_pic_host_match, 253 253 .map = ehv_pic_host_map, 254 254 .xlate = ehv_pic_host_xlate, ··· 275 275 return; 276 276 } 277 277 278 - ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 278 + ehv_pic->irqhost = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 279 279 NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0); 280 280 281 281 if (!ehv_pic->irqhost) {
+3 -3
arch/powerpc/sysdev/fsl_msi.c
··· 60 60 .name = "FSL-MSI", 61 61 }; 62 62 63 - static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, 63 + static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq, 64 64 irq_hw_number_t hw) 65 65 { 66 66 struct fsl_msi *msi_data = h->host_data; ··· 74 74 return 0; 75 75 } 76 76 77 - static struct irq_host_ops fsl_msi_host_ops = { 77 + static struct irq_domain_ops fsl_msi_host_ops = { 78 78 .map = fsl_msi_host_map, 79 79 }; 80 80 ··· 387 387 } 388 388 platform_set_drvdata(dev, msi); 389 389 390 - msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, 390 + msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_DOMAIN_MAP_LINEAR, 391 391 NR_MSI_IRQS, &fsl_msi_host_ops, 0); 392 392 393 393 if (msi->irqhost == NULL) {
+1 -1
arch/powerpc/sysdev/fsl_msi.h
··· 26 26 #define FSL_PIC_IP_VMPIC 0x00000003 27 27 28 28 struct fsl_msi { 29 - struct irq_host *irqhost; 29 + struct irq_domain *irqhost; 30 30 31 31 unsigned long cascade_irq; 32 32
+7 -7
arch/powerpc/sysdev/i8259.c
··· 25 25 26 26 static DEFINE_RAW_SPINLOCK(i8259_lock); 27 27 28 - static struct irq_host *i8259_host; 28 + static struct irq_domain *i8259_host; 29 29 30 30 /* 31 31 * Acknowledge the IRQ using either the PCI host bridge's interrupt ··· 163 163 .flags = IORESOURCE_BUSY, 164 164 }; 165 165 166 - static int i8259_host_match(struct irq_host *h, struct device_node *node) 166 + static int i8259_host_match(struct irq_domain *h, struct device_node *node) 167 167 { 168 168 return h->of_node == NULL || h->of_node == node; 169 169 } 170 170 171 - static int i8259_host_map(struct irq_host *h, unsigned int virq, 171 + static int i8259_host_map(struct irq_domain *h, unsigned int virq, 172 172 irq_hw_number_t hw) 173 173 { 174 174 pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); ··· 185 185 return 0; 186 186 } 187 187 188 - static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, 188 + static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct, 189 189 const u32 *intspec, unsigned int intsize, 190 190 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 191 191 { ··· 205 205 return 0; 206 206 } 207 207 208 - static struct irq_host_ops i8259_host_ops = { 208 + static struct irq_domain_ops i8259_host_ops = { 209 209 .match = i8259_host_match, 210 210 .map = i8259_host_map, 211 211 .xlate = i8259_host_xlate, 212 212 }; 213 213 214 - struct irq_host *i8259_get_host(void) 214 + struct irq_domain *i8259_get_host(void) 215 215 { 216 216 return i8259_host; 217 217 } ··· 263 263 raw_spin_unlock_irqrestore(&i8259_lock, flags); 264 264 265 265 /* create a legacy host */ 266 - i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, 266 + i8259_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LEGACY, 267 267 0, &i8259_host_ops, 0); 268 268 if (i8259_host == NULL) { 269 269 printk(KERN_ERR "i8259: failed to allocate irq host !\n");
+5 -5
arch/powerpc/sysdev/ipic.c
··· 672 672 .irq_set_type = ipic_set_irq_type, 673 673 }; 674 674 675 - static int ipic_host_match(struct irq_host *h, struct device_node *node) 675 + static int ipic_host_match(struct irq_domain *h, struct device_node *node) 676 676 { 677 677 /* Exact match, unless ipic node is NULL */ 678 678 return h->of_node == NULL || h->of_node == node; 679 679 } 680 680 681 - static int ipic_host_map(struct irq_host *h, unsigned int virq, 681 + static int ipic_host_map(struct irq_domain *h, unsigned int virq, 682 682 irq_hw_number_t hw) 683 683 { 684 684 struct ipic *ipic = h->host_data; ··· 692 692 return 0; 693 693 } 694 694 695 - static int ipic_host_xlate(struct irq_host *h, struct device_node *ct, 695 + static int ipic_host_xlate(struct irq_domain *h, struct device_node *ct, 696 696 const u32 *intspec, unsigned int intsize, 697 697 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 698 698 ··· 708 708 return 0; 709 709 } 710 710 711 - static struct irq_host_ops ipic_host_ops = { 711 + static struct irq_domain_ops ipic_host_ops = { 712 712 .match = ipic_host_match, 713 713 .map = ipic_host_map, 714 714 .xlate = ipic_host_xlate, ··· 728 728 if (ipic == NULL) 729 729 return NULL; 730 730 731 - ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 731 + ipic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 732 732 NR_IPIC_INTS, 733 733 &ipic_host_ops, 0); 734 734 if (ipic->irqhost == NULL) {
+1 -1
arch/powerpc/sysdev/ipic.h
··· 43 43 volatile u32 __iomem *regs; 44 44 45 45 /* The remapper for this IPIC */ 46 - struct irq_host *irqhost; 46 + struct irq_domain *irqhost; 47 47 }; 48 48 49 49 struct ipic_info {
+5 -5
arch/powerpc/sysdev/mpc8xx_pic.c
··· 17 17 18 18 extern int cpm_get_irq(struct pt_regs *regs); 19 19 20 - static struct irq_host *mpc8xx_pic_host; 20 + static struct irq_domain *mpc8xx_pic_host; 21 21 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 22 22 static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 23 23 static sysconf8xx_t __iomem *siu_reg; ··· 110 110 111 111 } 112 112 113 - static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, 113 + static int mpc8xx_pic_host_map(struct irq_domain *h, unsigned int virq, 114 114 irq_hw_number_t hw) 115 115 { 116 116 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); ··· 121 121 } 122 122 123 123 124 - static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct, 124 + static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct, 125 125 const u32 *intspec, unsigned int intsize, 126 126 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 127 127 { ··· 142 142 } 143 143 144 144 145 - static struct irq_host_ops mpc8xx_pic_host_ops = { 145 + static struct irq_domain_ops mpc8xx_pic_host_ops = { 146 146 .map = mpc8xx_pic_host_map, 147 147 .xlate = mpc8xx_pic_host_xlate, 148 148 }; ··· 171 171 goto out; 172 172 } 173 173 174 - mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 174 + mpc8xx_pic_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 175 175 64, &mpc8xx_pic_host_ops, 64); 176 176 if (mpc8xx_pic_host == NULL) { 177 177 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
+6 -6
arch/powerpc/sysdev/mpic.c
··· 965 965 #endif /* CONFIG_MPIC_U3_HT_IRQS */ 966 966 967 967 968 - static int mpic_host_match(struct irq_host *h, struct device_node *node) 968 + static int mpic_host_match(struct irq_domain *h, struct device_node *node) 969 969 { 970 970 /* Exact match, unless mpic node is NULL */ 971 971 return h->of_node == NULL || h->of_node == node; 972 972 } 973 973 974 - static int mpic_host_map(struct irq_host *h, unsigned int virq, 974 + static int mpic_host_map(struct irq_domain *h, unsigned int virq, 975 975 irq_hw_number_t hw) 976 976 { 977 977 struct mpic *mpic = h->host_data; ··· 1041 1041 return 0; 1042 1042 } 1043 1043 1044 - static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, 1044 + static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct, 1045 1045 const u32 *intspec, unsigned int intsize, 1046 1046 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 1047 1047 ··· 1121 1121 BUG_ON(!(mpic->flags & MPIC_SECONDARY)); 1122 1122 1123 1123 virq = mpic_get_one_irq(mpic); 1124 - if (virq != NO_IRQ) 1124 + if (virq) 1125 1125 generic_handle_irq(virq); 1126 1126 1127 1127 chip->irq_eoi(&desc->irq_data); 1128 1128 } 1129 1129 1130 - static struct irq_host_ops mpic_host_ops = { 1130 + static struct irq_domain_ops mpic_host_ops = { 1131 1131 .match = mpic_host_match, 1132 1132 .map = mpic_host_map, 1133 1133 .xlate = mpic_host_xlate, ··· 1345 1345 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); 1346 1346 mpic->isu_mask = (1 << mpic->isu_shift) - 1; 1347 1347 1348 - mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR, 1348 + mpic->irqhost = irq_alloc_host(mpic->node, IRQ_DOMAIN_MAP_LINEAR, 1349 1349 isu_size ? isu_size : mpic->num_sources, 1350 1350 &mpic_host_ops, 1351 1351 flags & MPIC_LARGE_VECTORS ? 2048 : 256);
+1 -1
arch/powerpc/sysdev/mpic_msi.c
··· 32 32 static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) 33 33 { 34 34 irq_hw_number_t hwirq; 35 - struct irq_host_ops *ops = mpic->irqhost->ops; 35 + struct irq_domain_ops *ops = mpic->irqhost->ops; 36 36 struct device_node *np; 37 37 int flags, index, i; 38 38 struct of_irq oirq;
+4 -4
arch/powerpc/sysdev/mv64x60_pic.c
··· 70 70 static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS; 71 71 static u32 mv64x60_cached_gpp_mask; 72 72 73 - static struct irq_host *mv64x60_irq_host; 73 + static struct irq_domain *mv64x60_irq_host; 74 74 75 75 /* 76 76 * mv64x60_chip_low functions ··· 208 208 [MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp, 209 209 }; 210 210 211 - static int mv64x60_host_map(struct irq_host *h, unsigned int virq, 211 + static int mv64x60_host_map(struct irq_domain *h, unsigned int virq, 212 212 irq_hw_number_t hwirq) 213 213 { 214 214 int level1; ··· 223 223 return 0; 224 224 } 225 225 226 - static struct irq_host_ops mv64x60_host_ops = { 226 + static struct irq_domain_ops mv64x60_host_ops = { 227 227 .map = mv64x60_host_map, 228 228 }; 229 229 ··· 250 250 paddr = of_translate_address(np, reg); 251 251 mv64x60_irq_reg_base = ioremap(paddr, reg[1]); 252 252 253 - mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 253 + mv64x60_irq_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 254 254 MV64x60_NUM_IRQS, 255 255 &mv64x60_host_ops, MV64x60_NUM_IRQS); 256 256
+5 -5
arch/powerpc/sysdev/qe_lib/qe_ic.c
··· 245 245 .irq_mask_ack = qe_ic_mask_irq, 246 246 }; 247 247 248 - static int qe_ic_host_match(struct irq_host *h, struct device_node *node) 248 + static int qe_ic_host_match(struct irq_domain *h, struct device_node *node) 249 249 { 250 250 /* Exact match, unless qe_ic node is NULL */ 251 251 return h->of_node == NULL || h->of_node == node; 252 252 } 253 253 254 - static int qe_ic_host_map(struct irq_host *h, unsigned int virq, 254 + static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, 255 255 irq_hw_number_t hw) 256 256 { 257 257 struct qe_ic *qe_ic = h->host_data; ··· 272 272 return 0; 273 273 } 274 274 275 - static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct, 275 + static int qe_ic_host_xlate(struct irq_domain *h, struct device_node *ct, 276 276 const u32 * intspec, unsigned int intsize, 277 277 irq_hw_number_t * out_hwirq, 278 278 unsigned int *out_flags) ··· 285 285 return 0; 286 286 } 287 287 288 - static struct irq_host_ops qe_ic_host_ops = { 288 + static struct irq_domain_ops qe_ic_host_ops = { 289 289 .match = qe_ic_host_match, 290 290 .map = qe_ic_host_map, 291 291 .xlate = qe_ic_host_xlate, ··· 339 339 if (qe_ic == NULL) 340 340 return; 341 341 342 - qe_ic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 342 + qe_ic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 343 343 NR_QE_IC_INTS, &qe_ic_host_ops, 0); 344 344 if (qe_ic->irqhost == NULL) { 345 345 kfree(qe_ic);
+1 -1
arch/powerpc/sysdev/qe_lib/qe_ic.h
··· 79 79 volatile u32 __iomem *regs; 80 80 81 81 /* The remapper for this QEIC */ 82 - struct irq_host *irqhost; 82 + struct irq_domain *irqhost; 83 83 84 84 /* The "linux" controller struct */ 85 85 struct irq_chip hc_irq;
+7 -7
arch/powerpc/sysdev/tsi108_pci.c
··· 51 51 u32 tsi108_pci_cfg_base; 52 52 static u32 tsi108_pci_cfg_phys; 53 53 u32 tsi108_csr_vir_base; 54 - static struct irq_host *pci_irq_host; 54 + static struct irq_domain *pci_irq_host; 55 55 56 56 extern u32 get_vir_csrbase(void); 57 57 extern u32 tsi108_read_reg(u32 reg_offset); ··· 376 376 .irq_unmask = tsi108_pci_irq_unmask, 377 377 }; 378 378 379 - static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, 379 + static int pci_irq_host_xlate(struct irq_domain *h, struct device_node *ct, 380 380 const u32 *intspec, unsigned int intsize, 381 381 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 382 382 { ··· 385 385 return 0; 386 386 } 387 387 388 - static int pci_irq_host_map(struct irq_host *h, unsigned int virq, 388 + static int pci_irq_host_map(struct irq_domain *h, unsigned int virq, 389 389 irq_hw_number_t hw) 390 390 { unsigned int irq; 391 391 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); ··· 397 397 return 0; 398 398 } 399 399 400 - static struct irq_host_ops pci_irq_host_ops = { 400 + static struct irq_domain_ops pci_irq_domain_ops = { 401 401 .map = pci_irq_host_map, 402 402 .xlate = pci_irq_host_xlate, 403 403 }; ··· 419 419 { 420 420 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); 421 421 422 - pci_irq_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, 423 - 0, &pci_irq_host_ops, 0); 422 + pci_irq_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LEGACY, 423 + 0, &pci_irq_domain_ops, 0); 424 424 if (pci_irq_host == NULL) { 425 - printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n"); 425 + printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n"); 426 426 return; 427 427 } 428 428
+5 -5
arch/powerpc/sysdev/uic.c
··· 49 49 raw_spinlock_t lock; 50 50 51 51 /* The remapper for this UIC */ 52 - struct irq_host *irqhost; 52 + struct irq_domain *irqhost; 53 53 }; 54 54 55 55 static void uic_unmask_irq(struct irq_data *d) ··· 174 174 .irq_set_type = uic_set_irq_type, 175 175 }; 176 176 177 - static int uic_host_map(struct irq_host *h, unsigned int virq, 177 + static int uic_host_map(struct irq_domain *h, unsigned int virq, 178 178 irq_hw_number_t hw) 179 179 { 180 180 struct uic *uic = h->host_data; ··· 190 190 return 0; 191 191 } 192 192 193 - static int uic_host_xlate(struct irq_host *h, struct device_node *ct, 193 + static int uic_host_xlate(struct irq_domain *h, struct device_node *ct, 194 194 const u32 *intspec, unsigned int intsize, 195 195 irq_hw_number_t *out_hwirq, unsigned int *out_type) 196 196 ··· 202 202 return 0; 203 203 } 204 204 205 - static struct irq_host_ops uic_host_ops = { 205 + static struct irq_domain_ops uic_host_ops = { 206 206 .map = uic_host_map, 207 207 .xlate = uic_host_xlate, 208 208 }; ··· 270 270 } 271 271 uic->dcrbase = *dcrreg; 272 272 273 - uic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 273 + uic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 274 274 NR_UIC_INTS, &uic_host_ops, -1); 275 275 if (! uic->irqhost) 276 276 return NULL; /* FIXME: panic? */
+6 -6
arch/powerpc/sysdev/xics/xics-common.c
··· 40 40 41 41 DEFINE_PER_CPU(struct xics_cppr, xics_cppr); 42 42 43 - struct irq_host *xics_host; 43 + struct irq_domain *xics_host; 44 44 45 45 static LIST_HEAD(ics_list); 46 46 ··· 301 301 } 302 302 #endif /* CONFIG_SMP */ 303 303 304 - static int xics_host_match(struct irq_host *h, struct device_node *node) 304 + static int xics_host_match(struct irq_domain *h, struct device_node *node) 305 305 { 306 306 struct ics *ics; 307 307 ··· 323 323 .irq_unmask = xics_ipi_unmask, 324 324 }; 325 325 326 - static int xics_host_map(struct irq_host *h, unsigned int virq, 326 + static int xics_host_map(struct irq_domain *h, unsigned int virq, 327 327 irq_hw_number_t hw) 328 328 { 329 329 struct ics *ics; ··· 351 351 return -EINVAL; 352 352 } 353 353 354 - static int xics_host_xlate(struct irq_host *h, struct device_node *ct, 354 + static int xics_host_xlate(struct irq_domain *h, struct device_node *ct, 355 355 const u32 *intspec, unsigned int intsize, 356 356 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 357 357 ··· 366 366 return 0; 367 367 } 368 368 369 - static struct irq_host_ops xics_host_ops = { 369 + static struct irq_domain_ops xics_host_ops = { 370 370 .match = xics_host_match, 371 371 .map = xics_host_map, 372 372 .xlate = xics_host_xlate, ··· 374 374 375 375 static void __init xics_init_host(void) 376 376 { 377 - xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, 377 + xics_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_TREE, 0, &xics_host_ops, 378 378 XICS_IRQ_SPURIOUS); 379 379 BUG_ON(xics_host == NULL); 380 380 irq_set_default_host(xics_host);
+8 -8
arch/powerpc/sysdev/xilinx_intc.c
··· 40 40 #define XINTC_IVR 24 /* Interrupt Vector */ 41 41 #define XINTC_MER 28 /* Master Enable */ 42 42 43 - static struct irq_host *master_irqhost; 43 + static struct irq_domain *master_irqhost; 44 44 45 45 #define XILINX_INTC_MAXIRQS (32) 46 46 ··· 141 141 /** 142 142 * xilinx_intc_xlate - translate virq# from device tree interrupts property 143 143 */ 144 - static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, 144 + static int xilinx_intc_xlate(struct irq_domain *h, struct device_node *ct, 145 145 const u32 *intspec, unsigned int intsize, 146 146 irq_hw_number_t *out_hwirq, 147 147 unsigned int *out_flags) ··· 161 161 162 162 return 0; 163 163 } 164 - static int xilinx_intc_map(struct irq_host *h, unsigned int virq, 164 + static int xilinx_intc_map(struct irq_domain *h, unsigned int virq, 165 165 irq_hw_number_t irq) 166 166 { 167 167 irq_set_chip_data(virq, h->host_data); ··· 177 177 return 0; 178 178 } 179 179 180 - static struct irq_host_ops xilinx_intc_ops = { 180 + static struct irq_domain_ops xilinx_intc_ops = { 181 181 .map = xilinx_intc_map, 182 182 .xlate = xilinx_intc_xlate, 183 183 }; 184 184 185 - struct irq_host * __init 185 + struct irq_domain * __init 186 186 xilinx_intc_init(struct device_node *np) 187 187 { 188 - struct irq_host * irq; 188 + struct irq_domain * irq; 189 189 void * regs; 190 190 191 191 /* Find and map the intc registers */ ··· 200 200 out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */ 201 201 out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */ 202 202 203 - /* Allocate and initialize an irq_host structure. */ 204 - irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS, 203 + /* Allocate and initialize an irq_domain structure. */ 204 + irq = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, XILINX_INTC_MAXIRQS, 205 205 &xilinx_intc_ops, -1); 206 206 if (!irq) 207 207 panic(__FILE__ ": Cannot allocate IRQ host\n");
+5 -5
drivers/gpio/gpio-mpc8xxx.c
··· 37 37 * open drain mode safely 38 38 */ 39 39 u32 data; 40 - struct irq_host *irq; 40 + struct irq_domain *irq; 41 41 void *of_dev_id_data; 42 42 }; 43 43 ··· 281 281 .irq_set_type = mpc8xxx_irq_set_type, 282 282 }; 283 283 284 - static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, 284 + static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq, 285 285 irq_hw_number_t hw) 286 286 { 287 287 struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data; ··· 296 296 return 0; 297 297 } 298 298 299 - static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct, 299 + static int mpc8xxx_gpio_irq_xlate(struct irq_domain *h, struct device_node *ct, 300 300 const u32 *intspec, unsigned int intsize, 301 301 irq_hw_number_t *out_hwirq, 302 302 unsigned int *out_flags) ··· 311 311 return 0; 312 312 } 313 313 314 - static struct irq_host_ops mpc8xxx_gpio_irq_ops = { 314 + static struct irq_domain_ops mpc8xxx_gpio_irq_ops = { 315 315 .map = mpc8xxx_gpio_irq_map, 316 316 .xlate = mpc8xxx_gpio_irq_xlate, 317 317 }; ··· 365 365 goto skip_irq; 366 366 367 367 mpc8xxx_gc->irq = 368 - irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS, 368 + irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, MPC8XXX_GPIO_PINS, 369 369 &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS); 370 370 if (!mpc8xxx_gc->irq) 371 371 goto skip_irq;