Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

irq_domain: Replace irq_alloc_host() with revmap-specific initializers

Each revmap type has different arguments for setting up the revmap.
This patch splits up the generator functions so that each revmap type
can do its own setup and the user doesn't need to keep track of how
each revmap type handles the arguments.

This patch also adds a host_data argument to the generators. There are
cases where the host_data pointer will be needed before the function returns.
i.e., the legacy map calls the .map callback for each irq before returning.

v2: - Add void *host_data argument to irq_domain_add_*() functions
- fixed failure to compile
- Moved IRQ_DOMAIN_MAP_* defines into irqdomain.c

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Cc: Rob Herring <rob.herring@calxeda.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Milton Miller <miltonm@bga.com>
Tested-by: Olof Johansson <olof@lixom.net>

+196 -183
+1 -2
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
··· 190 190 191 191 cpld_pic_node = of_node_get(np); 192 192 193 - cpld_pic_host = 194 - irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 16, &cpld_pic_host_ops, 16); 193 + cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL); 195 194 if (!cpld_pic_host) { 196 195 printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); 197 196 goto end;
+2 -5
arch/powerpc/platforms/52xx/media5200.c
··· 173 173 174 174 spin_lock_init(&media5200_irq.lock); 175 175 176 - media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_DOMAIN_MAP_LINEAR, 177 - MEDIA5200_NUM_IRQS, 178 - &media5200_irq_ops, -1); 176 + media5200_irq.irqhost = irq_domain_add_linear(fpga_np, 177 + MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq); 179 178 if (!media5200_irq.irqhost) 180 179 goto out; 181 180 pr_debug("%s: allocated irqhost\n", __func__); 182 - 183 - media5200_irq.irqhost->host_data = &media5200_irq; 184 181 185 182 irq_set_handler_data(cascade_virq, &media5200_irq); 186 183 irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
+2 -4
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
··· 252 252 if (!cascade_virq) 253 253 return; 254 254 255 - gpt->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 1, 256 - &mpc52xx_gpt_irq_ops, -1); 255 + gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt); 257 256 if (!gpt->irqhost) { 258 - dev_err(gpt->dev, "irq_alloc_host() failed\n"); 257 + dev_err(gpt->dev, "irq_domain_add_linear() failed\n"); 259 258 return; 260 259 } 261 260 262 - gpt->irqhost->host_data = gpt; 263 261 irq_set_handler_data(cascade_virq, gpt); 264 262 irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); 265 263
+2 -2
arch/powerpc/platforms/52xx/mpc52xx_pic.c
··· 444 444 * As last step, add an irq host to translate the real 445 445 * hw irq information provided by the ofw to linux virq 446 446 */ 447 - mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_DOMAIN_MAP_LINEAR, 447 + mpc52xx_irqhost = irq_domain_add_linear(picnode, 448 448 MPC52xx_IRQ_HIGHTESTHWIRQ, 449 - &mpc52xx_irqhost_ops, -1); 449 + &mpc52xx_irqhost_ops, NULL); 450 450 451 451 if (!mpc52xx_irqhost) 452 452 panic(__FILE__ ": Cannot allocate the IRQ host\n");
+1 -5
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
··· 156 156 out_be32(&priv->regs->mask, ~0); 157 157 mb(); 158 158 159 - host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, NUM_IRQS, 160 - &pci_pic_host_ops, NUM_IRQS); 159 + host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv); 161 160 if (!host) { 162 161 ret = -ENOMEM; 163 162 goto out_unmap_regs; 164 163 } 165 164 166 - host->host_data = priv; 167 - 168 165 priv->host = host; 169 - host->host_data = priv; 170 166 irq_set_handler_data(irq, priv); 171 167 irq_set_chained_handler(irq, pq2ads_pci_irq_demux); 172 168
+2 -3
arch/powerpc/platforms/85xx/socrates_fpga_pic.c
··· 280 280 int i; 281 281 282 282 /* Setup an irq_domain structure */ 283 - socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_DOMAIN_MAP_LINEAR, 284 - SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, 285 - SOCRATES_FPGA_NUM_IRQS); 283 + socrates_fpga_pic_irq_host = irq_domain_add_linear(pic, 284 + SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL); 286 285 if (socrates_fpga_pic_irq_host == NULL) { 287 286 pr_err("FPGA PIC: Unable to allocate host\n"); 288 287 return;
+2 -3
arch/powerpc/platforms/86xx/gef_pic.c
··· 212 212 } 213 213 214 214 /* Setup an irq_domain structure */ 215 - gef_pic_irq_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 216 - GEF_PIC_NUM_IRQS, 217 - &gef_pic_host_ops, NO_IRQ); 215 + gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS, 216 + &gef_pic_host_ops, NULL); 218 217 if (gef_pic_irq_host == NULL) 219 218 return; 220 219
+1 -4
arch/powerpc/platforms/cell/axon_msi.c
··· 392 392 } 393 393 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); 394 394 395 - msic->irq_domain = irq_alloc_host(dn, IRQ_DOMAIN_MAP_NOMAP, 396 - NR_IRQS, &msic_host_ops, 0); 395 + msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic); 397 396 if (!msic->irq_domain) { 398 397 printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", 399 398 dn->full_name); 400 399 goto out_free_fifo; 401 400 } 402 - 403 - msic->irq_domain->host_data = msic; 404 401 405 402 irq_set_handler_data(virq, msic); 406 403 irq_set_chained_handler(virq, axon_msi_cascade);
+1 -3
arch/powerpc/platforms/cell/beat_interrupt.c
··· 239 239 ppc_md.get_irq = beatic_get_irq; 240 240 241 241 /* Allocate an irq host */ 242 - beatic_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, 243 - &beatic_pic_host_ops, 244 - 0); 242 + beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL); 245 243 BUG_ON(beatic_host == NULL); 246 244 irq_set_default_host(beatic_host); 247 245 }
+2 -2
arch/powerpc/platforms/cell/interrupt.c
··· 378 378 void __init iic_init_IRQ(void) 379 379 { 380 380 /* Setup an irq host data structure */ 381 - iic_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_LINEAR, IIC_SOURCE_COUNT, 382 - &iic_host_ops, IIC_IRQ_INVALID); 381 + iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops, 382 + NULL); 383 383 BUG_ON(iic_host == NULL); 384 384 irq_set_default_host(iic_host); 385 385
+2 -4
arch/powerpc/platforms/cell/spider-pic.c
··· 299 299 panic("spider_pic: can't map registers !"); 300 300 301 301 /* Allocate a host */ 302 - pic->host = irq_alloc_host(of_node, IRQ_DOMAIN_MAP_LINEAR, 303 - SPIDER_SRC_COUNT, &spider_host_ops, 304 - SPIDER_IRQ_INVALID); 302 + pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT, 303 + &spider_host_ops, pic); 305 304 if (pic->host == NULL) 306 305 panic("spider_pic: can't allocate irq host !"); 307 - pic->host->host_data = pic; 308 306 309 307 /* Go through all sources and disable them */ 310 308 for (i = 0; i < SPIDER_SRC_COUNT; i++) {
+2 -4
arch/powerpc/platforms/embedded6xx/flipper-pic.c
··· 159 159 160 160 __flipper_quiesce(io_base); 161 161 162 - irq_domain = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, FLIPPER_NR_IRQS, 163 - &flipper_irq_domain_ops, -1); 162 + irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS, 163 + &flipper_irq_domain_ops, io_base); 164 164 if (!irq_domain) { 165 165 pr_err("failed to allocate irq_domain\n"); 166 166 return NULL; 167 167 } 168 - 169 - irq_domain->host_data = io_base; 170 168 171 169 out: 172 170 return irq_domain;
+2 -3
arch/powerpc/platforms/embedded6xx/hlwd-pic.c
··· 177 177 178 178 __hlwd_quiesce(io_base); 179 179 180 - irq_domain = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, HLWD_NR_IRQS, 181 - &hlwd_irq_domain_ops, -1); 180 + irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS, 181 + &hlwd_irq_domain_ops, io_base); 182 182 if (!irq_domain) { 183 183 pr_err("failed to allocate irq_domain\n"); 184 184 return NULL; 185 185 } 186 - irq_domain->host_data = io_base; 187 186 188 187 return irq_domain; 189 188 }
+1 -2
arch/powerpc/platforms/iseries/irq.c
··· 380 380 /* Create irq host. No need for a revmap since HV will give us 381 381 * back our virtual irq number 382 382 */ 383 - host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, 384 - &iseries_irq_domain_ops, 0); 383 + host = irq_domain_add_nomap(NULL, &iseries_irq_domain_ops, NULL); 385 384 BUG_ON(host == NULL); 386 385 irq_set_default_host(host); 387 386
+2 -3
arch/powerpc/platforms/powermac/pic.c
··· 352 352 /* 353 353 * Allocate an irq host 354 354 */ 355 - pmac_pic_host = irq_alloc_host(master, IRQ_DOMAIN_MAP_LINEAR, max_irqs, 356 - &pmac_pic_host_ops, 357 - max_irqs); 355 + pmac_pic_host = irq_domain_add_linear(master, max_irqs, 356 + &pmac_pic_host_ops, NULL); 358 357 BUG_ON(pmac_pic_host == NULL); 359 358 irq_set_default_host(pmac_pic_host); 360 359
+1 -2
arch/powerpc/platforms/powermac/smp.c
··· 192 192 { 193 193 int rc = -ENOMEM; 194 194 195 - psurge_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, 196 - &psurge_host_ops, 0); 195 + psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL); 197 196 198 197 if (psurge_host) 199 198 psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
+1 -2
arch/powerpc/platforms/ps3/interrupt.c
··· 753 753 unsigned cpu; 754 754 struct irq_domain *host; 755 755 756 - host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, &ps3_host_ops, 757 - PS3_INVALID_OUTLET); 756 + host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL); 758 757 irq_set_default_host(host); 759 758 irq_set_virq_count(PS3_PLUG_MAX + 1); 760 759
+2 -5
arch/powerpc/platforms/wsp/opb_pic.c
··· 263 263 goto free_opb; 264 264 } 265 265 266 - /* Allocate an irq host so that Linux knows that despite only 266 + /* Allocate an irq domain so that Linux knows that despite only 267 267 * having one interrupt to issue, we're the controller for multiple 268 268 * hardware IRQs, so later we can lookup their virtual IRQs. */ 269 269 270 - opb->host = irq_alloc_host(dn, IRQ_DOMAIN_MAP_LINEAR, 271 - OPB_NR_IRQS, &opb_host_ops, -1); 272 - 270 + opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb); 273 271 if (!opb->host) { 274 272 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n"); 275 273 goto free_regs; ··· 275 277 276 278 opb->index = opb_index++; 277 279 spin_lock_init(&opb->lock); 278 - opb->host->host_data = opb; 279 280 280 281 /* Disable all interrupts by default */ 281 282 opb_out(opb, OPB_MLSASIER, 0);
+1 -2
arch/powerpc/sysdev/cpm1.c
··· 164 164 165 165 out_be32(&cpic_reg->cpic_cimr, 0); 166 166 167 - cpm_pic_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 168 - 64, &cpm_pic_host_ops, 64); 167 + cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL); 169 168 if (cpm_pic_host == NULL) { 170 169 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); 171 170 sirq = NO_IRQ;
+1 -2
arch/powerpc/sysdev/cpm2_pic.c
··· 275 275 out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); 276 276 277 277 /* create a legacy host */ 278 - cpm2_pic_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 279 - 64, &cpm2_pic_host_ops, 64); 278 + cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL); 280 279 if (cpm2_pic_host == NULL) { 281 280 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); 282 281 return;
+2 -4
arch/powerpc/sysdev/ehv_pic.c
··· 275 275 return; 276 276 } 277 277 278 - ehv_pic->irqhost = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 279 - NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0); 280 - 278 + ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS, 279 + &ehv_pic_host_ops, ehv_pic); 281 280 if (!ehv_pic->irqhost) { 282 281 of_node_put(np); 283 282 kfree(ehv_pic); ··· 292 293 of_node_put(np2); 293 294 } 294 295 295 - ehv_pic->irqhost->host_data = ehv_pic; 296 296 ehv_pic->hc_irq = ehv_pic_irq_chip; 297 297 ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity; 298 298 ehv_pic->coreint_flag = coreint_flag;
+2 -4
arch/powerpc/sysdev/fsl_msi.c
··· 387 387 } 388 388 platform_set_drvdata(dev, msi); 389 389 390 - msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_DOMAIN_MAP_LINEAR, 391 - NR_MSI_IRQS, &fsl_msi_host_ops, 0); 390 + msi->irqhost = irq_domain_add_linear(dev->dev.of_node, 391 + NR_MSI_IRQS, &fsl_msi_host_ops, msi); 392 392 393 393 if (msi->irqhost == NULL) { 394 394 dev_err(&dev->dev, "No memory for MSI irqhost\n"); ··· 419 419 } 420 420 421 421 msi->feature = features->fsl_pic_ip; 422 - 423 - msi->irqhost->host_data = msi; 424 422 425 423 /* 426 424 * Remember the phandle, so that we can match with any PCI nodes
+1 -2
arch/powerpc/sysdev/i8259.c
··· 263 263 raw_spin_unlock_irqrestore(&i8259_lock, flags); 264 264 265 265 /* create a legacy host */ 266 - i8259_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LEGACY, 267 - 0, &i8259_host_ops, 0); 266 + i8259_host = irq_domain_add_legacy(node, &i8259_host_ops, NULL); 268 267 if (i8259_host == NULL) { 269 268 printk(KERN_ERR "i8259: failed to allocate irq host !\n"); 270 269 return;
+2 -5
arch/powerpc/sysdev/ipic.c
··· 728 728 if (ipic == NULL) 729 729 return NULL; 730 730 731 - ipic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 732 - NR_IPIC_INTS, 733 - &ipic_host_ops, 0); 731 + ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS, 732 + &ipic_host_ops, ipic); 734 733 if (ipic->irqhost == NULL) { 735 734 kfree(ipic); 736 735 return NULL; 737 736 } 738 737 739 738 ipic->regs = ioremap(res.start, resource_size(&res)); 740 - 741 - ipic->irqhost->host_data = ipic; 742 739 743 740 /* init hw */ 744 741 ipic_write(ipic->regs, IPIC_SICNR, 0x0);
+1 -2
arch/powerpc/sysdev/mpc8xx_pic.c
··· 171 171 goto out; 172 172 } 173 173 174 - mpc8xx_pic_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 175 - 64, &mpc8xx_pic_host_ops, 64); 174 + mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL); 176 175 if (mpc8xx_pic_host == NULL) { 177 176 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); 178 177 ret = -ENOMEM;
+2 -5
arch/powerpc/sysdev/mpic.c
··· 1345 1345 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); 1346 1346 mpic->isu_mask = (1 << mpic->isu_shift) - 1; 1347 1347 1348 - mpic->irqhost = irq_alloc_host(mpic->node, IRQ_DOMAIN_MAP_LINEAR, 1348 + mpic->irqhost = irq_domain_add_linear(mpic->node, 1349 1349 isu_size ? isu_size : mpic->num_sources, 1350 - &mpic_host_ops, 1351 - flags & MPIC_LARGE_VECTORS ? 2048 : 256); 1350 + &mpic_host_ops, mpic); 1352 1351 1353 1352 /* 1354 1353 * FIXME: The code leaks the MPIC object and mappings here; this ··· 1355 1356 */ 1356 1357 if (mpic->irqhost == NULL) 1357 1358 return NULL; 1358 - 1359 - mpic->irqhost->host_data = mpic; 1360 1359 1361 1360 /* Display version */ 1362 1361 switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
+2 -3
arch/powerpc/sysdev/mv64x60_pic.c
··· 250 250 paddr = of_translate_address(np, reg); 251 251 mv64x60_irq_reg_base = ioremap(paddr, reg[1]); 252 252 253 - mv64x60_irq_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 254 - MV64x60_NUM_IRQS, 255 - &mv64x60_host_ops, MV64x60_NUM_IRQS); 253 + mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS, 254 + &mv64x60_host_ops, NULL); 256 255 257 256 spin_lock_irqsave(&mv64x60_lock, flags); 258 257 out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
+2 -3
arch/powerpc/sysdev/qe_lib/qe_ic.c
··· 339 339 if (qe_ic == NULL) 340 340 return; 341 341 342 - qe_ic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 343 - NR_QE_IC_INTS, &qe_ic_host_ops, 0); 342 + qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, 343 + &qe_ic_host_ops, qe_ic); 344 344 if (qe_ic->irqhost == NULL) { 345 345 kfree(qe_ic); 346 346 return; ··· 348 348 349 349 qe_ic->regs = ioremap(res.start, resource_size(&res)); 350 350 351 - qe_ic->irqhost->host_data = qe_ic; 352 351 qe_ic->hc_irq = qe_ic_irq_chip; 353 352 354 353 qe_ic->virq_high = irq_of_parse_and_map(node, 0);
+1 -2
arch/powerpc/sysdev/tsi108_pci.c
··· 419 419 { 420 420 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); 421 421 422 - pci_irq_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LEGACY, 423 - 0, &pci_irq_domain_ops, 0); 422 + pci_irq_host = irq_domain_add_legacy(node, &pci_irq_domain_ops, NULL); 424 423 if (pci_irq_host == NULL) { 425 424 printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n"); 426 425 return;
+2 -4
arch/powerpc/sysdev/uic.c
··· 270 270 } 271 271 uic->dcrbase = *dcrreg; 272 272 273 - uic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 274 - NR_UIC_INTS, &uic_host_ops, -1); 273 + uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops, 274 + uic); 275 275 if (! uic->irqhost) 276 276 return NULL; /* FIXME: panic? */ 277 - 278 - uic->irqhost->host_data = uic; 279 277 280 278 /* Start with all interrupts disabled, level and non-critical */ 281 279 mtdcr(uic->dcrbase + UIC_ER, 0);
+1 -2
arch/powerpc/sysdev/xics/xics-common.c
··· 374 374 375 375 static void __init xics_init_host(void) 376 376 { 377 - xics_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_TREE, 0, &xics_host_ops, 378 - XICS_IRQ_SPURIOUS); 377 + xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL); 379 378 BUG_ON(xics_host == NULL); 380 379 irq_set_default_host(xics_host); 381 380 }
+2 -3
arch/powerpc/sysdev/xilinx_intc.c
··· 201 201 out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */ 202 202 203 203 /* Allocate and initialize an irq_domain structure. */ 204 - irq = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, XILINX_INTC_MAXIRQS, 205 - &xilinx_intc_ops, -1); 204 + irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops, 205 + regs); 206 206 if (!irq) 207 207 panic(__FILE__ ": Cannot allocate IRQ host\n"); 208 - irq->host_data = regs; 209 208 210 209 return irq; 211 210 }
+2 -5
drivers/gpio/gpio-mpc8xxx.c
··· 364 364 if (hwirq == NO_IRQ) 365 365 goto skip_irq; 366 366 367 - mpc8xxx_gc->irq = 368 - irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, MPC8XXX_GPIO_PINS, 369 - &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS); 367 + mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS, 368 + &mpc8xxx_gpio_irq_ops, mpc8xxx_gc); 370 369 if (!mpc8xxx_gc->irq) 371 370 goto skip_irq; 372 371 373 372 id = of_match_node(mpc8xxx_gpio_ids, np); 374 373 if (id) 375 374 mpc8xxx_gc->of_dev_id_data = id->data; 376 - 377 - mpc8xxx_gc->irq->host_data = mpc8xxx_gc; 378 375 379 376 /* ack and mask all irqs */ 380 377 out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
+15 -9
include/linux/irqdomain.h
··· 95 95 96 96 /* type of reverse mapping_technique */ 97 97 unsigned int revmap_type; 98 - #define IRQ_DOMAIN_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */ 99 - #define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */ 100 - #define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */ 101 - #define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */ 102 98 union { 103 99 struct { 104 100 unsigned int size; ··· 116 120 117 121 #ifdef CONFIG_IRQ_DOMAIN 118 122 #ifdef CONFIG_PPC 119 - extern struct irq_domain *irq_alloc_host(struct device_node *of_node, 120 - unsigned int revmap_type, 121 - unsigned int revmap_arg, 122 - struct irq_domain_ops *ops, 123 - irq_hw_number_t inval_irq); 123 + struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, 124 + struct irq_domain_ops *ops, 125 + void *host_data); 126 + struct irq_domain *irq_domain_add_linear(struct device_node *of_node, 127 + unsigned int size, 128 + struct irq_domain_ops *ops, 129 + void *host_data); 130 + struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, 131 + struct irq_domain_ops *ops, 132 + void *host_data); 133 + struct irq_domain *irq_domain_add_tree(struct device_node *of_node, 134 + struct irq_domain_ops *ops, 135 + void *host_data); 136 + 137 + 124 138 extern struct irq_domain *irq_find_host(struct device_node *node); 125 139 extern void irq_set_default_host(struct irq_domain *host); 126 140 extern void irq_set_virq_count(unsigned int count);
+128 -68
kernel/irq/irqdomain.c
··· 13 13 #include <linux/smp.h> 14 14 #include <linux/fs.h> 15 15 16 + #define IRQ_DOMAIN_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */ 17 + #define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */ 18 + #define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */ 19 + #define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */ 20 + 16 21 static LIST_HEAD(irq_domain_list); 17 22 static DEFINE_MUTEX(irq_domain_mutex); 18 23 ··· 32 27 } 33 28 34 29 /** 35 - * irq_alloc_host() - Allocate a new irq_domain data structure 30 + * irq_domain_alloc() - Allocate a new irq_domain data structure 36 31 * @of_node: optional device-tree node of the interrupt controller 37 32 * @revmap_type: type of reverse mapping to use 38 - * @revmap_arg: for IRQ_DOMAIN_MAP_LINEAR linear only: size of the map 39 33 * @ops: map/unmap domain callbacks 40 - * @inval_irq: provide a hw number in that domain space that is always invalid 34 + * @host_data: Controller private data pointer 41 35 * 42 - * Allocates and initialize and irq_domain structure. Note that in the case of 43 - * IRQ_DOMAIN_MAP_LEGACY, the map() callback will be called before this returns 44 - * for all legacy interrupts except 0 (which is always the invalid irq for 45 - * a legacy controller). For a IRQ_DOMAIN_MAP_LINEAR, the map is allocated by 46 - * this call as well. For a IRQ_DOMAIN_MAP_TREE, the radix tree will be 47 - * allocated later during boot automatically (the reverse mapping will use the 48 - * slow path until that happens). 36 + * Allocates and initialize and irq_domain structure. Caller is expected to 37 + * register allocated irq_domain with irq_domain_register(). Returns pointer 38 + * to IRQ domain, or NULL on failure. 
49 39 */ 50 - struct irq_domain *irq_alloc_host(struct device_node *of_node, 51 - unsigned int revmap_type, 52 - unsigned int revmap_arg, 53 - struct irq_domain_ops *ops, 54 - irq_hw_number_t inval_irq) 40 + static struct irq_domain *irq_domain_alloc(struct device_node *of_node, 41 + unsigned int revmap_type, 42 + struct irq_domain_ops *ops, 43 + void *host_data) 55 44 { 56 - struct irq_domain *domain, *h; 57 - unsigned int size = sizeof(struct irq_domain); 58 - unsigned int i; 59 - unsigned int *rmap; 45 + struct irq_domain *domain; 60 46 61 - /* Allocate structure and revmap table if using linear mapping */ 62 - if (revmap_type == IRQ_DOMAIN_MAP_LINEAR) 63 - size += revmap_arg * sizeof(unsigned int); 64 - domain = kzalloc(size, GFP_KERNEL); 65 - if (domain == NULL) 47 + domain = kzalloc(sizeof(*domain), GFP_KERNEL); 48 + if (WARN_ON(!domain)) 66 49 return NULL; 67 50 68 51 /* Fill structure */ 69 52 domain->revmap_type = revmap_type; 70 - domain->inval_irq = inval_irq; 71 53 domain->ops = ops; 54 + domain->host_data = host_data; 72 55 domain->of_node = of_node_get(of_node); 73 56 74 57 if (domain->ops->match == NULL) 75 58 domain->ops->match = default_irq_domain_match; 76 59 60 + return domain; 61 + } 62 + 63 + static void irq_domain_add(struct irq_domain *domain) 64 + { 65 + mutex_lock(&irq_domain_mutex); 66 + list_add(&domain->link, &irq_domain_list); 67 + mutex_unlock(&irq_domain_mutex); 68 + pr_debug("irq: Allocated domain of type %d @0x%p\n", 69 + domain->revmap_type, domain); 70 + } 71 + 72 + /** 73 + * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. 74 + * @of_node: pointer to interrupt controller's device tree node. 75 + * @ops: map/unmap domain callbacks 76 + * @host_data: Controller private data pointer 77 + * 78 + * Note: the map() callback will be called before this function returns 79 + * for all legacy interrupts except 0 (which is always the invalid irq for 80 + * a legacy controller). 
81 + */ 82 + struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, 83 + struct irq_domain_ops *ops, 84 + void *host_data) 85 + { 86 + struct irq_domain *domain, *h; 87 + unsigned int i; 88 + 89 + domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data); 90 + if (!domain) 91 + return NULL; 92 + 77 93 mutex_lock(&irq_domain_mutex); 78 94 /* Make sure only one legacy controller can be created */ 79 - if (revmap_type == IRQ_DOMAIN_MAP_LEGACY) { 80 - list_for_each_entry(h, &irq_domain_list, link) { 81 - if (WARN_ON(h->revmap_type == IRQ_DOMAIN_MAP_LEGACY)) { 82 - mutex_unlock(&irq_domain_mutex); 83 - of_node_put(domain->of_node); 84 - kfree(domain); 85 - return NULL; 86 - } 95 + list_for_each_entry(h, &irq_domain_list, link) { 96 + if (WARN_ON(h->revmap_type == IRQ_DOMAIN_MAP_LEGACY)) { 97 + mutex_unlock(&irq_domain_mutex); 98 + of_node_put(domain->of_node); 99 + kfree(domain); 100 + return NULL; 87 101 } 88 102 } 89 103 list_add(&domain->link, &irq_domain_list); 90 104 mutex_unlock(&irq_domain_mutex); 91 105 92 - /* Additional setups per revmap type */ 93 - switch(revmap_type) { 94 - case IRQ_DOMAIN_MAP_LEGACY: 95 - /* 0 is always the invalid number for legacy */ 96 - domain->inval_irq = 0; 97 - /* setup us as the domain for all legacy interrupts */ 98 - for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 99 - struct irq_data *irq_data = irq_get_irq_data(i); 100 - irq_data->hwirq = i; 101 - irq_data->domain = domain; 106 + /* setup us as the domain for all legacy interrupts */ 107 + for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 108 + struct irq_data *irq_data = irq_get_irq_data(i); 109 + irq_data->hwirq = i; 110 + irq_data->domain = domain; 102 111 103 - /* Legacy flags are left to default at this point, 104 - * one can then use irq_create_mapping() to 105 - * explicitly change them 106 - */ 107 - ops->map(domain, i, i); 112 + /* Legacy flags are left to default at this point, 113 + * one can then use irq_create_mapping() to 114 + * explicitly 
change them 115 + */ 116 + ops->map(domain, i, i); 108 117 109 - /* Clear norequest flags */ 110 - irq_clear_status_flags(i, IRQ_NOREQUEST); 111 - } 112 - break; 113 - case IRQ_DOMAIN_MAP_LINEAR: 114 - rmap = (unsigned int *)(domain + 1); 115 - for (i = 0; i < revmap_arg; i++) 116 - rmap[i] = 0; 117 - domain->revmap_data.linear.size = revmap_arg; 118 - domain->revmap_data.linear.revmap = rmap; 119 - break; 120 - case IRQ_DOMAIN_MAP_TREE: 121 - INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL); 122 - break; 123 - default: 124 - break; 118 + /* Clear norequest flags */ 119 + irq_clear_status_flags(i, IRQ_NOREQUEST); 125 120 } 121 + return domain; 122 + } 126 123 127 - pr_debug("irq: Allocated domain of type %d @0x%p\n", revmap_type, domain); 124 + /** 125 + * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain. 126 + * @of_node: pointer to interrupt controller's device tree node. 127 + * @ops: map/unmap domain callbacks 128 + * @host_data: Controller private data pointer 129 + */ 130 + struct irq_domain *irq_domain_add_linear(struct device_node *of_node, 131 + unsigned int size, 132 + struct irq_domain_ops *ops, 133 + void *host_data) 134 + { 135 + struct irq_domain *domain; 136 + unsigned int *revmap; 128 137 138 + revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL); 139 + if (WARN_ON(!revmap)) 140 + return NULL; 141 + 142 + domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data); 143 + if (!domain) { 144 + kfree(revmap); 145 + return NULL; 146 + } 147 + domain->revmap_data.linear.size = size; 148 + domain->revmap_data.linear.revmap = revmap; 149 + irq_domain_add(domain); 150 + return domain; 151 + } 152 + 153 + struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, 154 + struct irq_domain_ops *ops, 155 + void *host_data) 156 + { 157 + struct irq_domain *domain = irq_domain_alloc(of_node, 158 + IRQ_DOMAIN_MAP_NOMAP, ops, host_data); 159 + if (domain) 160 + irq_domain_add(domain); 161 + return domain; 162 + 
} 163 + 164 + /** 165 + * irq_domain_add_tree() 166 + * @of_node: pointer to interrupt controller's device tree node. 167 + * @ops: map/unmap domain callbacks 168 + * 169 + * Note: The radix tree will be allocated later during boot automatically 170 + * (the reverse mapping will use the slow path until that happens). 171 + */ 172 + struct irq_domain *irq_domain_add_tree(struct device_node *of_node, 173 + struct irq_domain_ops *ops, 174 + void *host_data) 175 + { 176 + struct irq_domain *domain = irq_domain_alloc(of_node, 177 + IRQ_DOMAIN_MAP_TREE, ops, host_data); 178 + if (domain) { 179 + INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL); 180 + irq_domain_add(domain); 181 + } 129 182 return domain; 130 183 } 131 184 ··· 455 392 mutex_unlock(&revmap_trees_mutex); 456 393 break; 457 394 } 458 - 459 - /* Destroy map */ 460 - irq_data->hwirq = domain->inval_irq; 461 395 462 396 irq_free_desc(virq); 463 397 }