Merge tag 'irq-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull interrupt fixes from Thomas Gleixner:
"A set of fixes for the interrupt subsystem:

- Provision only ACPI enabled redistributors on GICv3

- Use the proper command descriptor when building the INVALL command for
the GICv3-ITS

- Ensure the allocation of the L2 vPE table for GICv4.1

- Correct the GICv4.1 VPROPBASER programming so it uses the proper
size

- A set of small GICv4.1 tidy-up patches

- Configuration cleanup for C-SKY interrupt chip

- Clarify the function documentation for irq_set_irq_wake() to document
that the wakeup functionality is orthogonal to the irq
disable/enable mechanism"

* tag 'irq-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
irqchip/gic-v3-its: Rename VPENDBASER/VPROPBASER accessors
irqchip/gic-v3-its: Remove superfluous WARN_ON
irqchip/gic-v4.1: Drop 'tmp' in inherit_vpe_l1_table_from_rd()
irqchip/gic-v4.1: Ensure L2 vPE table is allocated at RD level
irqchip/gic-v4.1: Set vpe_l1_base for all redistributors
irqchip/gic-v4.1: Fix programming of GICR_VPROPBASER_4_1_SIZE
genirq: Clarify that irq wake state is orthogonal to enable/disable
irqchip/gic-v3-its: Reference to its_invall_cmd descriptor when building INVALL
irqchip: Some Kconfig cleanup for C-SKY
irqchip/gic-v3: Only provision redistributors that are enabled in ACPI

+127 -35
+6 -6
arch/arm/include/asm/arch_gicv3.h
···
 #define gits_write_cwriter(v, c)	__gic_writeq_nonatomic(v, c)
 
 /*
- * GITS_VPROPBASER - hi and lo bits may be accessed independently.
+ * GICR_VPROPBASER - hi and lo bits may be accessed independently.
  */
-#define gits_read_vpropbaser(c)		__gic_readq_nonatomic(c)
-#define gits_write_vpropbaser(v, c)	__gic_writeq_nonatomic(v, c)
+#define gicr_read_vpropbaser(c)		__gic_readq_nonatomic(c)
+#define gicr_write_vpropbaser(v, c)	__gic_writeq_nonatomic(v, c)
 
 /*
- * GITS_VPENDBASER - the Valid bit must be cleared before changing
+ * GICR_VPENDBASER - the Valid bit must be cleared before changing
  * anything else.
  */
-static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
+static inline void gicr_write_vpendbaser(u64 val, void __iomem *addr)
 {
 	u32 tmp;
 
···
 	__gic_writeq_nonatomic(val, addr);
 }
 
-#define gits_read_vpendbaser(c)		__gic_readq_nonatomic(c)
+#define gicr_read_vpendbaser(c)		__gic_readq_nonatomic(c)
 
 static inline bool gic_prio_masking_enabled(void)
 {
+4 -4
arch/arm64/include/asm/arch_gicv3.h
···
 #define gicr_write_pendbaser(v, c)	writeq_relaxed(v, c)
 #define gicr_read_pendbaser(c)		readq_relaxed(c)
 
-#define gits_write_vpropbaser(v, c)	writeq_relaxed(v, c)
-#define gits_read_vpropbaser(c)		readq_relaxed(c)
+#define gicr_write_vpropbaser(v, c)	writeq_relaxed(v, c)
+#define gicr_read_vpropbaser(c)		readq_relaxed(c)
 
-#define gits_write_vpendbaser(v, c)	writeq_relaxed(v, c)
-#define gits_read_vpendbaser(c)		readq_relaxed(c)
+#define gicr_write_vpendbaser(v, c)	writeq_relaxed(v, c)
+#define gicr_read_vpendbaser(c)		readq_relaxed(c)
 
 static inline bool gic_prio_masking_enabled(void)
 {
+2 -2
drivers/irqchip/Kconfig
···
 	help
 	  Say yes here to enable C-SKY SMP interrupt controller driver used
 	  for C-SKY SMP system.
-	  In fact it's not mmio map in hw and it use ld/st to visit the
+	  In fact it's not mmio map in hardware and it uses ld/st to visit the
 	  controller's register inside CPU.
 
 config CSKY_APB_INTC
···
 	depends on CSKY
 	help
 	  Say yes here to enable C-SKY APB interrupt controller driver used
-	  by C-SKY single core SOC system. It use mmio map apb-bus to visit
+	  by C-SKY single core SOC system. It uses mmio map apb-bus to visit
 	  the controller's register.
 
 config IMX_IRQSTEER
+100 -20
drivers/irqchip/irq-gic-v3-its.c
···
 			     struct its_cmd_desc *desc)
 {
 	its_encode_cmd(cmd, GITS_CMD_INVALL);
-	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
 
 	its_fixup_cmd(cmd);
 
···
 			continue;
 
 		/* We have a winner! */
+		gic_data_rdist()->vpe_l1_base = its->tables[2].base;
+
 		val = GICR_VPROPBASER_4_1_VALID;
 		if (baser & GITS_BASER_INDIRECT)
 			val |= GICR_VPROPBASER_4_1_INDIRECT;
···
 
 	for_each_possible_cpu(cpu) {
 		void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
-		u32 tmp;
 
 		if (!base || cpu == smp_processor_id())
 			continue;
 
 		val = gic_read_typer(base + GICR_TYPER);
-		tmp = compute_common_aff(val);
-		if (tmp != aff)
+		if (aff != compute_common_aff(val))
 			continue;
 
 		/*
···
 		 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
 		 * Make sure we don't write the Z bit in that case.
 		 */
-		val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+		val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
 		val &= ~GICR_VPROPBASER_4_1_Z;
 
+		gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
 		*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
 
 		return val;
 	}
 
 	return 0;
+}
+
+static bool allocate_vpe_l2_table(int cpu, u32 id)
+{
+	void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
+	u64 val, gpsz, npg;
+	unsigned int psz, esz, idx;
+	struct page *page;
+	__le64 *table;
+
+	if (!gic_rdists->has_rvpeid)
+		return true;
+
+	val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
+
+	esz  = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
+	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
+	npg  = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
+
+	switch (gpsz) {
+	default:
+		WARN_ON(1);
+		/* fall through */
+	case GIC_PAGE_SIZE_4K:
+		psz = SZ_4K;
+		break;
+	case GIC_PAGE_SIZE_16K:
+		psz = SZ_16K;
+		break;
+	case GIC_PAGE_SIZE_64K:
+		psz = SZ_64K;
+		break;
+	}
+
+	/* Don't allow vpe_id that exceeds single, flat table limit */
+	if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
+		return (id < (npg * psz / (esz * SZ_8)));
+
+	/* Compute 1st level table index & check if that exceeds table limit */
+	idx = id >> ilog2(psz / (esz * SZ_8));
+	if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
+		return false;
+
+	table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
+
+	/* Allocate memory for 2nd level table */
+	if (!table[idx]) {
+		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+		if (!page)
+			return false;
+
+		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
+		if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
+			gic_flush_dcache_to_poc(page_address(page), psz);
+
+		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+		if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
+			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+		/* Ensure updated table contents are visible to RD hardware */
+		dsb(sy);
+	}
+
+	return true;
 }
 
 static int allocate_vpe_l1_table(void)
···
 	 * effect of making sure no doorbell will be generated and we can
 	 * then safely clear VPROPBASER.Valid.
 	 */
-	if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
-		gits_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+	if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
+		gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
 				      vlpi_base + GICR_VPENDBASER);
 
 	/*
···
 
 	/* First probe the page size */
 	val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
-	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
-	val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
+	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+	val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
 	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
 	esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
 
···
 		npg = 1;
 	}
 
-	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg);
+	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
 
 	/* Right, that's the number of CPU pages we need for L1 */
 	np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
···
 	if (!page)
 		return -ENOMEM;
 
-	gic_data_rdist()->vpe_l1_page = page;
+	gic_data_rdist()->vpe_l1_base = page_address(page);
 	pa = virt_to_phys(page_address(page));
 	WARN_ON(!IS_ALIGNED(pa, psz));
 
···
 	val |= GICR_VPROPBASER_4_1_VALID;
 
 out:
-	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 	cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
 
 	pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
···
 	bool clean;
 	u64 val;
 
-	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+	val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
 	val &= ~GICR_VPENDBASER_Valid;
 	val &= ~clr;
 	val |= set;
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
 
 	do {
-		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+		val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
 		clean = !(val & GICR_VPENDBASER_Dirty);
 		if (!clean) {
 			count--;
···
 		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
 		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
 			smp_processor_id(), val);
-		gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+		gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 
 		/*
 		 * Also clear Valid bit of GICR_VPENDBASER, in case some
···
 		 * corrupting memory.
 		 */
 		val = its_clear_vpend_valid(vlpi_base, 0, 0);
-		WARN_ON(val & GICR_VPENDBASER_Dirty);
 	}
 
 	if (allocate_vpe_l1_table()) {
···
 static bool its_alloc_vpe_table(u32 vpe_id)
 {
 	struct its_node *its;
+	int cpu;
 
 	/*
 	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
···
 			return false;
 
 		if (!its_alloc_table_entry(its, baser, vpe_id))
+			return false;
+	}
+
+	/* Non v4.1? No need to iterate RDs and go back early. */
+	if (!gic_rdists->has_rvpeid)
+		return true;
+
+	/*
+	 * Make sure the L2 tables are allocated for all copies of
+	 * the L1 table on *all* v4.1 RDs.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (!allocate_vpe_l2_table(cpu, vpe_id))
 			return false;
 	}
 
···
 	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
 	val |= GICR_VPROPBASER_RaWb;
 	val |= GICR_VPROPBASER_InnerShareable;
-	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 
 	val = virt_to_phys(page_address(vpe->vpt_page)) &
 	      GENMASK_ULL(51, 16);
···
 	val |= GICR_VPENDBASER_PendingLast;
 	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
 	val |= GICR_VPENDBASER_Valid;
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
 }
 
 static void its_vpe_deschedule(struct its_vpe *vpe)
···
 	val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
 	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
 
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
 }
 
 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
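
[Editor's note] The flat-vs-indirect check in allocate_vpe_l2_table() above boils down to simple arithmetic on the VPROPBASER fields. The standalone sketch below is not part of the patch; the 4K page size, 16-byte entry size, single-page L1 and the vpe_id value are illustrative assumptions. It shows how a vpe_id compares against the flat-table capacity and, in the indirect case, which L1 index covers it (the kernel uses a shift via ilog2(), which is equivalent for these power-of-two values).

/* Illustrative userspace sketch of the sizing math in allocate_vpe_l2_table(). */
#include <stdio.h>

int main(void)
{
	unsigned int psz = 4096;      /* assumed RD page size (GIC_PAGE_SIZE_4K) */
	unsigned int esz_bytes = 16;  /* assumed vPE entry size: (ENTRY_SIZE + 1) * 8 bytes */
	unsigned int npg = 1;         /* assumed L1 table size: SIZE + 1 pages */
	unsigned int vpe_id = 300;    /* example ID */

	/* Flat table: how many vPE IDs fit before indirection is required? */
	unsigned int flat_capacity = npg * psz / esz_bytes;  /* 256 */

	/* Indirect table: each L1 entry covers one page worth of vPE entries */
	unsigned int ids_per_l2 = psz / esz_bytes;            /* 256 */
	unsigned int l1_idx = vpe_id / ids_per_l2;            /* 300 / 256 = 1 */

	printf("flat capacity = %u vPEs, vpe_id %u -> L1 index %u\n",
	       flat_capacity, vpe_id, l1_idx);
	return 0;
}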
+7 -2
drivers/irqchip/irq-gic-v3.c
···
 	struct redist_region *redist_regs;
 	u32 nr_redist_regions;
 	bool single_redist;
+	int enabled_rdists;
 	u32 maint_irq;
 	int maint_irq_mode;
 	phys_addr_t vcpu_base;
···
 	 * If GICC is enabled and has valid gicr base address, then it means
 	 * GICR base is presented via GICC
 	 */
-	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
+		acpi_data.enabled_rdists++;
 		return 0;
+	}
 
 	/*
 	 * It's perfectly valid firmware can pass disabled GICC entry, driver
···
 
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 				      gic_acpi_match_gicc, 0);
-	if (count > 0)
+	if (count > 0) {
 		acpi_data.single_redist = true;
+		count = acpi_data.enabled_rdists;
+	}
 
 	return count;
 }
+1 -1
include/linux/irqchip/arm-gic-v3.h
···
 	struct {
 		void __iomem	*rd_base;
 		struct page	*pend_page;
-		struct page	*vpe_l1_page;
 		phys_addr_t	phys_base;
 		bool		lpi_enabled;
 		cpumask_t	*vpe_table_mask;
+		void		*vpe_l1_base;
 	} __percpu		*rdist;
 	phys_addr_t		prop_table_pa;
 	void			*prop_table_va;
+7
kernel/irq/manage.c
···
  *
  *	Wakeup mode lets this IRQ wake the system from sleep
  *	states like "suspend to RAM".
+ *
+ *	Note: irq enable/disable state is completely orthogonal
+ *	to the enable/disable state of irq wake. An irq can be
+ *	disabled with disable_irq() and still wake the system as
+ *	long as the irq has wake enabled. If this does not hold,
+ *	then the underlying irq chip and the related driver need
+ *	to be investigated.
  */
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
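
[Editor's note] To illustrate the note added to irq_set_irq_wake() above, here is a minimal sketch of a driver suspend/resume pair. The struct foo_dev and the foo_suspend()/foo_resume() helpers are hypothetical; disable_irq(), enable_irq(), enable_irq_wake(), disable_irq_wake() and device_may_wakeup() are the real kernel APIs involved. The point is exactly the documented orthogonality: the IRQ stays disabled for normal delivery across suspend, yet remains armed as a system wakeup source.

/* Hedged example: wake state vs. enable state of an IRQ (hypothetical driver). */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct foo_dev {
	struct device *dev;
	int irq;
};

static int foo_suspend(struct foo_dev *foo)
{
	/* Stop normal interrupt delivery while suspended... */
	disable_irq(foo->irq);

	/* ...but keep the line armed as a system wakeup source. */
	if (device_may_wakeup(foo->dev))
		enable_irq_wake(foo->irq);

	return 0;
}

static int foo_resume(struct foo_dev *foo)
{
	if (device_may_wakeup(foo->dev))
		disable_irq_wake(foo->irq);

	/* Resume normal interrupt delivery. */
	enable_irq(foo->irq);

	return 0;
}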