Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch/tile: Enable more sophisticated IRQ model for 32-bit chips.

This model is based on the on-chip interrupt model used by the
TILE-Gx next-generation hardware, and interacts much more cleanly
with the Linux generic IRQ layer.

The change includes modifications to the Tilera hypervisor, which
are reflected in the hypervisor headers in arch/tile/include/arch/.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>

+438 -157
+3
arch/tile/include/arch/chip_tile64.h
··· 248 248 /** Does the chip support rev1 DMA packets? */ 249 249 #define CHIP_HAS_REV1_DMA_PACKETS() 0 250 250 251 + /** Does the chip have an IPI shim? */ 252 + #define CHIP_HAS_IPI() 0 253 + 251 254 #endif /* !__OPEN_SOURCE__ */ 252 255 #endif /* __ARCH_CHIP_H__ */
+3
arch/tile/include/arch/chip_tilepro.h
··· 248 248 /** Does the chip support rev1 DMA packets? */ 249 249 #define CHIP_HAS_REV1_DMA_PACKETS() 1 250 250 251 + /** Does the chip have an IPI shim? */ 252 + #define CHIP_HAS_IPI() 0 253 + 251 254 #endif /* !__OPEN_SOURCE__ */ 252 255 #endif /* __ARCH_CHIP_H__ */
+56 -6
arch/tile/include/asm/irq.h
··· 23 23 /* IRQ numbers used for linux IPIs. */ 24 24 #define IRQ_RESCHEDULE 1 25 25 26 - /* The HV interrupt state object. */ 27 - DECLARE_PER_CPU(HV_IntrState, dev_intr_state); 28 - 29 26 void ack_bad_irq(unsigned int irq); 30 27 31 28 /* 32 - * Paravirtualized drivers should call this when their init calls 33 - * discover a valid HV IRQ. 29 + * Different ways of handling interrupts. Tile interrupts are always 30 + * per-cpu; there is no global interrupt controller to implement 31 + * enable/disable. Most onboard devices can send their interrupts to 32 + * many tiles at the same time, and Tile-specific drivers know how to 33 + * deal with this. 34 + * 35 + * However, generic devices (usually PCIE based, sometimes GPIO) 36 + * expect that interrupts will fire on a single core at a time and 37 + * that the irq can be enabled or disabled from any core at any time. 38 + * We implement this by directing such interrupts to a single core. 39 + * 40 + * One added wrinkle is that PCI interrupts can be either 41 + * hardware-cleared (legacy interrupts) or software cleared (MSI). 42 + * Other generic device systems (GPIO) are always software-cleared. 43 + * 44 + * The enums below are used by drivers for onboard devices, including 45 + * the internals of PCI root complex and GPIO. They allow the driver 46 + * to tell the generic irq code what kind of interrupt is mapped to a 47 + * particular IRQ number. 34 48 */ 35 - void tile_irq_activate(unsigned int irq); 49 + enum { 50 + /* per-cpu interrupt; use enable/disable_percpu_irq() to mask */ 51 + TILE_IRQ_PERCPU, 52 + /* global interrupt, hardware responsible for clearing. */ 53 + TILE_IRQ_HW_CLEAR, 54 + /* global interrupt, software responsible for clearing. */ 55 + TILE_IRQ_SW_CLEAR, 56 + }; 57 + 58 + 59 + /* 60 + * Paravirtualized drivers should call this when they dynamically 61 + * allocate a new IRQ or discover an IRQ that was pre-allocated by the 62 + * hypervisor for use with their particular device. 
This gives the 63 + * IRQ subsystem an opportunity to do interrupt-type-specific 64 + * initialization. 65 + * 66 + * ISSUE: We should modify this API so that registering anything 67 + * except percpu interrupts also requires providing callback methods 68 + * for enabling and disabling the interrupt. This would allow the 69 + * generic IRQ code to proxy enable/disable_irq() calls back into the 70 + * PCI subsystem, which in turn could enable or disable the interrupt 71 + * at the PCI shim. 72 + */ 73 + void tile_irq_activate(unsigned int irq, int tile_irq_type); 74 + 75 + /* 76 + * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know 77 + * how to use enable/disable_percpu_irq() to manage interrupts on each 78 + * core. We can't use the generic enable/disable_irq() because they 79 + * use a single reference count per irq, rather than per cpu per irq. 80 + */ 81 + void enable_percpu_irq(unsigned int irq); 82 + void disable_percpu_irq(unsigned int irq); 83 + 84 + 85 + void setup_irq_regs(void); 36 86 37 87 #endif /* _ASM_TILE_IRQ_H */
+24 -3
arch/tile/include/asm/smp.h
··· 20 20 #include <asm/processor.h> 21 21 #include <linux/cpumask.h> 22 22 #include <linux/irqreturn.h> 23 + #include <hv/hypervisor.h> 23 24 24 25 /* Set up this tile to support receiving hypervisor messages */ 25 26 void init_messaging(void); ··· 40 39 /* Process an IPI message */ 41 40 void evaluate_message(int tag); 42 41 43 - /* Process an IRQ_RESCHEDULE IPI. */ 44 - irqreturn_t handle_reschedule_ipi(int irq, void *token); 45 - 46 42 /* Boot a secondary cpu */ 47 43 void online_secondary(void); 48 44 ··· 53 55 /* Accessors for grid size */ 54 56 #define smp_height (smp_topology.height) 55 57 #define smp_width (smp_topology.width) 58 + 59 + /* Convenience functions for converting cpu <-> coords. */ 60 + static inline int cpu_x(int cpu) 61 + { 62 + return cpu % smp_width; 63 + } 64 + static inline int cpu_y(int cpu) 65 + { 66 + return cpu / smp_width; 67 + } 68 + static inline int xy_to_cpu(int x, int y) 69 + { 70 + return y * smp_width + x; 71 + } 56 72 57 73 /* Hypervisor message tags sent via the tile send_IPI*() routines. */ 58 74 #define MSG_TAG_START_CPU 1 ··· 97 85 #define smp_master_cpu 0 98 86 #define smp_height 1 99 87 #define smp_width 1 88 + #define cpu_x(cpu) 0 89 + #define cpu_y(cpu) 0 90 + #define xy_to_cpu(x, y) 0 100 91 101 92 #endif /* !CONFIG_SMP */ 102 93 ··· 137 122 { 138 123 return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits); 139 124 } 125 + 126 + /* Initialize the IPI subsystem. */ 127 + void ipi_init(void); 128 + 129 + /* Function for start-cpu message to cause us to jump to. */ 130 + extern unsigned long start_cpu_function_addr; 140 131 141 132 #endif /* _ASM_TILE_SMP_H */
+64 -55
arch/tile/include/hv/hypervisor.h
··· 20 20 #ifndef _TILE_HV_H 21 21 #define _TILE_HV_H 22 22 23 - #ifdef __tile__ 24 23 #include <arch/chip.h> 25 - #else 26 - /* HACK: Allow use by "tools/cpack/". */ 27 - #include "install/include/arch/chip.h" 28 - #endif 24 + 25 + #include <hv/pagesize.h> 29 26 30 27 /* Linux builds want unsigned long constants, but assembler wants numbers */ 31 28 #ifdef __ASSEMBLER__ ··· 36 39 #define __HV_SIZE_ONE 1UL 37 40 #endif 38 41 39 - 40 42 /** The log2 of the span of a level-1 page table, in bytes. 41 43 */ 42 44 #define HV_LOG2_L1_SPAN 32 ··· 44 48 */ 45 49 #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN) 46 50 47 - /** The log2 of the size of small pages, in bytes. This value should 48 - * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 49 - */ 50 - #define HV_LOG2_PAGE_SIZE_SMALL 16 51 - 52 51 /** The size of small pages, in bytes. This value should be verified 53 52 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 54 53 */ 55 54 #define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL) 56 - 57 - /** The log2 of the size of large pages, in bytes. This value should be 58 - * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 59 - */ 60 - #define HV_LOG2_PAGE_SIZE_LARGE 24 61 55 62 56 /** The size of large pages, in bytes. This value should be verified 63 57 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). ··· 79 93 #define HV_DISPATCH_ENTRY_SIZE 32 80 94 81 95 /** Version of the hypervisor interface defined by this file */ 82 - #define _HV_VERSION 10 96 + #define _HV_VERSION 11 83 97 84 98 /* Index into hypervisor interface dispatch code blocks. 
85 99 * ··· 239 253 /** hv_set_command_line */ 240 254 #define HV_DISPATCH_SET_COMMAND_LINE 47 241 255 242 - /** hv_dev_register_intr_state */ 243 - #define HV_DISPATCH_DEV_REGISTER_INTR_STATE 48 256 + #if !CHIP_HAS_IPI() 257 + 258 + /** hv_clear_intr */ 259 + #define HV_DISPATCH_CLEAR_INTR 48 244 260 245 261 /** hv_enable_intr */ 246 262 #define HV_DISPATCH_ENABLE_INTR 49 ··· 250 262 /** hv_disable_intr */ 251 263 #define HV_DISPATCH_DISABLE_INTR 50 252 264 265 + /** hv_raise_intr */ 266 + #define HV_DISPATCH_RAISE_INTR 51 267 + 253 268 /** hv_trigger_ipi */ 254 - #define HV_DISPATCH_TRIGGER_IPI 51 269 + #define HV_DISPATCH_TRIGGER_IPI 52 270 + 271 + #endif /* !CHIP_HAS_IPI() */ 255 272 256 273 /** hv_store_mapping */ 257 - #define HV_DISPATCH_STORE_MAPPING 52 274 + #define HV_DISPATCH_STORE_MAPPING 53 258 275 259 276 /** hv_inquire_realpa */ 260 - #define HV_DISPATCH_INQUIRE_REALPA 53 277 + #define HV_DISPATCH_INQUIRE_REALPA 54 261 278 262 279 /** hv_flush_all */ 263 - #define HV_DISPATCH_FLUSH_ALL 54 280 + #define HV_DISPATCH_FLUSH_ALL 55 281 + 282 + #if CHIP_HAS_IPI() 283 + /** hv_get_ipi_pte */ 284 + #define HV_DISPATCH_GET_IPI_PTE 56 285 + #endif 264 286 265 287 /** One more than the largest dispatch value */ 266 - #define _HV_DISPATCH_END 55 288 + #define _HV_DISPATCH_END 57 267 289 268 290 269 291 #ifndef __ASSEMBLER__ ··· 482 484 */ 483 485 int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len); 484 486 485 - /** State object used to enable and disable one-shot and level-sensitive 486 - * interrupts. */ 487 - typedef struct 488 - { 489 - #if CHIP_VA_WIDTH() > 32 490 - __hv64 opaque[2]; /**< No user-serviceable parts inside */ 491 - #else 492 - __hv32 opaque[2]; /**< No user-serviceable parts inside */ 493 - #endif 494 - } 495 - HV_IntrState; 496 - 497 - /** A set of interrupts. 
*/ 498 - typedef __hv32 HV_IntrMask; 499 - 500 487 /** Tile coordinate */ 501 488 typedef struct 502 489 { ··· 492 509 int y; 493 510 } HV_Coord; 494 511 512 + 513 + #if CHIP_HAS_IPI() 514 + 515 + /** Get the PTE for sending an IPI to a particular tile. 516 + * 517 + * @param tile Tile which will receive the IPI. 518 + * @param pl Indicates which IPI registers: 0 = IPI_0, 1 = IPI_1. 519 + * @param pte Filled with resulting PTE. 520 + * @result Zero if no error, non-zero for invalid parameters. 521 + */ 522 + int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte); 523 + 524 + #else /* !CHIP_HAS_IPI() */ 525 + 526 + /** A set of interrupts. */ 527 + typedef __hv32 HV_IntrMask; 528 + 495 529 /** The low interrupt numbers are reserved for use by the client in 496 530 * delivering IPIs. Any interrupt numbers higher than this value are 497 531 * reserved for use by HV device drivers. */ 498 532 #define HV_MAX_IPI_INTERRUPT 7 499 533 500 - /** Register an interrupt state object. This object is used to enable and 501 - * disable one-shot and level-sensitive interrupts. Once the state is 502 - * registered, the client must not read or write the state object; doing 503 - * so will cause undefined results. 534 + /** Enable a set of device interrupts. 504 535 * 505 - * @param intr_state Pointer to interrupt state object. 506 - * @return HV_OK on success, or a hypervisor error code. 507 - */ 508 - HV_Errno hv_dev_register_intr_state(HV_IntrState* intr_state); 509 - 510 - /** Enable a set of one-shot and level-sensitive interrupts. 511 - * 512 - * @param intr_state Pointer to interrupt state object. 513 536 * @param enab_mask Bitmap of interrupts to enable. 514 537 */ 515 - void hv_enable_intr(HV_IntrState* intr_state, HV_IntrMask enab_mask); 538 + void hv_enable_intr(HV_IntrMask enab_mask); 516 539 517 - /** Disable a set of one-shot and level-sensitive interrupts. 540 + /** Disable a set of device interrupts. 
518 541 * 519 - * @param intr_state Pointer to interrupt state object. 520 542 * @param disab_mask Bitmap of interrupts to disable. 521 543 */ 522 - void hv_disable_intr(HV_IntrState* intr_state, HV_IntrMask disab_mask); 544 + void hv_disable_intr(HV_IntrMask disab_mask); 545 + 546 + /** Clear a set of device interrupts. 547 + * 548 + * @param clear_mask Bitmap of interrupts to clear. 549 + */ 550 + void hv_clear_intr(HV_IntrMask clear_mask); 551 + 552 + /** Assert a set of device interrupts. 553 + * 554 + * @param assert_mask Bitmap of interrupts to assert. 555 + */ 556 + void hv_assert_intr(HV_IntrMask assert_mask); 523 557 524 558 /** Trigger a one-shot interrupt on some tile 525 559 * ··· 546 546 * @return HV_OK on success, or a hypervisor error code. 547 547 */ 548 548 HV_Errno hv_trigger_ipi(HV_Coord tile, int interrupt); 549 + 550 + #endif // !CHIP_HAS_IPI() 549 551 550 552 /** Store memory mapping in debug memory so that external debugger can read it. 551 553 * A maximum of 16 entries can be stored. ··· 1011 1009 * registers in the client will be set so that when the client irets, 1012 1010 * it will return to the code which was interrupted by the INTCTRL_1 1013 1011 * interrupt. 1012 + * 1013 + * Under some circumstances, the firing of INTCTRL_1 can race with 1014 + * the lowering of a device interrupt. In such a case, the 1015 + * hv_downcall_dispatch service may issue an iret instruction instead 1016 + * of entering one of the client's actual downcall-handling interrupt 1017 + * vectors. This will return execution to the location that was 1018 + * interrupted by INTCTRL_1. 1014 1019 * 1015 1020 * Any saving of registers should be done by the actual handling 1016 1021 * vectors; no registers should be changed by the INTCTRL_1 handler.
+32
arch/tile/include/hv/pagesize.h
··· 1 + /* 2 + * Copyright 2010 Tilera Corporation. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 + * NON INFRINGEMENT. See the GNU General Public License for 12 + * more details. 13 + */ 14 + 15 + /** 16 + * @file pagesize.h 17 + */ 18 + 19 + #ifndef _HV_PAGESIZE_H 20 + #define _HV_PAGESIZE_H 21 + 22 + /** The log2 of the size of small pages, in bytes. This value should 23 + * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 24 + */ 25 + #define HV_LOG2_PAGE_SIZE_SMALL 16 26 + 27 + /** The log2 of the size of large pages, in bytes. This value should be 28 + * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 29 + */ 30 + #define HV_LOG2_PAGE_SIZE_LARGE 24 31 + 32 + #endif /* _HV_PAGESIZE_H */
+8 -6
arch/tile/kernel/hvglue.lds
··· 46 46 hv_confstr = TEXT_OFFSET + 0x105a0; 47 47 hv_reexec = TEXT_OFFSET + 0x105c0; 48 48 hv_set_command_line = TEXT_OFFSET + 0x105e0; 49 - hv_dev_register_intr_state = TEXT_OFFSET + 0x10600; 49 + hv_clear_intr = TEXT_OFFSET + 0x10600; 50 50 hv_enable_intr = TEXT_OFFSET + 0x10620; 51 51 hv_disable_intr = TEXT_OFFSET + 0x10640; 52 - hv_trigger_ipi = TEXT_OFFSET + 0x10660; 53 - hv_store_mapping = TEXT_OFFSET + 0x10680; 54 - hv_inquire_realpa = TEXT_OFFSET + 0x106a0; 55 - hv_flush_all = TEXT_OFFSET + 0x106c0; 56 - hv_glue_internals = TEXT_OFFSET + 0x106e0; 52 + hv_raise_intr = TEXT_OFFSET + 0x10660; 53 + hv_trigger_ipi = TEXT_OFFSET + 0x10680; 54 + hv_store_mapping = TEXT_OFFSET + 0x106a0; 55 + hv_inquire_realpa = TEXT_OFFSET + 0x106c0; 56 + hv_flush_all = TEXT_OFFSET + 0x106e0; 57 + hv_get_ipi_pte = TEXT_OFFSET + 0x10700; 58 + hv_glue_internals = TEXT_OFFSET + 0x10720;
+185 -78
arch/tile/kernel/irq.c
··· 19 19 #include <linux/kernel_stat.h> 20 20 #include <linux/uaccess.h> 21 21 #include <hv/drv_pcie_rc_intf.h> 22 + #include <arch/spr_def.h> 23 + #include <asm/traps.h> 24 + 25 + /* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */ 26 + #define IS_HW_CLEARED 1 22 27 23 28 /* 24 29 * The set of interrupts we enable for raw_local_irq_enable(). ··· 36 31 INITIAL_INTERRUPTS_ENABLED; 37 32 EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask); 38 33 39 - /* Define per-tile device interrupt state */ 40 - DEFINE_PER_CPU(HV_IntrState, dev_intr_state); 41 - 34 + /* Define per-tile device interrupt statistics state. */ 42 35 DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp; 43 36 EXPORT_PER_CPU_SYMBOL(irq_stat); 44 37 45 - 38 + /* 39 + * Define per-tile irq disable mask; the hardware/HV only has a single 40 + * mask that we use to implement both masking and disabling. 41 + */ 42 + static DEFINE_PER_CPU(unsigned long, irq_disable_mask) 43 + ____cacheline_internodealigned_in_smp; 46 44 47 45 /* 48 - * Interrupt dispatcher, invoked upon a hypervisor device interrupt downcall 46 + * Per-tile IRQ nesting depth. Used to make sure we enable newly 47 + * enabled IRQs before exiting the outermost interrupt. 48 + */ 49 + static DEFINE_PER_CPU(int, irq_depth); 50 + 51 + /* State for allocating IRQs on Gx. */ 52 + #if CHIP_HAS_IPI() 53 + static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE); 54 + static DEFINE_SPINLOCK(available_irqs_lock); 55 + #endif 56 + 57 + #if CHIP_HAS_IPI() 58 + /* Use SPRs to manipulate device interrupts. */ 59 + #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask) 60 + #define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask) 61 + #define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask) 62 + #else 63 + /* Use HV to manipulate device interrupts. 
*/ 64 + #define mask_irqs(irq_mask) hv_disable_intr(irq_mask) 65 + #define unmask_irqs(irq_mask) hv_enable_intr(irq_mask) 66 + #define clear_irqs(irq_mask) hv_clear_intr(irq_mask) 67 + #endif 68 + 69 + /* 70 + * The interrupt handling path, implemented in terms of HV interrupt 71 + * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx. 49 72 */ 50 73 void tile_dev_intr(struct pt_regs *regs, int intnum) 51 74 { 52 - int irq; 75 + int depth = __get_cpu_var(irq_depth)++; 76 + unsigned long original_irqs; 77 + unsigned long remaining_irqs; 78 + struct pt_regs *old_regs; 53 79 80 + #if CHIP_HAS_IPI() 54 81 /* 55 - * Get the device interrupt pending mask from where the hypervisor 56 - * has tucked it away for us. 82 + * Pending interrupts are listed in an SPR. We might be 83 + * nested, so be sure to only handle irqs that weren't already 84 + * masked by a previous interrupt. Then, mask out the ones 85 + * we're going to handle. 57 86 */ 58 - unsigned long pending_dev_intr_mask = __insn_mfspr(SPR_SYSTEM_SAVE_1_3); 59 - 87 + unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1); 88 + original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked; 89 + __insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs); 90 + #else 91 + /* 92 + * Hypervisor performs the equivalent of the Gx code above and 93 + * then puts the pending interrupt mask into a system save reg 94 + * for us to find. 95 + */ 96 + original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3); 97 + #endif 98 + remaining_irqs = original_irqs; 60 99 61 100 /* Track time spent here in an interrupt context. 
*/ 62 - struct pt_regs *old_regs = set_irq_regs(regs); 101 + old_regs = set_irq_regs(regs); 63 102 irq_enter(); 64 103 65 104 #ifdef CONFIG_DEBUG_STACKOVERFLOW ··· 111 62 { 112 63 long sp = stack_pointer - (long) current_thread_info(); 113 64 if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { 114 - printk(KERN_EMERG "tile_dev_intr: " 65 + pr_emerg("tile_dev_intr: " 115 66 "stack overflow: %ld\n", 116 67 sp - sizeof(struct thread_info)); 117 68 dump_stack(); 118 69 } 119 70 } 120 71 #endif 72 + while (remaining_irqs) { 73 + unsigned long irq = __ffs(remaining_irqs); 74 + remaining_irqs &= ~(1UL << irq); 121 75 122 - for (irq = 0; pending_dev_intr_mask; ++irq) { 123 - if (pending_dev_intr_mask & 0x1) { 124 - generic_handle_irq(irq); 76 + /* Count device irqs; Linux IPIs are counted elsewhere. */ 77 + if (irq != IRQ_RESCHEDULE) 78 + __get_cpu_var(irq_stat).irq_dev_intr_count++; 125 79 126 - /* Count device irqs; IPIs are counted elsewhere. */ 127 - if (irq > HV_MAX_IPI_INTERRUPT) 128 - __get_cpu_var(irq_stat).irq_dev_intr_count++; 129 - } 130 - pending_dev_intr_mask >>= 1; 80 + generic_handle_irq(irq); 131 81 } 82 + 83 + /* 84 + * If we weren't nested, turn on all enabled interrupts, 85 + * including any that were reenabled during interrupt 86 + * handling. 87 + */ 88 + if (depth == 0) 89 + unmask_irqs(~__get_cpu_var(irq_disable_mask)); 90 + 91 + __get_cpu_var(irq_depth)--; 132 92 133 93 /* 134 94 * Track time spent against the current process again and ··· 148 90 } 149 91 150 92 151 - /* Mask an interrupt. */ 152 - static void hv_dev_irq_mask(unsigned int irq) 93 + /* 94 + * Remove an irq from the disabled mask. If we're in an interrupt 95 + * context, defer enabling the HW interrupt until we leave. 
96 + */ 97 + void enable_percpu_irq(unsigned int irq) 153 98 { 154 - HV_IntrState *p_intr_state = &__get_cpu_var(dev_intr_state); 155 - hv_disable_intr(p_intr_state, 1 << irq); 99 + get_cpu_var(irq_disable_mask) &= ~(1UL << irq); 100 + if (__get_cpu_var(irq_depth) == 0) 101 + unmask_irqs(1UL << irq); 102 + put_cpu_var(irq_disable_mask); 103 + } 104 + EXPORT_SYMBOL(enable_percpu_irq); 105 + 106 + /* 107 + * Add an irq to the disabled mask. We disable the HW interrupt 108 + * immediately so that there's no possibility of it firing. If we're 109 + * in an interrupt context, the return path is careful to avoid 110 + * unmasking a newly disabled interrupt. 111 + */ 112 + void disable_percpu_irq(unsigned int irq) 113 + { 114 + get_cpu_var(irq_disable_mask) |= (1UL << irq); 115 + mask_irqs(1UL << irq); 116 + put_cpu_var(irq_disable_mask); 117 + } 118 + EXPORT_SYMBOL(disable_percpu_irq); 119 + 120 + /* Mask an interrupt. */ 121 + static void tile_irq_chip_mask(unsigned int irq) 122 + { 123 + mask_irqs(1UL << irq); 156 124 } 157 125 158 126 /* Unmask an interrupt. */ 159 - static void hv_dev_irq_unmask(unsigned int irq) 127 + static void tile_irq_chip_unmask(unsigned int irq) 160 128 { 161 - /* Re-enable the hypervisor to generate interrupts. */ 162 - HV_IntrState *p_intr_state = &__get_cpu_var(dev_intr_state); 163 - hv_enable_intr(p_intr_state, 1 << irq); 129 + unmask_irqs(1UL << irq); 164 130 } 165 131 166 132 /* 167 - * The HV doesn't latch incoming interrupts while an interrupt is 168 - * disabled, so we need to reenable interrupts before running the 169 - * handler. 170 - * 171 - * ISSUE: Enabling the interrupt this early avoids any race conditions 172 - * but introduces the possibility of nested interrupt stack overflow. 173 - * An imminent change to the HV IRQ model will fix this. 133 + * Clear an interrupt before processing it so that any new assertions 134 + * will trigger another irq. 
174 135 */ 175 - static void hv_dev_irq_ack(unsigned int irq) 136 + static void tile_irq_chip_ack(unsigned int irq) 176 137 { 177 - hv_dev_irq_unmask(irq); 138 + if ((unsigned long)get_irq_chip_data(irq) != IS_HW_CLEARED) 139 + clear_irqs(1UL << irq); 178 140 } 179 141 180 142 /* 181 - * Since ack() reenables interrupts, there's nothing to do at eoi(). 143 + * For per-cpu interrupts, we need to avoid unmasking any interrupts 144 + * that we disabled via disable_percpu_irq(). 182 145 */ 183 - static void hv_dev_irq_eoi(unsigned int irq) 146 + static void tile_irq_chip_eoi(unsigned int irq) 184 147 { 148 + if (!(__get_cpu_var(irq_disable_mask) & (1UL << irq))) 149 + unmask_irqs(1UL << irq); 185 150 } 186 151 187 - static struct irq_chip hv_dev_irq_chip = { 188 - .typename = "hv_dev_irq_chip", 189 - .ack = hv_dev_irq_ack, 190 - .mask = hv_dev_irq_mask, 191 - .unmask = hv_dev_irq_unmask, 192 - .eoi = hv_dev_irq_eoi, 193 - }; 194 - 195 - static struct irqaction resched_action = { 196 - .handler = handle_reschedule_ipi, 197 - .name = "resched", 198 - .dev_id = handle_reschedule_ipi /* unique token */, 152 + static struct irq_chip tile_irq_chip = { 153 + .typename = "tile_irq_chip", 154 + .ack = tile_irq_chip_ack, 155 + .eoi = tile_irq_chip_eoi, 156 + .mask = tile_irq_chip_mask, 157 + .unmask = tile_irq_chip_unmask, 199 158 }; 200 159 201 160 void __init init_IRQ(void) 202 161 { 203 - /* Bind IPI irqs. Does this belong somewhere else in init? */ 204 - tile_irq_activate(IRQ_RESCHEDULE); 205 - BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action)); 162 + ipi_init(); 206 163 } 207 164 208 - void __cpuinit init_per_tile_IRQs(void) 165 + void __cpuinit setup_irq_regs(void) 209 166 { 210 - int rc; 211 - 212 - /* Set the pointer to the per-tile device interrupt state. 
*/ 213 - HV_IntrState *sv_ptr = &__get_cpu_var(dev_intr_state); 214 - rc = hv_dev_register_intr_state(sv_ptr); 215 - if (rc != HV_OK) 216 - panic("hv_dev_register_intr_state: error %d", rc); 217 - 167 + /* Enable interrupt delivery. */ 168 + unmask_irqs(~0UL); 169 + #if CHIP_HAS_IPI() 170 + raw_local_irq_unmask(INT_IPI_1); 171 + #endif 218 172 } 219 173 220 - void tile_irq_activate(unsigned int irq) 174 + void tile_irq_activate(unsigned int irq, int tile_irq_type) 221 175 { 222 176 /* 223 - * Paravirtualized drivers can call up to the HV to find out 224 - * which irq they're associated with. The HV interface 225 - * doesn't provide a generic call for discovering all valid 226 - * IRQs, so drivers must call this method to initialize newly 227 - * discovered IRQs. 228 - * 229 - * We could also just initialize all 32 IRQs at startup, but 230 - * doing so would lead to a kernel fault if an unexpected 231 - * interrupt fires and jumps to a NULL action. By defering 232 - * the set_irq_chip_and_handler() call, unexpected IRQs are 233 - * handled properly by handle_bad_irq(). 177 + * We use handle_level_irq() by default because the pending 178 + * interrupt vector (whether modeled by the HV on TILE64 and 179 + * TILEPro or implemented in hardware on TILE-Gx) has 180 + * level-style semantics for each bit. An interrupt fires 181 + * whenever a bit is high, not just at edges. 234 182 */ 235 - hv_dev_irq_mask(irq); 236 - set_irq_chip_and_handler(irq, &hv_dev_irq_chip, handle_percpu_irq); 183 + irq_flow_handler_t handle = handle_level_irq; 184 + if (tile_irq_type == TILE_IRQ_PERCPU) 185 + handle = handle_percpu_irq; 186 + set_irq_chip_and_handler(irq, &tile_irq_chip, handle); 187 + 188 + /* 189 + * Flag interrupts that are hardware-cleared so that ack() 190 + * won't clear them. 
191 + */ 192 + if (tile_irq_type == TILE_IRQ_HW_CLEAR) 193 + set_irq_chip_data(irq, (void *)IS_HW_CLEARED); 237 194 } 195 + EXPORT_SYMBOL(tile_irq_activate); 196 + 238 197 239 198 void ack_bad_irq(unsigned int irq) 240 199 { 241 - printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); 200 + pr_err("unexpected IRQ trap at vector %02x\n", irq); 242 201 } 243 202 244 203 /* ··· 300 225 } 301 226 return 0; 302 227 } 228 + 229 + #if CHIP_HAS_IPI() 230 + int create_irq(void) 231 + { 232 + unsigned long flags; 233 + int result; 234 + 235 + spin_lock_irqsave(&available_irqs_lock, flags); 236 + if (available_irqs == 0) 237 + result = -ENOMEM; 238 + else { 239 + result = __ffs(available_irqs); 240 + available_irqs &= ~(1UL << result); 241 + dynamic_irq_init(result); 242 + } 243 + spin_unlock_irqrestore(&available_irqs_lock, flags); 244 + 245 + return result; 246 + } 247 + EXPORT_SYMBOL(create_irq); 248 + 249 + void destroy_irq(unsigned int irq) 250 + { 251 + unsigned long flags; 252 + 253 + spin_lock_irqsave(&available_irqs_lock, flags); 254 + available_irqs |= (1UL << irq); 255 + dynamic_irq_cleanup(irq); 256 + spin_unlock_irqrestore(&available_irqs_lock, flags); 257 + } 258 + EXPORT_SYMBOL(destroy_irq); 259 + #endif
+63 -9
arch/tile/kernel/smp.c
··· 15 15 */ 16 16 17 17 #include <linux/smp.h> 18 + #include <linux/interrupt.h> 19 + #include <linux/io.h> 18 20 #include <linux/irq.h> 21 + #include <linux/module.h> 19 22 #include <asm/cacheflush.h> 20 23 21 24 HV_Topology smp_topology __write_once; 25 + EXPORT_SYMBOL(smp_topology); 26 + 27 + #if CHIP_HAS_IPI() 28 + static unsigned long __iomem *ipi_mappings[NR_CPUS]; 29 + #endif 22 30 23 31 24 32 /* ··· 108 100 /* Handler to start the current cpu. */ 109 101 static void smp_start_cpu_interrupt(void) 110 102 { 111 - extern unsigned long start_cpu_function_addr; 112 103 get_irq_regs()->pc = start_cpu_function_addr; 113 104 } 114 105 ··· 181 174 } 182 175 183 176 184 - /* 185 - * The smp_send_reschedule() path does not use the hv_message_intr() 186 - * path but instead the faster tile_dev_intr() path for interrupts. 187 - */ 188 - 189 - irqreturn_t handle_reschedule_ipi(int irq, void *token) 177 + /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ 178 + static irqreturn_t handle_reschedule_ipi(int irq, void *token) 190 179 { 191 180 /* 192 181 * Nothing to do here; when we return from interrupt, the ··· 194 191 return IRQ_HANDLED; 195 192 } 196 193 194 + static struct irqaction resched_action = { 195 + .handler = handle_reschedule_ipi, 196 + .name = "resched", 197 + .dev_id = handle_reschedule_ipi /* unique token */, 198 + }; 199 + 200 + void __init ipi_init(void) 201 + { 202 + #if CHIP_HAS_IPI() 203 + int cpu; 204 + /* Map IPI trigger MMIO addresses. */ 205 + for_each_possible_cpu(cpu) { 206 + HV_Coord tile; 207 + HV_PTE pte; 208 + unsigned long offset; 209 + 210 + tile.x = cpu_x(cpu); 211 + tile.y = cpu_y(cpu); 212 + if (hv_get_ipi_pte(tile, 1, &pte) != 0) 213 + panic("Failed to initialize IPI for cpu %d\n", cpu); 214 + 215 + offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; 216 + ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte); 217 + } 218 + #endif 219 + 220 + /* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. 
*/ 221 + tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU); 222 + BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action)); 223 + } 224 + 225 + #if CHIP_HAS_IPI() 226 + 227 + void smp_send_reschedule(int cpu) 228 + { 229 + WARN_ON(cpu_is_offline(cpu)); 230 + 231 + /* 232 + * We just want to do an MMIO store. The traditional writeq() 233 + * functions aren't really correct here, since they're always 234 + * directed at the PCI shim. For now, just do a raw store, 235 + * casting away the __iomem attribute. 236 + */ 237 + ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0; 238 + } 239 + 240 + #else 241 + 197 242 void smp_send_reschedule(int cpu) 198 243 { 199 244 HV_Coord coord; 200 245 201 246 WARN_ON(cpu_is_offline(cpu)); 202 - coord.y = cpu / smp_width; 203 - coord.x = cpu % smp_width; 247 + 248 + coord.y = cpu_y(cpu); 249 + coord.x = cpu_x(cpu); 204 250 hv_trigger_ipi(coord, IRQ_RESCHEDULE); 205 251 } 252 + 253 + #endif /* CHIP_HAS_IPI() */