Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha-2.6:
alpha: Enable GENERIC_HARDIRQS_NO_DEPRECATED
alpha: irq: Convert affinity to use irq_data
alpha: irq: Remove pointless irq status manipulation
alpha: titan: Convert irq_chip functions
alpha: takara: Convert irq_chip functions
alpha: sable: Convert irq_chip functions
alpha: rx164: Convert irq_chip functions
alpha: noritake: Convert irq_chip functions
alpha: rawhide: Convert irq_chip functions
alpha: mikasa: Convert irq_chip functions
alpha: marvel: Convert irq_chip functions
alpha: eiger: Convert irq_chip functions
alpha: eb64p: Convert irq_chip functions
alpha: dp264: Convert irq_chip functions
alpha: cabriolet: Convert irq_chip functions
alpha: i8259, alcor, jensen wildfire: Convert irq_chip
alpha: srm: Convert irq_chip functions
alpha: Pyxis convert irq_chip functions
Fix typo in call to irq_to_desc()

+224 -209
+1
arch/alpha/Kconfig
··· 11 select HAVE_GENERIC_HARDIRQS 12 select GENERIC_IRQ_PROBE 13 select AUTO_IRQ_AFFINITY if SMP 14 help 15 The Alpha is a 64-bit general-purpose processor designed and 16 marketed by the Digital Equipment Corporation of blessed memory,
··· 11 select HAVE_GENERIC_HARDIRQS 12 select GENERIC_IRQ_PROBE 13 select AUTO_IRQ_AFFINITY if SMP 14 + select GENERIC_HARDIRQS_NO_DEPRECATED 15 help 16 The Alpha is a 64-bit general-purpose processor designed and 17 marketed by the Digital Equipment Corporation of blessed memory,
+9 -4
arch/alpha/kernel/irq.c
··· 44 45 int irq_select_affinity(unsigned int irq) 46 { 47 - struct irq_desc *desc = irq_to_desc[irq]; 48 static int last_cpu; 49 int cpu = last_cpu + 1; 50 51 - if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq]) 52 return 1; 53 54 while (!cpu_possible(cpu) || ··· 61 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 62 last_cpu = cpu; 63 64 - cpumask_copy(desc->affinity, cpumask_of(cpu)); 65 - get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu)); 66 return 0; 67 } 68 #endif /* CONFIG_SMP */
··· 44 45 int irq_select_affinity(unsigned int irq) 46 { 47 + struct irq_data *data = irq_get_irq_data(irq); 48 + struct irq_chip *chip; 49 static int last_cpu; 50 int cpu = last_cpu + 1; 51 52 + if (!data) 53 + return 1; 54 + chip = irq_data_get_irq_chip(data); 55 + 56 + if (!chip->irq_set_affinity || irq_user_affinity[irq]) 57 return 1; 58 59 while (!cpu_possible(cpu) || ··· 56 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 57 last_cpu = cpu; 58 59 + cpumask_copy(data->affinity, cpumask_of(cpu)); 60 + chip->irq_set_affinity(data, cpumask_of(cpu), false); 61 return 0; 62 } 63 #endif /* CONFIG_SMP */
+3 -8
arch/alpha/kernel/irq_alpha.c
··· 228 void __init 229 init_rtc_irq(void) 230 { 231 - struct irq_desc *desc = irq_to_desc(RTC_IRQ); 232 - 233 - if (desc) { 234 - desc->status |= IRQ_DISABLED; 235 - set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip, 236 - handle_simple_irq, "RTC"); 237 - setup_irq(RTC_IRQ, &timer_irqaction); 238 - } 239 } 240 241 /* Dummy irqactions. */
··· 228 void __init 229 init_rtc_irq(void) 230 { 231 + set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip, 232 + handle_simple_irq, "RTC"); 233 + setup_irq(RTC_IRQ, &timer_irqaction); 234 } 235 236 /* Dummy irqactions. */
+10 -8
arch/alpha/kernel/irq_i8259.c
··· 33 } 34 35 inline void 36 - i8259a_enable_irq(unsigned int irq) 37 { 38 spin_lock(&i8259_irq_lock); 39 - i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 40 spin_unlock(&i8259_irq_lock); 41 } 42 ··· 47 } 48 49 void 50 - i8259a_disable_irq(unsigned int irq) 51 { 52 spin_lock(&i8259_irq_lock); 53 - __i8259a_disable_irq(irq); 54 spin_unlock(&i8259_irq_lock); 55 } 56 57 void 58 - i8259a_mask_and_ack_irq(unsigned int irq) 59 { 60 spin_lock(&i8259_irq_lock); 61 __i8259a_disable_irq(irq); 62 ··· 73 74 struct irq_chip i8259a_irq_type = { 75 .name = "XT-PIC", 76 - .unmask = i8259a_enable_irq, 77 - .mask = i8259a_disable_irq, 78 - .mask_ack = i8259a_mask_and_ack_irq, 79 }; 80 81 void __init
··· 33 } 34 35 inline void 36 + i8259a_enable_irq(struct irq_data *d) 37 { 38 spin_lock(&i8259_irq_lock); 39 + i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); 40 spin_unlock(&i8259_irq_lock); 41 } 42 ··· 47 } 48 49 void 50 + i8259a_disable_irq(struct irq_data *d) 51 { 52 spin_lock(&i8259_irq_lock); 53 + __i8259a_disable_irq(d->irq); 54 spin_unlock(&i8259_irq_lock); 55 } 56 57 void 58 + i8259a_mask_and_ack_irq(struct irq_data *d) 59 { 60 + unsigned int irq = d->irq; 61 + 62 spin_lock(&i8259_irq_lock); 63 __i8259a_disable_irq(irq); 64 ··· 71 72 struct irq_chip i8259a_irq_type = { 73 .name = "XT-PIC", 74 + .irq_unmask = i8259a_enable_irq, 75 + .irq_mask = i8259a_disable_irq, 76 + .irq_mask_ack = i8259a_mask_and_ack_irq, 77 }; 78 79 void __init
+3 -5
arch/alpha/kernel/irq_impl.h
··· 31 32 extern void common_init_isa_dma(void); 33 34 - extern void i8259a_enable_irq(unsigned int); 35 - extern void i8259a_disable_irq(unsigned int); 36 - extern void i8259a_mask_and_ack_irq(unsigned int); 37 - extern unsigned int i8259a_startup_irq(unsigned int); 38 - extern void i8259a_end_irq(unsigned int); 39 extern struct irq_chip i8259a_irq_type; 40 extern void init_i8259a_irqs(void); 41
··· 31 32 extern void common_init_isa_dma(void); 33 34 + extern void i8259a_enable_irq(struct irq_data *d); 35 + extern void i8259a_disable_irq(struct irq_data *d); 36 + extern void i8259a_mask_and_ack_irq(struct irq_data *d); 37 extern struct irq_chip i8259a_irq_type; 38 extern void init_i8259a_irqs(void); 39
+10 -10
arch/alpha/kernel/irq_pyxis.c
··· 29 } 30 31 static inline void 32 - pyxis_enable_irq(unsigned int irq) 33 { 34 - pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 35 } 36 37 static void 38 - pyxis_disable_irq(unsigned int irq) 39 { 40 - pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 41 } 42 43 static void 44 - pyxis_mask_and_ack_irq(unsigned int irq) 45 { 46 - unsigned long bit = 1UL << (irq - 16); 47 unsigned long mask = cached_irq_mask &= ~bit; 48 49 /* Disable the interrupt. */ ··· 58 59 static struct irq_chip pyxis_irq_type = { 60 .name = "PYXIS", 61 - .mask_ack = pyxis_mask_and_ack_irq, 62 - .mask = pyxis_disable_irq, 63 - .unmask = pyxis_enable_irq, 64 }; 65 66 void ··· 103 if ((ignore_mask >> i) & 1) 104 continue; 105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); 106 - irq_to_desc(i)->status |= IRQ_LEVEL; 107 } 108 109 setup_irq(16+7, &isa_cascade_irqaction);
··· 29 } 30 31 static inline void 32 + pyxis_enable_irq(struct irq_data *d) 33 { 34 + pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); 35 } 36 37 static void 38 + pyxis_disable_irq(struct irq_data *d) 39 { 40 + pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); 41 } 42 43 static void 44 + pyxis_mask_and_ack_irq(struct irq_data *d) 45 { 46 + unsigned long bit = 1UL << (d->irq - 16); 47 unsigned long mask = cached_irq_mask &= ~bit; 48 49 /* Disable the interrupt. */ ··· 58 59 static struct irq_chip pyxis_irq_type = { 60 .name = "PYXIS", 61 + .irq_mask_ack = pyxis_mask_and_ack_irq, 62 + .irq_mask = pyxis_disable_irq, 63 + .irq_unmask = pyxis_enable_irq, 64 }; 65 66 void ··· 103 if ((ignore_mask >> i) & 1) 104 continue; 105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); 106 + irq_set_status_flags(i, IRQ_LEVEL); 107 } 108 109 setup_irq(16+7, &isa_cascade_irqaction);
+8 -8
arch/alpha/kernel/irq_srm.c
··· 18 DEFINE_SPINLOCK(srm_irq_lock); 19 20 static inline void 21 - srm_enable_irq(unsigned int irq) 22 { 23 spin_lock(&srm_irq_lock); 24 - cserve_ena(irq - 16); 25 spin_unlock(&srm_irq_lock); 26 } 27 28 static void 29 - srm_disable_irq(unsigned int irq) 30 { 31 spin_lock(&srm_irq_lock); 32 - cserve_dis(irq - 16); 33 spin_unlock(&srm_irq_lock); 34 } 35 36 /* Handle interrupts from the SRM, assuming no additional weirdness. */ 37 static struct irq_chip srm_irq_type = { 38 .name = "SRM", 39 - .unmask = srm_enable_irq, 40 - .mask = srm_disable_irq, 41 - .mask_ack = srm_disable_irq, 42 }; 43 44 void __init ··· 52 if (i < 64 && ((ignore_mask >> i) & 1)) 53 continue; 54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); 55 - irq_to_desc(i)->status |= IRQ_LEVEL; 56 } 57 } 58
··· 18 DEFINE_SPINLOCK(srm_irq_lock); 19 20 static inline void 21 + srm_enable_irq(struct irq_data *d) 22 { 23 spin_lock(&srm_irq_lock); 24 + cserve_ena(d->irq - 16); 25 spin_unlock(&srm_irq_lock); 26 } 27 28 static void 29 + srm_disable_irq(struct irq_data *d) 30 { 31 spin_lock(&srm_irq_lock); 32 + cserve_dis(d->irq - 16); 33 spin_unlock(&srm_irq_lock); 34 } 35 36 /* Handle interrupts from the SRM, assuming no additional weirdness. */ 37 static struct irq_chip srm_irq_type = { 38 .name = "SRM", 39 + .irq_unmask = srm_enable_irq, 40 + .irq_mask = srm_disable_irq, 41 + .irq_mask_ack = srm_disable_irq, 42 }; 43 44 void __init ··· 52 if (i < 64 && ((ignore_mask >> i) & 1)) 53 continue; 54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); 55 + irq_set_status_flags(i, IRQ_LEVEL); 56 } 57 } 58
+14 -14
arch/alpha/kernel/sys_alcor.c
··· 44 } 45 46 static inline void 47 - alcor_enable_irq(unsigned int irq) 48 { 49 - alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 50 } 51 52 static void 53 - alcor_disable_irq(unsigned int irq) 54 { 55 - alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 56 } 57 58 static void 59 - alcor_mask_and_ack_irq(unsigned int irq) 60 { 61 - alcor_disable_irq(irq); 62 63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 64 - *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb(); 65 *(vuip)GRU_INT_CLEAR = 0; mb(); 66 } 67 68 static void 69 - alcor_isa_mask_and_ack_irq(unsigned int irq) 70 { 71 - i8259a_mask_and_ack_irq(irq); 72 73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); ··· 77 78 static struct irq_chip alcor_irq_type = { 79 .name = "ALCOR", 80 - .unmask = alcor_enable_irq, 81 - .mask = alcor_disable_irq, 82 - .mask_ack = alcor_mask_and_ack_irq, 83 }; 84 85 static void ··· 126 if (i >= 16+20 && i <= 16+30) 127 continue; 128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); 129 - irq_to_desc(i)->status |= IRQ_LEVEL; 130 } 131 - i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; 132 133 init_i8259a_irqs(); 134 common_init_isa_dma();
··· 44 } 45 46 static inline void 47 + alcor_enable_irq(struct irq_data *d) 48 { 49 + alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); 50 } 51 52 static void 53 + alcor_disable_irq(struct irq_data *d) 54 { 55 + alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); 56 } 57 58 static void 59 + alcor_mask_and_ack_irq(struct irq_data *d) 60 { 61 + alcor_disable_irq(d); 62 63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 64 + *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb(); 65 *(vuip)GRU_INT_CLEAR = 0; mb(); 66 } 67 68 static void 69 + alcor_isa_mask_and_ack_irq(struct irq_data *d) 70 { 71 + i8259a_mask_and_ack_irq(d); 72 73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); ··· 77 78 static struct irq_chip alcor_irq_type = { 79 .name = "ALCOR", 80 + .irq_unmask = alcor_enable_irq, 81 + .irq_mask = alcor_disable_irq, 82 + .irq_mask_ack = alcor_mask_and_ack_irq, 83 }; 84 85 static void ··· 126 if (i >= 16+20 && i <= 16+30) 127 continue; 128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); 129 + irq_set_status_flags(i, IRQ_LEVEL); 130 } 131 + i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq; 132 133 init_i8259a_irqs(); 134 common_init_isa_dma();
+8 -8
arch/alpha/kernel/sys_cabriolet.c
··· 46 } 47 48 static inline void 49 - cabriolet_enable_irq(unsigned int irq) 50 { 51 - cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq)); 52 } 53 54 static void 55 - cabriolet_disable_irq(unsigned int irq) 56 { 57 - cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); 58 } 59 60 static struct irq_chip cabriolet_irq_type = { 61 .name = "CABRIOLET", 62 - .unmask = cabriolet_enable_irq, 63 - .mask = cabriolet_disable_irq, 64 - .mask_ack = cabriolet_disable_irq, 65 }; 66 67 static void ··· 107 for (i = 16; i < 35; ++i) { 108 set_irq_chip_and_handler(i, &cabriolet_irq_type, 109 handle_level_irq); 110 - irq_to_desc(i)->status |= IRQ_LEVEL; 111 } 112 } 113
··· 46 } 47 48 static inline void 49 + cabriolet_enable_irq(struct irq_data *d) 50 { 51 + cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq)); 52 } 53 54 static void 55 + cabriolet_disable_irq(struct irq_data *d) 56 { 57 + cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq); 58 } 59 60 static struct irq_chip cabriolet_irq_type = { 61 .name = "CABRIOLET", 62 + .irq_unmask = cabriolet_enable_irq, 63 + .irq_mask = cabriolet_disable_irq, 64 + .irq_mask_ack = cabriolet_disable_irq, 65 }; 66 67 static void ··· 107 for (i = 16; i < 35; ++i) { 108 set_irq_chip_and_handler(i, &cabriolet_irq_type, 109 handle_level_irq); 110 + irq_set_status_flags(i, IRQ_LEVEL); 111 } 112 } 113
+27 -25
arch/alpha/kernel/sys_dp264.c
··· 98 } 99 100 static void 101 - dp264_enable_irq(unsigned int irq) 102 { 103 spin_lock(&dp264_irq_lock); 104 - cached_irq_mask |= 1UL << irq; 105 tsunami_update_irq_hw(cached_irq_mask); 106 spin_unlock(&dp264_irq_lock); 107 } 108 109 static void 110 - dp264_disable_irq(unsigned int irq) 111 { 112 spin_lock(&dp264_irq_lock); 113 - cached_irq_mask &= ~(1UL << irq); 114 tsunami_update_irq_hw(cached_irq_mask); 115 spin_unlock(&dp264_irq_lock); 116 } 117 118 static void 119 - clipper_enable_irq(unsigned int irq) 120 { 121 spin_lock(&dp264_irq_lock); 122 - cached_irq_mask |= 1UL << (irq - 16); 123 tsunami_update_irq_hw(cached_irq_mask); 124 spin_unlock(&dp264_irq_lock); 125 } 126 127 static void 128 - clipper_disable_irq(unsigned int irq) 129 { 130 spin_lock(&dp264_irq_lock); 131 - cached_irq_mask &= ~(1UL << (irq - 16)); 132 tsunami_update_irq_hw(cached_irq_mask); 133 spin_unlock(&dp264_irq_lock); 134 } ··· 149 } 150 151 static int 152 - dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) 153 - { 154 spin_lock(&dp264_irq_lock); 155 - cpu_set_irq_affinity(irq, *affinity); 156 tsunami_update_irq_hw(cached_irq_mask); 157 spin_unlock(&dp264_irq_lock); 158 ··· 161 } 162 163 static int 164 - clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) 165 - { 166 spin_lock(&dp264_irq_lock); 167 - cpu_set_irq_affinity(irq - 16, *affinity); 168 tsunami_update_irq_hw(cached_irq_mask); 169 spin_unlock(&dp264_irq_lock); 170 ··· 173 } 174 175 static struct irq_chip dp264_irq_type = { 176 - .name = "DP264", 177 - .unmask = dp264_enable_irq, 178 - .mask = dp264_disable_irq, 179 - .mask_ack = dp264_disable_irq, 180 - .set_affinity = dp264_set_affinity, 181 }; 182 183 static struct irq_chip clipper_irq_type = { 184 - .name = "CLIPPER", 185 - .unmask = clipper_enable_irq, 186 - .mask = clipper_disable_irq, 187 - .mask_ack = clipper_disable_irq, 188 - .set_affinity = clipper_set_affinity, 189 }; 190 191 static void ··· 270 { 271 long i; 272 for (i = imin; i <= imax; ++i) { 273 - irq_to_desc(i)->status |= IRQ_LEVEL; 274 set_irq_chip_and_handler(i, ops, handle_level_irq); 275 } 276 } 277
··· 98 } 99 100 static void 101 + dp264_enable_irq(struct irq_data *d) 102 { 103 spin_lock(&dp264_irq_lock); 104 + cached_irq_mask |= 1UL << d->irq; 105 tsunami_update_irq_hw(cached_irq_mask); 106 spin_unlock(&dp264_irq_lock); 107 } 108 109 static void 110 + dp264_disable_irq(struct irq_data *d) 111 { 112 spin_lock(&dp264_irq_lock); 113 + cached_irq_mask &= ~(1UL << d->irq); 114 tsunami_update_irq_hw(cached_irq_mask); 115 spin_unlock(&dp264_irq_lock); 116 } 117 118 static void 119 + clipper_enable_irq(struct irq_data *d) 120 { 121 spin_lock(&dp264_irq_lock); 122 + cached_irq_mask |= 1UL << (d->irq - 16); 123 tsunami_update_irq_hw(cached_irq_mask); 124 spin_unlock(&dp264_irq_lock); 125 } 126 127 static void 128 + clipper_disable_irq(struct irq_data *d) 129 { 130 spin_lock(&dp264_irq_lock); 131 + cached_irq_mask &= ~(1UL << (d->irq - 16)); 132 tsunami_update_irq_hw(cached_irq_mask); 133 spin_unlock(&dp264_irq_lock); 134 } ··· 149 } 150 151 static int 152 + dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity, 153 + bool force) 154 + { 155 spin_lock(&dp264_irq_lock); 156 + cpu_set_irq_affinity(d->irq, *affinity); 157 tsunami_update_irq_hw(cached_irq_mask); 158 spin_unlock(&dp264_irq_lock); 159 ··· 160 } 161 162 static int 163 + clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity, 164 + bool force) 165 + { 166 spin_lock(&dp264_irq_lock); 167 + cpu_set_irq_affinity(d->irq - 16, *affinity); 168 tsunami_update_irq_hw(cached_irq_mask); 169 spin_unlock(&dp264_irq_lock); 170 ··· 171 } 172 173 static struct irq_chip dp264_irq_type = { 174 + .name = "DP264", 175 + .irq_unmask = dp264_enable_irq, 176 + .irq_mask = dp264_disable_irq, 177 + .irq_mask_ack = dp264_disable_irq, 178 + .irq_set_affinity = dp264_set_affinity, 179 }; 180 181 static struct irq_chip clipper_irq_type = { 182 + .name = "CLIPPER", 183 + .irq_unmask = clipper_enable_irq, 184 + .irq_mask = clipper_disable_irq, 185 + .irq_mask_ack = clipper_disable_irq, 186 + .irq_set_affinity = clipper_set_affinity, 187 }; 188 189 static void ··· 268 { 269 long i; 270 for (i = imin; i <= imax; ++i) { 271 set_irq_chip_and_handler(i, ops, handle_level_irq); 272 + irq_set_status_flags(i, IRQ_LEVEL); 273 } 274 } 275
+9 -9
arch/alpha/kernel/sys_eb64p.c
··· 44 } 45 46 static inline void 47 - eb64p_enable_irq(unsigned int irq) 48 { 49 - eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 50 } 51 52 static void 53 - eb64p_disable_irq(unsigned int irq) 54 { 55 - eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); 56 } 57 58 static struct irq_chip eb64p_irq_type = { 59 .name = "EB64P", 60 - .unmask = eb64p_enable_irq, 61 - .mask = eb64p_disable_irq, 62 - .mask_ack = eb64p_disable_irq, 63 }; 64 65 static void ··· 118 init_i8259a_irqs(); 119 120 for (i = 16; i < 32; ++i) { 121 - irq_to_desc(i)->status |= IRQ_LEVEL; 122 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); 123 - } 124 125 common_init_isa_dma(); 126 setup_irq(16+5, &isa_cascade_irqaction);
··· 44 } 45 46 static inline void 47 + eb64p_enable_irq(struct irq_data *d) 48 { 49 + eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); 50 } 51 52 static void 53 + eb64p_disable_irq(struct irq_data *d) 54 { 55 + eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq); 56 } 57 58 static struct irq_chip eb64p_irq_type = { 59 .name = "EB64P", 60 + .irq_unmask = eb64p_enable_irq, 61 + .irq_mask = eb64p_disable_irq, 62 + .irq_mask_ack = eb64p_disable_irq, 63 }; 64 65 static void ··· 118 init_i8259a_irqs(); 119 120 for (i = 16; i < 32; ++i) { 121 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); 122 + irq_set_status_flags(i, IRQ_LEVEL); 123 + } 124 125 common_init_isa_dma(); 126 setup_irq(16+5, &isa_cascade_irqaction);
+8 -6
arch/alpha/kernel/sys_eiger.c
··· 51 } 52 53 static inline void 54 - eiger_enable_irq(unsigned int irq) 55 { 56 unsigned long mask; 57 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 58 eiger_update_irq_hw(irq, mask); 59 } 60 61 static void 62 - eiger_disable_irq(unsigned int irq) 63 { 64 unsigned long mask; 65 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 66 eiger_update_irq_hw(irq, mask); ··· 70 71 static struct irq_chip eiger_irq_type = { 72 .name = "EIGER", 73 - .unmask = eiger_enable_irq, 74 - .mask = eiger_disable_irq, 75 - .mask_ack = eiger_disable_irq, 76 }; 77 78 static void ··· 138 init_i8259a_irqs(); 139 140 for (i = 16; i < 128; ++i) { 141 - irq_to_desc(i)->status |= IRQ_LEVEL; 142 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); 143 } 144 } 145
··· 51 } 52 53 static inline void 54 + eiger_enable_irq(struct irq_data *d) 55 { 56 + unsigned int irq = d->irq; 57 unsigned long mask; 58 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 59 eiger_update_irq_hw(irq, mask); 60 } 61 62 static void 63 + eiger_disable_irq(struct irq_data *d) 64 { 65 + unsigned int irq = d->irq; 66 unsigned long mask; 67 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 68 eiger_update_irq_hw(irq, mask); ··· 68 69 static struct irq_chip eiger_irq_type = { 70 .name = "EIGER", 71 + .irq_unmask = eiger_enable_irq, 72 + .irq_mask = eiger_disable_irq, 73 + .irq_mask_ack = eiger_disable_irq, 74 }; 75 76 static void ··· 136 init_i8259a_irqs(); 137 138 for (i = 16; i < 128; ++i) { 139 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); 140 + irq_set_status_flags(i, IRQ_LEVEL); 141 } 142 } 143
+12 -12
arch/alpha/kernel/sys_jensen.c
··· 63 */ 64 65 static void 66 - jensen_local_enable(unsigned int irq) 67 { 68 /* the parport is really hw IRQ 1, silly Jensen. */ 69 - if (irq == 7) 70 - i8259a_enable_irq(1); 71 } 72 73 static void 74 - jensen_local_disable(unsigned int irq) 75 { 76 /* the parport is really hw IRQ 1, silly Jensen. */ 77 - if (irq == 7) 78 - i8259a_disable_irq(1); 79 } 80 81 static void 82 - jensen_local_mask_ack(unsigned int irq) 83 { 84 /* the parport is really hw IRQ 1, silly Jensen. */ 85 - if (irq == 7) 86 - i8259a_mask_and_ack_irq(1); 87 } 88 89 static struct irq_chip jensen_local_irq_type = { 90 .name = "LOCAL", 91 - .unmask = jensen_local_enable, 92 - .mask = jensen_local_disable, 93 - .mask_ack = jensen_local_mask_ack, 94 }; 95 96 static void
··· 63 */ 64 65 static void 66 + jensen_local_enable(struct irq_data *d) 67 { 68 /* the parport is really hw IRQ 1, silly Jensen. */ 69 + if (d->irq == 7) 70 + i8259a_enable_irq(d); 71 } 72 73 static void 74 + jensen_local_disable(struct irq_data *d) 75 { 76 /* the parport is really hw IRQ 1, silly Jensen. */ 77 + if (d->irq == 7) 78 + i8259a_disable_irq(d); 79 } 80 81 static void 82 + jensen_local_mask_ack(struct irq_data *d) 83 { 84 /* the parport is really hw IRQ 1, silly Jensen. */ 85 + if (d->irq == 7) 86 + i8259a_mask_and_ack_irq(d); 87 } 88 89 static struct irq_chip jensen_local_irq_type = { 90 .name = "LOCAL", 91 + .irq_unmask = jensen_local_enable, 92 + .irq_mask = jensen_local_disable, 93 + .irq_mask_ack = jensen_local_mask_ack, 94 }; 95 96 static void
+19 -23
arch/alpha/kernel/sys_marvel.c
··· 104 } 105 106 static void 107 - io7_enable_irq(unsigned int irq) 108 { 109 volatile unsigned long *ctl; 110 struct io7 *io7; 111 112 ctl = io7_get_irq_ctl(irq, &io7); ··· 116 __func__, irq); 117 return; 118 } 119 - 120 spin_lock(&io7->irq_lock); 121 *ctl |= 1UL << 24; 122 mb(); ··· 125 } 126 127 static void 128 - io7_disable_irq(unsigned int irq) 129 { 130 volatile unsigned long *ctl; 131 struct io7 *io7; 132 133 ctl = io7_get_irq_ctl(irq, &io7); ··· 137 __func__, irq); 138 return; 139 } 140 - 141 spin_lock(&io7->irq_lock); 142 *ctl &= ~(1UL << 24); 143 mb(); ··· 146 } 147 148 static void 149 - marvel_irq_noop(unsigned int irq) 150 - { 151 - return; 152 - } 153 - 154 - static unsigned int 155 - marvel_irq_noop_return(unsigned int irq) 156 - { 157 - return 0; 158 } 159 160 static struct irq_chip marvel_legacy_irq_type = { 161 .name = "LEGACY", 162 - .mask = marvel_irq_noop, 163 - .unmask = marvel_irq_noop, 164 }; 165 166 static struct irq_chip io7_lsi_irq_type = { 167 .name = "LSI", 168 - .unmask = io7_enable_irq, 169 - .mask = io7_disable_irq, 170 - .mask_ack = io7_disable_irq, 171 }; 172 173 static struct irq_chip io7_msi_irq_type = { 174 .name = "MSI", 175 - .unmask = io7_enable_irq, 176 - .mask = io7_disable_irq, 177 - .ack = marvel_irq_noop, 178 }; 179 180 static void ··· 276 277 /* Set up the lsi irqs. */ 278 for (i = 0; i < 128; ++i) { 279 - irq_to_desc(base + i)->status |= IRQ_LEVEL; 280 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); 281 } 282 283 /* Disable the implemented irqs in hardware. */ ··· 290 291 /* Set up the msi irqs. */ 292 for (i = 128; i < (128 + 512); ++i) { 293 - irq_to_desc(base + i)->status |= IRQ_LEVEL; 294 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); 295 } 296 297 for (i = 0; i < 16; ++i)
··· 104 } 105 106 static void 107 + io7_enable_irq(struct irq_data *d) 108 { 109 volatile unsigned long *ctl; 110 + unsigned int irq = d->irq; 111 struct io7 *io7; 112 113 ctl = io7_get_irq_ctl(irq, &io7); ··· 115 __func__, irq); 116 return; 117 } 118 + 119 spin_lock(&io7->irq_lock); 120 *ctl |= 1UL << 24; 121 mb(); ··· 124 } 125 126 static void 127 + io7_disable_irq(struct irq_data *d) 128 { 129 volatile unsigned long *ctl; 130 + unsigned int irq = d->irq; 131 struct io7 *io7; 132 133 ctl = io7_get_irq_ctl(irq, &io7); ··· 135 __func__, irq); 136 return; 137 } 138 + 139 spin_lock(&io7->irq_lock); 140 *ctl &= ~(1UL << 24); 141 mb(); ··· 144 } 145 146 static void 147 + marvel_irq_noop(struct irq_data *d) 148 + { 149 + return; 150 } 151 152 static struct irq_chip marvel_legacy_irq_type = { 153 .name = "LEGACY", 154 + .irq_mask = marvel_irq_noop, 155 + .irq_unmask = marvel_irq_noop, 156 }; 157 158 static struct irq_chip io7_lsi_irq_type = { 159 .name = "LSI", 160 + .irq_unmask = io7_enable_irq, 161 + .irq_mask = io7_disable_irq, 162 + .irq_mask_ack = io7_disable_irq, 163 }; 164 165 static struct irq_chip io7_msi_irq_type = { 166 .name = "MSI", 167 + .irq_unmask = io7_enable_irq, 168 + .irq_mask = io7_disable_irq, 169 + .irq_ack = marvel_irq_noop, 170 }; 171 172 static void ··· 280 281 /* Set up the lsi irqs. */ 282 for (i = 0; i < 128; ++i) { 283 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); 284 + irq_set_status_flags(i, IRQ_LEVEL); 285 } 286 287 /* Disable the implemented irqs in hardware. */ ··· 294 295 /* Set up the msi irqs. */ 296 for (i = 128; i < (128 + 512); ++i) { 297 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); 298 + irq_set_status_flags(i, IRQ_LEVEL); 299 } 300 301 for (i = 0; i < 16; ++i)
+8 -8
arch/alpha/kernel/sys_mikasa.c
··· 43 } 44 45 static inline void 46 - mikasa_enable_irq(unsigned int irq) 47 { 48 - mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); 49 } 50 51 static void 52 - mikasa_disable_irq(unsigned int irq) 53 { 54 - mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); 55 } 56 57 static struct irq_chip mikasa_irq_type = { 58 .name = "MIKASA", 59 - .unmask = mikasa_enable_irq, 60 - .mask = mikasa_disable_irq, 61 - .mask_ack = mikasa_disable_irq, 62 }; 63 64 static void ··· 98 mikasa_update_irq_hw(0); 99 100 for (i = 16; i < 32; ++i) { 101 - irq_to_desc(i)->status |= IRQ_LEVEL; 102 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); 103 } 104 105 init_i8259a_irqs();
··· 43 } 44 45 static inline void 46 + mikasa_enable_irq(struct irq_data *d) 47 { 48 + mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16)); 49 } 50 51 static void 52 + mikasa_disable_irq(struct irq_data *d) 53 { 54 + mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16))); 55 } 56 57 static struct irq_chip mikasa_irq_type = { 58 .name = "MIKASA", 59 + .irq_unmask = mikasa_enable_irq, 60 + .irq_mask = mikasa_disable_irq, 61 + .irq_mask_ack = mikasa_disable_irq, 62 }; 63 64 static void ··· 98 mikasa_update_irq_hw(0); 99 100 for (i = 16; i < 32; ++i) { 101 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); 102 + irq_set_status_flags(i, IRQ_LEVEL); 103 } 104 105 init_i8259a_irqs();
+8 -8
arch/alpha/kernel/sys_noritake.c
··· 48 } 49 50 static void 51 - noritake_enable_irq(unsigned int irq) 52 { 53 - noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); 54 } 55 56 static void 57 - noritake_disable_irq(unsigned int irq) 58 { 59 - noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); 60 } 61 62 static struct irq_chip noritake_irq_type = { 63 .name = "NORITAKE", 64 - .unmask = noritake_enable_irq, 65 - .mask = noritake_disable_irq, 66 - .mask_ack = noritake_disable_irq, 67 }; 68 69 static void ··· 127 outw(0, 0x54c); 128 129 for (i = 16; i < 48; ++i) { 130 - irq_to_desc(i)->status |= IRQ_LEVEL; 131 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); 132 } 133 134 init_i8259a_irqs();
··· 48 } 49 50 static void 51 + noritake_enable_irq(struct irq_data *d) 52 { 53 + noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16)); 54 } 55 56 static void 57 + noritake_disable_irq(struct irq_data *d) 58 { 59 + noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16))); 60 } 61 62 static struct irq_chip noritake_irq_type = { 63 .name = "NORITAKE", 64 + .irq_unmask = noritake_enable_irq, 65 + .irq_mask = noritake_disable_irq, 66 + .irq_mask_ack = noritake_disable_irq, 67 }; 68 69 static void ··· 127 outw(0, 0x54c); 128 129 for (i = 16; i < 48; ++i) { 130 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); 131 + irq_set_status_flags(i, IRQ_LEVEL); 132 } 133 134 init_i8259a_irqs();
+10 -7
arch/alpha/kernel/sys_rawhide.c
··· 56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) 57 58 static inline void 59 - rawhide_enable_irq(unsigned int irq) 60 { 61 unsigned int mask, hose; 62 63 irq -= 16; 64 hose = irq / 24; ··· 77 } 78 79 static void 80 - rawhide_disable_irq(unsigned int irq) 81 { 82 unsigned int mask, hose; 83 84 irq -= 16; 85 hose = irq / 24; ··· 98 } 99 100 static void 101 - rawhide_mask_and_ack_irq(unsigned int irq) 102 { 103 unsigned int mask, mask1, hose; 104 105 irq -= 16; 106 hose = irq / 24; ··· 126 127 static struct irq_chip rawhide_irq_type = { 128 .name = "RAWHIDE", 129 - .unmask = rawhide_enable_irq, 130 - .mask = rawhide_disable_irq, 131 - .mask_ack = rawhide_mask_and_ack_irq, 132 }; 133 134 static void ··· 180 } 181 182 for (i = 16; i < 128; ++i) { 183 - irq_to_desc(i)->status |= IRQ_LEVEL; 184 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); 185 } 186 187 init_i8259a_irqs();
··· 56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) 57 58 static inline void 59 + rawhide_enable_irq(struct irq_data *d) 60 { 61 unsigned int mask, hose; 62 + unsigned int irq = d->irq; 63 64 irq -= 16; 65 hose = irq / 24; ··· 76 } 77 78 static void 79 + rawhide_disable_irq(struct irq_data *d) 80 { 81 unsigned int mask, hose; 82 + unsigned int irq = d->irq; 83 84 irq -= 16; 85 hose = irq / 24; ··· 96 } 97 98 static void 99 + rawhide_mask_and_ack_irq(struct irq_data *d) 100 { 101 unsigned int mask, mask1, hose; 102 + unsigned int irq = d->irq; 103 104 irq -= 16; 105 hose = irq / 24; ··· 123 124 static struct irq_chip rawhide_irq_type = { 125 .name = "RAWHIDE", 126 + .irq_unmask = rawhide_enable_irq, 127 + .irq_mask = rawhide_disable_irq, 128 + .irq_mask_ack = rawhide_mask_and_ack_irq, 129 }; 130 131 static void ··· 177 } 178 179 for (i = 16; i < 128; ++i) { 180 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); 181 + irq_set_status_flags(i, IRQ_LEVEL); 182 } 183 184 init_i8259a_irqs();
+8 -8
arch/alpha/kernel/sys_rx164.c
··· 47 } 48 49 static inline void 50 - rx164_enable_irq(unsigned int irq) 51 { 52 - rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 53 } 54 55 static void 56 - rx164_disable_irq(unsigned int irq) 57 { 58 - rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 59 } 60 61 static struct irq_chip rx164_irq_type = { 62 .name = "RX164", 63 - .unmask = rx164_enable_irq, 64 - .mask = rx164_disable_irq, 65 - .mask_ack = rx164_disable_irq, 66 }; 67 68 static void ··· 99 100 rx164_update_irq_hw(0); 101 for (i = 16; i < 40; ++i) { 102 - irq_to_desc(i)->status |= IRQ_LEVEL; 103 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); 104 } 105 106 init_i8259a_irqs();
··· 47 } 48 49 static inline void 50 + rx164_enable_irq(struct irq_data *d) 51 { 52 + rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); 53 } 54 55 static void 56 + rx164_disable_irq(struct irq_data *d) 57 { 58 + rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); 59 } 60 61 static struct irq_chip rx164_irq_type = { 62 .name = "RX164", 63 + .irq_unmask = rx164_enable_irq, 64 + .irq_mask = rx164_disable_irq, 65 + .irq_mask_ack = rx164_disable_irq, 66 }; 67 68 static void ··· 99 100 rx164_update_irq_hw(0); 101 for (i = 16; i < 40; ++i) { 102 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); 103 + irq_set_status_flags(i, IRQ_LEVEL); 104 } 105 106 init_i8259a_irqs();
+10 -10
arch/alpha/kernel/sys_sable.c
··· 443 /* GENERIC irq routines */ 444 445 static inline void 446 - sable_lynx_enable_irq(unsigned int irq) 447 { 448 unsigned long bit, mask; 449 450 - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 451 spin_lock(&sable_lynx_irq_lock); 452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); 453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 459 } 460 461 static void 462 - sable_lynx_disable_irq(unsigned int irq) 463 { 464 unsigned long bit, mask; 465 466 - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 467 spin_lock(&sable_lynx_irq_lock); 468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 475 } 476 477 static void 478 - sable_lynx_mask_and_ack_irq(unsigned int irq) 479 { 480 unsigned long bit, mask; 481 482 - bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 483 spin_lock(&sable_lynx_irq_lock); 484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 489 490 static struct irq_chip sable_lynx_irq_type = { 491 .name = "SABLE/LYNX", 492 - .unmask = sable_lynx_enable_irq, 493 - .mask = sable_lynx_disable_irq, 494 - .mask_ack = sable_lynx_mask_and_ack_irq, 495 }; 496 497 static void ··· 518 long i; 519 520 for (i = 0; i < nr_of_irqs; ++i) { 521 - irq_to_desc(i)->status |= IRQ_LEVEL; 522 set_irq_chip_and_handler(i, &sable_lynx_irq_type, 523 handle_level_irq); 524 } 525 526 common_init_isa_dma();
··· 443 /* GENERIC irq routines */ 444 445 static inline void 446 + sable_lynx_enable_irq(struct irq_data *d) 447 { 448 unsigned long bit, mask; 449 450 + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; 451 spin_lock(&sable_lynx_irq_lock); 452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); 453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 459 } 460 461 static void 462 + sable_lynx_disable_irq(struct irq_data *d) 463 { 464 unsigned long bit, mask; 465 466 + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; 467 spin_lock(&sable_lynx_irq_lock); 468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 475 } 476 477 static void 478 + sable_lynx_mask_and_ack_irq(struct irq_data *d) 479 { 480 unsigned long bit, mask; 481 482 + bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; 483 spin_lock(&sable_lynx_irq_lock); 484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); ··· 489 490 static struct irq_chip sable_lynx_irq_type = { 491 .name = "SABLE/LYNX", 492 + .irq_unmask = sable_lynx_enable_irq, 493 + .irq_mask = sable_lynx_disable_irq, 494 + .irq_mask_ack = sable_lynx_mask_and_ack_irq, 495 }; 496 497 static void ··· 518 long i; 519 520 for (i = 0; i < nr_of_irqs; ++i) { 521 set_irq_chip_and_handler(i, &sable_lynx_irq_type, 522 handle_level_irq); 523 + irq_set_status_flags(i, IRQ_LEVEL); 524 } 525 526 common_init_isa_dma();
+8 -6
arch/alpha/kernel/sys_takara.c
··· 45 } 46 47 static inline void 48 - takara_enable_irq(unsigned int irq) 49 { 50 unsigned long mask; 51 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 52 takara_update_irq_hw(irq, mask); 53 } 54 55 static void 56 - takara_disable_irq(unsigned int irq) 57 { 58 unsigned long mask; 59 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 60 takara_update_irq_hw(irq, mask); ··· 64 65 static struct irq_chip takara_irq_type = { 66 .name = "TAKARA", 67 - .unmask = takara_enable_irq, 68 - .mask = takara_disable_irq, 69 - .mask_ack = takara_disable_irq, 70 }; 71 72 static void ··· 138 takara_update_irq_hw(i, -1); 139 140 for (i = 16; i < 128; ++i) { 141 - irq_to_desc(i)->status |= IRQ_LEVEL; 142 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); 143 } 144 145 common_init_isa_dma();
··· 45 } 46 47 static inline void 48 + takara_enable_irq(struct irq_data *d) 49 { 50 + unsigned int irq = d->irq; 51 unsigned long mask; 52 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 53 takara_update_irq_hw(irq, mask); 54 } 55 56 static void 57 + takara_disable_irq(struct irq_data *d) 58 { 59 + unsigned int irq = d->irq; 60 unsigned long mask; 61 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 62 takara_update_irq_hw(irq, mask); ··· 62 63 static struct irq_chip takara_irq_type = { 64 .name = "TAKARA", 65 + .irq_unmask = takara_enable_irq, 66 + .irq_mask = takara_disable_irq, 67 + .irq_mask_ack = takara_disable_irq, 68 }; 69 70 static void ··· 136 takara_update_irq_hw(i, -1); 137 138 for (i = 16; i < 128; ++i) { 139 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); 140 + irq_set_status_flags(i, IRQ_LEVEL); 141 } 142 143 common_init_isa_dma();
+12 -9
arch/alpha/kernel/sys_titan.c
··· 112 } 113 114 static inline void 115 - titan_enable_irq(unsigned int irq) 116 { 117 spin_lock(&titan_irq_lock); 118 titan_cached_irq_mask |= 1UL << (irq - 16); 119 titan_update_irq_hw(titan_cached_irq_mask); ··· 122 } 123 124 static inline void 125 - titan_disable_irq(unsigned int irq) 126 { 127 spin_lock(&titan_irq_lock); 128 titan_cached_irq_mask &= ~(1UL << (irq - 16)); 129 titan_update_irq_hw(titan_cached_irq_mask); ··· 146 } 147 148 static int 149 - titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) 150 { 151 spin_lock(&titan_irq_lock); 152 titan_cpu_set_irq_affinity(irq - 16, *affinity); ··· 178 { 179 long i; 180 for (i = imin; i <= imax; ++i) { 181 - irq_to_desc(i)->status |= IRQ_LEVEL; 182 set_irq_chip_and_handler(i, ops, handle_level_irq); 183 } 184 } 185 186 static struct irq_chip titan_irq_type = { 187 - .name = "TITAN", 188 - .unmask = titan_enable_irq, 189 - .mask = titan_disable_irq, 190 - .mask_ack = titan_disable_irq, 191 - .set_affinity = titan_set_irq_affinity, 192 }; 193 194 static irqreturn_t
··· 112 } 113 114 static inline void 115 + titan_enable_irq(struct irq_data *d) 116 { 117 + unsigned int irq = d->irq; 118 spin_lock(&titan_irq_lock); 119 titan_cached_irq_mask |= 1UL << (irq - 16); 120 titan_update_irq_hw(titan_cached_irq_mask); ··· 121 } 122 123 static inline void 124 + titan_disable_irq(struct irq_data *d) 125 { 126 + unsigned int irq = d->irq; 127 spin_lock(&titan_irq_lock); 128 titan_cached_irq_mask &= ~(1UL << (irq - 16)); 129 titan_update_irq_hw(titan_cached_irq_mask); ··· 144 } 145 146 static int 147 + titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, 148 + bool force) 149 { 150 spin_lock(&titan_irq_lock); 151 titan_cpu_set_irq_affinity(d->irq - 16, *affinity); ··· 175 { 176 long i; 177 for (i = imin; i <= imax; ++i) { 178 set_irq_chip_and_handler(i, ops, handle_level_irq); 179 + irq_set_status_flags(i, IRQ_LEVEL); 180 } 181 } 182 183 static struct irq_chip titan_irq_type = { 184 + .name = "TITAN", 185 + .irq_unmask = titan_enable_irq, 186 + .irq_mask = titan_disable_irq, 187 + .irq_mask_ack = titan_disable_irq, 188 + .irq_set_affinity = titan_set_irq_affinity, 189 }; 190 191 static irqreturn_t
+19 -13
arch/alpha/kernel/sys_wildfire.c
··· 104 } 105 106 static void 107 - wildfire_enable_irq(unsigned int irq) 108 { 109 if (irq < 16) 110 - i8259a_enable_irq(irq); 111 112 spin_lock(&wildfire_irq_lock); 113 set_bit(irq, &cached_irq_mask); ··· 118 } 119 120 static void 121 - wildfire_disable_irq(unsigned int irq) 122 { 123 if (irq < 16) 124 - i8259a_disable_irq(irq); 125 126 spin_lock(&wildfire_irq_lock); 127 clear_bit(irq, &cached_irq_mask); ··· 132 } 133 134 static void 135 - wildfire_mask_and_ack_irq(unsigned int irq) 136 { 137 if (irq < 16) 138 - i8259a_mask_and_ack_irq(irq); 139 140 spin_lock(&wildfire_irq_lock); 141 clear_bit(irq, &cached_irq_mask); ··· 147 148 static struct irq_chip wildfire_irq_type = { 149 .name = "WILDFIRE", 150 - .unmask = wildfire_enable_irq, 151 - .mask = wildfire_disable_irq, 152 - .mask_ack = wildfire_mask_and_ack_irq, 153 }; 154 155 static void __init ··· 183 for (i = 0; i < 16; ++i) { 184 if (i == 2) 185 continue; 186 - irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; 187 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 188 handle_level_irq); 189 } 190 191 - irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL; 192 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, 193 handle_level_irq); 194 for (i = 40; i < 64; ++i) { 195 - irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; 196 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 197 handle_level_irq); 198 } 199 200 - setup_irq(32+irq_bias, &isa_enable); 201 } 202 203 static void __init
··· 104 } 105 106 static void 107 + wildfire_enable_irq(struct irq_data *d) 108 { 109 + unsigned int irq = d->irq; 110 + 111 if (irq < 16) 112 + i8259a_enable_irq(d); 113 114 spin_lock(&wildfire_irq_lock); 115 set_bit(irq, &cached_irq_mask); ··· 116 } 117 118 static void 119 + wildfire_disable_irq(struct irq_data *d) 120 { 121 + unsigned int irq = d->irq; 122 + 123 if (irq < 16) 124 + i8259a_disable_irq(d); 125 126 spin_lock(&wildfire_irq_lock); 127 clear_bit(irq, &cached_irq_mask); ··· 128 } 129 130 static void 131 + wildfire_mask_and_ack_irq(struct irq_data *d) 132 { 133 + unsigned int irq = d->irq; 134 + 135 if (irq < 16) 136 + i8259a_mask_and_ack_irq(d); 137 138 spin_lock(&wildfire_irq_lock); 139 clear_bit(irq, &cached_irq_mask); ··· 141 142 static struct irq_chip wildfire_irq_type = { 143 .name = "WILDFIRE", 144 + .irq_unmask = wildfire_enable_irq, 145 + .irq_mask = wildfire_disable_irq, 146 + .irq_mask_ack = wildfire_mask_and_ack_irq, 147 }; 148 149 static void __init ··· 177 for (i = 0; i < 16; ++i) { 178 if (i == 2) 179 continue; 180 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 181 handle_level_irq); 182 + irq_set_status_flags(i + irq_bias, IRQ_LEVEL); 183 } 184 185 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, 186 handle_level_irq); 187 + irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); 188 for (i = 40; i < 64; ++i) { 189 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 190 handle_level_irq); 191 + irq_set_status_flags(i + irq_bias, IRQ_LEVEL); 192 } 193 194 + setup_irq(32+irq_bias, &isa_enable); 195 } 196 197 static void __init