Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

genirq: Wake interrupt threads immediately when changing affinity

The affinity setting of interrupt threads happens in the context of the
thread when the thread is woken up by a hard interrupt. As this can be an
arbitrary time after changing the affinity, the thread can become runnable
on an isolated CPU and cause isolation disruption.

Avoid this by checking the set affinity request in irq_wait_for_interrupt()
and waking the threads immediately when the affinity is modified.

Note that this is of the most benefit on systems where the interrupt
affinity itself does not need to be deferred to the interrupt handler, but
even where that's not the case, the total disruption will be less.

Signed-off-by: Crystal Wood <crwood@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240122235353.15235-1-crwood@redhat.com

authored by

Crystal Wood and committed by
Thomas Gleixner
c99303a2 ee4c1592

+55 -54
+55 -54
kernel/irq/manage.c
··· 192 192 struct irqaction *action; 193 193 194 194 for_each_action_of_desc(desc, action) { 195 - if (action->thread) 195 + if (action->thread) { 196 196 set_bit(IRQTF_AFFINITY, &action->thread_flags); 197 - if (action->secondary && action->secondary->thread) 197 + wake_up_process(action->thread); 198 + } 199 + if (action->secondary && action->secondary->thread) { 198 200 set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags); 201 + wake_up_process(action->secondary->thread); 202 + } 199 203 } 200 204 } 201 205 ··· 1053 1049 return IRQ_NONE; 1054 1050 } 1055 1051 1056 - static int irq_wait_for_interrupt(struct irqaction *action) 1052 + #ifdef CONFIG_SMP 1053 + /* 1054 + * Check whether we need to change the affinity of the interrupt thread. 1055 + */ 1056 + static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) 1057 + { 1058 + cpumask_var_t mask; 1059 + bool valid = false; 1060 + 1061 + if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) 1062 + return; 1063 + 1064 + __set_current_state(TASK_RUNNING); 1065 + 1066 + /* 1067 + * In case we are out of memory we set IRQTF_AFFINITY again and 1068 + * try again next time 1069 + */ 1070 + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { 1071 + set_bit(IRQTF_AFFINITY, &action->thread_flags); 1072 + return; 1073 + } 1074 + 1075 + raw_spin_lock_irq(&desc->lock); 1076 + /* 1077 + * This code is triggered unconditionally. Check the affinity 1078 + * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 
1079 + */ 1080 + if (cpumask_available(desc->irq_common_data.affinity)) { 1081 + const struct cpumask *m; 1082 + 1083 + m = irq_data_get_effective_affinity_mask(&desc->irq_data); 1084 + cpumask_copy(mask, m); 1085 + valid = true; 1086 + } 1087 + raw_spin_unlock_irq(&desc->lock); 1088 + 1089 + if (valid) 1090 + set_cpus_allowed_ptr(current, mask); 1091 + free_cpumask_var(mask); 1092 + } 1093 + #else 1094 + static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } 1095 + #endif 1096 + 1097 + static int irq_wait_for_interrupt(struct irq_desc *desc, 1098 + struct irqaction *action) 1057 1099 { 1058 1100 for (;;) { 1059 1101 set_current_state(TASK_INTERRUPTIBLE); 1102 + irq_thread_check_affinity(desc, action); 1060 1103 1061 1104 if (kthread_should_stop()) { 1062 1105 /* may need to run one last time */ ··· 1179 1128 raw_spin_unlock_irq(&desc->lock); 1180 1129 chip_bus_sync_unlock(desc); 1181 1130 } 1182 - 1183 - #ifdef CONFIG_SMP 1184 - /* 1185 - * Check whether we need to change the affinity of the interrupt thread. 1186 - */ 1187 - static void 1188 - irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) 1189 - { 1190 - cpumask_var_t mask; 1191 - bool valid = true; 1192 - 1193 - if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) 1194 - return; 1195 - 1196 - /* 1197 - * In case we are out of memory we set IRQTF_AFFINITY again and 1198 - * try again next time 1199 - */ 1200 - if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { 1201 - set_bit(IRQTF_AFFINITY, &action->thread_flags); 1202 - return; 1203 - } 1204 - 1205 - raw_spin_lock_irq(&desc->lock); 1206 - /* 1207 - * This code is triggered unconditionally. Check the affinity 1208 - * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 
1209 - */ 1210 - if (cpumask_available(desc->irq_common_data.affinity)) { 1211 - const struct cpumask *m; 1212 - 1213 - m = irq_data_get_effective_affinity_mask(&desc->irq_data); 1214 - cpumask_copy(mask, m); 1215 - } else { 1216 - valid = false; 1217 - } 1218 - raw_spin_unlock_irq(&desc->lock); 1219 - 1220 - if (valid) 1221 - set_cpus_allowed_ptr(current, mask); 1222 - free_cpumask_var(mask); 1223 - } 1224 - #else 1225 - static inline void 1226 - irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } 1227 - #endif 1228 1131 1229 1132 /* 1230 1133 * Interrupts which are not explicitly requested as threaded ··· 1317 1312 init_task_work(&on_exit_work, irq_thread_dtor); 1318 1313 task_work_add(current, &on_exit_work, TWA_NONE); 1319 1314 1320 - irq_thread_check_affinity(desc, action); 1321 - 1322 - while (!irq_wait_for_interrupt(action)) { 1315 + while (!irq_wait_for_interrupt(desc, action)) { 1323 1316 irqreturn_t action_ret; 1324 - 1325 - irq_thread_check_affinity(desc, action); 1326 1317 1327 1318 action_ret = handler_fn(desc, action); 1328 1319 if (action_ret == IRQ_WAKE_THREAD)