Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ACPI / processor: Rework processor throttling with work_on_cpu()

acpi_processor_set_throttling() uses set_cpus_allowed_ptr() to make
sure that the (struct acpi_processor)->acpi_processor_set_throttling()
callback will run on the right CPU. However, the function may be
called from a worker thread already bound to a different CPU in which
case that won't work.

Make acpi_processor_set_throttling() use work_on_cpu() as appropriate
instead of abusing set_cpus_allowed_ptr().

Reported-and-tested-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
Cc: All applicable <stable@vger.kernel.org>
[rjw: Changelog]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Authored by Lan Tianyu and committed by Rafael J. Wysocki
f3ca4164 cfbf8d48

+32 -37
drivers/acpi/processor_throttling.c
··· 56 56 int target_state; /* target T-state */ 57 57 }; 58 58 59 + struct acpi_processor_throttling_arg { 60 + struct acpi_processor *pr; 61 + int target_state; 62 + bool force; 63 + }; 64 + 59 65 #define THROTTLING_PRECHANGE (1) 60 66 #define THROTTLING_POSTCHANGE (2) 61 67 ··· 1066 1060 return 0; 1067 1061 } 1068 1062 1063 + static long acpi_processor_throttling_fn(void *data) 1064 + { 1065 + struct acpi_processor_throttling_arg *arg = data; 1066 + struct acpi_processor *pr = arg->pr; 1067 + 1068 + return pr->throttling.acpi_processor_set_throttling(pr, 1069 + arg->target_state, arg->force); 1070 + } 1071 + 1069 1072 int acpi_processor_set_throttling(struct acpi_processor *pr, 1070 1073 int state, bool force) 1071 1074 { 1072 - cpumask_var_t saved_mask; 1073 1075 int ret = 0; 1074 1076 unsigned int i; 1075 1077 struct acpi_processor *match_pr; 1076 1078 struct acpi_processor_throttling *p_throttling; 1079 + struct acpi_processor_throttling_arg arg; 1077 1080 struct throttling_tstate t_state; 1078 - cpumask_var_t online_throttling_cpus; 1079 1081 1080 1082 if (!pr) 1081 1083 return -EINVAL; ··· 1094 1080 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1095 1081 return -EINVAL; 1096 1082 1097 - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 1098 - return -ENOMEM; 1099 - 1100 - if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { 1101 - free_cpumask_var(saved_mask); 1102 - return -ENOMEM; 1103 - } 1104 - 1105 1083 if (cpu_is_offline(pr->id)) { 1106 1084 /* 1107 1085 * the cpu pointed by pr->id is offline. 
Unnecessary to change ··· 1102 1096 return -ENODEV; 1103 1097 } 1104 1098 1105 - cpumask_copy(saved_mask, &current->cpus_allowed); 1106 1099 t_state.target_state = state; 1107 1100 p_throttling = &(pr->throttling); 1108 - cpumask_and(online_throttling_cpus, cpu_online_mask, 1109 - p_throttling->shared_cpu_map); 1101 + 1110 1102 /* 1111 1103 * The throttling notifier will be called for every 1112 1104 * affected cpu in order to get one proper T-state. 1113 1105 * The notifier event is THROTTLING_PRECHANGE. 1114 1106 */ 1115 - for_each_cpu(i, online_throttling_cpus) { 1107 + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { 1116 1108 t_state.cpu = i; 1117 1109 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1118 1110 &t_state); ··· 1122 1118 * it can be called only for the cpu pointed by pr. 1123 1119 */ 1124 1120 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1125 - /* FIXME: use work_on_cpu() */ 1126 - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { 1127 - /* Can't migrate to the pr->id CPU. Exit */ 1128 - ret = -ENODEV; 1129 - goto exit; 1130 - } 1131 - ret = p_throttling->acpi_processor_set_throttling(pr, 1132 - t_state.target_state, force); 1121 + arg.pr = pr; 1122 + arg.target_state = state; 1123 + arg.force = force; 1124 + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); 1133 1125 } else { 1134 1126 /* 1135 1127 * When the T-state coordination is SW_ALL or HW_ALL, 1136 1128 * it is necessary to set T-state for every affected 1137 1129 * cpus. 
1138 1130 */ 1139 - for_each_cpu(i, online_throttling_cpus) { 1131 + for_each_cpu_and(i, cpu_online_mask, 1132 + p_throttling->shared_cpu_map) { 1140 1133 match_pr = per_cpu(processors, i); 1141 1134 /* 1142 1135 * If the pointer is invalid, we will report the ··· 1154 1153 "on CPU %d\n", i)); 1155 1154 continue; 1156 1155 } 1157 - t_state.cpu = i; 1158 - /* FIXME: use work_on_cpu() */ 1159 - if (set_cpus_allowed_ptr(current, cpumask_of(i))) 1160 - continue; 1161 - ret = match_pr->throttling. 1162 - acpi_processor_set_throttling( 1163 - match_pr, t_state.target_state, force); 1156 + 1157 + arg.pr = match_pr; 1158 + arg.target_state = state; 1159 + arg.force = force; 1160 + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, 1161 + &arg); 1164 1162 } 1165 1163 } 1166 1164 /* ··· 1168 1168 * affected cpu to update the T-states. 1169 1169 * The notifier event is THROTTLING_POSTCHANGE 1170 1170 */ 1171 - for_each_cpu(i, online_throttling_cpus) { 1171 + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { 1172 1172 t_state.cpu = i; 1173 1173 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1174 1174 &t_state); 1175 1175 } 1176 - /* restore the previous state */ 1177 - /* FIXME: use work_on_cpu() */ 1178 - set_cpus_allowed_ptr(current, saved_mask); 1179 - exit: 1180 - free_cpumask_var(online_throttling_cpus); 1181 - free_cpumask_var(saved_mask); 1176 + 1182 1177 return ret; 1183 1178 } 1184 1179