Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ipmi:msghandler:Change seq_lock to a mutex

Dan Carpenter got a Smatch warning:

drivers/char/ipmi/ipmi_msghandler.c:5265 ipmi_free_recv_msg()
warn: sleeping in atomic context

due to the recent rework of the IPMI driver's locking. I didn't realize
vfree could block. But there is an easy solution to this, now that
almost everything in the message handler runs in thread context.

I had wanted to look earlier at whether seq_lock could be converted
from a spinlock to a mutex, but I preferred to let the previous changes
go in and soak before doing that. I have now done the analysis: the
conversion works, and it solves this problem.

Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Closes: https://lore.kernel.org/r/202503240244.LR7pOwyr-lkp@intel.com/
Fixes: 3be997d5a64a ("ipmi:msghandler: Remove srcu from the ipmi user structure")
Cc: <stable@vger.kernel.org> # 6.16
Signed-off-by: Corey Minyard <corey@minyard.net>

+26 -37
+26 -37
drivers/char/ipmi/ipmi_msghandler.c
··· 464 464 * interface to match them up with their responses. A routine 465 465 * is called periodically to time the items in this list. 466 466 */ 467 - spinlock_t seq_lock; 467 + struct mutex seq_lock; 468 468 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; 469 469 int curr_seq; 470 470 ··· 1116 1116 struct ipmi_recv_msg **recv_msg) 1117 1117 { 1118 1118 int rv = -ENODEV; 1119 - unsigned long flags; 1120 1119 1121 1120 if (seq >= IPMI_IPMB_NUM_SEQ) 1122 1121 return -EINVAL; 1123 1122 1124 - spin_lock_irqsave(&intf->seq_lock, flags); 1123 + mutex_lock(&intf->seq_lock); 1125 1124 if (intf->seq_table[seq].inuse) { 1126 1125 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; 1127 1126 ··· 1133 1134 rv = 0; 1134 1135 } 1135 1136 } 1136 - spin_unlock_irqrestore(&intf->seq_lock, flags); 1137 + mutex_unlock(&intf->seq_lock); 1137 1138 1138 1139 return rv; 1139 1140 } ··· 1144 1145 long msgid) 1145 1146 { 1146 1147 int rv = -ENODEV; 1147 - unsigned long flags; 1148 1148 unsigned char seq; 1149 1149 unsigned long seqid; 1150 1150 1151 1151 1152 1152 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 1153 1153 1154 - spin_lock_irqsave(&intf->seq_lock, flags); 1154 + mutex_lock(&intf->seq_lock); 1155 1155 /* 1156 1156 * We do this verification because the user can be deleted 1157 1157 * while a message is outstanding. 
··· 1161 1163 ent->timeout = ent->orig_timeout; 1162 1164 rv = 0; 1163 1165 } 1164 - spin_unlock_irqrestore(&intf->seq_lock, flags); 1166 + mutex_unlock(&intf->seq_lock); 1165 1167 1166 1168 return rv; 1167 1169 } ··· 1172 1174 unsigned int err) 1173 1175 { 1174 1176 int rv = -ENODEV; 1175 - unsigned long flags; 1176 1177 unsigned char seq; 1177 1178 unsigned long seqid; 1178 1179 struct ipmi_recv_msg *msg = NULL; ··· 1179 1182 1180 1183 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 1181 1184 1182 - spin_lock_irqsave(&intf->seq_lock, flags); 1185 + mutex_lock(&intf->seq_lock); 1183 1186 /* 1184 1187 * We do this verification because the user can be deleted 1185 1188 * while a message is outstanding. ··· 1193 1196 msg = ent->recv_msg; 1194 1197 rv = 0; 1195 1198 } 1196 - spin_unlock_irqrestore(&intf->seq_lock, flags); 1199 + mutex_unlock(&intf->seq_lock); 1197 1200 1198 1201 if (msg) 1199 1202 deliver_err_response(intf, msg, err); ··· 1206 1209 void *handler_data, 1207 1210 struct ipmi_user **user) 1208 1211 { 1209 - unsigned long flags; 1210 1212 struct ipmi_user *new_user = NULL; 1211 1213 int rv = 0; 1212 1214 struct ipmi_smi *intf; ··· 1273 1277 new_user->gets_events = false; 1274 1278 1275 1279 mutex_lock(&intf->users_mutex); 1276 - spin_lock_irqsave(&intf->seq_lock, flags); 1280 + mutex_lock(&intf->seq_lock); 1277 1281 list_add(&new_user->link, &intf->users); 1278 - spin_unlock_irqrestore(&intf->seq_lock, flags); 1282 + mutex_unlock(&intf->seq_lock); 1279 1283 mutex_unlock(&intf->users_mutex); 1280 1284 1281 1285 if (handler->ipmi_watchdog_pretimeout) ··· 1321 1325 { 1322 1326 struct ipmi_smi *intf = user->intf; 1323 1327 int i; 1324 - unsigned long flags; 1325 1328 struct cmd_rcvr *rcvr; 1326 1329 struct cmd_rcvr *rcvrs = NULL; 1327 1330 struct ipmi_recv_msg *msg, *msg2; ··· 1341 1346 list_del(&user->link); 1342 1347 atomic_dec(&intf->nr_users); 1343 1348 1344 - spin_lock_irqsave(&intf->seq_lock, flags); 1349 + mutex_lock(&intf->seq_lock); 1345 1350 for (i = 0; i < 
IPMI_IPMB_NUM_SEQ; i++) { 1346 1351 if (intf->seq_table[i].inuse 1347 1352 && (intf->seq_table[i].recv_msg->user == user)) { ··· 1350 1355 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 1351 1356 } 1352 1357 } 1353 - spin_unlock_irqrestore(&intf->seq_lock, flags); 1358 + mutex_unlock(&intf->seq_lock); 1354 1359 1355 1360 /* 1356 1361 * Remove the user from the command receiver's table. First ··· 2021 2026 */ 2022 2027 smi_msg->user_data = recv_msg; 2023 2028 } else { 2024 - /* It's a command, so get a sequence for it. */ 2025 - unsigned long flags; 2026 - 2027 - spin_lock_irqsave(&intf->seq_lock, flags); 2029 + mutex_lock(&intf->seq_lock); 2028 2030 2029 2031 if (is_maintenance_mode_cmd(msg)) 2030 2032 intf->ipmb_maintenance_mode_timeout = ··· 2079 2087 * to be correct. 2080 2088 */ 2081 2089 out_err: 2082 - spin_unlock_irqrestore(&intf->seq_lock, flags); 2090 + mutex_unlock(&intf->seq_lock); 2083 2091 } 2084 2092 2085 2093 return rv; ··· 2197 2205 */ 2198 2206 smi_msg->user_data = recv_msg; 2199 2207 } else { 2200 - /* It's a command, so get a sequence for it. */ 2201 - unsigned long flags; 2202 - 2203 - spin_lock_irqsave(&intf->seq_lock, flags); 2208 + mutex_lock(&intf->seq_lock); 2204 2209 2205 2210 /* 2206 2211 * Create a sequence number with a 1 second ··· 2246 2257 * to be correct. 2247 2258 */ 2248 2259 out_err: 2249 - spin_unlock_irqrestore(&intf->seq_lock, flags); 2260 + mutex_unlock(&intf->seq_lock); 2250 2261 } 2251 2262 2252 2263 return rv; ··· 3564 3575 atomic_set(&intf->nr_users, 0); 3565 3576 intf->handlers = handlers; 3566 3577 intf->send_info = send_info; 3567 - spin_lock_init(&intf->seq_lock); 3578 + mutex_init(&intf->seq_lock); 3568 3579 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3569 3580 intf->seq_table[j].inuse = 0; 3570 3581 intf->seq_table[j].seqid = 0; ··· 4518 4529 4519 4530 if (msg->rsp_size < 2) { 4520 4531 /* Message is too small to be correct. 
*/ 4521 - dev_warn(intf->si_dev, 4522 - "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", 4523 - (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); 4532 + dev_warn_ratelimited(intf->si_dev, 4533 + "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", 4534 + (msg->data[0] >> 2) | 1, 4535 + msg->data[1], msg->rsp_size); 4524 4536 4525 4537 return_unspecified: 4526 4538 /* Generate an error response for the message. */ ··· 4941 4951 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4942 4952 struct list_head *timeouts, 4943 4953 unsigned long timeout_period, 4944 - int slot, unsigned long *flags, 4945 - bool *need_timer) 4954 + int slot, bool *need_timer) 4946 4955 { 4947 4956 struct ipmi_recv_msg *msg; 4948 4957 ··· 4993 5004 return; 4994 5005 } 4995 5006 4996 - spin_unlock_irqrestore(&intf->seq_lock, *flags); 5007 + mutex_unlock(&intf->seq_lock); 4997 5008 4998 5009 /* 4999 5010 * Send the new message. We send with a zero ··· 5014 5025 } else 5015 5026 ipmi_free_smi_msg(smi_msg); 5016 5027 5017 - spin_lock_irqsave(&intf->seq_lock, *flags); 5028 + mutex_lock(&intf->seq_lock); 5018 5029 } 5019 5030 } 5020 5031 ··· 5041 5052 * list. 5042 5053 */ 5043 5054 INIT_LIST_HEAD(&timeouts); 5044 - spin_lock_irqsave(&intf->seq_lock, flags); 5055 + mutex_lock(&intf->seq_lock); 5045 5056 if (intf->ipmb_maintenance_mode_timeout) { 5046 5057 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 5047 5058 intf->ipmb_maintenance_mode_timeout = 0; ··· 5051 5062 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 5052 5063 check_msg_timeout(intf, &intf->seq_table[i], 5053 5064 &timeouts, timeout_period, i, 5054 - &flags, &need_timer); 5055 - spin_unlock_irqrestore(&intf->seq_lock, flags); 5065 + &need_timer); 5066 + mutex_unlock(&intf->seq_lock); 5056 5067 5057 5068 list_for_each_entry_safe(msg, msg2, &timeouts, link) 5058 5069 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);