Merge tag 'ras-urgent-2020-02-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RAS fixes from Thomas Gleixner:
"Two fixes for the AMD MCE driver:

 - Populate the per-CPU MCA bank descriptor pointer only after it has
   been completely set up, to prevent a use-after-free in case one of
   the subsequent initialization steps fails (see the first sketch
   below)

 - Implement a proper release function for the sysfs entries of the
   MCA threshold controls instead of freeing the memory right in the
   CPU teardown code, which leads to another use-after-free when the
   associated sysfs file is opened and accessed afterwards (see the
   second sketch, after the diff)"

* tag 'ras-urgent-2020-02-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mce/amd: Fix kobject lifetime
x86/mce/amd: Publish the bank pointer only after setup has succeeded

Changed files:

 arch/x86/kernel/cpu/mce/amd.c | 50 +++++++++++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
···
 	.store			= store,
 };
 
+static void threshold_block_release(struct kobject *kobj);
+
 static struct kobj_type threshold_ktype = {
 	.sysfs_ops		= &threshold_ops,
 	.default_attrs		= default_attrs,
+	.release		= threshold_block_release,
 };
 
 static const char *get_name(unsigned int bank, struct threshold_block *b)
···
 	return buf_mcatype;
 }
 
-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
-				     unsigned int block, u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
+				     unsigned int bank, unsigned int block,
+				     u32 address)
 {
 	struct threshold_block *b = NULL;
 	u32 low, high;
···
 
 	INIT_LIST_HEAD(&b->miscj);
 
-	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
-		list_add(&b->miscj,
-			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
-	} else {
-		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
-	}
+	if (tb->blocks)
+		list_add(&b->miscj, &tb->blocks->miscj);
+	else
+		tb->blocks = b;
 
-	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
-				   per_cpu(threshold_banks, cpu)[bank]->kobj,
-				   get_name(bank, b));
+	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
 	if (err)
 		goto out_free;
 recurse:
···
 	if (!address)
 		return 0;
 
-	err = allocate_threshold_blocks(cpu, bank, block, address);
+	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
 	if (err)
 		goto out_free;
 
···
 		goto out_free;
 	}
 
-	per_cpu(threshold_banks, cpu)[bank] = b;
-
 	if (is_shared_bank(bank)) {
 		refcount_set(&b->cpus, 1);
···
 		}
 	}
 
-	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
-	if (!err)
-		goto out;
+	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+	if (err)
+		goto out_free;
+
+	per_cpu(threshold_banks, cpu)[bank] = b;
+
+	return 0;
 
 out_free:
 	kfree(b);
···
 	return err;
 }
 
-static void deallocate_threshold_block(unsigned int cpu,
-				       unsigned int bank)
+static void threshold_block_release(struct kobject *kobj)
+{
+	kfree(to_block(kobj));
+}
+
+static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
 {
 	struct threshold_block *pos = NULL;
 	struct threshold_block *tmp = NULL;
···
 		return;
 
 	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
-		kobject_put(&pos->kobj);
 		list_del(&pos->miscj);
-		kfree(pos);
+		kobject_put(&pos->kobj);
 	}
 
-	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
-	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
+	kobject_put(&head->blocks->kobj);
 }
 
 static void __threshold_remove_blocks(struct threshold_bank *b)
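The threshold_block_release() callback added above is the canonical way
to end the lifetime of a structure with an embedded kobject: teardown
calls kobject_put(), and the kobject core invokes the ktype's
->release() only once the last reference, including any held by an
open sysfs file, is gone. A standalone sketch of that rule, with
illustrative names (the patch's to_block() helper presumably performs
the container_of() step shown here):

  #include <linux/kobject.h>
  #include <linux/slab.h>

  /* Illustrative stand-in for struct threshold_block. */
  struct example_block {
	unsigned int	block;
	struct kobject	kobj;	/* embedded kobject */
  };

  /* Called by the kobject core when the refcount hits zero; this is
   * the only place the backing memory may be freed. */
  static void example_block_release(struct kobject *kobj)
  {
	kfree(container_of(kobj, struct example_block, kobj));
  }

  static struct kobj_type example_block_ktype = {
	.release	= example_block_release,
  };

  /* Teardown drops its reference instead of calling kfree() directly;
   * an open sysfs file keeps the object alive until its own reference
   * is dropped, which closes the use-after-free window. */
  static void example_block_teardown(struct example_block *b)
  {
	kobject_put(&b->kobj);
  }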