Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

slab: make check_object() more consistent

Now check_object() calls check_bytes_and_report() multiple times to
check every section of the object it cares about, like left and right
redzones, object poison, padding poison and the freepointer. It will
abort the checking process and return 0 once it finds an error.

There are two inconsistencies in check_object(), which are alignment
padding checking and object padding checking. We only print the error
messages but don't return 0 to tell callers that something is wrong
and needs to be handled. Please see alloc_debug_processing() and
free_debug_processing() for details.

We want to do all checks without skipping, so use a local variable
"ret" to save the result of each check, and change check_bytes_and_report()
to only report the specific error it finds. Then, at the end of
check_object(), print the trailer once if any check found an error.

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

authored by

Chengming Zhou and committed by
Vlastimil Babka
47d911b0 4d2bcefa

+41 -21
+41 -21
mm/slub.c
··· 788 788 kunit_put_resource(resource); 789 789 return true; 790 790 } 791 + 792 + static bool slab_in_kunit_test(void) 793 + { 794 + struct kunit_resource *resource; 795 + 796 + if (!kunit_get_current_test()) 797 + return false; 798 + 799 + resource = kunit_find_named_resource(current->kunit_test, "slab_errors"); 800 + if (!resource) 801 + return false; 802 + 803 + kunit_put_resource(resource); 804 + return true; 805 + } 791 806 #else 792 807 static inline bool slab_add_kunit_errors(void) { return false; } 808 + static inline bool slab_in_kunit_test(void) { return false; } 793 809 #endif 794 810 795 811 static inline unsigned int size_from_object(struct kmem_cache *s) ··· 1206 1190 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", 1207 1191 fault, end - 1, fault - addr, 1208 1192 fault[0], value); 1209 - print_trailer(s, slab, object); 1210 - add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1211 1193 1212 1194 skip_bug_print: 1213 1195 restore_bytes(s, what, value, fault, end); ··· 1314 1300 u8 *p = object; 1315 1301 u8 *endobject = object + s->object_size; 1316 1302 unsigned int orig_size, kasan_meta_size; 1303 + int ret = 1; 1317 1304 1318 1305 if (s->flags & SLAB_RED_ZONE) { 1319 1306 if (!check_bytes_and_report(s, slab, object, "Left Redzone", 1320 1307 object - s->red_left_pad, val, s->red_left_pad)) 1321 - return 0; 1308 + ret = 0; 1322 1309 1323 1310 if (!check_bytes_and_report(s, slab, object, "Right Redzone", 1324 1311 endobject, val, s->inuse - s->object_size)) 1325 - return 0; 1312 + ret = 0; 1326 1313 1327 1314 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { 1328 1315 orig_size = get_orig_size(s, object); ··· 1332 1317 !check_bytes_and_report(s, slab, object, 1333 1318 "kmalloc Redzone", p + orig_size, 1334 1319 val, s->object_size - orig_size)) { 1335 - return 0; 1320 + ret = 0; 1336 1321 } 1337 1322 } 1338 1323 } else { 1339 1324 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { 1340 - 
check_bytes_and_report(s, slab, p, "Alignment padding", 1325 + if (!check_bytes_and_report(s, slab, p, "Alignment padding", 1341 1326 endobject, POISON_INUSE, 1342 - s->inuse - s->object_size); 1327 + s->inuse - s->object_size)) 1328 + ret = 0; 1343 1329 } 1344 1330 } 1345 1331 ··· 1356 1340 !check_bytes_and_report(s, slab, p, "Poison", 1357 1341 p + kasan_meta_size, POISON_FREE, 1358 1342 s->object_size - kasan_meta_size - 1)) 1359 - return 0; 1343 + ret = 0; 1360 1344 if (kasan_meta_size < s->object_size && 1361 1345 !check_bytes_and_report(s, slab, p, "End Poison", 1362 1346 p + s->object_size - 1, POISON_END, 1)) 1363 - return 0; 1347 + ret = 0; 1364 1348 } 1365 1349 /* 1366 1350 * check_pad_bytes cleans up on its own. 1367 1351 */ 1368 - check_pad_bytes(s, slab, p); 1352 + if (!check_pad_bytes(s, slab, p)) 1353 + ret = 0; 1369 1354 } 1370 1355 1371 - if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) 1372 - /* 1373 - * Object and freepointer overlap. Cannot check 1374 - * freepointer while object is allocated. 1375 - */ 1376 - return 1; 1377 - 1378 - /* Check free pointer validity */ 1379 - if (!check_valid_pointer(s, slab, get_freepointer(s, p))) { 1356 + /* 1357 + * Cannot check freepointer while object is allocated if 1358 + * object and freepointer overlap. 1359 + */ 1360 + if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) && 1361 + !check_valid_pointer(s, slab, get_freepointer(s, p))) { 1380 1362 object_err(s, slab, p, "Freepointer corrupt"); 1381 1363 /* 1382 1364 * No choice but to zap it and thus lose the remainder ··· 1382 1368 * another error because the object count is now wrong. 
1383 1369 */ 1384 1370 set_freepointer(s, p, NULL); 1385 - return 0; 1371 + ret = 0; 1386 1372 } 1387 - return 1; 1373 + 1374 + if (!ret && !slab_in_kunit_test()) { 1375 + print_trailer(s, slab, object); 1376 + add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 1377 + } 1378 + 1379 + return ret; 1388 1380 } 1389 1381 1390 1382 static int check_slab(struct kmem_cache *s, struct slab *slab)