Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.14-rc2
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN also allow tag checking to be reenabled for each
 * test, see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	kasan_set_tagging_report_once(false);
	fail_data.report_found = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_data", &fail_data);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
 * false. This allows detecting KASAN reports that happen outside of the checks
 * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
 * and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (!READ_ONCE(fail_data.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging_sync();			\
		migrate_enable();					\
	}								\
	WRITE_ONCE(fail_data.report_found, false);			\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules, and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}


static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");