Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: adapt one more case in test_lru_map to the new target_free

The commit below, which updated the BPF_MAP_TYPE_LRU_HASH free target,
also updated tools/testing/selftests/bpf/test_lru_map to match.

But that missed one case that passes with 4 cores, but fails at
higher CPU counts.

Update test_lru_sanity3 to also adjust its expectation of target_free.

This time tested with CPU counts of 1, 4, 16, 64 and 384.

Fixes: d4adf1c9ee77 ("bpf: Adjust free target to avoid global starvation of LRU map")
Signed-off-by: Willem de Bruijn <willemb@google.com>
Link: https://lore.kernel.org/r/20250625210412.2732970-1-willemdebruijn.kernel@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Willem de Bruijn and committed by
Alexei Starovoitov
5e9388f7 fa6f092c

+18 -15
+18 -15
tools/testing/selftests/bpf/test_lru_map.c
··· 138 138 return ret; 139 139 } 140 140 141 + /* Derive target_free from map_size, same as bpf_common_lru_populate */ 142 + static unsigned int __tgt_size(unsigned int map_size) 143 + { 144 + return (map_size / nr_cpus) / 2; 145 + } 146 + 141 147 /* Inverse of how bpf_common_lru_populate derives target_free from map_size. */ 142 148 static unsigned int __map_size(unsigned int tgt_free) 143 149 { ··· 416 410 printf("Pass\n"); 417 411 } 418 412 419 - /* Size of the LRU map is 2*tgt_free 420 - * It is to test the active/inactive list rotation 421 - * Insert 1 to 2*tgt_free (+2*tgt_free keys) 422 - * Lookup key 1 to tgt_free*3/2 423 - * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys) 424 - * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU 413 + /* Test the active/inactive list rotation 414 + * 415 + * Fill the whole map, deplete the free list. 416 + * Reference all except the last lru->target_free elements. 417 + * Insert lru->target_free new elements. This triggers one shrink. 418 + * Verify that the non-referenced elements are replaced. 
425 419 */ 426 420 static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) 427 421 { ··· 440 434 441 435 assert(sched_next_online(0, &next_cpu) != -1); 442 436 443 - batch_size = tgt_free / 2; 444 - assert(batch_size * 2 == tgt_free); 437 + batch_size = __tgt_size(tgt_free); 445 438 446 439 map_size = tgt_free * 2; 447 440 lru_map_fd = create_map(map_type, map_flags, map_size); ··· 451 446 452 447 value[0] = 1234; 453 448 454 - /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */ 455 - end_key = 1 + (2 * tgt_free); 449 + /* Fill the map */ 450 + end_key = 1 + map_size; 456 451 for (key = 1; key < end_key; key++) 457 452 assert(!bpf_map_update_elem(lru_map_fd, &key, value, 458 453 BPF_NOEXIST)); 459 454 460 - /* Lookup key 1 to tgt_free*3/2 */ 461 - end_key = tgt_free + batch_size; 455 + /* Reference all but the last batch_size */ 456 + end_key = 1 + map_size - batch_size; 462 457 for (key = 1; key < end_key; key++) { 463 458 assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); 464 459 assert(!bpf_map_update_elem(expected_map_fd, &key, value, 465 460 BPF_NOEXIST)); 466 461 } 467 462 468 - /* Add 1+2*tgt_free to tgt_free*5/2 469 - * (+tgt_free/2 keys) 470 - */ 463 + /* Insert new batch_size: replaces the non-referenced elements */ 471 464 key = 2 * tgt_free + 1; 472 465 end_key = key + batch_size; 473 466 for (; key < end_key; key++) {