Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

treewide: use get_random_u32_inclusive() when possible

These cases were done with this Coccinelle:

@@
expression H;
expression L;
@@
- (get_random_u32_below(H) + L)
+ get_random_u32_inclusive(L, H + L - 1)

@@
expression H;
expression L;
expression E;
@@
  get_random_u32_inclusive(L,
  H
- + E
- - E
  )

@@
expression H;
expression L;
expression E;
@@
  get_random_u32_inclusive(L,
  H
- - E
- + E
  )

@@
expression H;
expression L;
expression E;
expression F;
@@
  get_random_u32_inclusive(L,
  H
- - E
  + F
- + E
  )

@@
expression H;
expression L;
expression E;
expression F;
@@
  get_random_u32_inclusive(L,
  H
- + E
  + F
- - E
  )

And then subsequently cleaned up by hand, with several automatic cases
rejected if they didn't make sense contextually.
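
For reference, the identity the first rule relies on: get_random_u32_below(H)
returns a value in [0, H - 1], so (get_random_u32_below(H) + L) is uniform
over [L, H + L - 1], which is exactly the closed range that
get_random_u32_inclusive(L, H + L - 1) draws from (in include/linux/random.h,
get_random_u32_inclusive(floor, ceil) is defined as
floor + get_random_u32_below(ceil - floor + 1)). A minimal userspace sketch
of the equivalence, with rand() standing in for the kernel RNG and the two
helpers mirroring only the ranges of the real ones:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Userspace stand-ins mirroring the kernel helpers' ranges:
 *   get_random_u32_below(ceil)           -> uniform over [0, ceil - 1]
 *   get_random_u32_inclusive(floor, ceil) -> uniform over [floor, ceil]
 */
static uint32_t get_random_u32_below(uint32_t ceil)
{
	return (uint32_t)rand() % ceil;	/* modulo bias is irrelevant to the range identity */
}

static uint32_t get_random_u32_inclusive(uint32_t floor, uint32_t ceil)
{
	return floor + get_random_u32_below(ceil - floor + 1);
}

int main(void)
{
	const uint32_t L = 1, H = 1024;

	for (int i = 0; i < 1000000; i++) {
		uint32_t a = get_random_u32_below(H) + L;            /* old spelling */
		uint32_t b = get_random_u32_inclusive(L, H + L - 1); /* new spelling */

		assert(a >= L && a <= H + L - 1);
		assert(b >= L && b <= H + L - 1);
	}
	return 0;
}

With L = 1 and H = 1024, both spellings cover [1, 1024], which is why
(get_random_u32_below(1024) + 1) becomes get_random_u32_inclusive(1, 1024)
in the arch/x86 hunk below.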

Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> # for infiniband
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>

+54 -62
+1 -1
arch/x86/kernel/module.c
@@ -53,7 +53,7 @@
 		 */
 		if (module_load_offset == 0)
 			module_load_offset =
-				(get_random_u32_below(1024) + 1) * PAGE_SIZE;
+				get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
 		mutex_unlock(&module_kaslr_mutex);
 	}
 	return module_load_offset;
+1 -1
crypto/rsa-pkcs1pad.c
@@ -253,7 +253,7 @@
 	ps_end = ctx->key_size - req->src_len - 2;
 	req_ctx->in_buf[0] = 0x02;
 	for (i = 1; i < ps_end; i++)
-		req_ctx->in_buf[i] = 1 + get_random_u32_below(255);
+		req_ctx->in_buf[i] = get_random_u32_inclusive(1, 255);
 	req_ctx->in_buf[ps_end] = 0x00;

 	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+5 -5
crypto/testmgr.c
@@ -962,11 +962,11 @@
 		if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
 			this_len = remaining;
 		else
-			this_len = 1 + get_random_u32_below(remaining);
+			this_len = get_random_u32_inclusive(1, remaining);
 		div->proportion_of_total = this_len;

 		if (get_random_u32_below(4) == 0)
-			div->offset = (PAGE_SIZE - 128) + get_random_u32_below(128);
+			div->offset = get_random_u32_inclusive(PAGE_SIZE - 128, PAGE_SIZE - 1);
 		else if (get_random_u32_below(2) == 0)
 			div->offset = get_random_u32_below(32);
 		else
@@ -1094,12 +1094,12 @@
 	}

 	if (get_random_u32_below(2) == 0) {
-		cfg->iv_offset = 1 + get_random_u32_below(MAX_ALGAPI_ALIGNMASK);
+		cfg->iv_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
 		p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
 	}

 	if (get_random_u32_below(2) == 0) {
-		cfg->key_offset = 1 + get_random_u32_below(MAX_ALGAPI_ALIGNMASK);
+		cfg->key_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
 		p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
 	}
@@ -1653,7 +1653,7 @@
 	if (maxkeysize) {
 		vec->ksize = maxkeysize;
 		if (get_random_u32_below(4) == 0)
-			vec->ksize = 1 + get_random_u32_below(maxkeysize);
+			vec->ksize = get_random_u32_inclusive(1, maxkeysize);
 		generate_random_bytes((u8 *)vec->key, vec->ksize);

 		vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
+1 -1
drivers/bus/mhi/host/internal.h
@@ -129,7 +129,7 @@
 #define PRIMARY_CMD_RING		0
 #define MHI_DEV_WAKE_DB			127
 #define MHI_MAX_MTU			0xffff
-#define MHI_RANDOM_U32_NONZERO(bmsk)	(get_random_u32_below(bmsk) + 1)
+#define MHI_RANDOM_U32_NONZERO(bmsk)	(get_random_u32_inclusive(1, bmsk))

 enum mhi_er_type {
 	MHI_ER_TYPE_INVALID = 0x0,
+1 -1
drivers/dma-buf/st-dma-fence-chain.c
@@ -400,7 +400,7 @@
 	struct dma_fence *fence = dma_fence_get(data->fc.tail);
 	int seqno;

-	seqno = get_random_u32_below(data->fc.chain_length) + 1;
+	seqno = get_random_u32_inclusive(1, data->fc.chain_length);

 	err = dma_fence_chain_find_seqno(&fence, seqno);
 	if (err) {
+1 -1
drivers/infiniband/core/cma.c
@@ -3807,7 +3807,7 @@

 	inet_get_local_port_range(net, &low, &high);
 	remaining = (high - low) + 1;
-	rover = get_random_u32_below(remaining) + low;
+	rover = get_random_u32_inclusive(low, remaining + low - 1);
retry:
 	if (last_used_port != rover) {
 		struct rdma_bind_list *bind_list;
+2 -3
drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -41,9 +41,8 @@
 	u16 sport;

 	if (!fl)
-		sport = get_random_u32_below(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX +
-					     1 - IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
-			IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
+		sport = get_random_u32_inclusive(IB_ROCE_UDP_ENCAP_VALID_PORT_MIN,
+						 IB_ROCE_UDP_ENCAP_VALID_PORT_MAX);
 	else
 		sport = rdma_flow_label_to_udp_sport(fl);
+1 -1
drivers/mtd/nand/raw/nandsim.c
@@ -1405,7 +1405,7 @@
 	if (bitflips && get_random_u16() < (1 << 6)) {
 		int flips = 1;
 		if (bitflips > 1)
-			flips = get_random_u32_below(bitflips) + 1;
+			flips = get_random_u32_inclusive(1, bitflips);
 		while (flips--) {
 			int pos = get_random_u32_below(num * 8);
 			ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+4 -4
drivers/net/wireguard/selftest/allowedips.c
@@ -285,7 +285,7 @@

 	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
 		get_random_bytes(ip, 4);
-		cidr = get_random_u32_below(32) + 1;
+		cidr = get_random_u32_inclusive(1, 32);
 		peer = peers[get_random_u32_below(NUM_PEERS)];
 		if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
 					    peer, &mutex) < 0) {
@@ -311,7 +311,7 @@
 				mutated[k] = (mutated[k] & mutate_mask[k]) |
 					     (~mutate_mask[k] &
 					      get_random_u8());
-			cidr = get_random_u32_below(32) + 1;
+			cidr = get_random_u32_inclusive(1, 32);
 			peer = peers[get_random_u32_below(NUM_PEERS)];
 			if (wg_allowedips_insert_v4(&t,
 						    (struct in_addr *)mutated,
@@ -329,7 +329,7 @@

 	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
 		get_random_bytes(ip, 16);
-		cidr = get_random_u32_below(128) + 1;
+		cidr = get_random_u32_inclusive(1, 128);
 		peer = peers[get_random_u32_below(NUM_PEERS)];
 		if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
 					    peer, &mutex) < 0) {
@@ -355,7 +355,7 @@
 				mutated[k] = (mutated[k] & mutate_mask[k]) |
 					     (~mutate_mask[k] &
 					      get_random_u8());
-			cidr = get_random_u32_below(128) + 1;
+			cidr = get_random_u32_inclusive(1, 128);
 			peer = peers[get_random_u32_below(NUM_PEERS)];
 			if (wg_allowedips_insert_v6(&t,
 						    (struct in6_addr *)mutated,
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1128,7 +1128,7 @@
 	if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
 		/* 100ms ~ 300ms */
 		err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
-						100 * (1 + get_random_u32_below(3)));
+						100 * get_random_u32_inclusive(1, 3));
 	else
 		err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);

+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1099,7 +1099,7 @@
 				   iwl_mvm_mac_ap_iterator, &data);

 	if (data.beacon_device_ts) {
-		u32 rand = get_random_u32_below(64 - 36) + 36;
+		u32 rand = get_random_u32_inclusive(36, 63);
 		mvmvif->ap_beacon_time = data.beacon_device_ts +
 			ieee80211_tu_to_usec(data.beacon_int * rand /
 					     100);
+3 -3
fs/f2fs/segment.c
@@ -2588,7 +2588,7 @@
 	curseg->alloc_type = LFS;
 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
 		curseg->fragment_remained_chunk =
-			get_random_u32_below(sbi->max_fragment_chunk) + 1;
+			get_random_u32_inclusive(1, sbi->max_fragment_chunk);
 }

 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2625,9 +2625,9 @@
 		/* To allocate block chunks in different sizes, use random number */
 		if (--seg->fragment_remained_chunk <= 0) {
 			seg->fragment_remained_chunk =
-				get_random_u32_below(sbi->max_fragment_chunk) + 1;
+				get_random_u32_inclusive(1, sbi->max_fragment_chunk);
 			seg->next_blkoff +=
-				get_random_u32_below(sbi->max_fragment_hole) + 1;
+				get_random_u32_inclusive(1, sbi->max_fragment_hole);
 		}
 	}
 }
+1 -1
kernel/kcsan/selftest.c
@@ -31,7 +31,7 @@
 	int i;

 	for (i = 0; i < ITERS_PER_TEST; ++i) {
-		size_t size = get_random_u32_below(MAX_ENCODABLE_SIZE) + 1;
+		size_t size = get_random_u32_inclusive(1, MAX_ENCODABLE_SIZE);
 		bool is_write = !!get_random_u32_below(2);
 		unsigned long verif_masked_addr;
 		long encoded_watchpoint;
+4 -4
lib/test_hexdump.c
@@ -149,7 +149,7 @@
 static void __init test_hexdump_set(int rowsize, bool ascii)
 {
 	size_t d = min_t(size_t, sizeof(data_b), rowsize);
-	size_t len = get_random_u32_below(d) + 1;
+	size_t len = get_random_u32_inclusive(1, d);

 	test_hexdump(len, rowsize, 4, ascii);
 	test_hexdump(len, rowsize, 2, ascii);
@@ -208,7 +208,7 @@
 static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
 {
 	unsigned int i = 0;
-	int rs = (get_random_u32_below(2) + 1) * 16;
+	int rs = get_random_u32_inclusive(1, 2) * 16;

 	do {
 		int gs = 1 << i;
@@ -223,11 +223,11 @@
 	unsigned int i;
 	int rowsize;

-	rowsize = (get_random_u32_below(2) + 1) * 16;
+	rowsize = get_random_u32_inclusive(1, 2) * 16;
 	for (i = 0; i < 16; i++)
 		test_hexdump_set(rowsize, false);

-	rowsize = (get_random_u32_below(2) + 1) * 16;
+	rowsize = get_random_u32_inclusive(1, 2) * 16;
 	for (i = 0; i < 16; i++)
 		test_hexdump_set(rowsize, true);

+1 -1
lib/test_printf.c
@@ -126,7 +126,7 @@
 	 * be able to print it as expected.
 	 */
 	failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
-	rand = 1 + get_random_u32_below(elen + 1);
+	rand = get_random_u32_inclusive(1, elen + 1);
 	/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
 	failed_tests += do_test(rand, expect, elen, fmt, ap);
 	failed_tests += do_test(0, expect, elen, fmt, ap);
+3 -3
lib/test_vmalloc.c
@@ -151,7 +151,7 @@
 	int i;

 	for (i = 0; i < test_loop_count; i++) {
-		n = get_random_u32_below(100) + 1;
+		n = get_random_u32_inclusive(1, 100);
 		p = vmalloc(n * PAGE_SIZE);

 		if (!p)
@@ -291,12 +291,12 @@
 		return -1;

 	for (i = 0; i < 35000; i++) {
-		size = get_random_u32_below(PAGE_SIZE / 4) + 1;
+		size = get_random_u32_inclusive(1, PAGE_SIZE / 4);

 		/*
 		 * Maximum PAGE_SIZE
 		 */
-		align = 1 << (get_random_u32_below(11) + 1);
+		align = 1 << get_random_u32_inclusive(1, 11);

 		pcpu[i] = __alloc_percpu(size, align);
 		if (!pcpu[i])
+3 -3
mm/kasan/kasan_test.c
@@ -1299,7 +1299,7 @@
 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

 	for (i = 0; i < 256; i++) {
-		size = get_random_u32_below(1024) + 1;
+		size = get_random_u32_inclusive(1, 1024);
 		ptr = kmalloc(size, GFP_KERNEL);
 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
@@ -1308,7 +1308,7 @@
 	}

 	for (i = 0; i < 256; i++) {
-		order = get_random_u32_below(4) + 1;
+		order = get_random_u32_inclusive(1, 4);
 		pages = alloc_pages(GFP_KERNEL, order);
 		ptr = page_address(pages);
 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -1321,7 +1321,7 @@
 		return;

 	for (i = 0; i < 256; i++) {
-		size = get_random_u32_below(1024) + 1;
+		size = get_random_u32_inclusive(1, 1024);
 		ptr = vmalloc(size);
 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+1 -1
mm/kfence/kfence_test.c
@@ -532,7 +532,7 @@
 	int iter;

 	for (iter = 0; iter < 5; iter++) {
-		const size_t size = setup_test_cache(test, 8 + get_random_u32_below(300),
+		const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
 						     0, (iter & 1) ? ctor_set_x : NULL);
 		void *objects[] = {
 			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
+2 -3
mm/swapfile.c
@@ -772,8 +772,7 @@
 		/* No free swap slots available */
 		if (si->highest_bit <= si->lowest_bit)
 			return;
-		next = si->lowest_bit +
-			get_random_u32_below(si->highest_bit - si->lowest_bit + 1);
+		next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
 		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
 		next = max_t(unsigned int, next, si->lowest_bit);
 	}
@@ -3089,7 +3088,7 @@
 	 */
 	for_each_possible_cpu(cpu) {
 		per_cpu(*p->cluster_next_cpu, cpu) =
-			1 + get_random_u32_below(p->highest_bit);
+			get_random_u32_inclusive(1, p->highest_bit);
 	}
 	nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);

+2 -3
net/bluetooth/mgmt.c
@@ -7373,9 +7373,8 @@
 	/* To avoid client trying to guess when to poll again for information we
 	 * calculate conn info age as random value between min/max set in hdev.
 	 */
-	conn_info_age = hdev->conn_info_min_age +
-			get_random_u32_below(hdev->conn_info_max_age -
-					     hdev->conn_info_min_age);
+	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
+						 hdev->conn_info_max_age - 1);

 	/* Query controller to refresh cached values if they are too old or were
 	 * never read.
+10 -15
net/core/pktgen.c
@@ -2380,9 +2380,8 @@
 	else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
 		__u16 t;
 		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-			t = get_random_u32_below(pkt_dev->queue_map_max -
-						 pkt_dev->queue_map_min + 1) +
-			    pkt_dev->queue_map_min;
+			t = get_random_u32_inclusive(pkt_dev->queue_map_min,
+						     pkt_dev->queue_map_max);
 		} else {
 			t = pkt_dev->cur_queue_map + 1;
 			if (t > pkt_dev->queue_map_max)
@@ -2478,9 +2477,8 @@

 	if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
 		if (pkt_dev->flags & F_UDPSRC_RND)
-			pkt_dev->cur_udp_src = get_random_u32_below(
-				pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
-				pkt_dev->udp_src_min;
+			pkt_dev->cur_udp_src = get_random_u32_inclusive(pkt_dev->udp_src_min,
+									pkt_dev->udp_src_max - 1);

 		else {
 			pkt_dev->cur_udp_src++;
@@ -2491,9 +2489,8 @@

 	if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
 		if (pkt_dev->flags & F_UDPDST_RND) {
-			pkt_dev->cur_udp_dst = get_random_u32_below(
-				pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
-				pkt_dev->udp_dst_min;
+			pkt_dev->cur_udp_dst = get_random_u32_inclusive(pkt_dev->udp_dst_min,
+									pkt_dev->udp_dst_max - 1);
 		} else {
 			pkt_dev->cur_udp_dst++;
 			if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
@@ -2508,7 +2505,7 @@
 		if (imn < imx) {
 			__u32 t;
 			if (pkt_dev->flags & F_IPSRC_RND)
-				t = get_random_u32_below(imx - imn) + imn;
+				t = get_random_u32_inclusive(imn, imx - 1);
 			else {
 				t = ntohl(pkt_dev->cur_saddr);
 				t++;
@@ -2530,8 +2527,7 @@
 			if (pkt_dev->flags & F_IPDST_RND) {

 				do {
-					t = get_random_u32_below(imx - imn) +
-					    imn;
+					t = get_random_u32_inclusive(imn, imx - 1);
 					s = htonl(t);
 				} while (ipv4_is_loopback(s) ||
 					 ipv4_is_multicast(s) ||
@@ -2578,9 +2574,8 @@
 	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
 		__u32 t;
 		if (pkt_dev->flags & F_TXSIZE_RND) {
-			t = get_random_u32_below(pkt_dev->max_pkt_size -
-						 pkt_dev->min_pkt_size) +
-			    pkt_dev->min_pkt_size;
+			t = get_random_u32_inclusive(pkt_dev->min_pkt_size,
						     pkt_dev->max_pkt_size - 1);
 		} else {
 			t = pkt_dev->cur_pkt_size + 1;
 			if (t > pkt_dev->max_pkt_size)
+1 -1
net/ipv4/tcp_input.c
@@ -3647,7 +3647,7 @@

 	WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
 	WRITE_ONCE(net->ipv4.tcp_challenge_count,
-		   half + get_random_u32_below(ack_limit));
+		   get_random_u32_inclusive(half, ack_limit + half - 1));
 }
 count = READ_ONCE(net->ipv4.tcp_challenge_count);
 if (count > 0) {
+3 -3
net/ipv6/addrconf.c
@@ -104,7 +104,7 @@
static inline s32 rfc3315_s14_backoff_init(s32 irt)
{
	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
-	u64 tmp = (900000 + get_random_u32_below(200001)) * (u64)irt;
+	u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt;
	do_div(tmp, 1000000);
	return (s32)tmp;
}
@@ -112,11 +112,11 @@
static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
{
	/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
-	u64 tmp = (1900000 + get_random_u32_below(200001)) * (u64)rt;
+	u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt;
	do_div(tmp, 1000000);
	if ((s32)tmp > mrt) {
		/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
-		tmp = (900000 + get_random_u32_below(200001)) * (u64)mrt;
+		tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt;
		do_div(tmp, 1000000);
	}
	return (s32)tmp;
+1 -1
net/xfrm/xfrm_state.c
@@ -2072,7 +2072,7 @@
 	} else {
 		u32 spi = 0;
 		for (h = 0; h < high-low+1; h++) {
-			spi = low + get_random_u32_below(high - low + 1);
+			spi = get_random_u32_inclusive(low, high);
 			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
 			if (x0 == NULL) {
 				newspi = htonl(spi);