treewide: use prandom_u32_max() when possible, part 1

Rather than incurring a division or requesting too many random bytes for
the given range, use the prandom_u32_max() function, which only takes
the minimum required bytes from the RNG and avoids divisions. This was
done mechanically with this coccinelle script:

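// Convert the simple single-expression forms. The first pattern replaces a
// modulo, the second a binary-and mask. XXX_MAKE_SURE_E_IS_POW2 is a
// deliberate sentinel that does not compile, forcing each mask conversion to
// be inspected and fixed up by hand, since "& ((E) - 1)" is only equivalent
// to prandom_u32_max(E) when E is a power of two.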
@basic@
expression E;
type T;
identifier get_random_u32 =~ "get_random_int|prandom_u32|get_random_u32";
typedef u64;
@@
(
- ((T)get_random_u32() % (E))
+ prandom_u32_max(E)
|
- ((T)get_random_u32() & ((E) - 1))
+ prandom_u32_max(E * XXX_MAKE_SURE_E_IS_POW2)
|
- ((u64)(E) * get_random_u32() >> 32)
+ prandom_u32_max(E)
|
- ((T)get_random_u32() & ~PAGE_MASK)
+ prandom_u32_max(PAGE_SIZE)
)

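// The same conversion for the two-statement form, where the value is
// reduced some statements after it is generated.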
@multi_line@
identifier get_random_u32 =~ "get_random_int|prandom_u32|get_random_u32";
identifier RAND;
expression E;
@@

- RAND = get_random_u32();
... when != RAND
- RAND %= (E);
+ RAND = prandom_u32_max(E);

// Find a potential literal
@literal_mask@
expression LITERAL;
type T;
identifier get_random_u32 =~ "get_random_int|prandom_u32|get_random_u32";
position p;
@@

((T)get_random_u32()@p & (LITERAL))

// Add one to the literal.
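// A mask of 2^n - 1 becomes a bound of 2^n, e.g. "& 0x3" turns into
// prandom_u32_max(4); all-ones masks such as 0xffff are skipped here and
// cleaned up elsewhere.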
@script:python add_one@
literal << literal_mask.LITERAL;
RESULT;
@@

value = None
if literal.startswith('0x'):
        value = int(literal, 16)
elif literal[0] in '123456789':
        value = int(literal, 10)
if value is None:
        print("I don't know how to handle %s" % (literal))
        cocci.include_match(False)
elif value == 2**32 - 1 or value == 2**31 - 1 or value == 2**24 - 1 or value == 2**16 - 1 or value == 2**8 - 1:
        print("Skipping 0x%x for cleanup elsewhere" % (value))
        cocci.include_match(False)
elif value & (value + 1) != 0:
        print("Skipping 0x%x because it's not a power of two minus one" % (value))
        cocci.include_match(False)
elif literal.startswith('0x'):
        coccinelle.RESULT = cocci.make_expr("0x%x" % (value + 1))
else:
        coccinelle.RESULT = cocci.make_expr("%d" % (value + 1))

// Replace the literal mask with the calculated result.
@plus_one@
expression literal_mask.LITERAL;
position literal_mask.p;
expression add_one.RESULT;
identifier FUNC;
@@

- (FUNC()@p & (LITERAL))
+ prandom_u32_max(RESULT)

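// Collapse the temporary-then-return form that the rewrites above can
// leave behind.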
@collapse_ret@
type T;
identifier VAR;
expression E;
@@

{
- T VAR;
- VAR = (E);
- return VAR;
+ return E;
}

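// And drop any local variable that is now entirely unused.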
@drop_var@
type T;
identifier VAR;
@@

{
- T VAR;
... when != VAR
}
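
For reference, prandom_u32_max() computes the bounded value without a
division by taking the high 32 bits of a 32x32 -> 64 bit multiply; its
definition in include/linux/prandom.h is, roughly:

        static inline u32 prandom_u32_max(u32 ep_ro)
        {
                /* returns a value in [0, ep_ro), biased only negligibly */
                return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
        }

This is the same multiply-then-shift trick that the third @basic@ pattern
above already matches in its open-coded form.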

Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: KP Singh <kpsingh@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz> # for ext4 and sbitmap
Reviewed-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> # for drbd
Acked-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Heiko Carstens <hca@linux.ibm.com> # for s390
Acked-by: Ulf Hansson <ulf.hansson@linaro.org> # for mmc
Acked-by: Darrick J. Wong <djwong@kernel.org> # for xfs
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>

+204 -218
+1 -1
arch/arm/kernel/process.c
@@ -375,7 +375,7 @@
 
         slots = ((last - first) >> PAGE_SHIFT) + 1;
 
-        offset = get_random_int() % slots;
+        offset = prandom_u32_max(slots);
 
         addr = first + (offset << PAGE_SHIFT);
 
+1 -1
arch/arm64/kernel/process.c
@@ -595,7 +595,7 @@
 unsigned long arch_align_stack(unsigned long sp)
 {
         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-                sp -= get_random_int() & ~PAGE_MASK;
+                sp -= prandom_u32_max(PAGE_SIZE);
         return sp & ~0xf;
 }
 
+1 -1
arch/loongarch/kernel/process.c
@@ -293,7 +293,7 @@
 unsigned long arch_align_stack(unsigned long sp)
 {
         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-                sp -= get_random_int() & ~PAGE_MASK;
+                sp -= prandom_u32_max(PAGE_SIZE);
 
         return sp & STACK_ALIGN;
 }
+1 -1
arch/loongarch/kernel/vdso.c
@@ -78,7 +78,7 @@
         unsigned long base = STACK_TOP;
 
         if (current->flags & PF_RANDOMIZE) {
-                base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+                base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
                 base = PAGE_ALIGN(base);
         }
 
+1 -1
arch/mips/kernel/process.c
@@ -711,7 +711,7 @@
 unsigned long arch_align_stack(unsigned long sp)
 {
         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-                sp -= get_random_int() & ~PAGE_MASK;
+                sp -= prandom_u32_max(PAGE_SIZE);
 
         return sp & ALMASK;
 }
+1 -1
arch/mips/kernel/vdso.c
@@ -79,7 +79,7 @@
         }
 
         if (current->flags & PF_RANDOMIZE) {
-                base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+                base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
                 base = PAGE_ALIGN(base);
         }
 
+1 -1
arch/parisc/kernel/vdso.c
@@ -75,7 +75,7 @@
 
         map_base = mm->mmap_base;
         if (current->flags & PF_RANDOMIZE)
-                map_base -= (get_random_int() & 0x1f) * PAGE_SIZE;
+                map_base -= prandom_u32_max(0x20) * PAGE_SIZE;
 
         vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0);
 
+1 -1
arch/powerpc/kernel/process.c
@@ -2308,6 +2308,6 @@
 unsigned long arch_align_stack(unsigned long sp)
 {
         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-                sp -= get_random_int() & ~PAGE_MASK;
+                sp -= prandom_u32_max(PAGE_SIZE);
         return sp & ~0xf;
 }
+1 -1
arch/s390/kernel/process.c
@@ -224,7 +224,7 @@
 unsigned long arch_align_stack(unsigned long sp)
 {
         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-                sp -= get_random_int() & ~PAGE_MASK;
+                sp -= prandom_u32_max(PAGE_SIZE);
         return sp & ~0xf;
 }
 
+1 -1
arch/s390/kernel/vdso.c
@@ -227,7 +227,7 @@
         end -= len;
 
         if (end > start) {
-                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+                offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1);
                 addr = start + (offset << PAGE_SHIFT);
         } else {
                 addr = start;
+1 -1
arch/sparc/vdso/vma.c
@@ -354,7 +354,7 @@
         unsigned int offset;
 
         /* This loses some more bits than a modulo, but is cheaper */
-        offset = get_random_int() & (PTRS_PER_PTE - 1);
+        offset = prandom_u32_max(PTRS_PER_PTE);
         return start + (offset << PAGE_SHIFT);
 }
 
+1 -1
arch/um/kernel/process.c
@@ -356,7 +356,7 @@
 unsigned long arch_align_stack(unsigned long sp)
 {
         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-                sp -= get_random_int() % 8192;
+                sp -= prandom_u32_max(8192);
         return sp & ~0xf;
 }
 #endif
+1 -1
arch/x86/entry/vdso/vma.c
@@ -327,7 +327,7 @@
         end -= len;
 
         if (end > start) {
-                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+                offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1);
                 addr = start + (offset << PAGE_SHIFT);
         } else {
                 addr = start;
+1 -1
arch/x86/kernel/module.c
@@ -53,7 +53,7 @@
                  */
                 if (module_load_offset == 0)
                         module_load_offset =
-                                (get_random_int() % 1024 + 1) * PAGE_SIZE;
+                                (prandom_u32_max(1024) + 1) * PAGE_SIZE;
                 mutex_unlock(&module_kaslr_mutex);
         }
         return module_load_offset;
+1 -1
arch/x86/kernel/process.c
@@ -965,7 +965,7 @@
 unsigned long arch_align_stack(unsigned long sp)
 {
         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-                sp -= get_random_int() % 8192;
+                sp -= prandom_u32_max(8192);
         return sp & ~0xf;
 }
 
+2 -2
arch/x86/mm/pat/cpa-test.c
@@ -136,10 +136,10 @@
         failed += print_split(&sa);
 
         for (i = 0; i < NTEST; i++) {
-                unsigned long pfn = prandom_u32() % max_pfn_mapped;
+                unsigned long pfn = prandom_u32_max(max_pfn_mapped);
 
                 addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
-                len[i] = prandom_u32() % NPAGES;
+                len[i] = prandom_u32_max(NPAGES);
                 len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
 
                 if (len[i] == 0)
+43 -43
crypto/testmgr.c
@@ -855,9 +855,9 @@
 /* Generate a random length in range [0, max_len], but prefer smaller values */
 static unsigned int generate_random_length(unsigned int max_len)
 {
-        unsigned int len = prandom_u32() % (max_len + 1);
+        unsigned int len = prandom_u32_max(max_len + 1);
 
-        switch (prandom_u32() % 4) {
+        switch (prandom_u32_max(4)) {
         case 0:
                 return len % 64;
         case 1:
@@ -874,14 +874,14 @@
 {
         size_t bitpos;
 
-        bitpos = prandom_u32() % (size * 8);
+        bitpos = prandom_u32_max(size * 8);
         buf[bitpos / 8] ^= 1 << (bitpos % 8);
 }
 
 /* Flip a random byte in the given nonempty data buffer */
 static void flip_random_byte(u8 *buf, size_t size)
 {
-        buf[prandom_u32() % size] ^= 0xff;
+        buf[prandom_u32_max(size)] ^= 0xff;
 }
 
 /* Sometimes make some random changes to the given nonempty data buffer */
@@ -891,15 +891,15 @@
         size_t i;
 
         /* Sometimes flip some bits */
-        if (prandom_u32() % 4 == 0) {
-                num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8);
+        if (prandom_u32_max(4) == 0) {
+                num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8);
                 for (i = 0; i < num_flips; i++)
                         flip_random_bit(buf, size);
         }
 
         /* Sometimes flip some bytes */
-        if (prandom_u32() % 4 == 0) {
-                num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size);
+        if (prandom_u32_max(4) == 0) {
+                num_flips = min_t(size_t, 1 << prandom_u32_max(8), size);
                 for (i = 0; i < num_flips; i++)
                         flip_random_byte(buf, size);
         }
@@ -915,11 +915,11 @@
         if (count == 0)
                 return;
 
-        switch (prandom_u32() % 8) { /* Choose a generation strategy */
+        switch (prandom_u32_max(8)) { /* Choose a generation strategy */
         case 0:
         case 1:
                 /* All the same byte, plus optional mutations */
-                switch (prandom_u32() % 4) {
+                switch (prandom_u32_max(4)) {
                 case 0:
                         b = 0x00;
                         break;
@@ -959,24 +959,24 @@
                 unsigned int this_len;
                 const char *flushtype_str;
 
-                if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
+                if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0)
                         this_len = remaining;
                 else
-                        this_len = 1 + (prandom_u32() % remaining);
+                        this_len = 1 + prandom_u32_max(remaining);
                 div->proportion_of_total = this_len;
 
-                if (prandom_u32() % 4 == 0)
-                        div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128);
-                else if (prandom_u32() % 2 == 0)
-                        div->offset = prandom_u32() % 32;
+                if (prandom_u32_max(4) == 0)
+                        div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128);
+                else if (prandom_u32_max(2) == 0)
+                        div->offset = prandom_u32_max(32);
                 else
-                        div->offset = prandom_u32() % PAGE_SIZE;
-                if (prandom_u32() % 8 == 0)
+                        div->offset = prandom_u32_max(PAGE_SIZE);
+                if (prandom_u32_max(8) == 0)
                         div->offset_relative_to_alignmask = true;
 
                 div->flush_type = FLUSH_TYPE_NONE;
                 if (gen_flushes) {
-                        switch (prandom_u32() % 4) {
+                        switch (prandom_u32_max(4)) {
                         case 0:
                                 div->flush_type = FLUSH_TYPE_REIMPORT;
                                 break;
@@ -988,7 +988,7 @@
 
                 if (div->flush_type != FLUSH_TYPE_NONE &&
                     !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
-                    prandom_u32() % 2 == 0)
+                    prandom_u32_max(2) == 0)
                         div->nosimd = true;
 
                 switch (div->flush_type) {
@@ -1035,7 +1035,7 @@
 
         p += scnprintf(p, end - p, "random:");
 
-        switch (prandom_u32() % 4) {
+        switch (prandom_u32_max(4)) {
         case 0:
         case 1:
                 cfg->inplace_mode = OUT_OF_PLACE;
@@ -1050,12 +1050,12 @@
                 break;
         }
 
-        if (prandom_u32() % 2 == 0) {
+        if (prandom_u32_max(2) == 0) {
                 cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
                 p += scnprintf(p, end - p, " may_sleep");
         }
 
-        switch (prandom_u32() % 4) {
+        switch (prandom_u32_max(4)) {
         case 0:
                 cfg->finalization_type = FINALIZATION_TYPE_FINAL;
                 p += scnprintf(p, end - p, " use_final");
@@ -1071,7 +1071,7 @@
         }
 
         if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
-            prandom_u32() % 2 == 0) {
+            prandom_u32_max(2) == 0) {
                 cfg->nosimd = true;
                 p += scnprintf(p, end - p, " nosimd");
         }
@@ -1084,7 +1084,7 @@
                                cfg->req_flags);
         p += scnprintf(p, end - p, "]");
 
-        if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32() % 2 == 0) {
+        if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) {
                 p += scnprintf(p, end - p, " dst_divs=[");
                 p = generate_random_sgl_divisions(cfg->dst_divs,
                                                   ARRAY_SIZE(cfg->dst_divs),
@@ -1093,13 +1093,13 @@
                 p += scnprintf(p, end - p, "]");
         }
 
-        if (prandom_u32() % 2 == 0) {
-                cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
+        if (prandom_u32_max(2) == 0) {
+                cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
                 p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
         }
 
-        if (prandom_u32() % 2 == 0) {
-                cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
+        if (prandom_u32_max(2) == 0) {
+                cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
                 p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
         }
 
@@ -1652,8 +1652,8 @@
         vec->ksize = 0;
         if (maxkeysize) {
                 vec->ksize = maxkeysize;
-                if (prandom_u32() % 4 == 0)
-                        vec->ksize = 1 + (prandom_u32() % maxkeysize);
+                if (prandom_u32_max(4) == 0)
+                        vec->ksize = 1 + prandom_u32_max(maxkeysize);
                 generate_random_bytes((u8 *)vec->key, vec->ksize);
 
                 vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
@@ -2218,13 +2218,13 @@
         const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
         const unsigned int authsize = vec->clen - vec->plen;
 
-        if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) {
+        if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) {
                 /* Mutate the AAD */
                 flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
-                if (prandom_u32() % 2 == 0)
+                if (prandom_u32_max(2) == 0)
                         return;
         }
-        if (prandom_u32() % 2 == 0) {
+        if (prandom_u32_max(2) == 0) {
                 /* Mutate auth tag (assuming it's at the end of ciphertext) */
                 flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
         } else {
@@ -2249,7 +2249,7 @@
         const unsigned int ivsize = crypto_aead_ivsize(tfm);
         const unsigned int authsize = vec->clen - vec->plen;
         const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
-                                 (prefer_inauthentic || prandom_u32() % 4 == 0);
+                                 (prefer_inauthentic || prandom_u32_max(4) == 0);
 
         /* Generate the AAD. */
         generate_random_bytes((u8 *)vec->assoc, vec->alen);
@@ -2257,7 +2257,7 @@
                 /* Avoid implementation-defined behavior. */
                 memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
 
-        if (inauthentic && prandom_u32() % 2 == 0) {
+        if (inauthentic && prandom_u32_max(2) == 0) {
                 /* Generate a random ciphertext. */
                 generate_random_bytes((u8 *)vec->ctext, vec->clen);
         } else {
@@ -2321,8 +2321,8 @@
 
         /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
         vec->klen = maxkeysize;
-        if (prandom_u32() % 4 == 0)
-                vec->klen = prandom_u32() % (maxkeysize + 1);
+        if (prandom_u32_max(4) == 0)
+                vec->klen = prandom_u32_max(maxkeysize + 1);
         generate_random_bytes((u8 *)vec->key, vec->klen);
         vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
 
@@ -2331,8 +2331,8 @@
 
         /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
         authsize = maxauthsize;
-        if (prandom_u32() % 4 == 0)
-                authsize = prandom_u32() % (maxauthsize + 1);
+        if (prandom_u32_max(4) == 0)
+                authsize = prandom_u32_max(maxauthsize + 1);
         if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
                 authsize = MIN_COLLISION_FREE_AUTHSIZE;
         if (WARN_ON(authsize > maxdatasize))
@@ -2342,7 +2342,7 @@
 
         /* AAD, plaintext, and ciphertext lengths */
         total_len = generate_random_length(maxdatasize);
-        if (prandom_u32() % 4 == 0)
+        if (prandom_u32_max(4) == 0)
                 vec->alen = 0;
         else
                 vec->alen = generate_random_length(total_len);
@@ -2958,8 +2958,8 @@
 
         /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
         vec->klen = maxkeysize;
-        if (prandom_u32() % 4 == 0)
-                vec->klen = prandom_u32() % (maxkeysize + 1);
+        if (prandom_u32_max(4) == 0)
+                vec->klen = prandom_u32_max(maxkeysize + 1);
         generate_random_bytes((u8 *)vec->key, vec->klen);
         vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
 
+2 -2
drivers/block/drbd/drbd_receiver.c
@@ -781,7 +781,7 @@
 
         timeo = connect_int * HZ;
         /* 28.5% random jitter */
-        timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
+        timeo += prandom_u32_max(2) ? timeo / 7 : -timeo / 7;
 
         err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
         if (err <= 0)
@@ -1004,7 +1004,7 @@
                         drbd_warn(connection, "Error receiving initial packet\n");
                         sock_release(s);
 randomize:
-                        if (prandom_u32() & 1)
+                        if (prandom_u32_max(2))
                                 goto retry;
                 }
         }
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2424,7 +2424,7 @@
         /* Check whether the file_priv has already selected one ring. */
         if ((int)file_priv->bsd_engine < 0)
                 file_priv->bsd_engine =
-                        get_random_int() % num_vcs_engines(dev_priv);
+                        prandom_u32_max(num_vcs_engines(dev_priv));
 
         return file_priv->bsd_engine;
 }
+1 -1
drivers/infiniband/core/cma.c
@@ -3807,7 +3807,7 @@
 
         inet_get_local_port_range(net, &low, &high);
         remaining = (high - low) + 1;
-        rover = prandom_u32() % remaining + low;
+        rover = prandom_u32_max(remaining) + low;
 retry:
         if (last_used_port != rover) {
                 struct rdma_bind_list *bind_list;
+2 -2
drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,7 +54,7 @@
 
         if (obj < alloc->max) {
                 if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
-                        alloc->last += prandom_u32() % RANDOM_SKIP;
+                        alloc->last += prandom_u32_max(RANDOM_SKIP);
                 else
                         alloc->last = obj + 1;
                 if (alloc->last >= alloc->max)
@@ -85,7 +85,7 @@
         alloc->start = start;
         alloc->flags = flags;
         if (flags & C4IW_ID_TABLE_F_RANDOM)
-                alloc->last = prandom_u32() % RANDOM_SKIP;
+                alloc->last = prandom_u32_max(RANDOM_SKIP);
         else
                 alloc->last = 0;
         alloc->max = num;
+2 -3
drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -41,9 +41,8 @@
         u16 sport;
 
         if (!fl)
-                sport = get_random_u32() %
-                        (IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
-                         IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
+                sport = prandom_u32_max(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
+                                        IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
                         IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
         else
                 sport = rdma_flow_label_to_udp_sport(fl);
+1 -2
drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1517,8 +1517,7 @@
         rtrs_clt_stop_and_destroy_conns(clt_path);
         queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
                            msecs_to_jiffies(delay_ms +
-                                            prandom_u32() %
-                                            RTRS_RECONNECT_SEED));
+                                            prandom_u32_max(RTRS_RECONNECT_SEED)));
 }
 
 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
+1 -1
drivers/md/bcache/request.c
@@ -401,7 +401,7 @@
         }
 
         if (bypass_torture_test(dc)) {
-                if ((get_random_int() & 3) == 3)
+                if (prandom_u32_max(4) == 3)
                         goto skip;
                 else
                         goto rescale;
+1 -1
drivers/media/test-drivers/vivid/vivid-touch-cap.c
@@ -221,7 +221,7 @@
 
 static inline int get_random_pressure(void)
 {
-        return get_random_int() % VIVID_PRESSURE_LIMIT;
+        return prandom_u32_max(VIVID_PRESSURE_LIMIT);
 }
 
 static void vivid_tch_buf_set(struct v4l2_pix_format *f,
+2 -2
drivers/mmc/core/core.c
@@ -97,8 +97,8 @@
             !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
                 return;
 
-        data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
-        data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+        data->error = data_errors[prandom_u32_max(ARRAY_SIZE(data_errors))];
+        data->bytes_xfered = prandom_u32_max(data->bytes_xfered >> 9) << 9;
 }
 
 #else /* CONFIG_FAIL_MMC_REQUEST */
+1 -1
drivers/mmc/host/dw_mmc.c
@@ -1858,7 +1858,7 @@
          * Try to inject the error at random points during the data transfer.
          */
         hrtimer_start(&host->fault_timer,
-                      ms_to_ktime(prandom_u32() % 25),
+                      ms_to_ktime(prandom_u32_max(25)),
                       HRTIMER_MODE_REL);
 }
 
+2 -2
drivers/mtd/nand/raw/nandsim.c
@@ -1405,9 +1405,9 @@
         if (bitflips && prandom_u32() < (1 << 22)) {
                 int flips = 1;
                 if (bitflips > 1)
-                        flips = (prandom_u32() % (int) bitflips) + 1;
+                        flips = prandom_u32_max(bitflips) + 1;
                 while (flips--) {
-                        int pos = prandom_u32() % (num * 8);
+                        int pos = prandom_u32_max(num * 8);
                         ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
                         NS_WARN("read_page: flipping bit %d in page %d "
                                 "reading from %d ecc: corrected=%u failed=%u\n",
+5 -5
drivers/mtd/tests/mtd_nandecctest.c
@@ -47,7 +47,7 @@
 static void single_bit_error_data(void *error_data, void *correct_data,
                                   size_t size)
 {
-        unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
+        unsigned int offset = prandom_u32_max(size * BITS_PER_BYTE);
 
         memcpy(error_data, correct_data, size);
         __change_bit_le(offset, error_data);
@@ -58,9 +58,9 @@
 {
         unsigned int offset[2];
 
-        offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
+        offset[0] = prandom_u32_max(size * BITS_PER_BYTE);
         do {
-                offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
+                offset[1] = prandom_u32_max(size * BITS_PER_BYTE);
         } while (offset[0] == offset[1]);
 
         memcpy(error_data, correct_data, size);
@@ -71,7 +71,7 @@
 
 static unsigned int random_ecc_bit(size_t size)
 {
-        unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
+        unsigned int offset = prandom_u32_max(3 * BITS_PER_BYTE);
 
         if (size == 256) {
                 /*
@@ -79,7 +79,7 @@
                  * and 17th bit) in ECC code for 256 byte data block
                  */
                 while (offset == 16 || offset == 17)
-                        offset = prandom_u32() % (3 * BITS_PER_BYTE);
+                        offset = prandom_u32_max(3 * BITS_PER_BYTE);
         }
 
         return offset;
+4 -13
drivers/mtd/tests/stresstest.c
@@ -45,9 +45,8 @@
         unsigned int eb;
 
 again:
-        eb = prandom_u32();
         /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
-        eb %= (ebcnt - 1);
+        eb = prandom_u32_max(ebcnt - 1);
         if (bbt[eb])
                 goto again;
         return eb;
@@ -54,20 +55,12 @@
 
 static int rand_offs(void)
 {
-        unsigned int offs;
-
-        offs = prandom_u32();
-        offs %= bufsize;
-        return offs;
+        return prandom_u32_max(bufsize);
 }
 
 static int rand_len(int offs)
 {
-        unsigned int len;
-
-        len = prandom_u32();
-        len %= (bufsize - offs);
-        return len;
+        return prandom_u32_max(bufsize - offs);
 }
 
 static int do_read(void)
@@ -118,7 +127,7 @@
 
 static int do_operation(void)
 {
-        if (prandom_u32() & 1)
+        if (prandom_u32_max(2))
                 return do_read();
         else
                 return do_write();
+1 -1
drivers/mtd/ubi/debug.c
@@ -590,7 +590,7 @@
 
         if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
                 range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
-                ubi->dbg.power_cut_counter += prandom_u32() % range;
+                ubi->dbg.power_cut_counter += prandom_u32_max(range);
         }
         return 0;
 }
+3 -3
drivers/mtd/ubi/debug.h
@@ -73,7 +73,7 @@
 static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
 {
         if (ubi->dbg.emulate_bitflips)
-                return !(prandom_u32() % 200);
+                return !prandom_u32_max(200);
         return 0;
 }
 
@@ -87,7 +87,7 @@
 static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
 {
         if (ubi->dbg.emulate_io_failures)
-                return !(prandom_u32() % 500);
+                return !prandom_u32_max(500);
         return 0;
 }
 
@@ -101,7 +101,7 @@
 static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 {
         if (ubi->dbg.emulate_io_failures)
-                return !(prandom_u32() % 400);
+                return !prandom_u32_max(400);
         return 0;
 }
 
+1 -2
drivers/net/ethernet/broadcom/cnic.c
@@ -4105,8 +4105,7 @@
         for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
                 atomic_set(&cp->csk_tbl[i].ref_count, 0);
 
-        port_id = prandom_u32();
-        port_id %= CNIC_LOCAL_PORT_RANGE;
+        port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE);
         if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
                              CNIC_LOCAL_PORT_MIN, port_id)) {
                 cnic_cm_free_mem(dev);
+2 -2
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -919,8 +919,8 @@
         current_timeo = *timeo_p;
         noblock = (*timeo_p ? false : true);
         if (csk_mem_free(cdev, sk)) {
-                current_timeo = (prandom_u32() % (HZ / 5)) + 2;
-                vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+                current_timeo = prandom_u32_max(HZ / 5) + 2;
+                vm_wait = prandom_u32_max(HZ / 5) + 2;
         }
 
         add_wait_queue(sk_sleep(sk), &wait);
+1 -1
drivers/net/hamradio/baycom_epp.c
@@ -438,7 +438,7 @@
                 if ((--bc->hdlctx.slotcnt) > 0)
                         return 0;
                 bc->hdlctx.slotcnt = bc->ch_params.slottime;
-                if ((prandom_u32() % 256) > bc->ch_params.ppersist)
+                if (prandom_u32_max(256) > bc->ch_params.ppersist)
                         return 0;
         }
 }
+1 -1
drivers/net/hamradio/hdlcdrv.c
@@ -377,7 +377,7 @@
         if ((--s->hdlctx.slotcnt) > 0)
                 return;
         s->hdlctx.slotcnt = s->ch_params.slottime;
-        if ((prandom_u32() % 256) > s->ch_params.ppersist)
+        if (prandom_u32_max(256) > s->ch_params.ppersist)
                 return;
         start_tx(dev, s);
 }
+1 -1
drivers/net/hamradio/yam.c
@@ -626,7 +626,7 @@
                 yp->slotcnt = yp->slot / 10;
 
         /* is random > persist ? */
-        if ((prandom_u32() % 256) > yp->pers)
+        if (prandom_u32_max(256) > yp->pers)
                 return;
 
         yam_start_tx(dev, yp);
+1 -1
drivers/net/phy/at803x.c
@@ -1758,7 +1758,7 @@
 
 static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev)
 {
-        u16 seed_value = (prandom_u32() % QCA808X_MASTER_SLAVE_SEED_RANGE);
+        u16 seed_value = prandom_u32_max(QCA808X_MASTER_SLAVE_SEED_RANGE);
 
         return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
                         QCA808X_MASTER_SLAVE_SEED_CFG,
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1128,7 +1128,7 @@
         if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
                 /* 100ms ~ 300ms */
                 err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
-                                                100 * (1 + prandom_u32() % 3));
+                                                100 * (1 + prandom_u32_max(3)));
         else
                 err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
 
+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1099,7 +1099,7 @@
                                             iwl_mvm_mac_ap_iterator, &data);
 
         if (data.beacon_device_ts) {
-                u32 rand = (prandom_u32() % (64 - 36)) + 36;
+                u32 rand = prandom_u32_max(64 - 36) + 36;
                 mvmvif->ap_beacon_time = data.beacon_device_ts +
                         ieee80211_tu_to_usec(data.beacon_int * rand /
                                              100);
+2 -2
drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2233,7 +2233,7 @@
 
         if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
                 fip->probe_tries++;
-                wait = prandom_u32() % FIP_VN_PROBE_WAIT;
+                wait = prandom_u32_max(FIP_VN_PROBE_WAIT);
         } else
                 wait = FIP_VN_RLIM_INT;
         mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
@@ -3125,7 +3125,7 @@
                                           fcoe_all_vn2vn, 0);
                 fip->port_ka_time = jiffies +
                         msecs_to_jiffies(FIP_VN_BEACON_INT +
-                                         (prandom_u32() % FIP_VN_BEACON_FUZZ));
+                                         prandom_u32_max(FIP_VN_BEACON_FUZZ));
         }
         if (time_before(fip->port_ka_time, next_time))
                 next_time = fip->port_ka_time;
+1 -1
drivers/scsi/qedi/qedi_main.c
@@ -618,7 +618,7 @@
                           sizeof(struct qedi_endpoint *)), GFP_KERNEL);
         if (!qedi->ep_tbl)
                 return -ENOMEM;
-        port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+        port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE);
         if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
                              QEDI_LOCAL_PORT_MIN, port_id)) {
                 qedi_cm_free_mem(qedi);
+1 -1
fs/ceph/inode.c
@@ -362,7 +362,7 @@
         if (nsplits != ci->i_fragtree_nsplits) {
                 update = true;
         } else if (nsplits) {
-                i = prandom_u32() % nsplits;
+                i = prandom_u32_max(nsplits);
                 id = le32_to_cpu(fragtree->splits[i].frag);
                 if (!__ceph_find_frag(ci, id))
                         update = true;
+1 -1
fs/ceph/mdsmap.c
@@ -29,7 +29,7 @@
                 return -1;
 
         /* pick */
-        n = prandom_u32() % n;
+        n = prandom_u32_max(n);
         for (j = 0, i = 0; i < m->possible_max_rank; i++) {
                 if (CEPH_MDS_IS_READY(i, ignore_laggy))
                         j++;
+3 -4
fs/ext4/super.c
@@ -3782,8 +3782,7 @@
                 }
                 if (!progress) {
                         elr->lr_next_sched = jiffies +
-                                (prandom_u32()
-                                 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
+                                prandom_u32_max(EXT4_DEF_LI_MAX_START_DELAY * HZ);
                 }
                 if (time_before(elr->lr_next_sched, next_wakeup))
                         next_wakeup = elr->lr_next_sched;
@@ -3929,8 +3930,8 @@
          * spread the inode table initialization requests
          * better.
          */
-        elr->lr_next_sched = jiffies + (prandom_u32() %
-                                (EXT4_DEF_LI_MAX_START_DELAY * HZ));
+        elr->lr_next_sched = jiffies + prandom_u32_max(
+                                EXT4_DEF_LI_MAX_START_DELAY * HZ);
         return elr;
 }
 
+1 -1
fs/f2fs/gc.c
@@ -282,7 +282,7 @@
 
         /* let's select beginning hot/small space first in no_heap mode*/
         if (f2fs_need_rand_seg(sbi))
-                p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+                p->offset = prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
         else if (test_opt(sbi, NOHEAP) &&
                 (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
                 p->offset = 0;
+4 -4
fs/f2fs/segment.c
@@ -2534,7 +2534,7 @@
 
         sanity_check_seg_type(sbi, seg_type);
         if (f2fs_need_rand_seg(sbi))
-                return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+                return prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
 
         /* if segs_per_sec is large than 1, we need to keep original policy. */
         if (__is_large_section(sbi))
@@ -2588,7 +2588,7 @@
         curseg->alloc_type = LFS;
         if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
                 curseg->fragment_remained_chunk =
-                                prandom_u32() % sbi->max_fragment_chunk + 1;
+                                prandom_u32_max(sbi->max_fragment_chunk) + 1;
 }
 
 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2625,9 +2625,9 @@
                 /* To allocate block chunks in different sizes, use random number */
                 if (--seg->fragment_remained_chunk <= 0) {
                         seg->fragment_remained_chunk =
-                                prandom_u32() % sbi->max_fragment_chunk + 1;
+                                prandom_u32_max(sbi->max_fragment_chunk) + 1;
                         seg->next_blkoff +=
-                                prandom_u32() % sbi->max_fragment_hole + 1;
+                                prandom_u32_max(sbi->max_fragment_hole) + 1;
                 }
         }
 }
+4 -4
fs/ubifs/debug.c
@@ -2467,7 +2467,7 @@
 
 static inline int chance(unsigned int n, unsigned int out_of)
 {
-        return !!((prandom_u32() % out_of) + 1 <= n);
+        return !!(prandom_u32_max(out_of) + 1 <= n);
 
 }
 
@@ -2485,13 +2485,13 @@
                 if (chance(1, 2)) {
                         d->pc_delay = 1;
                         /* Fail within 1 minute */
-                        delay = prandom_u32() % 60000;
+                        delay = prandom_u32_max(60000);
                         d->pc_timeout = jiffies;
                         d->pc_timeout += msecs_to_jiffies(delay);
                         ubifs_warn(c, "failing after %lums", delay);
                 } else {
                         d->pc_delay = 2;
-                        delay = prandom_u32() % 10000;
+                        delay = prandom_u32_max(10000);
                         /* Fail within 10000 operations */
                         d->pc_cnt_max = delay;
                         ubifs_warn(c, "failing after %lu calls", delay);
@@ -2571,7 +2571,7 @@
         unsigned int from, to, ffs = chance(1, 2);
         unsigned char *p = (void *)buf;
 
-        from = prandom_u32() % len;
+        from = prandom_u32_max(len);
         /* Corruption span max to end of write unit */
         to = min(len, ALIGN(from + 1, c->max_write_size));
 
+7 -7
fs/ubifs/lpt_commit.c
@@ -1970,28 +1970,28 @@
 
         if (!dbg_is_chk_gen(c))
                 return 0;
-        if (prandom_u32() & 3)
+        if (prandom_u32_max(4))
                 return 0;
 
         for (i = 0; i < c->lsave_cnt; i++)
                 c->lsave[i] = c->main_first;
 
         list_for_each_entry(lprops, &c->empty_list, list)
-                c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
+                c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
         list_for_each_entry(lprops, &c->freeable_list, list)
-                c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
+                c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
         list_for_each_entry(lprops, &c->frdi_idx_list, list)
-                c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
+                c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
 
         heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
         for (i = 0; i < heap->cnt; i++)
-                c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
+                c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
         heap = &c->lpt_heap[LPROPS_DIRTY - 1];
         for (i = 0; i < heap->cnt; i++)
-                c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
+                c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
         heap = &c->lpt_heap[LPROPS_FREE - 1];
         for (i = 0; i < heap->cnt; i++)
-                c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
+                c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
 
         return 1;
 }
+1 -1
fs/ubifs/tnc_commit.c
@@ -700,7 +700,7 @@
                 c->ilebs[c->ileb_cnt++] = lnum;
                 dbg_cmt("LEB %d", lnum);
         }
-        if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
+        if (dbg_is_chk_index(c) && !prandom_u32_max(8))
                 return -ENOSPC;
         return 0;
 }
+1 -1
fs/xfs/libxfs/xfs_alloc.c
@@ -1520,7 +1520,7 @@
 
 #ifdef DEBUG
         /* Randomly don't execute the first algorithm. */
-        if (prandom_u32() & 1)
+        if (prandom_u32_max(2))
                 return 0;
 #endif
 
+1 -1
fs/xfs/libxfs/xfs_ialloc.c
@@ -636,7 +636,7 @@
         /* randomly do sparse inode allocations */
         if (xfs_has_sparseinodes(tp->t_mountp) &&
             igeo->ialloc_min_blks < igeo->ialloc_blks)
-                do_sparse = prandom_u32() & 1;
+                do_sparse = prandom_u32_max(2);
 #endif
 
         /*
+1 -1
fs/xfs/xfs_error.c
@@ -274,7 +274,7 @@
 
         ASSERT(error_tag < XFS_ERRTAG_MAX);
         randfactor = mp->m_errortag[error_tag];
-        if (!randfactor || prandom_u32() % randfactor)
+        if (!randfactor || prandom_u32_max(randfactor))
                 return false;
 
         xfs_warn_ratelimited(mp,
+1 -1
include/linux/nodemask.h
@@ -516,7 +516,7 @@
                 bit = first_node(*maskp);
                 break;
         default:
-                bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_int() % w);
+                bit = find_nth_bit(maskp->bits, MAX_NUMNODES, prandom_u32_max(w));
                 break;
         }
         return bit;
+2 -2
kernel/bpf/core.c
@@ -1032,7 +1032,7 @@
         hdr->size = size;
         hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                      PAGE_SIZE - sizeof(*hdr));
-        start = (get_random_int() % hole) & ~(alignment - 1);
+        start = prandom_u32_max(hole) & ~(alignment - 1);
 
         /* Leave a random number of instructions before BPF code. */
         *image_ptr = &hdr->image[start];
@@ -1094,7 +1094,7 @@
 
         hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
                      BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
-        start = (get_random_int() % hole) & ~(alignment - 1);
+        start = prandom_u32_max(hole) & ~(alignment - 1);
 
         *image_ptr = &ro_header->image[start];
         *rw_image = &(*rw_header)->image[start];
+2 -2
kernel/locking/test-ww_mutex.c
@@ -399,7 +399,7 @@
                 order[n] = n;
 
         for (n = count - 1; n > 1; n--) {
-                r = get_random_int() % (n + 1);
+                r = prandom_u32_max(n + 1);
                 if (r != n) {
                         tmp = order[n];
                         order[n] = order[r];
@@ -538,7 +538,7 @@
 {
         struct stress *stress = container_of(work, typeof(*stress), work);
         const int nlocks = stress->nlocks;
-        struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
+        struct ww_mutex *lock = stress->locks + prandom_u32_max(nlocks);
         int err;
 
         do {
+1 -1
kernel/time/clocksource.c
@@ -310,7 +310,7 @@
          * CPUs that are currently online.
          */
         for (i = 1; i < n; i++) {
-                cpu = prandom_u32() % nr_cpu_ids;
+                cpu = prandom_u32_max(nr_cpu_ids);
                 cpu = cpumask_next(cpu - 1, cpu_online_mask);
                 if (cpu >= nr_cpu_ids)
                         cpu = cpumask_first(cpu_online_mask);
+1 -1
lib/fault-inject.c
@@ -139,7 +139,7 @@
                 return false;
         }
 
-        if (attr->probability <= prandom_u32() % 100)
+        if (attr->probability <= prandom_u32_max(100))
                 return false;
 
         if (!fail_stacktrace(attr))
+2 -2
lib/find_bit_benchmark.c
@@ -174,8 +174,8 @@
         bitmap_zero(bitmap2, BITMAP_LEN);
 
         while (nbits--) {
-                __set_bit(prandom_u32() % BITMAP_LEN, bitmap);
-                __set_bit(prandom_u32() % BITMAP_LEN, bitmap2);
+                __set_bit(prandom_u32_max(BITMAP_LEN), bitmap);
+                __set_bit(prandom_u32_max(BITMAP_LEN), bitmap2);
         }
 
         test_find_next_bit(bitmap, BITMAP_LEN);
+1 -1
lib/kobject.c
@@ -694,7 +694,7 @@
 {
         struct kobject *kobj = container_of(kref, struct kobject, kref);
 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
-        unsigned long delay = HZ + HZ * (get_random_int() & 0x3);
+        unsigned long delay = HZ + HZ * prandom_u32_max(4);
         pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
                  kobject_name(kobj), kobj, __func__, kobj->parent, delay);
         INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
+3 -3
lib/reed_solomon/test_rslib.c
@@ -183,7 +183,7 @@
 
         do {
                 /* Must not choose the same location twice */
-                errloc = prandom_u32() % len;
+                errloc = prandom_u32_max(len);
         } while (errlocs[errloc] != 0);
 
         errlocs[errloc] = 1;
@@ -194,12 +194,12 @@
         for (i = 0; i < eras; i++) {
                 do {
                         /* Must not choose the same location twice */
-                        errloc = prandom_u32() % len;
+                        errloc = prandom_u32_max(len);
                 } while (errlocs[errloc] != 0);
 
                 derrlocs[i] = errloc;
 
-                if (ewsc && (prandom_u32() & 1)) {
+                if (ewsc && prandom_u32_max(2)) {
                         /* Erasure with the symbol intact */
                         errlocs[errloc] = 2;
                 } else {
+1 -1
lib/sbitmap.c
@@ -33,7 +33,7 @@
 
         hint = this_cpu_read(*sb->alloc_hint);
         if (unlikely(hint >= depth)) {
-                hint = depth ? prandom_u32() % depth : 0;
+                hint = depth ? prandom_u32_max(depth) : 0;
                 this_cpu_write(*sb->alloc_hint, hint);
         }
 
+1 -1
lib/test-string_helpers.c
@@ -587,7 +587,7 @@
         for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
                 test_string_unescape("unescape", i, false);
         test_string_unescape("unescape inplace",
-                             get_random_int() % (UNESCAPE_ANY + 1), true);
+                             prandom_u32_max(UNESCAPE_ANY + 1), true);
 
         /* Without dictionary */
         for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
+5 -5
lib/test_hexdump.c
@@ -149,7 +149,7 @@
 static void __init test_hexdump_set(int rowsize, bool ascii)
 {
         size_t d = min_t(size_t, sizeof(data_b), rowsize);
-        size_t len = get_random_int() % d + 1;
+        size_t len = prandom_u32_max(d) + 1;
 
         test_hexdump(len, rowsize, 4, ascii);
         test_hexdump(len, rowsize, 2, ascii);
@@ -208,11 +208,11 @@
 static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
 {
         unsigned int i = 0;
-        int rs = (get_random_int() % 2 + 1) * 16;
+        int rs = (prandom_u32_max(2) + 1) * 16;
 
         do {
                 int gs = 1 << i;
-                size_t len = get_random_int() % rs + gs;
+                size_t len = prandom_u32_max(rs) + gs;
 
                 test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
         } while (i++ < 3);
@@ -223,11 +223,11 @@
         unsigned int i;
         int rowsize;
 
-        rowsize = (get_random_int() % 2 + 1) * 16;
+        rowsize = (prandom_u32_max(2) + 1) * 16;
         for (i = 0; i < 16; i++)
                 test_hexdump_set(rowsize, false);
 
-        rowsize = (get_random_int() % 2 + 1) * 16;
+        rowsize = (prandom_u32_max(2) + 1) * 16;
         for (i = 0; i < 16; i++)
                 test_hexdump_set(rowsize, true);
 
+1 -1
lib/test_list_sort.c
@@ -71,7 +71,7 @@
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
 
         /* force some equivalencies */
-        el->value = prandom_u32() % (TEST_LIST_LEN / 3);
+        el->value = prandom_u32_max(TEST_LIST_LEN / 3);
         el->serial = i;
         el->poison1 = TEST_POISON1;
         el->poison2 = TEST_POISON2;
+3 -3
mm/kasan/kasan_test.c
@@ -1292,7 +1292,7 @@
         KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
 
         for (i = 0; i < 256; i++) {
-                size = (get_random_int() % 1024) + 1;
+                size = prandom_u32_max(1024) + 1;
                 ptr = kmalloc(size, GFP_KERNEL);
                 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
                 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
@@ -1301,7 +1301,7 @@
         }
 
         for (i = 0; i < 256; i++) {
-                order = (get_random_int() % 4) + 1;
+                order = prandom_u32_max(4) + 1;
                 pages = alloc_pages(GFP_KERNEL, order);
                 ptr = page_address(pages);
                 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -1314,7 +1314,7 @@
                 return;
 
         for (i = 0; i < 256; i++) {
-                size = (get_random_int() % 1024) + 1;
+                size = prandom_u32_max(1024) + 1;
                 ptr = vmalloc(size);
                 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
                 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
+1 -1
mm/slub.c
@@ -1881,7 +1881,7 @@
                 return false;
 
         freelist_count = oo_objects(s->oo);
-        pos = get_random_int() % freelist_count;
+        pos = prandom_u32_max(freelist_count);
 
         page_limit = slab->objects * s->size;
         start = fixup_red_left(s, slab_address(slab));
+1 -1
net/802/garp.c
@@ -407,7 +407,7 @@
 {
         unsigned long delay;
 
-        delay = (u64)msecs_to_jiffies(garp_join_time) * prandom_u32() >> 32;
+        delay = prandom_u32_max(msecs_to_jiffies(garp_join_time));
         mod_timer(&app->join_timer, jiffies + delay);
 }
 
+1 -1
net/802/mrp.c
@@ -592,7 +592,7 @@
 {
         unsigned long delay;
 
-        delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
+        delay = prandom_u32_max(msecs_to_jiffies(mrp_join_time));
         mod_timer(&app->join_timer, jiffies + delay);
 }
 
+1 -1
net/ceph/mon_client.c
@@ -222,7 +222,7 @@
                 max--;
         }
 
-        n = prandom_u32() % max;
+        n = prandom_u32_max(max);
         if (o >= 0 && n >= o)
                 n++;
 
+1 -1
net/ceph/osd_client.c
@@ -1479,7 +1479,7 @@
 
 static int pick_random_replica(const struct ceph_osds *acting)
 {
-        int i = prandom_u32() % acting->size;
+        int i = prandom_u32_max(acting->size);
 
         dout("%s picked osd%d, primary osd%d\n", __func__,
              acting->osds[i], acting->primary);
+1 -1
net/core/neighbour.c
@@ -111,7 +111,7 @@
 
 unsigned long neigh_rand_reach_time(unsigned long base)
 {
-        return base ? (prandom_u32() % base) + (base >> 1) : 0;
+        return base ? prandom_u32_max(base) + (base >> 1) : 0;
 }
 EXPORT_SYMBOL(neigh_rand_reach_time);
 
+21 -22
net/core/pktgen.c
@@ -2324,7 +2324,7 @@
                         pkt_dev->curfl = 0; /*reset */
                 }
         } else {
-                flow = prandom_u32() % pkt_dev->cflows;
+                flow = prandom_u32_max(pkt_dev->cflows);
                 pkt_dev->curfl = flow;
 
                 if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
@@ -2380,10 +2380,9 @@
         else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
                 __u16 t;
                 if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-                        t = prandom_u32() %
-                                (pkt_dev->queue_map_max -
-                                 pkt_dev->queue_map_min + 1)
-                                + pkt_dev->queue_map_min;
+                        t = prandom_u32_max(pkt_dev->queue_map_max -
+                                            pkt_dev->queue_map_min + 1) +
+                            pkt_dev->queue_map_min;
                 } else {
                         t = pkt_dev->cur_queue_map + 1;
                         if (t > pkt_dev->queue_map_max)
@@ -2411,7 +2412,7 @@
                 __u32 tmp;
 
                 if (pkt_dev->flags & F_MACSRC_RND)
-                        mc = prandom_u32() % pkt_dev->src_mac_count;
+                        mc = prandom_u32_max(pkt_dev->src_mac_count);
                 else {
                         mc = pkt_dev->cur_src_mac_offset++;
                         if (pkt_dev->cur_src_mac_offset >=
@@ -2437,7 +2438,7 @@
                 __u32 tmp;
 
                 if (pkt_dev->flags & F_MACDST_RND)
-                        mc = prandom_u32() % pkt_dev->dst_mac_count;
+                        mc = prandom_u32_max(pkt_dev->dst_mac_count);
 
                 else {
                         mc = pkt_dev->cur_dst_mac_offset++;
@@ -2469,18 +2470,18 @@
         }
 
         if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
-                pkt_dev->vlan_id = prandom_u32() & (4096 - 1);
+                pkt_dev->vlan_id = prandom_u32_max(4096);
         }
 
         if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
-                pkt_dev->svlan_id = prandom_u32() & (4096 - 1);
+                pkt_dev->svlan_id = prandom_u32_max(4096);
         }
 
         if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
                 if (pkt_dev->flags & F_UDPSRC_RND)
-                        pkt_dev->cur_udp_src = prandom_u32() %
-                                (pkt_dev->udp_src_max - pkt_dev->udp_src_min)
-                                + pkt_dev->udp_src_min;
+                        pkt_dev->cur_udp_src = prandom_u32_max(
+                                pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
+                                pkt_dev->udp_src_min;
 
                 else {
                         pkt_dev->cur_udp_src++;
@@ -2491,9 +2492,9 @@
 
         if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
                 if (pkt_dev->flags & F_UDPDST_RND) {
-                        pkt_dev->cur_udp_dst = prandom_u32() %
-                                (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)
-                                + pkt_dev->udp_dst_min;
+                        pkt_dev->cur_udp_dst = prandom_u32_max(
+                                pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
+                                pkt_dev->udp_dst_min;
                 } else {
                         pkt_dev->cur_udp_dst++;
                         if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
@@ -2508,7 +2509,7 @@
                 if (imn < imx) {
                         __u32 t;
                         if (pkt_dev->flags & F_IPSRC_RND)
-                                t = prandom_u32() % (imx - imn) + imn;
+                                t = prandom_u32_max(imx - imn) + imn;
                         else {
                                 t = ntohl(pkt_dev->cur_saddr);
                                 t++;
@@ -2530,8 +2531,8 @@
                         if (pkt_dev->flags & F_IPDST_RND) {
 
                                 do {
-                                        t = prandom_u32() %
-                                                (imx - imn) + imn;
+                                        t = prandom_u32_max(imx - imn) +
+                                            imn;
                                         s = htonl(t);
                                 } while (ipv4_is_loopback(s) ||
                                          ipv4_is_multicast(s) ||
@@ -2578,9 +2579,9 @@
         if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
                 __u32 t;
                 if (pkt_dev->flags & F_TXSIZE_RND) {
-                        t = prandom_u32() %
-                                (pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)
-                                + pkt_dev->min_pkt_size;
+                        t = prandom_u32_max(pkt_dev->max_pkt_size -
+                                            pkt_dev->min_pkt_size) +
+                            pkt_dev->min_pkt_size;
                 } else {
                         t = pkt_dev->cur_pkt_size + 1;
                         if (t > pkt_dev->max_pkt_size)
@@ -2589,7 +2590,7 @@
                 pkt_dev->cur_pkt_size = t;
         } else if (pkt_dev->n_imix_entries > 0) {
                 struct imix_pkt *entry;
-                __u32 t = prandom_u32() % IMIX_PRECISION;
+                __u32 t = prandom_u32_max(IMIX_PRECISION);
                 __u8 entry_index = pkt_dev->imix_distribution[t];
 
                 entry = &pkt_dev->imix_entries[entry_index];
+1 -1
net/core/stream.c
@@ -123,7 +123,7 @@
         DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
         if (sk_stream_memory_free(sk))
-                current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+                current_timeo = vm_wait = prandom_u32_max(HZ / 5) + 2;
 
         add_wait_queue(sk_sleep(sk), &wait);
 
+3 -3
net/ipv4/igmp.c
@@ -213,7 +213,7 @@
 /* It must be called with locked im->lock */
 static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 {
-        int tv = prandom_u32() % max_delay;
+        int tv = prandom_u32_max(max_delay);
 
         im->tm_running = 1;
         if (!mod_timer(&im->timer, jiffies+tv+2))
@@ -222,7 +222,7 @@
 
 static void igmp_gq_start_timer(struct in_device *in_dev)
 {
-        int tv = prandom_u32() % in_dev->mr_maxdelay;
+        int tv = prandom_u32_max(in_dev->mr_maxdelay);
         unsigned long exp = jiffies + tv + 2;
 
         if (in_dev->mr_gq_running &&
@@ -236,7 +236,7 @@
 
 static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
 {
-        int tv = prandom_u32() % delay;
+        int tv = prandom_u32_max(delay);
 
         if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
                 in_dev_hold(in_dev);
+1 -1
net/ipv4/inet_connection_sock.c
@@ -314,7 +314,7 @@
         if (likely(remaining > 1))
                 remaining &= ~1U;
 
-        offset = prandom_u32() % remaining;
+        offset = prandom_u32_max(remaining);
         /* __inet_hash_connect() favors ports having @low parity
          * We do the opposite to not pollute connect() users.
          */
+1 -1
net/ipv4/inet_hashtables.c
@@ -1037,7 +1037,7 @@
          * on low contention the randomness is maximal and on high contention
          * it may be inexistent.
          */
-        i = max_t(int, i, (prandom_u32() & 7) * 2);
+        i = max_t(int, i, prandom_u32_max(8) * 2);
         WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
 
         /* Head lock still held and bh's disabled */
+4 -4
net/ipv6/addrconf.c
@@ -104,7 +104,7 @@
 static inline s32 rfc3315_s14_backoff_init(s32 irt)
 {
         /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
-        u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
+        u64 tmp = (900000 + prandom_u32_max(200001)) * (u64)irt;
         do_div(tmp, 1000000);
         return (s32)tmp;
 }
@@ -112,11 +112,11 @@
 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
 {
         /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
-        u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
+        u64 tmp = (1900000 + prandom_u32_max(200001)) * (u64)rt;
         do_div(tmp, 1000000);
         if ((s32)tmp > mrt) {
                 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
-                tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
+                tmp = (900000 + prandom_u32_max(200001)) * (u64)mrt;
                 do_div(tmp, 1000000);
         }
         return (s32)tmp;
@@ -3967,7 +3967,7 @@
         if (ifp->flags & IFA_F_OPTIMISTIC)
                 rand_num = 0;
         else
-                rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
+                rand_num = prandom_u32_max(idev->cnf.rtr_solicit_delay ?: 1);
 
         nonce = 0;
         if (idev->cnf.enhanced_dad ||
+5 -5
net/ipv6/mcast.c
@@ -1050,7 +1050,7 @@
 /* called with mc_lock */
 static void mld_gq_start_work(struct inet6_dev *idev)
 {
-        unsigned long tv = prandom_u32() % idev->mc_maxdelay;
+        unsigned long tv = prandom_u32_max(idev->mc_maxdelay);
 
         idev->mc_gq_running = 1;
         if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
@@ -1068,7 +1068,7 @@
 /* called with mc_lock */
 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
 {
-        unsigned long tv = prandom_u32() % delay;
+        unsigned long tv = prandom_u32_max(delay);
 
         if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
                 in6_dev_hold(idev);
@@ -1085,7 +1085,7 @@
 /* called with mc_lock */
 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
 {
-        unsigned long tv = prandom_u32() % delay;
+        unsigned long tv = prandom_u32_max(delay);
 
         if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
                 in6_dev_hold(idev);
@@ -1130,7 +1130,7 @@
         }
 
         if (delay >= resptime)
-                delay = prandom_u32() % resptime;
+                delay = prandom_u32_max(resptime);
 
         if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
                 refcount_inc(&ma->mca_refcnt);
@@ -2574,7 +2574,7 @@
 
         igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
 
-        delay = prandom_u32() % unsolicited_report_interval(ma->idev);
+        delay = prandom_u32_max(unsolicited_report_interval(ma->idev));
 
         if (cancel_delayed_work(&ma->mca_work)) {
                 refcount_dec(&ma->mca_refcnt);
+2 -2
net/netfilter/ipvs/ip_vs_twos.c
@@ -71,8 +71,8 @@
          * from 0 to total_weight
          */
         total_weight += 1;
-        rweight1 = prandom_u32() % total_weight;
-        rweight2 = prandom_u32() % total_weight;
+        rweight1 = prandom_u32_max(total_weight);
+        rweight2 = prandom_u32_max(total_weight);
 
         /* Pick two weighted servers */
         list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
+1 -1
net/packet/af_packet.c
@@ -1350,7 +1350,7 @@
                 if (READ_ONCE(history[i]) == rxhash)
                         count++;
 
-        victim = prandom_u32() % ROLLOVER_HLEN;
+        victim = prandom_u32_max(ROLLOVER_HLEN);
 
         /* Avoid dirtying the cache line if possible */
         if (READ_ONCE(history[victim]) != rxhash)
+1 -1
net/sched/act_gact.c
@@ -25,7 +25,7 @@
 static int gact_net_rand(struct tcf_gact *gact)
 {
         smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
-        if (prandom_u32() % gact->tcfg_pval)
+        if (prandom_u32_max(gact->tcfg_pval))
                 return gact->tcf_action;
         return gact->tcfg_paction;
 }
+1 -1
net/sched/act_sample.c
@@ -168,7 +168,7 @@
         psample_group = rcu_dereference_bh(s->psample_group);
 
         /* randomly sample packets according to rate */
-        if (psample_group && (prandom_u32() % s->rate == 0)) {
+        if (psample_group && (prandom_u32_max(s->rate) == 0)) {
                 if (!skb_at_tc_ingress(skb)) {
                         md.in_ifindex = skb->skb_iif;
                         md.out_ifindex = skb->dev->ifindex;
+2 -2
net/sched/sch_netem.c
@@ -513,8 +513,8 @@
                         goto finish_segs;
                 }
 
-                skb->data[prandom_u32() % skb_headlen(skb)] ^=
-                        1<<(prandom_u32() % 8);
+                skb->data[prandom_u32_max(skb_headlen(skb))] ^=
+                        1<<prandom_u32_max(8);
         }
 
         if (unlikely(sch->q.qlen >= sch->limit)) {
+1 -1
net/sctp/socket.c
@@ -8319,7 +8319,7 @@
 
                 inet_get_local_port_range(net, &low, &high);
                 remaining = (high - low) + 1;
-                rover = prandom_u32() % remaining + low;
+                rover = prandom_u32_max(remaining) + low;
 
                 do {
                         rover++;
+1 -1
net/sunrpc/cache.c
@@ -677,7 +677,7 @@
 
         /* Consider removing either the first or the last */
         if (cache_defer_cnt > DFR_MAX) {
-                if (prandom_u32() & 1)
+                if (prandom_u32_max(2))
                         discard = list_entry(cache_defer_list.next,
                                              struct cache_deferred_req, recent);
                 else
+1 -1
net/sunrpc/xprtsock.c
@@ -1619,7 +1619,7 @@
         if (max < min)
                 return -EADDRINUSE;
         range = max - min + 1;
-        rand = (unsigned short) prandom_u32() % range;
+        rand = prandom_u32_max(range);
         return rand + min;
 }
 
+1 -1
net/tipc/socket.c
@@ -3010,7 +3010,7 @@
         struct net *net = sock_net(sk);
         struct tipc_net *tn = net_generic(net, tipc_net_id);
         u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
-        u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
+        u32 portid = prandom_u32_max(remaining) + TIPC_MIN_PORT;
 
         while (remaining--) {
                 portid++;
+1 -1
net/xfrm/xfrm_state.c
@@ -2072,7 +2072,7 @@
         } else {
                 u32 spi = 0;
                 for (h = 0; h < high-low+1; h++) {
-                        spi = low + prandom_u32()%(high-low+1);
+                        spi = low + prandom_u32_max(high - low + 1);
                         x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                         if (x0 == NULL) {
                                 newspi = htonl(spi);