Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: use reciprocal_scale() helper

Replace open codings of (((u64) <x> * <y>) >> 32) with reciprocal_scale().

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Daniel Borkmann; committed by David S. Miller.

commit 8fc54f68 (parent 690e36e7)

+24 -22
+1 -2
net/core/dev.c
··· 3124 3124 } 3125 3125 3126 3126 if (map) { 3127 - tcpu = map->cpus[((u64) hash * map->len) >> 32]; 3128 - 3127 + tcpu = map->cpus[reciprocal_scale(hash, map->len)]; 3129 3128 if (cpu_online(tcpu)) { 3130 3129 cpu = tcpu; 3131 3130 goto done;
+3 -4
net/core/flow_dissector.c
··· 298 298 qcount = dev->tc_to_txq[tc].count; 299 299 } 300 300 301 - return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset; 301 + return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 302 302 } 303 303 EXPORT_SYMBOL(__skb_tx_hash); 304 304 ··· 371 371 if (map->len == 1) 372 372 queue_index = map->queues[0]; 373 373 else 374 - queue_index = map->queues[ 375 - ((u64)skb_get_hash(skb) * map->len) >> 32]; 376 - 374 + queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), 375 + map->len)]; 377 376 if (unlikely(queue_index >= dev->real_num_tx_queues)) 378 377 queue_index = -1; 379 378 }
+1 -1
net/ipv4/inet_hashtables.c
··· 229 229 } 230 230 } else if (score == hiscore && reuseport) { 231 231 matches++; 232 - if (((u64)phash * matches) >> 32 == 0) 232 + if (reciprocal_scale(phash, matches) == 0) 233 233 result = sk; 234 234 phash = next_pseudo_random32(phash); 235 235 }
+1 -1
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 285 285 } 286 286 287 287 /* node numbers are 1..n, not 0..n */ 288 - return (((u64)hashval * config->num_total_nodes) >> 32) + 1; 288 + return reciprocal_scale(hashval, config->num_total_nodes) + 1; 289 289 } 290 290 291 291 static inline int
+3 -3
net/ipv4/udp.c
··· 224 224 remaining = (high - low) + 1; 225 225 226 226 rand = prandom_u32(); 227 - first = (((u64)rand * remaining) >> 32) + low; 227 + first = reciprocal_scale(rand, remaining) + low; 228 228 /* 229 229 * force rand to be an odd multiple of UDP_HTABLE_SIZE 230 230 */ ··· 448 448 } 449 449 } else if (score == badness && reuseport) { 450 450 matches++; 451 - if (((u64)hash * matches) >> 32 == 0) 451 + if (reciprocal_scale(hash, matches) == 0) 452 452 result = sk; 453 453 hash = next_pseudo_random32(hash); 454 454 } ··· 529 529 } 530 530 } else if (score == badness && reuseport) { 531 531 matches++; 532 - if (((u64)hash * matches) >> 32 == 0) 532 + if (reciprocal_scale(hash, matches) == 0) 533 533 result = sk; 534 534 hash = next_pseudo_random32(hash); 535 535 }
+1 -1
net/ipv6/inet6_hashtables.c
··· 198 198 } 199 199 } else if (score == hiscore && reuseport) { 200 200 matches++; 201 - if (((u64)phash * matches) >> 32 == 0) 201 + if (reciprocal_scale(phash, matches) == 0) 202 202 result = sk; 203 203 phash = next_pseudo_random32(phash); 204 204 }
+2 -2
net/ipv6/udp.c
··· 243 243 goto exact_match; 244 244 } else if (score == badness && reuseport) { 245 245 matches++; 246 - if (((u64)hash * matches) >> 32 == 0) 246 + if (reciprocal_scale(hash, matches) == 0) 247 247 result = sk; 248 248 hash = next_pseudo_random32(hash); 249 249 } ··· 323 323 } 324 324 } else if (score == badness && reuseport) { 325 325 matches++; 326 - if (((u64)hash * matches) >> 32 == 0) 326 + if (reciprocal_scale(hash, matches) == 0) 327 327 result = sk; 328 328 hash = next_pseudo_random32(hash); 329 329 }
+1 -1
net/netfilter/nf_conntrack_core.c
··· 142 142 143 143 static u32 __hash_bucket(u32 hash, unsigned int size) 144 144 { 145 - return ((u64)hash * size) >> 32; 145 + return reciprocal_scale(hash, size); 146 146 } 147 147 148 148 static u32 hash_bucket(u32 hash, const struct net *net)
+2 -1
net/netfilter/nf_conntrack_expect.c
··· 83 83 hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all), 84 84 (((tuple->dst.protonum ^ tuple->src.l3num) << 16) | 85 85 (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd); 86 - return ((u64)hash * nf_ct_expect_hsize) >> 32; 86 + 87 + return reciprocal_scale(hash, nf_ct_expect_hsize); 87 88 } 88 89 89 90 struct nf_conntrack_expect *
+3 -2
net/netfilter/nf_nat_core.c
··· 126 126 /* Original src, to ensure we map it consistently if poss. */ 127 127 hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32), 128 128 tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd); 129 - return ((u64)hash * net->ct.nat_htable_size) >> 32; 129 + 130 + return reciprocal_scale(hash, net->ct.nat_htable_size); 130 131 } 131 132 132 133 /* Is this tuple already taken? (not by us) */ ··· 275 274 } 276 275 277 276 var_ipp->all[i] = (__force __u32) 278 - htonl(minip + (((u64)j * dist) >> 32)); 277 + htonl(minip + reciprocal_scale(j, dist)); 279 278 if (var_ipp->all[i] != range->max_addr.all[i]) 280 279 full_range = true; 281 280
+1 -1
net/netfilter/xt_HMARK.c
··· 126 126 hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); 127 127 hash = hash ^ (t->proto & info->proto_mask); 128 128 129 - return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; 129 + return reciprocal_scale(hash, info->hmodulus) + info->hoffset; 130 130 } 131 131 132 132 static void
+2 -1
net/netfilter/xt_cluster.c
··· 55 55 WARN_ON(1); 56 56 break; 57 57 } 58 - return (((u64)hash * info->total_nodes) >> 32); 58 + 59 + return reciprocal_scale(hash, info->total_nodes); 59 60 } 60 61 61 62 static inline bool
+1 -1
net/netfilter/xt_hashlimit.c
··· 135 135 * give results between [0 and cfg.size-1] and same hash distribution, 136 136 * but using a multiply, less expensive than a divide 137 137 */ 138 - return ((u64)hash * ht->cfg.size) >> 32; 138 + return reciprocal_scale(hash, ht->cfg.size); 139 139 } 140 140 141 141 static struct dsthash_ent *
+2 -1
net/sched/sch_fq_codel.c
··· 77 77 hash = jhash_3words((__force u32)keys.dst, 78 78 (__force u32)keys.src ^ keys.ip_proto, 79 79 (__force u32)keys.ports, q->perturbation); 80 - return ((u64)hash * q->flows_cnt) >> 32; 80 + 81 + return reciprocal_scale(hash, q->flows_cnt); 81 82 } 82 83 83 84 static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,