Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ipvs: fix overflow on dest weight multiply

Schedulers such as lblc and lblcr require the weight to be as high as the
maximum number of active connections. In commit b552f7e3a9524abcbcdf
("ipvs: unify the formula to estimate the overhead of processing
connections"), the consideration of inactconns and activeconns was cleaned
up to always count activeconns as 256 times more important than inactconns.
In cases where 3000 or more connections are expected, a weight of 3000 *
256 * 3000 connections overflows the 32-bit signed result used to determine
if rescheduling is required.

On amd64, this merely changes the multiply and comparison instructions to
64-bit. On x86, a 64-bit result is already present from imull, so only
a few more comparison instructions are emitted.

Signed-off-by: Simon Kirby <sim@hostway.ca>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>

Authored by: Simon Kirby
Committed by: Simon Horman
Commit IDs: c16526a7, 61c5923a

+20 -20
+1 -1
include/net/ip_vs.h
@@ -1649,7 +1649,7 @@
 /* CONFIG_IP_VS_NFCT */
 #endif
 
-static inline unsigned int
+static inline int
 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
 {
 	/*
+2 -2
net/netfilter/ipvs/ip_vs_lblc.c
@@ -443,8 +443,8 @@
 			continue;
 
 		doh = ip_vs_dest_conn_overhead(dest);
-		if (loh * atomic_read(&dest->weight) >
-		    doh * atomic_read(&least->weight)) {
+		if ((__s64)loh * atomic_read(&dest->weight) >
+		    (__s64)doh * atomic_read(&least->weight)) {
 			least = dest;
 			loh = doh;
 		}
+6 -6
net/netfilter/ipvs/ip_vs_lblcr.c
@@ -200,8 +200,8 @@
 			continue;
 
 		doh = ip_vs_dest_conn_overhead(dest);
-		if ((loh * atomic_read(&dest->weight) >
-		     doh * atomic_read(&least->weight))
+		if (((__s64)loh * atomic_read(&dest->weight) >
+		     (__s64)doh * atomic_read(&least->weight))
 		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 			least = dest;
 			loh = doh;
@@ -246,8 +246,8 @@
 		dest = rcu_dereference_protected(e->dest, 1);
 		doh = ip_vs_dest_conn_overhead(dest);
 		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
-		if ((moh * atomic_read(&dest->weight) <
-		     doh * atomic_read(&most->weight))
+		if (((__s64)moh * atomic_read(&dest->weight) <
+		     (__s64)doh * atomic_read(&most->weight))
 		    && (atomic_read(&dest->weight) > 0)) {
 			most = dest;
 			moh = doh;
@@ -611,8 +611,8 @@
 			continue;
 
 		doh = ip_vs_dest_conn_overhead(dest);
-		if (loh * atomic_read(&dest->weight) >
-		    doh * atomic_read(&least->weight)) {
+		if ((__s64)loh * atomic_read(&dest->weight) >
+		    (__s64)doh * atomic_read(&least->weight)) {
 			least = dest;
 			loh = doh;
 		}
+4 -4
net/netfilter/ipvs/ip_vs_nq.c
@@ -40,7 +40,7 @@
 #include <net/ip_vs.h>
 
 
-static inline unsigned int
+static inline int
 ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
 {
 	/*
@@ -59,7 +59,7 @@
 		  struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_dest *dest, *least = NULL;
-	unsigned int loh = 0, doh;
+	int loh = 0, doh;
 
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
@@ -92,8 +92,8 @@
 		}
 
 		if (!least ||
-		    (loh * atomic_read(&dest->weight) >
-		     doh * atomic_read(&least->weight))) {
+		    ((__s64)loh * atomic_read(&dest->weight) >
+		     (__s64)doh * atomic_read(&least->weight))) {
 			least = dest;
 			loh = doh;
 		}
+4 -4
net/netfilter/ipvs/ip_vs_sed.c
@@ -44,7 +44,7 @@
 #include <net/ip_vs.h>
 
 
-static inline unsigned int
+static inline int
 ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
 {
 	/*
@@ -63,7 +63,7 @@
 		  struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_dest *dest, *least;
-	unsigned int loh, doh;
+	int loh, doh;
 
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
@@ -99,8 +99,8 @@
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 		doh = ip_vs_sed_dest_overhead(dest);
-		if (loh * atomic_read(&dest->weight) >
-		    doh * atomic_read(&least->weight)) {
+		if ((__s64)loh * atomic_read(&dest->weight) >
+		    (__s64)doh * atomic_read(&least->weight)) {
 			least = dest;
 			loh = doh;
 		}
+3 -3
net/netfilter/ipvs/ip_vs_wlc.c
@@ -35,7 +35,7 @@
 		  struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_dest *dest, *least;
-	unsigned int loh, doh;
+	int loh, doh;
 
 	IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");
 
@@ -71,8 +71,8 @@
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 		doh = ip_vs_dest_conn_overhead(dest);
-		if (loh * atomic_read(&dest->weight) >
-		    doh * atomic_read(&least->weight)) {
+		if ((__s64)loh * atomic_read(&dest->weight) >
+		    (__s64)doh * atomic_read(&least->weight)) {
 			least = dest;
 			loh = doh;
 		}