Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ipvs: unify the formula to estimate the overhead of processing connections

lc and wlc use the same formula, but lblc and lblcr use another one. There
is no reason for using two different formulas for the lc variants.

The formula used by lc is used by all the lc variants in this patch.

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Acked-by: Wensong Zhang <wensong@linux-vs.org>
Signed-off-by: Simon Horman <horms@verge.net.au>

Authored by Changli Gao and committed by Simon Horman.
b552f7e3 17a8f8e3

+27 -63
+14
include/net/ip_vs.h
··· 1243 1243 /* CONFIG_IP_VS_NFCT */ 1244 1244 #endif 1245 1245 1246 + static inline unsigned int 1247 + ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 1248 + { 1249 + /* 1250 + * We think the overhead of processing active connections is 256 1251 + * times higher than that of inactive connections in average. (This 1252 + * 256 times might not be accurate, we will change it later) We 1253 + * use the following formula to estimate the overhead now: 1254 + * dest->activeconns*256 + dest->inactconns 1255 + */ 1256 + return (atomic_read(&dest->activeconns) << 8) + 1257 + atomic_read(&dest->inactconns); 1258 + } 1259 + 1246 1260 #endif /* __KERNEL__ */ 1247 1261 1248 1262 #endif /* _NET_IP_VS_H */
+3 -10
net/netfilter/ipvs/ip_vs_lblc.c
··· 389 389 int loh, doh; 390 390 391 391 /* 392 - * We think the overhead of processing active connections is fifty 393 - * times higher than that of inactive connections in average. (This 394 - * fifty times might not be accurate, we will change it later.) We 395 - * use the following formula to estimate the overhead: 396 - * dest->activeconns*50 + dest->inactconns 397 - * and the load: 392 + * We use the following formula to estimate the load: 398 393 * (dest overhead) / dest->weight 399 394 * 400 395 * Remember -- no floats in kernel mode!!! ··· 405 410 continue; 406 411 if (atomic_read(&dest->weight) > 0) { 407 412 least = dest; 408 - loh = atomic_read(&least->activeconns) * 50 409 - + atomic_read(&least->inactconns); 413 + loh = ip_vs_dest_conn_overhead(least); 410 414 goto nextstage; 411 415 } 412 416 } ··· 419 425 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 420 426 continue; 421 427 422 - doh = atomic_read(&dest->activeconns) * 50 423 - + atomic_read(&dest->inactconns); 428 + doh = ip_vs_dest_conn_overhead(dest); 424 429 if (loh * atomic_read(&dest->weight) > 425 430 doh * atomic_read(&least->weight)) { 426 431 least = dest;
+7 -18
net/netfilter/ipvs/ip_vs_lblcr.c
··· 178 178 179 179 if ((atomic_read(&least->weight) > 0) 180 180 && (least->flags & IP_VS_DEST_F_AVAILABLE)) { 181 - loh = atomic_read(&least->activeconns) * 50 182 - + atomic_read(&least->inactconns); 181 + loh = ip_vs_dest_conn_overhead(least); 183 182 goto nextstage; 184 183 } 185 184 } ··· 191 192 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 192 193 continue; 193 194 194 - doh = atomic_read(&dest->activeconns) * 50 195 - + atomic_read(&dest->inactconns); 195 + doh = ip_vs_dest_conn_overhead(dest); 196 196 if ((loh * atomic_read(&dest->weight) > 197 197 doh * atomic_read(&least->weight)) 198 198 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { ··· 226 228 list_for_each_entry(e, &set->list, list) { 227 229 most = e->dest; 228 230 if (atomic_read(&most->weight) > 0) { 229 - moh = atomic_read(&most->activeconns) * 50 230 - + atomic_read(&most->inactconns); 231 + moh = ip_vs_dest_conn_overhead(most); 231 232 goto nextstage; 232 233 } 233 234 } ··· 236 239 nextstage: 237 240 list_for_each_entry(e, &set->list, list) { 238 241 dest = e->dest; 239 - doh = atomic_read(&dest->activeconns) * 50 240 - + atomic_read(&dest->inactconns); 242 + doh = ip_vs_dest_conn_overhead(dest); 241 243 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ 242 244 if ((moh * atomic_read(&dest->weight) < 243 245 doh * atomic_read(&most->weight)) ··· 559 563 int loh, doh; 560 564 561 565 /* 562 - * We think the overhead of processing active connections is fifty 563 - * times higher than that of inactive connections in average. (This 564 - * fifty times might not be accurate, we will change it later.) We 565 - * use the following formula to estimate the overhead: 566 - * dest->activeconns*50 + dest->inactconns 567 - * and the load: 566 + * We use the following formula to estimate the load: 568 567 * (dest overhead) / dest->weight 569 568 * 570 569 * Remember -- no floats in kernel mode!!! 
··· 576 585 577 586 if (atomic_read(&dest->weight) > 0) { 578 587 least = dest; 579 - loh = atomic_read(&least->activeconns) * 50 580 - + atomic_read(&least->inactconns); 588 + loh = ip_vs_dest_conn_overhead(least); 581 589 goto nextstage; 582 590 } 583 591 } ··· 590 600 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 591 601 continue; 592 602 593 - doh = atomic_read(&dest->activeconns) * 50 594 - + atomic_read(&dest->inactconns); 603 + doh = ip_vs_dest_conn_overhead(dest); 595 604 if (loh * atomic_read(&dest->weight) > 596 605 doh * atomic_read(&least->weight)) { 597 606 least = dest;
+1 -17
net/netfilter/ipvs/ip_vs_lc.c
··· 22 22 23 23 #include <net/ip_vs.h> 24 24 25 - 26 - static inline unsigned int 27 - ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) 28 - { 29 - /* 30 - * We think the overhead of processing active connections is 256 31 - * times higher than that of inactive connections in average. (This 32 - * 256 times might not be accurate, we will change it later) We 33 - * use the following formula to estimate the overhead now: 34 - * dest->activeconns*256 + dest->inactconns 35 - */ 36 - return (atomic_read(&dest->activeconns) << 8) + 37 - atomic_read(&dest->inactconns); 38 - } 39 - 40 - 41 25 /* 42 26 * Least Connection scheduling 43 27 */ ··· 46 62 if ((dest->flags & IP_VS_DEST_F_OVERLOAD) || 47 63 atomic_read(&dest->weight) == 0) 48 64 continue; 49 - doh = ip_vs_lc_dest_overhead(dest); 65 + doh = ip_vs_dest_conn_overhead(dest); 50 66 if (!least || doh < loh) { 51 67 least = dest; 52 68 loh = doh;
+2 -18
net/netfilter/ipvs/ip_vs_wlc.c
··· 27 27 28 28 #include <net/ip_vs.h> 29 29 30 - 31 - static inline unsigned int 32 - ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) 33 - { 34 - /* 35 - * We think the overhead of processing active connections is 256 36 - * times higher than that of inactive connections in average. (This 37 - * 256 times might not be accurate, we will change it later) We 38 - * use the following formula to estimate the overhead now: 39 - * dest->activeconns*256 + dest->inactconns 40 - */ 41 - return (atomic_read(&dest->activeconns) << 8) + 42 - atomic_read(&dest->inactconns); 43 - } 44 - 45 - 46 30 /* 47 31 * Weighted Least Connection scheduling 48 32 */ ··· 55 71 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && 56 72 atomic_read(&dest->weight) > 0) { 57 73 least = dest; 58 - loh = ip_vs_wlc_dest_overhead(least); 74 + loh = ip_vs_dest_conn_overhead(least); 59 75 goto nextstage; 60 76 } 61 77 } ··· 69 85 list_for_each_entry_continue(dest, &svc->destinations, n_list) { 70 86 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 71 87 continue; 72 - doh = ip_vs_wlc_dest_overhead(dest); 88 + doh = ip_vs_dest_conn_overhead(dest); 73 89 if (loh * atomic_read(&dest->weight) > 74 90 doh * atomic_read(&least->weight)) { 75 91 least = dest;