Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
The following patchset contains Netfilter/IPVS fixes for your net
tree, they are:

* Fix BUG_ON splat due to malformed TCP packets seen by synproxy, from
Patrick McHardy.

* Fix possible weight overflow in lblc and lblcr schedulers due to
32-bit arithmetic, from Simon Kirby.

* Fix possible memory access race in the lblc and lblcr schedulers,
introduced when it was converted to use RCU, two patches from
Julian Anastasov.

* Fix hard dependency on CPU 0 when reading per-cpu stats in the
rate estimator, from Julian Anastasov.

* Fix race that may lead to object use after release, when invoking
ipvsadm -C && ipvsadm -R, introduced when adding RCU, from Julian
Anastasov.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+145 -160
+3 -6
include/net/ip_vs.h
··· 723 723 struct rcu_head rcu_head; 724 724 }; 725 725 726 - /* In grace period after removing */ 727 - #define IP_VS_DEST_STATE_REMOVING 0x01 728 726 /* 729 727 * The real server destination forwarding entry 730 728 * with ip address, port number, and so on. ··· 740 742 741 743 atomic_t refcnt; /* reference counter */ 742 744 struct ip_vs_stats stats; /* statistics */ 743 - unsigned long state; /* state flags */ 745 + unsigned long idle_start; /* start time, jiffies */ 744 746 745 747 /* connection counters and thresholds */ 746 748 atomic_t activeconns; /* active connections */ ··· 754 756 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ 755 757 756 758 /* for virtual service */ 757 - struct ip_vs_service *svc; /* service it belongs to */ 759 + struct ip_vs_service __rcu *svc; /* service it belongs to */ 758 760 __u16 protocol; /* which protocol (TCP/UDP) */ 759 761 __be16 vport; /* virtual port number */ 760 762 union nf_inet_addr vaddr; /* virtual IP address */ 761 763 __u32 vfwmark; /* firewall mark of service */ 762 764 763 765 struct list_head t_list; /* in dest_trash */ 764 - struct rcu_head rcu_head; 765 766 unsigned int in_rs_table:1; /* we are in rs_table */ 766 767 }; 767 768 ··· 1646 1649 /* CONFIG_IP_VS_NFCT */ 1647 1650 #endif 1648 1651 1649 - static inline unsigned int 1652 + static inline int 1650 1653 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 1651 1654 { 1652 1655 /*
+1 -1
include/net/netfilter/nf_conntrack_synproxy.h
··· 56 56 57 57 struct tcphdr; 58 58 struct xt_synproxy_info; 59 - extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 59 + extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 60 60 const struct tcphdr *th, 61 61 struct synproxy_options *opts); 62 62 extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
+7 -3
net/ipv4/netfilter/ipt_SYNPROXY.c
··· 267 267 if (th == NULL) 268 268 return NF_DROP; 269 269 270 - synproxy_parse_options(skb, par->thoff, th, &opts); 270 + if (!synproxy_parse_options(skb, par->thoff, th, &opts)) 271 + return NF_DROP; 271 272 272 273 if (th->syn && !(th->ack || th->fin || th->rst)) { 273 274 /* Initial SYN from client */ ··· 351 350 352 351 /* fall through */ 353 352 case TCP_CONNTRACK_SYN_SENT: 354 - synproxy_parse_options(skb, thoff, th, &opts); 353 + if (!synproxy_parse_options(skb, thoff, th, &opts)) 354 + return NF_DROP; 355 355 356 356 if (!th->syn && th->ack && 357 357 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { ··· 375 373 if (!th->syn || !th->ack) 376 374 break; 377 375 378 - synproxy_parse_options(skb, thoff, th, &opts); 376 + if (!synproxy_parse_options(skb, thoff, th, &opts)) 377 + return NF_DROP; 378 + 379 379 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 380 380 synproxy->tsoff = opts.tsval - synproxy->its; 381 381
+7 -3
net/ipv6/netfilter/ip6t_SYNPROXY.c
··· 282 282 if (th == NULL) 283 283 return NF_DROP; 284 284 285 - synproxy_parse_options(skb, par->thoff, th, &opts); 285 + if (!synproxy_parse_options(skb, par->thoff, th, &opts)) 286 + return NF_DROP; 286 287 287 288 if (th->syn && !(th->ack || th->fin || th->rst)) { 288 289 /* Initial SYN from client */ ··· 373 372 374 373 /* fall through */ 375 374 case TCP_CONNTRACK_SYN_SENT: 376 - synproxy_parse_options(skb, thoff, th, &opts); 375 + if (!synproxy_parse_options(skb, thoff, th, &opts)) 376 + return NF_DROP; 377 377 378 378 if (!th->syn && th->ack && 379 379 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { ··· 397 395 if (!th->syn || !th->ack) 398 396 break; 399 397 400 - synproxy_parse_options(skb, thoff, th, &opts); 398 + if (!synproxy_parse_options(skb, thoff, th, &opts)) 399 + return NF_DROP; 400 + 401 401 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 402 402 synproxy->tsoff = opts.tsval - synproxy->its; 403 403
+10 -2
net/netfilter/ipvs/ip_vs_core.c
··· 116 116 117 117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 118 118 struct ip_vs_cpu_stats *s; 119 + struct ip_vs_service *svc; 119 120 120 121 s = this_cpu_ptr(dest->stats.cpustats); 121 122 s->ustats.inpkts++; ··· 124 123 s->ustats.inbytes += skb->len; 125 124 u64_stats_update_end(&s->syncp); 126 125 127 - s = this_cpu_ptr(dest->svc->stats.cpustats); 126 + rcu_read_lock(); 127 + svc = rcu_dereference(dest->svc); 128 + s = this_cpu_ptr(svc->stats.cpustats); 128 129 s->ustats.inpkts++; 129 130 u64_stats_update_begin(&s->syncp); 130 131 s->ustats.inbytes += skb->len; 131 132 u64_stats_update_end(&s->syncp); 133 + rcu_read_unlock(); 132 134 133 135 s = this_cpu_ptr(ipvs->tot_stats.cpustats); 134 136 s->ustats.inpkts++; ··· 150 146 151 147 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 152 148 struct ip_vs_cpu_stats *s; 149 + struct ip_vs_service *svc; 153 150 154 151 s = this_cpu_ptr(dest->stats.cpustats); 155 152 s->ustats.outpkts++; ··· 158 153 s->ustats.outbytes += skb->len; 159 154 u64_stats_update_end(&s->syncp); 160 155 161 - s = this_cpu_ptr(dest->svc->stats.cpustats); 156 + rcu_read_lock(); 157 + svc = rcu_dereference(dest->svc); 158 + s = this_cpu_ptr(svc->stats.cpustats); 162 159 s->ustats.outpkts++; 163 160 u64_stats_update_begin(&s->syncp); 164 161 s->ustats.outbytes += skb->len; 165 162 u64_stats_update_end(&s->syncp); 163 + rcu_read_unlock(); 166 164 167 165 s = this_cpu_ptr(ipvs->tot_stats.cpustats); 168 166 s->ustats.outpkts++;
+35 -51
net/netfilter/ipvs/ip_vs_ctl.c
··· 460 460 __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) 461 461 { 462 462 atomic_inc(&svc->refcnt); 463 - dest->svc = svc; 463 + rcu_assign_pointer(dest->svc, svc); 464 464 } 465 465 466 466 static void ip_vs_service_free(struct ip_vs_service *svc) ··· 470 470 kfree(svc); 471 471 } 472 472 473 - static void 474 - __ip_vs_unbind_svc(struct ip_vs_dest *dest) 473 + static void ip_vs_service_rcu_free(struct rcu_head *head) 475 474 { 476 - struct ip_vs_service *svc = dest->svc; 475 + struct ip_vs_service *svc; 477 476 478 - dest->svc = NULL; 477 + svc = container_of(head, struct ip_vs_service, rcu_head); 478 + ip_vs_service_free(svc); 479 + } 480 + 481 + static void __ip_vs_svc_put(struct ip_vs_service *svc, bool do_delay) 482 + { 479 483 if (atomic_dec_and_test(&svc->refcnt)) { 480 484 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", 481 485 svc->fwmark, 482 486 IP_VS_DBG_ADDR(svc->af, &svc->addr), 483 487 ntohs(svc->port)); 484 - ip_vs_service_free(svc); 488 + if (do_delay) 489 + call_rcu(&svc->rcu_head, ip_vs_service_rcu_free); 490 + else 491 + ip_vs_service_free(svc); 485 492 } 486 493 } 487 494 ··· 674 667 IP_VS_DBG_ADDR(svc->af, &dest->addr), 675 668 ntohs(dest->port), 676 669 atomic_read(&dest->refcnt)); 677 - /* We can not reuse dest while in grace period 678 - * because conns still can use dest->svc 679 - */ 680 - if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state)) 681 - continue; 682 670 if (dest->af == svc->af && 683 671 ip_vs_addr_equal(svc->af, &dest->addr, daddr) && 684 672 dest->port == dport && ··· 699 697 700 698 static void ip_vs_dest_free(struct ip_vs_dest *dest) 701 699 { 700 + struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1); 701 + 702 702 __ip_vs_dst_cache_reset(dest); 703 - __ip_vs_unbind_svc(dest); 703 + __ip_vs_svc_put(svc, false); 704 704 free_percpu(dest->stats.cpustats); 705 705 kfree(dest); 706 706 } ··· 775 771 struct ip_vs_dest_user_kern *udest, int add) 776 772 { 777 773 struct netns_ipvs 
*ipvs = net_ipvs(svc->net); 774 + struct ip_vs_service *old_svc; 778 775 struct ip_vs_scheduler *sched; 779 776 int conn_flags; 780 777 ··· 797 792 atomic_set(&dest->conn_flags, conn_flags); 798 793 799 794 /* bind the service */ 800 - if (!dest->svc) { 795 + old_svc = rcu_dereference_protected(dest->svc, 1); 796 + if (!old_svc) { 801 797 __ip_vs_bind_svc(dest, svc); 802 798 } else { 803 - if (dest->svc != svc) { 804 - __ip_vs_unbind_svc(dest); 799 + if (old_svc != svc) { 805 800 ip_vs_zero_stats(&dest->stats); 806 801 __ip_vs_bind_svc(dest, svc); 802 + __ip_vs_svc_put(old_svc, true); 807 803 } 808 804 } 809 805 ··· 1004 998 return 0; 1005 999 } 1006 1000 1007 - static void ip_vs_dest_wait_readers(struct rcu_head *head) 1008 - { 1009 - struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest, 1010 - rcu_head); 1011 - 1012 - /* End of grace period after unlinking */ 1013 - clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state); 1014 - } 1015 - 1016 - 1017 1001 /* 1018 1002 * Delete a destination (must be already unlinked from the service) 1019 1003 */ ··· 1019 1023 */ 1020 1024 ip_vs_rs_unhash(dest); 1021 1025 1022 - if (!cleanup) { 1023 - set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state); 1024 - call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers); 1025 - } 1026 - 1027 1026 spin_lock_bh(&ipvs->dest_trash_lock); 1028 1027 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", 1029 1028 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), 1030 1029 atomic_read(&dest->refcnt)); 1031 1030 if (list_empty(&ipvs->dest_trash) && !cleanup) 1032 1031 mod_timer(&ipvs->dest_trash_timer, 1033 - jiffies + IP_VS_DEST_TRASH_PERIOD); 1032 + jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); 1034 1033 /* dest lives in trash without reference */ 1035 1034 list_add(&dest->t_list, &ipvs->dest_trash); 1035 + dest->idle_start = 0; 1036 1036 spin_unlock_bh(&ipvs->dest_trash_lock); 1037 1037 ip_vs_dest_put(dest); 1038 1038 } ··· 1100 1108 struct net *net = (struct net *) data; 
1101 1109 struct netns_ipvs *ipvs = net_ipvs(net); 1102 1110 struct ip_vs_dest *dest, *next; 1111 + unsigned long now = jiffies; 1103 1112 1104 1113 spin_lock(&ipvs->dest_trash_lock); 1105 1114 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { 1106 - /* Skip if dest is in grace period */ 1107 - if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state)) 1108 - continue; 1109 1115 if (atomic_read(&dest->refcnt) > 0) 1110 1116 continue; 1117 + if (dest->idle_start) { 1118 + if (time_before(now, dest->idle_start + 1119 + IP_VS_DEST_TRASH_PERIOD)) 1120 + continue; 1121 + } else { 1122 + dest->idle_start = max(1UL, now); 1123 + continue; 1124 + } 1111 1125 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n", 1112 1126 dest->vfwmark, 1113 - IP_VS_DBG_ADDR(dest->svc->af, &dest->addr), 1127 + IP_VS_DBG_ADDR(dest->af, &dest->addr), 1114 1128 ntohs(dest->port)); 1115 1129 list_del(&dest->t_list); 1116 1130 ip_vs_dest_free(dest); 1117 1131 } 1118 1132 if (!list_empty(&ipvs->dest_trash)) 1119 1133 mod_timer(&ipvs->dest_trash_timer, 1120 - jiffies + IP_VS_DEST_TRASH_PERIOD); 1134 + jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); 1121 1135 spin_unlock(&ipvs->dest_trash_lock); 1122 1136 } 1123 1137 ··· 1318 1320 return ret; 1319 1321 } 1320 1322 1321 - static void ip_vs_service_rcu_free(struct rcu_head *head) 1322 - { 1323 - struct ip_vs_service *svc; 1324 - 1325 - svc = container_of(head, struct ip_vs_service, rcu_head); 1326 - ip_vs_service_free(svc); 1327 - } 1328 - 1329 1323 /* 1330 1324 * Delete a service from the service list 1331 1325 * - The service must be unlinked, unlocked and not referenced! 
··· 1366 1376 /* 1367 1377 * Free the service if nobody refers to it 1368 1378 */ 1369 - if (atomic_dec_and_test(&svc->refcnt)) { 1370 - IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", 1371 - svc->fwmark, 1372 - IP_VS_DBG_ADDR(svc->af, &svc->addr), 1373 - ntohs(svc->port)); 1374 - call_rcu(&svc->rcu_head, ip_vs_service_rcu_free); 1375 - } 1379 + __ip_vs_svc_put(svc, true); 1376 1380 1377 1381 /* decrease the module use count */ 1378 1382 ip_vs_use_count_dec();
+3 -1
net/netfilter/ipvs/ip_vs_est.c
··· 59 59 struct ip_vs_cpu_stats __percpu *stats) 60 60 { 61 61 int i; 62 + bool add = false; 62 63 63 64 for_each_possible_cpu(i) { 64 65 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); 65 66 unsigned int start; 66 67 __u64 inbytes, outbytes; 67 - if (i) { 68 + if (add) { 68 69 sum->conns += s->ustats.conns; 69 70 sum->inpkts += s->ustats.inpkts; 70 71 sum->outpkts += s->ustats.outpkts; ··· 77 76 sum->inbytes += inbytes; 78 77 sum->outbytes += outbytes; 79 78 } else { 79 + add = true; 80 80 sum->conns = s->ustats.conns; 81 81 sum->inpkts = s->ustats.inpkts; 82 82 sum->outpkts = s->ustats.outpkts;
+35 -41
net/netfilter/ipvs/ip_vs_lblc.c
··· 93 93 struct hlist_node list; 94 94 int af; /* address family */ 95 95 union nf_inet_addr addr; /* destination IP address */ 96 - struct ip_vs_dest __rcu *dest; /* real server (cache) */ 96 + struct ip_vs_dest *dest; /* real server (cache) */ 97 97 unsigned long lastuse; /* last used time */ 98 98 struct rcu_head rcu_head; 99 99 }; ··· 130 130 }; 131 131 #endif 132 132 133 - static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) 133 + static void ip_vs_lblc_rcu_free(struct rcu_head *head) 134 134 { 135 - struct ip_vs_dest *dest; 135 + struct ip_vs_lblc_entry *en = container_of(head, 136 + struct ip_vs_lblc_entry, 137 + rcu_head); 136 138 137 - hlist_del_rcu(&en->list); 138 - /* 139 - * We don't kfree dest because it is referred either by its service 140 - * or the trash dest list. 141 - */ 142 - dest = rcu_dereference_protected(en->dest, 1); 143 - ip_vs_dest_put(dest); 144 - kfree_rcu(en, rcu_head); 139 + ip_vs_dest_put(en->dest); 140 + kfree(en); 145 141 } 146 142 143 + static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en) 144 + { 145 + hlist_del_rcu(&en->list); 146 + call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free); 147 + } 147 148 148 149 /* 149 150 * Returns hash value for IPVS LBLC entry ··· 204 203 struct ip_vs_lblc_entry *en; 205 204 206 205 en = ip_vs_lblc_get(dest->af, tbl, daddr); 207 - if (!en) { 208 - en = kmalloc(sizeof(*en), GFP_ATOMIC); 209 - if (!en) 210 - return NULL; 211 - 212 - en->af = dest->af; 213 - ip_vs_addr_copy(dest->af, &en->addr, daddr); 214 - en->lastuse = jiffies; 215 - 216 - ip_vs_dest_hold(dest); 217 - RCU_INIT_POINTER(en->dest, dest); 218 - 219 - ip_vs_lblc_hash(tbl, en); 220 - } else { 221 - struct ip_vs_dest *old_dest; 222 - 223 - old_dest = rcu_dereference_protected(en->dest, 1); 224 - if (old_dest != dest) { 225 - ip_vs_dest_put(old_dest); 226 - ip_vs_dest_hold(dest); 227 - /* No ordering constraints for refcnt */ 228 - RCU_INIT_POINTER(en->dest, dest); 229 - } 206 + if (en) { 207 + if (en->dest == dest) 208 + 
return en; 209 + ip_vs_lblc_del(en); 230 210 } 211 + en = kmalloc(sizeof(*en), GFP_ATOMIC); 212 + if (!en) 213 + return NULL; 214 + 215 + en->af = dest->af; 216 + ip_vs_addr_copy(dest->af, &en->addr, daddr); 217 + en->lastuse = jiffies; 218 + 219 + ip_vs_dest_hold(dest); 220 + en->dest = dest; 221 + 222 + ip_vs_lblc_hash(tbl, en); 231 223 232 224 return en; 233 225 } ··· 240 246 tbl->dead = 1; 241 247 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 242 248 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { 243 - ip_vs_lblc_free(en); 249 + ip_vs_lblc_del(en); 244 250 atomic_dec(&tbl->entries); 245 251 } 246 252 } ··· 275 281 sysctl_lblc_expiration(svc))) 276 282 continue; 277 283 278 - ip_vs_lblc_free(en); 284 + ip_vs_lblc_del(en); 279 285 atomic_dec(&tbl->entries); 280 286 } 281 287 spin_unlock(&svc->sched_lock); ··· 329 335 if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) 330 336 continue; 331 337 332 - ip_vs_lblc_free(en); 338 + ip_vs_lblc_del(en); 333 339 atomic_dec(&tbl->entries); 334 340 goal--; 335 341 } ··· 437 443 continue; 438 444 439 445 doh = ip_vs_dest_conn_overhead(dest); 440 - if (loh * atomic_read(&dest->weight) > 441 - doh * atomic_read(&least->weight)) { 446 + if ((__s64)loh * atomic_read(&dest->weight) > 447 + (__s64)doh * atomic_read(&least->weight)) { 442 448 least = dest; 443 449 loh = doh; 444 450 } ··· 505 511 * free up entries from the trash at any time. 506 512 */ 507 513 508 - dest = rcu_dereference(en->dest); 514 + dest = en->dest; 509 515 if ((dest->flags & IP_VS_DEST_F_AVAILABLE) && 510 516 atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) 511 517 goto out; ··· 625 631 { 626 632 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler); 627 633 unregister_pernet_subsys(&ip_vs_lblc_ops); 628 - synchronize_rcu(); 634 + rcu_barrier(); 629 635 } 630 636 631 637
+26 -36
net/netfilter/ipvs/ip_vs_lblcr.c
··· 89 89 */ 90 90 struct ip_vs_dest_set_elem { 91 91 struct list_head list; /* list link */ 92 - struct ip_vs_dest __rcu *dest; /* destination server */ 92 + struct ip_vs_dest *dest; /* destination server */ 93 93 struct rcu_head rcu_head; 94 94 }; 95 95 ··· 107 107 108 108 if (check) { 109 109 list_for_each_entry(e, &set->list, list) { 110 - struct ip_vs_dest *d; 111 - 112 - d = rcu_dereference_protected(e->dest, 1); 113 - if (d == dest) 114 - /* already existed */ 110 + if (e->dest == dest) 115 111 return; 116 112 } 117 113 } ··· 117 121 return; 118 122 119 123 ip_vs_dest_hold(dest); 120 - RCU_INIT_POINTER(e->dest, dest); 124 + e->dest = dest; 121 125 122 126 list_add_rcu(&e->list, &set->list); 123 127 atomic_inc(&set->size); 124 128 125 129 set->lastmod = jiffies; 130 + } 131 + 132 + static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head) 133 + { 134 + struct ip_vs_dest_set_elem *e; 135 + 136 + e = container_of(head, struct ip_vs_dest_set_elem, rcu_head); 137 + ip_vs_dest_put(e->dest); 138 + kfree(e); 126 139 } 127 140 128 141 static void ··· 140 135 struct ip_vs_dest_set_elem *e; 141 136 142 137 list_for_each_entry(e, &set->list, list) { 143 - struct ip_vs_dest *d; 144 - 145 - d = rcu_dereference_protected(e->dest, 1); 146 - if (d == dest) { 138 + if (e->dest == dest) { 147 139 /* HIT */ 148 140 atomic_dec(&set->size); 149 141 set->lastmod = jiffies; 150 - ip_vs_dest_put(dest); 151 142 list_del_rcu(&e->list); 152 - kfree_rcu(e, rcu_head); 143 + call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free); 153 144 break; 154 145 } 155 146 } ··· 156 155 struct ip_vs_dest_set_elem *e, *ep; 157 156 158 157 list_for_each_entry_safe(e, ep, &set->list, list) { 159 - struct ip_vs_dest *d; 160 - 161 - d = rcu_dereference_protected(e->dest, 1); 162 - /* 163 - * We don't kfree dest because it is referred either 164 - * by its service or by the trash dest list. 
165 - */ 166 - ip_vs_dest_put(d); 167 158 list_del_rcu(&e->list); 168 - kfree_rcu(e, rcu_head); 159 + call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free); 169 160 } 170 161 } 171 162 ··· 168 175 struct ip_vs_dest *dest, *least; 169 176 int loh, doh; 170 177 171 - if (set == NULL) 172 - return NULL; 173 - 174 178 /* select the first destination server, whose weight > 0 */ 175 179 list_for_each_entry_rcu(e, &set->list, list) { 176 - least = rcu_dereference(e->dest); 180 + least = e->dest; 177 181 if (least->flags & IP_VS_DEST_F_OVERLOAD) 178 182 continue; 179 183 ··· 185 195 /* find the destination with the weighted least load */ 186 196 nextstage: 187 197 list_for_each_entry_continue_rcu(e, &set->list, list) { 188 - dest = rcu_dereference(e->dest); 198 + dest = e->dest; 189 199 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 190 200 continue; 191 201 192 202 doh = ip_vs_dest_conn_overhead(dest); 193 - if ((loh * atomic_read(&dest->weight) > 194 - doh * atomic_read(&least->weight)) 203 + if (((__s64)loh * atomic_read(&dest->weight) > 204 + (__s64)doh * atomic_read(&least->weight)) 195 205 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 196 206 least = dest; 197 207 loh = doh; ··· 222 232 223 233 /* select the first destination server, whose weight > 0 */ 224 234 list_for_each_entry(e, &set->list, list) { 225 - most = rcu_dereference_protected(e->dest, 1); 235 + most = e->dest; 226 236 if (atomic_read(&most->weight) > 0) { 227 237 moh = ip_vs_dest_conn_overhead(most); 228 238 goto nextstage; ··· 233 243 /* find the destination with the weighted most load */ 234 244 nextstage: 235 245 list_for_each_entry_continue(e, &set->list, list) { 236 - dest = rcu_dereference_protected(e->dest, 1); 246 + dest = e->dest; 237 247 doh = ip_vs_dest_conn_overhead(dest); 238 248 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ 239 - if ((moh * atomic_read(&dest->weight) < 240 - doh * atomic_read(&most->weight)) 249 + if (((__s64)moh * atomic_read(&dest->weight) < 250 + (__s64)doh * 
atomic_read(&most->weight)) 241 251 && (atomic_read(&dest->weight) > 0)) { 242 252 most = dest; 243 253 moh = doh; ··· 601 611 continue; 602 612 603 613 doh = ip_vs_dest_conn_overhead(dest); 604 - if (loh * atomic_read(&dest->weight) > 605 - doh * atomic_read(&least->weight)) { 614 + if ((__s64)loh * atomic_read(&dest->weight) > 615 + (__s64)doh * atomic_read(&least->weight)) { 606 616 least = dest; 607 617 loh = doh; 608 618 } ··· 809 819 { 810 820 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); 811 821 unregister_pernet_subsys(&ip_vs_lblcr_ops); 812 - synchronize_rcu(); 822 + rcu_barrier(); 813 823 } 814 824 815 825
+4 -4
net/netfilter/ipvs/ip_vs_nq.c
··· 40 40 #include <net/ip_vs.h> 41 41 42 42 43 - static inline unsigned int 43 + static inline int 44 44 ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) 45 45 { 46 46 /* ··· 59 59 struct ip_vs_iphdr *iph) 60 60 { 61 61 struct ip_vs_dest *dest, *least = NULL; 62 - unsigned int loh = 0, doh; 62 + int loh = 0, doh; 63 63 64 64 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 65 65 ··· 92 92 } 93 93 94 94 if (!least || 95 - (loh * atomic_read(&dest->weight) > 96 - doh * atomic_read(&least->weight))) { 95 + ((__s64)loh * atomic_read(&dest->weight) > 96 + (__s64)doh * atomic_read(&least->weight))) { 97 97 least = dest; 98 98 loh = doh; 99 99 }
+4 -4
net/netfilter/ipvs/ip_vs_sed.c
··· 44 44 #include <net/ip_vs.h> 45 45 46 46 47 - static inline unsigned int 47 + static inline int 48 48 ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) 49 49 { 50 50 /* ··· 63 63 struct ip_vs_iphdr *iph) 64 64 { 65 65 struct ip_vs_dest *dest, *least; 66 - unsigned int loh, doh; 66 + int loh, doh; 67 67 68 68 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 69 69 ··· 99 99 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 100 100 continue; 101 101 doh = ip_vs_sed_dest_overhead(dest); 102 - if (loh * atomic_read(&dest->weight) > 103 - doh * atomic_read(&least->weight)) { 102 + if ((__s64)loh * atomic_read(&dest->weight) > 103 + (__s64)doh * atomic_read(&least->weight)) { 104 104 least = dest; 105 105 loh = doh; 106 106 }
+3 -3
net/netfilter/ipvs/ip_vs_wlc.c
··· 35 35 struct ip_vs_iphdr *iph) 36 36 { 37 37 struct ip_vs_dest *dest, *least; 38 - unsigned int loh, doh; 38 + int loh, doh; 39 39 40 40 IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n"); 41 41 ··· 71 71 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 72 72 continue; 73 73 doh = ip_vs_dest_conn_overhead(dest); 74 - if (loh * atomic_read(&dest->weight) > 75 - doh * atomic_read(&least->weight)) { 74 + if ((__s64)loh * atomic_read(&dest->weight) > 75 + (__s64)doh * atomic_read(&least->weight)) { 76 76 least = dest; 77 77 loh = doh; 78 78 }
+7 -5
net/netfilter/nf_synproxy_core.c
··· 24 24 int synproxy_net_id; 25 25 EXPORT_SYMBOL_GPL(synproxy_net_id); 26 26 27 - void 27 + bool 28 28 synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 29 29 const struct tcphdr *th, struct synproxy_options *opts) 30 30 { ··· 32 32 u8 buf[40], *ptr; 33 33 34 34 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); 35 - BUG_ON(ptr == NULL); 35 + if (ptr == NULL) 36 + return false; 36 37 37 38 opts->options = 0; 38 39 while (length > 0) { ··· 42 41 43 42 switch (opcode) { 44 43 case TCPOPT_EOL: 45 - return; 44 + return true; 46 45 case TCPOPT_NOP: 47 46 length--; 48 47 continue; 49 48 default: 50 49 opsize = *ptr++; 51 50 if (opsize < 2) 52 - return; 51 + return true; 53 52 if (opsize > length) 54 - return; 53 + return true; 55 54 56 55 switch (opcode) { 57 56 case TCPOPT_MSS: ··· 85 84 length -= opsize; 86 85 } 87 86 } 87 + return true; 88 88 } 89 89 EXPORT_SYMBOL_GPL(synproxy_parse_options); 90 90