Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

Florian Westphal says:

====================
The following set contains changes for your *net-next* tree:

- make conntrack ignore packets that are delayed (containing
data already acked). The current behaviour of flagging them as INVALID
causes more harm than good; let them pass so the peer can send an
immediate ACK for the most recent sequence number.
- make conntrack recognize when both peers have sent 'invalid' FINs:
This helps clean out stale connections faster in those cases where
conntrack is no longer in sync with the actual connection state.
- Now that DECNET is gone, we don't need to reserve space for
DECNET-related information.
- compact common 'find a free port number for the new inbound
connection' code and move it to a helper, then cap number of tries
the new helper will make until it gives up.
- replace various instances of strlcpy with strscpy, from Wolfram Sang.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+269 -255
+1
include/net/netfilter/nf_nat_helper.h
··· 38 38 * to port ct->master->saved_proto. */ 39 39 void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this); 40 40 41 + u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port); 41 42 #endif
+2
include/uapi/linux/netfilter.h
··· 63 63 NFPROTO_NETDEV = 5, 64 64 NFPROTO_BRIDGE = 7, 65 65 NFPROTO_IPV6 = 10, 66 + #ifndef __KERNEL__ /* no longer supported by kernel */ 66 67 NFPROTO_DECNET = 12, 68 + #endif 67 69 NFPROTO_NUMPROTO, 68 70 }; 69 71
+4 -56
net/ipv4/netfilter/nf_nat_h323.c
··· 291 291 exp->expectfn = nf_nat_follow_master; 292 292 exp->dir = !dir; 293 293 294 - /* Try to get same port: if not, try to change it. */ 295 - for (; nated_port != 0; nated_port++) { 296 - int ret; 297 - 298 - exp->tuple.dst.u.tcp.port = htons(nated_port); 299 - ret = nf_ct_expect_related(exp, 0); 300 - if (ret == 0) 301 - break; 302 - else if (ret != -EBUSY) { 303 - nated_port = 0; 304 - break; 305 - } 306 - } 307 - 294 + nated_port = nf_nat_exp_find_port(exp, nated_port); 308 295 if (nated_port == 0) { /* No port available */ 309 296 net_notice_ratelimited("nf_nat_h323: out of TCP ports\n"); 310 297 return 0; ··· 334 347 if (info->sig_port[dir] == port) 335 348 nated_port = ntohs(info->sig_port[!dir]); 336 349 337 - /* Try to get same port: if not, try to change it. */ 338 - for (; nated_port != 0; nated_port++) { 339 - int ret; 340 - 341 - exp->tuple.dst.u.tcp.port = htons(nated_port); 342 - ret = nf_ct_expect_related(exp, 0); 343 - if (ret == 0) 344 - break; 345 - else if (ret != -EBUSY) { 346 - nated_port = 0; 347 - break; 348 - } 349 - } 350 - 350 + nated_port = nf_nat_exp_find_port(exp, nated_port); 351 351 if (nated_port == 0) { /* No port available */ 352 352 net_notice_ratelimited("nf_nat_q931: out of TCP ports\n"); 353 353 return 0; ··· 413 439 if (info->sig_port[dir] == port) 414 440 nated_port = ntohs(info->sig_port[!dir]); 415 441 416 - /* Try to get same port: if not, try to change it. 
*/ 417 - for (; nated_port != 0; nated_port++) { 418 - int ret; 419 - 420 - exp->tuple.dst.u.tcp.port = htons(nated_port); 421 - ret = nf_ct_expect_related(exp, 0); 422 - if (ret == 0) 423 - break; 424 - else if (ret != -EBUSY) { 425 - nated_port = 0; 426 - break; 427 - } 428 - } 429 - 442 + nated_port = nf_nat_exp_find_port(exp, nated_port); 430 443 if (nated_port == 0) { /* No port available */ 431 444 net_notice_ratelimited("nf_nat_ras: out of TCP ports\n"); 432 445 return 0; ··· 493 532 exp->expectfn = ip_nat_callforwarding_expect; 494 533 exp->dir = !dir; 495 534 496 - /* Try to get same port: if not, try to change it. */ 497 - for (nated_port = ntohs(port); nated_port != 0; nated_port++) { 498 - int ret; 499 - 500 - exp->tuple.dst.u.tcp.port = htons(nated_port); 501 - ret = nf_ct_expect_related(exp, 0); 502 - if (ret == 0) 503 - break; 504 - else if (ret != -EBUSY) { 505 - nated_port = 0; 506 - break; 507 - } 508 - } 509 - 535 + nated_port = nf_nat_exp_find_port(exp, ntohs(port)); 510 536 if (nated_port == 0) { /* No port available */ 511 537 net_notice_ratelimited("nf_nat_q931: out of TCP ports\n"); 512 538 return 0;
+2 -2
net/netfilter/ipset/ip_set_core.c
··· 353 353 c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); 354 354 if (unlikely(!c)) 355 355 return; 356 - strlcpy(c->str, ext->comment, len + 1); 356 + strscpy(c->str, ext->comment, len + 1); 357 357 set->ext_size += sizeof(*c) + strlen(c->str) + 1; 358 358 rcu_assign_pointer(comment->c, c); 359 359 } ··· 1072 1072 if (!set) 1073 1073 return -ENOMEM; 1074 1074 spin_lock_init(&set->lock); 1075 - strlcpy(set->name, name, IPSET_MAXNAMELEN); 1075 + strscpy(set->name, name, IPSET_MAXNAMELEN); 1076 1076 set->family = family; 1077 1077 set->revision = revision; 1078 1078
+4 -4
net/netfilter/ipvs/ip_vs_ctl.c
··· 2611 2611 dst->addr = src->addr.ip; 2612 2612 dst->port = src->port; 2613 2613 dst->fwmark = src->fwmark; 2614 - strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name)); 2614 + strscpy(dst->sched_name, sched_name, sizeof(dst->sched_name)); 2615 2615 dst->flags = src->flags; 2616 2616 dst->timeout = src->timeout / HZ; 2617 2617 dst->netmask = src->netmask; ··· 2805 2805 mutex_lock(&ipvs->sync_mutex); 2806 2806 if (ipvs->sync_state & IP_VS_STATE_MASTER) { 2807 2807 d[0].state = IP_VS_STATE_MASTER; 2808 - strlcpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn, 2808 + strscpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn, 2809 2809 sizeof(d[0].mcast_ifn)); 2810 2810 d[0].syncid = ipvs->mcfg.syncid; 2811 2811 } 2812 2812 if (ipvs->sync_state & IP_VS_STATE_BACKUP) { 2813 2813 d[1].state = IP_VS_STATE_BACKUP; 2814 - strlcpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn, 2814 + strscpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn, 2815 2815 sizeof(d[1].mcast_ifn)); 2816 2816 d[1].syncid = ipvs->bcfg.syncid; 2817 2817 } ··· 3561 3561 attrs[IPVS_DAEMON_ATTR_MCAST_IFN] && 3562 3562 attrs[IPVS_DAEMON_ATTR_SYNC_ID])) 3563 3563 return -EINVAL; 3564 - strlcpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), 3564 + strscpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), 3565 3565 sizeof(c.mcast_ifn)); 3566 3566 c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]); 3567 3567
+204 -123
net/netfilter/nf_conntrack_proto_tcp.c
··· 47 47 "SYN_SENT2", 48 48 }; 49 49 50 + enum nf_ct_tcp_action { 51 + NFCT_TCP_IGNORE, 52 + NFCT_TCP_INVALID, 53 + NFCT_TCP_ACCEPT, 54 + }; 55 + 50 56 #define SECS * HZ 51 57 #define MINS * 60 SECS 52 58 #define HOURS * 60 MINS ··· 478 472 } 479 473 } 480 474 481 - static bool tcp_in_window(struct nf_conn *ct, 482 - enum ip_conntrack_dir dir, 483 - unsigned int index, 484 - const struct sk_buff *skb, 485 - unsigned int dataoff, 486 - const struct tcphdr *tcph, 487 - const struct nf_hook_state *hook_state) 475 + __printf(6, 7) 476 + static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb, 477 + const struct nf_conn *ct, 478 + const struct nf_hook_state *state, 479 + const struct ip_ct_tcp_state *sender, 480 + enum nf_ct_tcp_action ret, 481 + const char *fmt, ...) 482 + { 483 + const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct)); 484 + struct va_format vaf; 485 + va_list args; 486 + bool be_liberal; 487 + 488 + be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal; 489 + if (be_liberal) 490 + return NFCT_TCP_ACCEPT; 491 + 492 + va_start(args, fmt); 493 + vaf.fmt = fmt; 494 + vaf.va = &args; 495 + nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf); 496 + va_end(args); 497 + 498 + return ret; 499 + } 500 + 501 + static enum nf_ct_tcp_action 502 + tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir, 503 + unsigned int index, const struct sk_buff *skb, 504 + unsigned int dataoff, const struct tcphdr *tcph, 505 + const struct nf_hook_state *hook_state) 488 506 { 489 507 struct ip_ct_tcp *state = &ct->proto.tcp; 490 - struct net *net = nf_ct_net(ct); 491 - struct nf_tcp_net *tn = nf_tcp_pernet(net); 492 508 struct ip_ct_tcp_state *sender = &state->seen[dir]; 493 509 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 494 510 __u32 seq, ack, sack, end, win, swin; 495 - u16 win_raw; 511 + bool in_recv_win, seq_ok; 496 512 s32 receiver_offset; 497 - bool res, in_recv_win; 513 + u16 win_raw; 498 514 499 515 /* 
500 516 * Get the required data from the packet. ··· 545 517 end, win); 546 518 if (!tcph->ack) 547 519 /* Simultaneous open */ 548 - return true; 520 + return NFCT_TCP_ACCEPT; 549 521 } else { 550 522 /* 551 523 * We are in the middle of a connection, ··· 588 560 end, win); 589 561 590 562 if (dir == IP_CT_DIR_REPLY && !tcph->ack) 591 - return true; 563 + return NFCT_TCP_ACCEPT; 592 564 } 593 565 594 566 if (!(tcph->ack)) { ··· 612 584 */ 613 585 seq = end = sender->td_end; 614 586 587 + seq_ok = before(seq, sender->td_maxend + 1); 588 + if (!seq_ok) { 589 + u32 overshot = end - sender->td_maxend + 1; 590 + bool ack_ok; 591 + 592 + ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1); 593 + in_recv_win = receiver->td_maxwin && 594 + after(end, sender->td_end - receiver->td_maxwin - 1); 595 + 596 + if (in_recv_win && 597 + ack_ok && 598 + overshot <= receiver->td_maxwin && 599 + before(sack, receiver->td_end + 1)) { 600 + /* Work around TCPs that send more bytes than allowed by 601 + * the receive window. 602 + * 603 + * If the (marked as invalid) packet is allowed to pass by 604 + * the ruleset and the peer acks this data, then its possible 605 + * all future packets will trigger 'ACK is over upper bound' check. 606 + * 607 + * Thus if only the sequence check fails then do update td_end so 608 + * possible ACK for this data can update internal state. 
609 + */ 610 + sender->td_end = end; 611 + sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; 612 + 613 + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE, 614 + "%u bytes more than expected", overshot); 615 + } 616 + 617 + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID, 618 + "SEQ is over upper bound %u (over the window of the receiver)", 619 + sender->td_maxend + 1); 620 + } 621 + 622 + if (!before(sack, receiver->td_end + 1)) 623 + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID, 624 + "ACK is over upper bound %u (ACKed data not seen yet)", 625 + receiver->td_end + 1); 626 + 615 627 /* Is the ending sequence in the receive window (if available)? */ 616 628 in_recv_win = !receiver->td_maxwin || 617 629 after(end, sender->td_end - receiver->td_maxwin - 1); 630 + if (!in_recv_win) 631 + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE, 632 + "SEQ is under lower bound %u (already ACKed data retransmitted)", 633 + sender->td_end - receiver->td_maxwin - 1); 634 + if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) 635 + return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE, 636 + "ignored ACK under lower bound %u (possible overly delayed)", 637 + receiver->td_end - MAXACKWINDOW(sender) - 1); 618 638 619 - if (before(seq, sender->td_maxend + 1) && 620 - in_recv_win && 621 - before(sack, receiver->td_end + 1) && 622 - after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { 623 - /* 624 - * Take into account window scaling (RFC 1323). 625 - */ 626 - if (!tcph->syn) 627 - win <<= sender->td_scale; 639 + /* Take into account window scaling (RFC 1323). */ 640 + if (!tcph->syn) 641 + win <<= sender->td_scale; 628 642 629 - /* 630 - * Update sender data. 
631 - */ 632 - swin = win + (sack - ack); 633 - if (sender->td_maxwin < swin) 634 - sender->td_maxwin = swin; 635 - if (after(end, sender->td_end)) { 636 - sender->td_end = end; 637 - sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; 638 - } 639 - if (tcph->ack) { 640 - if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) { 641 - sender->td_maxack = ack; 642 - sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET; 643 - } else if (after(ack, sender->td_maxack)) 644 - sender->td_maxack = ack; 645 - } 646 - 647 - /* 648 - * Update receiver data. 649 - */ 650 - if (receiver->td_maxwin != 0 && after(end, sender->td_maxend)) 651 - receiver->td_maxwin += end - sender->td_maxend; 652 - if (after(sack + win, receiver->td_maxend - 1)) { 653 - receiver->td_maxend = sack + win; 654 - if (win == 0) 655 - receiver->td_maxend++; 656 - } 657 - if (ack == receiver->td_end) 658 - receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; 659 - 660 - /* 661 - * Check retransmissions. 662 - */ 663 - if (index == TCP_ACK_SET) { 664 - if (state->last_dir == dir 665 - && state->last_seq == seq 666 - && state->last_ack == ack 667 - && state->last_end == end 668 - && state->last_win == win_raw) 669 - state->retrans++; 670 - else { 671 - state->last_dir = dir; 672 - state->last_seq = seq; 673 - state->last_ack = ack; 674 - state->last_end = end; 675 - state->last_win = win_raw; 676 - state->retrans = 0; 677 - } 678 - } 679 - res = true; 680 - } else { 681 - res = false; 682 - if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || 683 - tn->tcp_be_liberal) 684 - res = true; 685 - if (!res) { 686 - bool seq_ok = before(seq, sender->td_maxend + 1); 687 - 688 - if (!seq_ok) { 689 - u32 overshot = end - sender->td_maxend + 1; 690 - bool ack_ok; 691 - 692 - ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1); 693 - 694 - if (in_recv_win && 695 - ack_ok && 696 - overshot <= receiver->td_maxwin && 697 - before(sack, receiver->td_end + 1)) { 698 - /* Work around TCPs that send more bytes than allowed by 
699 - * the receive window. 700 - * 701 - * If the (marked as invalid) packet is allowed to pass by 702 - * the ruleset and the peer acks this data, then its possible 703 - * all future packets will trigger 'ACK is over upper bound' check. 704 - * 705 - * Thus if only the sequence check fails then do update td_end so 706 - * possible ACK for this data can update internal state. 707 - */ 708 - sender->td_end = end; 709 - sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; 710 - 711 - nf_ct_l4proto_log_invalid(skb, ct, hook_state, 712 - "%u bytes more than expected", overshot); 713 - return res; 714 - } 715 - } 716 - 717 - nf_ct_l4proto_log_invalid(skb, ct, hook_state, 718 - "%s", 719 - before(seq, sender->td_maxend + 1) ? 720 - in_recv_win ? 721 - before(sack, receiver->td_end + 1) ? 722 - after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" 723 - : "ACK is under the lower bound (possible overly delayed ACK)" 724 - : "ACK is over the upper bound (ACKed data not seen yet)" 725 - : "SEQ is under the lower bound (already ACKed data retransmitted)" 726 - : "SEQ is over the upper bound (over the window of the receiver)"); 643 + /* Update sender data. */ 644 + swin = win + (sack - ack); 645 + if (sender->td_maxwin < swin) 646 + sender->td_maxwin = swin; 647 + if (after(end, sender->td_end)) { 648 + sender->td_end = end; 649 + sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; 650 + } 651 + if (tcph->ack) { 652 + if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) { 653 + sender->td_maxack = ack; 654 + sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET; 655 + } else if (after(ack, sender->td_maxack)) { 656 + sender->td_maxack = ack; 727 657 } 728 658 } 729 659 730 - return res; 660 + /* Update receiver data. 
*/ 661 + if (receiver->td_maxwin != 0 && after(end, sender->td_maxend)) 662 + receiver->td_maxwin += end - sender->td_maxend; 663 + if (after(sack + win, receiver->td_maxend - 1)) { 664 + receiver->td_maxend = sack + win; 665 + if (win == 0) 666 + receiver->td_maxend++; 667 + } 668 + if (ack == receiver->td_end) 669 + receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; 670 + 671 + /* Check retransmissions. */ 672 + if (index == TCP_ACK_SET) { 673 + if (state->last_dir == dir && 674 + state->last_seq == seq && 675 + state->last_ack == ack && 676 + state->last_end == end && 677 + state->last_win == win_raw) { 678 + state->retrans++; 679 + } else { 680 + state->last_dir = dir; 681 + state->last_seq = seq; 682 + state->last_ack = ack; 683 + state->last_end = end; 684 + state->last_win = win_raw; 685 + state->retrans = 0; 686 + } 687 + } 688 + 689 + return NFCT_TCP_ACCEPT; 690 + } 691 + 692 + static void __cold nf_tcp_handle_invalid(struct nf_conn *ct, 693 + enum ip_conntrack_dir dir, 694 + int index, 695 + const struct sk_buff *skb, 696 + const struct nf_hook_state *hook_state) 697 + { 698 + const unsigned int *timeouts; 699 + const struct nf_tcp_net *tn; 700 + unsigned int timeout; 701 + u32 expires; 702 + 703 + if (!test_bit(IPS_ASSURED_BIT, &ct->status) || 704 + test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) 705 + return; 706 + 707 + /* We don't want to have connections hanging around in ESTABLISHED 708 + * state for long time 'just because' conntrack deemed a FIN/RST 709 + * out-of-window. 710 + * 711 + * Shrink the timeout just like when there is unacked data. 712 + * This speeds up eviction of 'dead' connections where the 713 + * connection and conntracks internal state are out of sync. 
714 + */ 715 + switch (index) { 716 + case TCP_RST_SET: 717 + case TCP_FIN_SET: 718 + break; 719 + default: 720 + return; 721 + } 722 + 723 + if (ct->proto.tcp.last_dir != dir && 724 + (ct->proto.tcp.last_index == TCP_FIN_SET || 725 + ct->proto.tcp.last_index == TCP_RST_SET)) { 726 + expires = nf_ct_expires(ct); 727 + if (expires < 120 * HZ) 728 + return; 729 + 730 + tn = nf_tcp_pernet(nf_ct_net(ct)); 731 + timeouts = nf_ct_timeout_lookup(ct); 732 + if (!timeouts) 733 + timeouts = tn->timeouts; 734 + 735 + timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]); 736 + if (expires > timeout) { 737 + nf_ct_l4proto_log_invalid(skb, ct, hook_state, 738 + "packet (index %d, dir %d) response for index %d lower timeout to %u", 739 + index, dir, ct->proto.tcp.last_index, timeout); 740 + 741 + WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp); 742 + } 743 + } else { 744 + ct->proto.tcp.last_index = index; 745 + ct->proto.tcp.last_dir = dir; 746 + } 731 747 } 732 748 733 749 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */ ··· 933 861 struct nf_conntrack_tuple *tuple; 934 862 enum tcp_conntrack new_state, old_state; 935 863 unsigned int index, *timeouts; 864 + enum nf_ct_tcp_action res; 936 865 enum ip_conntrack_dir dir; 937 866 const struct tcphdr *th; 938 867 struct tcphdr _tcph; ··· 1199 1126 break; 1200 1127 } 1201 1128 1202 - if (!tcp_in_window(ct, dir, index, 1203 - skb, dataoff, th, state)) { 1129 + res = tcp_in_window(ct, dir, index, 1130 + skb, dataoff, th, state); 1131 + switch (res) { 1132 + case NFCT_TCP_IGNORE: 1133 + spin_unlock_bh(&ct->lock); 1134 + return NF_ACCEPT; 1135 + case NFCT_TCP_INVALID: 1136 + nf_tcp_handle_invalid(ct, dir, index, skb, state); 1204 1137 spin_unlock_bh(&ct->lock); 1205 1138 return -NF_ACCEPT; 1139 + case NFCT_TCP_ACCEPT: 1140 + break; 1206 1141 } 1207 1142 in_window: 1208 1143 /* From now on we have got in-window packets */
+2 -2
net/netfilter/nf_log.c
··· 443 443 mutex_lock(&nf_log_mutex); 444 444 logger = nft_log_dereference(net->nf.nf_loggers[tindex]); 445 445 if (!logger) 446 - strlcpy(buf, "NONE", sizeof(buf)); 446 + strscpy(buf, "NONE", sizeof(buf)); 447 447 else 448 - strlcpy(buf, logger->name, sizeof(buf)); 448 + strscpy(buf, logger->name, sizeof(buf)); 449 449 mutex_unlock(&nf_log_mutex); 450 450 r = proc_dostring(&tmp, write, buffer, lenp, ppos); 451 451 }
+1 -13
net/netfilter/nf_nat_amanda.c
··· 44 44 exp->expectfn = nf_nat_follow_master; 45 45 46 46 /* Try to get same port: if not, try to change it. */ 47 - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { 48 - int res; 49 - 50 - exp->tuple.dst.u.tcp.port = htons(port); 51 - res = nf_ct_expect_related(exp, 0); 52 - if (res == 0) 53 - break; 54 - else if (res != -EBUSY) { 55 - port = 0; 56 - break; 57 - } 58 - } 59 - 47 + port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port)); 60 48 if (port == 0) { 61 49 nf_ct_helper_log(skb, exp->master, "all ports in use"); 62 50 return NF_DROP;
+2 -15
net/netfilter/nf_nat_ftp.c
··· 86 86 * this one. */ 87 87 exp->expectfn = nf_nat_follow_master; 88 88 89 - /* Try to get same port: if not, try to change it. */ 90 - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { 91 - int ret; 92 - 93 - exp->tuple.dst.u.tcp.port = htons(port); 94 - ret = nf_ct_expect_related(exp, 0); 95 - if (ret == 0) 96 - break; 97 - else if (ret != -EBUSY) { 98 - port = 0; 99 - break; 100 - } 101 - } 102 - 89 + port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port)); 103 90 if (port == 0) { 104 - nf_ct_helper_log(skb, ct, "all ports in use"); 91 + nf_ct_helper_log(skb, exp->master, "all ports in use"); 105 92 return NF_DROP; 106 93 } 107 94
+31
net/netfilter/nf_nat_helper.c
··· 198 198 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); 199 199 } 200 200 EXPORT_SYMBOL(nf_nat_follow_master); 201 + 202 + u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port) 203 + { 204 + static const unsigned int max_attempts = 128; 205 + int range, attempts_left; 206 + u16 min = port; 207 + 208 + range = USHRT_MAX - port; 209 + attempts_left = range; 210 + 211 + if (attempts_left > max_attempts) 212 + attempts_left = max_attempts; 213 + 214 + /* Try to get same port: if not, try to change it. */ 215 + for (;;) { 216 + int res; 217 + 218 + exp->tuple.dst.u.tcp.port = htons(port); 219 + res = nf_ct_expect_related(exp, 0); 220 + if (res == 0) 221 + return port; 222 + 223 + if (res != -EBUSY || (--attempts_left < 0)) 224 + break; 225 + 226 + port = min + prandom_u32_max(range); 227 + } 228 + 229 + return 0; 230 + } 231 + EXPORT_SYMBOL_GPL(nf_nat_exp_find_port);
+2 -14
net/netfilter/nf_nat_irc.c
··· 48 48 exp->dir = IP_CT_DIR_REPLY; 49 49 exp->expectfn = nf_nat_follow_master; 50 50 51 - /* Try to get same port: if not, try to change it. */ 52 - for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { 53 - int ret; 54 - 55 - exp->tuple.dst.u.tcp.port = htons(port); 56 - ret = nf_ct_expect_related(exp, 0); 57 - if (ret == 0) 58 - break; 59 - else if (ret != -EBUSY) { 60 - port = 0; 61 - break; 62 - } 63 - } 64 - 51 + port = nf_nat_exp_find_port(exp, 52 + ntohs(exp->saved_proto.tcp.port)); 65 53 if (port == 0) { 66 54 nf_ct_helper_log(skb, ct, "all ports in use"); 67 55 return NF_DROP;
+1 -13
net/netfilter/nf_nat_sip.c
··· 410 410 exp->dir = !dir; 411 411 exp->expectfn = nf_nat_sip_expected; 412 412 413 - for (; port != 0; port++) { 414 - int ret; 415 - 416 - exp->tuple.dst.u.udp.port = htons(port); 417 - ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER); 418 - if (ret == 0) 419 - break; 420 - else if (ret != -EBUSY) { 421 - port = 0; 422 - break; 423 - } 424 - } 425 - 413 + port = nf_nat_exp_find_port(exp, port); 426 414 if (port == 0) { 427 415 nf_ct_helper_log(skb, ct, "all ports in use for SIP"); 428 416 return NF_DROP;
+1 -1
net/netfilter/nf_tables_api.c
··· 742 742 return -ENOMEM; 743 743 744 744 req->done = false; 745 - strlcpy(req->module, module_name, MODULE_NAME_LEN); 745 + strscpy(req->module, module_name, MODULE_NAME_LEN); 746 746 list_add_tail(&req->list, &nft_net->module_list); 747 747 748 748 return -EAGAIN;
+1 -1
net/netfilter/nft_osf.c
··· 51 51 snprintf(os_match, NFT_OSF_MAXGENRELEN, "%s:%s", 52 52 data.genre, data.version); 53 53 else 54 - strlcpy(os_match, data.genre, NFT_OSF_MAXGENRELEN); 54 + strscpy(os_match, data.genre, NFT_OSF_MAXGENRELEN); 55 55 56 56 strncpy((char *)dest, os_match, NFT_OSF_MAXGENRELEN); 57 57 }
+10 -10
net/netfilter/x_tables.c
··· 766 766 767 767 msize += off; 768 768 m->u.user.match_size = msize; 769 - strlcpy(name, match->name, sizeof(name)); 769 + strscpy(name, match->name, sizeof(name)); 770 770 module_put(match->me); 771 771 strncpy(m->u.user.name, name, sizeof(m->u.user.name)); 772 772 ··· 1146 1146 1147 1147 tsize += off; 1148 1148 t->u.user.target_size = tsize; 1149 - strlcpy(name, target->name, sizeof(name)); 1149 + strscpy(name, target->name, sizeof(name)); 1150 1150 module_put(target->me); 1151 1151 strncpy(t->u.user.name, name, sizeof(t->u.user.name)); 1152 1152 ··· 1827 1827 root_uid = make_kuid(net->user_ns, 0); 1828 1828 root_gid = make_kgid(net->user_ns, 0); 1829 1829 1830 - strlcpy(buf, xt_prefix[af], sizeof(buf)); 1830 + strscpy(buf, xt_prefix[af], sizeof(buf)); 1831 1831 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1832 1832 proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops, 1833 1833 sizeof(struct seq_net_private), ··· 1837 1837 if (uid_valid(root_uid) && gid_valid(root_gid)) 1838 1838 proc_set_user(proc, root_uid, root_gid); 1839 1839 1840 - strlcpy(buf, xt_prefix[af], sizeof(buf)); 1840 + strscpy(buf, xt_prefix[af], sizeof(buf)); 1841 1841 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1842 1842 proc = proc_create_seq_private(buf, 0440, net->proc_net, 1843 1843 &xt_match_seq_ops, sizeof(struct nf_mttg_trav), ··· 1847 1847 if (uid_valid(root_uid) && gid_valid(root_gid)) 1848 1848 proc_set_user(proc, root_uid, root_gid); 1849 1849 1850 - strlcpy(buf, xt_prefix[af], sizeof(buf)); 1850 + strscpy(buf, xt_prefix[af], sizeof(buf)); 1851 1851 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 1852 1852 proc = proc_create_seq_private(buf, 0440, net->proc_net, 1853 1853 &xt_target_seq_ops, sizeof(struct nf_mttg_trav), ··· 1862 1862 1863 1863 #ifdef CONFIG_PROC_FS 1864 1864 out_remove_matches: 1865 - strlcpy(buf, xt_prefix[af], sizeof(buf)); 1865 + strscpy(buf, xt_prefix[af], sizeof(buf)); 1866 1866 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1867 1867 
remove_proc_entry(buf, net->proc_net); 1868 1868 1869 1869 out_remove_tables: 1870 - strlcpy(buf, xt_prefix[af], sizeof(buf)); 1870 + strscpy(buf, xt_prefix[af], sizeof(buf)); 1871 1871 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1872 1872 remove_proc_entry(buf, net->proc_net); 1873 1873 out: ··· 1881 1881 #ifdef CONFIG_PROC_FS 1882 1882 char buf[XT_FUNCTION_MAXNAMELEN]; 1883 1883 1884 - strlcpy(buf, xt_prefix[af], sizeof(buf)); 1884 + strscpy(buf, xt_prefix[af], sizeof(buf)); 1885 1885 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1886 1886 remove_proc_entry(buf, net->proc_net); 1887 1887 1888 - strlcpy(buf, xt_prefix[af], sizeof(buf)); 1888 + strscpy(buf, xt_prefix[af], sizeof(buf)); 1889 1889 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 1890 1890 remove_proc_entry(buf, net->proc_net); 1891 1891 1892 - strlcpy(buf, xt_prefix[af], sizeof(buf)); 1892 + strscpy(buf, xt_prefix[af], sizeof(buf)); 1893 1893 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1894 1894 remove_proc_entry(buf, net->proc_net); 1895 1895 #endif /*CONFIG_PROC_FS*/
+1 -1
net/netfilter/xt_RATEEST.c
··· 144 144 goto err1; 145 145 146 146 gnet_stats_basic_sync_init(&est->bstats); 147 - strlcpy(est->name, info->name, sizeof(est->name)); 147 + strscpy(est->name, info->name, sizeof(est->name)); 148 148 spin_lock_init(&est->lock); 149 149 est->refcnt = 1; 150 150 est->params.interval = info->interval;