Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: nf_conntrack: pass timeout array to l4->new and l4->packet

This patch defines a new interface for l4 protocol trackers:

unsigned int *(*get_timeouts)(struct net *net);

that is used to return the array of unsigned int that contains
the timeouts that will be applied for this flow. This is passed
to the l4proto->new(...) and l4proto->packet(...) functions to
specify the timeout policy.

This interface allows per-net global timeout configuration
(although only DCCP supports this for now), and it will allow
custom timeout configuration by means of follow-up
patches.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

+129 -53
+6 -2
include/net/netfilter/nf_conntrack_l4proto.h
··· 39 39 unsigned int dataoff, 40 40 enum ip_conntrack_info ctinfo, 41 41 u_int8_t pf, 42 - unsigned int hooknum); 42 + unsigned int hooknum, 43 + unsigned int *timeouts); 43 44 44 45 /* Called when a new connection for this protocol found; 45 46 * returns TRUE if it's OK. If so, packet() called next. */ 46 47 bool (*new)(struct nf_conn *ct, const struct sk_buff *skb, 47 - unsigned int dataoff); 48 + unsigned int dataoff, unsigned int *timeouts); 48 49 49 50 /* Called when a conntrack entry is destroyed */ 50 51 void (*destroy)(struct nf_conn *ct); ··· 60 59 61 60 /* Print out the private part of the conntrack. */ 62 61 int (*print_conntrack)(struct seq_file *s, struct nf_conn *); 62 + 63 + /* Return the array of timeouts for this protocol. */ 64 + unsigned int *(*get_timeouts)(struct net *net); 63 65 64 66 /* convert protoinfo to nfnetink attributes */ 65 67 int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
+10 -3
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
··· 75 75 ntohs(tuple->src.u.icmp.id)); 76 76 } 77 77 78 + static unsigned int *icmp_get_timeouts(struct net *net) 79 + { 80 + return &nf_ct_icmp_timeout; 81 + } 82 + 78 83 /* Returns verdict for packet, or -1 for invalid. */ 79 84 static int icmp_packet(struct nf_conn *ct, 80 85 const struct sk_buff *skb, 81 86 unsigned int dataoff, 82 87 enum ip_conntrack_info ctinfo, 83 88 u_int8_t pf, 84 - unsigned int hooknum) 89 + unsigned int hooknum, 90 + unsigned int *timeout) 85 91 { 86 92 /* Do not immediately delete the connection after the first 87 93 successful reply to avoid excessive conntrackd traffic 88 94 and also to handle correctly ICMP echo reply duplicates. */ 89 - nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout); 95 + nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); 90 96 91 97 return NF_ACCEPT; 92 98 } 93 99 94 100 /* Called when a new connection for this protocol found. */ 95 101 static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, 96 - unsigned int dataoff) 102 + unsigned int dataoff, unsigned int *timeouts) 97 103 { 98 104 static const u_int8_t valid_new[] = { 99 105 [ICMP_ECHO] = 1, ··· 304 298 .invert_tuple = icmp_invert_tuple, 305 299 .print_tuple = icmp_print_tuple, 306 300 .packet = icmp_packet, 301 + .get_timeouts = icmp_get_timeouts, 307 302 .new = icmp_new, 308 303 .error = icmp_error, 309 304 .destroy = NULL,
+10 -3
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
··· 88 88 ntohs(tuple->src.u.icmp.id)); 89 89 } 90 90 91 + static unsigned int *icmpv6_get_timeouts(struct net *net) 92 + { 93 + return &nf_ct_icmpv6_timeout; 94 + } 95 + 91 96 /* Returns verdict for packet, or -1 for invalid. */ 92 97 static int icmpv6_packet(struct nf_conn *ct, 93 98 const struct sk_buff *skb, 94 99 unsigned int dataoff, 95 100 enum ip_conntrack_info ctinfo, 96 101 u_int8_t pf, 97 - unsigned int hooknum) 102 + unsigned int hooknum, 103 + unsigned int *timeout) 98 104 { 99 105 /* Do not immediately delete the connection after the first 100 106 successful reply to avoid excessive conntrackd traffic 101 107 and also to handle correctly ICMP echo reply duplicates. */ 102 - nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout); 108 + nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); 103 109 104 110 return NF_ACCEPT; 105 111 } 106 112 107 113 /* Called when a new connection for this protocol found. */ 108 114 static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb, 109 - unsigned int dataoff) 115 + unsigned int dataoff, unsigned int *timeouts) 110 116 { 111 117 static const u_int8_t valid_new[] = { 112 118 [ICMPV6_ECHO_REQUEST - 128] = 1, ··· 299 293 .invert_tuple = icmpv6_invert_tuple, 300 294 .print_tuple = icmpv6_print_tuple, 301 295 .packet = icmpv6_packet, 296 + .get_timeouts = icmpv6_get_timeouts, 302 297 .new = icmpv6_new, 303 298 .error = icmpv6_error, 304 299 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+12 -6
net/netfilter/nf_conntrack_core.c
··· 763 763 struct nf_conntrack_l3proto *l3proto, 764 764 struct nf_conntrack_l4proto *l4proto, 765 765 struct sk_buff *skb, 766 - unsigned int dataoff, u32 hash) 766 + unsigned int dataoff, u32 hash, 767 + unsigned int *timeouts) 767 768 { 768 769 struct nf_conn *ct; 769 770 struct nf_conn_help *help; ··· 783 782 if (IS_ERR(ct)) 784 783 return (struct nf_conntrack_tuple_hash *)ct; 785 784 786 - if (!l4proto->new(ct, skb, dataoff)) { 785 + if (!l4proto->new(ct, skb, dataoff, timeouts)) { 787 786 nf_conntrack_free(ct); 788 787 pr_debug("init conntrack: can't track with proto module\n"); 789 788 return NULL; ··· 849 848 struct nf_conntrack_l3proto *l3proto, 850 849 struct nf_conntrack_l4proto *l4proto, 851 850 int *set_reply, 852 - enum ip_conntrack_info *ctinfo) 851 + enum ip_conntrack_info *ctinfo, 852 + unsigned int *timeouts) 853 853 { 854 854 struct nf_conntrack_tuple tuple; 855 855 struct nf_conntrack_tuple_hash *h; ··· 870 868 h = __nf_conntrack_find_get(net, zone, &tuple, hash); 871 869 if (!h) { 872 870 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, 873 - skb, dataoff, hash); 871 + skb, dataoff, hash, timeouts); 874 872 if (!h) 875 873 return NULL; 876 874 if (IS_ERR(h)) ··· 911 909 enum ip_conntrack_info ctinfo; 912 910 struct nf_conntrack_l3proto *l3proto; 913 911 struct nf_conntrack_l4proto *l4proto; 912 + unsigned int *timeouts; 914 913 unsigned int dataoff; 915 914 u_int8_t protonum; 916 915 int set_reply = 0; ··· 958 955 goto out; 959 956 } 960 957 958 + timeouts = l4proto->get_timeouts(net); 959 + 961 960 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, 962 - l3proto, l4proto, &set_reply, &ctinfo); 961 + l3proto, l4proto, &set_reply, &ctinfo, 962 + timeouts); 963 963 if (!ct) { 964 964 /* Not valid part of a connection */ 965 965 NF_CT_STAT_INC_ATOMIC(net, invalid); ··· 979 973 980 974 NF_CT_ASSERT(skb->nfct); 981 975 982 - ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum); 976 + ret = l4proto->packet(ct, skb, dataoff, 
ctinfo, pf, hooknum, timeouts); 983 977 if (ret <= 0) { 984 978 /* Invalid: inverse of the return code tells 985 979 * the netfilter core what to do */
+11 -5
net/netfilter/nf_conntrack_proto_dccp.c
··· 423 423 } 424 424 425 425 static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, 426 - unsigned int dataoff) 426 + unsigned int dataoff, unsigned int *timeouts) 427 427 { 428 428 struct net *net = nf_ct_net(ct); 429 429 struct dccp_net *dn; ··· 472 472 ntohl(dhack->dccph_ack_nr_low); 473 473 } 474 474 475 + static unsigned int *dccp_get_timeouts(struct net *net) 476 + { 477 + return dccp_pernet(net)->dccp_timeout; 478 + } 479 + 475 480 static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, 476 481 unsigned int dataoff, enum ip_conntrack_info ctinfo, 477 - u_int8_t pf, unsigned int hooknum) 482 + u_int8_t pf, unsigned int hooknum, 483 + unsigned int *timeouts) 478 484 { 479 485 struct net *net = nf_ct_net(ct); 480 - struct dccp_net *dn; 481 486 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 482 487 struct dccp_hdr _dh, *dh; 483 488 u_int8_t type, old_state, new_state; ··· 564 559 if (new_state != old_state) 565 560 nf_conntrack_event_cache(IPCT_PROTOINFO, ct); 566 561 567 - dn = dccp_pernet(net); 568 - nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]); 562 + nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); 569 563 570 564 return NF_ACCEPT; 571 565 } ··· 771 767 .invert_tuple = dccp_invert_tuple, 772 768 .new = dccp_new, 773 769 .packet = dccp_packet, 770 + .get_timeouts = dccp_get_timeouts, 774 771 .error = dccp_error, 775 772 .print_tuple = dccp_print_tuple, 776 773 .print_conntrack = dccp_print_conntrack, ··· 794 789 .invert_tuple = dccp_invert_tuple, 795 790 .new = dccp_new, 796 791 .packet = dccp_packet, 792 + .get_timeouts = dccp_get_timeouts, 797 793 .error = dccp_error, 798 794 .print_tuple = dccp_print_tuple, 799 795 .print_conntrack = dccp_print_conntrack,
+19 -12
net/netfilter/nf_conntrack_proto_generic.c
··· 40 40 return 0; 41 41 } 42 42 43 - /* Returns verdict for packet, or -1 for invalid. */ 44 - static int packet(struct nf_conn *ct, 45 - const struct sk_buff *skb, 46 - unsigned int dataoff, 47 - enum ip_conntrack_info ctinfo, 48 - u_int8_t pf, 49 - unsigned int hooknum) 43 + static unsigned int *generic_get_timeouts(struct net *net) 50 44 { 51 - nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_generic_timeout); 45 + return &nf_ct_generic_timeout; 46 + } 47 + 48 + /* Returns verdict for packet, or -1 for invalid. */ 49 + static int generic_packet(struct nf_conn *ct, 50 + const struct sk_buff *skb, 51 + unsigned int dataoff, 52 + enum ip_conntrack_info ctinfo, 53 + u_int8_t pf, 54 + unsigned int hooknum, 55 + unsigned int *timeout) 56 + { 57 + nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); 52 58 return NF_ACCEPT; 53 59 } 54 60 55 61 /* Called when a new connection for this protocol found. */ 56 - static bool new(struct nf_conn *ct, const struct sk_buff *skb, 57 - unsigned int dataoff) 62 + static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, 63 + unsigned int dataoff, unsigned int *timeouts) 58 64 { 59 65 return true; 60 66 } ··· 99 93 .pkt_to_tuple = generic_pkt_to_tuple, 100 94 .invert_tuple = generic_invert_tuple, 101 95 .print_tuple = generic_print_tuple, 102 - .packet = packet, 103 - .new = new, 96 + .packet = generic_packet, 97 + .get_timeouts = generic_get_timeouts, 98 + .new = generic_new, 104 99 #ifdef CONFIG_SYSCTL 105 100 .ctl_table_header = &generic_sysctl_header, 106 101 .ctl_table = generic_sysctl_table,
+11 -4
net/netfilter/nf_conntrack_proto_gre.c
··· 235 235 (ct->proto.gre.stream_timeout / HZ)); 236 236 } 237 237 238 + static unsigned int *gre_get_timeouts(struct net *net) 239 + { 240 + return gre_timeouts; 241 + } 242 + 238 243 /* Returns verdict for packet, and may modify conntrack */ 239 244 static int gre_packet(struct nf_conn *ct, 240 245 const struct sk_buff *skb, 241 246 unsigned int dataoff, 242 247 enum ip_conntrack_info ctinfo, 243 248 u_int8_t pf, 244 - unsigned int hooknum) 249 + unsigned int hooknum, 250 + unsigned int *timeouts) 245 251 { 246 252 /* If we've seen traffic both ways, this is a GRE connection. 247 253 * Extend timeout. */ ··· 266 260 267 261 /* Called when a new connection for this protocol found. */ 268 262 static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb, 269 - unsigned int dataoff) 263 + unsigned int dataoff, unsigned int *timeouts) 270 264 { 271 265 pr_debug(": "); 272 266 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 273 267 274 268 /* initialize to sane value. Ideally a conntrack helper 275 269 * (e.g. in case of pptp) is increasing them */ 276 - ct->proto.gre.stream_timeout = gre_timeouts[GRE_CT_REPLIED]; 277 - ct->proto.gre.timeout = gre_timeouts[GRE_CT_UNREPLIED]; 270 + ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED]; 271 + ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED]; 278 272 279 273 return true; 280 274 } ··· 301 295 .invert_tuple = gre_invert_tuple, 302 296 .print_tuple = gre_print_tuple, 303 297 .print_conntrack = gre_print_conntrack, 298 + .get_timeouts = gre_get_timeouts, 304 299 .packet = gre_packet, 305 300 .new = gre_new, 306 301 .destroy = gre_destroy,
+11 -3
net/netfilter/nf_conntrack_proto_sctp.c
··· 279 279 return sctp_conntracks[dir][i][cur_state]; 280 280 } 281 281 282 + static unsigned int *sctp_get_timeouts(struct net *net) 283 + { 284 + return sctp_timeouts; 285 + } 286 + 282 287 /* Returns verdict for packet, or -NF_ACCEPT for invalid. */ 283 288 static int sctp_packet(struct nf_conn *ct, 284 289 const struct sk_buff *skb, 285 290 unsigned int dataoff, 286 291 enum ip_conntrack_info ctinfo, 287 292 u_int8_t pf, 288 - unsigned int hooknum) 293 + unsigned int hooknum, 294 + unsigned int *timeouts) 289 295 { 290 296 enum sctp_conntrack new_state, old_state; 291 297 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); ··· 376 370 } 377 371 spin_unlock_bh(&ct->lock); 378 372 379 - nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]); 373 + nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]); 380 374 381 375 if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED && 382 376 dir == IP_CT_DIR_REPLY && ··· 396 390 397 391 /* Called when a new connection for this protocol found. */ 398 392 static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb, 399 - unsigned int dataoff) 393 + unsigned int dataoff, unsigned int *timeouts) 400 394 { 401 395 enum sctp_conntrack new_state; 402 396 const struct sctphdr *sh; ··· 670 664 .print_tuple = sctp_print_tuple, 671 665 .print_conntrack = sctp_print_conntrack, 672 666 .packet = sctp_packet, 667 + .get_timeouts = sctp_get_timeouts, 673 668 .new = sctp_new, 674 669 .me = THIS_MODULE, 675 670 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) ··· 701 694 .print_tuple = sctp_print_tuple, 702 695 .print_conntrack = sctp_print_conntrack, 703 696 .packet = sctp_packet, 697 + .get_timeouts = sctp_get_timeouts, 704 698 .new = sctp_new, 705 699 .me = THIS_MODULE, 706 700 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+15 -7
net/netfilter/nf_conntrack_proto_tcp.c
··· 813 813 return NF_ACCEPT; 814 814 } 815 815 816 + static unsigned int *tcp_get_timeouts(struct net *net) 817 + { 818 + return tcp_timeouts; 819 + } 820 + 816 821 /* Returns verdict for packet, or -1 for invalid. */ 817 822 static int tcp_packet(struct nf_conn *ct, 818 823 const struct sk_buff *skb, 819 824 unsigned int dataoff, 820 825 enum ip_conntrack_info ctinfo, 821 826 u_int8_t pf, 822 - unsigned int hooknum) 827 + unsigned int hooknum, 828 + unsigned int *timeouts) 823 829 { 824 830 struct net *net = nf_ct_net(ct); 825 831 struct nf_conntrack_tuple *tuple; ··· 1020 1014 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; 1021 1015 1022 1016 if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans && 1023 - tcp_timeouts[new_state] > tcp_timeouts[TCP_CONNTRACK_RETRANS]) 1024 - timeout = tcp_timeouts[TCP_CONNTRACK_RETRANS]; 1017 + timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) 1018 + timeout = timeouts[TCP_CONNTRACK_RETRANS]; 1025 1019 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) & 1026 1020 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && 1027 - tcp_timeouts[new_state] > tcp_timeouts[TCP_CONNTRACK_UNACK]) 1028 - timeout = tcp_timeouts[TCP_CONNTRACK_UNACK]; 1021 + timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) 1022 + timeout = timeouts[TCP_CONNTRACK_UNACK]; 1029 1023 else 1030 - timeout = tcp_timeouts[new_state]; 1024 + timeout = timeouts[new_state]; 1031 1025 spin_unlock_bh(&ct->lock); 1032 1026 1033 1027 if (new_state != old_state) ··· 1059 1053 1060 1054 /* Called when a new connection for this protocol found. 
*/ 1061 1055 static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb, 1062 - unsigned int dataoff) 1056 + unsigned int dataoff, unsigned int *timeouts) 1063 1057 { 1064 1058 enum tcp_conntrack new_state; 1065 1059 const struct tcphdr *th; ··· 1450 1444 .print_tuple = tcp_print_tuple, 1451 1445 .print_conntrack = tcp_print_conntrack, 1452 1446 .packet = tcp_packet, 1447 + .get_timeouts = tcp_get_timeouts, 1453 1448 .new = tcp_new, 1454 1449 .error = tcp_error, 1455 1450 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) ··· 1483 1476 .print_tuple = tcp_print_tuple, 1484 1477 .print_conntrack = tcp_print_conntrack, 1485 1478 .packet = tcp_packet, 1479 + .get_timeouts = tcp_get_timeouts, 1486 1480 .new = tcp_new, 1487 1481 .error = tcp_error, 1488 1482 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+12 -4
net/netfilter/nf_conntrack_proto_udp.c
··· 71 71 ntohs(tuple->dst.u.udp.port)); 72 72 } 73 73 74 + static unsigned int *udp_get_timeouts(struct net *net) 75 + { 76 + return udp_timeouts; 77 + } 78 + 74 79 /* Returns verdict for packet, and may modify conntracktype */ 75 80 static int udp_packet(struct nf_conn *ct, 76 81 const struct sk_buff *skb, 77 82 unsigned int dataoff, 78 83 enum ip_conntrack_info ctinfo, 79 84 u_int8_t pf, 80 - unsigned int hooknum) 85 + unsigned int hooknum, 86 + unsigned int *timeouts) 81 87 { 82 88 /* If we've seen traffic both ways, this is some kind of UDP 83 89 stream. Extend timeout. */ 84 90 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 85 91 nf_ct_refresh_acct(ct, ctinfo, skb, 86 - udp_timeouts[UDP_CT_REPLIED]); 92 + timeouts[UDP_CT_REPLIED]); 87 93 /* Also, more likely to be important, and not a probe */ 88 94 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 89 95 nf_conntrack_event_cache(IPCT_ASSURED, ct); 90 96 } else { 91 97 nf_ct_refresh_acct(ct, ctinfo, skb, 92 - udp_timeouts[UDP_CT_UNREPLIED]); 98 + timeouts[UDP_CT_UNREPLIED]); 93 99 } 94 100 return NF_ACCEPT; 95 101 } 96 102 97 103 /* Called when a new connection for this protocol found. */ 98 104 static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, 99 - unsigned int dataoff) 105 + unsigned int dataoff, unsigned int *timeouts) 100 106 { 101 107 return true; 102 108 } ··· 202 196 .invert_tuple = udp_invert_tuple, 203 197 .print_tuple = udp_print_tuple, 204 198 .packet = udp_packet, 199 + .get_timeouts = udp_get_timeouts, 205 200 .new = udp_new, 206 201 .error = udp_error, 207 202 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) ··· 231 224 .invert_tuple = udp_invert_tuple, 232 225 .print_tuple = udp_print_tuple, 233 226 .packet = udp_packet, 227 + .get_timeouts = udp_get_timeouts, 234 228 .new = udp_new, 235 229 .error = udp_error, 236 230 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+12 -4
net/netfilter/nf_conntrack_proto_udplite.c
··· 68 68 ntohs(tuple->dst.u.udp.port)); 69 69 } 70 70 71 + static unsigned int *udplite_get_timeouts(struct net *net) 72 + { 73 + return udplite_timeouts; 74 + } 75 + 71 76 /* Returns verdict for packet, and may modify conntracktype */ 72 77 static int udplite_packet(struct nf_conn *ct, 73 78 const struct sk_buff *skb, 74 79 unsigned int dataoff, 75 80 enum ip_conntrack_info ctinfo, 76 81 u_int8_t pf, 77 - unsigned int hooknum) 82 + unsigned int hooknum, 83 + unsigned int *timeouts) 78 84 { 79 85 /* If we've seen traffic both ways, this is some kind of UDP 80 86 stream. Extend timeout. */ 81 87 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 82 88 nf_ct_refresh_acct(ct, ctinfo, skb, 83 - udplite_timeouts[UDPLITE_CT_REPLIED]); 89 + timeouts[UDPLITE_CT_REPLIED]); 84 90 /* Also, more likely to be important, and not a probe */ 85 91 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 86 92 nf_conntrack_event_cache(IPCT_ASSURED, ct); 87 93 } else { 88 94 nf_ct_refresh_acct(ct, ctinfo, skb, 89 - udplite_timeouts[UDPLITE_CT_UNREPLIED]); 95 + timeouts[UDPLITE_CT_UNREPLIED]); 90 96 } 91 97 return NF_ACCEPT; 92 98 } 93 99 94 100 /* Called when a new connection for this protocol found. */ 95 101 static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb, 96 - unsigned int dataoff) 102 + unsigned int dataoff, unsigned int *timeouts) 97 103 { 98 104 return true; 99 105 } ··· 187 181 .invert_tuple = udplite_invert_tuple, 188 182 .print_tuple = udplite_print_tuple, 189 183 .packet = udplite_packet, 184 + .get_timeouts = udplite_get_timeouts, 190 185 .new = udplite_new, 191 186 .error = udplite_error, 192 187 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) ··· 212 205 .invert_tuple = udplite_invert_tuple, 213 206 .print_tuple = udplite_print_tuple, 214 207 .packet = udplite_packet, 208 + .get_timeouts = udplite_get_timeouts, 215 209 .new = udplite_new, 216 210 .error = udplite_error, 217 211 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)