Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

The following patchset contains Netfilter/IPVS updates for your net-next tree:

1) Introduce a hashtable to speed up object lookups, from Florian Westphal.

2) Make direct calls to built-in extension, also from Florian.

3) Call helper before confirming the conntrack as it used to be originally,
from Florian.

4) Call request_module() to autoload br_netfilter when physdev is used
to relax the dependency, also from Florian.

5) Allow inserting rules at a given position ID that is internal to the
batch, from Phil Sutter.

6) Several patches to replace conntrack indirections by direct calls,
and to reduce modularization, from Florian. This also includes
several follow up patches to deal with minor fallout from this
rework.

7) Use RCU from conntrack gre helper, from Florian.

8) GRE conntrack module becomes built-in into nf_conntrack, from Florian.

9) Replace nf_ct_invert_tuplepr() by calls to nf_ct_invert_tuple(),
from Florian.

10) Unify sysctl handling at the core of nf_conntrack, from Florian.

11) Provide modparam to register conntrack hooks.

12) Allow matching on the interface kind string, from wenxu.

13) Remove several exported symbols, not required anymore now after
a bit of de-modularization work has been done, from Florian.

14) Remove built-in map support in the hash extension, this can be
done with the existing userspace infrastructure, from Laura.

15) Remove indirection to calculate checksums in IPVS, from Matteo Croce.

16) Use call wrappers for indirection in IPVS, also from Matteo.

17) Remove superfluous __percpu parameter in nft_counter, patch from
Luc Van Oostenryck.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1178 -1697
+4 -13
include/linux/netfilter/nf_conntrack_proto_gre.h
··· 19 19 struct nf_ct_gre_keymap { 20 20 struct list_head list; 21 21 struct nf_conntrack_tuple tuple; 22 - }; 23 - 24 - enum grep_conntrack { 25 - GRE_CT_UNREPLIED, 26 - GRE_CT_REPLIED, 27 - GRE_CT_MAX 28 - }; 29 - 30 - struct netns_proto_gre { 31 - struct nf_proto_net nf; 32 - rwlock_t keymap_lock; 33 - struct list_head keymap_list; 34 - unsigned int gre_timeouts[GRE_CT_MAX]; 22 + struct rcu_head rcu; 35 23 }; 36 24 37 25 /* add new tuple->key_reply pair to keymap */ 38 26 int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, 39 27 struct nf_conntrack_tuple *t); 40 28 29 + void nf_ct_gre_keymap_flush(struct net *net); 41 30 /* delete keymap entries */ 42 31 void nf_ct_gre_keymap_destroy(struct nf_conn *ct); 43 32 33 + bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 34 + struct net *net, struct nf_conntrack_tuple *tuple); 44 35 #endif /* __KERNEL__ */ 45 36 #endif /* _CONNTRACK_PROTO_GRE_H */
-6
include/linux/netfilter_ipv4.h
··· 25 25 unsigned int dataoff, u_int8_t protocol); 26 26 int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, 27 27 bool strict); 28 - int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); 29 28 #else 30 29 static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, 31 30 unsigned int dataoff, u_int8_t protocol) ··· 33 34 } 34 35 static inline int nf_ip_route(struct net *net, struct dst_entry **dst, 35 36 struct flowi *fl, bool strict) 36 - { 37 - return -EOPNOTSUPP; 38 - } 39 - static inline int nf_ip_reroute(struct sk_buff *skb, 40 - const struct nf_queue_entry *entry) 41 37 { 42 38 return -EOPNOTSUPP; 43 39 }
-3
include/net/ip_vs.h
··· 453 453 int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, 454 454 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); 455 455 456 - int (*csum_check)(int af, struct sk_buff *skb, 457 - struct ip_vs_protocol *pp); 458 - 459 456 const char *(*state_name)(int state); 460 457 461 458 void (*state_transition)(struct ip_vs_conn *cp, int direction,
-1
include/net/netfilter/br_netfilter.h
··· 43 43 } 44 44 45 45 struct net_device *setup_pre_routing(struct sk_buff *skb); 46 - void br_netfilter_enable(void); 47 46 48 47 #if IS_ENABLED(CONFIG_IPV6) 49 48 int br_validate_ipv6(struct net *net, struct sk_buff *skb);
+3
include/net/netfilter/ipv4/nf_conntrack_ipv4.h
··· 22 22 #ifdef CONFIG_NF_CT_PROTO_UDPLITE 23 23 extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite; 24 24 #endif 25 + #ifdef CONFIG_NF_CT_PROTO_GRE 26 + extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre; 27 + #endif 25 28 26 29 #endif /*_NF_CONNTRACK_IPV4_H*/
-2
include/net/netfilter/nf_conntrack.h
··· 187 187 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, 188 188 u_int16_t l3num, struct net *net, 189 189 struct nf_conntrack_tuple *tuple); 190 - bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, 191 - const struct nf_conntrack_tuple *orig); 192 190 193 191 void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo, 194 192 const struct sk_buff *skb,
+2 -3
include/net/netfilter/nf_conntrack_core.h
··· 26 26 void nf_conntrack_cleanup_net(struct net *net); 27 27 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list); 28 28 29 - int nf_conntrack_proto_pernet_init(struct net *net); 29 + void nf_conntrack_proto_pernet_init(struct net *net); 30 30 void nf_conntrack_proto_pernet_fini(struct net *net); 31 31 32 32 int nf_conntrack_proto_init(void); ··· 39 39 void nf_conntrack_cleanup_end(void); 40 40 41 41 bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, 42 - const struct nf_conntrack_tuple *orig, 43 - const struct nf_conntrack_l4proto *l4proto); 42 + const struct nf_conntrack_tuple *orig); 44 43 45 44 /* Find a connection corresponding to a tuple. */ 46 45 struct nf_conntrack_tuple_hash *
+74 -50
include/net/netfilter/nf_conntrack_l4proto.h
··· 27 27 /* protoinfo nlattr size, closes a hole */ 28 28 u16 nlattr_size; 29 29 30 - /* Try to fill in the third arg: dataoff is offset past network protocol 31 - hdr. Return true if possible. */ 32 - bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, 33 - struct net *net, struct nf_conntrack_tuple *tuple); 34 - 35 - /* Invert the per-proto part of the tuple: ie. turn xmit into reply. 36 - * Only used by icmp, most protocols use a generic version. 37 - */ 38 - bool (*invert_tuple)(struct nf_conntrack_tuple *inverse, 39 - const struct nf_conntrack_tuple *orig); 40 - 41 - /* Returns verdict for packet, or -1 for invalid. */ 42 - int (*packet)(struct nf_conn *ct, 43 - struct sk_buff *skb, 44 - unsigned int dataoff, 45 - enum ip_conntrack_info ctinfo, 46 - const struct nf_hook_state *state); 47 - 48 - /* Called when a conntrack entry is destroyed */ 49 - void (*destroy)(struct nf_conn *ct); 50 - 51 30 /* called by gc worker if table is full */ 52 31 bool (*can_early_drop)(const struct nf_conn *ct); 53 32 ··· 58 79 /* Print out the private part of the conntrack. */ 59 80 void (*print_conntrack)(struct seq_file *s, struct nf_conn *); 60 81 #endif 61 - unsigned int *net_id; 62 - /* Init l4proto pernet data */ 63 - int (*init_net)(struct net *net); 64 - 65 - /* Return the per-net protocol part. */ 66 - struct nf_proto_net *(*get_net_proto)(struct net *net); 67 - 68 - /* Module (if any) which this is connected to. 
*/ 69 - struct module *me; 70 82 }; 83 + 84 + bool icmp_pkt_to_tuple(const struct sk_buff *skb, 85 + unsigned int dataoff, 86 + struct net *net, 87 + struct nf_conntrack_tuple *tuple); 88 + 89 + bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, 90 + unsigned int dataoff, 91 + struct net *net, 92 + struct nf_conntrack_tuple *tuple); 93 + 94 + bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple, 95 + const struct nf_conntrack_tuple *orig); 96 + bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple, 97 + const struct nf_conntrack_tuple *orig); 71 98 72 99 int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, 73 100 struct sk_buff *skb, ··· 84 99 struct sk_buff *skb, 85 100 unsigned int dataoff, 86 101 const struct nf_hook_state *state); 102 + 103 + int nf_conntrack_icmp_packet(struct nf_conn *ct, 104 + struct sk_buff *skb, 105 + enum ip_conntrack_info ctinfo, 106 + const struct nf_hook_state *state); 107 + 108 + int nf_conntrack_icmpv6_packet(struct nf_conn *ct, 109 + struct sk_buff *skb, 110 + enum ip_conntrack_info ctinfo, 111 + const struct nf_hook_state *state); 112 + 113 + int nf_conntrack_udp_packet(struct nf_conn *ct, 114 + struct sk_buff *skb, 115 + unsigned int dataoff, 116 + enum ip_conntrack_info ctinfo, 117 + const struct nf_hook_state *state); 118 + int nf_conntrack_udplite_packet(struct nf_conn *ct, 119 + struct sk_buff *skb, 120 + unsigned int dataoff, 121 + enum ip_conntrack_info ctinfo, 122 + const struct nf_hook_state *state); 123 + int nf_conntrack_tcp_packet(struct nf_conn *ct, 124 + struct sk_buff *skb, 125 + unsigned int dataoff, 126 + enum ip_conntrack_info ctinfo, 127 + const struct nf_hook_state *state); 128 + int nf_conntrack_dccp_packet(struct nf_conn *ct, 129 + struct sk_buff *skb, 130 + unsigned int dataoff, 131 + enum ip_conntrack_info ctinfo, 132 + const struct nf_hook_state *state); 133 + int nf_conntrack_sctp_packet(struct nf_conn *ct, 134 + struct sk_buff *skb, 135 + unsigned int dataoff, 136 + enum 
ip_conntrack_info ctinfo, 137 + const struct nf_hook_state *state); 138 + int nf_conntrack_gre_packet(struct nf_conn *ct, 139 + struct sk_buff *skb, 140 + unsigned int dataoff, 141 + enum ip_conntrack_info ctinfo, 142 + const struct nf_hook_state *state); 143 + 144 + void nf_conntrack_generic_init_net(struct net *net); 145 + void nf_conntrack_tcp_init_net(struct net *net); 146 + void nf_conntrack_udp_init_net(struct net *net); 147 + void nf_conntrack_gre_init_net(struct net *net); 148 + void nf_conntrack_dccp_init_net(struct net *net); 149 + void nf_conntrack_sctp_init_net(struct net *net); 150 + void nf_conntrack_icmp_init_net(struct net *net); 151 + void nf_conntrack_icmpv6_init_net(struct net *net); 152 + 87 153 /* Existing built-in generic protocol */ 88 154 extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; 89 155 90 156 #define MAX_NF_CT_PROTO IPPROTO_UDPLITE 91 157 92 - const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto); 93 - 94 - const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4proto); 95 - void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p); 96 - 97 - /* Protocol pernet registration. */ 98 - int nf_ct_l4proto_pernet_register_one(struct net *net, 99 - const struct nf_conntrack_l4proto *proto); 100 - void nf_ct_l4proto_pernet_unregister_one(struct net *net, 101 - const struct nf_conntrack_l4proto *proto); 102 - int nf_ct_l4proto_pernet_register(struct net *net, 103 - const struct nf_conntrack_l4proto *const proto[], 104 - unsigned int num_proto); 105 - void nf_ct_l4proto_pernet_unregister(struct net *net, 106 - const struct nf_conntrack_l4proto *const proto[], 107 - unsigned int num_proto); 108 - 109 - /* Protocol global registration. 
*/ 110 - int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto); 111 - void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto); 158 + const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto); 112 159 113 160 /* Generic netlink helpers */ 114 161 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, ··· 206 189 static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net) 207 190 { 208 191 return &net->ct.nf_ct_proto.sctp; 192 + } 193 + #endif 194 + 195 + #ifdef CONFIG_NF_CT_PROTO_GRE 196 + static inline struct nf_gre_net *nf_gre_pernet(struct net *net) 197 + { 198 + return &net->ct.nf_ct_proto.gre; 209 199 } 210 200 #endif 211 201
-4
include/net/netfilter/nf_nat.h
··· 47 47 48 48 struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct); 49 49 50 - /* Is this tuple already taken? (not by us)*/ 51 - int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, 52 - const struct nf_conn *ignored_conntrack); 53 - 54 50 static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct) 55 51 { 56 52 #if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
+19 -7
include/net/netfilter/nf_tables.h
··· 1012 1012 const struct nft_verdict *v); 1013 1013 1014 1014 /** 1015 + * struct nft_object_hash_key - key to lookup nft_object 1016 + * 1017 + * @name: name of the stateful object to look up 1018 + * @table: table the object belongs to 1019 + */ 1020 + struct nft_object_hash_key { 1021 + const char *name; 1022 + const struct nft_table *table; 1023 + }; 1024 + 1025 + /** 1015 1026 * struct nft_object - nf_tables stateful object 1016 1027 * 1017 1028 * @list: table stateful object list node 1018 - * @table: table this object belongs to 1019 - * @name: name of this stateful object 1029 + * @key: keys that identify this object 1030 + * @rhlhead: nft_objname_ht node 1020 1031 * @genmask: generation mask 1021 1032 * @use: number of references to this stateful object 1022 1033 * @handle: unique object handle 1023 1034 * @ops: object operations 1024 - * @data: object data, layout depends on type 1035 + * @data: object data, layout depends on type 1025 1036 */ 1026 1037 struct nft_object { 1027 1038 struct list_head list; 1028 - char *name; 1029 - struct nft_table *table; 1039 + struct rhlist_head rhlhead; 1040 + struct nft_object_hash_key key; 1030 1041 u32 genmask:2, 1031 1042 use:30; 1032 1043 u64 handle; ··· 1054 1043 1055 1044 #define nft_expr_obj(expr) *((struct nft_object **)nft_expr_priv(expr)) 1056 1045 1057 - struct nft_object *nft_obj_lookup(const struct nft_table *table, 1046 + struct nft_object *nft_obj_lookup(const struct net *net, 1047 + const struct nft_table *table, 1058 1048 const struct nlattr *nla, u32 objtype, 1059 1049 u8 genmask); 1060 1050 1061 - void nft_obj_notify(struct net *net, struct nft_table *table, 1051 + void nft_obj_notify(struct net *net, const struct nft_table *table, 1062 1052 struct nft_object *obj, u32 portid, u32 seq, 1063 1053 int event, int family, int report, gfp_t gfp); 1064 1054
+16
include/net/netfilter/nf_tables_core.h
··· 80 80 struct nft_pktinfo; 81 81 void nft_meta_get_eval(const struct nft_expr *expr, 82 82 struct nft_regs *regs, const struct nft_pktinfo *pkt); 83 + void nft_cmp_eval(const struct nft_expr *expr, 84 + struct nft_regs *regs, const struct nft_pktinfo *pkt); 83 85 void nft_lookup_eval(const struct nft_expr *expr, 86 + struct nft_regs *regs, const struct nft_pktinfo *pkt); 87 + void nft_payload_eval(const struct nft_expr *expr, 88 + struct nft_regs *regs, const struct nft_pktinfo *pkt); 89 + void nft_immediate_eval(const struct nft_expr *expr, 90 + struct nft_regs *regs, const struct nft_pktinfo *pkt); 91 + void nft_bitwise_eval(const struct nft_expr *expr, 92 + struct nft_regs *regs, const struct nft_pktinfo *pkt); 93 + void nft_range_eval(const struct nft_expr *expr, 94 + struct nft_regs *regs, const struct nft_pktinfo *pkt); 95 + void nft_byteorder_eval(const struct nft_expr *expr, 96 + struct nft_regs *regs, const struct nft_pktinfo *pkt); 97 + void nft_dynset_eval(const struct nft_expr *expr, 98 + struct nft_regs *regs, const struct nft_pktinfo *pkt); 99 + void nft_rt_get_eval(const struct nft_expr *expr, 84 100 struct nft_regs *regs, const struct nft_pktinfo *pkt); 85 101 #endif /* _NET_NF_TABLES_CORE_H */
+16 -14
include/net/netns/conntrack.h
··· 18 18 struct ctl_table_header; 19 19 struct nf_conntrack_ecache; 20 20 21 - struct nf_proto_net { 22 - #ifdef CONFIG_SYSCTL 23 - struct ctl_table_header *ctl_table_header; 24 - struct ctl_table *ctl_table; 25 - #endif 26 - unsigned int users; 27 - }; 28 - 29 21 struct nf_generic_net { 30 - struct nf_proto_net pn; 31 22 unsigned int timeout; 32 23 }; 33 24 34 25 struct nf_tcp_net { 35 - struct nf_proto_net pn; 36 26 unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX]; 37 27 unsigned int tcp_loose; 38 28 unsigned int tcp_be_liberal; ··· 36 46 }; 37 47 38 48 struct nf_udp_net { 39 - struct nf_proto_net pn; 40 49 unsigned int timeouts[UDP_CT_MAX]; 41 50 }; 42 51 43 52 struct nf_icmp_net { 44 - struct nf_proto_net pn; 45 53 unsigned int timeout; 46 54 }; 47 55 48 56 #ifdef CONFIG_NF_CT_PROTO_DCCP 49 57 struct nf_dccp_net { 50 - struct nf_proto_net pn; 51 58 int dccp_loose; 52 59 unsigned int dccp_timeout[CT_DCCP_MAX + 1]; 53 60 }; ··· 52 65 53 66 #ifdef CONFIG_NF_CT_PROTO_SCTP 54 67 struct nf_sctp_net { 55 - struct nf_proto_net pn; 56 68 unsigned int timeouts[SCTP_CONNTRACK_MAX]; 69 + }; 70 + #endif 71 + 72 + #ifdef CONFIG_NF_CT_PROTO_GRE 73 + enum gre_conntrack { 74 + GRE_CT_UNREPLIED, 75 + GRE_CT_REPLIED, 76 + GRE_CT_MAX 77 + }; 78 + 79 + struct nf_gre_net { 80 + struct list_head keymap_list; 81 + unsigned int timeouts[GRE_CT_MAX]; 57 82 }; 58 83 #endif 59 84 ··· 80 81 #endif 81 82 #ifdef CONFIG_NF_CT_PROTO_SCTP 82 83 struct nf_sctp_net sctp; 84 + #endif 85 + #ifdef CONFIG_NF_CT_PROTO_GRE 86 + struct nf_gre_net gre; 83 87 #endif 84 88 }; 85 89
+8 -2
include/uapi/linux/netfilter/nf_tables.h
··· 219 219 * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64) 220 220 * @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN) 221 221 * @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32) 222 + * @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32) 222 223 */ 223 224 enum nft_rule_attributes { 224 225 NFTA_RULE_UNSPEC, ··· 232 231 NFTA_RULE_USERDATA, 233 232 NFTA_RULE_PAD, 234 233 NFTA_RULE_ID, 234 + NFTA_RULE_POSITION_ID, 235 235 __NFTA_RULE_MAX 236 236 }; 237 237 #define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1) ··· 791 789 * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) 792 790 * @NFT_META_PRANDOM: a 32bit pseudo-random number 793 791 * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp) 792 + * @NFT_META_IIFKIND: packet input interface kind name (dev->rtnl_link_ops->kind) 793 + * @NFT_META_OIFKIND: packet output interface kind name (dev->rtnl_link_ops->kind) 794 794 */ 795 795 enum nft_meta_keys { 796 796 NFT_META_LEN, ··· 821 817 NFT_META_CGROUP, 822 818 NFT_META_PRANDOM, 823 819 NFT_META_SECPATH, 820 + NFT_META_IIFKIND, 821 + NFT_META_OIFKIND, 824 822 }; 825 823 826 824 /** ··· 877 871 NFTA_HASH_SEED, 878 872 NFTA_HASH_OFFSET, 879 873 NFTA_HASH_TYPE, 880 - NFTA_HASH_SET_NAME, 881 - NFTA_HASH_SET_ID, 874 + NFTA_HASH_SET_NAME, /* deprecated */ 875 + NFTA_HASH_SET_ID, /* deprecated */ 882 876 __NFTA_HASH_MAX, 883 877 }; 884 878 #define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
-5
net/bridge/br_netfilter_hooks.c
··· 881 881 .br_dev_xmit_hook = br_nf_dev_xmit, 882 882 }; 883 883 884 - void br_netfilter_enable(void) 885 - { 886 - } 887 - EXPORT_SYMBOL_GPL(br_netfilter_enable); 888 - 889 884 /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because 890 885 * br_dev_queue_push_xmit is called afterwards */ 891 886 static const struct nf_hook_ops br_nf_ops[] = {
-18
net/ipv4/netfilter.c
··· 80 80 } 81 81 EXPORT_SYMBOL(ip_route_me_harder); 82 82 83 - int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry) 84 - { 85 - const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); 86 - 87 - if (entry->state.hook == NF_INET_LOCAL_OUT) { 88 - const struct iphdr *iph = ip_hdr(skb); 89 - 90 - if (!(iph->tos == rt_info->tos && 91 - skb->mark == rt_info->mark && 92 - iph->daddr == rt_info->daddr && 93 - iph->saddr == rt_info->saddr)) 94 - return ip_route_me_harder(entry->state.net, skb, 95 - RTN_UNSPEC); 96 - } 97 - return 0; 98 - } 99 - EXPORT_SYMBOL_GPL(nf_ip_reroute); 100 - 101 83 int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, 102 84 bool strict __always_unused) 103 85 {
+1 -1
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
··· 214 214 } 215 215 216 216 /* Change outer to look like the reply to an incoming packet */ 217 - nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 217 + nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); 218 218 if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) 219 219 return 0; 220 220
+1 -1
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
··· 225 225 skb->len - hdrlen, 0)); 226 226 } 227 227 228 - nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 228 + nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); 229 229 if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) 230 230 return 0; 231 231
+1 -1
net/netfilter/Kconfig
··· 174 174 If unsure, say Y. 175 175 176 176 config NF_CT_PROTO_GRE 177 - tristate 177 + bool 178 178 179 179 config NF_CT_PROTO_SCTP 180 180 bool 'SCTP protocol connection tracking support'
+1 -2
net/netfilter/Makefile
··· 13 13 nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o 14 14 nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o 15 15 nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o 16 + nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o 16 17 17 18 obj-$(CONFIG_NETFILTER) = netfilter.o 18 19 ··· 25 24 26 25 # connection tracking 27 26 obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o 28 - 29 - obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o 30 27 31 28 # netlink interface for nf_conntrack 32 29 obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
+41 -8
net/netfilter/ipvs/ip_vs_core.c
··· 53 53 #endif 54 54 55 55 #include <net/ip_vs.h> 56 + #include <linux/indirect_call_wrapper.h> 56 57 57 58 58 59 EXPORT_SYMBOL(register_ip_vs_scheduler); ··· 70 69 EXPORT_SYMBOL(ip_vs_get_debug_level); 71 70 #endif 72 71 EXPORT_SYMBOL(ip_vs_new_conn_out); 72 + 73 + #ifdef CONFIG_IP_VS_PROTO_TCP 74 + INDIRECT_CALLABLE_DECLARE(int 75 + tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 76 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)); 77 + #endif 78 + 79 + #ifdef CONFIG_IP_VS_PROTO_UDP 80 + INDIRECT_CALLABLE_DECLARE(int 81 + udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 82 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)); 83 + #endif 84 + 85 + #if defined(CONFIG_IP_VS_PROTO_TCP) && defined(CONFIG_IP_VS_PROTO_UDP) 86 + #define SNAT_CALL(f, ...) \ 87 + INDIRECT_CALL_2(f, tcp_snat_handler, udp_snat_handler, __VA_ARGS__) 88 + #elif defined(CONFIG_IP_VS_PROTO_TCP) 89 + #define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, tcp_snat_handler, __VA_ARGS__) 90 + #elif defined(CONFIG_IP_VS_PROTO_UDP) 91 + #define SNAT_CALL(f, ...) INDIRECT_CALL_1(f, udp_snat_handler, __VA_ARGS__) 92 + #else 93 + #define SNAT_CALL(f, ...) 
f(__VA_ARGS__) 94 + #endif 73 95 74 96 static unsigned int ip_vs_net_id __read_mostly; 75 97 /* netns cnt used for uniqueness */ ··· 502 478 */ 503 479 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) { 504 480 iph->hdr_flags ^= IP_VS_HDR_INVERSE; 505 - cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph); 481 + cp = INDIRECT_CALL_1(pp->conn_in_get, 482 + ip_vs_conn_in_get_proto, svc->ipvs, 483 + svc->af, skb, iph); 506 484 iph->hdr_flags ^= IP_VS_HDR_INVERSE; 507 485 508 486 if (cp) { ··· 998 972 ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph); 999 973 1000 974 /* The embedded headers contain source and dest in reverse order */ 1001 - cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph); 975 + cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto, 976 + ipvs, AF_INET, skb, &ciph); 1002 977 if (!cp) 1003 978 return NF_ACCEPT; 1004 979 ··· 1055 1028 return NF_ACCEPT; 1056 1029 1057 1030 /* The embedded headers contain source and dest in reverse order */ 1058 - cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph); 1031 + cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto, 1032 + ipvs, AF_INET6, skb, &ciph); 1059 1033 if (!cp) 1060 1034 return NF_ACCEPT; 1061 1035 ··· 1291 1263 goto drop; 1292 1264 1293 1265 /* mangle the packet */ 1294 - if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph)) 1266 + if (pp->snat_handler && 1267 + !SNAT_CALL(pp->snat_handler, skb, pp, cp, iph)) 1295 1268 goto drop; 1296 1269 1297 1270 #ifdef CONFIG_IP_VS_IPV6 ··· 1418 1389 /* 1419 1390 * Check if the packet belongs to an existing entry 1420 1391 */ 1421 - cp = pp->conn_out_get(ipvs, af, skb, &iph); 1392 + cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto, 1393 + ipvs, af, skb, &iph); 1422 1394 1423 1395 if (likely(cp)) { 1424 1396 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) ··· 1674 1644 /* The embedded headers contain source and dest in reverse order. 1675 1645 * For IPIP this is error for request, not for reply. 
1676 1646 */ 1677 - cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph); 1647 + cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto, 1648 + ipvs, AF_INET, skb, &ciph); 1678 1649 1679 1650 if (!cp) { 1680 1651 int v; ··· 1827 1796 /* The embedded headers contain source and dest in reverse order 1828 1797 * if not from localhost 1829 1798 */ 1830 - cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph); 1799 + cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto, 1800 + ipvs, AF_INET6, skb, &ciph); 1831 1801 1832 1802 if (!cp) { 1833 1803 int v; ··· 1957 1925 /* 1958 1926 * Check if the packet belongs to an existing connection entry 1959 1927 */ 1960 - cp = pp->conn_in_get(ipvs, af, skb, &iph); 1928 + cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto, 1929 + ipvs, af, skb, &iph); 1961 1930 1962 1931 conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); 1963 1932 if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
-2
net/netfilter/ipvs/ip_vs_proto_ah_esp.c
··· 129 129 .conn_out_get = ah_esp_conn_out_get, 130 130 .snat_handler = NULL, 131 131 .dnat_handler = NULL, 132 - .csum_check = NULL, 133 132 .state_transition = NULL, 134 133 .register_app = NULL, 135 134 .unregister_app = NULL, ··· 151 152 .conn_out_get = ah_esp_conn_out_get, 152 153 .snat_handler = NULL, 153 154 .dnat_handler = NULL, 154 - .csum_check = NULL, 155 155 .state_transition = NULL, 156 156 .register_app = NULL, 157 157 .unregister_app = NULL,
+5 -3
net/netfilter/ipvs/ip_vs_proto_sctp.c
··· 10 10 #include <net/ip_vs.h> 11 11 12 12 static int 13 + sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp); 14 + 15 + static int 13 16 sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, 14 17 struct ip_vs_proto_data *pd, 15 18 int *verdict, struct ip_vs_conn **cpp, ··· 108 105 int ret; 109 106 110 107 /* Some checks before mangling */ 111 - if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) 108 + if (!sctp_csum_check(cp->af, skb, pp)) 112 109 return 0; 113 110 114 111 /* Call application helper if needed */ ··· 155 152 int ret; 156 153 157 154 /* Some checks before mangling */ 158 - if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) 155 + if (!sctp_csum_check(cp->af, skb, pp)) 159 156 return 0; 160 157 161 158 /* Call application helper if needed */ ··· 590 587 .conn_out_get = ip_vs_conn_out_get_proto, 591 588 .snat_handler = sctp_snat_handler, 592 589 .dnat_handler = sctp_dnat_handler, 593 - .csum_check = sctp_csum_check, 594 590 .state_name = sctp_state_name, 595 591 .state_transition = sctp_state_transition, 596 592 .app_conn_bind = sctp_app_conn_bind,
+9 -6
net/netfilter/ipvs/ip_vs_proto_tcp.c
··· 28 28 #include <net/ip6_checksum.h> 29 29 #include <linux/netfilter.h> 30 30 #include <linux/netfilter_ipv4.h> 31 + #include <linux/indirect_call_wrapper.h> 31 32 32 33 #include <net/ip_vs.h> 34 + 35 + static int 36 + tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp); 33 37 34 38 static int 35 39 tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, ··· 147 143 } 148 144 149 145 150 - static int 146 + INDIRECT_CALLABLE_SCOPE int 151 147 tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 152 148 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 153 149 { ··· 170 166 int ret; 171 167 172 168 /* Some checks before mangling */ 173 - if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) 169 + if (!tcp_csum_check(cp->af, skb, pp)) 174 170 return 0; 175 171 176 172 /* Call application helper if needed */ ··· 196 192 tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, 197 193 cp->dport, cp->vport); 198 194 if (skb->ip_summed == CHECKSUM_COMPLETE) 199 - skb->ip_summed = (cp->app && pp->csum_check) ? 195 + skb->ip_summed = cp->app ? 200 196 CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 201 197 } else { 202 198 /* full checksum calculation */ ··· 248 244 int ret; 249 245 250 246 /* Some checks before mangling */ 251 - if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) 247 + if (!tcp_csum_check(cp->af, skb, pp)) 252 248 return 0; 253 249 254 250 /* ··· 279 275 tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, 280 276 cp->vport, cp->dport); 281 277 if (skb->ip_summed == CHECKSUM_COMPLETE) 282 - skb->ip_summed = (cp->app && pp->csum_check) ? 278 + skb->ip_summed = cp->app ? 
283 279 CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 284 280 } else { 285 281 /* full checksum calculation */ ··· 740 736 .conn_out_get = ip_vs_conn_out_get_proto, 741 737 .snat_handler = tcp_snat_handler, 742 738 .dnat_handler = tcp_dnat_handler, 743 - .csum_check = tcp_csum_check, 744 739 .state_name = tcp_state_name, 745 740 .state_transition = tcp_state_transition, 746 741 .app_conn_bind = tcp_app_conn_bind,
+9 -6
net/netfilter/ipvs/ip_vs_proto_udp.c
··· 23 23 #include <linux/netfilter.h> 24 24 #include <linux/netfilter_ipv4.h> 25 25 #include <linux/udp.h> 26 + #include <linux/indirect_call_wrapper.h> 26 27 27 28 #include <net/ip_vs.h> 28 29 #include <net/ip.h> 29 30 #include <net/ip6_checksum.h> 31 + 32 + static int 33 + udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp); 30 34 31 35 static int 32 36 udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, ··· 137 133 } 138 134 139 135 140 - static int 136 + INDIRECT_CALLABLE_SCOPE int 141 137 udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 142 138 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 143 139 { ··· 160 156 int ret; 161 157 162 158 /* Some checks before mangling */ 163 - if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) 159 + if (!udp_csum_check(cp->af, skb, pp)) 164 160 return 0; 165 161 166 162 /* ··· 190 186 udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, 191 187 cp->dport, cp->vport); 192 188 if (skb->ip_summed == CHECKSUM_COMPLETE) 193 - skb->ip_summed = (cp->app && pp->csum_check) ? 189 + skb->ip_summed = cp->app ? 194 190 CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 195 191 } else { 196 192 /* full checksum calculation */ ··· 243 239 int ret; 244 240 245 241 /* Some checks before mangling */ 246 - if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) 242 + if (!udp_csum_check(cp->af, skb, pp)) 247 243 return 0; 248 244 249 245 /* ··· 274 270 udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr, 275 271 cp->vport, cp->dport); 276 272 if (skb->ip_summed == CHECKSUM_COMPLETE) 277 - skb->ip_summed = (cp->app && pp->csum_check) ? 273 + skb->ip_summed = cp->app ? 
278 274 CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 279 275 } else { 280 276 /* full checksum calculation */ ··· 498 494 .conn_out_get = ip_vs_conn_out_get_proto, 499 495 .snat_handler = udp_snat_handler, 500 496 .dnat_handler = udp_dnat_handler, 501 - .csum_check = udp_csum_check, 502 497 .state_transition = udp_state_transition, 503 498 .state_name = udp_state_name, 504 499 .register_app = udp_register_app,
+138 -72
net/netfilter/nf_conntrack_core.c
··· 222 222 return scale_hash(hash_conntrack_raw(tuple, net)); 223 223 } 224 224 225 + static bool nf_ct_get_tuple_ports(const struct sk_buff *skb, 226 + unsigned int dataoff, 227 + struct nf_conntrack_tuple *tuple) 228 + { struct { 229 + __be16 sport; 230 + __be16 dport; 231 + } _inet_hdr, *inet_hdr; 232 + 233 + /* Actually only need first 4 bytes to get ports. */ 234 + inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr); 235 + if (!inet_hdr) 236 + return false; 237 + 238 + tuple->src.u.udp.port = inet_hdr->sport; 239 + tuple->dst.u.udp.port = inet_hdr->dport; 240 + return true; 241 + } 242 + 225 243 static bool 226 244 nf_ct_get_tuple(const struct sk_buff *skb, 227 245 unsigned int nhoff, ··· 247 229 u_int16_t l3num, 248 230 u_int8_t protonum, 249 231 struct net *net, 250 - struct nf_conntrack_tuple *tuple, 251 - const struct nf_conntrack_l4proto *l4proto) 232 + struct nf_conntrack_tuple *tuple) 252 233 { 253 234 unsigned int size; 254 235 const __be32 *ap; 255 236 __be32 _addrs[8]; 256 - struct { 257 - __be16 sport; 258 - __be16 dport; 259 - } _inet_hdr, *inet_hdr; 260 237 261 238 memset(tuple, 0, sizeof(*tuple)); 262 239 ··· 287 274 tuple->dst.protonum = protonum; 288 275 tuple->dst.dir = IP_CT_DIR_ORIGINAL; 289 276 290 - if (unlikely(l4proto->pkt_to_tuple)) 291 - return l4proto->pkt_to_tuple(skb, dataoff, net, tuple); 277 + switch (protonum) { 278 + #if IS_ENABLED(CONFIG_IPV6) 279 + case IPPROTO_ICMPV6: 280 + return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple); 281 + #endif 282 + case IPPROTO_ICMP: 283 + return icmp_pkt_to_tuple(skb, dataoff, net, tuple); 284 + #ifdef CONFIG_NF_CT_PROTO_GRE 285 + case IPPROTO_GRE: 286 + return gre_pkt_to_tuple(skb, dataoff, net, tuple); 287 + #endif 288 + case IPPROTO_TCP: 289 + case IPPROTO_UDP: /* fallthrough */ 290 + return nf_ct_get_tuple_ports(skb, dataoff, tuple); 291 + #ifdef CONFIG_NF_CT_PROTO_UDPLITE 292 + case IPPROTO_UDPLITE: 293 + return nf_ct_get_tuple_ports(skb, dataoff, tuple); 294 + 
#endif 295 + #ifdef CONFIG_NF_CT_PROTO_SCTP 296 + case IPPROTO_SCTP: 297 + return nf_ct_get_tuple_ports(skb, dataoff, tuple); 298 + #endif 299 + #ifdef CONFIG_NF_CT_PROTO_DCCP 300 + case IPPROTO_DCCP: 301 + return nf_ct_get_tuple_ports(skb, dataoff, tuple); 302 + #endif 303 + default: 304 + break; 305 + } 292 306 293 - /* Actually only need first 4 bytes to get ports. */ 294 - inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr); 295 - if (!inet_hdr) 296 - return false; 297 - 298 - tuple->src.u.udp.port = inet_hdr->sport; 299 - tuple->dst.u.udp.port = inet_hdr->dport; 300 307 return true; 301 308 } 302 309 ··· 399 366 u_int16_t l3num, 400 367 struct net *net, struct nf_conntrack_tuple *tuple) 401 368 { 402 - const struct nf_conntrack_l4proto *l4proto; 403 369 u8 protonum; 404 370 int protoff; 405 - int ret; 406 - 407 - rcu_read_lock(); 408 371 409 372 protoff = get_l4proto(skb, nhoff, l3num, &protonum); 410 - if (protoff <= 0) { 411 - rcu_read_unlock(); 373 + if (protoff <= 0) 412 374 return false; 413 - } 414 375 415 - l4proto = __nf_ct_l4proto_find(protonum); 416 - 417 - ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple, 418 - l4proto); 419 - 420 - rcu_read_unlock(); 421 - return ret; 376 + return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple); 422 377 } 423 378 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr); 424 379 425 380 bool 426 381 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, 427 - const struct nf_conntrack_tuple *orig, 428 - const struct nf_conntrack_l4proto *l4proto) 382 + const struct nf_conntrack_tuple *orig) 429 383 { 430 384 memset(inverse, 0, sizeof(*inverse)); 431 385 ··· 435 415 436 416 inverse->dst.protonum = orig->dst.protonum; 437 417 438 - if (unlikely(l4proto->invert_tuple)) 439 - return l4proto->invert_tuple(inverse, orig); 418 + switch (orig->dst.protonum) { 419 + case IPPROTO_ICMP: 420 + return nf_conntrack_invert_icmp_tuple(inverse, orig); 421 + #if IS_ENABLED(CONFIG_IPV6) 422 + 
case IPPROTO_ICMPV6: 423 + return nf_conntrack_invert_icmpv6_tuple(inverse, orig); 424 + #endif 425 + } 440 426 441 427 inverse->src.u.all = orig->dst.u.all; 442 428 inverse->dst.u.all = orig->src.u.all; ··· 552 526 } 553 527 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free); 554 528 529 + static void destroy_gre_conntrack(struct nf_conn *ct) 530 + { 531 + #ifdef CONFIG_NF_CT_PROTO_GRE 532 + struct nf_conn *master = ct->master; 533 + 534 + if (master) 535 + nf_ct_gre_keymap_destroy(master); 536 + #endif 537 + } 538 + 555 539 static void 556 540 destroy_conntrack(struct nf_conntrack *nfct) 557 541 { 558 542 struct nf_conn *ct = (struct nf_conn *)nfct; 559 - const struct nf_conntrack_l4proto *l4proto; 560 543 561 544 pr_debug("destroy_conntrack(%p)\n", ct); 562 545 WARN_ON(atomic_read(&nfct->use) != 0); ··· 574 539 nf_ct_tmpl_free(ct); 575 540 return; 576 541 } 577 - l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); 578 - if (l4proto->destroy) 579 - l4proto->destroy(ct); 542 + 543 + if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE)) 544 + destroy_gre_conntrack(ct); 580 545 581 546 local_bh_disable(); 582 547 /* Expectations will have been removed in clean_from_lists, ··· 875 840 enum ip_conntrack_info oldinfo; 876 841 struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); 877 842 878 - l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); 843 + l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); 879 844 if (l4proto->allow_clash && 880 845 !nf_ct_is_dying(ct) && 881 846 atomic_inc_not_zero(&ct->ct_general.use)) { ··· 1147 1112 if (!test_bit(IPS_ASSURED_BIT, &ct->status)) 1148 1113 return true; 1149 1114 1150 - l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); 1115 + l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); 1151 1116 if (l4proto->can_early_drop && l4proto->can_early_drop(ct)) 1152 1117 return true; 1153 1118 ··· 1377 1342 static noinline struct nf_conntrack_tuple_hash * 1378 1343 init_conntrack(struct net *net, struct nf_conn *tmpl, 1379 1344 const struct nf_conntrack_tuple 
*tuple, 1380 - const struct nf_conntrack_l4proto *l4proto, 1381 1345 struct sk_buff *skb, 1382 1346 unsigned int dataoff, u32 hash) 1383 1347 { ··· 1389 1355 struct nf_conn_timeout *timeout_ext; 1390 1356 struct nf_conntrack_zone tmp; 1391 1357 1392 - if (!nf_ct_invert_tuple(&repl_tuple, tuple, l4proto)) { 1358 + if (!nf_ct_invert_tuple(&repl_tuple, tuple)) { 1393 1359 pr_debug("Can't invert tuple.\n"); 1394 1360 return NULL; 1395 1361 } ··· 1471 1437 struct sk_buff *skb, 1472 1438 unsigned int dataoff, 1473 1439 u_int8_t protonum, 1474 - const struct nf_conntrack_l4proto *l4proto, 1475 1440 const struct nf_hook_state *state) 1476 1441 { 1477 1442 const struct nf_conntrack_zone *zone; ··· 1483 1450 1484 1451 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), 1485 1452 dataoff, state->pf, protonum, state->net, 1486 - &tuple, l4proto)) { 1453 + &tuple)) { 1487 1454 pr_debug("Can't get tuple\n"); 1488 1455 return 0; 1489 1456 } ··· 1493 1460 hash = hash_conntrack_raw(&tuple, state->net); 1494 1461 h = __nf_conntrack_find_get(state->net, zone, &tuple, hash); 1495 1462 if (!h) { 1496 - h = init_conntrack(state->net, tmpl, &tuple, l4proto, 1463 + h = init_conntrack(state->net, tmpl, &tuple, 1497 1464 skb, dataoff, hash); 1498 1465 if (!h) 1499 1466 return 0; ··· 1555 1522 return ret; 1556 1523 } 1557 1524 1525 + static int generic_packet(struct nf_conn *ct, struct sk_buff *skb, 1526 + enum ip_conntrack_info ctinfo) 1527 + { 1528 + const unsigned int *timeout = nf_ct_timeout_lookup(ct); 1529 + 1530 + if (!timeout) 1531 + timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout; 1532 + 1533 + nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); 1534 + return NF_ACCEPT; 1535 + } 1536 + 1537 + /* Returns verdict for packet, or -1 for invalid. 
*/ 1538 + static int nf_conntrack_handle_packet(struct nf_conn *ct, 1539 + struct sk_buff *skb, 1540 + unsigned int dataoff, 1541 + enum ip_conntrack_info ctinfo, 1542 + const struct nf_hook_state *state) 1543 + { 1544 + switch (nf_ct_protonum(ct)) { 1545 + case IPPROTO_TCP: 1546 + return nf_conntrack_tcp_packet(ct, skb, dataoff, 1547 + ctinfo, state); 1548 + case IPPROTO_UDP: 1549 + return nf_conntrack_udp_packet(ct, skb, dataoff, 1550 + ctinfo, state); 1551 + case IPPROTO_ICMP: 1552 + return nf_conntrack_icmp_packet(ct, skb, ctinfo, state); 1553 + #if IS_ENABLED(CONFIG_IPV6) 1554 + case IPPROTO_ICMPV6: 1555 + return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state); 1556 + #endif 1557 + #ifdef CONFIG_NF_CT_PROTO_UDPLITE 1558 + case IPPROTO_UDPLITE: 1559 + return nf_conntrack_udplite_packet(ct, skb, dataoff, 1560 + ctinfo, state); 1561 + #endif 1562 + #ifdef CONFIG_NF_CT_PROTO_SCTP 1563 + case IPPROTO_SCTP: 1564 + return nf_conntrack_sctp_packet(ct, skb, dataoff, 1565 + ctinfo, state); 1566 + #endif 1567 + #ifdef CONFIG_NF_CT_PROTO_DCCP 1568 + case IPPROTO_DCCP: 1569 + return nf_conntrack_dccp_packet(ct, skb, dataoff, 1570 + ctinfo, state); 1571 + #endif 1572 + #ifdef CONFIG_NF_CT_PROTO_GRE 1573 + case IPPROTO_GRE: 1574 + return nf_conntrack_gre_packet(ct, skb, dataoff, 1575 + ctinfo, state); 1576 + #endif 1577 + } 1578 + 1579 + return generic_packet(ct, skb, ctinfo); 1580 + } 1581 + 1558 1582 unsigned int 1559 1583 nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state) 1560 1584 { 1561 - const struct nf_conntrack_l4proto *l4proto; 1562 1585 enum ip_conntrack_info ctinfo; 1563 1586 struct nf_conn *ct, *tmpl; 1564 1587 u_int8_t protonum; ··· 1641 1552 goto out; 1642 1553 } 1643 1554 1644 - l4proto = __nf_ct_l4proto_find(protonum); 1645 - 1646 1555 if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) { 1647 1556 ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff, 1648 1557 protonum, state); ··· 1654 1567 } 1655 1568 repeat: 1656 1569 ret = 
resolve_normal_ct(tmpl, skb, dataoff, 1657 - protonum, l4proto, state); 1570 + protonum, state); 1658 1571 if (ret < 0) { 1659 1572 /* Too stressed to deal. */ 1660 1573 NF_CT_STAT_INC_ATOMIC(state->net, drop); ··· 1670 1583 goto out; 1671 1584 } 1672 1585 1673 - ret = l4proto->packet(ct, skb, dataoff, ctinfo, state); 1586 + ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state); 1674 1587 if (ret <= 0) { 1675 1588 /* Invalid: inverse of the return code tells 1676 1589 * the netfilter core what to do */ ··· 1700 1613 return ret; 1701 1614 } 1702 1615 EXPORT_SYMBOL_GPL(nf_conntrack_in); 1703 - 1704 - bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, 1705 - const struct nf_conntrack_tuple *orig) 1706 - { 1707 - bool ret; 1708 - 1709 - rcu_read_lock(); 1710 - ret = nf_ct_invert_tuple(inverse, orig, 1711 - __nf_ct_l4proto_find(orig->dst.protonum)); 1712 - rcu_read_unlock(); 1713 - return ret; 1714 - } 1715 - EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); 1716 1616 1717 1617 /* Alter reply tuple (maybe alter helper). 
This is for NAT, and is 1718 1618 implicitly racy: see __nf_conntrack_confirm */ ··· 1831 1757 1832 1758 static int nf_conntrack_update(struct net *net, struct sk_buff *skb) 1833 1759 { 1834 - const struct nf_conntrack_l4proto *l4proto; 1835 1760 struct nf_conntrack_tuple_hash *h; 1836 1761 struct nf_conntrack_tuple tuple; 1837 1762 enum ip_conntrack_info ctinfo; ··· 1851 1778 if (dataoff <= 0) 1852 1779 return -1; 1853 1780 1854 - l4proto = nf_ct_l4proto_find_get(l4num); 1855 - 1856 1781 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num, 1857 - l4num, net, &tuple, l4proto)) 1782 + l4num, net, &tuple)) 1858 1783 return -1; 1859 1784 1860 1785 if (ct->status & IPS_SRC_NAT) { ··· 2484 2413 nf_conntrack_tstamp_pernet_init(net); 2485 2414 nf_conntrack_ecache_pernet_init(net); 2486 2415 nf_conntrack_helper_pernet_init(net); 2416 + nf_conntrack_proto_pernet_init(net); 2487 2417 2488 - ret = nf_conntrack_proto_pernet_init(net); 2489 - if (ret < 0) 2490 - goto err_proto; 2491 2418 return 0; 2492 2419 2493 - err_proto: 2494 - nf_conntrack_ecache_pernet_fini(net); 2495 - nf_conntrack_expect_pernet_fini(net); 2496 2420 err_expect: 2497 2421 free_percpu(net->ct.stat); 2498 2422 err_pcpu_lists:
+1 -1
net/netfilter/nf_conntrack_expect.c
··· 610 610 expect->tuple.src.l3num, 611 611 expect->tuple.dst.protonum); 612 612 print_tuple(s, &expect->tuple, 613 - __nf_ct_l4proto_find(expect->tuple.dst.protonum)); 613 + nf_ct_l4proto_find(expect->tuple.dst.protonum)); 614 614 615 615 if (expect->flags & NF_CT_EXPECT_PERMANENT) { 616 616 seq_puts(s, "PERMANENT");
+6 -8
net/netfilter/nf_conntrack_netlink.c
··· 134 134 ret = ctnetlink_dump_tuples_ip(skb, tuple); 135 135 136 136 if (ret >= 0) { 137 - l4proto = __nf_ct_l4proto_find(tuple->dst.protonum); 137 + l4proto = nf_ct_l4proto_find(tuple->dst.protonum); 138 138 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto); 139 139 } 140 140 rcu_read_unlock(); ··· 182 182 struct nlattr *nest_proto; 183 183 int ret; 184 184 185 - l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); 185 + l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); 186 186 if (!l4proto->to_nlattr) 187 187 return 0; 188 188 ··· 590 590 len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1); 591 591 len *= 3u; /* ORIG, REPLY, MASTER */ 592 592 593 - l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); 593 + l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); 594 594 len += l4proto->nlattr_size; 595 595 if (l4proto->nlattr_tuple_size) { 596 596 len4 = l4proto->nlattr_tuple_size(); ··· 1059 1059 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]); 1060 1060 1061 1061 rcu_read_lock(); 1062 - l4proto = __nf_ct_l4proto_find(tuple->dst.protonum); 1062 + l4proto = nf_ct_l4proto_find(tuple->dst.protonum); 1063 1063 1064 1064 if (likely(l4proto->nlattr_to_tuple)) { 1065 1065 ret = nla_validate_nested(attr, CTA_PROTO_MAX, ··· 1722 1722 if (err < 0) 1723 1723 return err; 1724 1724 1725 - rcu_read_lock(); 1726 - l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); 1725 + l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); 1727 1726 if (l4proto->from_nlattr) 1728 1727 err = l4proto->from_nlattr(tb, ct); 1729 - rcu_read_unlock(); 1730 1728 1731 1729 return err; 1732 1730 } ··· 2674 2676 rcu_read_lock(); 2675 2677 ret = ctnetlink_dump_tuples_ip(skb, &m); 2676 2678 if (ret >= 0) { 2677 - l4proto = __nf_ct_l4proto_find(tuple->dst.protonum); 2679 + l4proto = nf_ct_l4proto_find(tuple->dst.protonum); 2678 2680 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); 2679 2681 } 2680 2682 rcu_read_unlock();
+1 -1
net/netfilter/nf_conntrack_pptp.c
··· 121 121 struct nf_conntrack_expect *exp_other; 122 122 123 123 /* obviously this tuple inversion only works until you do NAT */ 124 - nf_ct_invert_tuplepr(&inv_t, &exp->tuple); 124 + nf_ct_invert_tuple(&inv_t, &exp->tuple); 125 125 pr_debug("trying to unexpect other dir: "); 126 126 nf_ct_dump_tuple(&inv_t); 127 127
+81 -433
net/netfilter/nf_conntrack_proto.c
··· 43 43 44 44 extern unsigned int nf_conntrack_net_id; 45 45 46 - static struct nf_conntrack_l4proto __rcu *nf_ct_protos[MAX_NF_CT_PROTO + 1] __read_mostly; 47 - 48 46 static DEFINE_MUTEX(nf_ct_proto_mutex); 49 47 50 48 #ifdef CONFIG_SYSCTL 51 - static int 52 - nf_ct_register_sysctl(struct net *net, 53 - struct ctl_table_header **header, 54 - const char *path, 55 - struct ctl_table *table) 56 - { 57 - if (*header == NULL) { 58 - *header = register_net_sysctl(net, path, table); 59 - if (*header == NULL) 60 - return -ENOMEM; 61 - } 62 - 63 - return 0; 64 - } 65 - 66 - static void 67 - nf_ct_unregister_sysctl(struct ctl_table_header **header, 68 - struct ctl_table **table, 69 - unsigned int users) 70 - { 71 - if (users > 0) 72 - return; 73 - 74 - unregister_net_sysctl_table(*header); 75 - kfree(*table); 76 - *header = NULL; 77 - *table = NULL; 78 - } 79 - 80 49 __printf(5, 6) 81 50 void nf_l4proto_log_invalid(const struct sk_buff *skb, 82 51 struct net *net, ··· 93 124 EXPORT_SYMBOL_GPL(nf_ct_l4proto_log_invalid); 94 125 #endif 95 126 96 - const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto) 127 + const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto) 97 128 { 98 - if (unlikely(l4proto >= ARRAY_SIZE(nf_ct_protos))) 99 - return &nf_conntrack_l4proto_generic; 100 - 101 - return rcu_dereference(nf_ct_protos[l4proto]); 102 - } 103 - EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find); 104 - 105 - const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4num) 106 - { 107 - const struct nf_conntrack_l4proto *p; 108 - 109 - rcu_read_lock(); 110 - p = __nf_ct_l4proto_find(l4num); 111 - if (!try_module_get(p->me)) 112 - p = &nf_conntrack_l4proto_generic; 113 - rcu_read_unlock(); 114 - 115 - return p; 116 - } 117 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get); 118 - 119 - void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p) 120 - { 121 - module_put(p->me); 122 - } 123 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_put); 124 - 125 - static int kill_l4proto(struct 
nf_conn *i, void *data) 126 - { 127 - const struct nf_conntrack_l4proto *l4proto; 128 - l4proto = data; 129 - return nf_ct_protonum(i) == l4proto->l4proto; 130 - } 131 - 132 - static struct nf_proto_net *nf_ct_l4proto_net(struct net *net, 133 - const struct nf_conntrack_l4proto *l4proto) 134 - { 135 - if (l4proto->get_net_proto) { 136 - /* statically built-in protocols use static per-net */ 137 - return l4proto->get_net_proto(net); 138 - } else if (l4proto->net_id) { 139 - /* ... and loadable protocols use dynamic per-net */ 140 - return net_generic(net, *l4proto->net_id); 141 - } 142 - return NULL; 143 - } 144 - 145 - static 146 - int nf_ct_l4proto_register_sysctl(struct net *net, 147 - struct nf_proto_net *pn) 148 - { 149 - int err = 0; 150 - 151 - #ifdef CONFIG_SYSCTL 152 - if (pn->ctl_table != NULL) { 153 - err = nf_ct_register_sysctl(net, 154 - &pn->ctl_table_header, 155 - "net/netfilter", 156 - pn->ctl_table); 157 - if (err < 0) { 158 - if (!pn->users) { 159 - kfree(pn->ctl_table); 160 - pn->ctl_table = NULL; 161 - } 162 - } 163 - } 164 - #endif /* CONFIG_SYSCTL */ 165 - return err; 166 - } 167 - 168 - static 169 - void nf_ct_l4proto_unregister_sysctl(struct nf_proto_net *pn) 170 - { 171 - #ifdef CONFIG_SYSCTL 172 - if (pn->ctl_table_header != NULL) 173 - nf_ct_unregister_sysctl(&pn->ctl_table_header, 174 - &pn->ctl_table, 175 - pn->users); 176 - #endif /* CONFIG_SYSCTL */ 177 - } 178 - 179 - /* FIXME: Allow NULL functions and sub in pointers to generic for 180 - them. 
--RR */ 181 - int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto) 182 - { 183 - int ret = 0; 184 - 185 - if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) || 186 - (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size)) 187 - return -EINVAL; 188 - 189 - mutex_lock(&nf_ct_proto_mutex); 190 - if (rcu_dereference_protected( 191 - nf_ct_protos[l4proto->l4proto], 192 - lockdep_is_held(&nf_ct_proto_mutex) 193 - ) != &nf_conntrack_l4proto_generic) { 194 - ret = -EBUSY; 195 - goto out_unlock; 129 + switch (l4proto) { 130 + case IPPROTO_UDP: return &nf_conntrack_l4proto_udp; 131 + case IPPROTO_TCP: return &nf_conntrack_l4proto_tcp; 132 + case IPPROTO_ICMP: return &nf_conntrack_l4proto_icmp; 133 + #ifdef CONFIG_NF_CT_PROTO_DCCP 134 + case IPPROTO_DCCP: return &nf_conntrack_l4proto_dccp; 135 + #endif 136 + #ifdef CONFIG_NF_CT_PROTO_SCTP 137 + case IPPROTO_SCTP: return &nf_conntrack_l4proto_sctp; 138 + #endif 139 + #ifdef CONFIG_NF_CT_PROTO_UDPLITE 140 + case IPPROTO_UDPLITE: return &nf_conntrack_l4proto_udplite; 141 + #endif 142 + #ifdef CONFIG_NF_CT_PROTO_GRE 143 + case IPPROTO_GRE: return &nf_conntrack_l4proto_gre; 144 + #endif 145 + #if IS_ENABLED(CONFIG_IPV6) 146 + case IPPROTO_ICMPV6: return &nf_conntrack_l4proto_icmpv6; 147 + #endif /* CONFIG_IPV6 */ 196 148 } 197 149 198 - rcu_assign_pointer(nf_ct_protos[l4proto->l4proto], l4proto); 199 - out_unlock: 200 - mutex_unlock(&nf_ct_proto_mutex); 201 - return ret; 202 - } 203 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_register_one); 150 + return &nf_conntrack_l4proto_generic; 151 + }; 152 + EXPORT_SYMBOL_GPL(nf_ct_l4proto_find); 204 153 205 - int nf_ct_l4proto_pernet_register_one(struct net *net, 206 - const struct nf_conntrack_l4proto *l4proto) 154 + static unsigned int nf_confirm(struct sk_buff *skb, 155 + unsigned int protoff, 156 + struct nf_conn *ct, 157 + enum ip_conntrack_info ctinfo) 207 158 { 208 - int ret = 0; 209 - struct nf_proto_net *pn = NULL; 210 - 211 - if (l4proto->init_net) { 212 - ret = 
l4proto->init_net(net); 213 - if (ret < 0) 214 - goto out; 215 - } 216 - 217 - pn = nf_ct_l4proto_net(net, l4proto); 218 - if (pn == NULL) 219 - goto out; 220 - 221 - ret = nf_ct_l4proto_register_sysctl(net, pn); 222 - if (ret < 0) 223 - goto out; 224 - 225 - pn->users++; 226 - out: 227 - return ret; 228 - } 229 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one); 230 - 231 - static void __nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto) 232 - 233 - { 234 - BUG_ON(l4proto->l4proto >= ARRAY_SIZE(nf_ct_protos)); 235 - 236 - BUG_ON(rcu_dereference_protected( 237 - nf_ct_protos[l4proto->l4proto], 238 - lockdep_is_held(&nf_ct_proto_mutex) 239 - ) != l4proto); 240 - rcu_assign_pointer(nf_ct_protos[l4proto->l4proto], 241 - &nf_conntrack_l4proto_generic); 242 - } 243 - 244 - void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto) 245 - { 246 - mutex_lock(&nf_ct_proto_mutex); 247 - __nf_ct_l4proto_unregister_one(l4proto); 248 - mutex_unlock(&nf_ct_proto_mutex); 249 - 250 - synchronize_net(); 251 - /* Remove all contrack entries for this protocol */ 252 - nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto); 253 - } 254 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister_one); 255 - 256 - void nf_ct_l4proto_pernet_unregister_one(struct net *net, 257 - const struct nf_conntrack_l4proto *l4proto) 258 - { 259 - struct nf_proto_net *pn = nf_ct_l4proto_net(net, l4proto); 260 - 261 - if (pn == NULL) 262 - return; 263 - 264 - pn->users--; 265 - nf_ct_l4proto_unregister_sysctl(pn); 266 - } 267 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one); 268 - 269 - static void 270 - nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[], 271 - unsigned int num_proto) 272 - { 273 - int i; 274 - 275 - mutex_lock(&nf_ct_proto_mutex); 276 - for (i = 0; i < num_proto; i++) 277 - __nf_ct_l4proto_unregister_one(l4proto[i]); 278 - mutex_unlock(&nf_ct_proto_mutex); 279 - 280 - synchronize_net(); 281 - 282 - for (i = 0; i < 
num_proto; i++) 283 - nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto[i]); 284 - } 285 - 286 - static int 287 - nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[], 288 - unsigned int num_proto) 289 - { 290 - int ret = -EINVAL; 291 - unsigned int i; 292 - 293 - for (i = 0; i < num_proto; i++) { 294 - ret = nf_ct_l4proto_register_one(l4proto[i]); 295 - if (ret < 0) 296 - break; 297 - } 298 - if (i != num_proto) { 299 - pr_err("nf_conntrack: can't register l4 %d proto.\n", 300 - l4proto[i]->l4proto); 301 - nf_ct_l4proto_unregister(l4proto, i); 302 - } 303 - return ret; 304 - } 305 - 306 - int nf_ct_l4proto_pernet_register(struct net *net, 307 - const struct nf_conntrack_l4proto *const l4proto[], 308 - unsigned int num_proto) 309 - { 310 - int ret = -EINVAL; 311 - unsigned int i; 312 - 313 - for (i = 0; i < num_proto; i++) { 314 - ret = nf_ct_l4proto_pernet_register_one(net, l4proto[i]); 315 - if (ret < 0) 316 - break; 317 - } 318 - if (i != num_proto) { 319 - pr_err("nf_conntrack %d: pernet registration failed\n", 320 - l4proto[i]->l4proto); 321 - nf_ct_l4proto_pernet_unregister(net, l4proto, i); 322 - } 323 - return ret; 324 - } 325 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register); 326 - 327 - void nf_ct_l4proto_pernet_unregister(struct net *net, 328 - const struct nf_conntrack_l4proto *const l4proto[], 329 - unsigned int num_proto) 330 - { 331 - while (num_proto-- != 0) 332 - nf_ct_l4proto_pernet_unregister_one(net, l4proto[num_proto]); 333 - } 334 - EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister); 335 - 336 - static unsigned int ipv4_helper(void *priv, 337 - struct sk_buff *skb, 338 - const struct nf_hook_state *state) 339 - { 340 - struct nf_conn *ct; 341 - enum ip_conntrack_info ctinfo; 342 159 const struct nf_conn_help *help; 343 - const struct nf_conntrack_helper *helper; 344 - 345 - /* This is where we call the helper: as the packet goes out. 
*/ 346 - ct = nf_ct_get(skb, &ctinfo); 347 - if (!ct || ctinfo == IP_CT_RELATED_REPLY) 348 - return NF_ACCEPT; 349 160 350 161 help = nfct_help(ct); 351 - if (!help) 352 - return NF_ACCEPT; 162 + if (help) { 163 + const struct nf_conntrack_helper *helper; 164 + int ret; 353 165 354 - /* rcu_read_lock()ed by nf_hook_thresh */ 355 - helper = rcu_dereference(help->helper); 356 - if (!helper) 357 - return NF_ACCEPT; 166 + /* rcu_read_lock()ed by nf_hook_thresh */ 167 + helper = rcu_dereference(help->helper); 168 + if (helper) { 169 + ret = helper->help(skb, 170 + protoff, 171 + ct, ctinfo); 172 + if (ret != NF_ACCEPT) 173 + return ret; 174 + } 175 + } 358 176 359 - return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), 360 - ct, ctinfo); 177 + if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 178 + !nf_is_loopback_packet(skb)) { 179 + if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { 180 + NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); 181 + return NF_DROP; 182 + } 183 + } 184 + 185 + /* We've seen it coming out the other side: confirm it */ 186 + return nf_conntrack_confirm(skb); 361 187 } 362 188 363 189 static unsigned int ipv4_confirm(void *priv, 364 190 struct sk_buff *skb, 365 191 const struct nf_hook_state *state) 366 192 { 367 - struct nf_conn *ct; 368 193 enum ip_conntrack_info ctinfo; 194 + struct nf_conn *ct; 369 195 370 196 ct = nf_ct_get(skb, &ctinfo); 371 197 if (!ct || ctinfo == IP_CT_RELATED_REPLY) 372 - goto out; 198 + return nf_conntrack_confirm(skb); 373 199 374 - /* adjust seqs for loopback traffic only in outgoing direction */ 375 - if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 376 - !nf_is_loopback_packet(skb)) { 377 - if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) { 378 - NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); 379 - return NF_DROP; 380 - } 381 - } 382 - out: 383 - /* We've seen it coming out the other side: confirm it */ 384 - return nf_conntrack_confirm(skb); 200 + return nf_confirm(skb, 201 + skb_network_offset(skb) + 
ip_hdrlen(skb), 202 + ct, ctinfo); 385 203 } 386 204 387 205 static unsigned int ipv4_conntrack_in(void *priv, ··· 217 461 .priority = NF_IP_PRI_CONNTRACK, 218 462 }, 219 463 { 220 - .hook = ipv4_helper, 221 - .pf = NFPROTO_IPV4, 222 - .hooknum = NF_INET_POST_ROUTING, 223 - .priority = NF_IP_PRI_CONNTRACK_HELPER, 224 - }, 225 - { 226 464 .hook = ipv4_confirm, 227 465 .pf = NFPROTO_IPV4, 228 466 .hooknum = NF_INET_POST_ROUTING, 229 467 .priority = NF_IP_PRI_CONNTRACK_CONFIRM, 230 - }, 231 - { 232 - .hook = ipv4_helper, 233 - .pf = NFPROTO_IPV4, 234 - .hooknum = NF_INET_LOCAL_IN, 235 - .priority = NF_IP_PRI_CONNTRACK_HELPER, 236 468 }, 237 469 { 238 470 .hook = ipv4_confirm, ··· 367 623 struct nf_conn *ct; 368 624 enum ip_conntrack_info ctinfo; 369 625 unsigned char pnum = ipv6_hdr(skb)->nexthdr; 370 - int protoff; 371 626 __be16 frag_off; 627 + int protoff; 372 628 373 629 ct = nf_ct_get(skb, &ctinfo); 374 630 if (!ct || ctinfo == IP_CT_RELATED_REPLY) 375 - goto out; 631 + return nf_conntrack_confirm(skb); 376 632 377 633 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, 378 634 &frag_off); 379 635 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { 380 636 pr_debug("proto header not found\n"); 381 - goto out; 637 + return nf_conntrack_confirm(skb); 382 638 } 383 639 384 - /* adjust seqs for loopback traffic only in outgoing direction */ 385 - if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 386 - !nf_is_loopback_packet(skb)) { 387 - if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { 388 - NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); 389 - return NF_DROP; 390 - } 391 - } 392 - out: 393 - /* We've seen it coming out the other side: confirm it */ 394 - return nf_conntrack_confirm(skb); 640 + return nf_confirm(skb, protoff, ct, ctinfo); 395 641 } 396 642 397 643 static unsigned int ipv6_conntrack_in(void *priv, ··· 398 664 return nf_conntrack_in(skb, state); 399 665 } 400 666 401 - static unsigned int ipv6_helper(void *priv, 402 - struct sk_buff *skb, 403 - 
const struct nf_hook_state *state) 404 - { 405 - struct nf_conn *ct; 406 - const struct nf_conn_help *help; 407 - const struct nf_conntrack_helper *helper; 408 - enum ip_conntrack_info ctinfo; 409 - __be16 frag_off; 410 - int protoff; 411 - u8 nexthdr; 412 - 413 - /* This is where we call the helper: as the packet goes out. */ 414 - ct = nf_ct_get(skb, &ctinfo); 415 - if (!ct || ctinfo == IP_CT_RELATED_REPLY) 416 - return NF_ACCEPT; 417 - 418 - help = nfct_help(ct); 419 - if (!help) 420 - return NF_ACCEPT; 421 - /* rcu_read_lock()ed by nf_hook_thresh */ 422 - helper = rcu_dereference(help->helper); 423 - if (!helper) 424 - return NF_ACCEPT; 425 - 426 - nexthdr = ipv6_hdr(skb)->nexthdr; 427 - protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, 428 - &frag_off); 429 - if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { 430 - pr_debug("proto header not found\n"); 431 - return NF_ACCEPT; 432 - } 433 - 434 - return helper->help(skb, protoff, ct, ctinfo); 435 - } 436 - 437 667 static const struct nf_hook_ops ipv6_conntrack_ops[] = { 438 668 { 439 669 .hook = ipv6_conntrack_in, ··· 412 714 .priority = NF_IP6_PRI_CONNTRACK, 413 715 }, 414 716 { 415 - .hook = ipv6_helper, 416 - .pf = NFPROTO_IPV6, 417 - .hooknum = NF_INET_POST_ROUTING, 418 - .priority = NF_IP6_PRI_CONNTRACK_HELPER, 419 - }, 420 - { 421 717 .hook = ipv6_confirm, 422 718 .pf = NFPROTO_IPV6, 423 719 .hooknum = NF_INET_POST_ROUTING, 424 720 .priority = NF_IP6_PRI_LAST, 425 - }, 426 - { 427 - .hook = ipv6_helper, 428 - .pf = NFPROTO_IPV6, 429 - .hooknum = NF_INET_LOCAL_IN, 430 - .priority = NF_IP6_PRI_CONNTRACK_HELPER, 431 721 }, 432 722 { 433 723 .hook = ipv6_confirm, ··· 560 874 } 561 875 EXPORT_SYMBOL_GPL(nf_ct_netns_put); 562 876 563 - static const struct nf_conntrack_l4proto * const builtin_l4proto[] = { 564 - &nf_conntrack_l4proto_tcp, 565 - &nf_conntrack_l4proto_udp, 566 - &nf_conntrack_l4proto_icmp, 567 - #ifdef CONFIG_NF_CT_PROTO_DCCP 568 - &nf_conntrack_l4proto_dccp, 569 - #endif 570 - 
#ifdef CONFIG_NF_CT_PROTO_SCTP 571 - &nf_conntrack_l4proto_sctp, 572 - #endif 573 - #ifdef CONFIG_NF_CT_PROTO_UDPLITE 574 - &nf_conntrack_l4proto_udplite, 575 - #endif 576 - #if IS_ENABLED(CONFIG_IPV6) 577 - &nf_conntrack_l4proto_icmpv6, 578 - #endif /* CONFIG_IPV6 */ 579 - }; 580 - 581 877 int nf_conntrack_proto_init(void) 582 878 { 583 - int ret = 0, i; 879 + int ret; 584 880 585 881 ret = nf_register_sockopt(&so_getorigdst); 586 882 if (ret < 0) ··· 574 906 goto cleanup_sockopt; 575 907 #endif 576 908 577 - for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++) 578 - RCU_INIT_POINTER(nf_ct_protos[i], 579 - &nf_conntrack_l4proto_generic); 580 - 581 - ret = nf_ct_l4proto_register(builtin_l4proto, 582 - ARRAY_SIZE(builtin_l4proto)); 583 - if (ret < 0) 584 - goto cleanup_sockopt2; 585 - 586 909 return ret; 587 - cleanup_sockopt2: 588 - nf_unregister_sockopt(&so_getorigdst); 910 + 589 911 #if IS_ENABLED(CONFIG_IPV6) 590 912 cleanup_sockopt: 591 913 nf_unregister_sockopt(&so_getorigdst6); ··· 591 933 #endif 592 934 } 593 935 594 - int nf_conntrack_proto_pernet_init(struct net *net) 936 + void nf_conntrack_proto_pernet_init(struct net *net) 595 937 { 596 - int err; 597 - struct nf_proto_net *pn = nf_ct_l4proto_net(net, 598 - &nf_conntrack_l4proto_generic); 599 - 600 - err = nf_conntrack_l4proto_generic.init_net(net); 601 - if (err < 0) 602 - return err; 603 - err = nf_ct_l4proto_register_sysctl(net, 604 - pn); 605 - if (err < 0) 606 - return err; 607 - 608 - err = nf_ct_l4proto_pernet_register(net, builtin_l4proto, 609 - ARRAY_SIZE(builtin_l4proto)); 610 - if (err < 0) { 611 - nf_ct_l4proto_unregister_sysctl(pn); 612 - return err; 613 - } 614 - 615 - pn->users++; 616 - return 0; 938 + nf_conntrack_generic_init_net(net); 939 + nf_conntrack_udp_init_net(net); 940 + nf_conntrack_tcp_init_net(net); 941 + nf_conntrack_icmp_init_net(net); 942 + #if IS_ENABLED(CONFIG_IPV6) 943 + nf_conntrack_icmpv6_init_net(net); 944 + #endif 945 + #ifdef CONFIG_NF_CT_PROTO_DCCP 946 + 
nf_conntrack_dccp_init_net(net); 947 + #endif 948 + #ifdef CONFIG_NF_CT_PROTO_SCTP 949 + nf_conntrack_sctp_init_net(net); 950 + #endif 951 + #ifdef CONFIG_NF_CT_PROTO_GRE 952 + nf_conntrack_gre_init_net(net); 953 + #endif 617 954 } 618 955 619 956 void nf_conntrack_proto_pernet_fini(struct net *net) 620 957 { 621 - struct nf_proto_net *pn = nf_ct_l4proto_net(net, 622 - &nf_conntrack_l4proto_generic); 623 - 624 - nf_ct_l4proto_pernet_unregister(net, builtin_l4proto, 625 - ARRAY_SIZE(builtin_l4proto)); 626 - pn->users--; 627 - nf_ct_l4proto_unregister_sysctl(pn); 958 + #ifdef CONFIG_NF_CT_PROTO_GRE 959 + nf_ct_gre_keymap_flush(net); 960 + #endif 628 961 } 629 - 630 962 631 963 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, 632 964 &nf_conntrack_htable_size, 0600);
+18 -114
net/netfilter/nf_conntrack_proto_dccp.c
··· 472 472 return true; 473 473 } 474 474 475 - static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb, 476 - unsigned int dataoff, enum ip_conntrack_info ctinfo, 477 - const struct nf_hook_state *state) 475 + int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb, 476 + unsigned int dataoff, 477 + enum ip_conntrack_info ctinfo, 478 + const struct nf_hook_state *state) 478 479 { 479 480 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 480 481 struct dccp_hdr _dh, *dh; ··· 724 723 }; 725 724 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 726 725 727 - #ifdef CONFIG_SYSCTL 728 - /* template, data assigned later */ 729 - static struct ctl_table dccp_sysctl_table[] = { 730 - { 731 - .procname = "nf_conntrack_dccp_timeout_request", 732 - .maxlen = sizeof(unsigned int), 733 - .mode = 0644, 734 - .proc_handler = proc_dointvec_jiffies, 735 - }, 736 - { 737 - .procname = "nf_conntrack_dccp_timeout_respond", 738 - .maxlen = sizeof(unsigned int), 739 - .mode = 0644, 740 - .proc_handler = proc_dointvec_jiffies, 741 - }, 742 - { 743 - .procname = "nf_conntrack_dccp_timeout_partopen", 744 - .maxlen = sizeof(unsigned int), 745 - .mode = 0644, 746 - .proc_handler = proc_dointvec_jiffies, 747 - }, 748 - { 749 - .procname = "nf_conntrack_dccp_timeout_open", 750 - .maxlen = sizeof(unsigned int), 751 - .mode = 0644, 752 - .proc_handler = proc_dointvec_jiffies, 753 - }, 754 - { 755 - .procname = "nf_conntrack_dccp_timeout_closereq", 756 - .maxlen = sizeof(unsigned int), 757 - .mode = 0644, 758 - .proc_handler = proc_dointvec_jiffies, 759 - }, 760 - { 761 - .procname = "nf_conntrack_dccp_timeout_closing", 762 - .maxlen = sizeof(unsigned int), 763 - .mode = 0644, 764 - .proc_handler = proc_dointvec_jiffies, 765 - }, 766 - { 767 - .procname = "nf_conntrack_dccp_timeout_timewait", 768 - .maxlen = sizeof(unsigned int), 769 - .mode = 0644, 770 - .proc_handler = proc_dointvec_jiffies, 771 - }, 772 - { 773 - .procname = "nf_conntrack_dccp_loose", 774 - .maxlen = sizeof(int), 
775 - .mode = 0644, 776 - .proc_handler = proc_dointvec, 777 - }, 778 - { } 779 - }; 780 - #endif /* CONFIG_SYSCTL */ 781 - 782 - static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn, 783 - struct nf_dccp_net *dn) 784 - { 785 - #ifdef CONFIG_SYSCTL 786 - if (pn->ctl_table) 787 - return 0; 788 - 789 - pn->ctl_table = kmemdup(dccp_sysctl_table, 790 - sizeof(dccp_sysctl_table), 791 - GFP_KERNEL); 792 - if (!pn->ctl_table) 793 - return -ENOMEM; 794 - 795 - pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST]; 796 - pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND]; 797 - pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN]; 798 - pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN]; 799 - pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ]; 800 - pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING]; 801 - pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; 802 - pn->ctl_table[7].data = &dn->dccp_loose; 803 - 804 - /* Don't export sysctls to unprivileged users */ 805 - if (net->user_ns != &init_user_ns) 806 - pn->ctl_table[0].procname = NULL; 807 - #endif 808 - return 0; 809 - } 810 - 811 - static int dccp_init_net(struct net *net) 726 + void nf_conntrack_dccp_init_net(struct net *net) 812 727 { 813 728 struct nf_dccp_net *dn = nf_dccp_pernet(net); 814 - struct nf_proto_net *pn = &dn->pn; 815 729 816 - if (!pn->users) { 817 - /* default values */ 818 - dn->dccp_loose = 1; 819 - dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL; 820 - dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL; 821 - dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL; 822 - dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ; 823 - dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; 824 - dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; 825 - dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 730 + /* default values */ 731 + dn->dccp_loose = 1; 732 + dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL; 733 + dn->dccp_timeout[CT_DCCP_RESPOND] = 4 
* DCCP_MSL; 734 + dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL; 735 + dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ; 736 + dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; 737 + dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; 738 + dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 826 739 827 - /* timeouts[0] is unused, make it same as SYN_SENT so 828 - * ->timeouts[0] contains 'new' timeout, like udp or icmp. 829 - */ 830 - dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST]; 831 - } 832 - 833 - return dccp_kmemdup_sysctl_table(net, pn, dn); 834 - } 835 - 836 - static struct nf_proto_net *dccp_get_net_proto(struct net *net) 837 - { 838 - return &net->ct.nf_ct_proto.dccp.pn; 740 + /* timeouts[0] is unused, make it same as SYN_SENT so 741 + * ->timeouts[0] contains 'new' timeout, like udp or icmp. 742 + */ 743 + dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST]; 839 744 } 840 745 841 746 const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = { 842 747 .l4proto = IPPROTO_DCCP, 843 - .packet = dccp_packet, 844 748 .can_early_drop = dccp_can_early_drop, 845 749 #ifdef CONFIG_NF_CONNTRACK_PROCFS 846 750 .print_conntrack = dccp_print_conntrack, ··· 768 862 .nla_policy = dccp_timeout_nla_policy, 769 863 }, 770 864 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 771 - .init_net = dccp_init_net, 772 - .get_net_proto = dccp_get_net_proto, 773 865 };
+1 -84
net/netfilter/nf_conntrack_proto_generic.c
··· 15 15 16 16 static const unsigned int nf_ct_generic_timeout = 600*HZ; 17 17 18 - static bool nf_generic_should_process(u8 proto) 19 - { 20 - switch (proto) { 21 - #ifdef CONFIG_NF_CT_PROTO_GRE_MODULE 22 - case IPPROTO_GRE: 23 - return false; 24 - #endif 25 - default: 26 - return true; 27 - } 28 - } 29 - 30 - static bool generic_pkt_to_tuple(const struct sk_buff *skb, 31 - unsigned int dataoff, 32 - struct net *net, struct nf_conntrack_tuple *tuple) 33 - { 34 - tuple->src.u.all = 0; 35 - tuple->dst.u.all = 0; 36 - 37 - return true; 38 - } 39 - 40 - /* Returns verdict for packet, or -1 for invalid. */ 41 - static int generic_packet(struct nf_conn *ct, 42 - struct sk_buff *skb, 43 - unsigned int dataoff, 44 - enum ip_conntrack_info ctinfo, 45 - const struct nf_hook_state *state) 46 - { 47 - const unsigned int *timeout = nf_ct_timeout_lookup(ct); 48 - 49 - if (!nf_generic_should_process(nf_ct_protonum(ct))) { 50 - pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n", 51 - nf_ct_protonum(ct)); 52 - return -NF_ACCEPT; 53 - } 54 - 55 - if (!timeout) 56 - timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout; 57 - 58 - nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); 59 - return NF_ACCEPT; 60 - } 61 - 62 18 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 63 19 64 20 #include <linux/netfilter/nfnetlink.h> ··· 60 104 }; 61 105 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 62 106 63 - #ifdef CONFIG_SYSCTL 64 - static struct ctl_table generic_sysctl_table[] = { 65 - { 66 - .procname = "nf_conntrack_generic_timeout", 67 - .maxlen = sizeof(unsigned int), 68 - .mode = 0644, 69 - .proc_handler = proc_dointvec_jiffies, 70 - }, 71 - { } 72 - }; 73 - #endif /* CONFIG_SYSCTL */ 74 - 75 - static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn, 76 - struct nf_generic_net *gn) 77 - { 78 - #ifdef CONFIG_SYSCTL 79 - pn->ctl_table = kmemdup(generic_sysctl_table, 80 - sizeof(generic_sysctl_table), 81 - GFP_KERNEL); 82 - if 
(!pn->ctl_table) 83 - return -ENOMEM; 84 - 85 - pn->ctl_table[0].data = &gn->timeout; 86 - #endif 87 - return 0; 88 - } 89 - 90 - static int generic_init_net(struct net *net) 107 + void nf_conntrack_generic_init_net(struct net *net) 91 108 { 92 109 struct nf_generic_net *gn = nf_generic_pernet(net); 93 - struct nf_proto_net *pn = &gn->pn; 94 110 95 111 gn->timeout = nf_ct_generic_timeout; 96 - 97 - return generic_kmemdup_sysctl_table(pn, gn); 98 - } 99 - 100 - static struct nf_proto_net *generic_get_net_proto(struct net *net) 101 - { 102 - return &net->ct.nf_ct_proto.generic.pn; 103 112 } 104 113 105 114 const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = 106 115 { 107 116 .l4proto = 255, 108 - .pkt_to_tuple = generic_pkt_to_tuple, 109 - .packet = generic_packet, 110 117 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 111 118 .ctnl_timeout = { 112 119 .nlattr_to_obj = generic_timeout_nlattr_to_obj, ··· 79 160 .nla_policy = generic_timeout_nla_policy, 80 161 }, 81 162 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 82 - .init_net = generic_init_net, 83 - .get_net_proto = generic_get_net_proto, 84 163 };
+36 -160
net/netfilter/nf_conntrack_proto_gre.c
··· 48 48 [GRE_CT_REPLIED] = 180*HZ, 49 49 }; 50 50 51 - static unsigned int proto_gre_net_id __read_mostly; 51 + /* used when expectation is added */ 52 + static DEFINE_SPINLOCK(keymap_lock); 52 53 53 - static inline struct netns_proto_gre *gre_pernet(struct net *net) 54 + static inline struct nf_gre_net *gre_pernet(struct net *net) 54 55 { 55 - return net_generic(net, proto_gre_net_id); 56 + return &net->ct.nf_ct_proto.gre; 56 57 } 57 58 58 - static void nf_ct_gre_keymap_flush(struct net *net) 59 + void nf_ct_gre_keymap_flush(struct net *net) 59 60 { 60 - struct netns_proto_gre *net_gre = gre_pernet(net); 61 + struct nf_gre_net *net_gre = gre_pernet(net); 61 62 struct nf_ct_gre_keymap *km, *tmp; 62 63 63 - write_lock_bh(&net_gre->keymap_lock); 64 + spin_lock_bh(&keymap_lock); 64 65 list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) { 65 - list_del(&km->list); 66 - kfree(km); 66 + list_del_rcu(&km->list); 67 + kfree_rcu(km, rcu); 67 68 } 68 - write_unlock_bh(&net_gre->keymap_lock); 69 + spin_unlock_bh(&keymap_lock); 69 70 } 70 71 71 72 static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km, ··· 82 81 /* look up the source key for a given tuple */ 83 82 static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t) 84 83 { 85 - struct netns_proto_gre *net_gre = gre_pernet(net); 84 + struct nf_gre_net *net_gre = gre_pernet(net); 86 85 struct nf_ct_gre_keymap *km; 87 86 __be16 key = 0; 88 87 89 - read_lock_bh(&net_gre->keymap_lock); 90 - list_for_each_entry(km, &net_gre->keymap_list, list) { 88 + list_for_each_entry_rcu(km, &net_gre->keymap_list, list) { 91 89 if (gre_key_cmpfn(km, t)) { 92 90 key = km->tuple.src.u.gre.key; 93 91 break; 94 92 } 95 93 } 96 - read_unlock_bh(&net_gre->keymap_lock); 97 94 98 95 pr_debug("lookup src key 0x%x for ", key); 99 96 nf_ct_dump_tuple(t); ··· 104 105 struct nf_conntrack_tuple *t) 105 106 { 106 107 struct net *net = nf_ct_net(ct); 107 - struct netns_proto_gre *net_gre = gre_pernet(net); 108 + 
struct nf_gre_net *net_gre = gre_pernet(net); 108 109 struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); 109 110 struct nf_ct_gre_keymap **kmp, *km; 110 111 111 112 kmp = &ct_pptp_info->keymap[dir]; 112 113 if (*kmp) { 113 114 /* check whether it's a retransmission */ 114 - read_lock_bh(&net_gre->keymap_lock); 115 - list_for_each_entry(km, &net_gre->keymap_list, list) { 116 - if (gre_key_cmpfn(km, t) && km == *kmp) { 117 - read_unlock_bh(&net_gre->keymap_lock); 115 + list_for_each_entry_rcu(km, &net_gre->keymap_list, list) { 116 + if (gre_key_cmpfn(km, t) && km == *kmp) 118 117 return 0; 119 - } 120 118 } 121 - read_unlock_bh(&net_gre->keymap_lock); 122 119 pr_debug("trying to override keymap_%s for ct %p\n", 123 120 dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct); 124 121 return -EEXIST; ··· 129 134 pr_debug("adding new entry %p: ", km); 130 135 nf_ct_dump_tuple(&km->tuple); 131 136 132 - write_lock_bh(&net_gre->keymap_lock); 137 + spin_lock_bh(&keymap_lock); 133 138 list_add_tail(&km->list, &net_gre->keymap_list); 134 - write_unlock_bh(&net_gre->keymap_lock); 139 + spin_unlock_bh(&keymap_lock); 135 140 136 141 return 0; 137 142 } ··· 140 145 /* destroy the keymap entries associated with specified master ct */ 141 146 void nf_ct_gre_keymap_destroy(struct nf_conn *ct) 142 147 { 143 - struct net *net = nf_ct_net(ct); 144 - struct netns_proto_gre *net_gre = gre_pernet(net); 145 148 struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); 146 149 enum ip_conntrack_dir dir; 147 150 148 151 pr_debug("entering for ct %p\n", ct); 149 152 150 - write_lock_bh(&net_gre->keymap_lock); 153 + spin_lock_bh(&keymap_lock); 151 154 for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) { 152 155 if (ct_pptp_info->keymap[dir]) { 153 156 pr_debug("removing %p from list\n", 154 157 ct_pptp_info->keymap[dir]); 155 - list_del(&ct_pptp_info->keymap[dir]->list); 156 - kfree(ct_pptp_info->keymap[dir]); 158 + list_del_rcu(&ct_pptp_info->keymap[dir]->list); 159 + 
kfree_rcu(ct_pptp_info->keymap[dir], rcu); 157 160 ct_pptp_info->keymap[dir] = NULL; 158 161 } 159 162 } 160 - write_unlock_bh(&net_gre->keymap_lock); 163 + spin_unlock_bh(&keymap_lock); 161 164 } 162 165 EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy); 163 166 164 167 /* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */ 165 168 166 169 /* gre hdr info to tuple */ 167 - static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 168 - struct net *net, struct nf_conntrack_tuple *tuple) 170 + bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 171 + struct net *net, struct nf_conntrack_tuple *tuple) 169 172 { 170 173 const struct pptp_gre_header *pgrehdr; 171 174 struct pptp_gre_header _pgrehdr; ··· 209 216 210 217 static unsigned int *gre_get_timeouts(struct net *net) 211 218 { 212 - return gre_pernet(net)->gre_timeouts; 219 + return gre_pernet(net)->timeouts; 213 220 } 214 221 215 222 /* Returns verdict for packet, and may modify conntrack */ 216 - static int gre_packet(struct nf_conn *ct, 217 - struct sk_buff *skb, 218 - unsigned int dataoff, 219 - enum ip_conntrack_info ctinfo, 220 - const struct nf_hook_state *state) 223 + int nf_conntrack_gre_packet(struct nf_conn *ct, 224 + struct sk_buff *skb, 225 + unsigned int dataoff, 226 + enum ip_conntrack_info ctinfo, 227 + const struct nf_hook_state *state) 221 228 { 222 229 if (state->pf != NFPROTO_IPV4) 223 230 return -NF_ACCEPT; ··· 249 256 return NF_ACCEPT; 250 257 } 251 258 252 - /* Called when a conntrack entry has already been removed from the hashes 253 - * and is about to be deleted from memory */ 254 - static void gre_destroy(struct nf_conn *ct) 255 - { 256 - struct nf_conn *master = ct->master; 257 - pr_debug(" entering\n"); 258 - 259 - if (!master) 260 - pr_debug("no master !?!\n"); 261 - else 262 - nf_ct_gre_keymap_destroy(master); 263 - } 264 - 265 259 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT 266 260 267 261 #include <linux/netfilter/nfnetlink.h> ··· 258 278 struct net *net, void 
*data) 259 279 { 260 280 unsigned int *timeouts = data; 261 - struct netns_proto_gre *net_gre = gre_pernet(net); 281 + struct nf_gre_net *net_gre = gre_pernet(net); 262 282 263 283 if (!timeouts) 264 284 timeouts = gre_get_timeouts(net); 265 285 /* set default timeouts for GRE. */ 266 - timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED]; 267 - timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED]; 286 + timeouts[GRE_CT_UNREPLIED] = net_gre->timeouts[GRE_CT_UNREPLIED]; 287 + timeouts[GRE_CT_REPLIED] = net_gre->timeouts[GRE_CT_REPLIED]; 268 288 269 289 if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) { 270 290 timeouts[GRE_CT_UNREPLIED] = ··· 300 320 }; 301 321 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 302 322 303 - #ifdef CONFIG_SYSCTL 304 - static struct ctl_table gre_sysctl_table[] = { 305 - { 306 - .procname = "nf_conntrack_gre_timeout", 307 - .maxlen = sizeof(unsigned int), 308 - .mode = 0644, 309 - .proc_handler = proc_dointvec_jiffies, 310 - }, 311 - { 312 - .procname = "nf_conntrack_gre_timeout_stream", 313 - .maxlen = sizeof(unsigned int), 314 - .mode = 0644, 315 - .proc_handler = proc_dointvec_jiffies, 316 - }, 317 - {} 318 - }; 319 - #endif 320 - 321 - static int gre_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *nf, 322 - struct netns_proto_gre *net_gre) 323 + void nf_conntrack_gre_init_net(struct net *net) 323 324 { 324 - #ifdef CONFIG_SYSCTL 325 + struct nf_gre_net *net_gre = gre_pernet(net); 325 326 int i; 326 327 327 - if (nf->ctl_table) 328 - return 0; 329 - 330 - nf->ctl_table = kmemdup(gre_sysctl_table, 331 - sizeof(gre_sysctl_table), 332 - GFP_KERNEL); 333 - if (!nf->ctl_table) 334 - return -ENOMEM; 335 - 336 - for (i = 0; i < GRE_CT_MAX; i++) 337 - nf->ctl_table[i].data = &net_gre->gre_timeouts[i]; 338 - #endif 339 - return 0; 340 - } 341 - 342 - static int gre_init_net(struct net *net) 343 - { 344 - struct netns_proto_gre *net_gre = gre_pernet(net); 345 - struct nf_proto_net *nf = &net_gre->nf; 346 - int i; 347 - 348 - 
rwlock_init(&net_gre->keymap_lock); 349 328 INIT_LIST_HEAD(&net_gre->keymap_list); 350 329 for (i = 0; i < GRE_CT_MAX; i++) 351 - net_gre->gre_timeouts[i] = gre_timeouts[i]; 352 - 353 - return gre_kmemdup_sysctl_table(net, nf, net_gre); 330 + net_gre->timeouts[i] = gre_timeouts[i]; 354 331 } 355 332 356 333 /* protocol helper struct */ 357 - static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { 334 + const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre = { 358 335 .l4proto = IPPROTO_GRE, 359 - .pkt_to_tuple = gre_pkt_to_tuple, 360 336 #ifdef CONFIG_NF_CONNTRACK_PROCFS 361 337 .print_conntrack = gre_print_conntrack, 362 338 #endif 363 - .packet = gre_packet, 364 - .destroy = gre_destroy, 365 - .me = THIS_MODULE, 366 339 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 367 340 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 368 341 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, ··· 331 398 .nla_policy = gre_timeout_nla_policy, 332 399 }, 333 400 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 334 - .net_id = &proto_gre_net_id, 335 - .init_net = gre_init_net, 336 401 }; 337 - 338 - static int proto_gre_net_init(struct net *net) 339 - { 340 - int ret = 0; 341 - 342 - ret = nf_ct_l4proto_pernet_register_one(net, 343 - &nf_conntrack_l4proto_gre4); 344 - if (ret < 0) 345 - pr_err("nf_conntrack_gre4: pernet registration failed.\n"); 346 - return ret; 347 - } 348 - 349 - static void proto_gre_net_exit(struct net *net) 350 - { 351 - nf_ct_l4proto_pernet_unregister_one(net, &nf_conntrack_l4proto_gre4); 352 - nf_ct_gre_keymap_flush(net); 353 - } 354 - 355 - static struct pernet_operations proto_gre_net_ops = { 356 - .init = proto_gre_net_init, 357 - .exit = proto_gre_net_exit, 358 - .id = &proto_gre_net_id, 359 - .size = sizeof(struct netns_proto_gre), 360 - }; 361 - 362 - static int __init nf_ct_proto_gre_init(void) 363 - { 364 - int ret; 365 - 366 - BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0); 367 - 368 - ret = register_pernet_subsys(&proto_gre_net_ops); 
369 - if (ret < 0) 370 - goto out_pernet; 371 - ret = nf_ct_l4proto_register_one(&nf_conntrack_l4proto_gre4); 372 - if (ret < 0) 373 - goto out_gre4; 374 - 375 - return 0; 376 - out_gre4: 377 - unregister_pernet_subsys(&proto_gre_net_ops); 378 - out_pernet: 379 - return ret; 380 - } 381 - 382 - static void __exit nf_ct_proto_gre_fini(void) 383 - { 384 - nf_ct_l4proto_unregister_one(&nf_conntrack_l4proto_gre4); 385 - unregister_pernet_subsys(&proto_gre_net_ops); 386 - } 387 - 388 - module_init(nf_ct_proto_gre_init); 389 - module_exit(nf_ct_proto_gre_fini); 390 - 391 - MODULE_LICENSE("GPL");
+10 -57
net/netfilter/nf_conntrack_proto_icmp.c
··· 25 25 26 26 static const unsigned int nf_ct_icmp_timeout = 30*HZ; 27 27 28 - static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 29 - struct net *net, struct nf_conntrack_tuple *tuple) 28 + bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, 29 + struct net *net, struct nf_conntrack_tuple *tuple) 30 30 { 31 31 const struct icmphdr *hp; 32 32 struct icmphdr _hdr; ··· 54 54 [ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1 55 55 }; 56 56 57 - static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple, 58 - const struct nf_conntrack_tuple *orig) 57 + bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple, 58 + const struct nf_conntrack_tuple *orig) 59 59 { 60 60 if (orig->dst.u.icmp.type >= sizeof(invmap) || 61 61 !invmap[orig->dst.u.icmp.type]) ··· 68 68 } 69 69 70 70 /* Returns verdict for packet, or -1 for invalid. */ 71 - static int icmp_packet(struct nf_conn *ct, 72 - struct sk_buff *skb, 73 - unsigned int dataoff, 74 - enum ip_conntrack_info ctinfo, 75 - const struct nf_hook_state *state) 71 + int nf_conntrack_icmp_packet(struct nf_conn *ct, 72 + struct sk_buff *skb, 73 + enum ip_conntrack_info ctinfo, 74 + const struct nf_hook_state *state) 76 75 { 77 76 /* Do not immediately delete the connection after the first 78 77 successful reply to avoid excessive conntrackd traffic ··· 109 110 const struct nf_hook_state *state) 110 111 { 111 112 struct nf_conntrack_tuple innertuple, origtuple; 112 - const struct nf_conntrack_l4proto *innerproto; 113 113 const struct nf_conntrack_tuple_hash *h; 114 114 const struct nf_conntrack_zone *zone; 115 115 enum ip_conntrack_info ctinfo; ··· 126 128 return -NF_ACCEPT; 127 129 } 128 130 129 - /* rcu_read_lock()ed by nf_hook_thresh */ 130 - innerproto = __nf_ct_l4proto_find(origtuple.dst.protonum); 131 - 132 131 /* Ordinarily, we'd expect the inverted tupleproto, but it's 133 132 been preserved inside the ICMP. 
*/ 134 - if (!nf_ct_invert_tuple(&innertuple, &origtuple, innerproto)) { 133 + if (!nf_ct_invert_tuple(&innertuple, &origtuple)) { 135 134 pr_debug("icmp_error_message: no match\n"); 136 135 return -NF_ACCEPT; 137 136 } ··· 298 303 }; 299 304 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 300 305 301 - #ifdef CONFIG_SYSCTL 302 - static struct ctl_table icmp_sysctl_table[] = { 303 - { 304 - .procname = "nf_conntrack_icmp_timeout", 305 - .maxlen = sizeof(unsigned int), 306 - .mode = 0644, 307 - .proc_handler = proc_dointvec_jiffies, 308 - }, 309 - { } 310 - }; 311 - #endif /* CONFIG_SYSCTL */ 312 - 313 - static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn, 314 - struct nf_icmp_net *in) 315 - { 316 - #ifdef CONFIG_SYSCTL 317 - pn->ctl_table = kmemdup(icmp_sysctl_table, 318 - sizeof(icmp_sysctl_table), 319 - GFP_KERNEL); 320 - if (!pn->ctl_table) 321 - return -ENOMEM; 322 - 323 - pn->ctl_table[0].data = &in->timeout; 324 - #endif 325 - return 0; 326 - } 327 - 328 - static int icmp_init_net(struct net *net) 306 + void nf_conntrack_icmp_init_net(struct net *net) 329 307 { 330 308 struct nf_icmp_net *in = nf_icmp_pernet(net); 331 - struct nf_proto_net *pn = &in->pn; 332 309 333 310 in->timeout = nf_ct_icmp_timeout; 334 - 335 - return icmp_kmemdup_sysctl_table(pn, in); 336 - } 337 - 338 - static struct nf_proto_net *icmp_get_net_proto(struct net *net) 339 - { 340 - return &net->ct.nf_ct_proto.icmp.pn; 341 311 } 342 312 343 313 const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = 344 314 { 345 315 .l4proto = IPPROTO_ICMP, 346 - .pkt_to_tuple = icmp_pkt_to_tuple, 347 - .invert_tuple = icmp_invert_tuple, 348 - .packet = icmp_packet, 349 - .destroy = NULL, 350 - .me = NULL, 351 316 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 352 317 .tuple_to_nlattr = icmp_tuple_to_nlattr, 353 318 .nlattr_tuple_size = icmp_nlattr_tuple_size, ··· 323 368 .nla_policy = icmp_timeout_nla_policy, 324 369 }, 325 370 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 326 - .init_net = icmp_init_net, 327 - 
.get_net_proto = icmp_get_net_proto, 328 371 };
+12 -57
net/netfilter/nf_conntrack_proto_icmpv6.c
··· 30 30 31 31 static const unsigned int nf_ct_icmpv6_timeout = 30*HZ; 32 32 33 - static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, 34 - unsigned int dataoff, 35 - struct net *net, 36 - struct nf_conntrack_tuple *tuple) 33 + bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, 34 + unsigned int dataoff, 35 + struct net *net, 36 + struct nf_conntrack_tuple *tuple) 37 37 { 38 38 const struct icmp6hdr *hp; 39 39 struct icmp6hdr _hdr; ··· 67 67 [ICMPV6_MLD2_REPORT - 130] = 1 68 68 }; 69 69 70 - static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple, 71 - const struct nf_conntrack_tuple *orig) 70 + bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple, 71 + const struct nf_conntrack_tuple *orig) 72 72 { 73 73 int type = orig->dst.u.icmp.type - 128; 74 74 if (type < 0 || type >= sizeof(invmap) || !invmap[type]) ··· 86 86 } 87 87 88 88 /* Returns verdict for packet, or -1 for invalid. */ 89 - static int icmpv6_packet(struct nf_conn *ct, 90 - struct sk_buff *skb, 91 - unsigned int dataoff, 92 - enum ip_conntrack_info ctinfo, 93 - const struct nf_hook_state *state) 89 + int nf_conntrack_icmpv6_packet(struct nf_conn *ct, 90 + struct sk_buff *skb, 91 + enum ip_conntrack_info ctinfo, 92 + const struct nf_hook_state *state) 94 93 { 95 94 unsigned int *timeout = nf_ct_timeout_lookup(ct); 96 95 static const u8 valid_new[] = { ··· 130 131 { 131 132 struct nf_conntrack_tuple intuple, origtuple; 132 133 const struct nf_conntrack_tuple_hash *h; 133 - const struct nf_conntrack_l4proto *inproto; 134 134 enum ip_conntrack_info ctinfo; 135 135 struct nf_conntrack_zone tmp; 136 136 ··· 145 147 return -NF_ACCEPT; 146 148 } 147 149 148 - /* rcu_read_lock()ed by nf_hook_thresh */ 149 - inproto = __nf_ct_l4proto_find(origtuple.dst.protonum); 150 - 151 150 /* Ordinarily, we'd expect the inverted tupleproto, but it's 152 151 been preserved inside the ICMP. 
*/ 153 - if (!nf_ct_invert_tuple(&intuple, &origtuple, inproto)) { 152 + if (!nf_ct_invert_tuple(&intuple, &origtuple)) { 154 153 pr_debug("icmpv6_error: Can't invert tuple\n"); 155 154 return -NF_ACCEPT; 156 155 } ··· 309 314 }; 310 315 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 311 316 312 - #ifdef CONFIG_SYSCTL 313 - static struct ctl_table icmpv6_sysctl_table[] = { 314 - { 315 - .procname = "nf_conntrack_icmpv6_timeout", 316 - .maxlen = sizeof(unsigned int), 317 - .mode = 0644, 318 - .proc_handler = proc_dointvec_jiffies, 319 - }, 320 - { } 321 - }; 322 - #endif /* CONFIG_SYSCTL */ 323 - 324 - static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn, 325 - struct nf_icmp_net *in) 326 - { 327 - #ifdef CONFIG_SYSCTL 328 - pn->ctl_table = kmemdup(icmpv6_sysctl_table, 329 - sizeof(icmpv6_sysctl_table), 330 - GFP_KERNEL); 331 - if (!pn->ctl_table) 332 - return -ENOMEM; 333 - 334 - pn->ctl_table[0].data = &in->timeout; 335 - #endif 336 - return 0; 337 - } 338 - 339 - static int icmpv6_init_net(struct net *net) 317 + void nf_conntrack_icmpv6_init_net(struct net *net) 340 318 { 341 319 struct nf_icmp_net *in = nf_icmpv6_pernet(net); 342 - struct nf_proto_net *pn = &in->pn; 343 320 344 321 in->timeout = nf_ct_icmpv6_timeout; 345 - 346 - return icmpv6_kmemdup_sysctl_table(pn, in); 347 - } 348 - 349 - static struct nf_proto_net *icmpv6_get_net_proto(struct net *net) 350 - { 351 - return &net->ct.nf_ct_proto.icmpv6.pn; 352 322 } 353 323 354 324 const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = 355 325 { 356 326 .l4proto = IPPROTO_ICMPV6, 357 - .pkt_to_tuple = icmpv6_pkt_to_tuple, 358 - .invert_tuple = icmpv6_invert_tuple, 359 - .packet = icmpv6_packet, 360 327 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 361 328 .tuple_to_nlattr = icmpv6_tuple_to_nlattr, 362 329 .nlattr_tuple_size = icmpv6_nlattr_tuple_size, ··· 334 377 .nla_policy = icmpv6_timeout_nla_policy, 335 378 }, 336 379 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 337 - .init_net = icmpv6_init_net, 338 - 
.get_net_proto = icmpv6_get_net_proto, 339 380 };
+13 -115
net/netfilter/nf_conntrack_proto_sctp.c
··· 357 357 } 358 358 359 359 /* Returns verdict for packet, or -NF_ACCEPT for invalid. */ 360 - static int sctp_packet(struct nf_conn *ct, 361 - struct sk_buff *skb, 362 - unsigned int dataoff, 363 - enum ip_conntrack_info ctinfo, 364 - const struct nf_hook_state *state) 360 + int nf_conntrack_sctp_packet(struct nf_conn *ct, 361 + struct sk_buff *skb, 362 + unsigned int dataoff, 363 + enum ip_conntrack_info ctinfo, 364 + const struct nf_hook_state *state) 365 365 { 366 366 enum sctp_conntrack new_state, old_state; 367 367 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); ··· 642 642 }; 643 643 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 644 644 645 - 646 - #ifdef CONFIG_SYSCTL 647 - static struct ctl_table sctp_sysctl_table[] = { 648 - { 649 - .procname = "nf_conntrack_sctp_timeout_closed", 650 - .maxlen = sizeof(unsigned int), 651 - .mode = 0644, 652 - .proc_handler = proc_dointvec_jiffies, 653 - }, 654 - { 655 - .procname = "nf_conntrack_sctp_timeout_cookie_wait", 656 - .maxlen = sizeof(unsigned int), 657 - .mode = 0644, 658 - .proc_handler = proc_dointvec_jiffies, 659 - }, 660 - { 661 - .procname = "nf_conntrack_sctp_timeout_cookie_echoed", 662 - .maxlen = sizeof(unsigned int), 663 - .mode = 0644, 664 - .proc_handler = proc_dointvec_jiffies, 665 - }, 666 - { 667 - .procname = "nf_conntrack_sctp_timeout_established", 668 - .maxlen = sizeof(unsigned int), 669 - .mode = 0644, 670 - .proc_handler = proc_dointvec_jiffies, 671 - }, 672 - { 673 - .procname = "nf_conntrack_sctp_timeout_shutdown_sent", 674 - .maxlen = sizeof(unsigned int), 675 - .mode = 0644, 676 - .proc_handler = proc_dointvec_jiffies, 677 - }, 678 - { 679 - .procname = "nf_conntrack_sctp_timeout_shutdown_recd", 680 - .maxlen = sizeof(unsigned int), 681 - .mode = 0644, 682 - .proc_handler = proc_dointvec_jiffies, 683 - }, 684 - { 685 - .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent", 686 - .maxlen = sizeof(unsigned int), 687 - .mode = 0644, 688 - .proc_handler = proc_dointvec_jiffies, 689 - }, 690 
- { 691 - .procname = "nf_conntrack_sctp_timeout_heartbeat_sent", 692 - .maxlen = sizeof(unsigned int), 693 - .mode = 0644, 694 - .proc_handler = proc_dointvec_jiffies, 695 - }, 696 - { 697 - .procname = "nf_conntrack_sctp_timeout_heartbeat_acked", 698 - .maxlen = sizeof(unsigned int), 699 - .mode = 0644, 700 - .proc_handler = proc_dointvec_jiffies, 701 - }, 702 - { } 703 - }; 704 - #endif 705 - 706 - static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn, 707 - struct nf_sctp_net *sn) 708 - { 709 - #ifdef CONFIG_SYSCTL 710 - if (pn->ctl_table) 711 - return 0; 712 - 713 - pn->ctl_table = kmemdup(sctp_sysctl_table, 714 - sizeof(sctp_sysctl_table), 715 - GFP_KERNEL); 716 - if (!pn->ctl_table) 717 - return -ENOMEM; 718 - 719 - pn->ctl_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED]; 720 - pn->ctl_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT]; 721 - pn->ctl_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED]; 722 - pn->ctl_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED]; 723 - pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT]; 724 - pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD]; 725 - pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]; 726 - pn->ctl_table[7].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_SENT]; 727 - pn->ctl_table[8].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_ACKED]; 728 - #endif 729 - return 0; 730 - } 731 - 732 - static int sctp_init_net(struct net *net) 645 + void nf_conntrack_sctp_init_net(struct net *net) 733 646 { 734 647 struct nf_sctp_net *sn = nf_sctp_pernet(net); 735 - struct nf_proto_net *pn = &sn->pn; 648 + int i; 736 649 737 - if (!pn->users) { 738 - int i; 650 + for (i = 0; i < SCTP_CONNTRACK_MAX; i++) 651 + sn->timeouts[i] = sctp_timeouts[i]; 739 652 740 - for (i = 0; i < SCTP_CONNTRACK_MAX; i++) 741 - sn->timeouts[i] = sctp_timeouts[i]; 742 - 743 - /* timeouts[0] is unused, init it so ->timeouts[0] contains 744 - * 'new' timeout, like 
udp or icmp. 745 - */ 746 - sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED]; 747 - } 748 - 749 - return sctp_kmemdup_sysctl_table(pn, sn); 750 - } 751 - 752 - static struct nf_proto_net *sctp_get_net_proto(struct net *net) 753 - { 754 - return &net->ct.nf_ct_proto.sctp.pn; 653 + /* timeouts[0] is unused, init it so ->timeouts[0] contains 654 + * 'new' timeout, like udp or icmp. 655 + */ 656 + sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED]; 755 657 } 756 658 757 659 const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = { ··· 661 759 #ifdef CONFIG_NF_CONNTRACK_PROCFS 662 760 .print_conntrack = sctp_print_conntrack, 663 761 #endif 664 - .packet = sctp_packet, 665 762 .can_early_drop = sctp_can_early_drop, 666 - .me = THIS_MODULE, 667 763 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 668 764 .nlattr_size = SCTP_NLATTR_SIZE, 669 765 .to_nlattr = sctp_to_nlattr, ··· 680 780 .nla_policy = sctp_timeout_nla_policy, 681 781 }, 682 782 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 683 - .init_net = sctp_init_net, 684 - .get_net_proto = sctp_get_net_proto, 685 783 };
+16 -144
net/netfilter/nf_conntrack_proto_tcp.c
··· 829 829 } 830 830 831 831 /* Returns verdict for packet, or -1 for invalid. */ 832 - static int tcp_packet(struct nf_conn *ct, 833 - struct sk_buff *skb, 834 - unsigned int dataoff, 835 - enum ip_conntrack_info ctinfo, 836 - const struct nf_hook_state *state) 832 + int nf_conntrack_tcp_packet(struct nf_conn *ct, 833 + struct sk_buff *skb, 834 + unsigned int dataoff, 835 + enum ip_conntrack_info ctinfo, 836 + const struct nf_hook_state *state) 837 837 { 838 838 struct net *net = nf_ct_net(ct); 839 839 struct nf_tcp_net *tn = nf_tcp_pernet(net); ··· 1387 1387 }; 1388 1388 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 1389 1389 1390 - #ifdef CONFIG_SYSCTL 1391 - static struct ctl_table tcp_sysctl_table[] = { 1392 - { 1393 - .procname = "nf_conntrack_tcp_timeout_syn_sent", 1394 - .maxlen = sizeof(unsigned int), 1395 - .mode = 0644, 1396 - .proc_handler = proc_dointvec_jiffies, 1397 - }, 1398 - { 1399 - .procname = "nf_conntrack_tcp_timeout_syn_recv", 1400 - .maxlen = sizeof(unsigned int), 1401 - .mode = 0644, 1402 - .proc_handler = proc_dointvec_jiffies, 1403 - }, 1404 - { 1405 - .procname = "nf_conntrack_tcp_timeout_established", 1406 - .maxlen = sizeof(unsigned int), 1407 - .mode = 0644, 1408 - .proc_handler = proc_dointvec_jiffies, 1409 - }, 1410 - { 1411 - .procname = "nf_conntrack_tcp_timeout_fin_wait", 1412 - .maxlen = sizeof(unsigned int), 1413 - .mode = 0644, 1414 - .proc_handler = proc_dointvec_jiffies, 1415 - }, 1416 - { 1417 - .procname = "nf_conntrack_tcp_timeout_close_wait", 1418 - .maxlen = sizeof(unsigned int), 1419 - .mode = 0644, 1420 - .proc_handler = proc_dointvec_jiffies, 1421 - }, 1422 - { 1423 - .procname = "nf_conntrack_tcp_timeout_last_ack", 1424 - .maxlen = sizeof(unsigned int), 1425 - .mode = 0644, 1426 - .proc_handler = proc_dointvec_jiffies, 1427 - }, 1428 - { 1429 - .procname = "nf_conntrack_tcp_timeout_time_wait", 1430 - .maxlen = sizeof(unsigned int), 1431 - .mode = 0644, 1432 - .proc_handler = proc_dointvec_jiffies, 1433 - }, 1434 - { 
1435 - .procname = "nf_conntrack_tcp_timeout_close", 1436 - .maxlen = sizeof(unsigned int), 1437 - .mode = 0644, 1438 - .proc_handler = proc_dointvec_jiffies, 1439 - }, 1440 - { 1441 - .procname = "nf_conntrack_tcp_timeout_max_retrans", 1442 - .maxlen = sizeof(unsigned int), 1443 - .mode = 0644, 1444 - .proc_handler = proc_dointvec_jiffies, 1445 - }, 1446 - { 1447 - .procname = "nf_conntrack_tcp_timeout_unacknowledged", 1448 - .maxlen = sizeof(unsigned int), 1449 - .mode = 0644, 1450 - .proc_handler = proc_dointvec_jiffies, 1451 - }, 1452 - { 1453 - .procname = "nf_conntrack_tcp_loose", 1454 - .maxlen = sizeof(unsigned int), 1455 - .mode = 0644, 1456 - .proc_handler = proc_dointvec, 1457 - }, 1458 - { 1459 - .procname = "nf_conntrack_tcp_be_liberal", 1460 - .maxlen = sizeof(unsigned int), 1461 - .mode = 0644, 1462 - .proc_handler = proc_dointvec, 1463 - }, 1464 - { 1465 - .procname = "nf_conntrack_tcp_max_retrans", 1466 - .maxlen = sizeof(unsigned int), 1467 - .mode = 0644, 1468 - .proc_handler = proc_dointvec, 1469 - }, 1470 - { } 1471 - }; 1472 - #endif /* CONFIG_SYSCTL */ 1473 - 1474 - static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn, 1475 - struct nf_tcp_net *tn) 1476 - { 1477 - #ifdef CONFIG_SYSCTL 1478 - if (pn->ctl_table) 1479 - return 0; 1480 - 1481 - pn->ctl_table = kmemdup(tcp_sysctl_table, 1482 - sizeof(tcp_sysctl_table), 1483 - GFP_KERNEL); 1484 - if (!pn->ctl_table) 1485 - return -ENOMEM; 1486 - 1487 - pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT]; 1488 - pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV]; 1489 - pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED]; 1490 - pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT]; 1491 - pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT]; 1492 - pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK]; 1493 - pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT]; 1494 - pn->ctl_table[7].data = 
&tn->timeouts[TCP_CONNTRACK_CLOSE]; 1495 - pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS]; 1496 - pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK]; 1497 - pn->ctl_table[10].data = &tn->tcp_loose; 1498 - pn->ctl_table[11].data = &tn->tcp_be_liberal; 1499 - pn->ctl_table[12].data = &tn->tcp_max_retrans; 1500 - #endif 1501 - return 0; 1502 - } 1503 - 1504 - static int tcp_init_net(struct net *net) 1390 + void nf_conntrack_tcp_init_net(struct net *net) 1505 1391 { 1506 1392 struct nf_tcp_net *tn = nf_tcp_pernet(net); 1507 - struct nf_proto_net *pn = &tn->pn; 1393 + int i; 1508 1394 1509 - if (!pn->users) { 1510 - int i; 1395 + for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) 1396 + tn->timeouts[i] = tcp_timeouts[i]; 1511 1397 1512 - for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) 1513 - tn->timeouts[i] = tcp_timeouts[i]; 1514 - 1515 - /* timeouts[0] is unused, make it same as SYN_SENT so 1516 - * ->timeouts[0] contains 'new' timeout, like udp or icmp. 1517 - */ 1518 - tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT]; 1519 - tn->tcp_loose = nf_ct_tcp_loose; 1520 - tn->tcp_be_liberal = nf_ct_tcp_be_liberal; 1521 - tn->tcp_max_retrans = nf_ct_tcp_max_retrans; 1522 - } 1523 - 1524 - return tcp_kmemdup_sysctl_table(pn, tn); 1525 - } 1526 - 1527 - static struct nf_proto_net *tcp_get_net_proto(struct net *net) 1528 - { 1529 - return &net->ct.nf_ct_proto.tcp.pn; 1398 + /* timeouts[0] is unused, make it same as SYN_SENT so 1399 + * ->timeouts[0] contains 'new' timeout, like udp or icmp. 
1400 + */ 1401 + tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT]; 1402 + tn->tcp_loose = nf_ct_tcp_loose; 1403 + tn->tcp_be_liberal = nf_ct_tcp_be_liberal; 1404 + tn->tcp_max_retrans = nf_ct_tcp_max_retrans; 1530 1405 } 1531 1406 1532 1407 const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp = ··· 1410 1535 #ifdef CONFIG_NF_CONNTRACK_PROCFS 1411 1536 .print_conntrack = tcp_print_conntrack, 1412 1537 #endif 1413 - .packet = tcp_packet, 1414 1538 .can_early_drop = tcp_can_early_drop, 1415 1539 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 1416 1540 .to_nlattr = tcp_to_nlattr, ··· 1430 1556 .nla_policy = tcp_timeout_nla_policy, 1431 1557 }, 1432 1558 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 1433 - .init_net = tcp_init_net, 1434 - .get_net_proto = tcp_get_net_proto, 1435 1559 };
+14 -66
net/netfilter/nf_conntrack_proto_udp.c
··· 85 85 } 86 86 87 87 /* Returns verdict for packet, and may modify conntracktype */ 88 - static int udp_packet(struct nf_conn *ct, 89 - struct sk_buff *skb, 90 - unsigned int dataoff, 91 - enum ip_conntrack_info ctinfo, 92 - const struct nf_hook_state *state) 88 + int nf_conntrack_udp_packet(struct nf_conn *ct, 89 + struct sk_buff *skb, 90 + unsigned int dataoff, 91 + enum ip_conntrack_info ctinfo, 92 + const struct nf_hook_state *state) 93 93 { 94 94 unsigned int *timeouts; 95 95 ··· 177 177 } 178 178 179 179 /* Returns verdict for packet, and may modify conntracktype */ 180 - static int udplite_packet(struct nf_conn *ct, 181 - struct sk_buff *skb, 182 - unsigned int dataoff, 183 - enum ip_conntrack_info ctinfo, 184 - const struct nf_hook_state *state) 180 + int nf_conntrack_udplite_packet(struct nf_conn *ct, 181 + struct sk_buff *skb, 182 + unsigned int dataoff, 183 + enum ip_conntrack_info ctinfo, 184 + const struct nf_hook_state *state) 185 185 { 186 186 unsigned int *timeouts; 187 187 ··· 260 260 }; 261 261 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 262 262 263 - #ifdef CONFIG_SYSCTL 264 - static struct ctl_table udp_sysctl_table[] = { 265 - { 266 - .procname = "nf_conntrack_udp_timeout", 267 - .maxlen = sizeof(unsigned int), 268 - .mode = 0644, 269 - .proc_handler = proc_dointvec_jiffies, 270 - }, 271 - { 272 - .procname = "nf_conntrack_udp_timeout_stream", 273 - .maxlen = sizeof(unsigned int), 274 - .mode = 0644, 275 - .proc_handler = proc_dointvec_jiffies, 276 - }, 277 - { } 278 - }; 279 - #endif /* CONFIG_SYSCTL */ 280 - 281 - static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn, 282 - struct nf_udp_net *un) 283 - { 284 - #ifdef CONFIG_SYSCTL 285 - if (pn->ctl_table) 286 - return 0; 287 - pn->ctl_table = kmemdup(udp_sysctl_table, 288 - sizeof(udp_sysctl_table), 289 - GFP_KERNEL); 290 - if (!pn->ctl_table) 291 - return -ENOMEM; 292 - pn->ctl_table[0].data = &un->timeouts[UDP_CT_UNREPLIED]; 293 - pn->ctl_table[1].data = &un->timeouts[UDP_CT_REPLIED]; 
294 - #endif 295 - return 0; 296 - } 297 - 298 - static int udp_init_net(struct net *net) 263 + void nf_conntrack_udp_init_net(struct net *net) 299 264 { 300 265 struct nf_udp_net *un = nf_udp_pernet(net); 301 - struct nf_proto_net *pn = &un->pn; 266 + int i; 302 267 303 - if (!pn->users) { 304 - int i; 305 - 306 - for (i = 0; i < UDP_CT_MAX; i++) 307 - un->timeouts[i] = udp_timeouts[i]; 308 - } 309 - 310 - return udp_kmemdup_sysctl_table(pn, un); 311 - } 312 - 313 - static struct nf_proto_net *udp_get_net_proto(struct net *net) 314 - { 315 - return &net->ct.nf_ct_proto.udp.pn; 268 + for (i = 0; i < UDP_CT_MAX; i++) 269 + un->timeouts[i] = udp_timeouts[i]; 316 270 } 317 271 318 272 const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp = 319 273 { 320 274 .l4proto = IPPROTO_UDP, 321 275 .allow_clash = true, 322 - .packet = udp_packet, 323 276 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 324 277 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 325 278 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, ··· 288 335 .nla_policy = udp_timeout_nla_policy, 289 336 }, 290 337 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 291 - .init_net = udp_init_net, 292 - .get_net_proto = udp_get_net_proto, 293 338 }; 294 339 295 340 #ifdef CONFIG_NF_CT_PROTO_UDPLITE ··· 295 344 { 296 345 .l4proto = IPPROTO_UDPLITE, 297 346 .allow_clash = true, 298 - .packet = udplite_packet, 299 347 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 300 348 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 301 349 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, ··· 310 360 .nla_policy = udp_timeout_nla_policy, 311 361 }, 312 362 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 313 - .init_net = udp_init_net, 314 - .get_net_proto = udp_get_net_proto, 315 363 }; 316 364 #endif
+409 -18
net/netfilter/nf_conntrack_standalone.c
··· 24 24 #include <net/netfilter/nf_conntrack_timestamp.h> 25 25 #include <linux/rculist_nulls.h> 26 26 27 + static bool enable_hooks __read_mostly; 28 + MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks"); 29 + module_param(enable_hooks, bool, 0000); 30 + 27 31 unsigned int nf_conntrack_net_id __read_mostly; 28 32 29 33 #ifdef CONFIG_NF_CONNTRACK_PROCFS ··· 314 310 if (!net_eq(nf_ct_net(ct), net)) 315 311 goto release; 316 312 317 - l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct)); 318 - WARN_ON(!l4proto); 313 + l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); 319 314 320 315 ret = -ENOSPC; 321 316 seq_printf(s, "%-8s %u %-8s %u ", ··· 550 547 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP 551 548 NF_SYSCTL_CT_TIMESTAMP, 552 549 #endif 550 + NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC, 551 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT, 552 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV, 553 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED, 554 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT, 555 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT, 556 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK, 557 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT, 558 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE, 559 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS, 560 + NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK, 561 + NF_SYSCTL_CT_PROTO_TCP_LOOSE, 562 + NF_SYSCTL_CT_PROTO_TCP_LIBERAL, 563 + NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS, 564 + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP, 565 + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM, 566 + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP, 567 + NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6, 568 + #ifdef CONFIG_NF_CT_PROTO_SCTP 569 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED, 570 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT, 571 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED, 572 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED, 573 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT, 574 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD, 575 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, 576 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT, 
577 + NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED, 578 + #endif 579 + #ifdef CONFIG_NF_CT_PROTO_DCCP 580 + NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST, 581 + NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND, 582 + NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN, 583 + NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN, 584 + NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ, 585 + NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING, 586 + NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT, 587 + NF_SYSCTL_CT_PROTO_DCCP_LOOSE, 588 + #endif 589 + #ifdef CONFIG_NF_CT_PROTO_GRE 590 + NF_SYSCTL_CT_PROTO_TIMEOUT_GRE, 591 + NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM, 592 + #endif 593 + 594 + __NF_SYSCTL_CT_LAST_SYSCTL, 553 595 }; 596 + 597 + #define NF_SYSCTL_CT_LAST_SYSCTL (__NF_SYSCTL_CT_LAST_SYSCTL + 1) 554 598 555 599 static struct ctl_table nf_ct_sysctl_table[] = { 556 600 [NF_SYSCTL_CT_MAX] = { ··· 676 626 .proc_handler = proc_dointvec, 677 627 }, 678 628 #endif 679 - { } 629 + [NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC] = { 630 + .procname = "nf_conntrack_generic_timeout", 631 + .maxlen = sizeof(unsigned int), 632 + .mode = 0644, 633 + .proc_handler = proc_dointvec_jiffies, 634 + }, 635 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT] = { 636 + .procname = "nf_conntrack_tcp_timeout_syn_sent", 637 + .maxlen = sizeof(unsigned int), 638 + .mode = 0644, 639 + .proc_handler = proc_dointvec_jiffies, 640 + }, 641 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV] = { 642 + .procname = "nf_conntrack_tcp_timeout_syn_recv", 643 + .maxlen = sizeof(unsigned int), 644 + .mode = 0644, 645 + .proc_handler = proc_dointvec_jiffies, 646 + }, 647 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED] = { 648 + .procname = "nf_conntrack_tcp_timeout_established", 649 + .maxlen = sizeof(unsigned int), 650 + .mode = 0644, 651 + .proc_handler = proc_dointvec_jiffies, 652 + }, 653 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT] = { 654 + .procname = "nf_conntrack_tcp_timeout_fin_wait", 655 + .maxlen = sizeof(unsigned int), 656 + .mode = 0644, 657 + .proc_handler = 
proc_dointvec_jiffies, 658 + }, 659 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT] = { 660 + .procname = "nf_conntrack_tcp_timeout_close_wait", 661 + .maxlen = sizeof(unsigned int), 662 + .mode = 0644, 663 + .proc_handler = proc_dointvec_jiffies, 664 + }, 665 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK] = { 666 + .procname = "nf_conntrack_tcp_timeout_last_ack", 667 + .maxlen = sizeof(unsigned int), 668 + .mode = 0644, 669 + .proc_handler = proc_dointvec_jiffies, 670 + }, 671 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT] = { 672 + .procname = "nf_conntrack_tcp_timeout_time_wait", 673 + .maxlen = sizeof(unsigned int), 674 + .mode = 0644, 675 + .proc_handler = proc_dointvec_jiffies, 676 + }, 677 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE] = { 678 + .procname = "nf_conntrack_tcp_timeout_close", 679 + .maxlen = sizeof(unsigned int), 680 + .mode = 0644, 681 + .proc_handler = proc_dointvec_jiffies, 682 + }, 683 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS] = { 684 + .procname = "nf_conntrack_tcp_timeout_max_retrans", 685 + .maxlen = sizeof(unsigned int), 686 + .mode = 0644, 687 + .proc_handler = proc_dointvec_jiffies, 688 + }, 689 + [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK] = { 690 + .procname = "nf_conntrack_tcp_timeout_unacknowledged", 691 + .maxlen = sizeof(unsigned int), 692 + .mode = 0644, 693 + .proc_handler = proc_dointvec_jiffies, 694 + }, 695 + [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = { 696 + .procname = "nf_conntrack_tcp_loose", 697 + .maxlen = sizeof(unsigned int), 698 + .mode = 0644, 699 + .proc_handler = proc_dointvec, 700 + }, 701 + [NF_SYSCTL_CT_PROTO_TCP_LIBERAL] = { 702 + .procname = "nf_conntrack_tcp_be_liberal", 703 + .maxlen = sizeof(unsigned int), 704 + .mode = 0644, 705 + .proc_handler = proc_dointvec, 706 + }, 707 + [NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = { 708 + .procname = "nf_conntrack_tcp_max_retrans", 709 + .maxlen = sizeof(unsigned int), 710 + .mode = 0644, 711 + .proc_handler = proc_dointvec, 712 + }, 713 + [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP] = { 714 + .procname = 
"nf_conntrack_udp_timeout", 715 + .maxlen = sizeof(unsigned int), 716 + .mode = 0644, 717 + .proc_handler = proc_dointvec_jiffies, 718 + }, 719 + [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM] = { 720 + .procname = "nf_conntrack_udp_timeout_stream", 721 + .maxlen = sizeof(unsigned int), 722 + .mode = 0644, 723 + .proc_handler = proc_dointvec_jiffies, 724 + }, 725 + [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = { 726 + .procname = "nf_conntrack_icmp_timeout", 727 + .maxlen = sizeof(unsigned int), 728 + .mode = 0644, 729 + .proc_handler = proc_dointvec_jiffies, 730 + }, 731 + [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6] = { 732 + .procname = "nf_conntrack_icmpv6_timeout", 733 + .maxlen = sizeof(unsigned int), 734 + .mode = 0644, 735 + .proc_handler = proc_dointvec_jiffies, 736 + }, 737 + #ifdef CONFIG_NF_CT_PROTO_SCTP 738 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED] = { 739 + .procname = "nf_conntrack_sctp_timeout_closed", 740 + .maxlen = sizeof(unsigned int), 741 + .mode = 0644, 742 + .proc_handler = proc_dointvec_jiffies, 743 + }, 744 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT] = { 745 + .procname = "nf_conntrack_sctp_timeout_cookie_wait", 746 + .maxlen = sizeof(unsigned int), 747 + .mode = 0644, 748 + .proc_handler = proc_dointvec_jiffies, 749 + }, 750 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED] = { 751 + .procname = "nf_conntrack_sctp_timeout_cookie_echoed", 752 + .maxlen = sizeof(unsigned int), 753 + .mode = 0644, 754 + .proc_handler = proc_dointvec_jiffies, 755 + }, 756 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED] = { 757 + .procname = "nf_conntrack_sctp_timeout_established", 758 + .maxlen = sizeof(unsigned int), 759 + .mode = 0644, 760 + .proc_handler = proc_dointvec_jiffies, 761 + }, 762 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT] = { 763 + .procname = "nf_conntrack_sctp_timeout_shutdown_sent", 764 + .maxlen = sizeof(unsigned int), 765 + .mode = 0644, 766 + .proc_handler = proc_dointvec_jiffies, 767 + }, 768 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD] = { 769 + 
.procname = "nf_conntrack_sctp_timeout_shutdown_recd", 770 + .maxlen = sizeof(unsigned int), 771 + .mode = 0644, 772 + .proc_handler = proc_dointvec_jiffies, 773 + }, 774 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { 775 + .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent", 776 + .maxlen = sizeof(unsigned int), 777 + .mode = 0644, 778 + .proc_handler = proc_dointvec_jiffies, 779 + }, 780 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT] = { 781 + .procname = "nf_conntrack_sctp_timeout_heartbeat_sent", 782 + .maxlen = sizeof(unsigned int), 783 + .mode = 0644, 784 + .proc_handler = proc_dointvec_jiffies, 785 + }, 786 + [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { 787 + .procname = "nf_conntrack_sctp_timeout_heartbeat_acked", 788 + .maxlen = sizeof(unsigned int), 789 + .mode = 0644, 790 + .proc_handler = proc_dointvec_jiffies, 791 + }, 792 + #endif 793 + #ifdef CONFIG_NF_CT_PROTO_DCCP 794 + [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = { 795 + .procname = "nf_conntrack_dccp_timeout_request", 796 + .maxlen = sizeof(unsigned int), 797 + .mode = 0644, 798 + .proc_handler = proc_dointvec_jiffies, 799 + }, 800 + [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND] = { 801 + .procname = "nf_conntrack_dccp_timeout_respond", 802 + .maxlen = sizeof(unsigned int), 803 + .mode = 0644, 804 + .proc_handler = proc_dointvec_jiffies, 805 + }, 806 + [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN] = { 807 + .procname = "nf_conntrack_dccp_timeout_partopen", 808 + .maxlen = sizeof(unsigned int), 809 + .mode = 0644, 810 + .proc_handler = proc_dointvec_jiffies, 811 + }, 812 + [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN] = { 813 + .procname = "nf_conntrack_dccp_timeout_open", 814 + .maxlen = sizeof(unsigned int), 815 + .mode = 0644, 816 + .proc_handler = proc_dointvec_jiffies, 817 + }, 818 + [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ] = { 819 + .procname = "nf_conntrack_dccp_timeout_closereq", 820 + .maxlen = sizeof(unsigned int), 821 + .mode = 0644, 822 + .proc_handler = 
proc_dointvec_jiffies, 823 + }, 824 + [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING] = { 825 + .procname = "nf_conntrack_dccp_timeout_closing", 826 + .maxlen = sizeof(unsigned int), 827 + .mode = 0644, 828 + .proc_handler = proc_dointvec_jiffies, 829 + }, 830 + [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT] = { 831 + .procname = "nf_conntrack_dccp_timeout_timewait", 832 + .maxlen = sizeof(unsigned int), 833 + .mode = 0644, 834 + .proc_handler = proc_dointvec_jiffies, 835 + }, 836 + [NF_SYSCTL_CT_PROTO_DCCP_LOOSE] = { 837 + .procname = "nf_conntrack_dccp_loose", 838 + .maxlen = sizeof(int), 839 + .mode = 0644, 840 + .proc_handler = proc_dointvec, 841 + }, 842 + #endif 843 + #ifdef CONFIG_NF_CT_PROTO_GRE 844 + [NF_SYSCTL_CT_PROTO_TIMEOUT_GRE] = { 845 + .procname = "nf_conntrack_gre_timeout", 846 + .maxlen = sizeof(unsigned int), 847 + .mode = 0644, 848 + .proc_handler = proc_dointvec_jiffies, 849 + }, 850 + [NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM] = { 851 + .procname = "nf_conntrack_gre_timeout_stream", 852 + .maxlen = sizeof(unsigned int), 853 + .mode = 0644, 854 + .proc_handler = proc_dointvec_jiffies, 855 + }, 856 + #endif 857 + {} 680 858 }; 681 859 682 860 static struct ctl_table nf_ct_netfilter_table[] = { ··· 918 640 { } 919 641 }; 920 642 643 + static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net, 644 + struct ctl_table *table) 645 + { 646 + struct nf_tcp_net *tn = nf_tcp_pernet(net); 647 + 648 + #define XASSIGN(XNAME, tn) \ 649 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ ## XNAME].data = \ 650 + &(tn)->timeouts[TCP_CONNTRACK_ ## XNAME] 651 + 652 + XASSIGN(SYN_SENT, tn); 653 + XASSIGN(SYN_RECV, tn); 654 + XASSIGN(ESTABLISHED, tn); 655 + XASSIGN(FIN_WAIT, tn); 656 + XASSIGN(CLOSE_WAIT, tn); 657 + XASSIGN(LAST_ACK, tn); 658 + XASSIGN(TIME_WAIT, tn); 659 + XASSIGN(CLOSE, tn); 660 + XASSIGN(RETRANS, tn); 661 + XASSIGN(UNACK, tn); 662 + #undef XASSIGN 663 + #define XASSIGN(XNAME, rval) \ 664 + table[NF_SYSCTL_CT_PROTO_TCP_ ## XNAME].data = (rval) 665 + 666 + 
XASSIGN(LOOSE, &tn->tcp_loose); 667 + XASSIGN(LIBERAL, &tn->tcp_be_liberal); 668 + XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans); 669 + #undef XASSIGN 670 + } 671 + 672 + static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net, 673 + struct ctl_table *table) 674 + { 675 + #ifdef CONFIG_NF_CT_PROTO_SCTP 676 + struct nf_sctp_net *sn = nf_sctp_pernet(net); 677 + 678 + #define XASSIGN(XNAME, sn) \ 679 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ ## XNAME].data = \ 680 + &(sn)->timeouts[SCTP_CONNTRACK_ ## XNAME] 681 + 682 + XASSIGN(CLOSED, sn); 683 + XASSIGN(COOKIE_WAIT, sn); 684 + XASSIGN(COOKIE_ECHOED, sn); 685 + XASSIGN(ESTABLISHED, sn); 686 + XASSIGN(SHUTDOWN_SENT, sn); 687 + XASSIGN(SHUTDOWN_RECD, sn); 688 + XASSIGN(SHUTDOWN_ACK_SENT, sn); 689 + XASSIGN(HEARTBEAT_SENT, sn); 690 + XASSIGN(HEARTBEAT_ACKED, sn); 691 + #undef XASSIGN 692 + #endif 693 + } 694 + 695 + static void nf_conntrack_standalone_init_dccp_sysctl(struct net *net, 696 + struct ctl_table *table) 697 + { 698 + #ifdef CONFIG_NF_CT_PROTO_DCCP 699 + struct nf_dccp_net *dn = nf_dccp_pernet(net); 700 + 701 + #define XASSIGN(XNAME, dn) \ 702 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_ ## XNAME].data = \ 703 + &(dn)->dccp_timeout[CT_DCCP_ ## XNAME] 704 + 705 + XASSIGN(REQUEST, dn); 706 + XASSIGN(RESPOND, dn); 707 + XASSIGN(PARTOPEN, dn); 708 + XASSIGN(OPEN, dn); 709 + XASSIGN(CLOSEREQ, dn); 710 + XASSIGN(CLOSING, dn); 711 + XASSIGN(TIMEWAIT, dn); 712 + #undef XASSIGN 713 + 714 + table[NF_SYSCTL_CT_PROTO_DCCP_LOOSE].data = &dn->dccp_loose; 715 + #endif 716 + } 717 + 718 + static void nf_conntrack_standalone_init_gre_sysctl(struct net *net, 719 + struct ctl_table *table) 720 + { 721 + #ifdef CONFIG_NF_CT_PROTO_GRE 722 + struct nf_gre_net *gn = nf_gre_pernet(net); 723 + 724 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE].data = &gn->timeouts[GRE_CT_UNREPLIED]; 725 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM].data = &gn->timeouts[GRE_CT_REPLIED]; 726 + #endif 727 + } 728 + 921 729 static int 
nf_conntrack_standalone_init_sysctl(struct net *net) 922 730 { 731 + struct nf_udp_net *un = nf_udp_pernet(net); 923 732 struct ctl_table *table; 733 + 734 + BUILD_BUG_ON(ARRAY_SIZE(nf_ct_sysctl_table) != NF_SYSCTL_CT_LAST_SYSCTL); 924 735 925 736 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table), 926 737 GFP_KERNEL); 927 738 if (!table) 928 - goto out_kmemdup; 739 + return -ENOMEM; 929 740 930 741 table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; 931 742 table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; ··· 1022 655 #ifdef CONFIG_NF_CONNTRACK_EVENTS 1023 656 table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; 1024 657 #endif 658 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; 659 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; 660 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout; 661 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED]; 662 + table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED]; 663 + 664 + nf_conntrack_standalone_init_tcp_sysctl(net, table); 665 + nf_conntrack_standalone_init_sctp_sysctl(net, table); 666 + nf_conntrack_standalone_init_dccp_sysctl(net, table); 667 + nf_conntrack_standalone_init_gre_sysctl(net, table); 1025 668 1026 669 /* Don't export sysctls to unprivileged users */ 1027 670 if (net->user_ns != &init_user_ns) { ··· 1057 680 1058 681 out_unregister_netfilter: 1059 682 kfree(table); 1060 - out_kmemdup: 1061 683 return -ENOMEM; 1062 684 } 1063 685 ··· 1079 703 } 1080 704 #endif /* CONFIG_SYSCTL */ 1081 705 706 + static void nf_conntrack_fini_net(struct net *net) 707 + { 708 + if (enable_hooks) 709 + nf_ct_netns_put(net, NFPROTO_INET); 710 + 711 + nf_conntrack_standalone_fini_proc(net); 712 + nf_conntrack_standalone_fini_sysctl(net); 713 + } 714 + 1082 715 static int nf_conntrack_pernet_init(struct net *net) 1083 716 { 1084 717 int ret; 1085 718 1086 
- ret = nf_conntrack_init_net(net); 719 + net->ct.sysctl_checksum = 1; 720 + 721 + ret = nf_conntrack_standalone_init_sysctl(net); 1087 722 if (ret < 0) 1088 - goto out_init; 723 + return ret; 1089 724 1090 725 ret = nf_conntrack_standalone_init_proc(net); 1091 726 if (ret < 0) 1092 727 goto out_proc; 1093 728 1094 - net->ct.sysctl_checksum = 1; 1095 - net->ct.sysctl_log_invalid = 0; 1096 - ret = nf_conntrack_standalone_init_sysctl(net); 729 + ret = nf_conntrack_init_net(net); 1097 730 if (ret < 0) 1098 - goto out_sysctl; 731 + goto out_init_net; 732 + 733 + if (enable_hooks) { 734 + ret = nf_ct_netns_get(net, NFPROTO_INET); 735 + if (ret < 0) 736 + goto out_hooks; 737 + } 1099 738 1100 739 return 0; 1101 740 1102 - out_sysctl: 741 + out_hooks: 742 + nf_conntrack_cleanup_net(net); 743 + out_init_net: 1103 744 nf_conntrack_standalone_fini_proc(net); 1104 745 out_proc: 1105 - nf_conntrack_cleanup_net(net); 1106 - out_init: 746 + nf_conntrack_standalone_fini_sysctl(net); 1107 747 return ret; 1108 748 } 1109 749 ··· 1127 735 { 1128 736 struct net *net; 1129 737 1130 - list_for_each_entry(net, net_exit_list, exit_list) { 1131 - nf_conntrack_standalone_fini_sysctl(net); 1132 - nf_conntrack_standalone_fini_proc(net); 1133 - } 738 + list_for_each_entry(net, net_exit_list, exit_list) 739 + nf_conntrack_fini_net(net); 740 + 1134 741 nf_conntrack_cleanup_net_list(net_exit_list); 1135 742 } 1136 743
+1 -1
net/netfilter/nf_flow_table_core.c
··· 121 121 if (l4num == IPPROTO_TCP) 122 122 flow_offload_fixup_tcp(&ct->proto.tcp); 123 123 124 - l4proto = __nf_ct_l4proto_find(l4num); 124 + l4proto = nf_ct_l4proto_find(l4num); 125 125 if (!l4proto) 126 126 return; 127 127
+7 -8
net/netfilter/nf_nat_core.c
··· 146 146 } 147 147 148 148 /* Is this tuple already taken? (not by us) */ 149 - int 149 + static int 150 150 nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, 151 151 const struct nf_conn *ignored_conntrack) 152 152 { ··· 158 158 */ 159 159 struct nf_conntrack_tuple reply; 160 160 161 - nf_ct_invert_tuplepr(&reply, tuple); 161 + nf_ct_invert_tuple(&reply, tuple); 162 162 return nf_conntrack_tuple_taken(&reply, ignored_conntrack); 163 163 } 164 - EXPORT_SYMBOL(nf_nat_used_tuple); 165 164 166 165 static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t, 167 166 const struct nf_nat_range2 *range) ··· 252 253 net_eq(net, nf_ct_net(ct)) && 253 254 nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) { 254 255 /* Copy source part from reply tuple. */ 255 - nf_ct_invert_tuplepr(result, 256 + nf_ct_invert_tuple(result, 256 257 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 257 258 result->dst = tuple->dst; 258 259 ··· 559 560 * manipulations (future optimization: if num_manips == 0, 560 561 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) 561 562 */ 562 - nf_ct_invert_tuplepr(&curr_tuple, 563 - &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 563 + nf_ct_invert_tuple(&curr_tuple, 564 + &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 564 565 565 566 get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype); 566 567 ··· 568 569 struct nf_conntrack_tuple reply; 569 570 570 571 /* Alter conntrack table so will recognize replies. */ 571 - nf_ct_invert_tuplepr(&reply, &new_tuple); 572 + nf_ct_invert_tuple(&reply, &new_tuple); 572 573 nf_conntrack_alter_reply(ct, &reply); 573 574 574 575 /* Non-atomic: we own this at the moment. */ ··· 639 640 struct nf_conntrack_tuple target; 640 641 641 642 /* We are aiming to look like inverse of other direction. 
*/ 642 - nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 643 + nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); 643 644 644 645 l3proto = __nf_nat_l3proto_find(target.src.l3num); 645 646 if (!l3proto->manip_pkt(skb, 0, &target, mtype))
+116 -23
net/netfilter/nf_tables_api.c
··· 37 37 NFT_VALIDATE_DO, 38 38 }; 39 39 40 + static struct rhltable nft_objname_ht; 41 + 40 42 static u32 nft_chain_hash(const void *data, u32 len, u32 seed); 41 43 static u32 nft_chain_hash_obj(const void *data, u32 len, u32 seed); 42 44 static int nft_chain_hash_cmp(struct rhashtable_compare_arg *, const void *); 45 + 46 + static u32 nft_objname_hash(const void *data, u32 len, u32 seed); 47 + static u32 nft_objname_hash_obj(const void *data, u32 len, u32 seed); 48 + static int nft_objname_hash_cmp(struct rhashtable_compare_arg *, const void *); 43 49 44 50 static const struct rhashtable_params nft_chain_ht_params = { 45 51 .head_offset = offsetof(struct nft_chain, rhlhead), ··· 54 48 .obj_hashfn = nft_chain_hash_obj, 55 49 .obj_cmpfn = nft_chain_hash_cmp, 56 50 .locks_mul = 1, 51 + .automatic_shrinking = true, 52 + }; 53 + 54 + static const struct rhashtable_params nft_objname_ht_params = { 55 + .head_offset = offsetof(struct nft_object, rhlhead), 56 + .key_offset = offsetof(struct nft_object, key), 57 + .hashfn = nft_objname_hash, 58 + .obj_hashfn = nft_objname_hash_obj, 59 + .obj_cmpfn = nft_objname_hash_cmp, 57 60 .automatic_shrinking = true, 58 61 }; 59 62 ··· 829 814 return strcmp(chain->name, name); 830 815 } 831 816 817 + static u32 nft_objname_hash(const void *data, u32 len, u32 seed) 818 + { 819 + const struct nft_object_hash_key *k = data; 820 + 821 + seed ^= hash_ptr(k->table, 32); 822 + 823 + return jhash(k->name, strlen(k->name), seed); 824 + } 825 + 826 + static u32 nft_objname_hash_obj(const void *data, u32 len, u32 seed) 827 + { 828 + const struct nft_object *obj = data; 829 + 830 + return nft_objname_hash(&obj->key, 0, seed); 831 + } 832 + 833 + static int nft_objname_hash_cmp(struct rhashtable_compare_arg *arg, 834 + const void *ptr) 835 + { 836 + const struct nft_object_hash_key *k = arg->key; 837 + const struct nft_object *obj = ptr; 838 + 839 + if (obj->key.table != k->table) 840 + return -1; 841 + 842 + return strcmp(obj->key.name, 
k->name); 843 + } 844 + 832 845 static int nf_tables_newtable(struct net *net, struct sock *nlsk, 833 846 struct sk_buff *skb, const struct nlmsghdr *nlh, 834 847 const struct nlattr * const nla[], ··· 1113 1070 return ERR_PTR(-ENOENT); 1114 1071 } 1115 1072 1116 - static bool lockdep_commit_lock_is_held(struct net *net) 1073 + static bool lockdep_commit_lock_is_held(const struct net *net) 1117 1074 { 1118 1075 #ifdef CONFIG_PROVE_LOCKING 1119 1076 return lockdep_is_held(&net->nft.commit_mutex); ··· 2608 2565 return 0; 2609 2566 } 2610 2567 2568 + static struct nft_rule *nft_rule_lookup_byid(const struct net *net, 2569 + const struct nlattr *nla); 2570 + 2611 2571 #define NFT_RULE_MAXEXPRS 128 2612 2572 2613 2573 static int nf_tables_newrule(struct net *net, struct sock *nlsk, ··· 2678 2632 old_rule = __nft_rule_lookup(chain, pos_handle); 2679 2633 if (IS_ERR(old_rule)) { 2680 2634 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]); 2635 + return PTR_ERR(old_rule); 2636 + } 2637 + } else if (nla[NFTA_RULE_POSITION_ID]) { 2638 + old_rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_POSITION_ID]); 2639 + if (IS_ERR(old_rule)) { 2640 + NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION_ID]); 2681 2641 return PTR_ERR(old_rule); 2682 2642 } 2683 2643 } ··· 3903 3851 3904 3852 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) && 3905 3853 nla_put_string(skb, NFTA_SET_ELEM_OBJREF, 3906 - (*nft_set_ext_obj(ext))->name) < 0) 3854 + (*nft_set_ext_obj(ext))->key.name) < 0) 3907 3855 goto nla_put_failure; 3908 3856 3909 3857 if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && ··· 4436 4384 err = -EINVAL; 4437 4385 goto err2; 4438 4386 } 4439 - obj = nft_obj_lookup(ctx->table, nla[NFTA_SET_ELEM_OBJREF], 4387 + obj = nft_obj_lookup(ctx->net, ctx->table, 4388 + nla[NFTA_SET_ELEM_OBJREF], 4440 4389 set->objtype, genmask); 4441 4390 if (IS_ERR(obj)) { 4442 4391 err = PTR_ERR(obj); ··· 4872 4819 } 4873 4820 EXPORT_SYMBOL_GPL(nft_unregister_obj); 4874 4821 4875 - struct nft_object 
*nft_obj_lookup(const struct nft_table *table, 4822 + struct nft_object *nft_obj_lookup(const struct net *net, 4823 + const struct nft_table *table, 4876 4824 const struct nlattr *nla, u32 objtype, 4877 4825 u8 genmask) 4878 4826 { 4827 + struct nft_object_hash_key k = { .table = table }; 4828 + char search[NFT_OBJ_MAXNAMELEN]; 4829 + struct rhlist_head *tmp, *list; 4879 4830 struct nft_object *obj; 4880 4831 4881 - list_for_each_entry_rcu(obj, &table->objects, list) { 4882 - if (!nla_strcmp(nla, obj->name) && 4883 - objtype == obj->ops->type->type && 4884 - nft_active_genmask(obj, genmask)) 4832 + nla_strlcpy(search, nla, sizeof(search)); 4833 + k.name = search; 4834 + 4835 + WARN_ON_ONCE(!rcu_read_lock_held() && 4836 + !lockdep_commit_lock_is_held(net)); 4837 + 4838 + rcu_read_lock(); 4839 + list = rhltable_lookup(&nft_objname_ht, &k, nft_objname_ht_params); 4840 + if (!list) 4841 + goto out; 4842 + 4843 + rhl_for_each_entry_rcu(obj, tmp, list, rhlhead) { 4844 + if (objtype == obj->ops->type->type && 4845 + nft_active_genmask(obj, genmask)) { 4846 + rcu_read_unlock(); 4885 4847 return obj; 4848 + } 4886 4849 } 4850 + out: 4851 + rcu_read_unlock(); 4887 4852 return ERR_PTR(-ENOENT); 4888 4853 } 4889 4854 EXPORT_SYMBOL_GPL(nft_obj_lookup); ··· 5059 4988 } 5060 4989 5061 4990 objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); 5062 - obj = nft_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask); 4991 + obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask); 5063 4992 if (IS_ERR(obj)) { 5064 4993 err = PTR_ERR(obj); 5065 4994 if (err != -ENOENT) { ··· 5085 5014 err = PTR_ERR(obj); 5086 5015 goto err1; 5087 5016 } 5088 - obj->table = table; 5017 + obj->key.table = table; 5089 5018 obj->handle = nf_tables_alloc_handle(table); 5090 5019 5091 - obj->name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL); 5092 - if (!obj->name) { 5020 + obj->key.name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL); 5021 + if (!obj->key.name) { 5093 5022 err = -ENOMEM; 5094 5023 
goto err2; 5095 5024 } ··· 5098 5027 if (err < 0) 5099 5028 goto err3; 5100 5029 5030 + err = rhltable_insert(&nft_objname_ht, &obj->rhlhead, 5031 + nft_objname_ht_params); 5032 + if (err < 0) 5033 + goto err4; 5034 + 5101 5035 list_add_tail_rcu(&obj->list, &table->objects); 5102 5036 table->use++; 5103 5037 return 0; 5038 + err4: 5039 + /* queued in transaction log */ 5040 + INIT_LIST_HEAD(&obj->list); 5041 + return err; 5104 5042 err3: 5105 - kfree(obj->name); 5043 + kfree(obj->key.name); 5106 5044 err2: 5107 5045 if (obj->ops->destroy) 5108 5046 obj->ops->destroy(&ctx, obj); ··· 5140 5060 nfmsg->res_id = htons(net->nft.base_seq & 0xffff); 5141 5061 5142 5062 if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) || 5143 - nla_put_string(skb, NFTA_OBJ_NAME, obj->name) || 5063 + nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) || 5144 5064 nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) || 5145 5065 nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) || 5146 5066 nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset) || ··· 5295 5215 } 5296 5216 5297 5217 objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); 5298 - obj = nft_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask); 5218 + obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask); 5299 5219 if (IS_ERR(obj)) { 5300 5220 NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]); 5301 5221 return PTR_ERR(obj); ··· 5326 5246 obj->ops->destroy(ctx, obj); 5327 5247 5328 5248 module_put(obj->ops->type->owner); 5329 - kfree(obj->name); 5249 + kfree(obj->key.name); 5330 5250 kfree(obj); 5331 5251 } 5332 5252 ··· 5360 5280 obj = nft_obj_lookup_byhandle(table, attr, objtype, genmask); 5361 5281 } else { 5362 5282 attr = nla[NFTA_OBJ_NAME]; 5363 - obj = nft_obj_lookup(table, attr, objtype, genmask); 5283 + obj = nft_obj_lookup(net, table, attr, objtype, genmask); 5364 5284 } 5365 5285 5366 5286 if (IS_ERR(obj)) { ··· 5377 5297 return nft_delobj(&ctx, obj); 5378 5298 } 5379 5299 5380 - void nft_obj_notify(struct 
net *net, struct nft_table *table, 5300 + void nft_obj_notify(struct net *net, const struct nft_table *table, 5381 5301 struct nft_object *obj, u32 portid, u32 seq, int event, 5382 5302 int family, int report, gfp_t gfp) 5383 5303 { ··· 6484 6404 nf_tables_commit_chain_free_rules_old(g0); 6485 6405 } 6486 6406 6407 + static void nft_obj_del(struct nft_object *obj) 6408 + { 6409 + rhltable_remove(&nft_objname_ht, &obj->rhlhead, nft_objname_ht_params); 6410 + list_del_rcu(&obj->list); 6411 + } 6412 + 6487 6413 static void nft_chain_del(struct nft_chain *chain) 6488 6414 { 6489 6415 struct nft_table *table = chain->table; ··· 6666 6580 nft_trans_destroy(trans); 6667 6581 break; 6668 6582 case NFT_MSG_DELOBJ: 6669 - list_del_rcu(&nft_trans_obj(trans)->list); 6583 + nft_obj_del(nft_trans_obj(trans)); 6670 6584 nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans), 6671 6585 NFT_MSG_DELOBJ); 6672 6586 break; ··· 6802 6716 break; 6803 6717 case NFT_MSG_NEWOBJ: 6804 6718 trans->ctx.table->use--; 6805 - list_del_rcu(&nft_trans_obj(trans)->list); 6719 + nft_obj_del(nft_trans_obj(trans)); 6806 6720 break; 6807 6721 case NFT_MSG_DELOBJ: 6808 6722 trans->ctx.table->use++; ··· 7416 7330 nft_set_destroy(set); 7417 7331 } 7418 7332 list_for_each_entry_safe(obj, ne, &table->objects, list) { 7419 - list_del(&obj->list); 7333 + nft_obj_del(obj); 7420 7334 table->use--; 7421 7335 nft_obj_destroy(&ctx, obj); 7422 7336 } ··· 7478 7392 if (err < 0) 7479 7393 goto err3; 7480 7394 7481 - /* must be last */ 7482 - err = nfnetlink_subsys_register(&nf_tables_subsys); 7395 + err = rhltable_init(&nft_objname_ht, &nft_objname_ht_params); 7483 7396 if (err < 0) 7484 7397 goto err4; 7485 7398 7399 + /* must be last */ 7400 + err = nfnetlink_subsys_register(&nf_tables_subsys); 7401 + if (err < 0) 7402 + goto err5; 7403 + 7486 7404 return err; 7405 + err5: 7406 + rhltable_destroy(&nft_objname_ht); 7487 7407 err4: 7488 7408 unregister_netdevice_notifier(&nf_tables_flowtable_notifier); 7489 7409 
err3: ··· 7509 7417 unregister_pernet_subsys(&nf_tables_net_ops); 7510 7418 cancel_work_sync(&trans_destroy_work); 7511 7419 rcu_barrier(); 7420 + rhltable_destroy(&nft_objname_ht); 7512 7421 nf_tables_core_module_exit(); 7513 7422 } 7514 7423
+17 -6
net/netfilter/nf_tables_core.c
··· 124 124 struct nft_regs *regs, 125 125 struct nft_pktinfo *pkt) 126 126 { 127 + #ifdef CONFIG_RETPOLINE 127 128 unsigned long e = (unsigned long)expr->ops->eval; 129 + #define X(e, fun) \ 130 + do { if ((e) == (unsigned long)(fun)) \ 131 + return fun(expr, regs, pkt); } while (0) 128 132 129 - if (e == (unsigned long)nft_meta_get_eval) 130 - nft_meta_get_eval(expr, regs, pkt); 131 - else if (e == (unsigned long)nft_lookup_eval) 132 - nft_lookup_eval(expr, regs, pkt); 133 - else 134 - expr->ops->eval(expr, regs, pkt); 133 + X(e, nft_payload_eval); 134 + X(e, nft_cmp_eval); 135 + X(e, nft_meta_get_eval); 136 + X(e, nft_lookup_eval); 137 + X(e, nft_range_eval); 138 + X(e, nft_immediate_eval); 139 + X(e, nft_byteorder_eval); 140 + X(e, nft_dynset_eval); 141 + X(e, nft_rt_get_eval); 142 + X(e, nft_bitwise_eval); 143 + #undef X 144 + #endif /* CONFIG_RETPOLINE */ 145 + expr->ops->eval(expr, regs, pkt); 135 146 } 136 147 137 148 unsigned int
+2 -3
net/netfilter/nft_bitwise.c
··· 25 25 struct nft_data xor; 26 26 }; 27 27 28 - static void nft_bitwise_eval(const struct nft_expr *expr, 29 - struct nft_regs *regs, 30 - const struct nft_pktinfo *pkt) 28 + void nft_bitwise_eval(const struct nft_expr *expr, 29 + struct nft_regs *regs, const struct nft_pktinfo *pkt) 31 30 { 32 31 const struct nft_bitwise *priv = nft_expr_priv(expr); 33 32 const u32 *src = &regs->data[priv->sreg];
+3 -3
net/netfilter/nft_byteorder.c
··· 26 26 u8 size; 27 27 }; 28 28 29 - static void nft_byteorder_eval(const struct nft_expr *expr, 30 - struct nft_regs *regs, 31 - const struct nft_pktinfo *pkt) 29 + void nft_byteorder_eval(const struct nft_expr *expr, 30 + struct nft_regs *regs, 31 + const struct nft_pktinfo *pkt) 32 32 { 33 33 const struct nft_byteorder *priv = nft_expr_priv(expr); 34 34 u32 *src = &regs->data[priv->sreg];
+3 -3
net/netfilter/nft_cmp.c
··· 24 24 enum nft_cmp_ops op:8; 25 25 }; 26 26 27 - static void nft_cmp_eval(const struct nft_expr *expr, 28 - struct nft_regs *regs, 29 - const struct nft_pktinfo *pkt) 27 + void nft_cmp_eval(const struct nft_expr *expr, 28 + struct nft_regs *regs, 29 + const struct nft_pktinfo *pkt) 30 30 { 31 31 const struct nft_cmp_expr *priv = nft_expr_priv(expr); 32 32 int d;
+1 -1
net/netfilter/nft_counter.c
··· 104 104 nft_counter_do_destroy(priv); 105 105 } 106 106 107 - static void nft_counter_reset(struct nft_counter_percpu_priv __percpu *priv, 107 + static void nft_counter_reset(struct nft_counter_percpu_priv *priv, 108 108 struct nft_counter *total) 109 109 { 110 110 struct nft_counter *this_cpu;
+1 -3
net/netfilter/nft_ct.c
··· 870 870 l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]); 871 871 priv->l4proto = l4num; 872 872 873 - l4proto = nf_ct_l4proto_find_get(l4num); 873 + l4proto = nf_ct_l4proto_find(l4num); 874 874 875 875 if (l4proto->l4proto != l4num) { 876 876 ret = -EOPNOTSUPP; ··· 902 902 err_free_timeout: 903 903 kfree(timeout); 904 904 err_proto_put: 905 - nf_ct_l4proto_put(l4proto); 906 905 return ret; 907 906 } 908 907 ··· 912 913 struct nf_ct_timeout *timeout = priv->timeout; 913 914 914 915 nf_ct_untimeout(ctx->net, timeout); 915 - nf_ct_l4proto_put(timeout->l4proto); 916 916 nf_ct_netns_put(ctx->net, ctx->family); 917 917 kfree(priv->timeout); 918 918 }
+2 -3
net/netfilter/nft_dynset.c
··· 62 62 return NULL; 63 63 } 64 64 65 - static void nft_dynset_eval(const struct nft_expr *expr, 66 - struct nft_regs *regs, 67 - const struct nft_pktinfo *pkt) 65 + void nft_dynset_eval(const struct nft_expr *expr, 66 + struct nft_regs *regs, const struct nft_pktinfo *pkt) 68 67 { 69 68 const struct nft_dynset *priv = nft_expr_priv(expr); 70 69 struct nft_set *set = priv->set;
-121
net/netfilter/nft_hash.c
··· 25 25 u32 modulus; 26 26 u32 seed; 27 27 u32 offset; 28 - struct nft_set *map; 29 28 }; 30 29 31 30 static void nft_jhash_eval(const struct nft_expr *expr, ··· 41 42 regs->data[priv->dreg] = h + priv->offset; 42 43 } 43 44 44 - static void nft_jhash_map_eval(const struct nft_expr *expr, 45 - struct nft_regs *regs, 46 - const struct nft_pktinfo *pkt) 47 - { 48 - struct nft_jhash *priv = nft_expr_priv(expr); 49 - const void *data = &regs->data[priv->sreg]; 50 - const struct nft_set *map = priv->map; 51 - const struct nft_set_ext *ext; 52 - u32 result; 53 - bool found; 54 - 55 - result = reciprocal_scale(jhash(data, priv->len, priv->seed), 56 - priv->modulus) + priv->offset; 57 - 58 - found = map->ops->lookup(nft_net(pkt), map, &result, &ext); 59 - if (!found) 60 - return; 61 - 62 - nft_data_copy(&regs->data[priv->dreg], 63 - nft_set_ext_data(ext), map->dlen); 64 - } 65 - 66 45 struct nft_symhash { 67 46 enum nft_registers dreg:8; 68 47 u32 modulus; 69 48 u32 offset; 70 - struct nft_set *map; 71 49 }; 72 50 73 51 static void nft_symhash_eval(const struct nft_expr *expr, ··· 60 84 regs->data[priv->dreg] = h + priv->offset; 61 85 } 62 86 63 - static void nft_symhash_map_eval(const struct nft_expr *expr, 64 - struct nft_regs *regs, 65 - const struct nft_pktinfo *pkt) 66 - { 67 - struct nft_symhash *priv = nft_expr_priv(expr); 68 - struct sk_buff *skb = pkt->skb; 69 - const struct nft_set *map = priv->map; 70 - const struct nft_set_ext *ext; 71 - u32 result; 72 - bool found; 73 - 74 - result = reciprocal_scale(__skb_get_hash_symmetric(skb), 75 - priv->modulus) + priv->offset; 76 - 77 - found = map->ops->lookup(nft_net(pkt), map, &result, &ext); 78 - if (!found) 79 - return; 80 - 81 - nft_data_copy(&regs->data[priv->dreg], 82 - nft_set_ext_data(ext), map->dlen); 83 - } 84 - 85 87 static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { 86 88 [NFTA_HASH_SREG] = { .type = NLA_U32 }, 87 89 [NFTA_HASH_DREG] = { .type = NLA_U32 }, ··· 68 114 [NFTA_HASH_SEED] = 
{ .type = NLA_U32 }, 69 115 [NFTA_HASH_OFFSET] = { .type = NLA_U32 }, 70 116 [NFTA_HASH_TYPE] = { .type = NLA_U32 }, 71 - [NFTA_HASH_SET_NAME] = { .type = NLA_STRING, 72 - .len = NFT_SET_MAXNAMELEN - 1 }, 73 - [NFTA_HASH_SET_ID] = { .type = NLA_U32 }, 74 117 }; 75 118 76 119 static int nft_jhash_init(const struct nft_ctx *ctx, ··· 117 166 NFT_DATA_VALUE, sizeof(u32)); 118 167 } 119 168 120 - static int nft_jhash_map_init(const struct nft_ctx *ctx, 121 - const struct nft_expr *expr, 122 - const struct nlattr * const tb[]) 123 - { 124 - struct nft_jhash *priv = nft_expr_priv(expr); 125 - u8 genmask = nft_genmask_next(ctx->net); 126 - 127 - nft_jhash_init(ctx, expr, tb); 128 - priv->map = nft_set_lookup_global(ctx->net, ctx->table, 129 - tb[NFTA_HASH_SET_NAME], 130 - tb[NFTA_HASH_SET_ID], genmask); 131 - return PTR_ERR_OR_ZERO(priv->map); 132 - } 133 - 134 169 static int nft_symhash_init(const struct nft_ctx *ctx, 135 170 const struct nft_expr *expr, 136 171 const struct nlattr * const tb[]) ··· 141 204 142 205 return nft_validate_register_store(ctx, priv->dreg, NULL, 143 206 NFT_DATA_VALUE, sizeof(u32)); 144 - } 145 - 146 - static int nft_symhash_map_init(const struct nft_ctx *ctx, 147 - const struct nft_expr *expr, 148 - const struct nlattr * const tb[]) 149 - { 150 - struct nft_jhash *priv = nft_expr_priv(expr); 151 - u8 genmask = nft_genmask_next(ctx->net); 152 - 153 - nft_symhash_init(ctx, expr, tb); 154 - priv->map = nft_set_lookup_global(ctx->net, ctx->table, 155 - tb[NFTA_HASH_SET_NAME], 156 - tb[NFTA_HASH_SET_ID], genmask); 157 - return PTR_ERR_OR_ZERO(priv->map); 158 207 } 159 208 160 209 static int nft_jhash_dump(struct sk_buff *skb, ··· 170 247 return -1; 171 248 } 172 249 173 - static int nft_jhash_map_dump(struct sk_buff *skb, 174 - const struct nft_expr *expr) 175 - { 176 - const struct nft_jhash *priv = nft_expr_priv(expr); 177 - 178 - if (nft_jhash_dump(skb, expr) || 179 - nla_put_string(skb, NFTA_HASH_SET_NAME, priv->map->name)) 180 - return -1; 181 
- 182 - return 0; 183 - } 184 - 185 250 static int nft_symhash_dump(struct sk_buff *skb, 186 251 const struct nft_expr *expr) 187 252 { ··· 190 279 return -1; 191 280 } 192 281 193 - static int nft_symhash_map_dump(struct sk_buff *skb, 194 - const struct nft_expr *expr) 195 - { 196 - const struct nft_symhash *priv = nft_expr_priv(expr); 197 - 198 - if (nft_symhash_dump(skb, expr) || 199 - nla_put_string(skb, NFTA_HASH_SET_NAME, priv->map->name)) 200 - return -1; 201 - 202 - return 0; 203 - } 204 - 205 282 static struct nft_expr_type nft_hash_type; 206 283 static const struct nft_expr_ops nft_jhash_ops = { 207 284 .type = &nft_hash_type, ··· 199 300 .dump = nft_jhash_dump, 200 301 }; 201 302 202 - static const struct nft_expr_ops nft_jhash_map_ops = { 203 - .type = &nft_hash_type, 204 - .size = NFT_EXPR_SIZE(sizeof(struct nft_jhash)), 205 - .eval = nft_jhash_map_eval, 206 - .init = nft_jhash_map_init, 207 - .dump = nft_jhash_map_dump, 208 - }; 209 - 210 303 static const struct nft_expr_ops nft_symhash_ops = { 211 304 .type = &nft_hash_type, 212 305 .size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)), 213 306 .eval = nft_symhash_eval, 214 307 .init = nft_symhash_init, 215 308 .dump = nft_symhash_dump, 216 - }; 217 - 218 - static const struct nft_expr_ops nft_symhash_map_ops = { 219 - .type = &nft_hash_type, 220 - .size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)), 221 - .eval = nft_symhash_map_eval, 222 - .init = nft_symhash_map_init, 223 - .dump = nft_symhash_map_dump, 224 309 }; 225 310 226 311 static const struct nft_expr_ops * ··· 219 336 type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE])); 220 337 switch (type) { 221 338 case NFT_HASH_SYM: 222 - if (tb[NFTA_HASH_SET_NAME]) 223 - return &nft_symhash_map_ops; 224 339 return &nft_symhash_ops; 225 340 case NFT_HASH_JENKINS: 226 - if (tb[NFTA_HASH_SET_NAME]) 227 - return &nft_jhash_map_ops; 228 341 return &nft_jhash_ops; 229 342 default: 230 343 break;
+3 -3
net/netfilter/nft_immediate.c
··· 17 17 #include <net/netfilter/nf_tables_core.h> 18 18 #include <net/netfilter/nf_tables.h> 19 19 20 - static void nft_immediate_eval(const struct nft_expr *expr, 21 - struct nft_regs *regs, 22 - const struct nft_pktinfo *pkt) 20 + void nft_immediate_eval(const struct nft_expr *expr, 21 + struct nft_regs *regs, 22 + const struct nft_pktinfo *pkt) 23 23 { 24 24 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 25 25
+12
net/netfilter/nft_meta.c
··· 244 244 strncpy((char *)dest, p->br->dev->name, IFNAMSIZ); 245 245 return; 246 246 #endif 247 + case NFT_META_IIFKIND: 248 + if (in == NULL || in->rtnl_link_ops == NULL) 249 + goto err; 250 + strncpy((char *)dest, in->rtnl_link_ops->kind, IFNAMSIZ); 251 + break; 252 + case NFT_META_OIFKIND: 253 + if (out == NULL || out->rtnl_link_ops == NULL) 254 + goto err; 255 + strncpy((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ); 256 + break; 247 257 default: 248 258 WARN_ON(1); 249 259 goto err; ··· 350 340 break; 351 341 case NFT_META_IIFNAME: 352 342 case NFT_META_OIFNAME: 343 + case NFT_META_IIFKIND: 344 + case NFT_META_OIFKIND: 353 345 len = IFNAMSIZ; 354 346 break; 355 347 case NFT_META_PRANDOM:
+3 -2
net/netfilter/nft_objref.c
··· 38 38 return -EINVAL; 39 39 40 40 objtype = ntohl(nla_get_be32(tb[NFTA_OBJREF_IMM_TYPE])); 41 - obj = nft_obj_lookup(ctx->table, tb[NFTA_OBJREF_IMM_NAME], objtype, 41 + obj = nft_obj_lookup(ctx->net, ctx->table, 42 + tb[NFTA_OBJREF_IMM_NAME], objtype, 42 43 genmask); 43 44 if (IS_ERR(obj)) 44 45 return -ENOENT; ··· 54 53 { 55 54 const struct nft_object *obj = nft_objref_priv(expr); 56 55 57 - if (nla_put_string(skb, NFTA_OBJREF_IMM_NAME, obj->name) || 56 + if (nla_put_string(skb, NFTA_OBJREF_IMM_NAME, obj->key.name) || 58 57 nla_put_be32(skb, NFTA_OBJREF_IMM_TYPE, 59 58 htonl(obj->ops->type->type))) 60 59 goto nla_put_failure;
+3 -3
net/netfilter/nft_payload.c
··· 70 70 return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0; 71 71 } 72 72 73 - static void nft_payload_eval(const struct nft_expr *expr, 74 - struct nft_regs *regs, 75 - const struct nft_pktinfo *pkt) 73 + void nft_payload_eval(const struct nft_expr *expr, 74 + struct nft_regs *regs, 75 + const struct nft_pktinfo *pkt) 76 76 { 77 77 const struct nft_payload *priv = nft_expr_priv(expr); 78 78 const struct sk_buff *skb = pkt->skb;
+1 -1
net/netfilter/nft_quota.c
··· 61 61 62 62 if (overquota && 63 63 !test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags)) 64 - nft_obj_notify(nft_net(pkt), obj->table, obj, 0, 0, 64 + nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0, 65 65 NFT_MSG_NEWOBJ, nft_pf(pkt), 0, GFP_ATOMIC); 66 66 } 67 67
+2 -3
net/netfilter/nft_range.c
··· 23 23 enum nft_range_ops op:8; 24 24 }; 25 25 26 - static void nft_range_eval(const struct nft_expr *expr, 27 - struct nft_regs *regs, 28 - const struct nft_pktinfo *pkt) 26 + void nft_range_eval(const struct nft_expr *expr, 27 + struct nft_regs *regs, const struct nft_pktinfo *pkt) 29 28 { 30 29 const struct nft_range_expr *priv = nft_expr_priv(expr); 31 30 int d1, d2;
+3 -3
net/netfilter/nft_rt.c
··· 53 53 return mtu - minlen; 54 54 } 55 55 56 - static void nft_rt_get_eval(const struct nft_expr *expr, 57 - struct nft_regs *regs, 58 - const struct nft_pktinfo *pkt) 56 + void nft_rt_get_eval(const struct nft_expr *expr, 57 + struct nft_regs *regs, 58 + const struct nft_pktinfo *pkt) 59 59 { 60 60 const struct nft_rt *priv = nft_expr_priv(expr); 61 61 const struct sk_buff *skb = pkt->skb;
+19
net/netfilter/utils.c
··· 180 180 } 181 181 EXPORT_SYMBOL_GPL(nf_route); 182 182 183 + static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry) 184 + { 185 + #ifdef CONFIG_INET 186 + const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); 187 + 188 + if (entry->state.hook == NF_INET_LOCAL_OUT) { 189 + const struct iphdr *iph = ip_hdr(skb); 190 + 191 + if (!(iph->tos == rt_info->tos && 192 + skb->mark == rt_info->mark && 193 + iph->daddr == rt_info->daddr && 194 + iph->saddr == rt_info->saddr)) 195 + return ip_route_me_harder(entry->state.net, skb, 196 + RTN_UNSPEC); 197 + } 198 + #endif 199 + return 0; 200 + } 201 + 183 202 int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry) 184 203 { 185 204 const struct nf_ipv6_ops *v6ops;
+1 -1
net/netfilter/xt_CT.c
··· 159 159 /* Make sure the timeout policy matches any existing protocol tracker, 160 160 * otherwise default to generic. 161 161 */ 162 - l4proto = __nf_ct_l4proto_find(proto); 162 + l4proto = nf_ct_l4proto_find(proto); 163 163 if (timeout->l4proto->l4proto != l4proto->l4proto) { 164 164 ret = -EINVAL; 165 165 pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
+7 -2
net/netfilter/xt_physdev.c
··· 96 96 static int physdev_mt_check(const struct xt_mtchk_param *par) 97 97 { 98 98 const struct xt_physdev_info *info = par->matchinfo; 99 - 100 - br_netfilter_enable(); 99 + static bool brnf_probed __read_mostly; 101 100 102 101 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || 103 102 info->bitmask & ~XT_PHYSDEV_OP_MASK) ··· 110 111 if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) 111 112 return -EINVAL; 112 113 } 114 + 115 + if (!brnf_probed) { 116 + brnf_probed = true; 117 + request_module("br_netfilter"); 118 + } 119 + 113 120 return 0; 114 121 } 115 122
+1 -1
net/openvswitch/conntrack.c
··· 622 622 if (natted) { 623 623 struct nf_conntrack_tuple inverse; 624 624 625 - if (!nf_ct_invert_tuplepr(&inverse, &tuple)) { 625 + if (!nf_ct_invert_tuple(&inverse, &tuple)) { 626 626 pr_debug("ovs_ct_find_existing: Inversion failed!\n"); 627 627 return NULL; 628 628 }