Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next

Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

The following patchset contains Netfilter/IPVS updates for net-next:

1) Simplify nf_ct_get_tuple(), from Jackie Liu.

2) Add format to request_module() call, from Bill Wendling.

3) Add /proc/net/stats/nf_flowtable to monitor in-flight pending
hardware offload objects to be processed, from Vlad Buslov.

4) Missing rcu annotation and accessors in the netfilter tree,
from Florian Westphal.

5) Merge h323 conntrack helper nat hooks into single object,
also from Florian.

6) A batch of updates to fix sparse warnings treewide,
from Florian Westphal.

7) Move nft_cmp_fast_mask() to where it is used, from Florian.

8) Missing const in nf_nat_initialized(), from James Yonan.

9) Use bitmap API for Maglev IPVS scheduler, from Christophe Jaillet.

10) Use refcount_inc instead of _inc_not_zero in flowtable,
from Florian Westphal.

11) Remove pr_debug in xt_TPROXY, from Nathan Chancellor.

* git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next:
netfilter: xt_TPROXY: remove pr_debug invocations
netfilter: flowtable: prefer refcount_inc
netfilter: ipvs: Use the bitmap API to allocate bitmaps
netfilter: nf_nat: in nf_nat_initialized(), use const struct nf_conn *
netfilter: nf_tables: move nft_cmp_fast_mask to where its used
netfilter: nf_tables: use correct integer types
netfilter: nf_tables: add and use BE register load-store helpers
netfilter: nf_tables: use the correct get/put helpers
netfilter: x_tables: use correct integer types
netfilter: nfnetlink: add missing __be16 cast
netfilter: nft_set_bitmap: Fix spelling mistake
netfilter: h323: merge nat hook pointers into one
netfilter: nf_conntrack: use rcu accessors where needed
netfilter: nf_conntrack: add missing __rcu annotations
netfilter: nf_flow_table: count pending offload workqueue tasks
net/sched: act_ct: set 'net' pointer when creating new nf_flow_table
netfilter: conntrack: use correct format characters
netfilter: conntrack: use fallthrough to cleanup
====================

Link: https://lore.kernel.org/r/20220720230754.209053-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+518 -357
+56 -53
include/linux/netfilter/nf_conntrack_h323.h
··· 38 38 struct nf_conntrack_expect *this); 39 39 void nf_conntrack_q931_expect(struct nf_conn *new, 40 40 struct nf_conntrack_expect *this); 41 - extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff, 42 - unsigned char **data, int dataoff, 43 - H245_TransportAddress *taddr, 44 - union nf_inet_addr *addr, 45 - __be16 port); 46 - extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff, 47 - unsigned char **data, int dataoff, 48 - TransportAddress *taddr, 49 - union nf_inet_addr *addr, 50 - __be16 port); 51 - extern int (*set_sig_addr_hook) (struct sk_buff *skb, 52 - struct nf_conn *ct, 53 - enum ip_conntrack_info ctinfo, 54 - unsigned int protoff, unsigned char **data, 55 - TransportAddress *taddr, int count); 56 - extern int (*set_ras_addr_hook) (struct sk_buff *skb, 57 - struct nf_conn *ct, 58 - enum ip_conntrack_info ctinfo, 59 - unsigned int protoff, unsigned char **data, 60 - TransportAddress *taddr, int count); 61 - extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb, 62 - struct nf_conn *ct, 63 - enum ip_conntrack_info ctinfo, 64 - unsigned int protoff, unsigned char **data, 65 - int dataoff, 66 - H245_TransportAddress *taddr, 67 - __be16 port, __be16 rtp_port, 68 - struct nf_conntrack_expect *rtp_exp, 69 - struct nf_conntrack_expect *rtcp_exp); 70 - extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct, 71 - enum ip_conntrack_info ctinfo, 72 - unsigned int protoff, 41 + 42 + struct nfct_h323_nat_hooks { 43 + int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff, 73 44 unsigned char **data, int dataoff, 74 - H245_TransportAddress *taddr, __be16 port, 75 - struct nf_conntrack_expect *exp); 76 - extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct, 77 - enum ip_conntrack_info ctinfo, 78 - unsigned int protoff, 45 + H245_TransportAddress *taddr, 46 + union nf_inet_addr *addr, __be16 port); 47 + int (*set_h225_addr)(struct sk_buff *skb, unsigned int protoff, 79 48 unsigned char 
**data, int dataoff, 80 - TransportAddress *taddr, __be16 port, 81 - struct nf_conntrack_expect *exp); 82 - extern int (*nat_callforwarding_hook) (struct sk_buff *skb, 83 - struct nf_conn *ct, 84 - enum ip_conntrack_info ctinfo, 85 - unsigned int protoff, 86 - unsigned char **data, int dataoff, 87 - TransportAddress *taddr, 88 - __be16 port, 89 - struct nf_conntrack_expect *exp); 90 - extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, 91 - enum ip_conntrack_info ctinfo, 92 - unsigned int protoff, 93 - unsigned char **data, TransportAddress *taddr, 94 - int idx, __be16 port, 95 - struct nf_conntrack_expect *exp); 49 + TransportAddress *taddr, 50 + union nf_inet_addr *addr, __be16 port); 51 + int (*set_sig_addr)(struct sk_buff *skb, 52 + struct nf_conn *ct, 53 + enum ip_conntrack_info ctinfo, 54 + unsigned int protoff, unsigned char **data, 55 + TransportAddress *taddr, int count); 56 + int (*set_ras_addr)(struct sk_buff *skb, 57 + struct nf_conn *ct, 58 + enum ip_conntrack_info ctinfo, 59 + unsigned int protoff, unsigned char **data, 60 + TransportAddress *taddr, int count); 61 + int (*nat_rtp_rtcp)(struct sk_buff *skb, 62 + struct nf_conn *ct, 63 + enum ip_conntrack_info ctinfo, 64 + unsigned int protoff, 65 + unsigned char **data, int dataoff, 66 + H245_TransportAddress *taddr, 67 + __be16 port, __be16 rtp_port, 68 + struct nf_conntrack_expect *rtp_exp, 69 + struct nf_conntrack_expect *rtcp_exp); 70 + int (*nat_t120)(struct sk_buff *skb, 71 + struct nf_conn *ct, 72 + enum ip_conntrack_info ctinfo, 73 + unsigned int protoff, 74 + unsigned char **data, int dataoff, 75 + H245_TransportAddress *taddr, __be16 port, 76 + struct nf_conntrack_expect *exp); 77 + int (*nat_h245)(struct sk_buff *skb, 78 + struct nf_conn *ct, 79 + enum ip_conntrack_info ctinfo, 80 + unsigned int protoff, 81 + unsigned char **data, int dataoff, 82 + TransportAddress *taddr, __be16 port, 83 + struct nf_conntrack_expect *exp); 84 + int (*nat_callforwarding)(struct sk_buff 
*skb, 85 + struct nf_conn *ct, 86 + enum ip_conntrack_info ctinfo, 87 + unsigned int protoff, 88 + unsigned char **data, int dataoff, 89 + TransportAddress *taddr, __be16 port, 90 + struct nf_conntrack_expect *exp); 91 + int (*nat_q931)(struct sk_buff *skb, 92 + struct nf_conn *ct, 93 + enum ip_conntrack_info ctinfo, 94 + unsigned int protoff, 95 + unsigned char **data, TransportAddress *taddr, int idx, 96 + __be16 port, struct nf_conntrack_expect *exp); 97 + }; 98 + extern const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook; 96 99 97 100 #endif
+1 -1
include/linux/netfilter/nf_conntrack_sip.h
··· 164 164 unsigned int medialen, 165 165 union nf_inet_addr *rtp_addr); 166 166 }; 167 - extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks; 167 + extern const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks; 168 168 169 169 int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr, 170 170 unsigned int datalen, unsigned int *matchoff,
+6
include/net/net_namespace.h
··· 26 26 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 27 27 #include <net/netns/conntrack.h> 28 28 #endif 29 + #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) 30 + #include <net/netns/flow_table.h> 31 + #endif 29 32 #include <net/netns/nftables.h> 30 33 #include <net/netns/xfrm.h> 31 34 #include <net/netns/mpls.h> ··· 144 141 #endif 145 142 #if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE) 146 143 struct netns_nftables nft; 144 + #endif 145 + #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) 146 + struct netns_ft ft; 147 147 #endif 148 148 #endif 149 149 #ifdef CONFIG_WEXT_CORE
+1 -1
include/net/netfilter/nf_conntrack_timeout.h
··· 105 105 void (*timeout_put)(struct nf_ct_timeout *timeout); 106 106 }; 107 107 108 - extern const struct nf_ct_timeout_hooks *nf_ct_timeout_hook; 108 + extern const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook; 109 109 #endif 110 110 111 111 #endif /* _NF_CONNTRACK_TIMEOUT_H */
+21
include/net/netfilter/nf_flow_table.h
··· 335 335 return 0; 336 336 } 337 337 338 + #define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count) 339 + #define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count) 340 + #define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \ 341 + this_cpu_inc((net)->ft.stat->count) 342 + #define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count) \ 343 + this_cpu_dec((net)->ft.stat->count) 344 + 345 + #ifdef CONFIG_NF_FLOW_TABLE_PROCFS 346 + int nf_flow_table_init_proc(struct net *net); 347 + void nf_flow_table_fini_proc(struct net *net); 348 + #else 349 + static inline int nf_flow_table_init_proc(struct net *net) 350 + { 351 + return 0; 352 + } 353 + 354 + static inline void nf_flow_table_fini_proc(struct net *net) 355 + { 356 + } 357 + #endif /* CONFIG_NF_FLOW_TABLE_PROCFS */ 358 + 338 359 #endif /* _NF_FLOW_TABLE_H */
+1 -1
include/net/netfilter/nf_nat.h
··· 104 104 nf_nat_inet_fn(void *priv, struct sk_buff *skb, 105 105 const struct nf_hook_state *state); 106 106 107 - static inline int nf_nat_initialized(struct nf_conn *ct, 107 + static inline int nf_nat_initialized(const struct nf_conn *ct, 108 108 enum nf_nat_manip_type manip) 109 109 { 110 110 if (manip == NF_NAT_MANIP_SRC)
+15
include/net/netfilter/nf_tables.h
··· 157 157 *(u16 *)dreg = val; 158 158 } 159 159 160 + static inline void nft_reg_store_be16(u32 *dreg, __be16 val) 161 + { 162 + nft_reg_store16(dreg, (__force __u16)val); 163 + } 164 + 160 165 static inline u16 nft_reg_load16(const u32 *sreg) 161 166 { 162 167 return *(u16 *)sreg; 168 + } 169 + 170 + static inline __be16 nft_reg_load_be16(const u32 *sreg) 171 + { 172 + return (__force __be16)nft_reg_load16(sreg); 173 + } 174 + 175 + static inline __be32 nft_reg_load_be32(const u32 *sreg) 176 + { 177 + return *(__force __be32 *)sreg; 163 178 } 164 179 165 180 static inline void nft_reg_store64(u32 *dreg, u64 val)
-10
include/net/netfilter/nf_tables_core.h
··· 56 56 u8 dlen; 57 57 }; 58 58 59 - /* Calculate the mask for the nft_cmp_fast expression. On big endian the 60 - * mask needs to include the *upper* bytes when interpreting that data as 61 - * something smaller than the full u32, therefore a cpu_to_le32 is done. 62 - */ 63 - static inline u32 nft_cmp_fast_mask(unsigned int len) 64 - { 65 - return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr, 66 - data) * BITS_PER_BYTE - len)); 67 - } 68 - 69 59 extern const struct nft_expr_ops nft_cmp_fast_ops; 70 60 extern const struct nft_expr_ops nft_cmp16_fast_ops; 71 61
+14
include/net/netns/flow_table.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __NETNS_FLOW_TABLE_H 3 + #define __NETNS_FLOW_TABLE_H 4 + 5 + struct nf_flow_table_stat { 6 + unsigned int count_wq_add; 7 + unsigned int count_wq_del; 8 + unsigned int count_wq_stats; 9 + }; 10 + 11 + struct netns_ft { 12 + struct nf_flow_table_stat __percpu *stat; 13 + }; 14 + #endif
+1 -1
net/bridge/netfilter/nft_meta_bridge.c
··· 53 53 goto err; 54 54 55 55 br_vlan_get_proto(br_dev, &p_proto); 56 - nft_reg_store16(dest, htons(p_proto)); 56 + nft_reg_store_be16(dest, htons(p_proto)); 57 57 return; 58 58 } 59 59 default:
+14 -28
net/ipv4/netfilter/nf_nat_h323.c
··· 579 579 .expectfn = ip_nat_callforwarding_expect, 580 580 }; 581 581 582 + static const struct nfct_h323_nat_hooks nathooks = { 583 + .set_h245_addr = set_h245_addr, 584 + .set_h225_addr = set_h225_addr, 585 + .set_sig_addr = set_sig_addr, 586 + .set_ras_addr = set_ras_addr, 587 + .nat_rtp_rtcp = nat_rtp_rtcp, 588 + .nat_t120 = nat_t120, 589 + .nat_h245 = nat_h245, 590 + .nat_callforwarding = nat_callforwarding, 591 + .nat_q931 = nat_q931, 592 + }; 593 + 582 594 /****************************************************************************/ 583 595 static int __init nf_nat_h323_init(void) 584 596 { 585 - BUG_ON(set_h245_addr_hook != NULL); 586 - BUG_ON(set_h225_addr_hook != NULL); 587 - BUG_ON(set_sig_addr_hook != NULL); 588 - BUG_ON(set_ras_addr_hook != NULL); 589 - BUG_ON(nat_rtp_rtcp_hook != NULL); 590 - BUG_ON(nat_t120_hook != NULL); 591 - BUG_ON(nat_h245_hook != NULL); 592 - BUG_ON(nat_callforwarding_hook != NULL); 593 - BUG_ON(nat_q931_hook != NULL); 594 - 595 - RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr); 596 - RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr); 597 - RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr); 598 - RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr); 599 - RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp); 600 - RCU_INIT_POINTER(nat_t120_hook, nat_t120); 601 - RCU_INIT_POINTER(nat_h245_hook, nat_h245); 602 - RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding); 603 - RCU_INIT_POINTER(nat_q931_hook, nat_q931); 597 + RCU_INIT_POINTER(nfct_h323_nat_hook, &nathooks); 604 598 nf_ct_helper_expectfn_register(&q931_nat); 605 599 nf_ct_helper_expectfn_register(&callforwarding_nat); 606 600 return 0; ··· 603 609 /****************************************************************************/ 604 610 static void __exit nf_nat_h323_fini(void) 605 611 { 606 - RCU_INIT_POINTER(set_h245_addr_hook, NULL); 607 - RCU_INIT_POINTER(set_h225_addr_hook, NULL); 608 - RCU_INIT_POINTER(set_sig_addr_hook, NULL); 609 - 
RCU_INIT_POINTER(set_ras_addr_hook, NULL); 610 - RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL); 611 - RCU_INIT_POINTER(nat_t120_hook, NULL); 612 - RCU_INIT_POINTER(nat_h245_hook, NULL); 613 - RCU_INIT_POINTER(nat_callforwarding_hook, NULL); 614 - RCU_INIT_POINTER(nat_q931_hook, NULL); 612 + RCU_INIT_POINTER(nfct_h323_nat_hook, NULL); 615 613 nf_ct_helper_expectfn_unregister(&q931_nat); 616 614 nf_ct_helper_expectfn_unregister(&callforwarding_nat); 617 615 synchronize_rcu();
+9
net/netfilter/Kconfig
··· 734 734 735 735 To compile it as a module, choose M here. 736 736 737 + config NF_FLOW_TABLE_PROCFS 738 + bool "Supply flow table statistics in procfs" 739 + default y 740 + depends on PROC_FS 741 + depends on SYSCTL 742 + help 743 + This option enables for the flow table offload statistics 744 + to be shown in procfs under net/netfilter/nf_flowtable. 745 + 737 746 config NETFILTER_XTABLES 738 747 tristate "Netfilter Xtables support (required for ip_tables)" 739 748 default m if NETFILTER_ADVANCED=n
+1
net/netfilter/Makefile
··· 128 128 obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o 129 129 nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \ 130 130 nf_flow_table_offload.o 131 + nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o 131 132 132 133 obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o 133 134
+2 -3
net/netfilter/ipvs/ip_vs_mh.c
··· 174 174 return 0; 175 175 } 176 176 177 - table = kcalloc(BITS_TO_LONGS(IP_VS_MH_TAB_SIZE), 178 - sizeof(unsigned long), GFP_KERNEL); 177 + table = bitmap_zalloc(IP_VS_MH_TAB_SIZE, GFP_KERNEL); 179 178 if (!table) 180 179 return -ENOMEM; 181 180 ··· 226 227 } 227 228 228 229 out: 229 - kfree(table); 230 + bitmap_free(table); 230 231 return 0; 231 232 } 232 233
+5 -1
net/netfilter/nf_conntrack_broadcast.c
··· 20 20 enum ip_conntrack_info ctinfo, 21 21 unsigned int timeout) 22 22 { 23 + const struct nf_conntrack_helper *helper; 23 24 struct nf_conntrack_expect *exp; 24 25 struct iphdr *iph = ip_hdr(skb); 25 26 struct rtable *rt = skb_rtable(skb); ··· 59 58 goto out; 60 59 61 60 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; 62 - exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port; 61 + 62 + helper = rcu_dereference(help->helper); 63 + if (helper) 64 + exp->tuple.src.u.udp.port = helper->tuple.src.u.udp.port; 63 65 64 66 exp->mask.src.u3.ip = mask; 65 67 exp->mask.src.u.udp.port = htons(0xFFFF);
+3 -5
net/netfilter/nf_conntrack_core.c
··· 329 329 return gre_pkt_to_tuple(skb, dataoff, net, tuple); 330 330 #endif 331 331 case IPPROTO_TCP: 332 - case IPPROTO_UDP: /* fallthrough */ 333 - return nf_ct_get_tuple_ports(skb, dataoff, tuple); 332 + case IPPROTO_UDP: 334 333 #ifdef CONFIG_NF_CT_PROTO_UDPLITE 335 334 case IPPROTO_UDPLITE: 336 - return nf_ct_get_tuple_ports(skb, dataoff, tuple); 337 335 #endif 338 336 #ifdef CONFIG_NF_CT_PROTO_SCTP 339 337 case IPPROTO_SCTP: 340 - return nf_ct_get_tuple_ports(skb, dataoff, tuple); 341 338 #endif 342 339 #ifdef CONFIG_NF_CT_PROTO_DCCP 343 340 case IPPROTO_DCCP: 344 - return nf_ct_get_tuple_ports(skb, dataoff, tuple); 345 341 #endif 342 + /* fallthrough */ 343 + return nf_ct_get_tuple_ports(skb, dataoff, tuple); 346 344 default: 347 345 break; 348 346 }
+99 -161
net/netfilter/nf_conntrack_h323_main.c
··· 49 49 "if both endpoints are on different sides " 50 50 "(determined by routing information)"); 51 51 52 - /* Hooks for NAT */ 53 - int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff, 54 - unsigned char **data, int dataoff, 55 - H245_TransportAddress *taddr, 56 - union nf_inet_addr *addr, __be16 port) 57 - __read_mostly; 58 - int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff, 59 - unsigned char **data, int dataoff, 60 - TransportAddress *taddr, 61 - union nf_inet_addr *addr, __be16 port) 62 - __read_mostly; 63 - int (*set_sig_addr_hook) (struct sk_buff *skb, 64 - struct nf_conn *ct, 65 - enum ip_conntrack_info ctinfo, 66 - unsigned int protoff, unsigned char **data, 67 - TransportAddress *taddr, int count) __read_mostly; 68 - int (*set_ras_addr_hook) (struct sk_buff *skb, 69 - struct nf_conn *ct, 70 - enum ip_conntrack_info ctinfo, 71 - unsigned int protoff, unsigned char **data, 72 - TransportAddress *taddr, int count) __read_mostly; 73 - int (*nat_rtp_rtcp_hook) (struct sk_buff *skb, 74 - struct nf_conn *ct, 75 - enum ip_conntrack_info ctinfo, 76 - unsigned int protoff, 77 - unsigned char **data, int dataoff, 78 - H245_TransportAddress *taddr, 79 - __be16 port, __be16 rtp_port, 80 - struct nf_conntrack_expect *rtp_exp, 81 - struct nf_conntrack_expect *rtcp_exp) __read_mostly; 82 - int (*nat_t120_hook) (struct sk_buff *skb, 83 - struct nf_conn *ct, 84 - enum ip_conntrack_info ctinfo, 85 - unsigned int protoff, 86 - unsigned char **data, int dataoff, 87 - H245_TransportAddress *taddr, __be16 port, 88 - struct nf_conntrack_expect *exp) __read_mostly; 89 - int (*nat_h245_hook) (struct sk_buff *skb, 90 - struct nf_conn *ct, 91 - enum ip_conntrack_info ctinfo, 92 - unsigned int protoff, 93 - unsigned char **data, int dataoff, 94 - TransportAddress *taddr, __be16 port, 95 - struct nf_conntrack_expect *exp) __read_mostly; 96 - int (*nat_callforwarding_hook) (struct sk_buff *skb, 97 - struct nf_conn *ct, 98 - enum ip_conntrack_info 
ctinfo, 99 - unsigned int protoff, 100 - unsigned char **data, int dataoff, 101 - TransportAddress *taddr, __be16 port, 102 - struct nf_conntrack_expect *exp) __read_mostly; 103 - int (*nat_q931_hook) (struct sk_buff *skb, 104 - struct nf_conn *ct, 105 - enum ip_conntrack_info ctinfo, 106 - unsigned int protoff, 107 - unsigned char **data, TransportAddress *taddr, int idx, 108 - __be16 port, struct nf_conntrack_expect *exp) 109 - __read_mostly; 52 + const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook __read_mostly; 53 + EXPORT_SYMBOL_GPL(nfct_h323_nat_hook); 110 54 111 55 static DEFINE_SPINLOCK(nf_h323_lock); 112 56 static char *h323_buffer; ··· 203 259 unsigned char **data, int dataoff, 204 260 H245_TransportAddress *taddr) 205 261 { 262 + const struct nfct_h323_nat_hooks *nathook; 206 263 int dir = CTINFO2DIR(ctinfo); 207 264 int ret = 0; 208 265 __be16 port; ··· 211 266 union nf_inet_addr addr; 212 267 struct nf_conntrack_expect *rtp_exp; 213 268 struct nf_conntrack_expect *rtcp_exp; 214 - typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp; 215 269 216 270 /* Read RTP or RTCP address */ 217 271 if (!get_h245_addr(ct, *data, taddr, &addr, &port) || ··· 240 296 &ct->tuplehash[!dir].tuple.dst.u3, 241 297 IPPROTO_UDP, NULL, &rtcp_port); 242 298 299 + nathook = rcu_dereference(nfct_h323_nat_hook); 243 300 if (memcmp(&ct->tuplehash[dir].tuple.src.u3, 244 301 &ct->tuplehash[!dir].tuple.dst.u3, 245 302 sizeof(ct->tuplehash[dir].tuple.src.u3)) && 246 - (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) && 303 + nathook && 247 304 nf_ct_l3num(ct) == NFPROTO_IPV4 && 248 305 ct->status & IPS_NAT_MASK) { 249 306 /* NAT needed */ 250 - ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff, 251 - taddr, port, rtp_port, rtp_exp, rtcp_exp); 307 + ret = nathook->nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff, 308 + taddr, port, rtp_port, rtp_exp, rtcp_exp); 252 309 } else { /* Conntrack only */ 253 310 if (nf_ct_expect_related(rtp_exp, 0) == 0) { 254 311 if 
(nf_ct_expect_related(rtcp_exp, 0) == 0) { ··· 278 333 unsigned char **data, int dataoff, 279 334 H245_TransportAddress *taddr) 280 335 { 336 + const struct nfct_h323_nat_hooks *nathook; 281 337 int dir = CTINFO2DIR(ctinfo); 282 338 int ret = 0; 283 339 __be16 port; 284 340 union nf_inet_addr addr; 285 341 struct nf_conntrack_expect *exp; 286 - typeof(nat_t120_hook) nat_t120; 287 342 288 343 /* Read T.120 address */ 289 344 if (!get_h245_addr(ct, *data, taddr, &addr, &port) || ··· 300 355 IPPROTO_TCP, NULL, &port); 301 356 exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */ 302 357 358 + nathook = rcu_dereference(nfct_h323_nat_hook); 303 359 if (memcmp(&ct->tuplehash[dir].tuple.src.u3, 304 360 &ct->tuplehash[!dir].tuple.dst.u3, 305 361 sizeof(ct->tuplehash[dir].tuple.src.u3)) && 306 - (nat_t120 = rcu_dereference(nat_t120_hook)) && 362 + nathook && 307 363 nf_ct_l3num(ct) == NFPROTO_IPV4 && 308 364 ct->status & IPS_NAT_MASK) { 309 365 /* NAT needed */ 310 - ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr, 311 - port, exp); 366 + ret = nathook->nat_t120(skb, ct, ctinfo, protoff, data, 367 + dataoff, taddr, port, exp); 312 368 } else { /* Conntrack only */ 313 369 if (nf_ct_expect_related(exp, 0) == 0) { 314 370 pr_debug("nf_ct_h323: expect T.120 "); ··· 610 664 611 665 return 1; 612 666 } 667 + EXPORT_SYMBOL_GPL(get_h225_addr); 613 668 614 669 static int expect_h245(struct sk_buff *skb, struct nf_conn *ct, 615 670 enum ip_conntrack_info ctinfo, 616 671 unsigned int protoff, unsigned char **data, int dataoff, 617 672 TransportAddress *taddr) 618 673 { 674 + const struct nfct_h323_nat_hooks *nathook; 619 675 int dir = CTINFO2DIR(ctinfo); 620 676 int ret = 0; 621 677 __be16 port; 622 678 union nf_inet_addr addr; 623 679 struct nf_conntrack_expect *exp; 624 - typeof(nat_h245_hook) nat_h245; 625 680 626 681 /* Read h245Address */ 627 682 if (!get_h225_addr(ct, *data, taddr, &addr, &port) || ··· 639 692 IPPROTO_TCP, NULL, &port); 640 693 
exp->helper = &nf_conntrack_helper_h245; 641 694 695 + nathook = rcu_dereference(nfct_h323_nat_hook); 642 696 if (memcmp(&ct->tuplehash[dir].tuple.src.u3, 643 697 &ct->tuplehash[!dir].tuple.dst.u3, 644 698 sizeof(ct->tuplehash[dir].tuple.src.u3)) && 645 - (nat_h245 = rcu_dereference(nat_h245_hook)) && 699 + nathook && 646 700 nf_ct_l3num(ct) == NFPROTO_IPV4 && 647 701 ct->status & IPS_NAT_MASK) { 648 702 /* NAT needed */ 649 - ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr, 650 - port, exp); 703 + ret = nathook->nat_h245(skb, ct, ctinfo, protoff, data, 704 + dataoff, taddr, port, exp); 651 705 } else { /* Conntrack only */ 652 706 if (nf_ct_expect_related(exp, 0) == 0) { 653 707 pr_debug("nf_ct_q931: expect H.245 "); ··· 733 785 unsigned char **data, int dataoff, 734 786 TransportAddress *taddr) 735 787 { 788 + const struct nfct_h323_nat_hooks *nathook; 736 789 int dir = CTINFO2DIR(ctinfo); 737 790 int ret = 0; 738 791 __be16 port; 739 792 union nf_inet_addr addr; 740 793 struct nf_conntrack_expect *exp; 741 794 struct net *net = nf_ct_net(ct); 742 - typeof(nat_callforwarding_hook) nat_callforwarding; 743 795 744 796 /* Read alternativeAddress */ 745 797 if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0) ··· 763 815 IPPROTO_TCP, NULL, &port); 764 816 exp->helper = nf_conntrack_helper_q931; 765 817 818 + nathook = rcu_dereference(nfct_h323_nat_hook); 766 819 if (memcmp(&ct->tuplehash[dir].tuple.src.u3, 767 820 &ct->tuplehash[!dir].tuple.dst.u3, 768 821 sizeof(ct->tuplehash[dir].tuple.src.u3)) && 769 - (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) && 822 + nathook && 770 823 nf_ct_l3num(ct) == NFPROTO_IPV4 && 771 824 ct->status & IPS_NAT_MASK) { 772 825 /* Need NAT */ 773 - ret = nat_callforwarding(skb, ct, ctinfo, 774 - protoff, data, dataoff, 775 - taddr, port, exp); 826 + ret = nathook->nat_callforwarding(skb, ct, ctinfo, 827 + protoff, data, dataoff, 828 + taddr, port, exp); 776 829 } else { /* Conntrack only */ 777 
830 if (nf_ct_expect_related(exp, 0) == 0) { 778 831 pr_debug("nf_ct_q931: expect Call Forwarding "); ··· 793 844 unsigned char **data, int dataoff, 794 845 Setup_UUIE *setup) 795 846 { 847 + const struct nfct_h323_nat_hooks *nathook; 796 848 int dir = CTINFO2DIR(ctinfo); 797 849 int ret; 798 850 int i; 799 851 __be16 port; 800 852 union nf_inet_addr addr; 801 - typeof(set_h225_addr_hook) set_h225_addr; 802 853 803 854 pr_debug("nf_ct_q931: Setup\n"); 804 855 ··· 809 860 return -1; 810 861 } 811 862 812 - set_h225_addr = rcu_dereference(set_h225_addr_hook); 863 + nathook = rcu_dereference(nfct_h323_nat_hook); 813 864 if ((setup->options & eSetup_UUIE_destCallSignalAddress) && 814 - (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 && 865 + nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 815 866 ct->status & IPS_NAT_MASK && 816 867 get_h225_addr(ct, *data, &setup->destCallSignalAddress, 817 868 &addr, &port) && ··· 819 870 pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n", 820 871 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3, 821 872 ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); 822 - ret = set_h225_addr(skb, protoff, data, dataoff, 823 - &setup->destCallSignalAddress, 824 - &ct->tuplehash[!dir].tuple.src.u3, 825 - ct->tuplehash[!dir].tuple.src.u.tcp.port); 873 + ret = nathook->set_h225_addr(skb, protoff, data, dataoff, 874 + &setup->destCallSignalAddress, 875 + &ct->tuplehash[!dir].tuple.src.u3, 876 + ct->tuplehash[!dir].tuple.src.u.tcp.port); 826 877 if (ret < 0) 827 878 return -1; 828 879 } 829 880 830 881 if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && 831 - (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 && 882 + nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 832 883 ct->status & IPS_NAT_MASK && 833 884 get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, 834 885 &addr, &port) && ··· 836 887 pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n", 837 888 &addr, ntohs(port), 
&ct->tuplehash[!dir].tuple.dst.u3, 838 889 ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); 839 - ret = set_h225_addr(skb, protoff, data, dataoff, 840 - &setup->sourceCallSignalAddress, 841 - &ct->tuplehash[!dir].tuple.dst.u3, 842 - ct->tuplehash[!dir].tuple.dst.u.tcp.port); 890 + ret = nathook->set_h225_addr(skb, protoff, data, dataoff, 891 + &setup->sourceCallSignalAddress, 892 + &ct->tuplehash[!dir].tuple.dst.u3, 893 + ct->tuplehash[!dir].tuple.dst.u.tcp.port); 843 894 if (ret < 0) 844 895 return -1; 845 896 } ··· 1198 1249 TransportAddress *taddr, int count) 1199 1250 { 1200 1251 struct nf_ct_h323_master *info = nfct_help_data(ct); 1252 + const struct nfct_h323_nat_hooks *nathook; 1201 1253 int dir = CTINFO2DIR(ctinfo); 1202 1254 int ret = 0; 1203 1255 int i; 1204 1256 __be16 port; 1205 1257 union nf_inet_addr addr; 1206 1258 struct nf_conntrack_expect *exp; 1207 - typeof(nat_q931_hook) nat_q931; 1208 1259 1209 1260 /* Look for the first related address */ 1210 1261 for (i = 0; i < count; i++) { ··· 1228 1279 exp->helper = nf_conntrack_helper_q931; 1229 1280 exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */ 1230 1281 1231 - nat_q931 = rcu_dereference(nat_q931_hook); 1232 - if (nat_q931 && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1282 + nathook = rcu_dereference(nfct_h323_nat_hook); 1283 + if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1233 1284 ct->status & IPS_NAT_MASK) { /* Need NAT */ 1234 - ret = nat_q931(skb, ct, ctinfo, protoff, data, 1235 - taddr, i, port, exp); 1285 + ret = nathook->nat_q931(skb, ct, ctinfo, protoff, data, 1286 + taddr, i, port, exp); 1236 1287 } else { /* Conntrack only */ 1237 1288 if (nf_ct_expect_related(exp, 0) == 0) { 1238 1289 pr_debug("nf_ct_ras: expect Q.931 "); ··· 1254 1305 unsigned int protoff, 1255 1306 unsigned char **data, GatekeeperRequest *grq) 1256 1307 { 1257 - typeof(set_ras_addr_hook) set_ras_addr; 1308 + const struct nfct_h323_nat_hooks *nathook; 1258 1309 1259 1310 pr_debug("nf_ct_ras: GRQ\n"); 1260 
1311 1261 - set_ras_addr = rcu_dereference(set_ras_addr_hook); 1262 - if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1312 + nathook = rcu_dereference(nfct_h323_nat_hook); 1313 + if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1263 1314 ct->status & IPS_NAT_MASK) /* NATed */ 1264 - return set_ras_addr(skb, ct, ctinfo, protoff, data, 1265 - &grq->rasAddress, 1); 1315 + return nathook->set_ras_addr(skb, ct, ctinfo, protoff, data, 1316 + &grq->rasAddress, 1); 1266 1317 return 0; 1267 1318 } 1268 1319 ··· 1316 1367 unsigned char **data, RegistrationRequest *rrq) 1317 1368 { 1318 1369 struct nf_ct_h323_master *info = nfct_help_data(ct); 1370 + const struct nfct_h323_nat_hooks *nathook; 1319 1371 int ret; 1320 - typeof(set_ras_addr_hook) set_ras_addr; 1321 1372 1322 1373 pr_debug("nf_ct_ras: RRQ\n"); 1323 1374 ··· 1327 1378 if (ret < 0) 1328 1379 return -1; 1329 1380 1330 - set_ras_addr = rcu_dereference(set_ras_addr_hook); 1331 - if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1381 + nathook = rcu_dereference(nfct_h323_nat_hook); 1382 + if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1332 1383 ct->status & IPS_NAT_MASK) { 1333 - ret = set_ras_addr(skb, ct, ctinfo, protoff, data, 1334 - rrq->rasAddress.item, 1335 - rrq->rasAddress.count); 1384 + ret = nathook->set_ras_addr(skb, ct, ctinfo, protoff, data, 1385 + rrq->rasAddress.item, 1386 + rrq->rasAddress.count); 1336 1387 if (ret < 0) 1337 1388 return -1; 1338 1389 } ··· 1352 1403 unsigned char **data, RegistrationConfirm *rcf) 1353 1404 { 1354 1405 struct nf_ct_h323_master *info = nfct_help_data(ct); 1406 + const struct nfct_h323_nat_hooks *nathook; 1355 1407 int dir = CTINFO2DIR(ctinfo); 1356 1408 int ret; 1357 1409 struct nf_conntrack_expect *exp; 1358 - typeof(set_sig_addr_hook) set_sig_addr; 1359 1410 1360 1411 pr_debug("nf_ct_ras: RCF\n"); 1361 1412 1362 - set_sig_addr = rcu_dereference(set_sig_addr_hook); 1363 - if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1413 + nathook = 
rcu_dereference(nfct_h323_nat_hook); 1414 + if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1364 1415 ct->status & IPS_NAT_MASK) { 1365 - ret = set_sig_addr(skb, ct, ctinfo, protoff, data, 1366 - rcf->callSignalAddress.item, 1367 - rcf->callSignalAddress.count); 1416 + ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data, 1417 + rcf->callSignalAddress.item, 1418 + rcf->callSignalAddress.count); 1368 1419 if (ret < 0) 1369 1420 return -1; 1370 1421 } ··· 1403 1454 unsigned char **data, UnregistrationRequest *urq) 1404 1455 { 1405 1456 struct nf_ct_h323_master *info = nfct_help_data(ct); 1457 + const struct nfct_h323_nat_hooks *nathook; 1406 1458 int dir = CTINFO2DIR(ctinfo); 1407 1459 int ret; 1408 - typeof(set_sig_addr_hook) set_sig_addr; 1409 1460 1410 1461 pr_debug("nf_ct_ras: URQ\n"); 1411 1462 1412 - set_sig_addr = rcu_dereference(set_sig_addr_hook); 1413 - if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1463 + nathook = rcu_dereference(nfct_h323_nat_hook); 1464 + if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1414 1465 ct->status & IPS_NAT_MASK) { 1415 - ret = set_sig_addr(skb, ct, ctinfo, protoff, data, 1416 - urq->callSignalAddress.item, 1417 - urq->callSignalAddress.count); 1466 + ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data, 1467 + urq->callSignalAddress.item, 1468 + urq->callSignalAddress.count); 1418 1469 if (ret < 0) 1419 1470 return -1; 1420 1471 } ··· 1436 1487 unsigned char **data, AdmissionRequest *arq) 1437 1488 { 1438 1489 const struct nf_ct_h323_master *info = nfct_help_data(ct); 1490 + const struct nfct_h323_nat_hooks *nathook; 1439 1491 int dir = CTINFO2DIR(ctinfo); 1440 1492 __be16 port; 1441 1493 union nf_inet_addr addr; 1442 - typeof(set_h225_addr_hook) set_h225_addr; 1443 1494 1444 1495 pr_debug("nf_ct_ras: ARQ\n"); 1445 1496 1446 - set_h225_addr = rcu_dereference(set_h225_addr_hook); 1497 + nathook = rcu_dereference(nfct_h323_nat_hook); 1498 + if (!nathook) 1499 + return 0; 1500 + 1447 1501 if ((arq->options 
& eAdmissionRequest_destCallSignalAddress) && 1448 1502 get_h225_addr(ct, *data, &arq->destCallSignalAddress, 1449 1503 &addr, &port) && 1450 1504 !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && 1451 1505 port == info->sig_port[dir] && 1452 1506 nf_ct_l3num(ct) == NFPROTO_IPV4 && 1453 - set_h225_addr && ct->status & IPS_NAT_MASK) { 1507 + ct->status & IPS_NAT_MASK) { 1454 1508 /* Answering ARQ */ 1455 - return set_h225_addr(skb, protoff, data, 0, 1456 - &arq->destCallSignalAddress, 1457 - &ct->tuplehash[!dir].tuple.dst.u3, 1458 - info->sig_port[!dir]); 1509 + return nathook->set_h225_addr(skb, protoff, data, 0, 1510 + &arq->destCallSignalAddress, 1511 + &ct->tuplehash[!dir].tuple.dst.u3, 1512 + info->sig_port[!dir]); 1459 1513 } 1460 1514 1461 1515 if ((arq->options & eAdmissionRequest_srcCallSignalAddress) && 1462 1516 get_h225_addr(ct, *data, &arq->srcCallSignalAddress, 1463 1517 &addr, &port) && 1464 1518 !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && 1465 - set_h225_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1519 + nf_ct_l3num(ct) == NFPROTO_IPV4 && 1466 1520 ct->status & IPS_NAT_MASK) { 1467 1521 /* Calling ARQ */ 1468 - return set_h225_addr(skb, protoff, data, 0, 1469 - &arq->srcCallSignalAddress, 1470 - &ct->tuplehash[!dir].tuple.dst.u3, 1471 - port); 1522 + return nathook->set_h225_addr(skb, protoff, data, 0, 1523 + &arq->srcCallSignalAddress, 1524 + &ct->tuplehash[!dir].tuple.dst.u3, 1525 + port); 1472 1526 } 1473 1527 1474 1528 return 0; ··· 1487 1535 __be16 port; 1488 1536 union nf_inet_addr addr; 1489 1537 struct nf_conntrack_expect *exp; 1490 - typeof(set_sig_addr_hook) set_sig_addr; 1491 1538 1492 1539 pr_debug("nf_ct_ras: ACF\n"); 1493 1540 ··· 1495 1544 return 0; 1496 1545 1497 1546 if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) { 1547 + const struct nfct_h323_nat_hooks *nathook; 1548 + 1498 1549 /* Answering ACF */ 1499 - set_sig_addr = rcu_dereference(set_sig_addr_hook); 1500 - if 
(set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1550 + nathook = rcu_dereference(nfct_h323_nat_hook); 1551 + if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1501 1552 ct->status & IPS_NAT_MASK) 1502 - return set_sig_addr(skb, ct, ctinfo, protoff, data, 1503 - &acf->destCallSignalAddress, 1); 1553 + return nathook->set_sig_addr(skb, ct, ctinfo, protoff, 1554 + data, 1555 + &acf->destCallSignalAddress, 1); 1504 1556 return 0; 1505 1557 } 1506 1558 ··· 1532 1578 unsigned int protoff, 1533 1579 unsigned char **data, LocationRequest *lrq) 1534 1580 { 1535 - typeof(set_ras_addr_hook) set_ras_addr; 1581 + const struct nfct_h323_nat_hooks *nathook; 1536 1582 1537 1583 pr_debug("nf_ct_ras: LRQ\n"); 1538 1584 1539 - set_ras_addr = rcu_dereference(set_ras_addr_hook); 1540 - if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1585 + nathook = rcu_dereference(nfct_h323_nat_hook); 1586 + if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1541 1587 ct->status & IPS_NAT_MASK) 1542 - return set_ras_addr(skb, ct, ctinfo, protoff, data, 1543 - &lrq->replyAddress, 1); 1588 + return nathook->set_ras_addr(skb, ct, ctinfo, protoff, data, 1589 + &lrq->replyAddress, 1); 1544 1590 return 0; 1545 1591 } 1546 1592 ··· 1588 1634 unsigned int protoff, 1589 1635 unsigned char **data, InfoRequestResponse *irr) 1590 1636 { 1637 + const struct nfct_h323_nat_hooks *nathook; 1591 1638 int ret; 1592 - typeof(set_ras_addr_hook) set_ras_addr; 1593 - typeof(set_sig_addr_hook) set_sig_addr; 1594 1639 1595 1640 pr_debug("nf_ct_ras: IRR\n"); 1596 1641 1597 - set_ras_addr = rcu_dereference(set_ras_addr_hook); 1598 - if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1642 + nathook = rcu_dereference(nfct_h323_nat_hook); 1643 + if (nathook && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1599 1644 ct->status & IPS_NAT_MASK) { 1600 - ret = set_ras_addr(skb, ct, ctinfo, protoff, data, 1601 - &irr->rasAddress, 1); 1645 + ret = nathook->set_ras_addr(skb, ct, ctinfo, protoff, data, 1646 + &irr->rasAddress, 1); 
1602 1647 if (ret < 0) 1603 1648 return -1; 1604 - } 1605 1649 1606 - set_sig_addr = rcu_dereference(set_sig_addr_hook); 1607 - if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 && 1608 - ct->status & IPS_NAT_MASK) { 1609 - ret = set_sig_addr(skb, ct, ctinfo, protoff, data, 1610 - irr->callSignalAddress.item, 1611 - irr->callSignalAddress.count); 1650 + ret = nathook->set_sig_addr(skb, ct, ctinfo, protoff, data, 1651 + irr->callSignalAddress.item, 1652 + irr->callSignalAddress.count); 1612 1653 if (ret < 0) 1613 1654 return -1; 1614 1655 } ··· 1785 1836 1786 1837 module_init(nf_conntrack_h323_init); 1787 1838 module_exit(nf_conntrack_h323_fini); 1788 - 1789 - EXPORT_SYMBOL_GPL(get_h225_addr); 1790 - EXPORT_SYMBOL_GPL(set_h245_addr_hook); 1791 - EXPORT_SYMBOL_GPL(set_h225_addr_hook); 1792 - EXPORT_SYMBOL_GPL(set_sig_addr_hook); 1793 - EXPORT_SYMBOL_GPL(set_ras_addr_hook); 1794 - EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook); 1795 - EXPORT_SYMBOL_GPL(nat_t120_hook); 1796 - EXPORT_SYMBOL_GPL(nat_h245_hook); 1797 - EXPORT_SYMBOL_GPL(nat_callforwarding_hook); 1798 - EXPORT_SYMBOL_GPL(nat_q931_hook); 1799 1839 1800 1840 MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>"); 1801 1841 MODULE_DESCRIPTION("H.323 connection tracking helper");
+2 -2
net/netfilter/nf_conntrack_helper.c
··· 165 165 if (!nat) { 166 166 snprintf(mod_name, sizeof(mod_name), "%s", h->nat_mod_name); 167 167 rcu_read_unlock(); 168 - request_module(mod_name); 168 + request_module("%s", mod_name); 169 169 170 170 rcu_read_lock(); 171 171 nat = nf_conntrack_nat_helper_find(mod_name); ··· 249 249 if (tmpl != NULL) { 250 250 help = nfct_help(tmpl); 251 251 if (help != NULL) { 252 - helper = help->helper; 252 + helper = rcu_dereference(help->helper); 253 253 set_bit(IPS_HELPER_BIT, &ct->status); 254 254 } 255 255 }
+7 -2
net/netfilter/nf_conntrack_netlink.c
··· 2005 2005 } 2006 2006 2007 2007 if (help) { 2008 - if (help->helper == helper) { 2008 + if (rcu_access_pointer(help->helper) == helper) { 2009 2009 /* update private helper data if allowed. */ 2010 2010 if (helper->from_nlattr) 2011 2011 helper->from_nlattr(helpinfo, ct); ··· 3413 3413 3414 3414 static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data) 3415 3415 { 3416 + struct nf_conntrack_helper *helper; 3416 3417 const struct nf_conn_help *m_help; 3417 3418 const char *name = data; 3418 3419 3419 3420 m_help = nfct_help(exp->master); 3420 3421 3421 - return strcmp(m_help->helper->name, name) == 0; 3422 + helper = rcu_dereference(m_help->helper); 3423 + if (!helper) 3424 + return false; 3425 + 3426 + return strcmp(helper->name, name) == 0; 3422 3427 } 3423 3428 3424 3429 static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
+1 -1
net/netfilter/nf_conntrack_pptp.c
··· 45 45 46 46 static DEFINE_SPINLOCK(nf_pptp_lock); 47 47 48 - const struct nf_nat_pptp_hook *nf_nat_pptp_hook; 48 + const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook; 49 49 EXPORT_SYMBOL_GPL(nf_nat_pptp_hook); 50 50 51 51 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+7 -2
net/netfilter/nf_conntrack_sip.c
··· 60 60 MODULE_PARM_DESC(sip_external_media, "Expect Media streams between external " 61 61 "endpoints (default 0)"); 62 62 63 - const struct nf_nat_sip_hooks *nf_nat_sip_hooks; 63 + const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks; 64 64 EXPORT_SYMBOL_GPL(nf_nat_sip_hooks); 65 65 66 66 static int string_len(const struct nf_conn *ct, const char *dptr, ··· 1229 1229 struct nf_conntrack_expect *exp; 1230 1230 union nf_inet_addr *saddr, daddr; 1231 1231 const struct nf_nat_sip_hooks *hooks; 1232 + struct nf_conntrack_helper *helper; 1232 1233 __be16 port; 1233 1234 u8 proto; 1234 1235 unsigned int expires = 0; ··· 1290 1289 if (sip_direct_signalling) 1291 1290 saddr = &ct->tuplehash[!dir].tuple.src.u3; 1292 1291 1292 + helper = rcu_dereference(nfct_help(ct)->helper); 1293 + if (!helper) 1294 + return NF_DROP; 1295 + 1293 1296 nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct), 1294 1297 saddr, &daddr, proto, NULL, &port); 1295 1298 exp->timeout.expires = sip_timeout * HZ; 1296 - exp->helper = nfct_help(ct)->helper; 1299 + exp->helper = helper; 1297 1300 exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE; 1298 1301 1299 1302 hooks = rcu_dereference(nf_nat_sip_hooks);
+14 -4
net/netfilter/nf_conntrack_timeout.c
··· 22 22 #include <net/netfilter/nf_conntrack_l4proto.h> 23 23 #include <net/netfilter/nf_conntrack_timeout.h> 24 24 25 - const struct nf_ct_timeout_hooks *nf_ct_timeout_hook __read_mostly; 25 + const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook __read_mostly; 26 26 EXPORT_SYMBOL_GPL(nf_ct_timeout_hook); 27 27 28 28 static int untimeout(struct nf_conn *ct, void *timeout) 29 29 { 30 30 struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct); 31 31 32 - if (timeout_ext && (!timeout || timeout_ext->timeout == timeout)) 33 - RCU_INIT_POINTER(timeout_ext->timeout, NULL); 32 + if (timeout_ext) { 33 + const struct nf_ct_timeout *t; 34 + 35 + t = rcu_access_pointer(timeout_ext->timeout); 36 + 37 + if (!timeout || t == timeout) 38 + RCU_INIT_POINTER(timeout_ext->timeout, NULL); 39 + } 34 40 35 41 /* We are not intended to delete this conntrack. */ 36 42 return 0; ··· 133 127 if (h) { 134 128 timeout_ext = nf_ct_timeout_find(ct); 135 129 if (timeout_ext) { 136 - h->timeout_put(timeout_ext->timeout); 130 + struct nf_ct_timeout *t; 131 + 132 + t = rcu_dereference(timeout_ext->timeout); 133 + if (t) 134 + h->timeout_put(t); 137 135 RCU_INIT_POINTER(timeout_ext->timeout, NULL); 138 136 } 139 137 }
+64 -9
net/netfilter/nf_flow_table_core.c
··· 53 53 { 54 54 struct flow_offload *flow; 55 55 56 - if (unlikely(nf_ct_is_dying(ct) || 57 - !refcount_inc_not_zero(&ct->ct_general.use))) 56 + if (unlikely(nf_ct_is_dying(ct))) 58 57 return NULL; 59 58 60 59 flow = kzalloc(sizeof(*flow), GFP_ATOMIC); 61 60 if (!flow) 62 - goto err_ct_refcnt; 61 + return NULL; 63 62 63 + refcount_inc(&ct->ct_general.use); 64 64 flow->ct = ct; 65 65 66 66 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL); ··· 72 72 __set_bit(NF_FLOW_DNAT, &flow->flags); 73 73 74 74 return flow; 75 - 76 - err_ct_refcnt: 77 - nf_ct_put(ct); 78 - 79 - return NULL; 80 75 } 81 76 EXPORT_SYMBOL_GPL(flow_offload_alloc); 82 77 ··· 609 614 } 610 615 EXPORT_SYMBOL_GPL(nf_flow_table_free); 611 616 617 + static int nf_flow_table_init_net(struct net *net) 618 + { 619 + net->ft.stat = alloc_percpu(struct nf_flow_table_stat); 620 + return net->ft.stat ? 0 : -ENOMEM; 621 + } 622 + 623 + static void nf_flow_table_fini_net(struct net *net) 624 + { 625 + free_percpu(net->ft.stat); 626 + } 627 + 628 + static int nf_flow_table_pernet_init(struct net *net) 629 + { 630 + int ret; 631 + 632 + ret = nf_flow_table_init_net(net); 633 + if (ret < 0) 634 + return ret; 635 + 636 + ret = nf_flow_table_init_proc(net); 637 + if (ret < 0) 638 + goto out_proc; 639 + 640 + return 0; 641 + 642 + out_proc: 643 + nf_flow_table_fini_net(net); 644 + return ret; 645 + } 646 + 647 + static void nf_flow_table_pernet_exit(struct list_head *net_exit_list) 648 + { 649 + struct net *net; 650 + 651 + list_for_each_entry(net, net_exit_list, exit_list) { 652 + nf_flow_table_fini_proc(net); 653 + nf_flow_table_fini_net(net); 654 + } 655 + } 656 + 657 + static struct pernet_operations nf_flow_table_net_ops = { 658 + .init = nf_flow_table_pernet_init, 659 + .exit_batch = nf_flow_table_pernet_exit, 660 + }; 661 + 612 662 static int __init nf_flow_table_module_init(void) 613 663 { 614 - return nf_flow_table_offload_init(); 664 + int ret; 665 + 666 + ret = 
register_pernet_subsys(&nf_flow_table_net_ops); 667 + if (ret < 0) 668 + return ret; 669 + 670 + ret = nf_flow_table_offload_init(); 671 + if (ret) 672 + goto out_offload; 673 + 674 + return 0; 675 + 676 + out_offload: 677 + unregister_pernet_subsys(&nf_flow_table_net_ops); 678 + return ret; 615 679 } 616 680 617 681 static void __exit nf_flow_table_module_exit(void) 618 682 { 619 683 nf_flow_table_offload_exit(); 684 + unregister_pernet_subsys(&nf_flow_table_net_ops); 620 685 } 621 686 622 687 module_init(nf_flow_table_module_init);
+14 -3
net/netfilter/nf_flow_table_offload.c
··· 967 967 static void flow_offload_work_handler(struct work_struct *work) 968 968 { 969 969 struct flow_offload_work *offload; 970 + struct net *net; 970 971 971 972 offload = container_of(work, struct flow_offload_work, work); 973 + net = read_pnet(&offload->flowtable->net); 972 974 switch (offload->cmd) { 973 975 case FLOW_CLS_REPLACE: 974 976 flow_offload_work_add(offload); 977 + NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_add); 975 978 break; 976 979 case FLOW_CLS_DESTROY: 977 980 flow_offload_work_del(offload); 981 + NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_del); 978 982 break; 979 983 case FLOW_CLS_STATS: 980 984 flow_offload_work_stats(offload); 985 + NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_stats); 981 986 break; 982 987 default: 983 988 WARN_ON_ONCE(1); ··· 994 989 995 990 static void flow_offload_queue_work(struct flow_offload_work *offload) 996 991 { 997 - if (offload->cmd == FLOW_CLS_REPLACE) 992 + struct net *net = read_pnet(&offload->flowtable->net); 993 + 994 + if (offload->cmd == FLOW_CLS_REPLACE) { 995 + NF_FLOW_TABLE_STAT_INC(net, count_wq_add); 998 996 queue_work(nf_flow_offload_add_wq, &offload->work); 999 - else if (offload->cmd == FLOW_CLS_DESTROY) 997 + } else if (offload->cmd == FLOW_CLS_DESTROY) { 998 + NF_FLOW_TABLE_STAT_INC(net, count_wq_del); 1000 999 queue_work(nf_flow_offload_del_wq, &offload->work); 1001 - else 1000 + } else { 1001 + NF_FLOW_TABLE_STAT_INC(net, count_wq_stats); 1002 1002 queue_work(nf_flow_offload_stats_wq, &offload->work); 1003 + } 1003 1004 } 1004 1005 1005 1006 static struct flow_offload_work *
+80
net/netfilter/nf_flow_table_procfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #include <linux/kernel.h> 3 + #include <linux/proc_fs.h> 4 + #include <net/netfilter/nf_flow_table.h> 5 + 6 + static void *nf_flow_table_cpu_seq_start(struct seq_file *seq, loff_t *pos) 7 + { 8 + struct net *net = seq_file_net(seq); 9 + int cpu; 10 + 11 + if (*pos == 0) 12 + return SEQ_START_TOKEN; 13 + 14 + for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) { 15 + if (!cpu_possible(cpu)) 16 + continue; 17 + *pos = cpu + 1; 18 + return per_cpu_ptr(net->ft.stat, cpu); 19 + } 20 + 21 + return NULL; 22 + } 23 + 24 + static void *nf_flow_table_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) 25 + { 26 + struct net *net = seq_file_net(seq); 27 + int cpu; 28 + 29 + for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 30 + if (!cpu_possible(cpu)) 31 + continue; 32 + *pos = cpu + 1; 33 + return per_cpu_ptr(net->ft.stat, cpu); 34 + } 35 + (*pos)++; 36 + return NULL; 37 + } 38 + 39 + static void nf_flow_table_cpu_seq_stop(struct seq_file *seq, void *v) 40 + { 41 + } 42 + 43 + static int nf_flow_table_cpu_seq_show(struct seq_file *seq, void *v) 44 + { 45 + const struct nf_flow_table_stat *st = v; 46 + 47 + if (v == SEQ_START_TOKEN) { 48 + seq_puts(seq, "wq_add wq_del wq_stats\n"); 49 + return 0; 50 + } 51 + 52 + seq_printf(seq, "%8d %8d %8d\n", 53 + st->count_wq_add, 54 + st->count_wq_del, 55 + st->count_wq_stats 56 + ); 57 + return 0; 58 + } 59 + 60 + static const struct seq_operations nf_flow_table_cpu_seq_ops = { 61 + .start = nf_flow_table_cpu_seq_start, 62 + .next = nf_flow_table_cpu_seq_next, 63 + .stop = nf_flow_table_cpu_seq_stop, 64 + .show = nf_flow_table_cpu_seq_show, 65 + }; 66 + 67 + int nf_flow_table_init_proc(struct net *net) 68 + { 69 + struct proc_dir_entry *pde; 70 + 71 + pde = proc_create_net("nf_flowtable", 0444, net->proc_net_stat, 72 + &nf_flow_table_cpu_seq_ops, 73 + sizeof(struct seq_net_private)); 74 + return pde ? 
0 : -ENOMEM; 75 + } 76 + 77 + void nf_flow_table_fini_proc(struct net *net) 78 + { 79 + remove_proc_entry("nf_flowtable", net->proc_net_stat); 80 + }
+1 -1
net/netfilter/nfnetlink.c
··· 626 626 nfgenmsg = nlmsg_data(nlh); 627 627 skb_pull(skb, msglen); 628 628 /* Work around old nft using host byte order */ 629 - if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES) 629 + if (nfgenmsg->res_id == (__force __be16)NFNL_SUBSYS_NFTABLES) 630 630 res_id = NFNL_SUBSYS_NFTABLES; 631 631 else 632 632 res_id = ntohs(nfgenmsg->res_id);
+2 -1
net/netfilter/nft_byteorder.c
··· 44 44 case NFT_BYTEORDER_NTOH: 45 45 for (i = 0; i < priv->len / 8; i++) { 46 46 src64 = nft_reg_load64(&src[i]); 47 - nft_reg_store64(&dst[i], be64_to_cpu(src64)); 47 + nft_reg_store64(&dst[i], 48 + be64_to_cpu((__force __be64)src64)); 48 49 } 49 50 break; 50 51 case NFT_BYTEORDER_HTON:
+15 -3
net/netfilter/nft_cmp.c
··· 125 125 { 126 126 switch (len) { 127 127 case 2: 128 - data->val16 = ntohs(*((u16 *)val)); 128 + data->val16 = ntohs(*((__be16 *)val)); 129 129 break; 130 130 case 4: 131 - data->val32 = ntohl(*((u32 *)val)); 131 + data->val32 = ntohl(*((__be32 *)val)); 132 132 break; 133 133 case 8: 134 - data->val64 = be64_to_cpu(*((u64 *)val)); 134 + data->val64 = be64_to_cpu(*((__be64 *)val)); 135 135 break; 136 136 default: 137 137 WARN_ON_ONCE(1); ··· 196 196 .reduce = NFT_REDUCE_READONLY, 197 197 .offload = nft_cmp_offload, 198 198 }; 199 + 200 + /* Calculate the mask for the nft_cmp_fast expression. On big endian the 201 + * mask needs to include the *upper* bytes when interpreting that data as 202 + * something smaller than the full u32, therefore a cpu_to_le32 is done. 203 + */ 204 + static u32 nft_cmp_fast_mask(unsigned int len) 205 + { 206 + __le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr, 207 + data) * BITS_PER_BYTE - len)); 208 + 209 + return (__force u32)mask; 210 + } 199 211 200 212 static int nft_cmp_fast_init(const struct nft_ctx *ctx, 201 213 const struct nft_expr *expr,
+2 -2
net/netfilter/nft_ct.c
··· 204 204 case NFT_CT_SRC_IP: 205 205 if (nf_ct_l3num(ct) != NFPROTO_IPV4) 206 206 goto err; 207 - *dest = tuple->src.u3.ip; 207 + *dest = (__force __u32)tuple->src.u3.ip; 208 208 return; 209 209 case NFT_CT_DST_IP: 210 210 if (nf_ct_l3num(ct) != NFPROTO_IPV4) 211 211 goto err; 212 - *dest = tuple->dst.u3.ip; 212 + *dest = (__force __u32)tuple->dst.u3.ip; 213 213 return; 214 214 case NFT_CT_SRC_IP6: 215 215 if (nf_ct_l3num(ct) != NFPROTO_IPV6)
+5 -5
net/netfilter/nft_exthdr.c
··· 266 266 267 267 switch (priv->len) { 268 268 case 2: 269 - old.v16 = get_unaligned((u16 *)(opt + offset)); 269 + old.v16 = (__force __be16)get_unaligned((u16 *)(opt + offset)); 270 270 new.v16 = (__force __be16)nft_reg_load16( 271 271 &regs->data[priv->sreg]); 272 272 ··· 281 281 if (old.v16 == new.v16) 282 282 return; 283 283 284 - put_unaligned(new.v16, (u16*)(opt + offset)); 284 + put_unaligned(new.v16, (__be16*)(opt + offset)); 285 285 inet_proto_csum_replace2(&tcph->check, pkt->skb, 286 286 old.v16, new.v16, false); 287 287 break; 288 288 case 4: 289 - new.v32 = regs->data[priv->sreg]; 290 - old.v32 = get_unaligned((u32 *)(opt + offset)); 289 + new.v32 = nft_reg_load_be32(&regs->data[priv->sreg]); 290 + old.v32 = (__force __be32)get_unaligned((u32 *)(opt + offset)); 291 291 292 292 if (old.v32 == new.v32) 293 293 return; 294 294 295 - put_unaligned(new.v32, (u32*)(opt + offset)); 295 + put_unaligned(new.v32, (__be32*)(opt + offset)); 296 296 inet_proto_csum_replace4(&tcph->check, pkt->skb, 297 297 old.v32, new.v32, false); 298 298 break;
+1 -1
net/netfilter/nft_osf.c
··· 99 99 if (nla_put_u8(skb, NFTA_OSF_TTL, priv->ttl)) 100 100 goto nla_put_failure; 101 101 102 - if (nla_put_be32(skb, NFTA_OSF_FLAGS, ntohl(priv->flags))) 102 + if (nla_put_u32(skb, NFTA_OSF_FLAGS, ntohl((__force __be32)priv->flags))) 103 103 goto nla_put_failure; 104 104 105 105 if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg))
+2 -2
net/netfilter/nft_set_bitmap.c
··· 21 21 * the element state in the current and the future generation. 22 22 * 23 23 * An element can be in three states. The generation cursor is represented using 24 - * the ^ character, note that this cursor shifts on every succesful transaction. 24 + * the ^ character, note that this cursor shifts on every successful transaction. 25 25 * If no transaction is going on, we observe all elements are in the following 26 26 * state: 27 27 * ··· 39 39 * 10 = this element is active in the current generation and it becomes inactive 40 40 * ^ in the next one. This happens when the element is deactivated but commit 41 41 * path has not yet been executed yet, so removal is still pending. On 42 - * transation abortion, the next generation bit is reset to go back to 42 + * transaction abortion, the next generation bit is reset to go back to 43 43 * restore its previous state. 44 44 */ 45 45 struct nft_bitmap {
+4 -4
net/netfilter/nft_socket.c
··· 163 163 return -EOPNOTSUPP; 164 164 } 165 165 166 - priv->key = ntohl(nla_get_u32(tb[NFTA_SOCKET_KEY])); 166 + priv->key = ntohl(nla_get_be32(tb[NFTA_SOCKET_KEY])); 167 167 switch(priv->key) { 168 168 case NFT_SOCKET_TRANSPARENT: 169 169 case NFT_SOCKET_WILDCARD: ··· 179 179 if (!tb[NFTA_SOCKET_LEVEL]) 180 180 return -EINVAL; 181 181 182 - level = ntohl(nla_get_u32(tb[NFTA_SOCKET_LEVEL])); 182 + level = ntohl(nla_get_be32(tb[NFTA_SOCKET_LEVEL])); 183 183 if (level > 255) 184 184 return -EOPNOTSUPP; 185 185 ··· 202 202 { 203 203 const struct nft_socket *priv = nft_expr_priv(expr); 204 204 205 - if (nla_put_u32(skb, NFTA_SOCKET_KEY, htonl(priv->key))) 205 + if (nla_put_be32(skb, NFTA_SOCKET_KEY, htonl(priv->key))) 206 206 return -1; 207 207 if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg)) 208 208 return -1; 209 209 if (priv->key == NFT_SOCKET_CGROUPV2 && 210 - nla_put_u32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level))) 210 + nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level))) 211 211 return -1; 212 212 return 0; 213 213 }
+3 -3
net/netfilter/nft_tproxy.c
··· 52 52 skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED); 53 53 54 54 if (priv->sreg_addr) 55 - taddr = regs->data[priv->sreg_addr]; 55 + taddr = nft_reg_load_be32(&regs->data[priv->sreg_addr]); 56 56 taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr); 57 57 58 58 if (priv->sreg_port) 59 - tport = nft_reg_load16(&regs->data[priv->sreg_port]); 59 + tport = nft_reg_load_be16(&regs->data[priv->sreg_port]); 60 60 if (!tport) 61 61 tport = hp->dest; 62 62 ··· 124 124 taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr); 125 125 126 126 if (priv->sreg_port) 127 - tport = nft_reg_load16(&regs->data[priv->sreg_port]); 127 + tport = nft_reg_load_be16(&regs->data[priv->sreg_port]); 128 128 if (!tport) 129 129 tport = hp->dest; 130 130
+2 -1
net/netfilter/nft_tunnel.c
··· 383 383 struct ip_tunnel_info *info, 384 384 struct nft_tunnel_opts *opts) 385 385 { 386 - int err, rem, type = 0; 387 386 struct nlattr *nla; 387 + __be16 type = 0; 388 + int err, rem; 388 389 389 390 err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX, 390 391 nft_tunnel_opts_policy, NULL);
+4 -4
net/netfilter/nft_xfrm.c
··· 51 51 return -EOPNOTSUPP; 52 52 } 53 53 54 - priv->key = ntohl(nla_get_u32(tb[NFTA_XFRM_KEY])); 54 + priv->key = ntohl(nla_get_be32(tb[NFTA_XFRM_KEY])); 55 55 switch (priv->key) { 56 56 case NFT_XFRM_KEY_REQID: 57 57 case NFT_XFRM_KEY_SPI: ··· 134 134 WARN_ON_ONCE(1); 135 135 break; 136 136 case NFT_XFRM_KEY_DADDR_IP4: 137 - *dest = state->id.daddr.a4; 137 + *dest = (__force __u32)state->id.daddr.a4; 138 138 return; 139 139 case NFT_XFRM_KEY_DADDR_IP6: 140 140 memcpy(dest, &state->id.daddr.in6, sizeof(struct in6_addr)); 141 141 return; 142 142 case NFT_XFRM_KEY_SADDR_IP4: 143 - *dest = state->props.saddr.a4; 143 + *dest = (__force __u32)state->props.saddr.a4; 144 144 return; 145 145 case NFT_XFRM_KEY_SADDR_IP6: 146 146 memcpy(dest, &state->props.saddr.in6, sizeof(struct in6_addr)); ··· 149 149 *dest = state->props.reqid; 150 150 return; 151 151 case NFT_XFRM_KEY_SPI: 152 - *dest = state->id.spi; 152 + *dest = (__force __u32)state->id.spi; 153 153 return; 154 154 } 155 155
+18 -5
net/netfilter/xt_CT.c
··· 96 96 return -ENOMEM; 97 97 } 98 98 99 - help->helper = helper; 99 + rcu_assign_pointer(help->helper, helper); 100 100 return 0; 101 101 } 102 102 ··· 134 134 default: 135 135 return NF_CT_DEFAULT_ZONE_DIR; 136 136 } 137 + } 138 + 139 + static void xt_ct_put_helper(struct nf_conn_help *help) 140 + { 141 + struct nf_conntrack_helper *helper; 142 + 143 + if (!help) 144 + return; 145 + 146 + /* not yet exposed to other cpus, or ruleset 147 + * already detached (post-replacement). 148 + */ 149 + helper = rcu_dereference_raw(help->helper); 150 + if (helper) 151 + nf_conntrack_helper_put(helper); 137 152 } 138 153 139 154 static int xt_ct_tg_check(const struct xt_tgchk_param *par, ··· 222 207 223 208 err4: 224 209 help = nfct_help(ct); 225 - if (help) 226 - nf_conntrack_helper_put(help->helper); 210 + xt_ct_put_helper(help); 227 211 err3: 228 212 nf_ct_tmpl_free(ct); 229 213 err2: ··· 284 270 285 271 if (ct) { 286 272 help = nfct_help(ct); 287 - if (help) 288 - nf_conntrack_helper_put(help->helper); 273 + xt_ct_put_helper(help); 289 274 290 275 nf_ct_netns_put(par->net, par->family); 291 276
+4 -4
net/netfilter/xt_DSCP.c
··· 24 24 MODULE_ALIAS("ipt_TOS"); 25 25 MODULE_ALIAS("ip6t_TOS"); 26 26 27 + #define XT_DSCP_ECN_MASK 3u 28 + 27 29 static unsigned int 28 30 dscp_tg(struct sk_buff *skb, const struct xt_action_param *par) 29 31 { ··· 36 34 if (skb_ensure_writable(skb, sizeof(struct iphdr))) 37 35 return NF_DROP; 38 36 39 - ipv4_change_dsfield(ip_hdr(skb), 40 - (__force __u8)(~XT_DSCP_MASK), 37 + ipv4_change_dsfield(ip_hdr(skb), XT_DSCP_ECN_MASK, 41 38 dinfo->dscp << XT_DSCP_SHIFT); 42 39 43 40 } ··· 53 52 if (skb_ensure_writable(skb, sizeof(struct ipv6hdr))) 54 53 return NF_DROP; 55 54 56 - ipv6_change_dsfield(ipv6_hdr(skb), 57 - (__force __u8)(~XT_DSCP_MASK), 55 + ipv6_change_dsfield(ipv6_hdr(skb), XT_DSCP_ECN_MASK, 58 56 dinfo->dscp << XT_DSCP_SHIFT); 59 57 } 60 58 return XT_CONTINUE;
+2 -2
net/netfilter/xt_TCPMSS.c
··· 239 239 oldlen = ipv6h->payload_len; 240 240 newlen = htons(ntohs(oldlen) + ret); 241 241 if (skb->ip_summed == CHECKSUM_COMPLETE) 242 - skb->csum = csum_add(csum_sub(skb->csum, oldlen), 243 - newlen); 242 + skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)oldlen), 243 + (__force __wsum)newlen); 244 244 ipv6h->payload_len = newlen; 245 245 } 246 246 return XT_CONTINUE;
+2 -23
net/netfilter/xt_TPROXY.c
··· 74 74 /* This should be in a separate target, but we don't do multiple 75 75 targets on the same rule yet */ 76 76 skb->mark = (skb->mark & ~mark_mask) ^ mark_value; 77 - 78 - pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", 79 - iph->protocol, &iph->daddr, ntohs(hp->dest), 80 - &laddr, ntohs(lport), skb->mark); 81 - 82 77 nf_tproxy_assign_sock(skb, sk); 83 78 return NF_ACCEPT; 84 79 } 85 80 86 - pr_debug("no socket, dropping: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", 87 - iph->protocol, &iph->saddr, ntohs(hp->source), 88 - &iph->daddr, ntohs(hp->dest), skb->mark); 89 81 return NF_DROP; 90 82 } 91 83 ··· 114 122 int tproto; 115 123 116 124 tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); 117 - if (tproto < 0) { 118 - pr_debug("unable to find transport header in IPv6 packet, dropping\n"); 125 + if (tproto < 0) 119 126 return NF_DROP; 120 - } 121 127 122 128 hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); 123 - if (hp == NULL) { 124 - pr_debug("unable to grab transport header contents in IPv6 packet, dropping\n"); 129 + if (!hp) 125 130 return NF_DROP; 126 - } 127 131 128 132 /* check if there's an ongoing connection on the packet 129 133 * addresses, this happens if the redirect already happened ··· 156 168 /* This should be in a separate target, but we don't do multiple 157 169 targets on the same rule yet */ 158 170 skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; 159 - 160 - pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", 161 - tproto, &iph->saddr, ntohs(hp->source), 162 - laddr, ntohs(lport), skb->mark); 163 - 164 171 nf_tproxy_assign_sock(skb, sk); 165 172 return NF_ACCEPT; 166 173 } 167 - 168 - pr_debug("no socket, dropping: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", 169 - tproto, &iph->saddr, ntohs(hp->source), 170 - &iph->daddr, ntohs(hp->dest), skb->mark); 171 174 172 175 return NF_DROP; 173 176 }
+3 -3
net/netfilter/xt_connlimit.c
··· 62 62 key[4] = zone->id; 63 63 } else { 64 64 const struct iphdr *iph = ip_hdr(skb); 65 - key[0] = (info->flags & XT_CONNLIMIT_DADDR) ? 66 - iph->daddr : iph->saddr; 67 65 68 - key[0] &= info->mask.ip; 66 + key[0] = (info->flags & XT_CONNLIMIT_DADDR) ? 67 + (__force __u32)iph->daddr : (__force __u32)iph->saddr; 68 + key[0] &= (__force __u32)info->mask.ip; 69 69 key[1] = zone->id; 70 70 } 71 71
+3 -2
net/sched/act_ct.c
··· 277 277 .owner = THIS_MODULE, 278 278 }; 279 279 280 - static int tcf_ct_flow_table_get(struct tcf_ct_params *params) 280 + static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params) 281 281 { 282 282 struct tcf_ct_flow_table *ct_ft; 283 283 int err = -ENOMEM; ··· 303 303 err = nf_flow_table_init(&ct_ft->nf_ft); 304 304 if (err) 305 305 goto err_init; 306 + write_pnet(&ct_ft->nf_ft.net, net); 306 307 307 308 __module_get(THIS_MODULE); 308 309 out_unlock: ··· 1392 1391 if (err) 1393 1392 goto cleanup; 1394 1393 1395 - err = tcf_ct_flow_table_get(params); 1394 + err = tcf_ct_flow_table_get(net, params); 1396 1395 if (err) 1397 1396 goto cleanup; 1398 1397