Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for your net-next
tree, they are:

1) Stash ctinfo 3-bit field into pointer to nf_conntrack object from
sk_buff so we only access one single cacheline in the conntrack
hotpath. Patchset from Florian Westphal.

2) Don't leak pointer to internal structures when exporting x_tables
ruleset back to userspace, from Willem de Bruijn. This includes new
helper functions to copy data to userspace such as xt_data_to_user()
as well as conversions of our ip_tables, ip6_tables and arp_tables
clients to use it. Not surprisingly, ebtables requires an ad-hoc
update. There is also a new field in x_tables extensions to indicate
the amount of bytes that we copy to userspace.

3) Add nf_log_all_netns sysctl: This new knob allows you to enable
logging via nf_log infrastructure for all existing network namespaces.
Given the effort to provide per-netns syslog has been discontinued,
let's provide a way to restore logging using netfilter kernel logging
facilities in trusted environments. Patch from Michal Kubecek.

4) Validate SCTP checksum from conntrack helper, from Davide Caratti.

5) Merge UDPlite conntrack and NAT helpers into UDP, this was mostly
a copy&paste from the original helper, from Florian Westphal.

6) Reset netfilter state when duplicating packets, also from Florian.

7) Remove unnecessary check for broadcast in IPv6 in pkttype match and
nft_meta, from Liping Zhang.

8) Add missing code to deal with loopback packets from nft_meta when
used by the netdev family, also from Liping.

9) Several cleanups on nf_tables, one to remove unnecessary check from
the netlink control plane path to add table, set and stateful objects
and code consolidation when unregistering chain hooks, from Gao Feng.

10) Fix harmless reference counter underflow in IPVS that, however,
results in problems with the introduction of the new refcount_t
type, from David Windsor.

11) Enable LIBCRC32C from nf_ct_sctp instead of nf_nat_sctp,
from Davide Caratti.

12) Missing documentation on nf_tables uapi header, from Liping Zhang.

13) Use rb_entry() helper in xt_connlimit, from Geliang Tang.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+647 -681
+10
Documentation/networking/netfilter-sysctl.txt
··· 1 + /proc/sys/net/netfilter/* Variables: 2 + 3 + nf_log_all_netns - BOOLEAN 4 + 0 - disabled (default) 5 + not 0 - enabled 6 + 7 + By default, only init_net namespace can log packets into kernel log 8 + with LOG target; this aims to prevent containers from flooding host 9 + kernel log. If enabled, this target also works in other network 10 + namespaces. This variable is only accessible from init_net.
+9
include/linux/netfilter/x_tables.h
··· 167 167 168 168 const char *table; 169 169 unsigned int matchsize; 170 + unsigned int usersize; 170 171 #ifdef CONFIG_COMPAT 171 172 unsigned int compatsize; 172 173 #endif ··· 208 207 209 208 const char *table; 210 209 unsigned int targetsize; 210 + unsigned int usersize; 211 211 #ifdef CONFIG_COMPAT 212 212 unsigned int compatsize; 213 213 #endif ··· 288 286 bool inv_proto); 289 287 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, 290 288 bool inv_proto); 289 + 290 + int xt_match_to_user(const struct xt_entry_match *m, 291 + struct xt_entry_match __user *u); 292 + int xt_target_to_user(const struct xt_entry_target *t, 293 + struct xt_entry_target __user *u); 294 + int xt_data_to_user(void __user *dst, const void *src, 295 + int usersize, int size); 291 296 292 297 void *xt_copy_counters_from_user(const void __user *user, unsigned int len, 293 298 struct xt_counters_info *info, bool compat);
+18 -14
include/linux/skbuff.h
··· 585 585 * @cloned: Head may be cloned (check refcnt to be sure) 586 586 * @ip_summed: Driver fed us an IP checksum 587 587 * @nohdr: Payload reference only, must not modify header 588 - * @nfctinfo: Relationship of this skb to the connection 589 588 * @pkt_type: Packet class 590 589 * @fclone: skbuff clone status 591 590 * @ipvs_property: skbuff is owned by ipvs ··· 597 598 * @nf_trace: netfilter packet trace flag 598 599 * @protocol: Packet protocol from driver 599 600 * @destructor: Destruct function 600 - * @nfct: Associated connection, if any 601 + * @_nfct: Associated connection, if any (with nfctinfo bits) 601 602 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 602 603 * @skb_iif: ifindex of device we arrived on 603 604 * @tc_index: Traffic control index ··· 670 671 struct sec_path *sp; 671 672 #endif 672 673 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 673 - struct nf_conntrack *nfct; 674 + unsigned long _nfct; 674 675 #endif 675 676 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 676 677 struct nf_bridge_info *nf_bridge; ··· 723 724 __u8 pkt_type:3; 724 725 __u8 pfmemalloc:1; 725 726 __u8 ignore_df:1; 726 - __u8 nfctinfo:3; 727 727 728 728 __u8 nf_trace:1; 729 729 __u8 ip_summed:2; ··· 839 841 #define SKB_DST_NOREF 1UL 840 842 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) 841 843 844 + #define SKB_NFCT_PTRMASK ~(7UL) 842 845 /** 843 846 * skb_dst - returns skb dst_entry 844 847 * @skb: buffer ··· 3557 3558 skb->csum = csum_add(skb->csum, delta); 3558 3559 } 3559 3560 3561 + static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) 3562 + { 3563 + #if IS_ENABLED(CONFIG_NF_CONNTRACK) 3564 + return (void *)(skb->_nfct & SKB_NFCT_PTRMASK); 3565 + #else 3566 + return NULL; 3567 + #endif 3568 + } 3569 + 3560 3570 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3561 3571 void nf_conntrack_destroy(struct nf_conntrack *nfct); 3562 3572 static inline void nf_conntrack_put(struct nf_conntrack 
*nfct) ··· 3594 3586 static inline void nf_reset(struct sk_buff *skb) 3595 3587 { 3596 3588 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3597 - nf_conntrack_put(skb->nfct); 3598 - skb->nfct = NULL; 3589 + nf_conntrack_put(skb_nfct(skb)); 3590 + skb->_nfct = 0; 3599 3591 #endif 3600 3592 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3601 3593 nf_bridge_put(skb->nf_bridge); ··· 3615 3607 bool copy) 3616 3608 { 3617 3609 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3618 - dst->nfct = src->nfct; 3619 - nf_conntrack_get(src->nfct); 3620 - if (copy) 3621 - dst->nfctinfo = src->nfctinfo; 3610 + dst->_nfct = src->_nfct; 3611 + nf_conntrack_get(skb_nfct(src)); 3622 3612 #endif 3623 3613 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3624 3614 dst->nf_bridge = src->nf_bridge; ··· 3631 3625 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) 3632 3626 { 3633 3627 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3634 - nf_conntrack_put(dst->nfct); 3628 + nf_conntrack_put(skb_nfct(dst)); 3635 3629 #endif 3636 3630 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3637 3631 nf_bridge_put(dst->nf_bridge); ··· 3663 3657 #if IS_ENABLED(CONFIG_XFRM) 3664 3658 !skb->sp && 3665 3659 #endif 3666 - #if IS_ENABLED(CONFIG_NF_CONNTRACK) 3667 - !skb->nfct && 3668 - #endif 3660 + !skb_nfct(skb) && 3669 3661 !skb->_skb_refdst && 3670 3662 !skb_has_frag_list(skb); 3671 3663 }
+7 -5
include/net/ip_vs.h
··· 1421 1421 1422 1422 static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) 1423 1423 { 1424 - if (atomic_dec_return(&dest->refcnt) < 0) 1424 + if (atomic_dec_and_test(&dest->refcnt)) 1425 1425 kfree(dest); 1426 1426 } 1427 1427 ··· 1554 1554 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1555 1555 1556 1556 if (!ct || !nf_ct_is_untracked(ct)) { 1557 - nf_conntrack_put(skb->nfct); 1558 - skb->nfct = &nf_ct_untracked_get()->ct_general; 1559 - skb->nfctinfo = IP_CT_NEW; 1560 - nf_conntrack_get(skb->nfct); 1557 + struct nf_conn *untracked; 1558 + 1559 + nf_conntrack_put(&ct->ct_general); 1560 + untracked = nf_ct_untracked_get(); 1561 + nf_conntrack_get(&untracked->ct_general); 1562 + nf_ct_set(skb, untracked, IP_CT_NEW); 1561 1563 } 1562 1564 #endif 1563 1565 }
+1
include/net/netfilter/ipv4/nf_conntrack_ipv4.h
··· 14 14 15 15 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; 16 16 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; 17 + extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; 17 18 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; 18 19 #ifdef CONFIG_NF_CT_PROTO_DCCP 19 20 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+1
include/net/netfilter/ipv6/nf_conntrack_ipv6.h
··· 5 5 6 6 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; 7 7 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; 8 + extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; 8 9 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; 9 10 #ifdef CONFIG_NF_CT_PROTO_DCCP 10 11 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
+14 -3
include/net/netfilter/nf_conntrack.h
··· 34 34 struct ip_ct_sctp sctp; 35 35 struct ip_ct_tcp tcp; 36 36 struct nf_ct_gre gre; 37 + unsigned int tmpl_padto; 37 38 }; 38 39 39 40 union nf_conntrack_expect_proto { ··· 76 75 /* Usage count in here is 1 for hash table, 1 per skb, 77 76 * plus 1 for any connection(s) we are `master' for 78 77 * 79 - * Hint, SKB address this struct and refcnt via skb->nfct and 78 + * Hint, SKB address this struct and refcnt via skb->_nfct and 80 79 * helpers nf_conntrack_get() and nf_conntrack_put(). 81 80 * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt, 82 81 * beware nf_ct_get() is different and don't inc refcnt. ··· 163 162 int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, 164 163 const struct nf_conn *ignored_conntrack); 165 164 165 + #define NFCT_INFOMASK 7UL 166 + #define NFCT_PTRMASK ~(NFCT_INFOMASK) 167 + 166 168 /* Return conntrack_info and tuple hash for given skb. */ 167 169 static inline struct nf_conn * 168 170 nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo) 169 171 { 170 - *ctinfo = skb->nfctinfo; 171 - return (struct nf_conn *)skb->nfct; 172 + *ctinfo = skb->_nfct & NFCT_INFOMASK; 173 + 174 + return (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK); 172 175 } 173 176 174 177 /* decrement reference count on a conntrack */ ··· 345 340 const struct nf_conntrack_zone *zone, 346 341 gfp_t flags); 347 342 void nf_ct_tmpl_free(struct nf_conn *tmpl); 343 + 344 + static inline void 345 + nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) 346 + { 347 + skb->_nfct = (unsigned long)ct | info; 348 + } 348 349 349 350 #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 350 351 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
+1 -1
include/net/netfilter/nf_conntrack_core.h
··· 62 62 /* Confirm a connection: returns NF_DROP if packet must be dropped. */ 63 63 static inline int nf_conntrack_confirm(struct sk_buff *skb) 64 64 { 65 - struct nf_conn *ct = (struct nf_conn *)skb->nfct; 65 + struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb); 66 66 int ret = NF_ACCEPT; 67 67 68 68 if (ct && !nf_ct_is_untracked(ct)) {
+1 -1
include/net/netfilter/nf_conntrack_l4proto.h
··· 55 55 void (*destroy)(struct nf_conn *ct); 56 56 57 57 int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, 58 - unsigned int dataoff, enum ip_conntrack_info *ctinfo, 58 + unsigned int dataoff, 59 59 u_int8_t pf, unsigned int hooknum); 60 60 61 61 /* Print out the per-protocol part of the tuple. Return like seq_* */
+3
include/net/netfilter/nf_log.h
··· 51 51 struct module *me; 52 52 }; 53 53 54 + /* sysctl_nf_log_all_netns - allow LOG target in all network namespaces */ 55 + extern int sysctl_nf_log_all_netns; 56 + 54 57 /* Function to register/unregister log function. */ 55 58 int nf_log_register(u_int8_t pf, struct nf_logger *logger); 56 59 void nf_log_unregister(struct nf_logger *logger);
-16
include/net/netns/conntrack.h
··· 69 69 }; 70 70 #endif 71 71 72 - #ifdef CONFIG_NF_CT_PROTO_UDPLITE 73 - enum udplite_conntrack { 74 - UDPLITE_CT_UNREPLIED, 75 - UDPLITE_CT_REPLIED, 76 - UDPLITE_CT_MAX 77 - }; 78 - 79 - struct nf_udplite_net { 80 - struct nf_proto_net pn; 81 - unsigned int timeouts[UDPLITE_CT_MAX]; 82 - }; 83 - #endif 84 - 85 72 struct nf_ip_net { 86 73 struct nf_generic_net generic; 87 74 struct nf_tcp_net tcp; ··· 80 93 #endif 81 94 #ifdef CONFIG_NF_CT_PROTO_SCTP 82 95 struct nf_sctp_net sctp; 83 - #endif 84 - #ifdef CONFIG_NF_CT_PROTO_UDPLITE 85 - struct nf_udplite_net udplite; 86 96 #endif 87 97 }; 88 98
+5
include/uapi/linux/netfilter/nf_tables.h
··· 860 860 * @NFT_CT_PROTOCOL: conntrack layer 4 protocol 861 861 * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source 862 862 * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination 863 + * @NFT_CT_LABELS: conntrack labels 864 + * @NFT_CT_PKTS: conntrack packets 865 + * @NFT_CT_BYTES: conntrack bytes 866 + * @NFT_CT_AVGPKT: conntrack average bytes per packet 863 867 */ 864 868 enum nft_ct_keys { 865 869 NFT_CT_STATE, ··· 882 878 NFT_CT_LABELS, 883 879 NFT_CT_PKTS, 884 880 NFT_CT_BYTES, 881 + NFT_CT_AVGPKT, 885 882 }; 886 883 887 884 /**
+1
net/bridge/netfilter/ebt_limit.c
··· 105 105 .match = ebt_limit_mt, 106 106 .checkentry = ebt_limit_mt_check, 107 107 .matchsize = sizeof(struct ebt_limit_info), 108 + .usersize = offsetof(struct ebt_limit_info, prev), 108 109 #ifdef CONFIG_COMPAT 109 110 .compatsize = sizeof(struct ebt_compat_limit_info), 110 111 #endif
+1 -1
net/bridge/netfilter/ebt_log.c
··· 78 78 unsigned int bitmask; 79 79 80 80 /* FIXME: Disabled from containers until syslog ns is supported */ 81 - if (!net_eq(net, &init_net)) 81 + if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns) 82 82 return; 83 83 84 84 spin_lock_bh(&ebt_log_lock);
+48 -32
net/bridge/netfilter/ebtables.c
··· 1346 1346 hlp.num_counters, user, len); 1347 1347 } 1348 1348 1349 - static inline int ebt_make_matchname(const struct ebt_entry_match *m, 1350 - const char *base, char __user *ubase) 1349 + static inline int ebt_obj_to_user(char __user *um, const char *_name, 1350 + const char *data, int entrysize, 1351 + int usersize, int datasize) 1351 1352 { 1352 - char __user *hlp = ubase + ((char *)m - base); 1353 - char name[EBT_FUNCTION_MAXNAMELEN] = {}; 1353 + char name[EBT_FUNCTION_MAXNAMELEN] = {0}; 1354 1354 1355 1355 /* ebtables expects 32 bytes long names but xt_match names are 29 bytes 1356 1356 * long. Copy 29 bytes and fill remaining bytes with zeroes. 1357 1357 */ 1358 - strlcpy(name, m->u.match->name, sizeof(name)); 1359 - if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN)) 1358 + strlcpy(name, _name, sizeof(name)); 1359 + if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || 1360 + put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || 1361 + xt_data_to_user(um + entrysize, data, usersize, datasize)) 1360 1362 return -EFAULT; 1363 + 1361 1364 return 0; 1362 1365 } 1363 1366 1364 - static inline int ebt_make_watchername(const struct ebt_entry_watcher *w, 1365 - const char *base, char __user *ubase) 1367 + static inline int ebt_match_to_user(const struct ebt_entry_match *m, 1368 + const char *base, char __user *ubase) 1366 1369 { 1367 - char __user *hlp = ubase + ((char *)w - base); 1368 - char name[EBT_FUNCTION_MAXNAMELEN] = {}; 1369 - 1370 - strlcpy(name, w->u.watcher->name, sizeof(name)); 1371 - if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN)) 1372 - return -EFAULT; 1373 - return 0; 1370 + return ebt_obj_to_user(ubase + ((char *)m - base), 1371 + m->u.match->name, m->data, sizeof(*m), 1372 + m->u.match->usersize, m->match_size); 1374 1373 } 1375 1374 1376 - static inline int ebt_make_names(struct ebt_entry *e, const char *base, 1377 - char __user *ubase) 1375 + static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w, 
1376 + const char *base, char __user *ubase) 1377 + { 1378 + return ebt_obj_to_user(ubase + ((char *)w - base), 1379 + w->u.watcher->name, w->data, sizeof(*w), 1380 + w->u.watcher->usersize, w->watcher_size); 1381 + } 1382 + 1383 + static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base, 1384 + char __user *ubase) 1378 1385 { 1379 1386 int ret; 1380 1387 char __user *hlp; 1381 1388 const struct ebt_entry_target *t; 1382 - char name[EBT_FUNCTION_MAXNAMELEN] = {}; 1383 1389 1384 - if (e->bitmask == 0) 1390 + if (e->bitmask == 0) { 1391 + /* special case !EBT_ENTRY_OR_ENTRIES */ 1392 + if (copy_to_user(ubase + ((char *)e - base), e, 1393 + sizeof(struct ebt_entries))) 1394 + return -EFAULT; 1385 1395 return 0; 1396 + } 1397 + 1398 + if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e))) 1399 + return -EFAULT; 1386 1400 1387 1401 hlp = ubase + (((char *)e + e->target_offset) - base); 1388 1402 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 1389 1403 1390 - ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase); 1404 + ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase); 1391 1405 if (ret != 0) 1392 1406 return ret; 1393 - ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase); 1407 + ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase); 1394 1408 if (ret != 0) 1395 1409 return ret; 1396 - strlcpy(name, t->u.target->name, sizeof(name)); 1397 - if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN)) 1398 - return -EFAULT; 1410 + ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t), 1411 + t->u.target->usersize, t->target_size); 1412 + if (ret != 0) 1413 + return ret; 1414 + 1399 1415 return 0; 1400 1416 } 1401 1417 ··· 1491 1475 if (ret) 1492 1476 return ret; 1493 1477 1494 - if (copy_to_user(tmp.entries, entries, entries_size)) { 1495 - BUGPRINT("Couldn't copy entries to userspace\n"); 1496 - return -EFAULT; 1497 - } 1498 1478 /* set the match/watcher/target names right */ 1499 1479 
return EBT_ENTRY_ITERATE(entries, entries_size, 1500 - ebt_make_names, entries, tmp.entries); 1480 + ebt_entry_to_user, entries, tmp.entries); 1501 1481 } 1502 1482 1503 1483 static int do_ebt_set_ctl(struct sock *sk, ··· 1642 1630 if (match->compat_to_user) { 1643 1631 if (match->compat_to_user(cm->data, m->data)) 1644 1632 return -EFAULT; 1645 - } else if (copy_to_user(cm->data, m->data, msize)) 1633 + } else { 1634 + if (xt_data_to_user(cm->data, m->data, match->usersize, msize)) 1646 1635 return -EFAULT; 1636 + } 1647 1637 1648 1638 *size -= ebt_compat_entry_padsize() + off; 1649 1639 *dstptr = cm->data; ··· 1671 1657 if (target->compat_to_user) { 1672 1658 if (target->compat_to_user(cm->data, t->data)) 1673 1659 return -EFAULT; 1674 - } else if (copy_to_user(cm->data, t->data, tsize)) 1675 - return -EFAULT; 1660 + } else { 1661 + if (xt_data_to_user(cm->data, t->data, target->usersize, tsize)) 1662 + return -EFAULT; 1663 + } 1676 1664 1677 1665 *size -= ebt_compat_entry_padsize() + off; 1678 1666 *dstptr = cm->data;
+1 -1
net/core/skbuff.c
··· 654 654 skb->destructor(skb); 655 655 } 656 656 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 657 - nf_conntrack_put(skb->nfct); 657 + nf_conntrack_put(skb_nfct(skb)); 658 658 #endif 659 659 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 660 660 nf_bridge_put(skb->nf_bridge);
+5 -10
net/ipv4/netfilter/arp_tables.c
··· 677 677 return PTR_ERR(counters); 678 678 679 679 loc_cpu_entry = private->entries; 680 - /* ... then copy entire thing ... */ 681 - if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { 682 - ret = -EFAULT; 683 - goto free_counters; 684 - } 685 680 686 681 /* FIXME: use iterator macros --RR */ 687 682 /* ... then go back and fix counters and names */ ··· 684 689 const struct xt_entry_target *t; 685 690 686 691 e = (struct arpt_entry *)(loc_cpu_entry + off); 692 + if (copy_to_user(userptr + off, e, sizeof(*e))) { 693 + ret = -EFAULT; 694 + goto free_counters; 695 + } 687 696 if (copy_to_user(userptr + off 688 697 + offsetof(struct arpt_entry, counters), 689 698 &counters[num], ··· 697 698 } 698 699 699 700 t = arpt_get_target_c(e); 700 - if (copy_to_user(userptr + off + e->target_offset 701 - + offsetof(struct xt_entry_target, 702 - u.user.name), 703 - t->u.kernel.target->name, 704 - strlen(t->u.kernel.target->name)+1) != 0) { 701 + if (xt_target_to_user(t, userptr + off + e->target_offset)) { 705 702 ret = -EFAULT; 706 703 goto free_counters; 707 704 }
+6 -15
net/ipv4/netfilter/ip_tables.c
··· 826 826 return PTR_ERR(counters); 827 827 828 828 loc_cpu_entry = private->entries; 829 - if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { 830 - ret = -EFAULT; 831 - goto free_counters; 832 - } 833 829 834 830 /* FIXME: use iterator macros --RR */ 835 831 /* ... then go back and fix counters and names */ ··· 835 839 const struct xt_entry_target *t; 836 840 837 841 e = (struct ipt_entry *)(loc_cpu_entry + off); 842 + if (copy_to_user(userptr + off, e, sizeof(*e))) { 843 + ret = -EFAULT; 844 + goto free_counters; 845 + } 838 846 if (copy_to_user(userptr + off 839 847 + offsetof(struct ipt_entry, counters), 840 848 &counters[num], ··· 852 852 i += m->u.match_size) { 853 853 m = (void *)e + i; 854 854 855 - if (copy_to_user(userptr + off + i 856 - + offsetof(struct xt_entry_match, 857 - u.user.name), 858 - m->u.kernel.match->name, 859 - strlen(m->u.kernel.match->name)+1) 860 - != 0) { 855 + if (xt_match_to_user(m, userptr + off + i)) { 861 856 ret = -EFAULT; 862 857 goto free_counters; 863 858 } 864 859 } 865 860 866 861 t = ipt_get_target_c(e); 867 - if (copy_to_user(userptr + off + e->target_offset 868 - + offsetof(struct xt_entry_target, 869 - u.user.name), 870 - t->u.kernel.target->name, 871 - strlen(t->u.kernel.target->name)+1) != 0) { 862 + if (xt_target_to_user(t, userptr + off + e->target_offset)) { 872 863 ret = -EFAULT; 873 864 goto free_counters; 874 865 }
+1
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 485 485 .checkentry = clusterip_tg_check, 486 486 .destroy = clusterip_tg_destroy, 487 487 .targetsize = sizeof(struct ipt_clusterip_tgt_info), 488 + .usersize = offsetof(struct ipt_clusterip_tgt_info, config), 488 489 #ifdef CONFIG_COMPAT 489 490 .compatsize = sizeof(struct compat_ipt_clusterip_tgt_info), 490 491 #endif /* CONFIG_COMPAT */
+5 -6
net/ipv4/netfilter/ipt_SYNPROXY.c
··· 57 57 goto free_nskb; 58 58 59 59 if (nfct) { 60 - nskb->nfct = nfct; 61 - nskb->nfctinfo = ctinfo; 60 + nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo); 62 61 nf_conntrack_get(nfct); 63 62 } 64 63 ··· 106 107 107 108 synproxy_build_options(nth, opts); 108 109 109 - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 110 - niph, nth, tcp_hdr_size); 110 + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), 111 + IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); 111 112 } 112 113 113 114 static void ··· 229 230 230 231 synproxy_build_options(nth, opts); 231 232 232 - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 233 - niph, nth, tcp_hdr_size); 233 + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), 234 + IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); 234 235 } 235 236 236 237 static bool
+7 -8
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
··· 128 128 /* Returns conntrack if it dealt with ICMP, and filled in skb fields */ 129 129 static int 130 130 icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, 131 - enum ip_conntrack_info *ctinfo, 132 131 unsigned int hooknum) 133 132 { 134 133 struct nf_conntrack_tuple innertuple, origtuple; 135 134 const struct nf_conntrack_l4proto *innerproto; 136 135 const struct nf_conntrack_tuple_hash *h; 137 136 const struct nf_conntrack_zone *zone; 137 + enum ip_conntrack_info ctinfo; 138 138 struct nf_conntrack_zone tmp; 139 139 140 - NF_CT_ASSERT(skb->nfct == NULL); 140 + NF_CT_ASSERT(!skb_nfct(skb)); 141 141 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); 142 142 143 143 /* Are they talking about one of our connections? */ ··· 160 160 return -NF_ACCEPT; 161 161 } 162 162 163 - *ctinfo = IP_CT_RELATED; 163 + ctinfo = IP_CT_RELATED; 164 164 165 165 h = nf_conntrack_find_get(net, zone, &innertuple); 166 166 if (!h) { ··· 169 169 } 170 170 171 171 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) 172 - *ctinfo += IP_CT_IS_REPLY; 172 + ctinfo += IP_CT_IS_REPLY; 173 173 174 174 /* Update skb to refer to this connection */ 175 - skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; 176 - skb->nfctinfo = *ctinfo; 175 + nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo); 177 176 return NF_ACCEPT; 178 177 } 179 178 ··· 180 181 static int 181 182 icmp_error(struct net *net, struct nf_conn *tmpl, 182 183 struct sk_buff *skb, unsigned int dataoff, 183 - enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) 184 + u8 pf, unsigned int hooknum) 184 185 { 185 186 const struct icmphdr *icmph; 186 187 struct icmphdr _ih; ··· 224 225 icmph->type != ICMP_REDIRECT) 225 226 return NF_ACCEPT; 226 227 227 - return icmp_error_message(net, tmpl, skb, ctinfo, hooknum); 228 + return icmp_error_message(net, tmpl, skb, hooknum); 228 229 } 229 230 230 231 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+2 -2
net/ipv4/netfilter/nf_defrag_ipv4.c
··· 45 45 { 46 46 u16 zone_id = NF_CT_DEFAULT_ZONE_ID; 47 47 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 48 - if (skb->nfct) { 48 + if (skb_nfct(skb)) { 49 49 enum ip_conntrack_info ctinfo; 50 50 const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 51 51 ··· 75 75 #if !IS_ENABLED(CONFIG_NF_NAT) 76 76 /* Previously seen (loopback)? Ignore. Do this before 77 77 fragment check. */ 78 - if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) 78 + if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb))) 79 79 return NF_ACCEPT; 80 80 #endif 81 81 #endif
+3 -4
net/ipv4/netfilter/nf_dup_ipv4.c
··· 68 68 69 69 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 70 70 /* Avoid counting cloned packets towards the original connection. */ 71 - nf_conntrack_put(skb->nfct); 72 - skb->nfct = &nf_ct_untracked_get()->ct_general; 73 - skb->nfctinfo = IP_CT_NEW; 74 - nf_conntrack_get(skb->nfct); 71 + nf_reset(skb); 72 + nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); 73 + nf_conntrack_get(skb_nfct(skb)); 75 74 #endif 76 75 /* 77 76 * If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential
+1 -1
net/ipv4/netfilter/nf_log_arp.c
··· 87 87 struct nf_log_buf *m; 88 88 89 89 /* FIXME: Disabled from containers until syslog ns is supported */ 90 - if (!net_eq(net, &init_net)) 90 + if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns) 91 91 return; 92 92 93 93 m = nf_log_buf_open();
+1 -1
net/ipv4/netfilter/nf_log_ipv4.c
··· 319 319 struct nf_log_buf *m; 320 320 321 321 /* FIXME: Disabled from containers until syslog ns is supported */ 322 - if (!net_eq(net, &init_net)) 322 + if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns) 323 323 return; 324 324 325 325 m = nf_log_buf_open();
+6 -15
net/ipv6/netfilter/ip6_tables.c
··· 855 855 return PTR_ERR(counters); 856 856 857 857 loc_cpu_entry = private->entries; 858 - if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { 859 - ret = -EFAULT; 860 - goto free_counters; 861 - } 862 858 863 859 /* FIXME: use iterator macros --RR */ 864 860 /* ... then go back and fix counters and names */ ··· 864 868 const struct xt_entry_target *t; 865 869 866 870 e = (struct ip6t_entry *)(loc_cpu_entry + off); 871 + if (copy_to_user(userptr + off, e, sizeof(*e))) { 872 + ret = -EFAULT; 873 + goto free_counters; 874 + } 867 875 if (copy_to_user(userptr + off 868 876 + offsetof(struct ip6t_entry, counters), 869 877 &counters[num], ··· 881 881 i += m->u.match_size) { 882 882 m = (void *)e + i; 883 883 884 - if (copy_to_user(userptr + off + i 885 - + offsetof(struct xt_entry_match, 886 - u.user.name), 887 - m->u.kernel.match->name, 888 - strlen(m->u.kernel.match->name)+1) 889 - != 0) { 884 + if (xt_match_to_user(m, userptr + off + i)) { 890 885 ret = -EFAULT; 891 886 goto free_counters; 892 887 } 893 888 } 894 889 895 890 t = ip6t_get_target_c(e); 896 - if (copy_to_user(userptr + off + e->target_offset 897 - + offsetof(struct xt_entry_target, 898 - u.user.name), 899 - t->u.kernel.target->name, 900 - strlen(t->u.kernel.target->name)+1) != 0) { 891 + if (xt_target_to_user(t, userptr + off + e->target_offset)) { 901 892 ret = -EFAULT; 902 893 goto free_counters; 903 894 }
+2
net/ipv6/netfilter/ip6t_NPT.c
··· 112 112 .table = "mangle", 113 113 .target = ip6t_snpt_tg, 114 114 .targetsize = sizeof(struct ip6t_npt_tginfo), 115 + .usersize = offsetof(struct ip6t_npt_tginfo, adjustment), 115 116 .checkentry = ip6t_npt_checkentry, 116 117 .family = NFPROTO_IPV6, 117 118 .hooks = (1 << NF_INET_LOCAL_IN) | ··· 124 123 .table = "mangle", 125 124 .target = ip6t_dnpt_tg, 126 125 .targetsize = sizeof(struct ip6t_npt_tginfo), 126 + .usersize = offsetof(struct ip6t_npt_tginfo, adjustment), 127 127 .checkentry = ip6t_npt_checkentry, 128 128 .family = NFPROTO_IPV6, 129 129 .hooks = (1 << NF_INET_PRE_ROUTING) |
+5 -6
net/ipv6/netfilter/ip6t_SYNPROXY.c
··· 71 71 skb_dst_set(nskb, dst); 72 72 73 73 if (nfct) { 74 - nskb->nfct = nfct; 75 - nskb->nfctinfo = ctinfo; 74 + nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo); 76 75 nf_conntrack_get(nfct); 77 76 } 78 77 ··· 120 121 121 122 synproxy_build_options(nth, opts); 122 123 123 - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 124 - niph, nth, tcp_hdr_size); 124 + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), 125 + IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); 125 126 } 126 127 127 128 static void ··· 243 244 244 245 synproxy_build_options(nth, opts); 245 246 246 - synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 247 - niph, nth, tcp_hdr_size); 247 + synproxy_send_tcp(net, skb, nskb, skb_nfct(skb), 248 + IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); 248 249 } 249 250 250 251 static bool
+9 -11
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
··· 145 145 icmpv6_error_message(struct net *net, struct nf_conn *tmpl, 146 146 struct sk_buff *skb, 147 147 unsigned int icmp6off, 148 - enum ip_conntrack_info *ctinfo, 149 148 unsigned int hooknum) 150 149 { 151 150 struct nf_conntrack_tuple intuple, origtuple; 152 151 const struct nf_conntrack_tuple_hash *h; 153 152 const struct nf_conntrack_l4proto *inproto; 153 + enum ip_conntrack_info ctinfo; 154 154 struct nf_conntrack_zone tmp; 155 155 156 - NF_CT_ASSERT(skb->nfct == NULL); 156 + NF_CT_ASSERT(!skb_nfct(skb)); 157 157 158 158 /* Are they talking about one of our connections? */ 159 159 if (!nf_ct_get_tuplepr(skb, ··· 176 176 return -NF_ACCEPT; 177 177 } 178 178 179 - *ctinfo = IP_CT_RELATED; 179 + ctinfo = IP_CT_RELATED; 180 180 181 181 h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp), 182 182 &intuple); ··· 185 185 return -NF_ACCEPT; 186 186 } else { 187 187 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) 188 - *ctinfo += IP_CT_IS_REPLY; 188 + ctinfo += IP_CT_IS_REPLY; 189 189 } 190 190 191 191 /* Update skb to refer to this connection */ 192 - skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; 193 - skb->nfctinfo = *ctinfo; 192 + nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo); 194 193 return NF_ACCEPT; 195 194 } 196 195 197 196 static int 198 197 icmpv6_error(struct net *net, struct nf_conn *tmpl, 199 198 struct sk_buff *skb, unsigned int dataoff, 200 - enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) 199 + u8 pf, unsigned int hooknum) 201 200 { 202 201 const struct icmp6hdr *icmp6h; 203 202 struct icmp6hdr _ih; ··· 221 222 type = icmp6h->icmp6_type - 130; 222 223 if (type >= 0 && type < sizeof(noct_valid_new) && 223 224 noct_valid_new[type]) { 224 - skb->nfct = &nf_ct_untracked_get()->ct_general; 225 - skb->nfctinfo = IP_CT_NEW; 226 - nf_conntrack_get(skb->nfct); 225 + nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); 226 + nf_conntrack_get(skb_nfct(skb)); 227 227 return NF_ACCEPT; 228 228 } 229 229 ··· 230 232 if 
(icmp6h->icmp6_type >= 128) 231 233 return NF_ACCEPT; 232 234 233 - return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum); 235 + return icmpv6_error_message(net, tmpl, skb, dataoff, hooknum); 234 236 } 235 237 236 238 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+2 -2
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
··· 37 37 { 38 38 u16 zone_id = NF_CT_DEFAULT_ZONE_ID; 39 39 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 40 - if (skb->nfct) { 40 + if (skb_nfct(skb)) { 41 41 enum ip_conntrack_info ctinfo; 42 42 const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 43 43 ··· 61 61 62 62 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 63 63 /* Previously seen (loopback)? */ 64 - if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) 64 + if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb))) 65 65 return NF_ACCEPT; 66 66 #endif 67 67
+3 -4
net/ipv6/netfilter/nf_dup_ipv6.c
··· 57 57 return; 58 58 59 59 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 60 - nf_conntrack_put(skb->nfct); 61 - skb->nfct = &nf_ct_untracked_get()->ct_general; 62 - skb->nfctinfo = IP_CT_NEW; 63 - nf_conntrack_get(skb->nfct); 60 + nf_reset(skb); 61 + nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); 62 + nf_conntrack_get(skb_nfct(skb)); 64 63 #endif 65 64 if (hooknum == NF_INET_PRE_ROUTING || 66 65 hooknum == NF_INET_LOCAL_IN) {
+1 -1
net/ipv6/netfilter/nf_log_ipv6.c
··· 351 351 struct nf_log_buf *m; 352 352 353 353 /* FIXME: Disabled from containers until syslog ns is supported */ 354 - if (!net_eq(net, &init_net)) 354 + if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns) 355 355 return; 356 356 357 357 m = nf_log_buf_open();
+1 -1
net/netfilter/Kconfig
··· 162 162 bool 'SCTP protocol connection tracking support' 163 163 depends on NETFILTER_ADVANCED 164 164 default y 165 + select LIBCRC32C 165 166 help 166 167 With this option enabled, the layer 3 independent connection 167 168 tracking code will be able to do state tracking on SCTP connections. ··· 398 397 bool 399 398 default NF_NAT && NF_CT_PROTO_SCTP 400 399 depends on NF_NAT && NF_CT_PROTO_SCTP 401 - select LIBCRC32C 402 400 403 401 config NF_NAT_AMANDA 404 402 tristate
-2
net/netfilter/Makefile
··· 7 7 nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o 8 8 nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o 9 9 nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o 10 - nf_conntrack-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o 11 10 12 11 obj-$(CONFIG_NETFILTER) = netfilter.o 13 12 ··· 46 47 # NAT protocols (nf_nat) 47 48 nf_nat-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o 48 49 nf_nat-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o 49 - nf_nat-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o 50 50 51 51 # generic transport layer logging 52 52 obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
+1 -1
net/netfilter/core.c
··· 375 375 { 376 376 void (*attach)(struct sk_buff *, const struct sk_buff *); 377 377 378 - if (skb->nfct) { 378 + if (skb->_nfct) { 379 379 rcu_read_lock(); 380 380 attach = rcu_dereference(ip_ct_attach); 381 381 if (attach)
+3 -5
net/netfilter/ipvs/ip_vs_ctl.c
··· 710 710 dest->vport == svc->port))) { 711 711 /* HIT */ 712 712 list_del(&dest->t_list); 713 - ip_vs_dest_hold(dest); 714 713 goto out; 715 714 } 716 715 } ··· 739 740 * When the ip_vs_control_clearup is activated by ipvs module exit, 740 741 * the service tables must have been flushed and all the connections 741 742 * are expired, and the refcnt of each destination in the trash must 742 - * be 0, so we simply release them here. 743 + * be 1, so we simply release them here. 743 744 */ 744 745 static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs) 745 746 { ··· 1078 1079 if (list_empty(&ipvs->dest_trash) && !cleanup) 1079 1080 mod_timer(&ipvs->dest_trash_timer, 1080 1081 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1)); 1081 - /* dest lives in trash without reference */ 1082 + /* dest lives in trash with reference */ 1082 1083 list_add(&dest->t_list, &ipvs->dest_trash); 1083 1084 dest->idle_start = 0; 1084 1085 spin_unlock_bh(&ipvs->dest_trash_lock); 1085 - ip_vs_dest_put(dest); 1086 1086 } 1087 1087 1088 1088 ··· 1157 1159 1158 1160 spin_lock(&ipvs->dest_trash_lock); 1159 1161 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { 1160 - if (atomic_read(&dest->refcnt) > 0) 1162 + if (atomic_read(&dest->refcnt) > 1) 1161 1163 continue; 1162 1164 if (dest->idle_start) { 1163 1165 if (time_before(now, dest->idle_start +
+45 -28
net/netfilter/nf_conntrack_core.c
··· 350 350 spin_unlock(&pcpu->lock); 351 351 } 352 352 353 + #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK) 354 + 353 355 /* Released via destroy_conntrack() */ 354 356 struct nf_conn *nf_ct_tmpl_alloc(struct net *net, 355 357 const struct nf_conntrack_zone *zone, 356 358 gfp_t flags) 357 359 { 358 - struct nf_conn *tmpl; 360 + struct nf_conn *tmpl, *p; 359 361 360 - tmpl = kzalloc(sizeof(*tmpl), flags); 361 - if (tmpl == NULL) 362 - return NULL; 362 + if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) { 363 + tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags); 364 + if (!tmpl) 365 + return NULL; 366 + 367 + p = tmpl; 368 + tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p); 369 + if (tmpl != p) { 370 + tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p); 371 + tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p; 372 + } 373 + } else { 374 + tmpl = kzalloc(sizeof(*tmpl), flags); 375 + if (!tmpl) 376 + return NULL; 377 + } 363 378 364 379 tmpl->status = IPS_TEMPLATE; 365 380 write_pnet(&tmpl->ct_net, net); ··· 389 374 { 390 375 nf_ct_ext_destroy(tmpl); 391 376 nf_ct_ext_free(tmpl); 392 - kfree(tmpl); 377 + 378 + if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) 379 + kfree((char *)tmpl - tmpl->proto.tmpl_padto); 380 + else 381 + kfree(tmpl); 393 382 } 394 383 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free); 395 384 ··· 705 686 !nfct_nat(ct) && 706 687 !nf_ct_is_dying(ct) && 707 688 atomic_inc_not_zero(&ct->ct_general.use)) { 708 - nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct); 709 - nf_conntrack_put(skb->nfct); 710 - /* Assign conntrack already in hashes to this skbuff. Don't 711 - * modify skb->nfctinfo to ensure consistent stateful filtering. 
712 - */ 713 - skb->nfct = &ct->ct_general; 689 + enum ip_conntrack_info oldinfo; 690 + struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo); 691 + 692 + nf_ct_acct_merge(ct, ctinfo, loser_ct); 693 + nf_conntrack_put(&loser_ct->ct_general); 694 + nf_ct_set(skb, ct, oldinfo); 714 695 return NF_ACCEPT; 715 696 } 716 697 NF_CT_STAT_INC(net, drop); ··· 1237 1218 return &ct->tuplehash[IP_CT_DIR_ORIGINAL]; 1238 1219 } 1239 1220 1240 - /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ 1221 + /* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */ 1241 1222 static inline struct nf_conn * 1242 1223 resolve_normal_ct(struct net *net, struct nf_conn *tmpl, 1243 1224 struct sk_buff *skb, ··· 1296 1277 } 1297 1278 *set_reply = 0; 1298 1279 } 1299 - skb->nfct = &ct->ct_general; 1300 - skb->nfctinfo = *ctinfo; 1280 + nf_ct_set(skb, ct, *ctinfo); 1301 1281 return ct; 1302 1282 } 1303 1283 ··· 1304 1286 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, 1305 1287 struct sk_buff *skb) 1306 1288 { 1307 - struct nf_conn *ct, *tmpl = NULL; 1289 + struct nf_conn *ct, *tmpl; 1308 1290 enum ip_conntrack_info ctinfo; 1309 1291 struct nf_conntrack_l3proto *l3proto; 1310 1292 struct nf_conntrack_l4proto *l4proto; ··· 1314 1296 int set_reply = 0; 1315 1297 int ret; 1316 1298 1317 - if (skb->nfct) { 1299 + tmpl = nf_ct_get(skb, &ctinfo); 1300 + if (tmpl) { 1318 1301 /* Previously seen (loopback or untracked)? Ignore. */ 1319 - tmpl = (struct nf_conn *)skb->nfct; 1320 1302 if (!nf_ct_is_template(tmpl)) { 1321 1303 NF_CT_STAT_INC_ATOMIC(net, ignore); 1322 1304 return NF_ACCEPT; 1323 1305 } 1324 - skb->nfct = NULL; 1306 + skb->_nfct = 0; 1325 1307 } 1326 1308 1327 1309 /* rcu_read_lock()ed by nf_hook_thresh */ ··· 1342 1324 * inverse of the return code tells to the netfilter 1343 1325 * core what to do with the packet. 
*/ 1344 1326 if (l4proto->error != NULL) { 1345 - ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo, 1346 - pf, hooknum); 1327 + ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum); 1347 1328 if (ret <= 0) { 1348 1329 NF_CT_STAT_INC_ATOMIC(net, error); 1349 1330 NF_CT_STAT_INC_ATOMIC(net, invalid); ··· 1350 1333 goto out; 1351 1334 } 1352 1335 /* ICMP[v6] protocol trackers may assign one conntrack. */ 1353 - if (skb->nfct) 1336 + if (skb->_nfct) 1354 1337 goto out; 1355 1338 } 1356 1339 repeat: ··· 1370 1353 goto out; 1371 1354 } 1372 1355 1373 - NF_CT_ASSERT(skb->nfct); 1356 + NF_CT_ASSERT(skb_nfct(skb)); 1374 1357 1375 1358 /* Decide what timeout policy we want to apply to this flow. */ 1376 1359 timeouts = nf_ct_timeout_lookup(net, ct, l4proto); ··· 1380 1363 /* Invalid: inverse of the return code tells 1381 1364 * the netfilter core what to do */ 1382 1365 pr_debug("nf_conntrack_in: Can't track with proto module\n"); 1383 - nf_conntrack_put(skb->nfct); 1384 - skb->nfct = NULL; 1366 + nf_conntrack_put(&ct->ct_general); 1367 + skb->_nfct = 0; 1385 1368 NF_CT_STAT_INC_ATOMIC(net, invalid); 1386 1369 if (ret == -NF_DROP) 1387 1370 NF_CT_STAT_INC_ATOMIC(net, drop); ··· 1539 1522 ctinfo = IP_CT_RELATED; 1540 1523 1541 1524 /* Attach to new skbuff, and increment count */ 1542 - nskb->nfct = &ct->ct_general; 1543 - nskb->nfctinfo = ctinfo; 1544 - nf_conntrack_get(nskb->nfct); 1525 + nf_ct_set(nskb, ct, ctinfo); 1526 + nf_conntrack_get(skb_nfct(nskb)); 1545 1527 } 1546 1528 1547 1529 /* Bring out ya dead! */ ··· 1876 1860 nf_conntrack_max = max_factor * nf_conntrack_htable_size; 1877 1861 1878 1862 nf_conntrack_cachep = kmem_cache_create("nf_conntrack", 1879 - sizeof(struct nf_conn), 0, 1863 + sizeof(struct nf_conn), 1864 + NFCT_INFOMASK + 1, 1880 1865 SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL); 1881 1866 if (!nf_conntrack_cachep) 1882 1867 goto err_cachep;
-1
net/netfilter/nf_conntrack_proto_dccp.c
··· 561 561 562 562 static int dccp_error(struct net *net, struct nf_conn *tmpl, 563 563 struct sk_buff *skb, unsigned int dataoff, 564 - enum ip_conntrack_info *ctinfo, 565 564 u_int8_t pf, unsigned int hooknum) 566 565 { 567 566 struct dccp_hdr _dh, *dh;
+32
net/netfilter/nf_conntrack_proto_sctp.c
··· 22 22 #include <linux/seq_file.h> 23 23 #include <linux/spinlock.h> 24 24 #include <linux/interrupt.h> 25 + #include <net/sctp/checksum.h> 25 26 27 + #include <net/netfilter/nf_log.h> 26 28 #include <net/netfilter/nf_conntrack.h> 27 29 #include <net/netfilter/nf_conntrack_l4proto.h> 28 30 #include <net/netfilter/nf_conntrack_ecache.h> ··· 507 505 return true; 508 506 } 509 507 508 + static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb, 509 + unsigned int dataoff, 510 + u8 pf, unsigned int hooknum) 511 + { 512 + const struct sctphdr *sh; 513 + struct sctphdr _sctph; 514 + const char *logmsg; 515 + 516 + sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); 517 + if (!sh) { 518 + logmsg = "nf_ct_sctp: short packet "; 519 + goto out_invalid; 520 + } 521 + if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 522 + skb->ip_summed == CHECKSUM_NONE) { 523 + if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { 524 + logmsg = "nf_ct_sctp: bad CRC "; 525 + goto out_invalid; 526 + } 527 + skb->ip_summed = CHECKSUM_UNNECESSARY; 528 + } 529 + return NF_ACCEPT; 530 + out_invalid: 531 + if (LOG_INVALID(net, IPPROTO_SCTP)) 532 + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", logmsg); 533 + return -NF_ACCEPT; 534 + } 535 + 510 536 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 511 537 512 538 #include <linux/netfilter/nfnetlink.h> ··· 782 752 .packet = sctp_packet, 783 753 .get_timeouts = sctp_get_timeouts, 784 754 .new = sctp_new, 755 + .error = sctp_error, 785 756 .me = THIS_MODULE, 786 757 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 787 758 .to_nlattr = sctp_to_nlattr, ··· 817 786 .packet = sctp_packet, 818 787 .get_timeouts = sctp_get_timeouts, 819 788 .new = sctp_new, 789 + .error = sctp_error, 820 790 .me = THIS_MODULE, 821 791 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 822 792 .to_nlattr = sctp_to_nlattr,
-1
net/netfilter/nf_conntrack_proto_tcp.c
··· 750 750 static int tcp_error(struct net *net, struct nf_conn *tmpl, 751 751 struct sk_buff *skb, 752 752 unsigned int dataoff, 753 - enum ip_conntrack_info *ctinfo, 754 753 u_int8_t pf, 755 754 unsigned int hooknum) 756 755 {
+123 -1
net/netfilter/nf_conntrack_proto_udp.c
··· 108 108 return true; 109 109 } 110 110 111 + #ifdef CONFIG_NF_CT_PROTO_UDPLITE 112 + static int udplite_error(struct net *net, struct nf_conn *tmpl, 113 + struct sk_buff *skb, 114 + unsigned int dataoff, 115 + u8 pf, unsigned int hooknum) 116 + { 117 + unsigned int udplen = skb->len - dataoff; 118 + const struct udphdr *hdr; 119 + struct udphdr _hdr; 120 + unsigned int cscov; 121 + 122 + /* Header is too small? */ 123 + hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 124 + if (!hdr) { 125 + if (LOG_INVALID(net, IPPROTO_UDPLITE)) 126 + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 127 + "nf_ct_udplite: short packet "); 128 + return -NF_ACCEPT; 129 + } 130 + 131 + cscov = ntohs(hdr->len); 132 + if (cscov == 0) { 133 + cscov = udplen; 134 + } else if (cscov < sizeof(*hdr) || cscov > udplen) { 135 + if (LOG_INVALID(net, IPPROTO_UDPLITE)) 136 + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 137 + "nf_ct_udplite: invalid checksum coverage "); 138 + return -NF_ACCEPT; 139 + } 140 + 141 + /* UDPLITE mandates checksums */ 142 + if (!hdr->check) { 143 + if (LOG_INVALID(net, IPPROTO_UDPLITE)) 144 + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 145 + "nf_ct_udplite: checksum missing "); 146 + return -NF_ACCEPT; 147 + } 148 + 149 + /* Checksum invalid? Ignore. 
*/ 150 + if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 151 + nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP, 152 + pf)) { 153 + if (LOG_INVALID(net, IPPROTO_UDPLITE)) 154 + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 155 + "nf_ct_udplite: bad UDPLite checksum "); 156 + return -NF_ACCEPT; 157 + } 158 + 159 + return NF_ACCEPT; 160 + } 161 + #endif 162 + 111 163 static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, 112 - unsigned int dataoff, enum ip_conntrack_info *ctinfo, 164 + unsigned int dataoff, 113 165 u_int8_t pf, 114 166 unsigned int hooknum) 115 167 { ··· 342 290 }; 343 291 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4); 344 292 293 + #ifdef CONFIG_NF_CT_PROTO_UDPLITE 294 + struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = 295 + { 296 + .l3proto = PF_INET, 297 + .l4proto = IPPROTO_UDPLITE, 298 + .name = "udplite", 299 + .allow_clash = true, 300 + .pkt_to_tuple = udp_pkt_to_tuple, 301 + .invert_tuple = udp_invert_tuple, 302 + .print_tuple = udp_print_tuple, 303 + .packet = udp_packet, 304 + .get_timeouts = udp_get_timeouts, 305 + .new = udp_new, 306 + .error = udplite_error, 307 + #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 308 + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 309 + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 310 + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 311 + .nla_policy = nf_ct_port_nla_policy, 312 + #endif 313 + #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 314 + .ctnl_timeout = { 315 + .nlattr_to_obj = udp_timeout_nlattr_to_obj, 316 + .obj_to_nlattr = udp_timeout_obj_to_nlattr, 317 + .nlattr_max = CTA_TIMEOUT_UDP_MAX, 318 + .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 319 + .nla_policy = udp_timeout_nla_policy, 320 + }, 321 + #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 322 + .init_net = udp_init_net, 323 + .get_net_proto = udp_get_net_proto, 324 + }; 325 + EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4); 326 + #endif 327 + 345 328 struct 
nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = 346 329 { 347 330 .l3proto = PF_INET6, ··· 409 322 .get_net_proto = udp_get_net_proto, 410 323 }; 411 324 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6); 325 + 326 + #ifdef CONFIG_NF_CT_PROTO_UDPLITE 327 + struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = 328 + { 329 + .l3proto = PF_INET6, 330 + .l4proto = IPPROTO_UDPLITE, 331 + .name = "udplite", 332 + .allow_clash = true, 333 + .pkt_to_tuple = udp_pkt_to_tuple, 334 + .invert_tuple = udp_invert_tuple, 335 + .print_tuple = udp_print_tuple, 336 + .packet = udp_packet, 337 + .get_timeouts = udp_get_timeouts, 338 + .new = udp_new, 339 + .error = udplite_error, 340 + #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 341 + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 342 + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 343 + .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 344 + .nla_policy = nf_ct_port_nla_policy, 345 + #endif 346 + #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 347 + .ctnl_timeout = { 348 + .nlattr_to_obj = udp_timeout_nlattr_to_obj, 349 + .obj_to_nlattr = udp_timeout_obj_to_nlattr, 350 + .nlattr_max = CTA_TIMEOUT_UDP_MAX, 351 + .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 352 + .nla_policy = udp_timeout_nla_policy, 353 + }, 354 + #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 355 + .init_net = udp_init_net, 356 + .get_net_proto = udp_get_net_proto, 357 + }; 358 + EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); 359 + #endif
-324
net/netfilter/nf_conntrack_proto_udplite.c
··· 1 - /* (C) 1999-2001 Paul `Rusty' Russell 2 - * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> 3 - * (C) 2007 Patrick McHardy <kaber@trash.net> 4 - * 5 - * This program is free software; you can redistribute it and/or modify 6 - * it under the terms of the GNU General Public License version 2 as 7 - * published by the Free Software Foundation. 8 - */ 9 - 10 - #include <linux/types.h> 11 - #include <linux/timer.h> 12 - #include <linux/udp.h> 13 - #include <linux/seq_file.h> 14 - #include <linux/skbuff.h> 15 - #include <linux/ipv6.h> 16 - #include <net/ip6_checksum.h> 17 - #include <net/checksum.h> 18 - 19 - #include <linux/netfilter.h> 20 - #include <linux/netfilter_ipv4.h> 21 - #include <linux/netfilter_ipv6.h> 22 - #include <net/netfilter/nf_conntrack_l4proto.h> 23 - #include <net/netfilter/nf_conntrack_ecache.h> 24 - #include <net/netfilter/nf_log.h> 25 - 26 - static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = { 27 - [UDPLITE_CT_UNREPLIED] = 30*HZ, 28 - [UDPLITE_CT_REPLIED] = 180*HZ, 29 - }; 30 - 31 - static inline struct nf_udplite_net *udplite_pernet(struct net *net) 32 - { 33 - return &net->ct.nf_ct_proto.udplite; 34 - } 35 - 36 - static bool udplite_pkt_to_tuple(const struct sk_buff *skb, 37 - unsigned int dataoff, 38 - struct net *net, 39 - struct nf_conntrack_tuple *tuple) 40 - { 41 - const struct udphdr *hp; 42 - struct udphdr _hdr; 43 - 44 - /* Actually only need first 4 bytes to get ports. */ 45 - hp = skb_header_pointer(skb, dataoff, 4, &_hdr); 46 - if (hp == NULL) 47 - return false; 48 - 49 - tuple->src.u.udp.port = hp->source; 50 - tuple->dst.u.udp.port = hp->dest; 51 - return true; 52 - } 53 - 54 - static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple, 55 - const struct nf_conntrack_tuple *orig) 56 - { 57 - tuple->src.u.udp.port = orig->dst.u.udp.port; 58 - tuple->dst.u.udp.port = orig->src.u.udp.port; 59 - return true; 60 - } 61 - 62 - /* Print out the per-protocol part of the tuple. 
*/ 63 - static void udplite_print_tuple(struct seq_file *s, 64 - const struct nf_conntrack_tuple *tuple) 65 - { 66 - seq_printf(s, "sport=%hu dport=%hu ", 67 - ntohs(tuple->src.u.udp.port), 68 - ntohs(tuple->dst.u.udp.port)); 69 - } 70 - 71 - static unsigned int *udplite_get_timeouts(struct net *net) 72 - { 73 - return udplite_pernet(net)->timeouts; 74 - } 75 - 76 - /* Returns verdict for packet, and may modify conntracktype */ 77 - static int udplite_packet(struct nf_conn *ct, 78 - const struct sk_buff *skb, 79 - unsigned int dataoff, 80 - enum ip_conntrack_info ctinfo, 81 - u_int8_t pf, 82 - unsigned int hooknum, 83 - unsigned int *timeouts) 84 - { 85 - /* If we've seen traffic both ways, this is some kind of UDP 86 - stream. Extend timeout. */ 87 - if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { 88 - nf_ct_refresh_acct(ct, ctinfo, skb, 89 - timeouts[UDPLITE_CT_REPLIED]); 90 - /* Also, more likely to be important, and not a probe */ 91 - if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 92 - nf_conntrack_event_cache(IPCT_ASSURED, ct); 93 - } else { 94 - nf_ct_refresh_acct(ct, ctinfo, skb, 95 - timeouts[UDPLITE_CT_UNREPLIED]); 96 - } 97 - return NF_ACCEPT; 98 - } 99 - 100 - /* Called when a new connection for this protocol found. */ 101 - static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb, 102 - unsigned int dataoff, unsigned int *timeouts) 103 - { 104 - return true; 105 - } 106 - 107 - static int udplite_error(struct net *net, struct nf_conn *tmpl, 108 - struct sk_buff *skb, 109 - unsigned int dataoff, 110 - enum ip_conntrack_info *ctinfo, 111 - u_int8_t pf, 112 - unsigned int hooknum) 113 - { 114 - unsigned int udplen = skb->len - dataoff; 115 - const struct udphdr *hdr; 116 - struct udphdr _hdr; 117 - unsigned int cscov; 118 - 119 - /* Header is too small? 
*/ 120 - hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 121 - if (hdr == NULL) { 122 - if (LOG_INVALID(net, IPPROTO_UDPLITE)) 123 - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 124 - "nf_ct_udplite: short packet "); 125 - return -NF_ACCEPT; 126 - } 127 - 128 - cscov = ntohs(hdr->len); 129 - if (cscov == 0) 130 - cscov = udplen; 131 - else if (cscov < sizeof(*hdr) || cscov > udplen) { 132 - if (LOG_INVALID(net, IPPROTO_UDPLITE)) 133 - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 134 - "nf_ct_udplite: invalid checksum coverage "); 135 - return -NF_ACCEPT; 136 - } 137 - 138 - /* UDPLITE mandates checksums */ 139 - if (!hdr->check) { 140 - if (LOG_INVALID(net, IPPROTO_UDPLITE)) 141 - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 142 - "nf_ct_udplite: checksum missing "); 143 - return -NF_ACCEPT; 144 - } 145 - 146 - /* Checksum invalid? Ignore. */ 147 - if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 148 - nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP, 149 - pf)) { 150 - if (LOG_INVALID(net, IPPROTO_UDPLITE)) 151 - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 152 - "nf_ct_udplite: bad UDPLite checksum "); 153 - return -NF_ACCEPT; 154 - } 155 - 156 - return NF_ACCEPT; 157 - } 158 - 159 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 160 - 161 - #include <linux/netfilter/nfnetlink.h> 162 - #include <linux/netfilter/nfnetlink_cttimeout.h> 163 - 164 - static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], 165 - struct net *net, void *data) 166 - { 167 - unsigned int *timeouts = data; 168 - struct nf_udplite_net *un = udplite_pernet(net); 169 - 170 - /* set default timeouts for UDPlite. 
*/ 171 - timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED]; 172 - timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED]; 173 - 174 - if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) { 175 - timeouts[UDPLITE_CT_UNREPLIED] = 176 - ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ; 177 - } 178 - if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) { 179 - timeouts[UDPLITE_CT_REPLIED] = 180 - ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ; 181 - } 182 - return 0; 183 - } 184 - 185 - static int 186 - udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) 187 - { 188 - const unsigned int *timeouts = data; 189 - 190 - if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED, 191 - htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) || 192 - nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED, 193 - htonl(timeouts[UDPLITE_CT_REPLIED] / HZ))) 194 - goto nla_put_failure; 195 - return 0; 196 - 197 - nla_put_failure: 198 - return -ENOSPC; 199 - } 200 - 201 - static const struct nla_policy 202 - udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = { 203 - [CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 }, 204 - [CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 }, 205 - }; 206 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 207 - 208 - #ifdef CONFIG_SYSCTL 209 - static struct ctl_table udplite_sysctl_table[] = { 210 - { 211 - .procname = "nf_conntrack_udplite_timeout", 212 - .maxlen = sizeof(unsigned int), 213 - .mode = 0644, 214 - .proc_handler = proc_dointvec_jiffies, 215 - }, 216 - { 217 - .procname = "nf_conntrack_udplite_timeout_stream", 218 - .maxlen = sizeof(unsigned int), 219 - .mode = 0644, 220 - .proc_handler = proc_dointvec_jiffies, 221 - }, 222 - { } 223 - }; 224 - #endif /* CONFIG_SYSCTL */ 225 - 226 - static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn, 227 - struct nf_udplite_net *un) 228 - { 229 - #ifdef CONFIG_SYSCTL 230 - if (pn->ctl_table) 231 - return 0; 232 - 233 - pn->ctl_table = kmemdup(udplite_sysctl_table, 234 - 
sizeof(udplite_sysctl_table), 235 - GFP_KERNEL); 236 - if (!pn->ctl_table) 237 - return -ENOMEM; 238 - 239 - pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED]; 240 - pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED]; 241 - #endif 242 - return 0; 243 - } 244 - 245 - static int udplite_init_net(struct net *net, u_int16_t proto) 246 - { 247 - struct nf_udplite_net *un = udplite_pernet(net); 248 - struct nf_proto_net *pn = &un->pn; 249 - 250 - if (!pn->users) { 251 - int i; 252 - 253 - for (i = 0 ; i < UDPLITE_CT_MAX; i++) 254 - un->timeouts[i] = udplite_timeouts[i]; 255 - } 256 - 257 - return udplite_kmemdup_sysctl_table(pn, un); 258 - } 259 - 260 - struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = 261 - { 262 - .l3proto = PF_INET, 263 - .l4proto = IPPROTO_UDPLITE, 264 - .name = "udplite", 265 - .allow_clash = true, 266 - .pkt_to_tuple = udplite_pkt_to_tuple, 267 - .invert_tuple = udplite_invert_tuple, 268 - .print_tuple = udplite_print_tuple, 269 - .packet = udplite_packet, 270 - .get_timeouts = udplite_get_timeouts, 271 - .new = udplite_new, 272 - .error = udplite_error, 273 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 274 - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 275 - .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 276 - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 277 - .nla_policy = nf_ct_port_nla_policy, 278 - #endif 279 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 280 - .ctnl_timeout = { 281 - .nlattr_to_obj = udplite_timeout_nlattr_to_obj, 282 - .obj_to_nlattr = udplite_timeout_obj_to_nlattr, 283 - .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX, 284 - .obj_size = sizeof(unsigned int) * 285 - CTA_TIMEOUT_UDPLITE_MAX, 286 - .nla_policy = udplite_timeout_nla_policy, 287 - }, 288 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 289 - .init_net = udplite_init_net, 290 - }; 291 - EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4); 292 - 293 - struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = 294 - { 295 - 
.l3proto = PF_INET6, 296 - .l4proto = IPPROTO_UDPLITE, 297 - .name = "udplite", 298 - .allow_clash = true, 299 - .pkt_to_tuple = udplite_pkt_to_tuple, 300 - .invert_tuple = udplite_invert_tuple, 301 - .print_tuple = udplite_print_tuple, 302 - .packet = udplite_packet, 303 - .get_timeouts = udplite_get_timeouts, 304 - .new = udplite_new, 305 - .error = udplite_error, 306 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 307 - .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 308 - .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 309 - .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 310 - .nla_policy = nf_ct_port_nla_policy, 311 - #endif 312 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 313 - .ctnl_timeout = { 314 - .nlattr_to_obj = udplite_timeout_nlattr_to_obj, 315 - .obj_to_nlattr = udplite_timeout_obj_to_nlattr, 316 - .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX, 317 - .obj_size = sizeof(unsigned int) * 318 - CTA_TIMEOUT_UDPLITE_MAX, 319 - .nla_policy = udplite_timeout_nla_policy, 320 - }, 321 - #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 322 - .init_net = udplite_init_net, 323 - }; 324 - EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
+3
net/netfilter/nf_conntrack_standalone.c
··· 642 642 if (ret < 0) 643 643 goto out_start; 644 644 645 + BUILD_BUG_ON(SKB_NFCT_PTRMASK != NFCT_PTRMASK); 646 + BUILD_BUG_ON(NFCT_INFOMASK <= IP_CT_NUMBER); 647 + 645 648 #ifdef CONFIG_SYSCTL 646 649 nf_ct_netfilter_header = 647 650 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
+24
net/netfilter/nf_log.c
··· 15 15 16 16 #define NFLOGGER_NAME_LEN 64 17 17 18 + int sysctl_nf_log_all_netns __read_mostly; 19 + EXPORT_SYMBOL(sysctl_nf_log_all_netns); 20 + 18 21 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; 19 22 static DEFINE_MUTEX(nf_log_mutex); 20 23 ··· 416 413 #ifdef CONFIG_SYSCTL 417 414 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; 418 415 static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; 416 + static struct ctl_table_header *nf_log_sysctl_fhdr; 417 + 418 + static struct ctl_table nf_log_sysctl_ftable[] = { 419 + { 420 + .procname = "nf_log_all_netns", 421 + .data = &sysctl_nf_log_all_netns, 422 + .maxlen = sizeof(sysctl_nf_log_all_netns), 423 + .mode = 0644, 424 + .proc_handler = proc_dointvec, 425 + }, 426 + { } 427 + }; 419 428 420 429 static int nf_log_proc_dostring(struct ctl_table *table, int write, 421 430 void __user *buffer, size_t *lenp, loff_t *ppos) ··· 497 482 nf_log_sysctl_table[i].extra1 = 498 483 (void *)(unsigned long) i; 499 484 } 485 + nf_log_sysctl_fhdr = register_net_sysctl(net, "net/netfilter", 486 + nf_log_sysctl_ftable); 487 + if (!nf_log_sysctl_fhdr) 488 + goto err_freg; 500 489 } 501 490 502 491 for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) ··· 517 498 err_reg: 518 499 if (!net_eq(net, &init_net)) 519 500 kfree(table); 501 + else 502 + unregister_net_sysctl_table(nf_log_sysctl_fhdr); 503 + err_freg: 520 504 err_alloc: 521 505 return -ENOMEM; 522 506 } ··· 532 510 unregister_net_sysctl_table(net->nf.nf_log_dir_header); 533 511 if (!net_eq(net, &init_net)) 534 512 kfree(table); 513 + else 514 + unregister_net_sysctl_table(nf_log_sysctl_fhdr); 535 515 } 536 516 #else 537 517 static int netfilter_log_sysctl_init(struct net *net)
+1 -1
net/netfilter/nf_nat_helper.c
··· 60 60 __skb_trim(skb, skb->len + rep_len - match_len); 61 61 } 62 62 63 - if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) { 63 + if (nf_ct_l3num((struct nf_conn *)skb_nfct(skb)) == NFPROTO_IPV4) { 64 64 /* fix IP hdr checksum information */ 65 65 ip_hdr(skb)->tot_len = htons(skb->len); 66 66 ip_send_check(ip_hdr(skb));
+66 -12
net/netfilter/nf_nat_proto_udp.c
··· 30 30 &udp_port_rover); 31 31 } 32 32 33 - static bool 34 - udp_manip_pkt(struct sk_buff *skb, 35 - const struct nf_nat_l3proto *l3proto, 36 - unsigned int iphdroff, unsigned int hdroff, 37 - const struct nf_conntrack_tuple *tuple, 38 - enum nf_nat_manip_type maniptype) 33 + static void 34 + __udp_manip_pkt(struct sk_buff *skb, 35 + const struct nf_nat_l3proto *l3proto, 36 + unsigned int iphdroff, struct udphdr *hdr, 37 + const struct nf_conntrack_tuple *tuple, 38 + enum nf_nat_manip_type maniptype, bool do_csum) 39 39 { 40 - struct udphdr *hdr; 41 40 __be16 *portptr, newport; 42 - 43 - if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 44 - return false; 45 - hdr = (struct udphdr *)(skb->data + hdroff); 46 41 47 42 if (maniptype == NF_NAT_MANIP_SRC) { 48 43 /* Get rid of src port */ ··· 48 53 newport = tuple->dst.u.udp.port; 49 54 portptr = &hdr->dest; 50 55 } 51 - if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) { 56 + if (do_csum) { 52 57 l3proto->csum_update(skb, iphdroff, &hdr->check, 53 58 tuple, maniptype); 54 59 inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, ··· 57 62 hdr->check = CSUM_MANGLED_0; 58 63 } 59 64 *portptr = newport; 65 + } 66 + 67 + static bool udp_manip_pkt(struct sk_buff *skb, 68 + const struct nf_nat_l3proto *l3proto, 69 + unsigned int iphdroff, unsigned int hdroff, 70 + const struct nf_conntrack_tuple *tuple, 71 + enum nf_nat_manip_type maniptype) 72 + { 73 + struct udphdr *hdr; 74 + bool do_csum; 75 + 76 + if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 77 + return false; 78 + 79 + hdr = (struct udphdr *)(skb->data + hdroff); 80 + do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; 81 + 82 + __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum); 60 83 return true; 61 84 } 85 + 86 + #ifdef CONFIG_NF_NAT_PROTO_UDPLITE 87 + static u16 udplite_port_rover; 88 + 89 + static bool udplite_manip_pkt(struct sk_buff *skb, 90 + const struct nf_nat_l3proto *l3proto, 91 + unsigned int iphdroff, 
unsigned int hdroff, 92 + const struct nf_conntrack_tuple *tuple, 93 + enum nf_nat_manip_type maniptype) 94 + { 95 + struct udphdr *hdr; 96 + 97 + if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 98 + return false; 99 + 100 + hdr = (struct udphdr *)(skb->data + hdroff); 101 + __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, true); 102 + return true; 103 + } 104 + 105 + static void 106 + udplite_unique_tuple(const struct nf_nat_l3proto *l3proto, 107 + struct nf_conntrack_tuple *tuple, 108 + const struct nf_nat_range *range, 109 + enum nf_nat_manip_type maniptype, 110 + const struct nf_conn *ct) 111 + { 112 + nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, 113 + &udplite_port_rover); 114 + } 115 + 116 + const struct nf_nat_l4proto nf_nat_l4proto_udplite = { 117 + .l4proto = IPPROTO_UDPLITE, 118 + .manip_pkt = udplite_manip_pkt, 119 + .in_range = nf_nat_l4proto_in_range, 120 + .unique_tuple = udplite_unique_tuple, 121 + #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 122 + .nlattr_to_range = nf_nat_l4proto_nlattr_to_range, 123 + #endif 124 + }; 125 + #endif /* CONFIG_NF_NAT_PROTO_UDPLITE */ 62 126 63 127 const struct nf_nat_l4proto nf_nat_l4proto_udp = { 64 128 .l4proto = IPPROTO_UDP,
-73
net/netfilter/nf_nat_proto_udplite.c
··· 1 - /* (C) 1999-2001 Paul `Rusty' Russell 2 - * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> 3 - * (C) 2008 Patrick McHardy <kaber@trash.net> 4 - * 5 - * This program is free software; you can redistribute it and/or modify 6 - * it under the terms of the GNU General Public License version 2 as 7 - * published by the Free Software Foundation. 8 - */ 9 - 10 - #include <linux/types.h> 11 - #include <linux/udp.h> 12 - 13 - #include <linux/netfilter.h> 14 - #include <net/netfilter/nf_nat.h> 15 - #include <net/netfilter/nf_nat_l3proto.h> 16 - #include <net/netfilter/nf_nat_l4proto.h> 17 - 18 - static u16 udplite_port_rover; 19 - 20 - static void 21 - udplite_unique_tuple(const struct nf_nat_l3proto *l3proto, 22 - struct nf_conntrack_tuple *tuple, 23 - const struct nf_nat_range *range, 24 - enum nf_nat_manip_type maniptype, 25 - const struct nf_conn *ct) 26 - { 27 - nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, 28 - &udplite_port_rover); 29 - } 30 - 31 - static bool 32 - udplite_manip_pkt(struct sk_buff *skb, 33 - const struct nf_nat_l3proto *l3proto, 34 - unsigned int iphdroff, unsigned int hdroff, 35 - const struct nf_conntrack_tuple *tuple, 36 - enum nf_nat_manip_type maniptype) 37 - { 38 - struct udphdr *hdr; 39 - __be16 *portptr, newport; 40 - 41 - if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) 42 - return false; 43 - 44 - hdr = (struct udphdr *)(skb->data + hdroff); 45 - 46 - if (maniptype == NF_NAT_MANIP_SRC) { 47 - /* Get rid of source port */ 48 - newport = tuple->src.u.udp.port; 49 - portptr = &hdr->source; 50 - } else { 51 - /* Get rid of dst port */ 52 - newport = tuple->dst.u.udp.port; 53 - portptr = &hdr->dest; 54 - } 55 - 56 - l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype); 57 - inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false); 58 - if (!hdr->check) 59 - hdr->check = CSUM_MANGLED_0; 60 - 61 - *portptr = newport; 62 - return true; 63 - } 64 - 65 - const struct nf_nat_l4proto 
nf_nat_l4proto_udplite = { 66 - .l4proto = IPPROTO_UDPLITE, 67 - .manip_pkt = udplite_manip_pkt, 68 - .in_range = nf_nat_l4proto_in_range, 69 - .unique_tuple = udplite_unique_tuple, 70 - #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 71 - .nlattr_to_range = nf_nat_l4proto_nlattr_to_range, 72 - #endif 73 - };
+28 -35
net/netfilter/nf_tables_api.c
··· 576 576 return err; 577 577 } 578 578 579 + static void _nf_tables_table_disable(struct net *net, 580 + const struct nft_af_info *afi, 581 + struct nft_table *table, 582 + u32 cnt) 583 + { 584 + struct nft_chain *chain; 585 + u32 i = 0; 586 + 587 + list_for_each_entry(chain, &table->chains, list) { 588 + if (!nft_is_active_next(net, chain)) 589 + continue; 590 + if (!(chain->flags & NFT_BASE_CHAIN)) 591 + continue; 592 + 593 + if (cnt && i++ == cnt) 594 + break; 595 + 596 + nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, 597 + afi->nops); 598 + } 599 + } 600 + 579 601 static int nf_tables_table_enable(struct net *net, 580 602 const struct nft_af_info *afi, 581 603 struct nft_table *table) ··· 620 598 } 621 599 return 0; 622 600 err: 623 - list_for_each_entry(chain, &table->chains, list) { 624 - if (!nft_is_active_next(net, chain)) 625 - continue; 626 - if (!(chain->flags & NFT_BASE_CHAIN)) 627 - continue; 628 - 629 - if (i-- <= 0) 630 - break; 631 - 632 - nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, 633 - afi->nops); 634 - } 601 + if (i) 602 + _nf_tables_table_disable(net, afi, table, i); 635 603 return err; 636 604 } 637 605 ··· 629 617 const struct nft_af_info *afi, 630 618 struct nft_table *table) 631 619 { 632 - struct nft_chain *chain; 633 - 634 - list_for_each_entry(chain, &table->chains, list) { 635 - if (!nft_is_active_next(net, chain)) 636 - continue; 637 - if (!(chain->flags & NFT_BASE_CHAIN)) 638 - continue; 639 - 640 - nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, 641 - afi->nops); 642 - } 620 + _nf_tables_table_disable(net, afi, table, 0); 643 621 } 644 622 645 623 static int nf_tables_updtable(struct nft_ctx *ctx) ··· 698 696 if (IS_ERR(table)) { 699 697 if (PTR_ERR(table) != -ENOENT) 700 698 return PTR_ERR(table); 701 - table = NULL; 702 - } 703 - 704 - if (table != NULL) { 699 + } else { 705 700 if (nlh->nlmsg_flags & NLM_F_EXCL) 706 701 return -EEXIST; 707 702 if (nlh->nlmsg_flags & NLM_F_REPLACE) ··· 2965 2966 
if (IS_ERR(set)) { 2966 2967 if (PTR_ERR(set) != -ENOENT) 2967 2968 return PTR_ERR(set); 2968 - set = NULL; 2969 - } 2970 - 2971 - if (set != NULL) { 2969 + } else { 2972 2970 if (nlh->nlmsg_flags & NLM_F_EXCL) 2973 2971 return -EEXIST; 2974 2972 if (nlh->nlmsg_flags & NLM_F_REPLACE) ··· 4159 4163 if (err != -ENOENT) 4160 4164 return err; 4161 4165 4162 - obj = NULL; 4163 - } 4164 - 4165 - if (obj != NULL) { 4166 + } else { 4166 4167 if (nlh->nlmsg_flags & NLM_F_EXCL) 4167 4168 return -EEXIST; 4168 4169
+22 -3
net/netfilter/nft_ct.c
··· 129 129 memcpy(dest, &count, sizeof(count)); 130 130 return; 131 131 } 132 + case NFT_CT_AVGPKT: { 133 + const struct nf_conn_acct *acct = nf_conn_acct_find(ct); 134 + u64 avgcnt = 0, bcnt = 0, pcnt = 0; 135 + 136 + if (acct) { 137 + pcnt = nft_ct_get_eval_counter(acct->counter, 138 + NFT_CT_PKTS, priv->dir); 139 + bcnt = nft_ct_get_eval_counter(acct->counter, 140 + NFT_CT_BYTES, priv->dir); 141 + if (pcnt != 0) 142 + avgcnt = div64_u64(bcnt, pcnt); 143 + } 144 + 145 + memcpy(dest, &avgcnt, sizeof(avgcnt)); 146 + return; 147 + } 132 148 case NFT_CT_L3PROTOCOL: 133 149 *dest = nf_ct_l3num(ct); 134 150 return; ··· 332 316 break; 333 317 case NFT_CT_BYTES: 334 318 case NFT_CT_PKTS: 319 + case NFT_CT_AVGPKT: 335 320 /* no direction? return sum of original + reply */ 336 321 if (tb[NFTA_CT_DIRECTION] == NULL) 337 322 priv->dir = IP_CT_DIR_MAX; ··· 363 346 if (err < 0) 364 347 return err; 365 348 366 - if (priv->key == NFT_CT_BYTES || priv->key == NFT_CT_PKTS) 349 + if (priv->key == NFT_CT_BYTES || 350 + priv->key == NFT_CT_PKTS || 351 + priv->key == NFT_CT_AVGPKT) 367 352 nf_ct_set_acct(ctx->net, true); 368 353 369 354 return 0; ··· 464 445 break; 465 446 case NFT_CT_BYTES: 466 447 case NFT_CT_PKTS: 448 + case NFT_CT_AVGPKT: 467 449 if (priv->dir < IP_CT_DIR_MAX && 468 450 nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) 469 451 goto nla_put_failure; ··· 554 534 555 535 ct = nf_ct_untracked_get(); 556 536 atomic_inc(&ct->ct_general.use); 557 - skb->nfct = &ct->ct_general; 558 - skb->nfctinfo = IP_CT_NEW; 537 + nf_ct_set(skb, ct, IP_CT_NEW); 559 538 } 560 539 561 540 static struct nft_expr_type nft_notrack_type;
+27 -4
net/netfilter/nft_meta.c
··· 154 154 *dest = PACKET_BROADCAST; 155 155 break; 156 156 case NFPROTO_IPV6: 157 - if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF) 157 + *dest = PACKET_MULTICAST; 158 + break; 159 + case NFPROTO_NETDEV: 160 + switch (skb->protocol) { 161 + case htons(ETH_P_IP): { 162 + int noff = skb_network_offset(skb); 163 + struct iphdr *iph, _iph; 164 + 165 + iph = skb_header_pointer(skb, noff, 166 + sizeof(_iph), &_iph); 167 + if (!iph) 168 + goto err; 169 + 170 + if (ipv4_is_multicast(iph->daddr)) 171 + *dest = PACKET_MULTICAST; 172 + else 173 + *dest = PACKET_BROADCAST; 174 + 175 + break; 176 + } 177 + case htons(ETH_P_IPV6): 158 178 *dest = PACKET_MULTICAST; 159 - else 160 - *dest = PACKET_BROADCAST; 179 + break; 180 + default: 181 + WARN_ON_ONCE(1); 182 + goto err; 183 + } 161 184 break; 162 185 default: 163 - WARN_ON(1); 186 + WARN_ON_ONCE(1); 164 187 goto err; 165 188 } 166 189 break;
+58 -10
net/netfilter/x_tables.c
··· 262 262 } 263 263 EXPORT_SYMBOL_GPL(xt_request_find_target); 264 264 265 + 266 + static int xt_obj_to_user(u16 __user *psize, u16 size, 267 + void __user *pname, const char *name, 268 + u8 __user *prev, u8 rev) 269 + { 270 + if (put_user(size, psize)) 271 + return -EFAULT; 272 + if (copy_to_user(pname, name, strlen(name) + 1)) 273 + return -EFAULT; 274 + if (put_user(rev, prev)) 275 + return -EFAULT; 276 + 277 + return 0; 278 + } 279 + 280 + #define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \ 281 + xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \ 282 + U->u.user.name, K->u.kernel.TYPE->name, \ 283 + &U->u.user.revision, K->u.kernel.TYPE->revision) 284 + 285 + int xt_data_to_user(void __user *dst, const void *src, 286 + int usersize, int size) 287 + { 288 + usersize = usersize ? : size; 289 + if (copy_to_user(dst, src, usersize)) 290 + return -EFAULT; 291 + if (usersize != size && clear_user(dst + usersize, size - usersize)) 292 + return -EFAULT; 293 + 294 + return 0; 295 + } 296 + EXPORT_SYMBOL_GPL(xt_data_to_user); 297 + 298 + #define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ 299 + xt_data_to_user(U->data, K->data, \ 300 + K->u.kernel.TYPE->usersize, \ 301 + C_SIZE ? 
: K->u.kernel.TYPE->TYPE##size) 302 + 303 + int xt_match_to_user(const struct xt_entry_match *m, 304 + struct xt_entry_match __user *u) 305 + { 306 + return XT_OBJ_TO_USER(u, m, match, 0) || 307 + XT_DATA_TO_USER(u, m, match, 0); 308 + } 309 + EXPORT_SYMBOL_GPL(xt_match_to_user); 310 + 311 + int xt_target_to_user(const struct xt_entry_target *t, 312 + struct xt_entry_target __user *u) 313 + { 314 + return XT_OBJ_TO_USER(u, t, target, 0) || 315 + XT_DATA_TO_USER(u, t, target, 0); 316 + } 317 + EXPORT_SYMBOL_GPL(xt_target_to_user); 318 + 265 319 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) 266 320 { 267 321 const struct xt_match *m; ··· 619 565 int off = xt_compat_match_offset(match); 620 566 u_int16_t msize = m->u.user.match_size - off; 621 567 622 - if (copy_to_user(cm, m, sizeof(*cm)) || 623 - put_user(msize, &cm->u.user.match_size) || 624 - copy_to_user(cm->u.user.name, m->u.kernel.match->name, 625 - strlen(m->u.kernel.match->name) + 1)) 568 + if (XT_OBJ_TO_USER(cm, m, match, msize)) 626 569 return -EFAULT; 627 570 628 571 if (match->compat_to_user) { 629 572 if (match->compat_to_user((void __user *)cm->data, m->data)) 630 573 return -EFAULT; 631 574 } else { 632 - if (copy_to_user(cm->data, m->data, msize - sizeof(*cm))) 575 + if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm))) 633 576 return -EFAULT; 634 577 } 635 578 ··· 974 923 int off = xt_compat_target_offset(target); 975 924 u_int16_t tsize = t->u.user.target_size - off; 976 925 977 - if (copy_to_user(ct, t, sizeof(*ct)) || 978 - put_user(tsize, &ct->u.user.target_size) || 979 - copy_to_user(ct->u.user.name, t->u.kernel.target->name, 980 - strlen(t->u.kernel.target->name) + 1)) 926 + if (XT_OBJ_TO_USER(ct, t, target, tsize)) 981 927 return -EFAULT; 982 928 983 929 if (target->compat_to_user) { 984 930 if (target->compat_to_user((void __user *)ct->data, t->data)) 985 931 return -EFAULT; 986 932 } else { 987 - if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct))) 933 + if 
(XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) 988 934 return -EFAULT; 989 935 } 990 936
+8 -7
net/netfilter/xt_CT.c
··· 23 23 static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) 24 24 { 25 25 /* Previously seen (loopback)? Ignore. */ 26 - if (skb->nfct != NULL) 26 + if (skb->_nfct != 0) 27 27 return XT_CONTINUE; 28 28 29 29 /* special case the untracked ct : we want the percpu object */ 30 30 if (!ct) 31 31 ct = nf_ct_untracked_get(); 32 32 atomic_inc(&ct->ct_general.use); 33 - skb->nfct = &ct->ct_general; 34 - skb->nfctinfo = IP_CT_NEW; 33 + nf_ct_set(skb, ct, IP_CT_NEW); 35 34 36 35 return XT_CONTINUE; 37 36 } ··· 372 373 .name = "CT", 373 374 .family = NFPROTO_UNSPEC, 374 375 .targetsize = sizeof(struct xt_ct_target_info), 376 + .usersize = offsetof(struct xt_ct_target_info, ct), 375 377 .checkentry = xt_ct_tg_check_v0, 376 378 .destroy = xt_ct_tg_destroy_v0, 377 379 .target = xt_ct_target_v0, ··· 384 384 .family = NFPROTO_UNSPEC, 385 385 .revision = 1, 386 386 .targetsize = sizeof(struct xt_ct_target_info_v1), 387 + .usersize = offsetof(struct xt_ct_target_info, ct), 387 388 .checkentry = xt_ct_tg_check_v1, 388 389 .destroy = xt_ct_tg_destroy_v1, 389 390 .target = xt_ct_target_v1, ··· 396 395 .family = NFPROTO_UNSPEC, 397 396 .revision = 2, 398 397 .targetsize = sizeof(struct xt_ct_target_info_v1), 398 + .usersize = offsetof(struct xt_ct_target_info, ct), 399 399 .checkentry = xt_ct_tg_check_v2, 400 400 .destroy = xt_ct_tg_destroy_v1, 401 401 .target = xt_ct_target_v1, ··· 409 407 notrack_tg(struct sk_buff *skb, const struct xt_action_param *par) 410 408 { 411 409 /* Previously seen (loopback)? Ignore. */ 412 - if (skb->nfct != NULL) 410 + if (skb->_nfct != 0) 413 411 return XT_CONTINUE; 414 412 415 - skb->nfct = &nf_ct_untracked_get()->ct_general; 416 - skb->nfctinfo = IP_CT_NEW; 417 - nf_conntrack_get(skb->nfct); 413 + nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW); 414 + nf_conntrack_get(skb_nfct(skb)); 418 415 419 416 return XT_CONTINUE; 420 417 }
+1
net/netfilter/xt_RATEEST.c
··· 162 162 .checkentry = xt_rateest_tg_checkentry, 163 163 .destroy = xt_rateest_tg_destroy, 164 164 .targetsize = sizeof(struct xt_rateest_target_info), 165 + .usersize = offsetof(struct xt_rateest_target_info, est), 165 166 .me = THIS_MODULE, 166 167 }; 167 168
+2
net/netfilter/xt_TEE.c
··· 133 133 .family = NFPROTO_IPV4, 134 134 .target = tee_tg4, 135 135 .targetsize = sizeof(struct xt_tee_tginfo), 136 + .usersize = offsetof(struct xt_tee_tginfo, priv), 136 137 .checkentry = tee_tg_check, 137 138 .destroy = tee_tg_destroy, 138 139 .me = THIS_MODULE, ··· 145 144 .family = NFPROTO_IPV6, 146 145 .target = tee_tg6, 147 146 .targetsize = sizeof(struct xt_tee_tginfo), 147 + .usersize = offsetof(struct xt_tee_tginfo, priv), 148 148 .checkentry = tee_tg_check, 149 149 .destroy = tee_tg_destroy, 150 150 .me = THIS_MODULE,
+2
net/netfilter/xt_bpf.c
··· 110 110 .match = bpf_mt, 111 111 .destroy = bpf_mt_destroy, 112 112 .matchsize = sizeof(struct xt_bpf_info), 113 + .usersize = offsetof(struct xt_bpf_info, filter), 113 114 .me = THIS_MODULE, 114 115 }, 115 116 { ··· 121 120 .match = bpf_mt_v1, 122 121 .destroy = bpf_mt_destroy_v1, 123 122 .matchsize = sizeof(struct xt_bpf_info_v1), 123 + .usersize = offsetof(struct xt_bpf_info_v1, filter), 124 124 .me = THIS_MODULE, 125 125 }, 126 126 };
+1
net/netfilter/xt_cgroup.c
··· 122 122 .checkentry = cgroup_mt_check_v1, 123 123 .match = cgroup_mt_v1, 124 124 .matchsize = sizeof(struct xt_cgroup_info_v1), 125 + .usersize = offsetof(struct xt_cgroup_info_v1, priv), 125 126 .destroy = cgroup_mt_destroy_v1, 126 127 .me = THIS_MODULE, 127 128 .hooks = (1 << NF_INET_LOCAL_OUT) |
+3 -2
net/netfilter/xt_connlimit.c
··· 218 218 int diff; 219 219 bool addit; 220 220 221 - rbconn = container_of(*rbnode, struct xt_connlimit_rb, node); 221 + rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node); 222 222 223 223 parent = *rbnode; 224 224 diff = same_source_net(addr, mask, &rbconn->addr, family); ··· 398 398 struct rb_node *node; 399 399 400 400 while ((node = rb_first(r)) != NULL) { 401 - rbconn = container_of(node, struct xt_connlimit_rb, node); 401 + rbconn = rb_entry(node, struct xt_connlimit_rb, node); 402 402 403 403 rb_erase(node, r); 404 404 ··· 431 431 .checkentry = connlimit_mt_check, 432 432 .match = connlimit_mt, 433 433 .matchsize = sizeof(struct xt_connlimit_info), 434 + .usersize = offsetof(struct xt_connlimit_info, data), 434 435 .destroy = connlimit_mt_destroy, 435 436 .me = THIS_MODULE, 436 437 };
+4
net/netfilter/xt_hashlimit.c
··· 838 838 .family = NFPROTO_IPV4, 839 839 .match = hashlimit_mt_v1, 840 840 .matchsize = sizeof(struct xt_hashlimit_mtinfo1), 841 + .usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo), 841 842 .checkentry = hashlimit_mt_check_v1, 842 843 .destroy = hashlimit_mt_destroy_v1, 843 844 .me = THIS_MODULE, ··· 849 848 .family = NFPROTO_IPV4, 850 849 .match = hashlimit_mt, 851 850 .matchsize = sizeof(struct xt_hashlimit_mtinfo2), 851 + .usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo), 852 852 .checkentry = hashlimit_mt_check, 853 853 .destroy = hashlimit_mt_destroy, 854 854 .me = THIS_MODULE, ··· 861 859 .family = NFPROTO_IPV6, 862 860 .match = hashlimit_mt_v1, 863 861 .matchsize = sizeof(struct xt_hashlimit_mtinfo1), 862 + .usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo), 864 863 .checkentry = hashlimit_mt_check_v1, 865 864 .destroy = hashlimit_mt_destroy_v1, 866 865 .me = THIS_MODULE, ··· 872 869 .family = NFPROTO_IPV6, 873 870 .match = hashlimit_mt, 874 871 .matchsize = sizeof(struct xt_hashlimit_mtinfo2), 872 + .usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo), 875 873 .checkentry = hashlimit_mt_check, 876 874 .destroy = hashlimit_mt_destroy, 877 875 .me = THIS_MODULE,
+2
net/netfilter/xt_limit.c
··· 192 192 .compatsize = sizeof(struct compat_xt_rateinfo), 193 193 .compat_from_user = limit_mt_compat_from_user, 194 194 .compat_to_user = limit_mt_compat_to_user, 195 + #else 196 + .usersize = offsetof(struct xt_rateinfo, prev), 195 197 #endif 196 198 .me = THIS_MODULE, 197 199 };
+1 -2
net/netfilter/xt_pkttype.c
··· 33 33 else if (xt_family(par) == NFPROTO_IPV4 && 34 34 ipv4_is_multicast(ip_hdr(skb)->daddr)) 35 35 type = PACKET_MULTICAST; 36 - else if (xt_family(par) == NFPROTO_IPV6 && 37 - ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF) 36 + else if (xt_family(par) == NFPROTO_IPV6) 38 37 type = PACKET_MULTICAST; 39 38 else 40 39 type = PACKET_BROADCAST;
+1
net/netfilter/xt_quota.c
··· 73 73 .checkentry = quota_mt_check, 74 74 .destroy = quota_mt_destroy, 75 75 .matchsize = sizeof(struct xt_quota_info), 76 + .usersize = offsetof(struct xt_quota_info, master), 76 77 .me = THIS_MODULE, 77 78 }; 78 79
+1
net/netfilter/xt_rateest.c
··· 133 133 .checkentry = xt_rateest_mt_checkentry, 134 134 .destroy = xt_rateest_mt_destroy, 135 135 .matchsize = sizeof(struct xt_rateest_match_info), 136 + .usersize = offsetof(struct xt_rateest_match_info, est1), 136 137 .me = THIS_MODULE, 137 138 }; 138 139
+1
net/netfilter/xt_string.c
··· 77 77 .match = string_mt, 78 78 .destroy = string_mt_destroy, 79 79 .matchsize = sizeof(struct xt_string_info), 80 + .usersize = offsetof(struct xt_string_info, config), 80 81 .me = THIS_MODULE, 81 82 }; 82 83
+5 -7
net/openvswitch/conntrack.c
··· 460 460 461 461 ct = nf_ct_tuplehash_to_ctrack(h); 462 462 463 - skb->nfct = &ct->ct_general; 464 - skb->nfctinfo = ovs_ct_get_info(h); 463 + nf_ct_set(skb, ct, ovs_ct_get_info(h)); 465 464 return ct; 466 465 } 467 466 ··· 721 722 722 723 /* Associate skb with specified zone. */ 723 724 if (tmpl) { 724 - if (skb->nfct) 725 - nf_conntrack_put(skb->nfct); 725 + if (skb_nfct(skb)) 726 + nf_conntrack_put(skb_nfct(skb)); 726 727 nf_conntrack_get(&tmpl->ct_general); 727 - skb->nfct = &tmpl->ct_general; 728 - skb->nfctinfo = IP_CT_NEW; 728 + nf_ct_set(skb, tmpl, IP_CT_NEW); 729 729 } 730 730 731 731 err = nf_conntrack_in(net, info->family, ··· 818 820 if (err) 819 821 return err; 820 822 821 - ct = (struct nf_conn *)skb->nfct; 823 + ct = (struct nf_conn *)skb_nfct(skb); 822 824 if (ct) 823 825 nf_ct_deliver_cached_events(ct); 824 826 }
+1 -1
net/sched/cls_flow.c
··· 129 129 static u32 flow_get_nfct(const struct sk_buff *skb) 130 130 { 131 131 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 132 - return addr_fold(skb->nfct); 132 + return addr_fold(skb_nfct(skb)); 133 133 #else 134 134 return 0; 135 135 #endif