Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter/IPVS updates for net-next

The following patchset contains Netfilter/IPVS updates for net-next:

1) Inspect the reply packets coming from DR/TUN and refresh connection
state and timeout, from Longguang Yue and Julian Anastasov.

2) Series to add support for the inet ingress chain type in nf_tables.
====================

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+281 -75
+6
include/net/netfilter/nf_tables.h
··· 1081 1081 u8 *udata; 1082 1082 }; 1083 1083 1084 + static inline bool nft_base_chain_netdev(int family, u32 hooknum) 1085 + { 1086 + return family == NFPROTO_NETDEV || 1087 + (family == NFPROTO_INET && hooknum == NF_INET_INGRESS); 1088 + } 1089 + 1084 1090 void nft_register_chain_type(const struct nft_chain_type *); 1085 1091 void nft_unregister_chain_type(const struct nft_chain_type *); 1086 1092
+33
include/net/netfilter/nf_tables_ipv4.h
··· 53 53 nft_set_pktinfo_unspec(pkt, skb); 54 54 } 55 55 56 + static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt, 57 + struct sk_buff *skb) 58 + { 59 + struct iphdr *iph; 60 + u32 len, thoff; 61 + 62 + if (!pskb_may_pull(skb, sizeof(*iph))) 63 + return -1; 64 + 65 + iph = ip_hdr(skb); 66 + if (iph->ihl < 5 || iph->version != 4) 67 + goto inhdr_error; 68 + 69 + len = ntohs(iph->tot_len); 70 + thoff = iph->ihl * 4; 71 + if (skb->len < len) { 72 + __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS); 73 + return -1; 74 + } else if (len < thoff) { 75 + goto inhdr_error; 76 + } 77 + 78 + pkt->tprot_set = true; 79 + pkt->tprot = iph->protocol; 80 + pkt->xt.thoff = thoff; 81 + pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET; 82 + 83 + return 0; 84 + 85 + inhdr_error: 86 + __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS); 87 + return -1; 88 + } 56 89 #endif
+46
include/net/netfilter/nf_tables_ipv6.h
··· 70 70 nft_set_pktinfo_unspec(pkt, skb); 71 71 } 72 72 73 + static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt, 74 + struct sk_buff *skb) 75 + { 76 + #if IS_ENABLED(CONFIG_IPV6) 77 + unsigned int flags = IP6_FH_F_AUTH; 78 + unsigned short frag_off; 79 + unsigned int thoff = 0; 80 + struct inet6_dev *idev; 81 + struct ipv6hdr *ip6h; 82 + int protohdr; 83 + u32 pkt_len; 84 + 85 + if (!pskb_may_pull(skb, sizeof(*ip6h))) 86 + return -1; 87 + 88 + ip6h = ipv6_hdr(skb); 89 + if (ip6h->version != 6) 90 + goto inhdr_error; 91 + 92 + pkt_len = ntohs(ip6h->payload_len); 93 + if (pkt_len + sizeof(*ip6h) > skb->len) { 94 + idev = __in6_dev_get(nft_in(pkt)); 95 + __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS); 96 + return -1; 97 + } 98 + 99 + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); 100 + if (protohdr < 0) 101 + goto inhdr_error; 102 + 103 + pkt->tprot_set = true; 104 + pkt->tprot = protohdr; 105 + pkt->xt.thoff = thoff; 106 + pkt->xt.fragoff = frag_off; 107 + 108 + return 0; 109 + 110 + inhdr_error: 111 + idev = __in6_dev_get(nft_in(pkt)); 112 + __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INHDRERRORS); 113 + return -1; 114 + #else 115 + return -1; 116 + #endif 117 + } 118 + 73 119 #endif
+1
include/uapi/linux/netfilter.h
··· 45 45 NF_INET_FORWARD, 46 46 NF_INET_LOCAL_OUT, 47 47 NF_INET_POST_ROUTING, 48 + NF_INET_INGRESS, 48 49 NF_INET_NUMHOOKS 49 50 }; 50 51
+102 -25
net/netfilter/core.c
··· 282 282 return NULL; 283 283 return net->nf.hooks_bridge + hooknum; 284 284 #endif 285 + #ifdef CONFIG_NETFILTER_INGRESS 286 + case NFPROTO_INET: 287 + if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS)) 288 + return NULL; 289 + if (!dev || dev_net(dev) != net) { 290 + WARN_ON_ONCE(1); 291 + return NULL; 292 + } 293 + return &dev->nf_hooks_ingress; 294 + #endif 285 295 case NFPROTO_IPV4: 286 296 if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum)) 287 297 return NULL; ··· 321 311 return NULL; 322 312 } 323 313 314 + static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg, 315 + int hooknum) 316 + { 317 + #ifndef CONFIG_NETFILTER_INGRESS 318 + if (reg->hooknum == hooknum) 319 + return -EOPNOTSUPP; 320 + #endif 321 + if (reg->hooknum != hooknum || 322 + !reg->dev || dev_net(reg->dev) != net) 323 + return -EINVAL; 324 + 325 + return 0; 326 + } 327 + 328 + static inline bool nf_ingress_hook(const struct nf_hook_ops *reg, int pf) 329 + { 330 + if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) || 331 + (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS)) 332 + return true; 333 + 334 + return false; 335 + } 336 + 337 + static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf) 338 + { 339 + #ifdef CONFIG_JUMP_LABEL 340 + int hooknum; 341 + 342 + if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) { 343 + pf = NFPROTO_NETDEV; 344 + hooknum = NF_NETDEV_INGRESS; 345 + } else { 346 + hooknum = reg->hooknum; 347 + } 348 + static_key_slow_inc(&nf_hooks_needed[pf][hooknum]); 349 + #endif 350 + } 351 + 352 + static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf) 353 + { 354 + #ifdef CONFIG_JUMP_LABEL 355 + int hooknum; 356 + 357 + if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) { 358 + pf = NFPROTO_NETDEV; 359 + hooknum = NF_NETDEV_INGRESS; 360 + } else { 361 + hooknum = reg->hooknum; 362 + } 363 + static_key_slow_dec(&nf_hooks_needed[pf][hooknum]); 364 + #endif 365 + } 366 + 324 367 static int 
__nf_register_net_hook(struct net *net, int pf, 325 368 const struct nf_hook_ops *reg) 326 369 { 327 370 struct nf_hook_entries *p, *new_hooks; 328 371 struct nf_hook_entries __rcu **pp; 372 + int err; 329 373 330 - if (pf == NFPROTO_NETDEV) { 331 - #ifndef CONFIG_NETFILTER_INGRESS 332 - if (reg->hooknum == NF_NETDEV_INGRESS) 333 - return -EOPNOTSUPP; 334 - #endif 335 - if (reg->hooknum != NF_NETDEV_INGRESS || 336 - !reg->dev || dev_net(reg->dev) != net) 337 - return -EINVAL; 374 + switch (pf) { 375 + case NFPROTO_NETDEV: 376 + err = nf_ingress_check(net, reg, NF_NETDEV_INGRESS); 377 + if (err < 0) 378 + return err; 379 + break; 380 + case NFPROTO_INET: 381 + if (reg->hooknum != NF_INET_INGRESS) 382 + break; 383 + 384 + err = nf_ingress_check(net, reg, NF_INET_INGRESS); 385 + if (err < 0) 386 + return err; 387 + break; 338 388 } 339 389 340 390 pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev); ··· 415 345 416 346 hooks_validate(new_hooks); 417 347 #ifdef CONFIG_NETFILTER_INGRESS 418 - if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) 348 + if (nf_ingress_hook(reg, pf)) 419 349 net_inc_ingress_queue(); 420 350 #endif 421 - #ifdef CONFIG_JUMP_LABEL 422 - static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]); 423 - #endif 351 + nf_static_key_inc(reg, pf); 352 + 424 353 BUG_ON(p == new_hooks); 425 354 nf_hook_entries_free(p); 426 355 return 0; ··· 472 403 473 404 if (nf_remove_net_hook(p, reg)) { 474 405 #ifdef CONFIG_NETFILTER_INGRESS 475 - if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) 406 + if (nf_ingress_hook(reg, pf)) 476 407 net_dec_ingress_queue(); 477 408 #endif 478 - #ifdef CONFIG_JUMP_LABEL 479 - static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]); 480 - #endif 409 + nf_static_key_dec(reg, pf); 481 410 } else { 482 411 WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum); 483 412 } ··· 492 425 void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) 493 426 { 494 427 if (reg->pf == 
NFPROTO_INET) { 495 - __nf_unregister_net_hook(net, NFPROTO_IPV4, reg); 496 - __nf_unregister_net_hook(net, NFPROTO_IPV6, reg); 428 + if (reg->hooknum == NF_INET_INGRESS) { 429 + __nf_unregister_net_hook(net, NFPROTO_INET, reg); 430 + } else { 431 + __nf_unregister_net_hook(net, NFPROTO_IPV4, reg); 432 + __nf_unregister_net_hook(net, NFPROTO_IPV6, reg); 433 + } 497 434 } else { 498 435 __nf_unregister_net_hook(net, reg->pf, reg); 499 436 } ··· 522 451 int err; 523 452 524 453 if (reg->pf == NFPROTO_INET) { 525 - err = __nf_register_net_hook(net, NFPROTO_IPV4, reg); 526 - if (err < 0) 527 - return err; 454 + if (reg->hooknum == NF_INET_INGRESS) { 455 + err = __nf_register_net_hook(net, NFPROTO_INET, reg); 456 + if (err < 0) 457 + return err; 458 + } else { 459 + err = __nf_register_net_hook(net, NFPROTO_IPV4, reg); 460 + if (err < 0) 461 + return err; 528 462 529 - err = __nf_register_net_hook(net, NFPROTO_IPV6, reg); 530 - if (err < 0) { 531 - __nf_unregister_net_hook(net, NFPROTO_IPV4, reg); 532 - return err; 463 + err = __nf_register_net_hook(net, NFPROTO_IPV6, reg); 464 + if (err < 0) { 465 + __nf_unregister_net_hook(net, NFPROTO_IPV4, reg); 466 + return err; 467 + } 533 468 } 534 469 } else { 535 470 err = __nf_register_net_hook(net, reg->pf, reg);
+15 -3
net/netfilter/ipvs/ip_vs_conn.c
··· 402 402 { 403 403 unsigned int hash; 404 404 struct ip_vs_conn *cp, *ret=NULL; 405 + const union nf_inet_addr *saddr; 406 + __be16 sport; 405 407 406 408 /* 407 409 * Check for "full" addressed entries ··· 413 411 rcu_read_lock(); 414 412 415 413 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { 416 - if (p->vport == cp->cport && p->cport == cp->dport && 417 - cp->af == p->af && 414 + if (p->vport != cp->cport) 415 + continue; 416 + 417 + if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { 418 + sport = cp->vport; 419 + saddr = &cp->vaddr; 420 + } else { 421 + sport = cp->dport; 422 + saddr = &cp->daddr; 423 + } 424 + 425 + if (p->cport == sport && cp->af == p->af && 418 426 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && 419 - ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) && 427 + ip_vs_addr_equal(p->af, p->caddr, saddr) && 420 428 p->protocol == cp->protocol && 421 429 cp->ipvs == p->ipvs) { 422 430 if (!__ip_vs_conn_get(cp))
+7 -12
net/netfilter/ipvs/ip_vs_core.c
··· 875 875 unsigned int verdict = NF_DROP; 876 876 877 877 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) 878 - goto ignore_cp; 878 + goto after_nat; 879 879 880 880 /* Ensure the checksum is correct */ 881 881 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { ··· 901 901 if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum)) 902 902 goto out; 903 903 904 + after_nat: 904 905 /* do the statistics and put it back */ 905 906 ip_vs_out_stats(cp, skb); 906 907 ··· 910 909 ip_vs_notrack(skb); 911 910 else 912 911 ip_vs_update_conntrack(skb, cp, 0); 913 - 914 - ignore_cp: 915 912 verdict = NF_ACCEPT; 916 913 917 914 out: ··· 1275 1276 { 1276 1277 struct ip_vs_protocol *pp = pd->pp; 1277 1278 1279 + if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) 1280 + goto after_nat; 1281 + 1278 1282 IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet"); 1279 1283 1280 1284 if (skb_ensure_writable(skb, iph->len)) ··· 1318 1316 1319 1317 IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT"); 1320 1318 1319 + after_nat: 1321 1320 ip_vs_out_stats(cp, skb); 1322 1321 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd); 1323 1322 skb->ipvs_property = 1; ··· 1415 1412 cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto, 1416 1413 ipvs, af, skb, &iph); 1417 1414 1418 - if (likely(cp)) { 1419 - if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) 1420 - goto ignore_cp; 1415 + if (likely(cp)) 1421 1416 return handle_response(af, skb, pd, cp, &iph, hooknum); 1422 - } 1423 1417 1424 1418 /* Check for real-server-started requests */ 1425 1419 if (atomic_read(&ipvs->conn_out_counter)) { ··· 1475 1475 } 1476 1476 } 1477 1477 1478 - out: 1479 1478 IP_VS_DBG_PKT(12, af, pp, skb, iph.off, 1480 1479 "ip_vs_out: packet continues traversal as normal"); 1481 1480 return NF_ACCEPT; 1482 - 1483 - ignore_cp: 1484 - __ip_vs_conn_put(cp); 1485 - goto out; 1486 1481 } 1487 1482 1488 1483 /*
+4 -8
net/netfilter/nf_flow_table_core.c
··· 395 395 { 396 396 struct tcphdr *tcph; 397 397 398 - if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) || 399 - skb_try_make_writable(skb, thoff + sizeof(*tcph))) 398 + if (skb_try_make_writable(skb, thoff + sizeof(*tcph))) 400 399 return -1; 401 400 402 401 tcph = (void *)(skb_network_header(skb) + thoff); ··· 409 410 { 410 411 struct udphdr *udph; 411 412 412 - if (!pskb_may_pull(skb, thoff + sizeof(*udph)) || 413 - skb_try_make_writable(skb, thoff + sizeof(*udph))) 413 + if (skb_try_make_writable(skb, thoff + sizeof(*udph))) 414 414 return -1; 415 415 416 416 udph = (void *)(skb_network_header(skb) + thoff); ··· 447 449 struct flow_ports *hdr; 448 450 __be16 port, new_port; 449 451 450 - if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) || 451 - skb_try_make_writable(skb, thoff + sizeof(*hdr))) 452 + if (skb_try_make_writable(skb, thoff + sizeof(*hdr))) 452 453 return -1; 453 454 454 455 hdr = (void *)(skb_network_header(skb) + thoff); ··· 478 481 struct flow_ports *hdr; 479 482 __be16 port, new_port; 480 483 481 - if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) || 482 - skb_try_make_writable(skb, thoff + sizeof(*hdr))) 484 + if (skb_try_make_writable(skb, thoff + sizeof(*hdr))) 483 485 return -1; 484 486 485 487 hdr = (void *)(skb_network_header(skb) + thoff);
+26 -19
net/netfilter/nf_flow_table_ip.c
··· 25 25 if (proto != IPPROTO_TCP) 26 26 return 0; 27 27 28 - if (!pskb_may_pull(skb, thoff + sizeof(*tcph))) 29 - return -1; 30 - 31 28 tcph = (void *)(skb_network_header(skb) + thoff); 32 29 if (unlikely(tcph->fin || tcph->rst)) { 33 30 flow_offload_teardown(flow); ··· 39 42 { 40 43 struct tcphdr *tcph; 41 44 42 - if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) || 43 - skb_try_make_writable(skb, thoff + sizeof(*tcph))) 45 + if (skb_try_make_writable(skb, thoff + sizeof(*tcph))) 44 46 return -1; 45 47 46 48 tcph = (void *)(skb_network_header(skb) + thoff); ··· 53 57 { 54 58 struct udphdr *udph; 55 59 56 - if (!pskb_may_pull(skb, thoff + sizeof(*udph)) || 57 - skb_try_make_writable(skb, thoff + sizeof(*udph))) 60 + if (skb_try_make_writable(skb, thoff + sizeof(*udph))) 58 61 return -1; 59 62 60 63 udph = (void *)(skb_network_header(skb) + thoff); ··· 162 167 static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, 163 168 struct flow_offload_tuple *tuple) 164 169 { 170 + unsigned int thoff, hdrsize; 165 171 struct flow_ports *ports; 166 - unsigned int thoff; 167 172 struct iphdr *iph; 168 173 169 174 if (!pskb_may_pull(skb, sizeof(*iph))) ··· 176 181 unlikely(ip_has_options(thoff))) 177 182 return -1; 178 183 179 - if (iph->protocol != IPPROTO_TCP && 180 - iph->protocol != IPPROTO_UDP) 184 + switch (iph->protocol) { 185 + case IPPROTO_TCP: 186 + hdrsize = sizeof(struct tcphdr); 187 + break; 188 + case IPPROTO_UDP: 189 + hdrsize = sizeof(struct udphdr); 190 + break; 191 + default: 181 192 return -1; 193 + } 182 194 183 195 if (iph->ttl <= 1) 184 196 return -1; 185 197 186 198 thoff = iph->ihl * 4; 187 - if (!pskb_may_pull(skb, thoff + sizeof(*ports))) 199 + if (!pskb_may_pull(skb, thoff + hdrsize)) 188 200 return -1; 189 201 190 202 iph = ip_hdr(skb); ··· 317 315 { 318 316 struct tcphdr *tcph; 319 317 320 - if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) || 321 - skb_try_make_writable(skb, thoff + sizeof(*tcph))) 318 + if 
(skb_try_make_writable(skb, thoff + sizeof(*tcph))) 322 319 return -1; 323 320 324 321 tcph = (void *)(skb_network_header(skb) + thoff); ··· 333 332 { 334 333 struct udphdr *udph; 335 334 336 - if (!pskb_may_pull(skb, thoff + sizeof(*udph)) || 337 - skb_try_make_writable(skb, thoff + sizeof(*udph))) 335 + if (skb_try_make_writable(skb, thoff + sizeof(*udph))) 338 336 return -1; 339 337 340 338 udph = (void *)(skb_network_header(skb) + thoff); ··· 439 439 static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, 440 440 struct flow_offload_tuple *tuple) 441 441 { 442 + unsigned int thoff, hdrsize; 442 443 struct flow_ports *ports; 443 444 struct ipv6hdr *ip6h; 444 - unsigned int thoff; 445 445 446 446 if (!pskb_may_pull(skb, sizeof(*ip6h))) 447 447 return -1; 448 448 449 449 ip6h = ipv6_hdr(skb); 450 450 451 - if (ip6h->nexthdr != IPPROTO_TCP && 452 - ip6h->nexthdr != IPPROTO_UDP) 451 + switch (ip6h->nexthdr) { 452 + case IPPROTO_TCP: 453 + hdrsize = sizeof(struct tcphdr); 454 + break; 455 + case IPPROTO_UDP: 456 + hdrsize = sizeof(struct udphdr); 457 + break; 458 + default: 453 459 return -1; 460 + } 454 461 455 462 if (ip6h->hop_limit <= 1) 456 463 return -1; 457 464 458 465 thoff = sizeof(*ip6h); 459 - if (!pskb_may_pull(skb, thoff + sizeof(*ports))) 466 + if (!pskb_may_pull(skb, thoff + hdrsize)) 460 467 return -1; 461 468 462 469 ip6h = ipv6_hdr(skb);
+7 -7
net/netfilter/nf_tables_api.c
··· 206 206 if (basechain->type->ops_register) 207 207 return basechain->type->ops_register(net, ops); 208 208 209 - if (table->family == NFPROTO_NETDEV) 209 + if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) 210 210 return nft_netdev_register_hooks(net, &basechain->hook_list); 211 211 212 212 return nf_register_net_hook(net, &basechain->ops); ··· 228 228 if (basechain->type->ops_unregister) 229 229 return basechain->type->ops_unregister(net, ops); 230 230 231 - if (table->family == NFPROTO_NETDEV) 231 + if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) 232 232 nft_netdev_unregister_hooks(net, &basechain->hook_list); 233 233 else 234 234 nf_unregister_net_hook(net, &basechain->ops); ··· 1381 1381 if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority))) 1382 1382 goto nla_put_failure; 1383 1383 1384 - if (family == NFPROTO_NETDEV) { 1384 + if (nft_base_chain_netdev(family, ops->hooknum)) { 1385 1385 nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS); 1386 1386 list_for_each_entry(hook, &basechain->hook_list, list) { 1387 1387 if (!first) ··· 1685 1685 if (nft_is_base_chain(chain)) { 1686 1686 struct nft_base_chain *basechain = nft_base_chain(chain); 1687 1687 1688 - if (ctx->family == NFPROTO_NETDEV) { 1688 + if (nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) { 1689 1689 list_for_each_entry_safe(hook, next, 1690 1690 &basechain->hook_list, list) { 1691 1691 list_del_rcu(&hook->list); ··· 1877 1877 hook->type = type; 1878 1878 1879 1879 INIT_LIST_HEAD(&hook->list); 1880 - if (family == NFPROTO_NETDEV) { 1880 + if (nft_base_chain_netdev(family, hook->num)) { 1881 1881 err = nft_chain_parse_netdev(net, ha, &hook->list); 1882 1882 if (err < 0) { 1883 1883 module_put(type->owner); ··· 1944 1944 INIT_LIST_HEAD(&basechain->hook_list); 1945 1945 chain = &basechain->chain; 1946 1946 1947 - if (family == NFPROTO_NETDEV) { 1947 + if (nft_base_chain_netdev(family, hook->num)) { 1948 1948 list_splice_init(&hook->list, 
&basechain->hook_list); 1949 1949 list_for_each_entry(h, &basechain->hook_list, list) 1950 1950 nft_basechain_hook_init(&h->ops, family, hook, chain); ··· 2168 2168 return -EEXIST; 2169 2169 } 2170 2170 2171 - if (ctx->family == NFPROTO_NETDEV) { 2171 + if (nft_base_chain_netdev(ctx->family, hook.num)) { 2172 2172 if (!nft_hook_list_equal(&basechain->hook_list, 2173 2173 &hook.list)) { 2174 2174 nft_chain_release_hook(&hook);
+34 -1
net/netfilter/nft_chain_filter.c
··· 161 161 return nft_do_chain(&pkt, priv); 162 162 } 163 163 164 + static unsigned int nft_do_chain_inet_ingress(void *priv, struct sk_buff *skb, 165 + const struct nf_hook_state *state) 166 + { 167 + struct nf_hook_state ingress_state = *state; 168 + struct nft_pktinfo pkt; 169 + 170 + switch (skb->protocol) { 171 + case htons(ETH_P_IP): 172 + /* Original hook is NFPROTO_NETDEV and NF_NETDEV_INGRESS. */ 173 + ingress_state.pf = NFPROTO_IPV4; 174 + ingress_state.hook = NF_INET_INGRESS; 175 + nft_set_pktinfo(&pkt, skb, &ingress_state); 176 + 177 + if (nft_set_pktinfo_ipv4_ingress(&pkt, skb) < 0) 178 + return NF_DROP; 179 + break; 180 + case htons(ETH_P_IPV6): 181 + ingress_state.pf = NFPROTO_IPV6; 182 + ingress_state.hook = NF_INET_INGRESS; 183 + nft_set_pktinfo(&pkt, skb, &ingress_state); 184 + 185 + if (nft_set_pktinfo_ipv6_ingress(&pkt, skb) < 0) 186 + return NF_DROP; 187 + break; 188 + default: 189 + return NF_ACCEPT; 190 + } 191 + 192 + return nft_do_chain(&pkt, priv); 193 + } 194 + 164 195 static const struct nft_chain_type nft_chain_filter_inet = { 165 196 .name = "filter", 166 197 .type = NFT_CHAIN_T_DEFAULT, 167 198 .family = NFPROTO_INET, 168 - .hook_mask = (1 << NF_INET_LOCAL_IN) | 199 + .hook_mask = (1 << NF_INET_INGRESS) | 200 + (1 << NF_INET_LOCAL_IN) | 169 201 (1 << NF_INET_LOCAL_OUT) | 170 202 (1 << NF_INET_FORWARD) | 171 203 (1 << NF_INET_PRE_ROUTING) | 172 204 (1 << NF_INET_POST_ROUTING), 173 205 .hooks = { 206 + [NF_INET_INGRESS] = nft_do_chain_inet_ingress, 174 207 [NF_INET_LOCAL_IN] = nft_do_chain_inet, 175 208 [NF_INET_LOCAL_OUT] = nft_do_chain_inet, 176 209 [NF_INET_FORWARD] = nft_do_chain_inet,