/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>

#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

#define NFQNL_QMAX_DEFAULT 1024

/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
 * includes the header length. Thus, the maximum packet length that we
 * support is 65531 bytes. We send truncated packets if the specified length
 * is larger than that. Userspace can check for presence of NFQA_CAP_LEN
 * attribute to detect truncation.
 */
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
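/* For reference, the arithmetic behind the 65531 figure quoted above:
 * NLA_HDRLEN is the aligned size of struct nlattr (4 bytes), so
 * NFQNL_MAX_COPY_RANGE = 0xffff - 4 = 65531.
 */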
struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        u32 peer_portid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
        u_int32_t flags;                        /* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
        spinlock_t lock ____cacheline_aligned_in_smp;
        unsigned int queue_total;
        unsigned int id_sequence;               /* 'sequence' of pkt ids */
        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static unsigned int nfnl_queue_net_id __read_mostly;

#define INSTANCE_BUCKETS 16
struct nfnl_queue_net {
        spinlock_t instances_lock;
        struct hlist_head instance_table[INSTANCE_BUCKETS];
};

static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
        return net_generic(net, nfnl_queue_net_id);
}

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
        struct hlist_head *head;
        struct nfqnl_instance *inst;

        head = &q->instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&q->instances_lock);
        if (instance_lookup(q, queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = NFQNL_MAX_COPY_RANGE;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);

        spin_unlock(&q->instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&q->instances_lock);
        return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
        spin_lock(&q->instances_lock);
        __instance_destroy(inst);
        spin_unlock(&q->instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_del(&entry->list);
        queue->queue_total--;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry)
                __dequeue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);

        return entry;
}
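/* Hand a packet back to the stack with the given verdict. For verdicts
 * that let the packet continue (NF_ACCEPT, NF_REPEAT, NF_STOP), conntrack
 * is first given a chance to pick up any changes userspace made to the
 * packet; if that update fails, the verdict is downgraded to NF_DROP.
 */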
static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        struct nf_ct_hook *ct_hook;
        int err;

        if (verdict == NF_ACCEPT ||
            verdict == NF_REPEAT ||
            verdict == NF_STOP) {
                rcu_read_lock();
                ct_hook = rcu_dereference(nf_ct_hook);
                if (ct_hook) {
                        err = ct_hook->update(entry->state.net, entry->skb);
                        if (err < 0)
                                verdict = NF_DROP;
                }
                rcu_read_unlock();
        }
        nf_reinject(entry, verdict);
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nfqnl_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
                      bool csum_verify)
{
        __u32 flags = 0;

        if (packet->ip_summed == CHECKSUM_PARTIAL)
                flags = NFQA_SKB_CSUMNOTREADY;
        else if (csum_verify)
                flags = NFQA_SKB_CSUM_NOTVERIFIED;

        if (skb_is_gso(packet))
                flags |= NFQA_SKB_GSO;

        return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}

static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
        const struct cred *cred;

        if (!sk_fullsock(sk))
                return 0;

        read_lock_bh(&sk->sk_callback_lock);
        if (sk->sk_socket && sk->sk_socket->file) {
                cred = sk->sk_socket->file->f_cred;
                if (nla_put_be32(skb, NFQA_UID,
                    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
                        goto nla_put_failure;
                if (nla_put_be32(skb, NFQA_GID,
                    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
                        goto nla_put_failure;
        }
        read_unlock_bh(&sk->sk_callback_lock);
        return 0;

nla_put_failure:
        read_unlock_bh(&sk->sk_callback_lock);
        return -1;
}

static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
{
        u32 seclen = 0;
#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
        if (!skb || !sk_fullsock(skb->sk))
                return 0;

        read_lock_bh(&skb->sk->sk_callback_lock);

        if (skb->secmark)
                security_secid_to_secctx(skb->secmark, secdata, &seclen);

        read_unlock_bh(&skb->sk->sk_callback_lock);
#endif
        return seclen;
}

static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
{
        struct sk_buff *entskb = entry->skb;
        u32 nlalen = 0;

        if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
                return 0;

        if (skb_vlan_tag_present(entskb))
                nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
                                         nla_total_size(sizeof(__be16)));

        if (entskb->network_header > entskb->mac_header)
                nlalen += nla_total_size((entskb->network_header -
                                          entskb->mac_header));

        return nlalen;
}

static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
{
        struct sk_buff *entskb = entry->skb;

        if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
                return 0;

        if (skb_vlan_tag_present(entskb)) {
                struct nlattr *nest;

                nest = nla_nest_start(skb, NFQA_VLAN);
                if (!nest)
                        goto nla_put_failure;

                if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
                    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
                        goto nla_put_failure;

                nla_nest_end(skb, nest);
        }

        if (entskb->mac_header < entskb->network_header) {
                int len = (int)(entskb->network_header - entskb->mac_header);

                if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -1;
}
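/* Build the NFQNL_MSG_PACKET netlink message for one queued packet. This
 * runs in two phases: first the worst-case attribute sizes are summed so a
 * large enough skb can be allocated up front, then the attributes are
 * appended. In NFQNL_COPY_PACKET mode only the first hlen bytes of payload
 * end up in the message's linear area; the remainder is attached without
 * copying via skb_zerocopy().
 */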
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
                           __be32 **packet_id_ptr)
{
        size_t size;
        size_t data_len = 0, cap_len = 0;
        unsigned int hlen = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
        struct nfqnl_msg_packet_hdr *pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
        enum ip_conntrack_info uninitialized_var(ctinfo);
        struct nfnl_ct_hook *nfnl_ct;
        bool csum_verify;
        char *secdata = NULL;
        u32 seclen = 0;

        size = nlmsg_total_size(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(u_int32_t))     /* skbinfo */
                + nla_total_size(sizeof(u_int32_t));    /* cap_len */

        if (entskb->tstamp)
                size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        size += nfqnl_get_bridge_size(entry);

        if (entry->state.hook <= NF_INET_FORWARD ||
           (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
                csum_verify = !skb_csum_unnecessary(entskb);
        else
                csum_verify = false;

        outdev = entry->state.out;

        switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (!(queue->flags & NFQA_CFG_F_GSO) &&
                    entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb))
                        return NULL;

                data_len = READ_ONCE(queue->copy_range);
                if (data_len > entskb->len)
                        data_len = entskb->len;

                hlen = skb_zerocopy_headlen(entskb);
                hlen = min_t(unsigned int, hlen, data_len);
                size += sizeof(struct nlattr) + hlen;
                cap_len = entskb->len;
                break;
        }

        nfnl_ct = rcu_dereference(nfnl_ct_hook);

        if (queue->flags & NFQA_CFG_F_CONNTRACK) {
                if (nfnl_ct != NULL) {
                        ct = nfnl_ct->get_ct(entskb, &ctinfo);
                        if (ct != NULL)
                                size += nfnl_ct->build_size(ct);
                }
        }

        if (queue->flags & NFQA_CFG_F_UID_GID) {
                size += (nla_total_size(sizeof(u_int32_t))      /* uid */
                        + nla_total_size(sizeof(u_int32_t)));   /* gid */
        }

        if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
                seclen = nfqnl_get_sk_secctx(entskb, &secdata);
                if (seclen)
                        size += nla_total_size(seclen);
        }

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb) {
                skb_tx_error(entskb);
                goto nlmsg_failure;
        }

        nlh = nlmsg_put(skb, 0, 0,
                        nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
                        sizeof(struct nfgenmsg), 0);
        if (!nlh) {
                skb_tx_error(entskb);
                kfree_skb(skb);
                goto nlmsg_failure;
        }
        nfmsg = nlmsg_data(nlh);
        nfmsg->nfgen_family = entry->state.pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
        pmsg = nla_data(nla);
        pmsg->hw_protocol = entskb->protocol;
        pmsg->hook = entry->state.hook;
        *packet_id_ptr = &pmsg->packet_id;

        indev = entry->state.in;
        if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->state.pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(indev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        int physinif;

                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(indev->ifindex)))
                                goto nla_put_failure;

                        physinif = nf_bridge_get_physinif(entskb);
                        if (physinif &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(physinif)))
                                goto nla_put_failure;
                }
#endif
        }

        if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->state.pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(outdev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        int physoutif;

                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(outdev->ifindex)))
                                goto nla_put_failure;

                        physoutif = nf_bridge_get_physoutif(entskb);
                        if (physoutif &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(physoutif)))
                                goto nla_put_failure;
                }
#endif
        }

        if (entskb->mark &&
            nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
                goto nla_put_failure;

        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
                int len;

                memset(&phw, 0, sizeof(phw));
                len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
                                goto nla_put_failure;
                }
        }

        if (nfqnl_put_bridge(entry, skb) < 0)
                goto nla_put_failure;

        if (entry->state.hook <= NF_INET_FORWARD && entskb->tstamp) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);

                ts.sec = cpu_to_be64(kts.tv_sec);
                ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

                if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
                        goto nla_put_failure;
        }

        if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
            nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
                goto nla_put_failure;

        if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
                goto nla_put_failure;

        if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
                goto nla_put_failure;

        if (cap_len > data_len &&
            nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
                goto nla_put_failure;

        if (nfqnl_put_packet_info(skb, entskb, csum_verify))
                goto nla_put_failure;

        if (data_len) {
                struct nlattr *nla;

                if (skb_tailroom(skb) < sizeof(*nla) + hlen)
                        goto nla_put_failure;

                nla = skb_put(skb, sizeof(*nla));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = nla_attr_size(data_len);

                if (skb_zerocopy(skb, entskb, data_len, hlen))
                        goto nla_put_failure;
        }

        nlh->nlmsg_len = skb->len;
        if (seclen)
                security_release_secctx(secdata, seclen);
        return skb;

nla_put_failure:
        skb_tx_error(entskb);
        kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
nlmsg_failure:
        if (seclen)
                security_release_secctx(secdata, seclen);
        return NULL;
}
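/* A conntrack that is dying but was never confirmed indicates conntrack is
 * being torn down underneath us (e.g. on netns destruction); queueing such
 * a packet to userspace would let it outlive its conntrack, so it is
 * dropped instead.
 */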
static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
        const struct nf_conn *ct = (void *)skb_nfct(entry->skb);

        if (ct && ((ct->status & flags) == IPS_DYING))
                return true;
#endif
        return false;
}

static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
                       struct nf_queue_entry *entry)
{
        struct sk_buff *nskb;
        int err = -ENOBUFS;
        __be32 *packet_id_ptr;
        int failopen = 0;

        nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
        }
        spin_lock_bh(&queue->lock);

        if (nf_ct_drop_unconfirmed(entry))
                goto err_out_free_nskb;

        if (queue->queue_total >= queue->queue_maxlen) {
                if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
                        failopen = 1;
                        err = 0;
                } else {
                        queue->queue_dropped++;
                        net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
                                             queue->queue_total);
                }
                goto err_out_free_nskb;
        }
        entry->id = ++queue->id_sequence;
        *packet_id_ptr = htonl(entry->id);

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
        if (err < 0) {
                if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
                        failopen = 1;
                        err = 0;
                } else {
                        queue->queue_user_dropped++;
                }
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
        if (failopen)
                nfqnl_reinject(entry, NF_ACCEPT);
err_out:
        return err;
}

static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
        struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
        if (entry)
                nf_queue_entry_get_refs(entry);
        return entry;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
        if (nf_bridge_info_get(skb))
                __skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
        if (nf_bridge_info_get(skb))
                __skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif

static void free_entry(struct nf_queue_entry *entry)
{
        nf_queue_entry_release_refs(entry);
        kfree(entry);
}
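/* Queue one segment of a software-segmented GSO packet. Each segment gets
 * its own duplicated nf_queue_entry (and therefore its own packet id); the
 * last segment reuses the original entry so nothing is leaked once all
 * segments have been handed to userspace.
 */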
static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
                           struct sk_buff *skb, struct nf_queue_entry *entry)
{
        int ret = -ENOMEM;
        struct nf_queue_entry *entry_seg;

        nf_bridge_adjust_segmented_data(skb);

        if (skb->next == NULL) { /* last packet, no need to copy entry */
                struct sk_buff *gso_skb = entry->skb;
                entry->skb = skb;
                ret = __nfqnl_enqueue_packet(net, queue, entry);
                if (ret)
                        entry->skb = gso_skb;
                return ret;
        }

        skb_mark_not_on_list(skb);

        entry_seg = nf_queue_entry_dup(entry);
        if (entry_seg) {
                entry_seg->skb = skb;
                ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
                if (ret)
                        free_entry(entry_seg);
        }
        return ret;
}

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        unsigned int queued;
        struct nfqnl_instance *queue;
        struct sk_buff *skb, *segs;
        int err = -ENOBUFS;
        struct net *net = entry->state.net;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        /* rcu_read_lock()ed by nf_hook_thresh */
        queue = instance_lookup(q, queuenum);
        if (!queue)
                return -ESRCH;

        if (queue->copy_mode == NFQNL_COPY_NONE)
                return -EINVAL;

        skb = entry->skb;

        switch (entry->state.pf) {
        case NFPROTO_IPV4:
                skb->protocol = htons(ETH_P_IP);
                break;
        case NFPROTO_IPV6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        }

        if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
                return __nfqnl_enqueue_packet(net, queue, entry);

        nf_bridge_adjust_skb_data(skb);
        segs = skb_gso_segment(skb, 0);
        /* Does not use PTR_ERR to limit the number of error codes that can be
         * returned by nf_queue. For instance, callers rely on -ESRCH to
         * mean 'ignore this hook'.
         */
        if (IS_ERR_OR_NULL(segs))
                goto out_err;
        queued = 0;
        err = 0;
        do {
                struct sk_buff *nskb = segs->next;
                if (err == 0)
                        err = __nfqnl_enqueue_packet_gso(net, queue,
                                                         segs, entry);
                if (err == 0)
                        queued++;
                else
                        kfree_skb(segs);
                segs = nskb;
        } while (segs);

        if (queued) {
                if (err) /* some segments are already queued */
                        free_entry(entry);
                kfree_skb(skb);
                return 0;
        }
 out_err:
        nf_bridge_adjust_segmented_data(skb);
        return err;
}
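/* Replace the queued packet's payload with data from a verdict message.
 * A negative diff trims the skb, a positive one grows it (reallocating if
 * tailroom is short). After mangling, ip_summed is reset to CHECKSUM_NONE
 * so stale checksum offload state is not trusted on reinjection.
 */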
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
        struct sk_buff *nskb;

        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb)
                                return -ENOMEM;
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
                        queue->copy_range = NFQNL_MAX_COPY_RANGE;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;

        }
        spin_unlock_bh(&queue->lock);

        return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        int physinif, physoutif;

        physinif = nf_bridge_get_physinif(entry->skb);
        physoutif = nf_bridge_get_physoutif(entry->skb);

        if (physinif == ifindex || physoutif == ifindex)
                return 1;
#endif
        if (entry->state.in)
                if (entry->state.in->ifindex == ifindex)
                        return 1;
        if (entry->state.out)
                if (entry->state.out->ifindex == ifindex)
                        return 1;

        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
        int i;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct nfqnl_instance *inst;
                struct hlist_head *head = &q->instance_table[i];

                hlist_for_each_entry_rcu(inst, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev_net(dev), dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call = nfqnl_rcv_dev_event,
};
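/* Flush every instance of a netns when its netfilter hooks go away, and
 * destroy all instances owned by a netlink portid when the owning socket
 * is released (NETLINK_URELEASE).
 */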
static void nfqnl_nf_hook_drop(struct net *net)
{
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
        int i;

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct nfqnl_instance *inst;
                struct hlist_head *head = &q->instance_table[i];

                hlist_for_each_entry_rcu(inst, head, hlist)
                        nfqnl_flush(inst, NULL, 0);
        }
}

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;
        struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this portid */
                spin_lock(&q->instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &q->instance_table[i];

                        hlist_for_each_entry_safe(inst, t2, head, hlist) {
                                if (n->portid == inst->peer_portid)
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&q->instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
        [NFQA_VLAN_TCI]         = { .type = NLA_U16},
        [NFQA_VLAN_PROTO]       = { .type = NLA_U16},
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
        [NFQA_CT]               = { .type = NLA_UNSPEC },
        [NFQA_EXP]              = { .type = NLA_UNSPEC },
        [NFQA_VLAN]             = { .type = NLA_NESTED },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
};

static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
{
        struct nfqnl_instance *queue;

        queue = instance_lookup(q, queue_num);
        if (!queue)
                return ERR_PTR(-ENODEV);

        if (queue->peer_portid != nlportid)
                return ERR_PTR(-EPERM);

        return queue;
}

static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
        struct nfqnl_msg_verdict_hdr *vhdr;
        unsigned int verdict;

        if (!nfqa[NFQA_VERDICT_HDR])
                return NULL;

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
        if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
                return NULL;
        return vhdr;
}
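/* Wraparound-safe "id is after max" test on the 32-bit packet id sequence,
 * the same trick used by time_after(). E.g. for id = 2 and max = 0xfffffffe,
 * (int)(2 - 0xfffffffe) == 4 > 0, so id 2 is correctly treated as newer
 * even though the counter has wrapped.
 */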
static int nfq_id_after(unsigned int id, unsigned int max)
{
        return (int)(id - max) > 0;
}

static int nfqnl_recv_verdict_batch(struct net *net, struct sock *ctnl,
                                    struct sk_buff *skb,
                                    const struct nlmsghdr *nlh,
                                    const struct nlattr * const nfqa[],
                                    struct netlink_ext_ack *extack)
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct nf_queue_entry *entry, *tmp;
        unsigned int verdict, maxid;
        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        LIST_HEAD(batch_list);
        u16 queue_num = ntohs(nfmsg->res_id);
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        queue = verdict_instance_lookup(q, queue_num,
                                        NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);
        maxid = ntohl(vhdr->id);

        spin_lock_bh(&queue->lock);

        list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
                if (nfq_id_after(entry->id, maxid))
                        break;
                __dequeue_entry(queue, entry);
                list_add_tail(&entry->list, &batch_list);
        }

        spin_unlock_bh(&queue->lock);

        if (list_empty(&batch_list))
                return -ENOENT;

        list_for_each_entry_safe(entry, tmp, &batch_list, list) {
                if (nfqa[NFQA_MARK])
                        entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

                nfqnl_reinject(entry, verdict);
        }
        return 0;
}

static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
                                      const struct nlmsghdr *nlh,
                                      const struct nlattr * const nfqa[],
                                      struct nf_queue_entry *entry,
                                      enum ip_conntrack_info *ctinfo)
{
        struct nf_conn *ct;

        ct = nfnl_ct->get_ct(entry->skb, ctinfo);
        if (ct == NULL)
                return NULL;

        if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
                return NULL;

        if (nfqa[NFQA_EXP])
                nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
                                       NETLINK_CB(entry->skb).portid,
                                       nlmsg_report(nlh));
        return ct;
}

static int nfqa_parse_bridge(struct nf_queue_entry *entry,
                             const struct nlattr * const nfqa[])
{
        if (nfqa[NFQA_VLAN]) {
                struct nlattr *tb[NFQA_VLAN_MAX + 1];
                int err;

                err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
                                                  nfqa[NFQA_VLAN],
                                                  nfqa_vlan_policy, NULL);
                if (err < 0)
                        return err;

                if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
                        return -EINVAL;

                __vlan_hwaccel_put_tag(entry->skb,
                                       nla_get_be16(tb[NFQA_VLAN_PROTO]),
                                       ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
        }

        if (nfqa[NFQA_L2HDR]) {
                int mac_header_len = entry->skb->network_header -
                                     entry->skb->mac_header;

                if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
                        return -EINVAL;
                else if (mac_header_len > 0)
                        memcpy(skb_mac_header(entry->skb),
                               nla_data(nfqa[NFQA_L2HDR]),
                               mac_header_len);
        }

        return 0;
}
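/* Handle a single-packet NFQNL_MSG_VERDICT: look up the entry by packet id,
 * optionally update its conntrack, bridge metadata, payload and mark from
 * the message's attributes, then reinject it with the requested verdict.
 */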
static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
                              struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nfqa[],
                              struct netlink_ext_ack *extack)
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;
        enum ip_conntrack_info uninitialized_var(ctinfo);
        struct nfnl_ct_hook *nfnl_ct;
        struct nf_conn *ct = NULL;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
        int err;

        queue = verdict_instance_lookup(q, queue_num,
                                        NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL)
                return -ENOENT;

        /* rcu lock already held from nfnl->call_rcu. */
        nfnl_ct = rcu_dereference(nfnl_ct_hook);

        if (nfqa[NFQA_CT]) {
                if (nfnl_ct != NULL)
                        ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
        }

        if (entry->state.pf == PF_BRIDGE) {
                err = nfqa_parse_bridge(entry, nfqa);
                if (err < 0)
                        return err;
        }

        if (nfqa[NFQA_PAYLOAD]) {
                u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
                int diff = payload_len - entry->skb->len;

                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 payload_len, entry, diff) < 0)
                        verdict = NF_DROP;

                if (ct && diff)
                        nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
        }

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nfqnl_reinject(entry, verdict);
        return 0;
}

static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const nfqa[],
                             struct netlink_ext_ack *extack)
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
        [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
        [NFQA_CFG_MASK]         = { .type = NLA_U32 },
        [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
};

static const struct nf_queue_handler nfqh = {
        .outfn          = nfqnl_enqueue_packet,
        .nf_hook_drop   = nfqnl_nf_hook_drop,
};
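/* Handle NFQNL_MSG_CONFIG. Userspace typically sends NFQNL_CFG_CMD_BIND to
 * create a queue instance owned by its portid, then NFQA_CFG_PARAMS to set
 * the copy mode and range, and optionally NFQA_CFG_FLAGS together with
 * NFQA_CFG_MASK so that only the flags named in the mask are changed.
 */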
static int nfqnl_recv_config(struct net *net, struct sock *ctnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const nfqa[],
                             struct netlink_ext_ack *extack)
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
        __u32 flags = 0, mask = 0;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Obsolete commands without queue context */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND: return 0;
                case NFQNL_CFG_CMD_PF_UNBIND: return 0;
                }
        }

        /* Check if we support these flags in first place, dependencies should
         * be there too not to break atomicity.
         */
        if (nfqa[NFQA_CFG_FLAGS]) {
                if (!nfqa[NFQA_CFG_MASK]) {
                        /* A mask is needed to specify which flags are being
                         * changed.
                         */
                        return -EINVAL;
                }

                flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
                mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

                if (flags >= NFQA_CFG_F_MAX)
                        return -EOPNOTSUPP;

#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
                if (flags & mask & NFQA_CFG_F_SECCTX)
                        return -EOPNOTSUPP;
#endif
                if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
                    !rcu_access_pointer(nfnl_ct_hook)) {
#ifdef CONFIG_MODULES
                        nfnl_unlock(NFNL_SUBSYS_QUEUE);
                        request_module("ip_conntrack_netlink");
                        nfnl_lock(NFNL_SUBSYS_QUEUE);
                        if (rcu_access_pointer(nfnl_ct_hook))
                                return -EAGAIN;
#endif
                        return -EOPNOTSUPP;
                }
        }

        rcu_read_lock();
        queue = instance_lookup(q, queue_num);
        if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(q, queue_num,
                                                NETLINK_CB(skb).portid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(q, queue);
                        goto err_out_unlock;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        goto err_out_unlock;
                }
        }

        if (!queue) {
                ret = -ENODEV;
                goto err_out_unlock;
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params =
                        nla_data(nfqa[NFQA_CFG_PARAMS]);

                nfqnl_set_mode(queue, params->copy_mode,
                               ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);

                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

        if (nfqa[NFQA_CFG_FLAGS]) {
                spin_lock_bh(&queue->lock);
                queue->flags &= ~mask;
                queue->flags |= flags & mask;
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call_rcu = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call_rcu = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
        [NFQNL_MSG_VERDICT_BATCH] = { .call_rcu = nfqnl_recv_verdict_batch,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};
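/* /proc/net/netfilter/nfnetlink_queue shows one line per instance: queue
 * number, peer portid, packets currently queued, copy mode, copy range,
 * queue-full drops, failed-delivery drops, the last packet id handed out,
 * and a constant 1 (see seq_show() below).
 */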
#ifdef CONFIG_PROC_FS
struct iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;
        struct net *net;
        struct nfnl_queue_net *q;

        if (!st)
                return NULL;

        net = seq_file_net(seq);
        q = nfnl_queue_pernet(net);
        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&q->instance_table[st->bucket]))
                        return q->instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);

        h = h->next;
        while (!h) {
                struct nfnl_queue_net *q;

                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                q = nfnl_queue_pernet(net);
                h = q->instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
        __acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
        spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
        return get_idx(s, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
        __releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
        spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
                   inst->queue_num,
                   inst->peer_portid, inst->queue_total,
                   inst->copy_mode, inst->copy_range,
                   inst->queue_dropped, inst->queue_user_dropped,
                   inst->id_sequence, 1);
        return 0;
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};
#endif /* PROC_FS */

static int __net_init nfnl_queue_net_init(struct net *net)
{
        unsigned int i;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&q->instance_table[i]);

        spin_lock_init(&q->instances_lock);

#ifdef CONFIG_PROC_FS
        if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
                             &nfqnl_seq_ops, sizeof(struct iter_state)))
                return -ENOMEM;
#endif
        nf_register_queue_handler(net, &nfqh);
        return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
        unsigned int i;

        nf_unregister_queue_handler(net);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
        for (i = 0; i < INSTANCE_BUCKETS; i++)
                WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
}

static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
{
        synchronize_rcu();
}

static struct pernet_operations nfnl_queue_net_ops = {
        .init           = nfnl_queue_net_init,
        .exit           = nfnl_queue_net_exit,
        .exit_batch     = nfnl_queue_net_exit_batch,
        .id             = &nfnl_queue_net_id,
        .size           = sizeof(struct nfnl_queue_net),
};
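/* Module init registers, in order: per-netns state (which also registers
 * the queue handler), the netlink release notifier, the nfnetlink
 * subsystem, and the netdevice notifier. The error paths and module exit
 * unwind in reverse order.
 */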
static int __init nfnetlink_queue_init(void)
{
        int status;

        status = register_pernet_subsys(&nfnl_queue_net_ops);
        if (status < 0) {
                pr_err("failed to register pernet ops\n");
                goto out;
        }

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                pr_err("failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

        status = register_netdevice_notifier(&nfqnl_dev_notifier);
        if (status < 0) {
                pr_err("failed to register netdevice notifier\n");
                goto cleanup_netlink_subsys;
        }

        return status;

cleanup_netlink_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        unregister_pernet_subsys(&nfnl_queue_net_ops);
out:
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        unregister_pernet_subsys(&nfnl_queue_net_ops);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);