Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking updates from David Miller:

1) UAPI changes for networking from David Howells

2) A netlink dump is an operation we can sleep within, and therefore we
need to make sure the dump provider module doesn't disappear on us
meanwhile. Fix from Gao Feng.

3) Now that tunnels support GRO, we have to be more careful in
skb_gro_reset_offset() otherwise we OOPS, from Eric Dumazet.

4) We can end up processing packets for VLANs we aren't actually
configured to be on, fix from Florian Zumbiehl.

5) Fix routing cache removal regression in redirects and IPVS. The
core issue on the IPVS side is that it wants to rewrite who the
nexthop is and we have to explicitly accommodate that case. From
Julian Anastasov.

6) Error code return fixes all over the networking drivers from Peter
Senna Tschudin.

7) Fix routing cache removal regressions in IPSEC, from Steffen
Klassert.

8) Fix deadlock in RDS during pings, from Jeff Liu.

9) Neighbour packet queue can trigger skb_under_panic() because we do
not reset the network header of the SKB in the right spot. From
Ramesh Nagappa.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
RDS: fix rds-ping spinlock recursion
netdev/phy: Prototype of_mdio_find_bus()
farsync: fix support for over 30 cards
be2net: Remove code that stops further access to BE NIC based on UE bits
pch_gbe: Fix build error by selecting all the possible dependencies.
e1000e: add device IDs for i218
ixgbe/ixgbevf: Limit maximum jumbo frame size to 9.5K to avoid Tx hangs
ixgbevf: Set the netdev number of Tx queues
UAPI: (Scripted) Disintegrate include/linux/tc_ematch
UAPI: (Scripted) Disintegrate include/linux/tc_act
UAPI: (Scripted) Disintegrate include/linux/netfilter_ipv6
UAPI: (Scripted) Disintegrate include/linux/netfilter_ipv4
UAPI: (Scripted) Disintegrate include/linux/netfilter_bridge
UAPI: (Scripted) Disintegrate include/linux/netfilter_arp
UAPI: (Scripted) Disintegrate include/linux/netfilter/ipset
UAPI: (Scripted) Disintegrate include/linux/netfilter
UAPI: (Scripted) Disintegrate include/linux/isdn
UAPI: (Scripted) Disintegrate include/linux/caif
net: fix typo in freescale/ucc_geth.c
vxlan: fix more sparse warnings
...

+2394 -2294
+2 -1
drivers/infiniband/core/cma.c
··· 3498 3498 } 3499 3499 3500 3500 static const struct ibnl_client_cbs cma_cb_table[] = { 3501 - [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats }, 3501 + [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats, 3502 + .module = THIS_MODULE }, 3502 3503 }; 3503 3504 3504 3505 static int __init cma_init(void)
+1
drivers/infiniband/core/netlink.c
··· 154 154 { 155 155 struct netlink_dump_control c = { 156 156 .dump = client->cb_table[op].dump, 157 + .module = client->cb_table[op].module, 157 158 }; 158 159 return netlink_dump_start(nls, skb, nlh, &c); 159 160 }
+2
drivers/net/ethernet/amd/amd8111e.c
··· 1845 1845 if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){ 1846 1846 printk(KERN_ERR "amd8111e: No Power Management capability, " 1847 1847 "exiting.\n"); 1848 + err = -ENODEV; 1848 1849 goto err_free_reg; 1849 1850 } 1850 1851 ··· 1853 1852 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) { 1854 1853 printk(KERN_ERR "amd8111e: DMA not supported," 1855 1854 "exiting.\n"); 1855 + err = -ENODEV; 1856 1856 goto err_free_reg; 1857 1857 } 1858 1858
+8 -2
drivers/net/ethernet/amd/au1000_eth.c
··· 1174 1174 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 1175 1175 pdev->name, aup->mac_id); 1176 1176 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 1177 - if (aup->mii_bus->irq == NULL) 1177 + if (aup->mii_bus->irq == NULL) { 1178 + err = -ENOMEM; 1178 1179 goto err_out; 1180 + } 1179 1181 1180 1182 for (i = 0; i < PHY_MAX_ADDR; ++i) 1181 1183 aup->mii_bus->irq[i] = PHY_POLL; ··· 1192 1190 goto err_mdiobus_reg; 1193 1191 } 1194 1192 1195 - if (au1000_mii_probe(dev) != 0) 1193 + err = au1000_mii_probe(dev); 1194 + if (err != 0) 1196 1195 goto err_out; 1197 1196 1198 1197 pDBfree = NULL; ··· 1208 1205 } 1209 1206 aup->pDBfree = pDBfree; 1210 1207 1208 + err = -ENODEV; 1211 1209 for (i = 0; i < NUM_RX_DMA; i++) { 1212 1210 pDB = au1000_GetFreeDB(aup); 1213 1211 if (!pDB) ··· 1217 1213 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; 1218 1214 aup->rx_db_inuse[i] = pDB; 1219 1215 } 1216 + 1217 + err = -ENODEV; 1220 1218 for (i = 0; i < NUM_TX_DMA; i++) { 1221 1219 pDB = au1000_GetFreeDB(aup); 1222 1220 if (!pDB)
+2 -17
drivers/net/ethernet/calxeda/xgmac.c
··· 375 375 unsigned int tx_tail; 376 376 377 377 void __iomem *base; 378 - struct sk_buff_head rx_recycle; 379 378 unsigned int dma_buf_sz; 380 379 dma_addr_t dma_rx_phy; 381 380 dma_addr_t dma_tx_phy; ··· 671 672 p = priv->dma_rx + entry; 672 673 673 674 if (priv->rx_skbuff[entry] == NULL) { 674 - skb = __skb_dequeue(&priv->rx_recycle); 675 - if (skb == NULL) 676 - skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); 675 + skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); 677 676 if (unlikely(skb == NULL)) 678 677 break; 679 678 ··· 884 887 desc_get_buf_len(p), DMA_TO_DEVICE); 885 888 } 886 889 887 - /* 888 - * If there's room in the queue (limit it to size) 889 - * we add this skb back into the pool, 890 - * if it's the right size. 891 - */ 892 - if ((skb_queue_len(&priv->rx_recycle) < 893 - DMA_RX_RING_SZ) && 894 - skb_recycle_check(skb, priv->dma_buf_sz)) 895 - __skb_queue_head(&priv->rx_recycle, skb); 896 - else 897 - dev_kfree_skb(skb); 890 + dev_kfree_skb(skb); 898 891 } 899 892 900 893 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > ··· 1003 1016 dev->dev_addr); 1004 1017 } 1005 1018 1006 - skb_queue_head_init(&priv->rx_recycle); 1007 1019 memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); 1008 1020 1009 1021 /* Initialize the XGMAC and descriptors */ ··· 1039 1053 napi_disable(&priv->napi); 1040 1054 1041 1055 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1042 - skb_queue_purge(&priv->rx_recycle); 1043 1056 1044 1057 /* Disable the MAC core */ 1045 1058 xgmac_mac_disable(priv->base);
+1
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 696 696 int get_vpd_params(struct adapter *adapter, struct vpd_params *p); 697 697 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 698 698 unsigned int t4_flash_cfg_addr(struct adapter *adapter); 699 + int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); 699 700 int t4_check_fw_version(struct adapter *adapter); 700 701 int t4_prep_adapter(struct adapter *adapter); 701 702 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+30 -24
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 443 443 module_param(dbfifo_int_thresh, int, 0644); 444 444 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); 445 445 446 - int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */ 446 + /* 447 + * usecs to sleep while draining the dbfifo 448 + */ 449 + static int dbfifo_drain_delay = 1000; 447 450 module_param(dbfifo_drain_delay, int, 0644); 448 451 MODULE_PARM_DESC(dbfifo_drain_delay, 449 452 "usecs to sleep while draining the dbfifo"); ··· 639 636 static int request_msix_queue_irqs(struct adapter *adap) 640 637 { 641 638 struct sge *s = &adap->sge; 642 - int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; 639 + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2; 643 640 644 641 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, 645 642 adap->msix_info[1].desc, &s->fw_evtq); ··· 647 644 return err; 648 645 649 646 for_each_ethrxq(s, ethqidx) { 650 - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, 651 - adap->msix_info[msi].desc, 647 + err = request_irq(adap->msix_info[msi_index].vec, 648 + t4_sge_intr_msix, 0, 649 + adap->msix_info[msi_index].desc, 652 650 &s->ethrxq[ethqidx].rspq); 653 651 if (err) 654 652 goto unwind; 655 - msi++; 653 + msi_index++; 656 654 } 657 655 for_each_ofldrxq(s, ofldqidx) { 658 - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, 659 - adap->msix_info[msi].desc, 656 + err = request_irq(adap->msix_info[msi_index].vec, 657 + t4_sge_intr_msix, 0, 658 + adap->msix_info[msi_index].desc, 660 659 &s->ofldrxq[ofldqidx].rspq); 661 660 if (err) 662 661 goto unwind; 663 - msi++; 662 + msi_index++; 664 663 } 665 664 for_each_rdmarxq(s, rdmaqidx) { 666 - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, 667 - adap->msix_info[msi].desc, 665 + err = request_irq(adap->msix_info[msi_index].vec, 666 + t4_sge_intr_msix, 0, 667 + adap->msix_info[msi_index].desc, 668 668 &s->rdmarxq[rdmaqidx].rspq); 669 669 if (err) 670 670 goto unwind; 671 
- msi++; 671 + msi_index++; 672 672 } 673 673 return 0; 674 674 675 675 unwind: 676 676 while (--rdmaqidx >= 0) 677 - free_irq(adap->msix_info[--msi].vec, 677 + free_irq(adap->msix_info[--msi_index].vec, 678 678 &s->rdmarxq[rdmaqidx].rspq); 679 679 while (--ofldqidx >= 0) 680 - free_irq(adap->msix_info[--msi].vec, 680 + free_irq(adap->msix_info[--msi_index].vec, 681 681 &s->ofldrxq[ofldqidx].rspq); 682 682 while (--ethqidx >= 0) 683 - free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); 683 + free_irq(adap->msix_info[--msi_index].vec, 684 + &s->ethrxq[ethqidx].rspq); 684 685 free_irq(adap->msix_info[1].vec, &s->fw_evtq); 685 686 return err; 686 687 } 687 688 688 689 static void free_msix_queue_irqs(struct adapter *adap) 689 690 { 690 - int i, msi = 2; 691 + int i, msi_index = 2; 691 692 struct sge *s = &adap->sge; 692 693 693 694 free_irq(adap->msix_info[1].vec, &s->fw_evtq); 694 695 for_each_ethrxq(s, i) 695 - free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); 696 + free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); 696 697 for_each_ofldrxq(s, i) 697 - free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); 698 + free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); 698 699 for_each_rdmarxq(s, i) 699 - free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); 700 + free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); 700 701 } 701 702 702 703 /** ··· 2542 2535 2543 2536 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8); 2544 2537 if (!ret) { 2545 - indices = be64_to_cpu(indices); 2546 - *cidx = (indices >> 25) & 0xffff; 2547 - *pidx = (indices >> 9) & 0xffff; 2538 + *cidx = (be64_to_cpu(indices) >> 25) & 0xffff; 2539 + *pidx = (be64_to_cpu(indices) >> 9) & 0xffff; 2548 2540 } 2549 2541 return ret; 2550 2542 } ··· 3640 3634 * field selections will fit in the 36-bit budget. 
3641 3635 */ 3642 3636 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) { 3643 - int i, bits = 0; 3637 + int j, bits = 0; 3644 3638 3645 - for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++) 3646 - switch (tp_vlan_pri_map & (1 << i)) { 3639 + for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++) 3640 + switch (tp_vlan_pri_map & (1 << j)) { 3647 3641 case 0: 3648 3642 /* compressed filter field not enabled */ 3649 3643 break;
+9 -6
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 380 380 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ 381 381 for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) { 382 382 if (dir) 383 - *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i)); 383 + *data++ = (__force __be32) t4_read_reg(adap, 384 + (MEMWIN0_BASE + i)); 384 385 else 385 - t4_write_reg(adap, (MEMWIN0_BASE + i), *data++); 386 + t4_write_reg(adap, (MEMWIN0_BASE + i), 387 + (__force u32) *data++); 386 388 } 387 389 388 390 return 0; ··· 419 417 if ((addr & 0x3) || (len & 0x3)) 420 418 return -EINVAL; 421 419 422 - data = vmalloc(MEMWIN0_APERTURE/sizeof(__be32)); 420 + data = vmalloc(MEMWIN0_APERTURE); 423 421 if (!data) 424 422 return -ENOMEM; 425 423 ··· 746 744 if (ret) 747 745 return ret; 748 746 if (byte_oriented) 749 - *data = htonl(*data); 747 + *data = (__force __u32) (htonl(*data)); 750 748 } 751 749 return 0; 752 750 } ··· 994 992 int ret, addr; 995 993 unsigned int i; 996 994 u8 first_page[SF_PAGE_SIZE]; 997 - const u32 *p = (const u32 *)fw_data; 995 + const __be32 *p = (const __be32 *)fw_data; 998 996 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; 999 997 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 1000 998 unsigned int fw_img_start = adap->params.sf_fw_start; ··· 2317 2315 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); 2318 2316 2319 2317 for (i = 0; i < len; i += 4) 2320 - *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); 2318 + *data++ = (__force __be32) t4_read_reg(adap, 2319 + (MEMWIN0_BASE + off + i)); 2321 2320 2322 2321 return 0; 2323 2322 }
+9 -3
drivers/net/ethernet/dec/tulip/dmfe.c
··· 446 446 /* Allocate Tx/Rx descriptor memory */ 447 447 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * 448 448 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); 449 - if (!db->desc_pool_ptr) 449 + if (!db->desc_pool_ptr) { 450 + err = -ENOMEM; 450 451 goto err_out_res; 452 + } 451 453 452 454 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * 453 455 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); 454 - if (!db->buf_pool_ptr) 456 + if (!db->buf_pool_ptr) { 457 + err = -ENOMEM; 455 458 goto err_out_free_desc; 459 + } 456 460 457 461 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 458 462 db->first_tx_desc_dma = db->desc_pool_dma_ptr; ··· 466 462 db->chip_id = ent->driver_data; 467 463 /* IO type range. */ 468 464 db->ioaddr = pci_iomap(pdev, 0, 0); 469 - if (!db->ioaddr) 465 + if (!db->ioaddr) { 466 + err = -ENOMEM; 470 467 goto err_out_free_buf; 468 + } 471 469 472 470 db->chip_revision = pdev->revision; 473 471 db->wol_mode = 0;
+5 -2
drivers/net/ethernet/emulex/benet/be_main.c
··· 2129 2129 ue_hi = (ue_hi & ~ue_hi_mask); 2130 2130 } 2131 2131 2132 - if (ue_lo || ue_hi || 2133 - sliport_status & SLIPORT_STATUS_ERR_MASK) { 2132 + /* On certain platforms BE hardware can indicate spurious UEs. 2133 + * Allow the h/w to stop working completely in case of a real UE. 2134 + * Hence not setting the hw_error for UE detection. 2135 + */ 2136 + if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2134 2137 adapter->hw_error = true; 2135 2138 dev_err(&adapter->pdev->dev, 2136 2139 "Error detected in the card\n");
+4 -23
drivers/net/ethernet/freescale/gianfar.c
··· 1765 1765 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1766 1766 priv->tx_queue[0]->tx_bd_base, 1767 1767 priv->tx_queue[0]->tx_bd_dma_base); 1768 - skb_queue_purge(&priv->rx_recycle); 1769 1768 } 1770 1769 1771 1770 void gfar_start(struct net_device *dev) ··· 1941 1942 int err; 1942 1943 1943 1944 enable_napi(priv); 1944 - 1945 - skb_queue_head_init(&priv->rx_recycle); 1946 1945 1947 1946 /* Initialize a bunch of registers */ 1948 1947 init_registers(dev); ··· 2530 2533 2531 2534 bytes_sent += skb->len; 2532 2535 2533 - /* If there's room in the queue (limit it to rx_buffer_size) 2534 - * we add this skb back into the pool, if it's the right size 2535 - */ 2536 - if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && 2537 - skb_recycle_check(skb, priv->rx_buffer_size + 2538 - RXBUF_ALIGNMENT)) { 2539 - gfar_align_skb(skb); 2540 - skb_queue_head(&priv->rx_recycle, skb); 2541 - } else 2542 - dev_kfree_skb_any(skb); 2536 + dev_kfree_skb_any(skb); 2543 2537 2544 2538 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2545 2539 ··· 2596 2608 static struct sk_buff *gfar_alloc_skb(struct net_device *dev) 2597 2609 { 2598 2610 struct gfar_private *priv = netdev_priv(dev); 2599 - struct sk_buff *skb = NULL; 2611 + struct sk_buff *skb; 2600 2612 2601 2613 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); 2602 2614 if (!skb) ··· 2609 2621 2610 2622 struct sk_buff *gfar_new_skb(struct net_device *dev) 2611 2623 { 2612 - struct gfar_private *priv = netdev_priv(dev); 2613 - struct sk_buff *skb = NULL; 2614 - 2615 - skb = skb_dequeue(&priv->rx_recycle); 2616 - if (!skb) 2617 - skb = gfar_alloc_skb(dev); 2618 - 2619 - return skb; 2624 + return gfar_alloc_skb(dev); 2620 2625 } 2621 2626 2622 2627 static inline void count_errors(unsigned short status, struct net_device *dev) ··· 2768 2787 if (unlikely(!newskb)) 2769 2788 newskb = skb; 2770 2789 else if (skb) 2771 - skb_queue_head(&priv->rx_recycle, skb); 2790 + dev_kfree_skb(skb); 2772 2791 } else { 2773 
2792 /* Increment the number of packets */ 2774 2793 rx_queue->stats.rx_packets++;
-2
drivers/net/ethernet/freescale/gianfar.h
··· 1080 1080 1081 1081 u32 cur_filer_idx; 1082 1082 1083 - struct sk_buff_head rx_recycle; 1084 - 1085 1083 /* RX queue filer rule set*/ 1086 1084 struct ethtool_rx_list rx_list; 1087 1085 struct mutex rx_queue_access;
+6 -23
drivers/net/ethernet/freescale/ucc_geth.c
··· 209 209 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, 210 210 u8 __iomem *bd) 211 211 { 212 - struct sk_buff *skb = NULL; 212 + struct sk_buff *skb; 213 213 214 - skb = __skb_dequeue(&ugeth->rx_recycle); 214 + skb = netdev_alloc_skb(ugeth->ndev, 215 + ugeth->ug_info->uf_info.max_rx_buf_length + 216 + UCC_GETH_RX_DATA_BUF_ALIGNMENT); 215 217 if (!skb) 216 - skb = netdev_alloc_skb(ugeth->ndev, 217 - ugeth->ug_info->uf_info.max_rx_buf_length + 218 - UCC_GETH_RX_DATA_BUF_ALIGNMENT); 219 - if (skb == NULL) 220 218 return NULL; 221 219 222 220 /* We need the data buffer to be aligned properly. We will reserve ··· 2018 2020 iounmap(ugeth->ug_regs); 2019 2021 ugeth->ug_regs = NULL; 2020 2022 } 2021 - 2022 - skb_queue_purge(&ugeth->rx_recycle); 2023 2023 } 2024 2024 2025 2025 static void ucc_geth_set_multi(struct net_device *dev) ··· 2225 2229 ugeth_err("%s: Failed to ioremap regs.", __func__); 2226 2230 return -ENOMEM; 2227 2231 } 2228 - 2229 - skb_queue_head_init(&ugeth->rx_recycle); 2230 2232 2231 2233 return 0; 2232 2234 } ··· 3268 3274 if (netif_msg_rx_err(ugeth)) 3269 3275 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3270 3276 __func__, __LINE__, (u32) skb); 3271 - if (skb) { 3272 - skb->data = skb->head + NET_SKB_PAD; 3273 - skb->len = 0; 3274 - skb_reset_tail_pointer(skb); 3275 - __skb_queue_head(&ugeth->rx_recycle, skb); 3276 - } 3277 + dev_kfree_skb(skb); 3277 3278 3278 3279 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3279 3280 dev->stats.rx_dropped++; ··· 3338 3349 3339 3350 dev->stats.tx_packets++; 3340 3351 3341 - if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && 3342 - skb_recycle_check(skb, 3343 - ugeth->ug_info->uf_info.max_rx_buf_length + 3344 - UCC_GETH_RX_DATA_BUF_ALIGNMENT)) 3345 - __skb_queue_head(&ugeth->rx_recycle, skb); 3346 - else 3347 - dev_kfree_skb(skb); 3352 + dev_kfree_skb(skb); 3348 3353 3349 3354 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3350 3355 ugeth->skb_dirtytx[txQ] =
-2
drivers/net/ethernet/freescale/ucc_geth.h
··· 1214 1214 /* index of the first skb which hasn't been transmitted yet. */ 1215 1215 u16 skb_dirtytx[NUM_TX_QUEUES]; 1216 1216 1217 - struct sk_buff_head rx_recycle; 1218 - 1219 1217 struct ugeth_mii_info *mii_info; 1220 1218 struct phy_device *phydev; 1221 1219 phy_interface_t phy_interface;
+2
drivers/net/ethernet/intel/e1000e/hw.h
··· 412 412 #define E1000_DEV_ID_PCH2_LV_V 0x1503 413 413 #define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A 414 414 #define E1000_DEV_ID_PCH_LPT_I217_V 0x153B 415 + #define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A 416 + #define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 415 417 416 418 #define E1000_REVISION_4 4 417 419
+2
drivers/net/ethernet/intel/e1000e/netdev.c
··· 6558 6558 6559 6559 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, 6560 6560 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, 6561 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, 6562 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, 6561 6563 6562 6564 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 6563 6565 };
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 410 410 #define IXGBE_TX_CTXTDESC(R, i) \ 411 411 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) 412 412 413 - #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 413 + #define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ 414 414 #ifdef IXGBE_FCOE 415 415 /* Use 3K as the baby jumbo frame size for FCoE */ 416 416 #define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
+1 -1
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
··· 175 175 #define IXGBEVF_TX_CTXTDESC(R, i) \ 176 176 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) 177 177 178 - #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 178 + #define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ 179 179 180 180 #define OTHER_VECTOR 1 181 181 #define NON_Q_VECTORS (OTHER_VECTOR)
+7
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 1747 1747 **/ 1748 1748 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 1749 1749 { 1750 + struct net_device *netdev = adapter->netdev; 1750 1751 int err = 0; 1751 1752 int vector, v_budget; 1752 1753 ··· 1775 1774 adapter->msix_entries[vector].entry = vector; 1776 1775 1777 1776 ixgbevf_acquire_msix_vectors(adapter, v_budget); 1777 + 1778 + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 1779 + if (err) 1780 + goto out; 1781 + 1782 + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 1778 1783 1779 1784 out: 1780 1785 return err;
+2 -16
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 412 412 u8 work_rx_refill; 413 413 414 414 int skb_size; 415 - struct sk_buff_head rx_recycle; 416 415 417 416 /* 418 417 * RX state. ··· 672 673 struct rx_desc *rx_desc; 673 674 int size; 674 675 675 - skb = __skb_dequeue(&mp->rx_recycle); 676 - if (skb == NULL) 677 - skb = netdev_alloc_skb(mp->dev, mp->skb_size); 676 + skb = netdev_alloc_skb(mp->dev, mp->skb_size); 678 677 679 678 if (skb == NULL) { 680 679 mp->oom = 1; ··· 986 989 desc->byte_cnt, DMA_TO_DEVICE); 987 990 } 988 991 989 - if (skb != NULL) { 990 - if (skb_queue_len(&mp->rx_recycle) < 991 - mp->rx_ring_size && 992 - skb_recycle_check(skb, mp->skb_size)) 993 - __skb_queue_head(&mp->rx_recycle, skb); 994 - else 995 - dev_kfree_skb(skb); 996 - } 992 + dev_kfree_skb(skb); 997 993 } 998 994 999 995 __netif_tx_unlock(nq); ··· 2339 2349 2340 2350 napi_enable(&mp->napi); 2341 2351 2342 - skb_queue_head_init(&mp->rx_recycle); 2343 - 2344 2352 mp->int_mask = INT_EXT; 2345 2353 2346 2354 for (i = 0; i < mp->rxq_count; i++) { ··· 2432 2444 mv643xx_eth_get_stats(dev); 2433 2445 mib_counters_update(mp); 2434 2446 del_timer_sync(&mp->mib_counters_timer); 2435 - 2436 - skb_queue_purge(&mp->rx_recycle); 2437 2447 2438 2448 for (i = 0; i < mp->rxq_count; i++) 2439 2449 rxq_deinit(mp->rxq + i);
+11 -2
drivers/net/ethernet/marvell/skge.c
··· 3189 3189 if (work_done < to_do) { 3190 3190 unsigned long flags; 3191 3191 3192 - napi_gro_flush(napi); 3192 + napi_gro_flush(napi, false); 3193 3193 spin_lock_irqsave(&hw->hw_lock, flags); 3194 3194 __napi_complete(napi); 3195 3195 hw->intr_mask |= napimask[skge->port]; ··· 3945 3945 skge_board_name(hw), hw->chip_rev); 3946 3946 3947 3947 dev = skge_devinit(hw, 0, using_dac); 3948 - if (!dev) 3948 + if (!dev) { 3949 + err = -ENOMEM; 3949 3950 goto err_out_led_off; 3951 + } 3950 3952 3951 3953 /* Some motherboards are broken and has zero in ROM. */ 3952 3954 if (!is_valid_ether_addr(dev->dev_addr)) ··· 4153 4151 .matches = { 4154 4152 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), 4155 4153 DMI_MATCH(DMI_BOARD_NAME, "nForce"), 4154 + }, 4155 + }, 4156 + { 4157 + .ident = "ASUS P5NSLI", 4158 + .matches = { 4159 + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 4160 + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") 4156 4161 }, 4157 4162 }, 4158 4163 {}
+4 -1
drivers/net/ethernet/marvell/sky2.c
··· 4924 4924 4925 4925 if (~reg == 0) { 4926 4926 dev_err(&pdev->dev, "PCI configuration read error\n"); 4927 + err = -EIO; 4927 4928 goto err_out; 4928 4929 } 4929 4930 ··· 4994 4993 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); 4995 4994 hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), 4996 4995 &hw->st_dma); 4997 - if (!hw->st_le) 4996 + if (!hw->st_le) { 4997 + err = -ENOMEM; 4998 4998 goto err_out_reset; 4999 + } 4999 5000 5000 5001 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", 5001 5002 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
+2 -2
drivers/net/ethernet/natsemi/natsemi.c
··· 947 947 i = register_netdev(dev); 948 948 if (i) 949 949 goto err_register_netdev; 950 - 951 - if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround)) 950 + i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround); 951 + if (i) 952 952 goto err_create_file; 953 953 954 954 if (netif_msg_drv(np)) {
+1
drivers/net/ethernet/natsemi/xtsonic.c
··· 205 205 if (lp->descriptors == NULL) { 206 206 printk(KERN_ERR "%s: couldn't alloc DMA memory for " 207 207 " descriptors.\n", dev_name(lp->device)); 208 + err = -ENOMEM; 208 209 goto out; 209 210 } 210 211
+3
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
··· 26 26 config PCH_PTP 27 27 bool "PCH PTP clock support" 28 28 default n 29 + depends on EXPERIMENTAL 30 + select PPS 31 + select PTP_1588_CLOCK 29 32 select PTP_1588_CLOCK_PCH 30 33 ---help--- 31 34 Say Y here if you want to use Precision Time Protocol (PTP) in the
+2 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 1601 1601 adapter->netdev = netdev; 1602 1602 adapter->pdev = pdev; 1603 1603 1604 - if (qlcnic_alloc_adapter_resources(adapter)) 1604 + err = qlcnic_alloc_adapter_resources(adapter); 1605 + if (err) 1605 1606 goto err_out_free_netdev; 1606 1607 1607 1608 adapter->dev_rst_time = jiffies;
+1 -1
drivers/net/ethernet/realtek/8139cp.c
··· 563 563 if (cpr16(IntrStatus) & cp_rx_intr_mask) 564 564 goto rx_status_loop; 565 565 566 - napi_gro_flush(napi); 566 + napi_gro_flush(napi, false); 567 567 spin_lock_irqsave(&cp->lock, flags); 568 568 __napi_complete(napi); 569 569 cpw16_f(IntrMask, cp_intr_mask);
+1
drivers/net/ethernet/renesas/sh_eth.c
··· 2438 2438 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2439 2439 if (!rtsu) { 2440 2440 dev_err(&pdev->dev, "Not found TSU resource\n"); 2441 + ret = -ENODEV; 2441 2442 goto out_release; 2442 2443 } 2443 2444 mdp->tsu_addr = ioremap(rtsu->start,
+3 -6
drivers/net/ethernet/sfc/ptp.c
··· 640 640 evt = list_entry(cursor, struct efx_ptp_event_rx, 641 641 link); 642 642 if (time_after(jiffies, evt->expiry)) { 643 - list_del(&evt->link); 644 - list_add(&evt->link, &ptp->evt_free_list); 643 + list_move(&evt->link, &ptp->evt_free_list); 645 644 netif_warn(efx, hw, efx->net_dev, 646 645 "PTP rx event dropped\n"); 647 646 } ··· 683 684 684 685 match->state = PTP_PACKET_STATE_MATCHED; 685 686 rc = PTP_PACKET_STATE_MATCHED; 686 - list_del(&evt->link); 687 - list_add(&evt->link, &ptp->evt_free_list); 687 + list_move(&evt->link, &ptp->evt_free_list); 688 688 break; 689 689 } 690 690 } ··· 818 820 /* Drop any pending receive events */ 819 821 spin_lock_bh(&efx->ptp_data->evt_lock); 820 822 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { 821 - list_del(cursor); 822 - list_add(cursor, &efx->ptp_data->evt_free_list); 823 + list_move(cursor, &efx->ptp_data->evt_free_list); 823 824 } 824 825 spin_unlock_bh(&efx->ptp_data->evt_lock); 825 826
+3 -1
drivers/net/ethernet/sis/sis900.c
··· 478 478 479 479 /* IO region. */ 480 480 ioaddr = pci_iomap(pci_dev, 0, 0); 481 - if (!ioaddr) 481 + if (!ioaddr) { 482 + ret = -ENOMEM; 482 483 goto err_out_cleardev; 484 + } 483 485 484 486 sis_priv = netdev_priv(net_dev); 485 487 sis_priv->ioaddr = ioaddr;
-1
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 50 50 unsigned int dirty_rx; 51 51 struct sk_buff **rx_skbuff; 52 52 dma_addr_t *rx_skbuff_dma; 53 - struct sk_buff_head rx_recycle; 54 53 55 54 struct net_device *dev; 56 55 dma_addr_t dma_rx_phy;
+2 -18
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 747 747 priv->hw->ring->clean_desc3(p); 748 748 749 749 if (likely(skb != NULL)) { 750 - /* 751 - * If there's room in the queue (limit it to size) 752 - * we add this skb back into the pool, 753 - * if it's the right size. 754 - */ 755 - if ((skb_queue_len(&priv->rx_recycle) < 756 - priv->dma_rx_size) && 757 - skb_recycle_check(skb, priv->dma_buf_sz)) 758 - __skb_queue_head(&priv->rx_recycle, skb); 759 - else 760 - dev_kfree_skb(skb); 761 - 750 + dev_kfree_skb(skb); 762 751 priv->tx_skbuff[entry] = NULL; 763 752 } 764 753 ··· 1158 1169 priv->eee_enabled = stmmac_eee_init(priv); 1159 1170 1160 1171 napi_enable(&priv->napi); 1161 - skb_queue_head_init(&priv->rx_recycle); 1162 1172 netif_start_queue(dev); 1163 1173 1164 1174 return 0; ··· 1210 1222 kfree(priv->tm); 1211 1223 #endif 1212 1224 napi_disable(&priv->napi); 1213 - skb_queue_purge(&priv->rx_recycle); 1214 1225 1215 1226 /* Free the IRQ lines */ 1216 1227 free_irq(dev->irq, dev); ··· 1375 1388 if (likely(priv->rx_skbuff[entry] == NULL)) { 1376 1389 struct sk_buff *skb; 1377 1390 1378 - skb = __skb_dequeue(&priv->rx_recycle); 1379 - if (skb == NULL) 1380 - skb = netdev_alloc_skb_ip_align(priv->dev, 1381 - bfsize); 1391 + skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); 1382 1392 1383 1393 if (unlikely(skb == NULL)) 1384 1394 break;
+1
drivers/net/ethernet/sun/niu.c
··· 9788 9788 9789 9789 if (!pci_is_pcie(pdev)) { 9790 9790 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); 9791 + err = -ENODEV; 9791 9792 goto err_out_free_res; 9792 9793 } 9793 9794
+2 -1
drivers/net/ethernet/sun/sungem.c
··· 2963 2963 goto err_out_iounmap; 2964 2964 } 2965 2965 2966 - if (gem_get_device_address(gp)) 2966 + err = gem_get_device_address(gp); 2967 + if (err) 2967 2968 goto err_out_free_consistent; 2968 2969 2969 2970 dev->netdev_ops = &gem_netdev_ops;
+3 -1
drivers/net/irda/irtty-sir.c
··· 459 459 460 460 /* allocate private device info block */ 461 461 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 462 - if (!priv) 462 + if (!priv) { 463 + ret = -ENOMEM; 463 464 goto out_put; 465 + } 464 466 465 467 priv->magic = IRTTY_MAGIC; 466 468 priv->tty = tty;
+3 -1
drivers/net/irda/mcs7780.c
··· 920 920 921 921 ndev->netdev_ops = &mcs_netdev_ops; 922 922 923 - if (!intf->cur_altsetting) 923 + if (!intf->cur_altsetting) { 924 + ret = -ENOMEM; 924 925 goto error2; 926 + } 925 927 926 928 ret = mcs_find_endpoints(mcs, intf->cur_altsetting->endpoint, 927 929 intf->cur_altsetting->desc.bNumEndpoints);
+3 -1
drivers/net/irda/pxaficp_ir.c
··· 846 846 goto err_mem_2; 847 847 848 848 dev = alloc_irdadev(sizeof(struct pxa_irda)); 849 - if (!dev) 849 + if (!dev) { 850 + err = -ENOMEM; 850 851 goto err_mem_3; 852 + } 851 853 852 854 SET_NETDEV_DEV(dev, &pdev->dev); 853 855 si = netdev_priv(dev);
+3 -1
drivers/net/irda/sa1100_ir.c
··· 940 940 goto err_mem_3; 941 941 942 942 dev = alloc_irdadev(sizeof(struct sa1100_irda)); 943 - if (!dev) 943 + if (!dev) { 944 + err = -ENOMEM; 944 945 goto err_mem_4; 946 + } 945 947 946 948 SET_NETDEV_DEV(dev, &pdev->dev); 947 949
+2 -2
drivers/net/irda/sh_irda.c
··· 808 808 goto err_mem_4; 809 809 810 810 platform_set_drvdata(pdev, ndev); 811 - 812 - if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) { 811 + err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self); 812 + if (err) { 813 813 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n"); 814 814 goto err_mem_4; 815 815 }
+3 -2
drivers/net/irda/sh_sir.c
··· 741 741 self->clk = clk_get(&pdev->dev, clk_name); 742 742 if (IS_ERR(self->clk)) { 743 743 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); 744 + err = -ENODEV; 744 745 goto err_mem_3; 745 746 } 746 747 ··· 761 760 goto err_mem_4; 762 761 763 762 platform_set_drvdata(pdev, ndev); 764 - 765 - if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) { 763 + err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self); 764 + if (err) { 766 765 dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n"); 767 766 goto err_mem_4; 768 767 }
+1
drivers/net/phy/mdio_bus.c
··· 26 26 #include <linux/delay.h> 27 27 #include <linux/device.h> 28 28 #include <linux/of_device.h> 29 + #include <linux/of_mdio.h> 29 30 #include <linux/netdevice.h> 30 31 #include <linux/etherdevice.h> 31 32 #include <linux/skbuff.h>
+2 -3
drivers/net/vxlan.c
··· 28 28 #include <linux/igmp.h> 29 29 #include <linux/etherdevice.h> 30 30 #include <linux/if_ether.h> 31 - #include <linux/version.h> 32 31 #include <linux/hash.h> 33 32 #include <net/ip.h> 34 33 #include <net/icmp.h> ··· 1083 1084 if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni)) 1084 1085 goto nla_put_failure; 1085 1086 1086 - if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr)) 1087 + if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr)) 1087 1088 goto nla_put_failure; 1088 1089 1089 1090 if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link)) 1090 1091 goto nla_put_failure; 1091 1092 1092 - if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) 1093 + if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr)) 1093 1094 goto nla_put_failure; 1094 1095 1095 1096 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
+1 -1
drivers/net/wan/farsync.c
··· 597 597 * bottom half for the card. Note the limitation of 64 cards. 598 598 * That ought to be enough 599 599 */ 600 - mask = 1 << card_index; 600 + mask = (u64)1 << card_index; 601 601 *queue |= mask; 602 602 spin_unlock_irqrestore(&fst_work_q_lock, flags); 603 603 }
-2
include/linux/caif/Kbuild
··· 1 - header-y += caif_socket.h 2 - header-y += if_caif.h
include/linux/caif/caif_socket.h include/uapi/linux/caif/caif_socket.h
include/linux/caif/if_caif.h include/uapi/linux/caif/if_caif.h
+4 -4
include/linux/if_vlan.h
··· 80 80 } 81 81 82 82 #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) 83 + #define vlan_tx_nonzero_tag_present(__skb) \ 84 + (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK)) 83 85 #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) 84 86 85 87 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) ··· 91 89 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); 92 90 extern u16 vlan_dev_vlan_id(const struct net_device *dev); 93 91 94 - extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler); 92 + extern bool vlan_do_receive(struct sk_buff **skb); 95 93 extern struct sk_buff *vlan_untag(struct sk_buff *skb); 96 94 97 95 extern int vlan_vid_add(struct net_device *dev, unsigned short vid); ··· 122 120 return 0; 123 121 } 124 122 125 - static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler) 123 + static inline bool vlan_do_receive(struct sk_buff **skb) 126 124 { 127 - if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler) 128 - (*skb)->pkt_type = PACKET_OTHERHOST; 129 125 return false; 130 126 } 131 127
-1
include/linux/isdn/Kbuild
··· 1 - header-y += capicmd.h
include/linux/isdn/capicmd.h include/uapi/linux/isdn/capicmd.h
+12 -7
include/linux/netdevice.h
··· 1497 1497 /* This indicates where we are processing relative to skb->data. */ 1498 1498 int data_offset; 1499 1499 1500 - /* This is non-zero if the packet may be of the same flow. */ 1501 - int same_flow; 1502 - 1503 1500 /* This is non-zero if the packet cannot be merged with the new skb. */ 1504 1501 int flush; 1505 1502 1506 1503 /* Number of segments aggregated. */ 1507 - int count; 1504 + u16 count; 1505 + 1506 + /* This is non-zero if the packet may be of the same flow. */ 1507 + u8 same_flow; 1508 1508 1509 1509 /* Free the skb? */ 1510 - int free; 1510 + u8 free; 1511 1511 #define NAPI_GRO_FREE 1 1512 1512 #define NAPI_GRO_FREE_STOLEN_HEAD 2 1513 + 1514 + /* jiffies when first packet was created/queued */ 1515 + unsigned long age; 1516 + 1517 + /* Used in ipv6_gro_receive() */ 1518 + int proto; 1513 1519 }; 1514 1520 1515 1521 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) ··· 1669 1663 #endif 1670 1664 extern int skb_gro_receive(struct sk_buff **head, 1671 1665 struct sk_buff *skb); 1672 - extern void skb_gro_reset_offset(struct sk_buff *skb); 1673 1666 1674 1667 static inline unsigned int skb_gro_offset(const struct sk_buff *skb) 1675 1668 { ··· 2162 2157 extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); 2163 2158 extern gro_result_t napi_gro_receive(struct napi_struct *napi, 2164 2159 struct sk_buff *skb); 2165 - extern void napi_gro_flush(struct napi_struct *napi); 2160 + extern void napi_gro_flush(struct napi_struct *napi, bool flush_old); 2166 2161 extern struct sk_buff * napi_get_frags(struct napi_struct *napi); 2167 2162 extern gro_result_t napi_frags_finish(struct napi_struct *napi, 2168 2163 struct sk_buff *skb,
-77
include/linux/netfilter/Kbuild
··· 1 1 header-y += ipset/ 2 - 3 - header-y += nf_conntrack_common.h 4 - header-y += nf_conntrack_ftp.h 5 - header-y += nf_conntrack_sctp.h 6 - header-y += nf_conntrack_tcp.h 7 - header-y += nf_conntrack_tuple_common.h 8 - header-y += nf_nat.h 9 - header-y += nfnetlink.h 10 - header-y += nfnetlink_acct.h 11 - header-y += nfnetlink_compat.h 12 - header-y += nfnetlink_conntrack.h 13 - header-y += nfnetlink_cthelper.h 14 - header-y += nfnetlink_cttimeout.h 15 - header-y += nfnetlink_log.h 16 - header-y += nfnetlink_queue.h 17 - header-y += x_tables.h 18 - header-y += xt_AUDIT.h 19 - header-y += xt_CHECKSUM.h 20 - header-y += xt_CLASSIFY.h 21 - header-y += xt_CONNMARK.h 22 - header-y += xt_CONNSECMARK.h 23 - header-y += xt_CT.h 24 - header-y += xt_DSCP.h 25 - header-y += xt_IDLETIMER.h 26 - header-y += xt_LED.h 27 - header-y += xt_LOG.h 28 - header-y += xt_MARK.h 29 - header-y += xt_nfacct.h 30 - header-y += xt_NFLOG.h 31 - header-y += xt_NFQUEUE.h 32 - header-y += xt_RATEEST.h 33 - header-y += xt_SECMARK.h 34 - header-y += xt_TCPMSS.h 35 - header-y += xt_TCPOPTSTRIP.h 36 - header-y += xt_TEE.h 37 - header-y += xt_TPROXY.h 38 - header-y += xt_addrtype.h 39 - header-y += xt_cluster.h 40 - header-y += xt_comment.h 41 - header-y += xt_connbytes.h 42 - header-y += xt_connlimit.h 43 - header-y += xt_connmark.h 44 - header-y += xt_conntrack.h 45 - header-y += xt_cpu.h 46 - header-y += xt_dccp.h 47 - header-y += xt_devgroup.h 48 - header-y += xt_dscp.h 49 - header-y += xt_ecn.h 50 - header-y += xt_esp.h 51 - header-y += xt_hashlimit.h 52 - header-y += xt_helper.h 53 - header-y += xt_iprange.h 54 - header-y += xt_ipvs.h 55 - header-y += xt_length.h 56 - header-y += xt_limit.h 57 - header-y += xt_mac.h 58 - header-y += xt_mark.h 59 - header-y += xt_multiport.h 60 - header-y += xt_osf.h 61 - header-y += xt_owner.h 62 - header-y += xt_physdev.h 63 - header-y += xt_pkttype.h 64 - header-y += xt_policy.h 65 - header-y += xt_quota.h 66 - header-y += xt_rateest.h 67 - header-y += 
xt_realm.h 68 - header-y += xt_recent.h 69 - header-y += xt_set.h 70 - header-y += xt_sctp.h 71 - header-y += xt_socket.h 72 - header-y += xt_state.h 73 - header-y += xt_statistic.h 74 - header-y += xt_string.h 75 - header-y += xt_tcpmss.h 76 - header-y += xt_tcpudp.h 77 - header-y += xt_time.h 78 - header-y += xt_u32.h
-4
include/linux/netfilter/ipset/Kbuild
··· 1 - header-y += ip_set.h 2 - header-y += ip_set_bitmap.h 3 - header-y += ip_set_hash.h 4 - header-y += ip_set_list.h
+3 -222
include/linux/netfilter/ipset/ip_set.h
··· 1 - #ifndef _IP_SET_H 2 - #define _IP_SET_H 3 - 4 1 /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> 5 2 * Patrick Schaaf <bof@bof.de> 6 3 * Martin Josefsson <gandalf@wlug.westbo.se> ··· 7 10 * it under the terms of the GNU General Public License version 2 as 8 11 * published by the Free Software Foundation. 9 12 */ 13 + #ifndef _IP_SET_H 14 + #define _IP_SET_H 10 15 11 - #include <linux/types.h> 12 - 13 - /* The protocol version */ 14 - #define IPSET_PROTOCOL 6 15 - 16 - /* The max length of strings including NUL: set and type identifiers */ 17 - #define IPSET_MAXNAMELEN 32 18 - 19 - /* Message types and commands */ 20 - enum ipset_cmd { 21 - IPSET_CMD_NONE, 22 - IPSET_CMD_PROTOCOL, /* 1: Return protocol version */ 23 - IPSET_CMD_CREATE, /* 2: Create a new (empty) set */ 24 - IPSET_CMD_DESTROY, /* 3: Destroy a (empty) set */ 25 - IPSET_CMD_FLUSH, /* 4: Remove all elements from a set */ 26 - IPSET_CMD_RENAME, /* 5: Rename a set */ 27 - IPSET_CMD_SWAP, /* 6: Swap two sets */ 28 - IPSET_CMD_LIST, /* 7: List sets */ 29 - IPSET_CMD_SAVE, /* 8: Save sets */ 30 - IPSET_CMD_ADD, /* 9: Add an element to a set */ 31 - IPSET_CMD_DEL, /* 10: Delete an element from a set */ 32 - IPSET_CMD_TEST, /* 11: Test an element in a set */ 33 - IPSET_CMD_HEADER, /* 12: Get set header data only */ 34 - IPSET_CMD_TYPE, /* 13: Get set type */ 35 - IPSET_MSG_MAX, /* Netlink message commands */ 36 - 37 - /* Commands in userspace: */ 38 - IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */ 39 - IPSET_CMD_HELP, /* 15: Get help */ 40 - IPSET_CMD_VERSION, /* 16: Get program version */ 41 - IPSET_CMD_QUIT, /* 17: Quit from interactive mode */ 42 - 43 - IPSET_CMD_MAX, 44 - 45 - IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */ 46 - }; 47 - 48 - /* Attributes at command level */ 49 - enum { 50 - IPSET_ATTR_UNSPEC, 51 - IPSET_ATTR_PROTOCOL, /* 1: Protocol version */ 52 - IPSET_ATTR_SETNAME, /* 2: Name of the set */ 53 - IPSET_ATTR_TYPENAME, /* 3: Typename 
*/ 54 - IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */ 55 - IPSET_ATTR_REVISION, /* 4: Settype revision */ 56 - IPSET_ATTR_FAMILY, /* 5: Settype family */ 57 - IPSET_ATTR_FLAGS, /* 6: Flags at command level */ 58 - IPSET_ATTR_DATA, /* 7: Nested attributes */ 59 - IPSET_ATTR_ADT, /* 8: Multiple data containers */ 60 - IPSET_ATTR_LINENO, /* 9: Restore lineno */ 61 - IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */ 62 - IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */ 63 - __IPSET_ATTR_CMD_MAX, 64 - }; 65 - #define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1) 66 - 67 - /* CADT specific attributes */ 68 - enum { 69 - IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1, 70 - IPSET_ATTR_IP_FROM = IPSET_ATTR_IP, 71 - IPSET_ATTR_IP_TO, /* 2 */ 72 - IPSET_ATTR_CIDR, /* 3 */ 73 - IPSET_ATTR_PORT, /* 4 */ 74 - IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT, 75 - IPSET_ATTR_PORT_TO, /* 5 */ 76 - IPSET_ATTR_TIMEOUT, /* 6 */ 77 - IPSET_ATTR_PROTO, /* 7 */ 78 - IPSET_ATTR_CADT_FLAGS, /* 8 */ 79 - IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO, /* 9 */ 80 - /* Reserve empty slots */ 81 - IPSET_ATTR_CADT_MAX = 16, 82 - /* Create-only specific attributes */ 83 - IPSET_ATTR_GC, 84 - IPSET_ATTR_HASHSIZE, 85 - IPSET_ATTR_MAXELEM, 86 - IPSET_ATTR_NETMASK, 87 - IPSET_ATTR_PROBES, 88 - IPSET_ATTR_RESIZE, 89 - IPSET_ATTR_SIZE, 90 - /* Kernel-only */ 91 - IPSET_ATTR_ELEMENTS, 92 - IPSET_ATTR_REFERENCES, 93 - IPSET_ATTR_MEMSIZE, 94 - 95 - __IPSET_ATTR_CREATE_MAX, 96 - }; 97 - #define IPSET_ATTR_CREATE_MAX (__IPSET_ATTR_CREATE_MAX - 1) 98 - 99 - /* ADT specific attributes */ 100 - enum { 101 - IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1, 102 - IPSET_ATTR_NAME, 103 - IPSET_ATTR_NAMEREF, 104 - IPSET_ATTR_IP2, 105 - IPSET_ATTR_CIDR2, 106 - IPSET_ATTR_IP2_TO, 107 - IPSET_ATTR_IFACE, 108 - __IPSET_ATTR_ADT_MAX, 109 - }; 110 - #define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) 111 - 112 - /* IP specific attributes */ 113 - enum { 114 - 
IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1, 115 - IPSET_ATTR_IPADDR_IPV6, 116 - __IPSET_ATTR_IPADDR_MAX, 117 - }; 118 - #define IPSET_ATTR_IPADDR_MAX (__IPSET_ATTR_IPADDR_MAX - 1) 119 - 120 - /* Error codes */ 121 - enum ipset_errno { 122 - IPSET_ERR_PRIVATE = 4096, 123 - IPSET_ERR_PROTOCOL, 124 - IPSET_ERR_FIND_TYPE, 125 - IPSET_ERR_MAX_SETS, 126 - IPSET_ERR_BUSY, 127 - IPSET_ERR_EXIST_SETNAME2, 128 - IPSET_ERR_TYPE_MISMATCH, 129 - IPSET_ERR_EXIST, 130 - IPSET_ERR_INVALID_CIDR, 131 - IPSET_ERR_INVALID_NETMASK, 132 - IPSET_ERR_INVALID_FAMILY, 133 - IPSET_ERR_TIMEOUT, 134 - IPSET_ERR_REFERENCED, 135 - IPSET_ERR_IPADDR_IPV4, 136 - IPSET_ERR_IPADDR_IPV6, 137 - 138 - /* Type specific error codes */ 139 - IPSET_ERR_TYPE_SPECIFIC = 4352, 140 - }; 141 - 142 - /* Flags at command level */ 143 - enum ipset_cmd_flags { 144 - IPSET_FLAG_BIT_EXIST = 0, 145 - IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST), 146 - IPSET_FLAG_BIT_LIST_SETNAME = 1, 147 - IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME), 148 - IPSET_FLAG_BIT_LIST_HEADER = 2, 149 - IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER), 150 - IPSET_FLAG_CMD_MAX = 15, /* Lower half */ 151 - }; 152 - 153 - /* Flags at CADT attribute level */ 154 - enum ipset_cadt_flags { 155 - IPSET_FLAG_BIT_BEFORE = 0, 156 - IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE), 157 - IPSET_FLAG_BIT_PHYSDEV = 1, 158 - IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV), 159 - IPSET_FLAG_BIT_NOMATCH = 2, 160 - IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH), 161 - IPSET_FLAG_CADT_MAX = 15, /* Upper half */ 162 - }; 163 - 164 - /* Commands with settype-specific attributes */ 165 - enum ipset_adt { 166 - IPSET_ADD, 167 - IPSET_DEL, 168 - IPSET_TEST, 169 - IPSET_ADT_MAX, 170 - IPSET_CREATE = IPSET_ADT_MAX, 171 - IPSET_CADT_MAX, 172 - }; 173 - 174 - /* Sets are identified by an index in kernel space. Tweak with ip_set_id_t 175 - * and IPSET_INVALID_ID if you want to increase the max number of sets. 
176 - */ 177 - typedef __u16 ip_set_id_t; 178 - 179 - #define IPSET_INVALID_ID 65535 180 - 181 - enum ip_set_dim { 182 - IPSET_DIM_ZERO = 0, 183 - IPSET_DIM_ONE, 184 - IPSET_DIM_TWO, 185 - IPSET_DIM_THREE, 186 - /* Max dimension in elements. 187 - * If changed, new revision of iptables match/target is required. 188 - */ 189 - IPSET_DIM_MAX = 6, 190 - IPSET_BIT_RETURN_NOMATCH = 7, 191 - }; 192 - 193 - /* Option flags for kernel operations */ 194 - enum ip_set_kopt { 195 - IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO), 196 - IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE), 197 - IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO), 198 - IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE), 199 - IPSET_RETURN_NOMATCH = (1 << IPSET_BIT_RETURN_NOMATCH), 200 - }; 201 - 202 - #ifdef __KERNEL__ 203 16 #include <linux/ip.h> 204 17 #include <linux/ipv6.h> 205 18 #include <linux/netlink.h> ··· 18 211 #include <linux/stringify.h> 19 212 #include <linux/vmalloc.h> 20 213 #include <net/netlink.h> 214 + #include <uapi/linux/netfilter/ipset/ip_set.h> 21 215 22 216 #define _IP_SET_MODULE_DESC(a, b, c) \ 23 217 MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c) ··· 283 475 { 284 476 return 4 * ((((b - a + 8) / 8) + 3) / 4); 285 477 } 286 - 287 - #endif /* __KERNEL__ */ 288 - 289 - /* Interface to iptables/ip6tables */ 290 - 291 - #define SO_IP_SET 83 292 - 293 - union ip_set_name_index { 294 - char name[IPSET_MAXNAMELEN]; 295 - ip_set_id_t index; 296 - }; 297 - 298 - #define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */ 299 - struct ip_set_req_get_set { 300 - unsigned int op; 301 - unsigned int version; 302 - union ip_set_name_index set; 303 - }; 304 - 305 - #define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */ 306 - /* Uses ip_set_req_get_set */ 307 - 308 - #define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */ 309 - struct ip_set_req_version { 310 - unsigned int op; 311 - unsigned int version; 312 - }; 313 478 314 479 #endif /*_IP_SET_H */
+1 -10
include/linux/netfilter/ipset/ip_set_bitmap.h
··· 1 1 #ifndef __IP_SET_BITMAP_H 2 2 #define __IP_SET_BITMAP_H 3 3 4 - /* Bitmap type specific error codes */ 5 - enum { 6 - /* The element is out of the range of the set */ 7 - IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC, 8 - /* The range exceeds the size limit of the set type */ 9 - IPSET_ERR_BITMAP_RANGE_SIZE, 10 - }; 4 + #include <uapi/linux/netfilter/ipset/ip_set_bitmap.h> 11 5 12 - #ifdef __KERNEL__ 13 6 #define IPSET_BITMAP_MAX_RANGE 0x0000FFFF 14 7 15 8 /* Common functions */ ··· 18 25 19 26 return mask; 20 27 } 21 - 22 - #endif /* __KERNEL__ */ 23 28 24 29 #endif /* __IP_SET_BITMAP_H */
+1 -18
include/linux/netfilter/ipset/ip_set_hash.h
··· 1 1 #ifndef __IP_SET_HASH_H 2 2 #define __IP_SET_HASH_H 3 3 4 - /* Hash type specific error codes */ 5 - enum { 6 - /* Hash is full */ 7 - IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC, 8 - /* Null-valued element */ 9 - IPSET_ERR_HASH_ELEM, 10 - /* Invalid protocol */ 11 - IPSET_ERR_INVALID_PROTO, 12 - /* Protocol missing but must be specified */ 13 - IPSET_ERR_MISSING_PROTO, 14 - /* Range not supported */ 15 - IPSET_ERR_HASH_RANGE_UNSUPPORTED, 16 - /* Invalid range */ 17 - IPSET_ERR_HASH_RANGE, 18 - }; 4 + #include <uapi/linux/netfilter/ipset/ip_set_hash.h> 19 5 20 - #ifdef __KERNEL__ 21 6 22 7 #define IPSET_DEFAULT_HASHSIZE 1024 23 8 #define IPSET_MIMINAL_HASHSIZE 64 24 9 #define IPSET_DEFAULT_MAXELEM 65536 25 10 #define IPSET_DEFAULT_PROBES 4 26 11 #define IPSET_DEFAULT_RESIZE 100 27 - 28 - #endif /* __KERNEL__ */ 29 12 30 13 #endif /* __IP_SET_HASH_H */
+1 -18
include/linux/netfilter/ipset/ip_set_list.h
··· 1 1 #ifndef __IP_SET_LIST_H 2 2 #define __IP_SET_LIST_H 3 3 4 - /* List type specific error codes */ 5 - enum { 6 - /* Set name to be added/deleted/tested does not exist. */ 7 - IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC, 8 - /* list:set type is not permitted to add */ 9 - IPSET_ERR_LOOP, 10 - /* Missing reference set */ 11 - IPSET_ERR_BEFORE, 12 - /* Reference set does not exist */ 13 - IPSET_ERR_NAMEREF, 14 - /* Set is full */ 15 - IPSET_ERR_LIST_FULL, 16 - /* Reference set is not added to the set */ 17 - IPSET_ERR_REF_EXIST, 18 - }; 4 + #include <uapi/linux/netfilter/ipset/ip_set_list.h> 19 5 20 - #ifdef __KERNEL__ 21 6 22 7 #define IP_SET_LIST_DEFAULT_SIZE 8 23 8 #define IP_SET_LIST_MIN_SIZE 4 24 - 25 - #endif /* __KERNEL__ */ 26 9 27 10 #endif /* __IP_SET_LIST_H */
+1 -114
include/linux/netfilter/nf_conntrack_common.h
··· 1 1 #ifndef _NF_CONNTRACK_COMMON_H 2 2 #define _NF_CONNTRACK_COMMON_H 3 - /* Connection state tracking for netfilter. This is separated from, 4 - but required by, the NAT layer; it can also be used by an iptables 5 - extension. */ 6 - enum ip_conntrack_info { 7 - /* Part of an established connection (either direction). */ 8 - IP_CT_ESTABLISHED, 9 3 10 - /* Like NEW, but related to an existing connection, or ICMP error 11 - (in either direction). */ 12 - IP_CT_RELATED, 4 + #include <uapi/linux/netfilter/nf_conntrack_common.h> 13 5 14 - /* Started a new connection to track (only 15 - IP_CT_DIR_ORIGINAL); may be a retransmission. */ 16 - IP_CT_NEW, 17 - 18 - /* >= this indicates reply direction */ 19 - IP_CT_IS_REPLY, 20 - 21 - IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY, 22 - IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY, 23 - IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY, 24 - /* Number of distinct IP_CT types (no NEW in reply dirn). */ 25 - IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1 26 - }; 27 - 28 - /* Bitset representing status of connection. */ 29 - enum ip_conntrack_status { 30 - /* It's an expected connection: bit 0 set. This bit never changed */ 31 - IPS_EXPECTED_BIT = 0, 32 - IPS_EXPECTED = (1 << IPS_EXPECTED_BIT), 33 - 34 - /* We've seen packets both ways: bit 1 set. Can be set, not unset. */ 35 - IPS_SEEN_REPLY_BIT = 1, 36 - IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT), 37 - 38 - /* Conntrack should never be early-expired. */ 39 - IPS_ASSURED_BIT = 2, 40 - IPS_ASSURED = (1 << IPS_ASSURED_BIT), 41 - 42 - /* Connection is confirmed: originating packet has left box */ 43 - IPS_CONFIRMED_BIT = 3, 44 - IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT), 45 - 46 - /* Connection needs src nat in orig dir. This bit never changed. */ 47 - IPS_SRC_NAT_BIT = 4, 48 - IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT), 49 - 50 - /* Connection needs dst nat in orig dir. This bit never changed. 
*/ 51 - IPS_DST_NAT_BIT = 5, 52 - IPS_DST_NAT = (1 << IPS_DST_NAT_BIT), 53 - 54 - /* Both together. */ 55 - IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT), 56 - 57 - /* Connection needs TCP sequence adjusted. */ 58 - IPS_SEQ_ADJUST_BIT = 6, 59 - IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT), 60 - 61 - /* NAT initialization bits. */ 62 - IPS_SRC_NAT_DONE_BIT = 7, 63 - IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT), 64 - 65 - IPS_DST_NAT_DONE_BIT = 8, 66 - IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT), 67 - 68 - /* Both together */ 69 - IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE), 70 - 71 - /* Connection is dying (removed from lists), can not be unset. */ 72 - IPS_DYING_BIT = 9, 73 - IPS_DYING = (1 << IPS_DYING_BIT), 74 - 75 - /* Connection has fixed timeout. */ 76 - IPS_FIXED_TIMEOUT_BIT = 10, 77 - IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT), 78 - 79 - /* Conntrack is a template */ 80 - IPS_TEMPLATE_BIT = 11, 81 - IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT), 82 - 83 - /* Conntrack is a fake untracked entry */ 84 - IPS_UNTRACKED_BIT = 12, 85 - IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), 86 - 87 - /* Conntrack got a helper explicitly attached via CT target. 
*/ 88 - IPS_HELPER_BIT = 13, 89 - IPS_HELPER = (1 << IPS_HELPER_BIT), 90 - }; 91 - 92 - /* Connection tracking event types */ 93 - enum ip_conntrack_events { 94 - IPCT_NEW, /* new conntrack */ 95 - IPCT_RELATED, /* related conntrack */ 96 - IPCT_DESTROY, /* destroyed conntrack */ 97 - IPCT_REPLY, /* connection has seen two-way traffic */ 98 - IPCT_ASSURED, /* connection status has changed to assured */ 99 - IPCT_PROTOINFO, /* protocol information has changed */ 100 - IPCT_HELPER, /* new helper has been set */ 101 - IPCT_MARK, /* new mark has been set */ 102 - IPCT_NATSEQADJ, /* NAT is doing sequence adjustment */ 103 - IPCT_SECMARK, /* new security mark has been set */ 104 - }; 105 - 106 - enum ip_conntrack_expect_events { 107 - IPEXP_NEW, /* new expectation */ 108 - IPEXP_DESTROY, /* destroyed expectation */ 109 - }; 110 - 111 - /* expectation flags */ 112 - #define NF_CT_EXPECT_PERMANENT 0x1 113 - #define NF_CT_EXPECT_INACTIVE 0x2 114 - #define NF_CT_EXPECT_USERSPACE 0x4 115 - 116 - #ifdef __KERNEL__ 117 6 struct ip_conntrack_stat { 118 7 unsigned int searched; 119 8 unsigned int found; ··· 24 135 25 136 /* call to create an explicit dependency on nf_conntrack. */ 26 137 extern void need_conntrack(void); 27 - 28 - #endif /* __KERNEL__ */ 29 138 30 139 #endif /* _NF_CONNTRACK_COMMON_H */
+1 -15
include/linux/netfilter/nf_conntrack_ftp.h
··· 1 1 #ifndef _NF_CONNTRACK_FTP_H 2 2 #define _NF_CONNTRACK_FTP_H 3 - /* FTP tracking. */ 4 3 5 - /* This enum is exposed to userspace */ 6 - enum nf_ct_ftp_type { 7 - /* PORT command from client */ 8 - NF_CT_FTP_PORT, 9 - /* PASV response from server */ 10 - NF_CT_FTP_PASV, 11 - /* EPRT command from client */ 12 - NF_CT_FTP_EPRT, 13 - /* EPSV response from server */ 14 - NF_CT_FTP_EPSV, 15 - }; 4 + #include <uapi/linux/netfilter/nf_conntrack_ftp.h> 16 5 17 - #ifdef __KERNEL__ 18 6 19 7 #define FTP_PORT 21 20 8 ··· 30 42 unsigned int matchoff, 31 43 unsigned int matchlen, 32 44 struct nf_conntrack_expect *exp); 33 - #endif /* __KERNEL__ */ 34 - 35 45 #endif /* _NF_CONNTRACK_FTP_H */
include/linux/netfilter/nf_conntrack_sctp.h include/uapi/linux/netfilter/nf_conntrack_sctp.h
+1 -48
include/linux/netfilter/nf_conntrack_tcp.h
··· 1 1 #ifndef _NF_CONNTRACK_TCP_H 2 2 #define _NF_CONNTRACK_TCP_H 3 - /* TCP tracking. */ 4 3 5 - #include <linux/types.h> 4 + #include <uapi/linux/netfilter/nf_conntrack_tcp.h> 6 5 7 - /* This is exposed to userspace (ctnetlink) */ 8 - enum tcp_conntrack { 9 - TCP_CONNTRACK_NONE, 10 - TCP_CONNTRACK_SYN_SENT, 11 - TCP_CONNTRACK_SYN_RECV, 12 - TCP_CONNTRACK_ESTABLISHED, 13 - TCP_CONNTRACK_FIN_WAIT, 14 - TCP_CONNTRACK_CLOSE_WAIT, 15 - TCP_CONNTRACK_LAST_ACK, 16 - TCP_CONNTRACK_TIME_WAIT, 17 - TCP_CONNTRACK_CLOSE, 18 - TCP_CONNTRACK_LISTEN, /* obsolete */ 19 - #define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN 20 - TCP_CONNTRACK_MAX, 21 - TCP_CONNTRACK_IGNORE, 22 - TCP_CONNTRACK_RETRANS, 23 - TCP_CONNTRACK_UNACK, 24 - TCP_CONNTRACK_TIMEOUT_MAX 25 - }; 26 - 27 - /* Window scaling is advertised by the sender */ 28 - #define IP_CT_TCP_FLAG_WINDOW_SCALE 0x01 29 - 30 - /* SACK is permitted by the sender */ 31 - #define IP_CT_TCP_FLAG_SACK_PERM 0x02 32 - 33 - /* This sender sent FIN first */ 34 - #define IP_CT_TCP_FLAG_CLOSE_INIT 0x04 35 - 36 - /* Be liberal in window checking */ 37 - #define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 38 - 39 - /* Has unacknowledged data */ 40 - #define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 41 - 42 - /* The field td_maxack has been set */ 43 - #define IP_CT_TCP_FLAG_MAXACK_SET 0x20 44 - 45 - struct nf_ct_tcp_flags { 46 - __u8 flags; 47 - __u8 mask; 48 - }; 49 - 50 - #ifdef __KERNEL__ 51 6 52 7 struct ip_ct_tcp_state { 53 8 u_int32_t td_end; /* max of seq + len */ ··· 28 73 u_int8_t last_wscale; /* Last window scaling factor seen */ 29 74 u_int8_t last_flags; /* Last flags set */ 30 75 }; 31 - 32 - #endif /* __KERNEL__ */ 33 76 34 77 #endif /* _NF_CONNTRACK_TCP_H */
include/linux/netfilter/nf_conntrack_tuple_common.h include/uapi/linux/netfilter/nf_conntrack_tuple_common.h
include/linux/netfilter/nf_nat.h include/uapi/linux/netfilter/nf_nat.h
+1 -54
include/linux/netfilter/nfnetlink.h
··· 1 1 #ifndef _NFNETLINK_H 2 2 #define _NFNETLINK_H 3 - #include <linux/types.h> 4 - #include <linux/netfilter/nfnetlink_compat.h> 5 3 6 - enum nfnetlink_groups { 7 - NFNLGRP_NONE, 8 - #define NFNLGRP_NONE NFNLGRP_NONE 9 - NFNLGRP_CONNTRACK_NEW, 10 - #define NFNLGRP_CONNTRACK_NEW NFNLGRP_CONNTRACK_NEW 11 - NFNLGRP_CONNTRACK_UPDATE, 12 - #define NFNLGRP_CONNTRACK_UPDATE NFNLGRP_CONNTRACK_UPDATE 13 - NFNLGRP_CONNTRACK_DESTROY, 14 - #define NFNLGRP_CONNTRACK_DESTROY NFNLGRP_CONNTRACK_DESTROY 15 - NFNLGRP_CONNTRACK_EXP_NEW, 16 - #define NFNLGRP_CONNTRACK_EXP_NEW NFNLGRP_CONNTRACK_EXP_NEW 17 - NFNLGRP_CONNTRACK_EXP_UPDATE, 18 - #define NFNLGRP_CONNTRACK_EXP_UPDATE NFNLGRP_CONNTRACK_EXP_UPDATE 19 - NFNLGRP_CONNTRACK_EXP_DESTROY, 20 - #define NFNLGRP_CONNTRACK_EXP_DESTROY NFNLGRP_CONNTRACK_EXP_DESTROY 21 - __NFNLGRP_MAX, 22 - }; 23 - #define NFNLGRP_MAX (__NFNLGRP_MAX - 1) 24 - 25 - /* General form of address family dependent message. 26 - */ 27 - struct nfgenmsg { 28 - __u8 nfgen_family; /* AF_xxx */ 29 - __u8 version; /* nfnetlink version */ 30 - __be16 res_id; /* resource id */ 31 - }; 32 - 33 - #define NFNETLINK_V0 0 34 - 35 - /* netfilter netlink message types are split in two pieces: 36 - * 8 bit subsystem, 8bit operation. 
37 - */ 38 - 39 - #define NFNL_SUBSYS_ID(x) ((x & 0xff00) >> 8) 40 - #define NFNL_MSG_TYPE(x) (x & 0x00ff) 41 - 42 - /* No enum here, otherwise __stringify() trick of MODULE_ALIAS_NFNL_SUBSYS() 43 - * won't work anymore */ 44 - #define NFNL_SUBSYS_NONE 0 45 - #define NFNL_SUBSYS_CTNETLINK 1 46 - #define NFNL_SUBSYS_CTNETLINK_EXP 2 47 - #define NFNL_SUBSYS_QUEUE 3 48 - #define NFNL_SUBSYS_ULOG 4 49 - #define NFNL_SUBSYS_OSF 5 50 - #define NFNL_SUBSYS_IPSET 6 51 - #define NFNL_SUBSYS_ACCT 7 52 - #define NFNL_SUBSYS_CTNETLINK_TIMEOUT 8 53 - #define NFNL_SUBSYS_CTHELPER 9 54 - #define NFNL_SUBSYS_COUNT 10 55 - 56 - #ifdef __KERNEL__ 57 4 58 5 #include <linux/netlink.h> 59 6 #include <linux/capability.h> 60 7 #include <net/netlink.h> 8 + #include <uapi/linux/netfilter/nfnetlink.h> 61 9 62 10 struct nfnl_callback { 63 11 int (*call)(struct sock *nl, struct sk_buff *skb, ··· 40 92 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ 41 93 MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) 42 94 43 - #endif /* __KERNEL__ */ 44 95 #endif /* _NFNETLINK_H */
+1 -185
include/linux/netfilter/x_tables.h
··· 1 1 #ifndef _X_TABLES_H 2 2 #define _X_TABLES_H 3 - #include <linux/kernel.h> 4 - #include <linux/types.h> 5 3 6 - #define XT_FUNCTION_MAXNAMELEN 30 7 - #define XT_EXTENSION_MAXNAMELEN 29 8 - #define XT_TABLE_MAXNAMELEN 32 9 - 10 - struct xt_entry_match { 11 - union { 12 - struct { 13 - __u16 match_size; 14 - 15 - /* Used by userspace */ 16 - char name[XT_EXTENSION_MAXNAMELEN]; 17 - __u8 revision; 18 - } user; 19 - struct { 20 - __u16 match_size; 21 - 22 - /* Used inside the kernel */ 23 - struct xt_match *match; 24 - } kernel; 25 - 26 - /* Total length */ 27 - __u16 match_size; 28 - } u; 29 - 30 - unsigned char data[0]; 31 - }; 32 - 33 - struct xt_entry_target { 34 - union { 35 - struct { 36 - __u16 target_size; 37 - 38 - /* Used by userspace */ 39 - char name[XT_EXTENSION_MAXNAMELEN]; 40 - __u8 revision; 41 - } user; 42 - struct { 43 - __u16 target_size; 44 - 45 - /* Used inside the kernel */ 46 - struct xt_target *target; 47 - } kernel; 48 - 49 - /* Total length */ 50 - __u16 target_size; 51 - } u; 52 - 53 - unsigned char data[0]; 54 - }; 55 - 56 - #define XT_TARGET_INIT(__name, __size) \ 57 - { \ 58 - .target.u.user = { \ 59 - .target_size = XT_ALIGN(__size), \ 60 - .name = __name, \ 61 - }, \ 62 - } 63 - 64 - struct xt_standard_target { 65 - struct xt_entry_target target; 66 - int verdict; 67 - }; 68 - 69 - struct xt_error_target { 70 - struct xt_entry_target target; 71 - char errorname[XT_FUNCTION_MAXNAMELEN]; 72 - }; 73 - 74 - /* The argument to IPT_SO_GET_REVISION_*. Returns highest revision 75 - * kernel supports, if >= revision. 
*/ 76 - struct xt_get_revision { 77 - char name[XT_EXTENSION_MAXNAMELEN]; 78 - __u8 revision; 79 - }; 80 - 81 - /* CONTINUE verdict for targets */ 82 - #define XT_CONTINUE 0xFFFFFFFF 83 - 84 - /* For standard target */ 85 - #define XT_RETURN (-NF_REPEAT - 1) 86 - 87 - /* this is a dummy structure to find out the alignment requirement for a struct 88 - * containing all the fundamental data types that are used in ipt_entry, 89 - * ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my 90 - * personal pleasure to remove it -HW 91 - */ 92 - struct _xt_align { 93 - __u8 u8; 94 - __u16 u16; 95 - __u32 u32; 96 - __u64 u64; 97 - }; 98 - 99 - #define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align)) 100 - 101 - /* Standard return verdict, or do jump. */ 102 - #define XT_STANDARD_TARGET "" 103 - /* Error verdict. */ 104 - #define XT_ERROR_TARGET "ERROR" 105 - 106 - #define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0) 107 - #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0) 108 - 109 - struct xt_counters { 110 - __u64 pcnt, bcnt; /* Packet and byte counters */ 111 - }; 112 - 113 - /* The argument to IPT_SO_ADD_COUNTERS. */ 114 - struct xt_counters_info { 115 - /* Which table. */ 116 - char name[XT_TABLE_MAXNAMELEN]; 117 - 118 - unsigned int num_counters; 119 - 120 - /* The counters (actually `number' of these). */ 121 - struct xt_counters counters[0]; 122 - }; 123 - 124 - #define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */ 125 - 126 - #ifndef __KERNEL__ 127 - /* fn returns 0 to continue iteration */ 128 - #define XT_MATCH_ITERATE(type, e, fn, args...) 
\ 129 - ({ \ 130 - unsigned int __i; \ 131 - int __ret = 0; \ 132 - struct xt_entry_match *__m; \ 133 - \ 134 - for (__i = sizeof(type); \ 135 - __i < (e)->target_offset; \ 136 - __i += __m->u.match_size) { \ 137 - __m = (void *)e + __i; \ 138 - \ 139 - __ret = fn(__m , ## args); \ 140 - if (__ret != 0) \ 141 - break; \ 142 - } \ 143 - __ret; \ 144 - }) 145 - 146 - /* fn returns 0 to continue iteration */ 147 - #define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \ 148 - ({ \ 149 - unsigned int __i, __n; \ 150 - int __ret = 0; \ 151 - type *__entry; \ 152 - \ 153 - for (__i = 0, __n = 0; __i < (size); \ 154 - __i += __entry->next_offset, __n++) { \ 155 - __entry = (void *)(entries) + __i; \ 156 - if (__n < n) \ 157 - continue; \ 158 - \ 159 - __ret = fn(__entry , ## args); \ 160 - if (__ret != 0) \ 161 - break; \ 162 - } \ 163 - __ret; \ 164 - }) 165 - 166 - /* fn returns 0 to continue iteration */ 167 - #define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \ 168 - XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args) 169 - 170 - #endif /* !__KERNEL__ */ 171 - 172 - /* pos is normally a struct ipt_entry/ip6t_entry/etc. 
*/ 173 - #define xt_entry_foreach(pos, ehead, esize) \ 174 - for ((pos) = (typeof(pos))(ehead); \ 175 - (pos) < (typeof(pos))((char *)(ehead) + (esize)); \ 176 - (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset)) 177 - 178 - /* can only be xt_entry_match, so no use of typeof here */ 179 - #define xt_ematch_foreach(pos, entry) \ 180 - for ((pos) = (struct xt_entry_match *)entry->elems; \ 181 - (pos) < (struct xt_entry_match *)((char *)(entry) + \ 182 - (entry)->target_offset); \ 183 - (pos) = (struct xt_entry_match *)((char *)(pos) + \ 184 - (pos)->u.match_size)) 185 - 186 - #ifdef __KERNEL__ 187 4 188 5 #include <linux/netdevice.h> 6 + #include <uapi/linux/netfilter/x_tables.h> 189 7 190 8 /** 191 9 * struct xt_action_param - parameters for matches/targets ··· 435 617 void __user **dstptr, unsigned int *size); 436 618 437 619 #endif /* CONFIG_COMPAT */ 438 - #endif /* __KERNEL__ */ 439 - 440 620 #endif /* _X_TABLES_H */
include/linux/netfilter/xt_AUDIT.h include/uapi/linux/netfilter/xt_AUDIT.h
include/linux/netfilter/xt_CHECKSUM.h include/uapi/linux/netfilter/xt_CHECKSUM.h
include/linux/netfilter/xt_CLASSIFY.h include/uapi/linux/netfilter/xt_CLASSIFY.h
include/linux/netfilter/xt_CONNMARK.h include/uapi/linux/netfilter/xt_CONNMARK.h
include/linux/netfilter/xt_CONNSECMARK.h include/uapi/linux/netfilter/xt_CONNSECMARK.h
include/linux/netfilter/xt_CT.h include/uapi/linux/netfilter/xt_CT.h
include/linux/netfilter/xt_DSCP.h include/uapi/linux/netfilter/xt_DSCP.h
include/linux/netfilter/xt_IDLETIMER.h include/uapi/linux/netfilter/xt_IDLETIMER.h
include/linux/netfilter/xt_LED.h include/uapi/linux/netfilter/xt_LED.h
include/linux/netfilter/xt_LOG.h include/uapi/linux/netfilter/xt_LOG.h
include/linux/netfilter/xt_MARK.h include/uapi/linux/netfilter/xt_MARK.h
include/linux/netfilter/xt_NFLOG.h include/uapi/linux/netfilter/xt_NFLOG.h
include/linux/netfilter/xt_NFQUEUE.h include/uapi/linux/netfilter/xt_NFQUEUE.h
include/linux/netfilter/xt_RATEEST.h include/uapi/linux/netfilter/xt_RATEEST.h
include/linux/netfilter/xt_SECMARK.h include/uapi/linux/netfilter/xt_SECMARK.h
include/linux/netfilter/xt_TCPMSS.h include/uapi/linux/netfilter/xt_TCPMSS.h
include/linux/netfilter/xt_TCPOPTSTRIP.h include/uapi/linux/netfilter/xt_TCPOPTSTRIP.h
include/linux/netfilter/xt_TEE.h include/uapi/linux/netfilter/xt_TEE.h
include/linux/netfilter/xt_TPROXY.h include/uapi/linux/netfilter/xt_TPROXY.h
include/linux/netfilter/xt_addrtype.h include/uapi/linux/netfilter/xt_addrtype.h
include/linux/netfilter/xt_cluster.h include/uapi/linux/netfilter/xt_cluster.h
include/linux/netfilter/xt_comment.h include/uapi/linux/netfilter/xt_comment.h
include/linux/netfilter/xt_connbytes.h include/uapi/linux/netfilter/xt_connbytes.h
include/linux/netfilter/xt_connlimit.h include/uapi/linux/netfilter/xt_connlimit.h
include/linux/netfilter/xt_connmark.h include/uapi/linux/netfilter/xt_connmark.h
include/linux/netfilter/xt_conntrack.h include/uapi/linux/netfilter/xt_conntrack.h
include/linux/netfilter/xt_cpu.h include/uapi/linux/netfilter/xt_cpu.h
include/linux/netfilter/xt_dccp.h include/uapi/linux/netfilter/xt_dccp.h
include/linux/netfilter/xt_devgroup.h include/uapi/linux/netfilter/xt_devgroup.h
include/linux/netfilter/xt_dscp.h include/uapi/linux/netfilter/xt_dscp.h
include/linux/netfilter/xt_ecn.h include/uapi/linux/netfilter/xt_ecn.h
include/linux/netfilter/xt_esp.h include/uapi/linux/netfilter/xt_esp.h
+1 -70
include/linux/netfilter/xt_hashlimit.h
··· 1 1 #ifndef _XT_HASHLIMIT_H 2 2 #define _XT_HASHLIMIT_H 3 3 4 - #include <linux/types.h> 4 + #include <uapi/linux/netfilter/xt_hashlimit.h> 5 5 6 - /* timings are in milliseconds. */ 7 - #define XT_HASHLIMIT_SCALE 10000 8 - /* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 9 - * seconds, or one packet every 59 hours. 10 - */ 11 - 12 - /* packet length accounting is done in 16-byte steps */ 13 - #define XT_HASHLIMIT_BYTE_SHIFT 4 14 - 15 - /* details of this structure hidden by the implementation */ 16 - struct xt_hashlimit_htable; 17 - 18 - enum { 19 - XT_HASHLIMIT_HASH_DIP = 1 << 0, 20 - XT_HASHLIMIT_HASH_DPT = 1 << 1, 21 - XT_HASHLIMIT_HASH_SIP = 1 << 2, 22 - XT_HASHLIMIT_HASH_SPT = 1 << 3, 23 - XT_HASHLIMIT_INVERT = 1 << 4, 24 - XT_HASHLIMIT_BYTES = 1 << 5, 25 - }; 26 - #ifdef __KERNEL__ 27 6 #define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \ 28 7 XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \ 29 8 XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES) 30 - #endif 31 - 32 - struct hashlimit_cfg { 33 - __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */ 34 - __u32 avg; /* Average secs between packets * scale */ 35 - __u32 burst; /* Period multiplier for upper limit. */ 36 - 37 - /* user specified */ 38 - __u32 size; /* how many buckets */ 39 - __u32 max; /* max number of entries */ 40 - __u32 gc_interval; /* gc interval */ 41 - __u32 expire; /* when do entries expire? */ 42 - }; 43 - 44 - struct xt_hashlimit_info { 45 - char name [IFNAMSIZ]; /* name */ 46 - struct hashlimit_cfg cfg; 47 - 48 - /* Used internally by the kernel */ 49 - struct xt_hashlimit_htable *hinfo; 50 - union { 51 - void *ptr; 52 - struct xt_hashlimit_info *master; 53 - } u; 54 - }; 55 - 56 - struct hashlimit_cfg1 { 57 - __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */ 58 - __u32 avg; /* Average secs between packets * scale */ 59 - __u32 burst; /* Period multiplier for upper limit. 
*/ 60 - 61 - /* user specified */ 62 - __u32 size; /* how many buckets */ 63 - __u32 max; /* max number of entries */ 64 - __u32 gc_interval; /* gc interval */ 65 - __u32 expire; /* when do entries expire? */ 66 - 67 - __u8 srcmask, dstmask; 68 - }; 69 - 70 - struct xt_hashlimit_mtinfo1 { 71 - char name[IFNAMSIZ]; 72 - struct hashlimit_cfg1 cfg; 73 - 74 - /* Used internally by the kernel */ 75 - struct xt_hashlimit_htable *hinfo __attribute__((aligned(8))); 76 - }; 77 - 78 9 #endif /*_XT_HASHLIMIT_H*/
include/linux/netfilter/xt_helper.h include/uapi/linux/netfilter/xt_helper.h
include/linux/netfilter/xt_iprange.h include/uapi/linux/netfilter/xt_iprange.h
include/linux/netfilter/xt_ipvs.h include/uapi/linux/netfilter/xt_ipvs.h
include/linux/netfilter/xt_length.h include/uapi/linux/netfilter/xt_length.h
include/linux/netfilter/xt_limit.h include/uapi/linux/netfilter/xt_limit.h
include/linux/netfilter/xt_mac.h include/uapi/linux/netfilter/xt_mac.h
include/linux/netfilter/xt_mark.h include/uapi/linux/netfilter/xt_mark.h
include/linux/netfilter/xt_multiport.h include/uapi/linux/netfilter/xt_multiport.h
include/linux/netfilter/xt_nfacct.h include/uapi/linux/netfilter/xt_nfacct.h
include/linux/netfilter/xt_osf.h include/uapi/linux/netfilter/xt_osf.h
include/linux/netfilter/xt_owner.h include/uapi/linux/netfilter/xt_owner.h
+1 -20
include/linux/netfilter/xt_physdev.h
··· 1 1 #ifndef _XT_PHYSDEV_H 2 2 #define _XT_PHYSDEV_H 3 3 4 - #include <linux/types.h> 5 - 6 - #ifdef __KERNEL__ 7 4 #include <linux/if.h> 8 - #endif 9 - 10 - #define XT_PHYSDEV_OP_IN 0x01 11 - #define XT_PHYSDEV_OP_OUT 0x02 12 - #define XT_PHYSDEV_OP_BRIDGED 0x04 13 - #define XT_PHYSDEV_OP_ISIN 0x08 14 - #define XT_PHYSDEV_OP_ISOUT 0x10 15 - #define XT_PHYSDEV_OP_MASK (0x20 - 1) 16 - 17 - struct xt_physdev_info { 18 - char physindev[IFNAMSIZ]; 19 - char in_mask[IFNAMSIZ]; 20 - char physoutdev[IFNAMSIZ]; 21 - char out_mask[IFNAMSIZ]; 22 - __u8 invert; 23 - __u8 bitmask; 24 - }; 5 + #include <uapi/linux/netfilter/xt_physdev.h> 25 6 26 7 #endif /*_XT_PHYSDEV_H*/
include/linux/netfilter/xt_pkttype.h include/uapi/linux/netfilter/xt_pkttype.h
include/linux/netfilter/xt_policy.h include/uapi/linux/netfilter/xt_policy.h
include/linux/netfilter/xt_quota.h include/uapi/linux/netfilter/xt_quota.h
include/linux/netfilter/xt_rateest.h include/uapi/linux/netfilter/xt_rateest.h
include/linux/netfilter/xt_realm.h include/uapi/linux/netfilter/xt_realm.h
include/linux/netfilter/xt_recent.h include/uapi/linux/netfilter/xt_recent.h
include/linux/netfilter/xt_sctp.h include/uapi/linux/netfilter/xt_sctp.h
include/linux/netfilter/xt_set.h include/uapi/linux/netfilter/xt_set.h
include/linux/netfilter/xt_socket.h include/uapi/linux/netfilter/xt_socket.h
include/linux/netfilter/xt_state.h include/uapi/linux/netfilter/xt_state.h
include/linux/netfilter/xt_statistic.h include/uapi/linux/netfilter/xt_statistic.h
include/linux/netfilter/xt_string.h include/uapi/linux/netfilter/xt_string.h
include/linux/netfilter/xt_tcpmss.h include/uapi/linux/netfilter/xt_tcpmss.h
include/linux/netfilter/xt_tcpudp.h include/uapi/linux/netfilter/xt_tcpudp.h
include/linux/netfilter/xt_time.h include/uapi/linux/netfilter/xt_time.h
include/linux/netfilter/xt_u32.h include/uapi/linux/netfilter/xt_u32.h
-2
include/linux/netfilter_arp/Kbuild
··· 1 - header-y += arp_tables.h 2 - header-y += arpt_mangle.h
+1 -199
include/linux/netfilter_arp/arp_tables.h
··· 5 5 * network byte order. 6 6 * flags are stored in host byte order (of course). 7 7 */ 8 - 9 8 #ifndef _ARPTABLES_H 10 9 #define _ARPTABLES_H 11 10 12 - #ifdef __KERNEL__ 13 11 #include <linux/if.h> 14 12 #include <linux/in.h> 15 13 #include <linux/if_arp.h> 16 14 #include <linux/skbuff.h> 17 - #endif 18 - #include <linux/types.h> 19 - #include <linux/compiler.h> 20 - #include <linux/netfilter_arp.h> 21 - 22 - #include <linux/netfilter/x_tables.h> 23 - 24 - #ifndef __KERNEL__ 25 - #define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN 26 - #define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN 27 - #define arpt_entry_target xt_entry_target 28 - #define arpt_standard_target xt_standard_target 29 - #define arpt_error_target xt_error_target 30 - #define ARPT_CONTINUE XT_CONTINUE 31 - #define ARPT_RETURN XT_RETURN 32 - #define arpt_counters_info xt_counters_info 33 - #define arpt_counters xt_counters 34 - #define ARPT_STANDARD_TARGET XT_STANDARD_TARGET 35 - #define ARPT_ERROR_TARGET XT_ERROR_TARGET 36 - #define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ 37 - XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args) 38 - #endif 39 - 40 - #define ARPT_DEV_ADDR_LEN_MAX 16 41 - 42 - struct arpt_devaddr_info { 43 - char addr[ARPT_DEV_ADDR_LEN_MAX]; 44 - char mask[ARPT_DEV_ADDR_LEN_MAX]; 45 - }; 46 - 47 - /* Yes, Virginia, you have to zero the padding. */ 48 - struct arpt_arp { 49 - /* Source and target IP addr */ 50 - struct in_addr src, tgt; 51 - /* Mask for src and target IP addr */ 52 - struct in_addr smsk, tmsk; 53 - 54 - /* Device hw address length, src+target device addresses */ 55 - __u8 arhln, arhln_mask; 56 - struct arpt_devaddr_info src_devaddr; 57 - struct arpt_devaddr_info tgt_devaddr; 58 - 59 - /* ARP operation code. */ 60 - __be16 arpop, arpop_mask; 61 - 62 - /* ARP hardware address and protocol address format. 
*/ 63 - __be16 arhrd, arhrd_mask; 64 - __be16 arpro, arpro_mask; 65 - 66 - /* The protocol address length is only accepted if it is 4 67 - * so there is no use in offering a way to do filtering on it. 68 - */ 69 - 70 - char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; 71 - unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; 72 - 73 - /* Flags word */ 74 - __u8 flags; 75 - /* Inverse flags */ 76 - __u16 invflags; 77 - }; 78 - 79 - /* Values for "flag" field in struct arpt_ip (general arp structure). 80 - * No flags defined yet. 81 - */ 82 - #define ARPT_F_MASK 0x00 /* All possible flag bits mask. */ 83 - 84 - /* Values for "inv" field in struct arpt_arp. */ 85 - #define ARPT_INV_VIA_IN 0x0001 /* Invert the sense of IN IFACE. */ 86 - #define ARPT_INV_VIA_OUT 0x0002 /* Invert the sense of OUT IFACE */ 87 - #define ARPT_INV_SRCIP 0x0004 /* Invert the sense of SRC IP. */ 88 - #define ARPT_INV_TGTIP 0x0008 /* Invert the sense of TGT IP. */ 89 - #define ARPT_INV_SRCDEVADDR 0x0010 /* Invert the sense of SRC DEV ADDR. */ 90 - #define ARPT_INV_TGTDEVADDR 0x0020 /* Invert the sense of TGT DEV ADDR. */ 91 - #define ARPT_INV_ARPOP 0x0040 /* Invert the sense of ARP OP. */ 92 - #define ARPT_INV_ARPHRD 0x0080 /* Invert the sense of ARP HRD. */ 93 - #define ARPT_INV_ARPPRO 0x0100 /* Invert the sense of ARP PRO. */ 94 - #define ARPT_INV_ARPHLN 0x0200 /* Invert the sense of ARP HLN. */ 95 - #define ARPT_INV_MASK 0x03FF /* All possible flag bits mask. */ 96 - 97 - /* This structure defines each of the firewall rules. Consists of 3 98 - parts which are 1) general ARP header stuff 2) match specific 99 - stuff 3) the target to perform if the rule matches */ 100 - struct arpt_entry 101 - { 102 - struct arpt_arp arp; 103 - 104 - /* Size of arpt_entry + matches */ 105 - __u16 target_offset; 106 - /* Size of arpt_entry + matches + target */ 107 - __u16 next_offset; 108 - 109 - /* Back pointer */ 110 - unsigned int comefrom; 111 - 112 - /* Packet and byte counters. 
*/ 113 - struct xt_counters counters; 114 - 115 - /* The matches (if any), then the target. */ 116 - unsigned char elems[0]; 117 - }; 118 - 119 - /* 120 - * New IP firewall options for [gs]etsockopt at the RAW IP level. 121 - * Unlike BSD Linux inherits IP options so you don't have to use a raw 122 - * socket for this. Instead we check rights in the calls. 123 - * 124 - * ATTENTION: check linux/in.h before adding new number here. 125 - */ 126 - #define ARPT_BASE_CTL 96 127 - 128 - #define ARPT_SO_SET_REPLACE (ARPT_BASE_CTL) 129 - #define ARPT_SO_SET_ADD_COUNTERS (ARPT_BASE_CTL + 1) 130 - #define ARPT_SO_SET_MAX ARPT_SO_SET_ADD_COUNTERS 131 - 132 - #define ARPT_SO_GET_INFO (ARPT_BASE_CTL) 133 - #define ARPT_SO_GET_ENTRIES (ARPT_BASE_CTL + 1) 134 - /* #define ARPT_SO_GET_REVISION_MATCH (APRT_BASE_CTL + 2) */ 135 - #define ARPT_SO_GET_REVISION_TARGET (ARPT_BASE_CTL + 3) 136 - #define ARPT_SO_GET_MAX (ARPT_SO_GET_REVISION_TARGET) 137 - 138 - /* The argument to ARPT_SO_GET_INFO */ 139 - struct arpt_getinfo { 140 - /* Which table: caller fills this in. */ 141 - char name[XT_TABLE_MAXNAMELEN]; 142 - 143 - /* Kernel fills these in. */ 144 - /* Which hook entry points are valid: bitmask */ 145 - unsigned int valid_hooks; 146 - 147 - /* Hook entry points: one per netfilter hook. */ 148 - unsigned int hook_entry[NF_ARP_NUMHOOKS]; 149 - 150 - /* Underflow points. */ 151 - unsigned int underflow[NF_ARP_NUMHOOKS]; 152 - 153 - /* Number of entries */ 154 - unsigned int num_entries; 155 - 156 - /* Size of entries. */ 157 - unsigned int size; 158 - }; 159 - 160 - /* The argument to ARPT_SO_SET_REPLACE. */ 161 - struct arpt_replace { 162 - /* Which table. */ 163 - char name[XT_TABLE_MAXNAMELEN]; 164 - 165 - /* Which hook entry points are valid: bitmask. You can't 166 - change this. 
*/ 167 - unsigned int valid_hooks; 168 - 169 - /* Number of entries */ 170 - unsigned int num_entries; 171 - 172 - /* Total size of new entries */ 173 - unsigned int size; 174 - 175 - /* Hook entry points. */ 176 - unsigned int hook_entry[NF_ARP_NUMHOOKS]; 177 - 178 - /* Underflow points. */ 179 - unsigned int underflow[NF_ARP_NUMHOOKS]; 180 - 181 - /* Information about old entries: */ 182 - /* Number of counters (must be equal to current number of entries). */ 183 - unsigned int num_counters; 184 - /* The old entries' counters. */ 185 - struct xt_counters __user *counters; 186 - 187 - /* The entries (hang off end: not really an array). */ 188 - struct arpt_entry entries[0]; 189 - }; 190 - 191 - /* The argument to ARPT_SO_GET_ENTRIES. */ 192 - struct arpt_get_entries { 193 - /* Which table: user fills this in. */ 194 - char name[XT_TABLE_MAXNAMELEN]; 195 - 196 - /* User fills this in: total entry size. */ 197 - unsigned int size; 198 - 199 - /* The entries. */ 200 - struct arpt_entry entrytable[0]; 201 - }; 202 - 203 - /* Helper functions */ 204 - static __inline__ struct xt_entry_target *arpt_get_target(struct arpt_entry *e) 205 - { 206 - return (void *)e + e->target_offset; 207 - } 208 - 209 - /* 210 - * Main firewall chains definitions and global var's definitions. 211 - */ 212 - #ifdef __KERNEL__ 15 + #include <uapi/linux/netfilter_arp/arp_tables.h> 213 16 214 17 /* Standard entry. */ 215 18 struct arpt_standard { ··· 77 274 } 78 275 79 276 #endif /* CONFIG_COMPAT */ 80 - #endif /*__KERNEL__*/ 81 277 #endif /* _ARPTABLES_H */
include/linux/netfilter_arp/arpt_mangle.h include/uapi/linux/netfilter_arp/arpt_mangle.h
-18
include/linux/netfilter_bridge/Kbuild
··· 1 - header-y += ebt_802_3.h 2 - header-y += ebt_among.h 3 - header-y += ebt_arp.h 4 - header-y += ebt_arpreply.h 5 - header-y += ebt_ip.h 6 - header-y += ebt_ip6.h 7 - header-y += ebt_limit.h 8 - header-y += ebt_log.h 9 - header-y += ebt_mark_m.h 10 - header-y += ebt_mark_t.h 11 - header-y += ebt_nat.h 12 - header-y += ebt_nflog.h 13 - header-y += ebt_pkttype.h 14 - header-y += ebt_redirect.h 15 - header-y += ebt_stp.h 16 - header-y += ebt_ulog.h 17 - header-y += ebt_vlan.h 18 - header-y += ebtables.h
+1 -60
include/linux/netfilter_bridge/ebt_802_3.h
··· 1 1 #ifndef __LINUX_BRIDGE_EBT_802_3_H 2 2 #define __LINUX_BRIDGE_EBT_802_3_H 3 3 4 - #include <linux/types.h> 5 - 6 - #define EBT_802_3_SAP 0x01 7 - #define EBT_802_3_TYPE 0x02 8 - 9 - #define EBT_802_3_MATCH "802_3" 10 - 11 - /* 12 - * If frame has DSAP/SSAP value 0xaa you must check the SNAP type 13 - * to discover what kind of packet we're carrying. 14 - */ 15 - #define CHECK_TYPE 0xaa 16 - 17 - /* 18 - * Control field may be one or two bytes. If the first byte has 19 - * the value 0x03 then the entire length is one byte, otherwise it is two. 20 - * One byte controls are used in Unnumbered Information frames. 21 - * Two byte controls are used in Numbered Information frames. 22 - */ 23 - #define IS_UI 0x03 24 - 25 - #define EBT_802_3_MASK (EBT_802_3_SAP | EBT_802_3_TYPE | EBT_802_3) 26 - 27 - /* ui has one byte ctrl, ni has two */ 28 - struct hdr_ui { 29 - __u8 dsap; 30 - __u8 ssap; 31 - __u8 ctrl; 32 - __u8 orig[3]; 33 - __be16 type; 34 - }; 35 - 36 - struct hdr_ni { 37 - __u8 dsap; 38 - __u8 ssap; 39 - __be16 ctrl; 40 - __u8 orig[3]; 41 - __be16 type; 42 - }; 43 - 44 - struct ebt_802_3_hdr { 45 - __u8 daddr[6]; 46 - __u8 saddr[6]; 47 - __be16 len; 48 - union { 49 - struct hdr_ui ui; 50 - struct hdr_ni ni; 51 - } llc; 52 - }; 53 - 54 - #ifdef __KERNEL__ 55 4 #include <linux/skbuff.h> 5 + #include <uapi/linux/netfilter_bridge/ebt_802_3.h> 56 6 57 7 static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) 58 8 { 59 9 return (struct ebt_802_3_hdr *)skb_mac_header(skb); 60 10 } 61 - #endif 62 - 63 - struct ebt_802_3_info { 64 - __u8 sap; 65 - __be16 type; 66 - __u8 bitmask; 67 - __u8 invflags; 68 - }; 69 - 70 11 #endif
include/linux/netfilter_bridge/ebt_among.h include/uapi/linux/netfilter_bridge/ebt_among.h
include/linux/netfilter_bridge/ebt_arp.h include/uapi/linux/netfilter_bridge/ebt_arp.h
include/linux/netfilter_bridge/ebt_arpreply.h include/uapi/linux/netfilter_bridge/ebt_arpreply.h
include/linux/netfilter_bridge/ebt_ip.h include/uapi/linux/netfilter_bridge/ebt_ip.h
include/linux/netfilter_bridge/ebt_ip6.h include/uapi/linux/netfilter_bridge/ebt_ip6.h
include/linux/netfilter_bridge/ebt_limit.h include/uapi/linux/netfilter_bridge/ebt_limit.h
include/linux/netfilter_bridge/ebt_log.h include/uapi/linux/netfilter_bridge/ebt_log.h
include/linux/netfilter_bridge/ebt_mark_m.h include/uapi/linux/netfilter_bridge/ebt_mark_m.h
include/linux/netfilter_bridge/ebt_mark_t.h include/uapi/linux/netfilter_bridge/ebt_mark_t.h
include/linux/netfilter_bridge/ebt_nat.h include/uapi/linux/netfilter_bridge/ebt_nat.h
include/linux/netfilter_bridge/ebt_nflog.h include/uapi/linux/netfilter_bridge/ebt_nflog.h
include/linux/netfilter_bridge/ebt_pkttype.h include/uapi/linux/netfilter_bridge/ebt_pkttype.h
include/linux/netfilter_bridge/ebt_redirect.h include/uapi/linux/netfilter_bridge/ebt_redirect.h
include/linux/netfilter_bridge/ebt_stp.h include/uapi/linux/netfilter_bridge/ebt_stp.h
include/linux/netfilter_bridge/ebt_ulog.h include/uapi/linux/netfilter_bridge/ebt_ulog.h
include/linux/netfilter_bridge/ebt_vlan.h include/uapi/linux/netfilter_bridge/ebt_vlan.h
+1 -254
include/linux/netfilter_bridge/ebtables.h
··· 9 9 * This code is stongly inspired on the iptables code which is 10 10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 11 11 */ 12 - 13 12 #ifndef __LINUX_BRIDGE_EFF_H 14 13 #define __LINUX_BRIDGE_EFF_H 15 - #include <linux/if.h> 16 - #include <linux/netfilter_bridge.h> 17 - #include <linux/if_ether.h> 18 14 19 - #define EBT_TABLE_MAXNAMELEN 32 20 - #define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN 21 - #define EBT_FUNCTION_MAXNAMELEN EBT_TABLE_MAXNAMELEN 15 + #include <uapi/linux/netfilter_bridge/ebtables.h> 22 16 23 - /* verdicts >0 are "branches" */ 24 - #define EBT_ACCEPT -1 25 - #define EBT_DROP -2 26 - #define EBT_CONTINUE -3 27 - #define EBT_RETURN -4 28 - #define NUM_STANDARD_TARGETS 4 29 - /* ebtables target modules store the verdict inside an int. We can 30 - * reclaim a part of this int for backwards compatible extensions. 31 - * The 4 lsb are more than enough to store the verdict. */ 32 - #define EBT_VERDICT_BITS 0x0000000F 33 - 34 - struct xt_match; 35 - struct xt_target; 36 - 37 - struct ebt_counter { 38 - uint64_t pcnt; 39 - uint64_t bcnt; 40 - }; 41 - 42 - struct ebt_replace { 43 - char name[EBT_TABLE_MAXNAMELEN]; 44 - unsigned int valid_hooks; 45 - /* nr of rules in the table */ 46 - unsigned int nentries; 47 - /* total size of the entries */ 48 - unsigned int entries_size; 49 - /* start of the chains */ 50 - struct ebt_entries __user *hook_entry[NF_BR_NUMHOOKS]; 51 - /* nr of counters userspace expects back */ 52 - unsigned int num_counters; 53 - /* where the kernel will put the old counters */ 54 - struct ebt_counter __user *counters; 55 - char __user *entries; 56 - }; 57 - 58 - struct ebt_replace_kernel { 59 - char name[EBT_TABLE_MAXNAMELEN]; 60 - unsigned int valid_hooks; 61 - /* nr of rules in the table */ 62 - unsigned int nentries; 63 - /* total size of the entries */ 64 - unsigned int entries_size; 65 - /* start of the chains */ 66 - struct ebt_entries *hook_entry[NF_BR_NUMHOOKS]; 67 - /* nr of counters userspace expects 
back */ 68 - unsigned int num_counters; 69 - /* where the kernel will put the old counters */ 70 - struct ebt_counter *counters; 71 - char *entries; 72 - }; 73 - 74 - struct ebt_entries { 75 - /* this field is always set to zero 76 - * See EBT_ENTRY_OR_ENTRIES. 77 - * Must be same size as ebt_entry.bitmask */ 78 - unsigned int distinguisher; 79 - /* the chain name */ 80 - char name[EBT_CHAIN_MAXNAMELEN]; 81 - /* counter offset for this chain */ 82 - unsigned int counter_offset; 83 - /* one standard (accept, drop, return) per hook */ 84 - int policy; 85 - /* nr. of entries */ 86 - unsigned int nentries; 87 - /* entry list */ 88 - char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); 89 - }; 90 - 91 - /* used for the bitmask of struct ebt_entry */ 92 - 93 - /* This is a hack to make a difference between an ebt_entry struct and an 94 - * ebt_entries struct when traversing the entries from start to end. 95 - * Using this simplifies the code a lot, while still being able to use 96 - * ebt_entries. 97 - * Contrary, iptables doesn't use something like ebt_entries and therefore uses 98 - * different techniques for naming the policy and such. So, iptables doesn't 99 - * need a hack like this. 
100 - */ 101 - #define EBT_ENTRY_OR_ENTRIES 0x01 102 - /* these are the normal masks */ 103 - #define EBT_NOPROTO 0x02 104 - #define EBT_802_3 0x04 105 - #define EBT_SOURCEMAC 0x08 106 - #define EBT_DESTMAC 0x10 107 - #define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC \ 108 - | EBT_ENTRY_OR_ENTRIES) 109 - 110 - #define EBT_IPROTO 0x01 111 - #define EBT_IIN 0x02 112 - #define EBT_IOUT 0x04 113 - #define EBT_ISOURCE 0x8 114 - #define EBT_IDEST 0x10 115 - #define EBT_ILOGICALIN 0x20 116 - #define EBT_ILOGICALOUT 0x40 117 - #define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ILOGICALIN \ 118 - | EBT_ILOGICALOUT | EBT_ISOURCE | EBT_IDEST) 119 - 120 - struct ebt_entry_match { 121 - union { 122 - char name[EBT_FUNCTION_MAXNAMELEN]; 123 - struct xt_match *match; 124 - } u; 125 - /* size of data */ 126 - unsigned int match_size; 127 - unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); 128 - }; 129 - 130 - struct ebt_entry_watcher { 131 - union { 132 - char name[EBT_FUNCTION_MAXNAMELEN]; 133 - struct xt_target *watcher; 134 - } u; 135 - /* size of data */ 136 - unsigned int watcher_size; 137 - unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); 138 - }; 139 - 140 - struct ebt_entry_target { 141 - union { 142 - char name[EBT_FUNCTION_MAXNAMELEN]; 143 - struct xt_target *target; 144 - } u; 145 - /* size of data */ 146 - unsigned int target_size; 147 - unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); 148 - }; 149 - 150 - #define EBT_STANDARD_TARGET "standard" 151 - struct ebt_standard_target { 152 - struct ebt_entry_target target; 153 - int verdict; 154 - }; 155 - 156 - /* one entry */ 157 - struct ebt_entry { 158 - /* this needs to be the first field */ 159 - unsigned int bitmask; 160 - unsigned int invflags; 161 - __be16 ethproto; 162 - /* the physical in-dev */ 163 - char in[IFNAMSIZ]; 164 - /* the logical in-dev */ 165 - char logical_in[IFNAMSIZ]; 
166 - /* the physical out-dev */ 167 - char out[IFNAMSIZ]; 168 - /* the logical out-dev */ 169 - char logical_out[IFNAMSIZ]; 170 - unsigned char sourcemac[ETH_ALEN]; 171 - unsigned char sourcemsk[ETH_ALEN]; 172 - unsigned char destmac[ETH_ALEN]; 173 - unsigned char destmsk[ETH_ALEN]; 174 - /* sizeof ebt_entry + matches */ 175 - unsigned int watchers_offset; 176 - /* sizeof ebt_entry + matches + watchers */ 177 - unsigned int target_offset; 178 - /* sizeof ebt_entry + matches + watchers + target */ 179 - unsigned int next_offset; 180 - unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); 181 - }; 182 - 183 - /* {g,s}etsockopt numbers */ 184 - #define EBT_BASE_CTL 128 185 - 186 - #define EBT_SO_SET_ENTRIES (EBT_BASE_CTL) 187 - #define EBT_SO_SET_COUNTERS (EBT_SO_SET_ENTRIES+1) 188 - #define EBT_SO_SET_MAX (EBT_SO_SET_COUNTERS+1) 189 - 190 - #define EBT_SO_GET_INFO (EBT_BASE_CTL) 191 - #define EBT_SO_GET_ENTRIES (EBT_SO_GET_INFO+1) 192 - #define EBT_SO_GET_INIT_INFO (EBT_SO_GET_ENTRIES+1) 193 - #define EBT_SO_GET_INIT_ENTRIES (EBT_SO_GET_INIT_INFO+1) 194 - #define EBT_SO_GET_MAX (EBT_SO_GET_INIT_ENTRIES+1) 195 - 196 - #ifdef __KERNEL__ 197 17 198 18 /* return values for match() functions */ 199 19 #define EBT_MATCH 0 ··· 123 303 #define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS)) 124 304 /* True if the target is not a standard target */ 125 305 #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) 126 - 127 - #endif /* __KERNEL__ */ 128 - 129 - /* blatently stolen from ip_tables.h 130 - * fn returns 0 to continue iteration */ 131 - #define EBT_MATCH_ITERATE(e, fn, args...) 
\ 132 - ({ \ 133 - unsigned int __i; \ 134 - int __ret = 0; \ 135 - struct ebt_entry_match *__match; \ 136 - \ 137 - for (__i = sizeof(struct ebt_entry); \ 138 - __i < (e)->watchers_offset; \ 139 - __i += __match->match_size + \ 140 - sizeof(struct ebt_entry_match)) { \ 141 - __match = (void *)(e) + __i; \ 142 - \ 143 - __ret = fn(__match , ## args); \ 144 - if (__ret != 0) \ 145 - break; \ 146 - } \ 147 - if (__ret == 0) { \ 148 - if (__i != (e)->watchers_offset) \ 149 - __ret = -EINVAL; \ 150 - } \ 151 - __ret; \ 152 - }) 153 - 154 - #define EBT_WATCHER_ITERATE(e, fn, args...) \ 155 - ({ \ 156 - unsigned int __i; \ 157 - int __ret = 0; \ 158 - struct ebt_entry_watcher *__watcher; \ 159 - \ 160 - for (__i = e->watchers_offset; \ 161 - __i < (e)->target_offset; \ 162 - __i += __watcher->watcher_size + \ 163 - sizeof(struct ebt_entry_watcher)) { \ 164 - __watcher = (void *)(e) + __i; \ 165 - \ 166 - __ret = fn(__watcher , ## args); \ 167 - if (__ret != 0) \ 168 - break; \ 169 - } \ 170 - if (__ret == 0) { \ 171 - if (__i != (e)->target_offset) \ 172 - __ret = -EINVAL; \ 173 - } \ 174 - __ret; \ 175 - }) 176 - 177 - #define EBT_ENTRY_ITERATE(entries, size, fn, args...) \ 178 - ({ \ 179 - unsigned int __i; \ 180 - int __ret = 0; \ 181 - struct ebt_entry *__entry; \ 182 - \ 183 - for (__i = 0; __i < (size);) { \ 184 - __entry = (void *)(entries) + __i; \ 185 - __ret = fn(__entry , ## args); \ 186 - if (__ret != 0) \ 187 - break; \ 188 - if (__entry->bitmask != 0) \ 189 - __i += __entry->next_offset; \ 190 - else \ 191 - __i += sizeof(struct ebt_entries); \ 192 - } \ 193 - if (__ret == 0) { \ 194 - if (__i != (size)) \ 195 - __ret = -EINVAL; \ 196 - } \ 197 - __ret; \ 198 - }) 199 306 200 307 #endif
-10
include/linux/netfilter_ipv4/Kbuild
··· 1 - header-y += ip_tables.h 2 - header-y += ipt_CLUSTERIP.h 3 - header-y += ipt_ECN.h 4 - header-y += ipt_LOG.h 5 - header-y += ipt_REJECT.h 6 - header-y += ipt_TTL.h 7 - header-y += ipt_ULOG.h 8 - header-y += ipt_ah.h 9 - header-y += ipt_ecn.h 10 - header-y += ipt_ttl.h
+2 -216
include/linux/netfilter_ipv4/ip_tables.h
··· 11 11 * flags are stored in host byte order (of course). 12 12 * Port numbers are stored in HOST byte order. 13 13 */ 14 - 15 14 #ifndef _IPTABLES_H 16 15 #define _IPTABLES_H 17 16 18 - #ifdef __KERNEL__ 19 17 #include <linux/if.h> 20 18 #include <linux/in.h> 21 19 #include <linux/ip.h> 22 20 #include <linux/skbuff.h> 23 - #endif 24 - #include <linux/types.h> 25 - #include <linux/compiler.h> 26 - #include <linux/netfilter_ipv4.h> 27 - 28 - #include <linux/netfilter/x_tables.h> 29 - 30 - #ifndef __KERNEL__ 31 - #define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN 32 - #define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN 33 - #define ipt_match xt_match 34 - #define ipt_target xt_target 35 - #define ipt_table xt_table 36 - #define ipt_get_revision xt_get_revision 37 - #define ipt_entry_match xt_entry_match 38 - #define ipt_entry_target xt_entry_target 39 - #define ipt_standard_target xt_standard_target 40 - #define ipt_error_target xt_error_target 41 - #define ipt_counters xt_counters 42 - #define IPT_CONTINUE XT_CONTINUE 43 - #define IPT_RETURN XT_RETURN 44 - 45 - /* This group is older than old (iptables < v1.4.0-rc1~89) */ 46 - #include <linux/netfilter/xt_tcpudp.h> 47 - #define ipt_udp xt_udp 48 - #define ipt_tcp xt_tcp 49 - #define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT 50 - #define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT 51 - #define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS 52 - #define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION 53 - #define IPT_TCP_INV_MASK XT_TCP_INV_MASK 54 - #define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT 55 - #define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT 56 - #define IPT_UDP_INV_MASK XT_UDP_INV_MASK 57 - 58 - /* The argument to IPT_SO_ADD_COUNTERS. */ 59 - #define ipt_counters_info xt_counters_info 60 - /* Standard return verdict, or do jump. */ 61 - #define IPT_STANDARD_TARGET XT_STANDARD_TARGET 62 - /* Error verdict. */ 63 - #define IPT_ERROR_TARGET XT_ERROR_TARGET 64 - 65 - /* fn returns 0 to continue iteration */ 66 - #define IPT_MATCH_ITERATE(e, fn, args...) 
\ 67 - XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) 68 - 69 - /* fn returns 0 to continue iteration */ 70 - #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ 71 - XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) 72 - #endif 73 - 74 - /* Yes, Virginia, you have to zero the padding. */ 75 - struct ipt_ip { 76 - /* Source and destination IP addr */ 77 - struct in_addr src, dst; 78 - /* Mask for src and dest IP addr */ 79 - struct in_addr smsk, dmsk; 80 - char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; 81 - unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; 82 - 83 - /* Protocol, 0 = ANY */ 84 - __u16 proto; 85 - 86 - /* Flags word */ 87 - __u8 flags; 88 - /* Inverse flags */ 89 - __u8 invflags; 90 - }; 91 - 92 - /* Values for "flag" field in struct ipt_ip (general ip structure). */ 93 - #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ 94 - #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ 95 - #define IPT_F_MASK 0x03 /* All possible flag bits mask. */ 96 - 97 - /* Values for "inv" field in struct ipt_ip. */ 98 - #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ 99 - #define IPT_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */ 100 - #define IPT_INV_TOS 0x04 /* Invert the sense of TOS. */ 101 - #define IPT_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */ 102 - #define IPT_INV_DSTIP 0x10 /* Invert the sense of DST OP. */ 103 - #define IPT_INV_FRAG 0x20 /* Invert the sense of FRAG. */ 104 - #define IPT_INV_PROTO XT_INV_PROTO 105 - #define IPT_INV_MASK 0x7F /* All possible flag bits mask. */ 106 - 107 - /* This structure defines each of the firewall rules. Consists of 3 108 - parts which are 1) general IP header stuff 2) match specific 109 - stuff 3) the target to perform if the rule matches */ 110 - struct ipt_entry { 111 - struct ipt_ip ip; 112 - 113 - /* Mark with fields that we care about. 
*/ 114 - unsigned int nfcache; 115 - 116 - /* Size of ipt_entry + matches */ 117 - __u16 target_offset; 118 - /* Size of ipt_entry + matches + target */ 119 - __u16 next_offset; 120 - 121 - /* Back pointer */ 122 - unsigned int comefrom; 123 - 124 - /* Packet and byte counters. */ 125 - struct xt_counters counters; 126 - 127 - /* The matches (if any), then the target. */ 128 - unsigned char elems[0]; 129 - }; 130 - 131 - /* 132 - * New IP firewall options for [gs]etsockopt at the RAW IP level. 133 - * Unlike BSD Linux inherits IP options so you don't have to use a raw 134 - * socket for this. Instead we check rights in the calls. 135 - * 136 - * ATTENTION: check linux/in.h before adding new number here. 137 - */ 138 - #define IPT_BASE_CTL 64 139 - 140 - #define IPT_SO_SET_REPLACE (IPT_BASE_CTL) 141 - #define IPT_SO_SET_ADD_COUNTERS (IPT_BASE_CTL + 1) 142 - #define IPT_SO_SET_MAX IPT_SO_SET_ADD_COUNTERS 143 - 144 - #define IPT_SO_GET_INFO (IPT_BASE_CTL) 145 - #define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1) 146 - #define IPT_SO_GET_REVISION_MATCH (IPT_BASE_CTL + 2) 147 - #define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3) 148 - #define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET 149 - 150 - /* ICMP matching stuff */ 151 - struct ipt_icmp { 152 - __u8 type; /* type to match */ 153 - __u8 code[2]; /* range of code */ 154 - __u8 invflags; /* Inverse flags */ 155 - }; 156 - 157 - /* Values for "inv" field for struct ipt_icmp. */ 158 - #define IPT_ICMP_INV 0x01 /* Invert the sense of type/code test */ 159 - 160 - /* The argument to IPT_SO_GET_INFO */ 161 - struct ipt_getinfo { 162 - /* Which table: caller fills this in. */ 163 - char name[XT_TABLE_MAXNAMELEN]; 164 - 165 - /* Kernel fills these in. */ 166 - /* Which hook entry points are valid: bitmask */ 167 - unsigned int valid_hooks; 168 - 169 - /* Hook entry points: one per netfilter hook. */ 170 - unsigned int hook_entry[NF_INET_NUMHOOKS]; 171 - 172 - /* Underflow points. 
*/ 173 - unsigned int underflow[NF_INET_NUMHOOKS]; 174 - 175 - /* Number of entries */ 176 - unsigned int num_entries; 177 - 178 - /* Size of entries. */ 179 - unsigned int size; 180 - }; 181 - 182 - /* The argument to IPT_SO_SET_REPLACE. */ 183 - struct ipt_replace { 184 - /* Which table. */ 185 - char name[XT_TABLE_MAXNAMELEN]; 186 - 187 - /* Which hook entry points are valid: bitmask. You can't 188 - change this. */ 189 - unsigned int valid_hooks; 190 - 191 - /* Number of entries */ 192 - unsigned int num_entries; 193 - 194 - /* Total size of new entries */ 195 - unsigned int size; 196 - 197 - /* Hook entry points. */ 198 - unsigned int hook_entry[NF_INET_NUMHOOKS]; 199 - 200 - /* Underflow points. */ 201 - unsigned int underflow[NF_INET_NUMHOOKS]; 202 - 203 - /* Information about old entries: */ 204 - /* Number of counters (must be equal to current number of entries). */ 205 - unsigned int num_counters; 206 - /* The old entries' counters. */ 207 - struct xt_counters __user *counters; 208 - 209 - /* The entries (hang off end: not really an array). */ 210 - struct ipt_entry entries[0]; 211 - }; 212 - 213 - /* The argument to IPT_SO_GET_ENTRIES. */ 214 - struct ipt_get_entries { 215 - /* Which table: user fills this in. */ 216 - char name[XT_TABLE_MAXNAMELEN]; 217 - 218 - /* User fills this in: total entry size. */ 219 - unsigned int size; 220 - 221 - /* The entries. */ 222 - struct ipt_entry entrytable[0]; 223 - }; 224 - 225 - /* Helper functions */ 226 - static __inline__ struct xt_entry_target * 227 - ipt_get_target(struct ipt_entry *e) 228 - { 229 - return (void *)e + e->target_offset; 230 - } 231 - 232 - /* 233 - * Main firewall chains definitions and global var's definitions. 
234 - */ 235 - #ifdef __KERNEL__ 236 21 237 22 #include <linux/init.h> 23 + #include <uapi/linux/netfilter_ipv4/ip_tables.h> 24 + 238 25 extern void ipt_init(void) __init; 239 26 240 27 extern struct xt_table *ipt_register_table(struct net *net, ··· 90 303 } 91 304 92 305 #endif /* CONFIG_COMPAT */ 93 - #endif /*__KERNEL__*/ 94 306 #endif /* _IPTABLES_H */
include/linux/netfilter_ipv4/ipt_CLUSTERIP.h include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h
include/linux/netfilter_ipv4/ipt_ECN.h include/uapi/linux/netfilter_ipv4/ipt_ECN.h
include/linux/netfilter_ipv4/ipt_LOG.h include/uapi/linux/netfilter_ipv4/ipt_LOG.h
include/linux/netfilter_ipv4/ipt_REJECT.h include/uapi/linux/netfilter_ipv4/ipt_REJECT.h
include/linux/netfilter_ipv4/ipt_TTL.h include/uapi/linux/netfilter_ipv4/ipt_TTL.h
include/linux/netfilter_ipv4/ipt_ULOG.h include/uapi/linux/netfilter_ipv4/ipt_ULOG.h
include/linux/netfilter_ipv4/ipt_ah.h include/uapi/linux/netfilter_ipv4/ipt_ah.h
include/linux/netfilter_ipv4/ipt_ecn.h include/uapi/linux/netfilter_ipv4/ipt_ecn.h
include/linux/netfilter_ipv4/ipt_ttl.h include/uapi/linux/netfilter_ipv4/ipt_ttl.h
-12
include/linux/netfilter_ipv6/Kbuild
··· 1 - header-y += ip6_tables.h 2 - header-y += ip6t_HL.h 3 - header-y += ip6t_LOG.h 4 - header-y += ip6t_NPT.h 5 - header-y += ip6t_REJECT.h 6 - header-y += ip6t_ah.h 7 - header-y += ip6t_frag.h 8 - header-y += ip6t_hl.h 9 - header-y += ip6t_ipv6header.h 10 - header-y += ip6t_mh.h 11 - header-y += ip6t_opts.h 12 - header-y += ip6t_rt.h
+2 -254
include/linux/netfilter_ipv6/ip6_tables.h
··· 11 11 * flags are stored in host byte order (of course). 12 12 * Port numbers are stored in HOST byte order. 13 13 */ 14 - 15 14 #ifndef _IP6_TABLES_H 16 15 #define _IP6_TABLES_H 17 16 18 - #ifdef __KERNEL__ 19 17 #include <linux/if.h> 20 18 #include <linux/in6.h> 21 19 #include <linux/ipv6.h> 22 20 #include <linux/skbuff.h> 23 - #endif 24 - #include <linux/types.h> 25 - #include <linux/compiler.h> 26 - #include <linux/netfilter_ipv6.h> 27 - 28 - #include <linux/netfilter/x_tables.h> 29 - 30 - #ifndef __KERNEL__ 31 - #define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN 32 - #define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN 33 - #define ip6t_match xt_match 34 - #define ip6t_target xt_target 35 - #define ip6t_table xt_table 36 - #define ip6t_get_revision xt_get_revision 37 - #define ip6t_entry_match xt_entry_match 38 - #define ip6t_entry_target xt_entry_target 39 - #define ip6t_standard_target xt_standard_target 40 - #define ip6t_error_target xt_error_target 41 - #define ip6t_counters xt_counters 42 - #define IP6T_CONTINUE XT_CONTINUE 43 - #define IP6T_RETURN XT_RETURN 44 - 45 - /* Pre-iptables-1.4.0 */ 46 - #include <linux/netfilter/xt_tcpudp.h> 47 - #define ip6t_tcp xt_tcp 48 - #define ip6t_udp xt_udp 49 - #define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT 50 - #define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT 51 - #define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS 52 - #define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION 53 - #define IP6T_TCP_INV_MASK XT_TCP_INV_MASK 54 - #define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT 55 - #define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT 56 - #define IP6T_UDP_INV_MASK XT_UDP_INV_MASK 57 - 58 - #define ip6t_counters_info xt_counters_info 59 - #define IP6T_STANDARD_TARGET XT_STANDARD_TARGET 60 - #define IP6T_ERROR_TARGET XT_ERROR_TARGET 61 - #define IP6T_MATCH_ITERATE(e, fn, args...) \ 62 - XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) 63 - #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) 
\ 64 - XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) 65 - #endif 66 - 67 - /* Yes, Virginia, you have to zero the padding. */ 68 - struct ip6t_ip6 { 69 - /* Source and destination IP6 addr */ 70 - struct in6_addr src, dst; 71 - /* Mask for src and dest IP6 addr */ 72 - struct in6_addr smsk, dmsk; 73 - char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; 74 - unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; 75 - 76 - /* Upper protocol number 77 - * - The allowed value is 0 (any) or protocol number of last parsable 78 - * header, which is 50 (ESP), 59 (No Next Header), 135 (MH), or 79 - * the non IPv6 extension headers. 80 - * - The protocol numbers of IPv6 extension headers except of ESP and 81 - * MH do not match any packets. 82 - * - You also need to set IP6T_FLAGS_PROTO to "flags" to check protocol. 83 - */ 84 - __u16 proto; 85 - /* TOS to match iff flags & IP6T_F_TOS */ 86 - __u8 tos; 87 - 88 - /* Flags word */ 89 - __u8 flags; 90 - /* Inverse flags */ 91 - __u8 invflags; 92 - }; 93 - 94 - /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */ 95 - #define IP6T_F_PROTO 0x01 /* Set if rule cares about upper 96 - protocols */ 97 - #define IP6T_F_TOS 0x02 /* Match the TOS. */ 98 - #define IP6T_F_GOTO 0x04 /* Set if jump is a goto */ 99 - #define IP6T_F_MASK 0x07 /* All possible flag bits mask. */ 100 - 101 - /* Values for "inv" field in struct ip6t_ip6. */ 102 - #define IP6T_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ 103 - #define IP6T_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */ 104 - #define IP6T_INV_TOS 0x04 /* Invert the sense of TOS. */ 105 - #define IP6T_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */ 106 - #define IP6T_INV_DSTIP 0x10 /* Invert the sense of DST OP. */ 107 - #define IP6T_INV_FRAG 0x20 /* Invert the sense of FRAG. */ 108 - #define IP6T_INV_PROTO XT_INV_PROTO 109 - #define IP6T_INV_MASK 0x7F /* All possible flag bits mask. 
*/ 110 - 111 - /* This structure defines each of the firewall rules. Consists of 3 112 - parts which are 1) general IP header stuff 2) match specific 113 - stuff 3) the target to perform if the rule matches */ 114 - struct ip6t_entry { 115 - struct ip6t_ip6 ipv6; 116 - 117 - /* Mark with fields that we care about. */ 118 - unsigned int nfcache; 119 - 120 - /* Size of ipt_entry + matches */ 121 - __u16 target_offset; 122 - /* Size of ipt_entry + matches + target */ 123 - __u16 next_offset; 124 - 125 - /* Back pointer */ 126 - unsigned int comefrom; 127 - 128 - /* Packet and byte counters. */ 129 - struct xt_counters counters; 130 - 131 - /* The matches (if any), then the target. */ 132 - unsigned char elems[0]; 133 - }; 134 - 135 - /* Standard entry */ 136 - struct ip6t_standard { 137 - struct ip6t_entry entry; 138 - struct xt_standard_target target; 139 - }; 140 - 141 - struct ip6t_error { 142 - struct ip6t_entry entry; 143 - struct xt_error_target target; 144 - }; 145 - 146 - #define IP6T_ENTRY_INIT(__size) \ 147 - { \ 148 - .target_offset = sizeof(struct ip6t_entry), \ 149 - .next_offset = (__size), \ 150 - } 151 - 152 - #define IP6T_STANDARD_INIT(__verdict) \ 153 - { \ 154 - .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ 155 - .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ 156 - sizeof(struct xt_standard_target)), \ 157 - .target.verdict = -(__verdict) - 1, \ 158 - } 159 - 160 - #define IP6T_ERROR_INIT \ 161 - { \ 162 - .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ 163 - .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ 164 - sizeof(struct xt_error_target)), \ 165 - .target.errorname = "ERROR", \ 166 - } 167 - 168 - /* 169 - * New IP firewall options for [gs]etsockopt at the RAW IP level. 170 - * Unlike BSD Linux inherits IP options so you don't have to use 171 - * a raw socket for this. Instead we check rights in the calls. 172 - * 173 - * ATTENTION: check linux/in6.h before adding new number here. 
174 - */ 175 - #define IP6T_BASE_CTL 64 176 - 177 - #define IP6T_SO_SET_REPLACE (IP6T_BASE_CTL) 178 - #define IP6T_SO_SET_ADD_COUNTERS (IP6T_BASE_CTL + 1) 179 - #define IP6T_SO_SET_MAX IP6T_SO_SET_ADD_COUNTERS 180 - 181 - #define IP6T_SO_GET_INFO (IP6T_BASE_CTL) 182 - #define IP6T_SO_GET_ENTRIES (IP6T_BASE_CTL + 1) 183 - #define IP6T_SO_GET_REVISION_MATCH (IP6T_BASE_CTL + 4) 184 - #define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5) 185 - #define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET 186 - 187 - /* ICMP matching stuff */ 188 - struct ip6t_icmp { 189 - __u8 type; /* type to match */ 190 - __u8 code[2]; /* range of code */ 191 - __u8 invflags; /* Inverse flags */ 192 - }; 193 - 194 - /* Values for "inv" field for struct ipt_icmp. */ 195 - #define IP6T_ICMP_INV 0x01 /* Invert the sense of type/code test */ 196 - 197 - /* The argument to IP6T_SO_GET_INFO */ 198 - struct ip6t_getinfo { 199 - /* Which table: caller fills this in. */ 200 - char name[XT_TABLE_MAXNAMELEN]; 201 - 202 - /* Kernel fills these in. */ 203 - /* Which hook entry points are valid: bitmask */ 204 - unsigned int valid_hooks; 205 - 206 - /* Hook entry points: one per netfilter hook. */ 207 - unsigned int hook_entry[NF_INET_NUMHOOKS]; 208 - 209 - /* Underflow points. */ 210 - unsigned int underflow[NF_INET_NUMHOOKS]; 211 - 212 - /* Number of entries */ 213 - unsigned int num_entries; 214 - 215 - /* Size of entries. */ 216 - unsigned int size; 217 - }; 218 - 219 - /* The argument to IP6T_SO_SET_REPLACE. */ 220 - struct ip6t_replace { 221 - /* Which table. */ 222 - char name[XT_TABLE_MAXNAMELEN]; 223 - 224 - /* Which hook entry points are valid: bitmask. You can't 225 - change this. */ 226 - unsigned int valid_hooks; 227 - 228 - /* Number of entries */ 229 - unsigned int num_entries; 230 - 231 - /* Total size of new entries */ 232 - unsigned int size; 233 - 234 - /* Hook entry points. */ 235 - unsigned int hook_entry[NF_INET_NUMHOOKS]; 236 - 237 - /* Underflow points. 
*/ 238 - unsigned int underflow[NF_INET_NUMHOOKS]; 239 - 240 - /* Information about old entries: */ 241 - /* Number of counters (must be equal to current number of entries). */ 242 - unsigned int num_counters; 243 - /* The old entries' counters. */ 244 - struct xt_counters __user *counters; 245 - 246 - /* The entries (hang off end: not really an array). */ 247 - struct ip6t_entry entries[0]; 248 - }; 249 - 250 - /* The argument to IP6T_SO_GET_ENTRIES. */ 251 - struct ip6t_get_entries { 252 - /* Which table: user fills this in. */ 253 - char name[XT_TABLE_MAXNAMELEN]; 254 - 255 - /* User fills this in: total entry size. */ 256 - unsigned int size; 257 - 258 - /* The entries. */ 259 - struct ip6t_entry entrytable[0]; 260 - }; 261 - 262 - /* Helper functions */ 263 - static __inline__ struct xt_entry_target * 264 - ip6t_get_target(struct ip6t_entry *e) 265 - { 266 - return (void *)e + e->target_offset; 267 - } 268 - 269 - /* 270 - * Main firewall chains definitions and global var's definitions. 271 - */ 272 - 273 - #ifdef __KERNEL__ 274 21 275 22 #include <linux/init.h> 23 + #include <uapi/linux/netfilter_ipv6/ip6_tables.h> 24 + 276 25 extern void ip6t_init(void) __init; 277 26 278 27 extern void *ip6t_alloc_initial_table(const struct xt_table *); ··· 76 327 } 77 328 78 329 #endif /* CONFIG_COMPAT */ 79 - #endif /*__KERNEL__*/ 80 330 #endif /* _IP6_TABLES_H */
include/linux/netfilter_ipv6/ip6t_HL.h include/uapi/linux/netfilter_ipv6/ip6t_HL.h
include/linux/netfilter_ipv6/ip6t_LOG.h include/uapi/linux/netfilter_ipv6/ip6t_LOG.h
include/linux/netfilter_ipv6/ip6t_NPT.h include/uapi/linux/netfilter_ipv6/ip6t_NPT.h
include/linux/netfilter_ipv6/ip6t_REJECT.h include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
include/linux/netfilter_ipv6/ip6t_ah.h include/uapi/linux/netfilter_ipv6/ip6t_ah.h
include/linux/netfilter_ipv6/ip6t_frag.h include/uapi/linux/netfilter_ipv6/ip6t_frag.h
include/linux/netfilter_ipv6/ip6t_hl.h include/uapi/linux/netfilter_ipv6/ip6t_hl.h
include/linux/netfilter_ipv6/ip6t_ipv6header.h include/uapi/linux/netfilter_ipv6/ip6t_ipv6header.h
include/linux/netfilter_ipv6/ip6t_mh.h include/uapi/linux/netfilter_ipv6/ip6t_mh.h
include/linux/netfilter_ipv6/ip6t_opts.h include/uapi/linux/netfilter_ipv6/ip6t_opts.h
include/linux/netfilter_ipv6/ip6t_rt.h include/uapi/linux/netfilter_ipv6/ip6t_rt.h
+16 -4
include/linux/netlink.h
··· 245 245 struct netlink_callback *cb); 246 246 int (*done)(struct netlink_callback *cb); 247 247 void *data; 248 + /* the module that dump function belong to */ 249 + struct module *module; 248 250 u16 family; 249 251 u16 min_dump_alloc; 250 252 unsigned int prev_seq, seq; ··· 264 262 265 263 struct netlink_dump_control { 266 264 int (*dump)(struct sk_buff *skb, struct netlink_callback *); 267 - int (*done)(struct netlink_callback*); 265 + int (*done)(struct netlink_callback *); 268 266 void *data; 267 + struct module *module; 269 268 u16 min_dump_alloc; 270 269 }; 271 270 272 - extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 273 - const struct nlmsghdr *nlh, 274 - struct netlink_dump_control *control); 271 + extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 272 + const struct nlmsghdr *nlh, 273 + struct netlink_dump_control *control); 274 + static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 275 + const struct nlmsghdr *nlh, 276 + struct netlink_dump_control *control) 277 + { 278 + if (!control->module) 279 + control->module = THIS_MODULE; 280 + 281 + return __netlink_dump_start(ssk, skb, nlh, control); 282 + } 275 283 276 284 #endif /* __KERNEL__ */ 277 285
-24
include/linux/skbuff.h
··· 589 589 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); 590 590 } 591 591 592 - extern void skb_recycle(struct sk_buff *skb); 593 - extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); 594 - 595 592 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 596 593 extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 597 594 extern struct sk_buff *skb_clone(struct sk_buff *skb, ··· 2641 2644 } 2642 2645 2643 2646 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2644 - 2645 - static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size) 2646 - { 2647 - if (irqs_disabled()) 2648 - return false; 2649 - 2650 - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) 2651 - return false; 2652 - 2653 - if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) 2654 - return false; 2655 - 2656 - skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 2657 - if (skb_end_offset(skb) < skb_size) 2658 - return false; 2659 - 2660 - if (skb_shared(skb) || skb_cloned(skb)) 2661 - return false; 2662 - 2663 - return true; 2664 - } 2665 2647 2666 2648 /** 2667 2649 * skb_head_is_locked - Determine if the skb->head is locked down
-7
include/linux/tc_act/Kbuild
··· 1 - header-y += tc_gact.h 2 - header-y += tc_ipt.h 3 - header-y += tc_mirred.h 4 - header-y += tc_pedit.h 5 - header-y += tc_nat.h 6 - header-y += tc_skbedit.h 7 - header-y += tc_csum.h
include/linux/tc_act/tc_csum.h include/uapi/linux/tc_act/tc_csum.h
include/linux/tc_act/tc_gact.h include/uapi/linux/tc_act/tc_gact.h
include/linux/tc_act/tc_ipt.h include/uapi/linux/tc_act/tc_ipt.h
include/linux/tc_act/tc_mirred.h include/uapi/linux/tc_act/tc_mirred.h
include/linux/tc_act/tc_nat.h include/uapi/linux/tc_act/tc_nat.h
include/linux/tc_act/tc_pedit.h include/uapi/linux/tc_act/tc_pedit.h
include/linux/tc_act/tc_skbedit.h include/uapi/linux/tc_act/tc_skbedit.h
-4
include/linux/tc_ematch/Kbuild
··· 1 - header-y += tc_em_cmp.h 2 - header-y += tc_em_meta.h 3 - header-y += tc_em_nbyte.h 4 - header-y += tc_em_text.h
include/linux/tc_ematch/tc_em_cmp.h include/uapi/linux/tc_ematch/tc_em_cmp.h
include/linux/tc_ematch/tc_em_meta.h include/uapi/linux/tc_ematch/tc_em_meta.h
include/linux/tc_ematch/tc_em_nbyte.h include/uapi/linux/tc_ematch/tc_em_nbyte.h
include/linux/tc_ematch/tc_em_text.h include/uapi/linux/tc_ematch/tc_em_text.h
+1
include/net/flow.h
··· 21 21 __u8 flowic_flags; 22 22 #define FLOWI_FLAG_ANYSRC 0x01 23 23 #define FLOWI_FLAG_CAN_SLEEP 0x02 24 + #define FLOWI_FLAG_KNOWN_NH 0x04 24 25 __u32 flowic_secid; 25 26 }; 26 27
+2 -1
include/net/route.h
··· 48 48 int rt_genid; 49 49 unsigned int rt_flags; 50 50 __u16 rt_type; 51 - __u16 rt_is_input; 51 + __u8 rt_is_input; 52 + __u8 rt_uses_gateway; 52 53 53 54 int rt_iif; 54 55
+1
include/rdma/rdma_netlink.h
··· 39 39 40 40 struct ibnl_client_cbs { 41 41 int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb); 42 + struct module *module; 42 43 }; 43 44 44 45 int ibnl_init(void);
+2
include/uapi/linux/caif/Kbuild
··· 1 1 # UAPI Header export list 2 + header-y += caif_socket.h 3 + header-y += if_caif.h
+1
include/uapi/linux/isdn/Kbuild
··· 1 1 # UAPI Header export list 2 + header-y += capicmd.h
+76
include/uapi/linux/netfilter/Kbuild
··· 1 1 # UAPI Header export list 2 2 header-y += ipset/ 3 + header-y += nf_conntrack_common.h 4 + header-y += nf_conntrack_ftp.h 5 + header-y += nf_conntrack_sctp.h 6 + header-y += nf_conntrack_tcp.h 7 + header-y += nf_conntrack_tuple_common.h 8 + header-y += nf_nat.h 9 + header-y += nfnetlink.h 10 + header-y += nfnetlink_acct.h 11 + header-y += nfnetlink_compat.h 12 + header-y += nfnetlink_conntrack.h 13 + header-y += nfnetlink_cthelper.h 14 + header-y += nfnetlink_cttimeout.h 15 + header-y += nfnetlink_log.h 16 + header-y += nfnetlink_queue.h 17 + header-y += x_tables.h 18 + header-y += xt_AUDIT.h 19 + header-y += xt_CHECKSUM.h 20 + header-y += xt_CLASSIFY.h 21 + header-y += xt_CONNMARK.h 22 + header-y += xt_CONNSECMARK.h 23 + header-y += xt_CT.h 24 + header-y += xt_DSCP.h 25 + header-y += xt_IDLETIMER.h 26 + header-y += xt_LED.h 27 + header-y += xt_LOG.h 28 + header-y += xt_MARK.h 29 + header-y += xt_NFLOG.h 30 + header-y += xt_NFQUEUE.h 31 + header-y += xt_RATEEST.h 32 + header-y += xt_SECMARK.h 33 + header-y += xt_TCPMSS.h 34 + header-y += xt_TCPOPTSTRIP.h 35 + header-y += xt_TEE.h 36 + header-y += xt_TPROXY.h 37 + header-y += xt_addrtype.h 38 + header-y += xt_cluster.h 39 + header-y += xt_comment.h 40 + header-y += xt_connbytes.h 41 + header-y += xt_connlimit.h 42 + header-y += xt_connmark.h 43 + header-y += xt_conntrack.h 44 + header-y += xt_cpu.h 45 + header-y += xt_dccp.h 46 + header-y += xt_devgroup.h 47 + header-y += xt_dscp.h 48 + header-y += xt_ecn.h 49 + header-y += xt_esp.h 50 + header-y += xt_hashlimit.h 51 + header-y += xt_helper.h 52 + header-y += xt_iprange.h 53 + header-y += xt_ipvs.h 54 + header-y += xt_length.h 55 + header-y += xt_limit.h 56 + header-y += xt_mac.h 57 + header-y += xt_mark.h 58 + header-y += xt_multiport.h 59 + header-y += xt_nfacct.h 60 + header-y += xt_osf.h 61 + header-y += xt_owner.h 62 + header-y += xt_physdev.h 63 + header-y += xt_pkttype.h 64 + header-y += xt_policy.h 65 + header-y += xt_quota.h 66 + header-y += 
xt_rateest.h 67 + header-y += xt_realm.h 68 + header-y += xt_recent.h 69 + header-y += xt_sctp.h 70 + header-y += xt_set.h 71 + header-y += xt_socket.h 72 + header-y += xt_state.h 73 + header-y += xt_statistic.h 74 + header-y += xt_string.h 75 + header-y += xt_tcpmss.h 76 + header-y += xt_tcpudp.h 77 + header-y += xt_time.h 78 + header-y += xt_u32.h
+4
include/uapi/linux/netfilter/ipset/Kbuild
··· 1 1 # UAPI Header export list 2 + header-y += ip_set.h 3 + header-y += ip_set_bitmap.h 4 + header-y += ip_set_hash.h 5 + header-y += ip_set_list.h
+231
include/uapi/linux/netfilter/ipset/ip_set.h
··· 1 + /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> 2 + * Patrick Schaaf <bof@bof.de> 3 + * Martin Josefsson <gandalf@wlug.westbo.se> 4 + * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #ifndef _UAPI_IP_SET_H 11 + #define _UAPI_IP_SET_H 12 + 13 + 14 + #include <linux/types.h> 15 + 16 + /* The protocol version */ 17 + #define IPSET_PROTOCOL 6 18 + 19 + /* The max length of strings including NUL: set and type identifiers */ 20 + #define IPSET_MAXNAMELEN 32 21 + 22 + /* Message types and commands */ 23 + enum ipset_cmd { 24 + IPSET_CMD_NONE, 25 + IPSET_CMD_PROTOCOL, /* 1: Return protocol version */ 26 + IPSET_CMD_CREATE, /* 2: Create a new (empty) set */ 27 + IPSET_CMD_DESTROY, /* 3: Destroy a (empty) set */ 28 + IPSET_CMD_FLUSH, /* 4: Remove all elements from a set */ 29 + IPSET_CMD_RENAME, /* 5: Rename a set */ 30 + IPSET_CMD_SWAP, /* 6: Swap two sets */ 31 + IPSET_CMD_LIST, /* 7: List sets */ 32 + IPSET_CMD_SAVE, /* 8: Save sets */ 33 + IPSET_CMD_ADD, /* 9: Add an element to a set */ 34 + IPSET_CMD_DEL, /* 10: Delete an element from a set */ 35 + IPSET_CMD_TEST, /* 11: Test an element in a set */ 36 + IPSET_CMD_HEADER, /* 12: Get set header data only */ 37 + IPSET_CMD_TYPE, /* 13: Get set type */ 38 + IPSET_MSG_MAX, /* Netlink message commands */ 39 + 40 + /* Commands in userspace: */ 41 + IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */ 42 + IPSET_CMD_HELP, /* 15: Get help */ 43 + IPSET_CMD_VERSION, /* 16: Get program version */ 44 + IPSET_CMD_QUIT, /* 17: Quit from interactive mode */ 45 + 46 + IPSET_CMD_MAX, 47 + 48 + IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */ 49 + }; 50 + 51 + /* Attributes at command level */ 52 + enum { 53 + IPSET_ATTR_UNSPEC, 54 + IPSET_ATTR_PROTOCOL, /* 
1: Protocol version */ 55 + IPSET_ATTR_SETNAME, /* 2: Name of the set */ 56 + IPSET_ATTR_TYPENAME, /* 3: Typename */ 57 + IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */ 58 + IPSET_ATTR_REVISION, /* 4: Settype revision */ 59 + IPSET_ATTR_FAMILY, /* 5: Settype family */ 60 + IPSET_ATTR_FLAGS, /* 6: Flags at command level */ 61 + IPSET_ATTR_DATA, /* 7: Nested attributes */ 62 + IPSET_ATTR_ADT, /* 8: Multiple data containers */ 63 + IPSET_ATTR_LINENO, /* 9: Restore lineno */ 64 + IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */ 65 + IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */ 66 + __IPSET_ATTR_CMD_MAX, 67 + }; 68 + #define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1) 69 + 70 + /* CADT specific attributes */ 71 + enum { 72 + IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1, 73 + IPSET_ATTR_IP_FROM = IPSET_ATTR_IP, 74 + IPSET_ATTR_IP_TO, /* 2 */ 75 + IPSET_ATTR_CIDR, /* 3 */ 76 + IPSET_ATTR_PORT, /* 4 */ 77 + IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT, 78 + IPSET_ATTR_PORT_TO, /* 5 */ 79 + IPSET_ATTR_TIMEOUT, /* 6 */ 80 + IPSET_ATTR_PROTO, /* 7 */ 81 + IPSET_ATTR_CADT_FLAGS, /* 8 */ 82 + IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO, /* 9 */ 83 + /* Reserve empty slots */ 84 + IPSET_ATTR_CADT_MAX = 16, 85 + /* Create-only specific attributes */ 86 + IPSET_ATTR_GC, 87 + IPSET_ATTR_HASHSIZE, 88 + IPSET_ATTR_MAXELEM, 89 + IPSET_ATTR_NETMASK, 90 + IPSET_ATTR_PROBES, 91 + IPSET_ATTR_RESIZE, 92 + IPSET_ATTR_SIZE, 93 + /* Kernel-only */ 94 + IPSET_ATTR_ELEMENTS, 95 + IPSET_ATTR_REFERENCES, 96 + IPSET_ATTR_MEMSIZE, 97 + 98 + __IPSET_ATTR_CREATE_MAX, 99 + }; 100 + #define IPSET_ATTR_CREATE_MAX (__IPSET_ATTR_CREATE_MAX - 1) 101 + 102 + /* ADT specific attributes */ 103 + enum { 104 + IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1, 105 + IPSET_ATTR_NAME, 106 + IPSET_ATTR_NAMEREF, 107 + IPSET_ATTR_IP2, 108 + IPSET_ATTR_CIDR2, 109 + IPSET_ATTR_IP2_TO, 110 + IPSET_ATTR_IFACE, 111 + __IPSET_ATTR_ADT_MAX, 112 + }; 113 + #define 
IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) 114 + 115 + /* IP specific attributes */ 116 + enum { 117 + IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1, 118 + IPSET_ATTR_IPADDR_IPV6, 119 + __IPSET_ATTR_IPADDR_MAX, 120 + }; 121 + #define IPSET_ATTR_IPADDR_MAX (__IPSET_ATTR_IPADDR_MAX - 1) 122 + 123 + /* Error codes */ 124 + enum ipset_errno { 125 + IPSET_ERR_PRIVATE = 4096, 126 + IPSET_ERR_PROTOCOL, 127 + IPSET_ERR_FIND_TYPE, 128 + IPSET_ERR_MAX_SETS, 129 + IPSET_ERR_BUSY, 130 + IPSET_ERR_EXIST_SETNAME2, 131 + IPSET_ERR_TYPE_MISMATCH, 132 + IPSET_ERR_EXIST, 133 + IPSET_ERR_INVALID_CIDR, 134 + IPSET_ERR_INVALID_NETMASK, 135 + IPSET_ERR_INVALID_FAMILY, 136 + IPSET_ERR_TIMEOUT, 137 + IPSET_ERR_REFERENCED, 138 + IPSET_ERR_IPADDR_IPV4, 139 + IPSET_ERR_IPADDR_IPV6, 140 + 141 + /* Type specific error codes */ 142 + IPSET_ERR_TYPE_SPECIFIC = 4352, 143 + }; 144 + 145 + /* Flags at command level */ 146 + enum ipset_cmd_flags { 147 + IPSET_FLAG_BIT_EXIST = 0, 148 + IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST), 149 + IPSET_FLAG_BIT_LIST_SETNAME = 1, 150 + IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME), 151 + IPSET_FLAG_BIT_LIST_HEADER = 2, 152 + IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER), 153 + IPSET_FLAG_CMD_MAX = 15, /* Lower half */ 154 + }; 155 + 156 + /* Flags at CADT attribute level */ 157 + enum ipset_cadt_flags { 158 + IPSET_FLAG_BIT_BEFORE = 0, 159 + IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE), 160 + IPSET_FLAG_BIT_PHYSDEV = 1, 161 + IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV), 162 + IPSET_FLAG_BIT_NOMATCH = 2, 163 + IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH), 164 + IPSET_FLAG_CADT_MAX = 15, /* Upper half */ 165 + }; 166 + 167 + /* Commands with settype-specific attributes */ 168 + enum ipset_adt { 169 + IPSET_ADD, 170 + IPSET_DEL, 171 + IPSET_TEST, 172 + IPSET_ADT_MAX, 173 + IPSET_CREATE = IPSET_ADT_MAX, 174 + IPSET_CADT_MAX, 175 + }; 176 + 177 + /* Sets are identified by an index in kernel space. 
Tweak with ip_set_id_t 178 + * and IPSET_INVALID_ID if you want to increase the max number of sets. 179 + */ 180 + typedef __u16 ip_set_id_t; 181 + 182 + #define IPSET_INVALID_ID 65535 183 + 184 + enum ip_set_dim { 185 + IPSET_DIM_ZERO = 0, 186 + IPSET_DIM_ONE, 187 + IPSET_DIM_TWO, 188 + IPSET_DIM_THREE, 189 + /* Max dimension in elements. 190 + * If changed, new revision of iptables match/target is required. 191 + */ 192 + IPSET_DIM_MAX = 6, 193 + IPSET_BIT_RETURN_NOMATCH = 7, 194 + }; 195 + 196 + /* Option flags for kernel operations */ 197 + enum ip_set_kopt { 198 + IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO), 199 + IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE), 200 + IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO), 201 + IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE), 202 + IPSET_RETURN_NOMATCH = (1 << IPSET_BIT_RETURN_NOMATCH), 203 + }; 204 + 205 + 206 + /* Interface to iptables/ip6tables */ 207 + 208 + #define SO_IP_SET 83 209 + 210 + union ip_set_name_index { 211 + char name[IPSET_MAXNAMELEN]; 212 + ip_set_id_t index; 213 + }; 214 + 215 + #define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */ 216 + struct ip_set_req_get_set { 217 + unsigned int op; 218 + unsigned int version; 219 + union ip_set_name_index set; 220 + }; 221 + 222 + #define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */ 223 + /* Uses ip_set_req_get_set */ 224 + 225 + #define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */ 226 + struct ip_set_req_version { 227 + unsigned int op; 228 + unsigned int version; 229 + }; 230 + 231 + #endif /* _UAPI_IP_SET_H */
+13
include/uapi/linux/netfilter/ipset/ip_set_bitmap.h
··· 1 + #ifndef _UAPI__IP_SET_BITMAP_H 2 + #define _UAPI__IP_SET_BITMAP_H 3 + 4 + /* Bitmap type specific error codes */ 5 + enum { 6 + /* The element is out of the range of the set */ 7 + IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC, 8 + /* The range exceeds the size limit of the set type */ 9 + IPSET_ERR_BITMAP_RANGE_SIZE, 10 + }; 11 + 12 + 13 + #endif /* _UAPI__IP_SET_BITMAP_H */
+21
include/uapi/linux/netfilter/ipset/ip_set_hash.h
··· 1 + #ifndef _UAPI__IP_SET_HASH_H 2 + #define _UAPI__IP_SET_HASH_H 3 + 4 + /* Hash type specific error codes */ 5 + enum { 6 + /* Hash is full */ 7 + IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC, 8 + /* Null-valued element */ 9 + IPSET_ERR_HASH_ELEM, 10 + /* Invalid protocol */ 11 + IPSET_ERR_INVALID_PROTO, 12 + /* Protocol missing but must be specified */ 13 + IPSET_ERR_MISSING_PROTO, 14 + /* Range not supported */ 15 + IPSET_ERR_HASH_RANGE_UNSUPPORTED, 16 + /* Invalid range */ 17 + IPSET_ERR_HASH_RANGE, 18 + }; 19 + 20 + 21 + #endif /* _UAPI__IP_SET_HASH_H */
+21
include/uapi/linux/netfilter/ipset/ip_set_list.h
··· 1 + #ifndef _UAPI__IP_SET_LIST_H 2 + #define _UAPI__IP_SET_LIST_H 3 + 4 + /* List type specific error codes */ 5 + enum { 6 + /* Set name to be added/deleted/tested does not exist. */ 7 + IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC, 8 + /* list:set type is not permitted to add */ 9 + IPSET_ERR_LOOP, 10 + /* Missing reference set */ 11 + IPSET_ERR_BEFORE, 12 + /* Reference set does not exist */ 13 + IPSET_ERR_NAMEREF, 14 + /* Set is full */ 15 + IPSET_ERR_LIST_FULL, 16 + /* Reference set is not added to the set */ 17 + IPSET_ERR_REF_EXIST, 18 + }; 19 + 20 + 21 + #endif /* _UAPI__IP_SET_LIST_H */
+117
include/uapi/linux/netfilter/nf_conntrack_common.h
#ifndef _UAPI_NF_CONNTRACK_COMMON_H
#define _UAPI_NF_CONNTRACK_COMMON_H
/* Connection state tracking for netfilter. This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension.
   NOTE: this is a userspace-exported (UAPI) header; the enum values
   below are ABI and must never be renumbered or reordered. */

/* Conntrack state attached to a packet: three base states, plus
   composite reply-direction states built from IP_CT_IS_REPLY. */
enum ip_conntrack_info {
	/* Part of an established connection (either direction). */
	IP_CT_ESTABLISHED,

	/* Like NEW, but related to an existing connection, or ICMP error
	   (in either direction). */
	IP_CT_RELATED,

	/* Started a new connection to track (only
	   IP_CT_DIR_ORIGINAL); may be a retransmission. */
	IP_CT_NEW,

	/* >= this indicates reply direction */
	IP_CT_IS_REPLY,

	/* Composite states: base state + reply-direction offset. */
	IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
	IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
	IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY,
	/* Number of distinct IP_CT types (no NEW in reply dirn). */
	IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
};

/* Bitset representing status of connection.  Each IPS_*_BIT is a bit
   index; the matching IPS_* constant is the corresponding mask. */
enum ip_conntrack_status {
	/* It's an expected connection: bit 0 set.  This bit never changed */
	IPS_EXPECTED_BIT = 0,
	IPS_EXPECTED = (1 << IPS_EXPECTED_BIT),

	/* We've seen packets both ways: bit 1 set.  Can be set, not unset. */
	IPS_SEEN_REPLY_BIT = 1,
	IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT),

	/* Conntrack should never be early-expired. */
	IPS_ASSURED_BIT = 2,
	IPS_ASSURED = (1 << IPS_ASSURED_BIT),

	/* Connection is confirmed: originating packet has left box */
	IPS_CONFIRMED_BIT = 3,
	IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT),

	/* Connection needs src nat in orig dir.  This bit never changed. */
	IPS_SRC_NAT_BIT = 4,
	IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT),

	/* Connection needs dst nat in orig dir.  This bit never changed. */
	IPS_DST_NAT_BIT = 5,
	IPS_DST_NAT = (1 << IPS_DST_NAT_BIT),

	/* Both together. */
	IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT),

	/* Connection needs TCP sequence adjusted. */
	IPS_SEQ_ADJUST_BIT = 6,
	IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT),

	/* NAT initialization bits. */
	IPS_SRC_NAT_DONE_BIT = 7,
	IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT),

	IPS_DST_NAT_DONE_BIT = 8,
	IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT),

	/* Both together */
	IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE),

	/* Connection is dying (removed from lists), can not be unset. */
	IPS_DYING_BIT = 9,
	IPS_DYING = (1 << IPS_DYING_BIT),

	/* Connection has fixed timeout. */
	IPS_FIXED_TIMEOUT_BIT = 10,
	IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),

	/* Conntrack is a template */
	IPS_TEMPLATE_BIT = 11,
	IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),

	/* Conntrack is a fake untracked entry */
	IPS_UNTRACKED_BIT = 12,
	IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),

	/* Conntrack got a helper explicitly attached via CT target. */
	IPS_HELPER_BIT = 13,
	IPS_HELPER = (1 << IPS_HELPER_BIT),
};

/* Connection tracking event types (ctnetlink notifications). */
enum ip_conntrack_events {
	IPCT_NEW,		/* new conntrack */
	IPCT_RELATED,		/* related conntrack */
	IPCT_DESTROY,		/* destroyed conntrack */
	IPCT_REPLY,		/* connection has seen two-way traffic */
	IPCT_ASSURED,		/* connection status has changed to assured */
	IPCT_PROTOINFO,		/* protocol information has changed */
	IPCT_HELPER,		/* new helper has been set */
	IPCT_MARK,		/* new mark has been set */
	IPCT_NATSEQADJ,		/* NAT is doing sequence adjustment */
	IPCT_SECMARK,		/* new security mark has been set */
};

/* Expectation event types. */
enum ip_conntrack_expect_events {
	IPEXP_NEW,		/* new expectation */
	IPEXP_DESTROY,		/* destroyed expectation */
};

/* expectation flags */
#define NF_CT_EXPECT_PERMANENT	0x1
#define NF_CT_EXPECT_INACTIVE	0x2
#define NF_CT_EXPECT_USERSPACE	0x4


#endif /* _UAPI_NF_CONNTRACK_COMMON_H */
+18
include/uapi/linux/netfilter/nf_conntrack_ftp.h
#ifndef _UAPI_NF_CONNTRACK_FTP_H
#define _UAPI_NF_CONNTRACK_FTP_H
/* FTP tracking: classifies which FTP command/response created the
   expectation.  UAPI header -- values are ABI. */

/* This enum is exposed to userspace */
enum nf_ct_ftp_type {
	/* PORT command from client */
	NF_CT_FTP_PORT,
	/* PASV response from server */
	NF_CT_FTP_PASV,
	/* EPRT command from client */
	NF_CT_FTP_EPRT,
	/* EPSV response from server */
	NF_CT_FTP_EPSV,
};


#endif /* _UAPI_NF_CONNTRACK_FTP_H */
+51
include/uapi/linux/netfilter/nf_conntrack_tcp.h
#ifndef _UAPI_NF_CONNTRACK_TCP_H
#define _UAPI_NF_CONNTRACK_TCP_H
/* TCP tracking: conntrack's view of the TCP state machine plus
   per-direction flag bits.  UAPI header -- values are ABI. */

#include <linux/types.h>

/* This is exposed to userspace (ctnetlink) */
enum tcp_conntrack {
	TCP_CONNTRACK_NONE,
	TCP_CONNTRACK_SYN_SENT,
	TCP_CONNTRACK_SYN_RECV,
	TCP_CONNTRACK_ESTABLISHED,
	TCP_CONNTRACK_FIN_WAIT,
	TCP_CONNTRACK_CLOSE_WAIT,
	TCP_CONNTRACK_LAST_ACK,
	TCP_CONNTRACK_TIME_WAIT,
	TCP_CONNTRACK_CLOSE,
	TCP_CONNTRACK_LISTEN,	/* obsolete */
/* The obsolete LISTEN slot was recycled for simultaneous-open tracking. */
#define TCP_CONNTRACK_SYN_SENT2	TCP_CONNTRACK_LISTEN
	TCP_CONNTRACK_MAX,
	TCP_CONNTRACK_IGNORE,
	TCP_CONNTRACK_RETRANS,
	TCP_CONNTRACK_UNACK,
	TCP_CONNTRACK_TIMEOUT_MAX
};

/* Window scaling is advertised by the sender */
#define IP_CT_TCP_FLAG_WINDOW_SCALE		0x01

/* SACK is permitted by the sender */
#define IP_CT_TCP_FLAG_SACK_PERM		0x02

/* This sender sent FIN first */
#define IP_CT_TCP_FLAG_CLOSE_INIT		0x04

/* Be liberal in window checking */
#define IP_CT_TCP_FLAG_BE_LIBERAL		0x08

/* Has unacknowledged data */
#define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED	0x10

/* The field td_maxack has been set */
#define IP_CT_TCP_FLAG_MAXACK_SET		0x20

/* Flag/mask pair used by ctnetlink to set the per-direction flags above. */
struct nf_ct_tcp_flags {
	__u8 flags;
	__u8 mask;
};


#endif /* _UAPI_NF_CONNTRACK_TCP_H */
+56
include/uapi/linux/netfilter/nfnetlink.h
#ifndef _UAPI_NFNETLINK_H
#define _UAPI_NFNETLINK_H
/* Netfilter netlink (nfnetlink) UAPI: multicast groups, the common
   message header, and subsystem identifiers. */
#include <linux/types.h>
#include <linux/netfilter/nfnetlink_compat.h>

/* Multicast groups.  Each enumerator is shadowed by a same-named #define
   so userspace can test for its presence with #ifdef. */
enum nfnetlink_groups {
	NFNLGRP_NONE,
#define NFNLGRP_NONE			NFNLGRP_NONE
	NFNLGRP_CONNTRACK_NEW,
#define NFNLGRP_CONNTRACK_NEW		NFNLGRP_CONNTRACK_NEW
	NFNLGRP_CONNTRACK_UPDATE,
#define NFNLGRP_CONNTRACK_UPDATE	NFNLGRP_CONNTRACK_UPDATE
	NFNLGRP_CONNTRACK_DESTROY,
#define NFNLGRP_CONNTRACK_DESTROY	NFNLGRP_CONNTRACK_DESTROY
	NFNLGRP_CONNTRACK_EXP_NEW,
#define NFNLGRP_CONNTRACK_EXP_NEW	NFNLGRP_CONNTRACK_EXP_NEW
	NFNLGRP_CONNTRACK_EXP_UPDATE,
#define NFNLGRP_CONNTRACK_EXP_UPDATE	NFNLGRP_CONNTRACK_EXP_UPDATE
	NFNLGRP_CONNTRACK_EXP_DESTROY,
#define NFNLGRP_CONNTRACK_EXP_DESTROY	NFNLGRP_CONNTRACK_EXP_DESTROY
	__NFNLGRP_MAX,
};
#define NFNLGRP_MAX	(__NFNLGRP_MAX - 1)

/* General form of address family dependent message.
 */
struct nfgenmsg {
	__u8  nfgen_family;		/* AF_xxx */
	__u8  version;			/* nfnetlink version */
	__be16    res_id;		/* resource id */
};

#define NFNETLINK_V0	0

/* netfilter netlink message types are split in two pieces:
 * 8 bit subsystem, 8bit operation.
 */

#define NFNL_SUBSYS_ID(x)	((x & 0xff00) >> 8)
#define NFNL_MSG_TYPE(x)	(x & 0x00ff)

/* No enum here, otherwise __stringify() trick of MODULE_ALIAS_NFNL_SUBSYS()
 * won't work anymore */
#define NFNL_SUBSYS_NONE		0
#define NFNL_SUBSYS_CTNETLINK		1
#define NFNL_SUBSYS_CTNETLINK_EXP	2
#define NFNL_SUBSYS_QUEUE		3
#define NFNL_SUBSYS_ULOG		4
#define NFNL_SUBSYS_OSF			5
#define NFNL_SUBSYS_IPSET		6
#define NFNL_SUBSYS_ACCT		7
#define NFNL_SUBSYS_CTNETLINK_TIMEOUT	8
#define NFNL_SUBSYS_CTHELPER		9
#define NFNL_SUBSYS_COUNT		10

#endif /* _UAPI_NFNETLINK_H */
+187
include/uapi/linux/netfilter/x_tables.h
#ifndef _UAPI_X_TABLES_H
#define _UAPI_X_TABLES_H
/* Shared {ip,ip6,arp}tables match/target framing.  UAPI header: struct
   layouts below are wire/ABI format for the *_SO_SET/GET socket options. */
#include <linux/kernel.h>
#include <linux/types.h>

#define XT_FUNCTION_MAXNAMELEN 30
#define XT_EXTENSION_MAXNAMELEN 29
#define XT_TABLE_MAXNAMELEN 32

/* Header preceding each match payload.  The union overlays the userspace
   view (extension name + revision) with the kernel's resolved pointer;
   match_size is the total length including this header. */
struct xt_entry_match {
	union {
		struct {
			__u16 match_size;

			/* Used by userspace */
			char name[XT_EXTENSION_MAXNAMELEN];
			__u8 revision;
		} user;
		struct {
			__u16 match_size;

			/* Used inside the kernel */
			struct xt_match *match;
		} kernel;

		/* Total length */
		__u16 match_size;
	} u;

	unsigned char data[0];
};

/* Header preceding each target payload; same layout scheme as above. */
struct xt_entry_target {
	union {
		struct {
			__u16 target_size;

			/* Used by userspace */
			char name[XT_EXTENSION_MAXNAMELEN];
			__u8 revision;
		} user;
		struct {
			__u16 target_size;

			/* Used inside the kernel */
			struct xt_target *target;
		} kernel;

		/* Total length */
		__u16 target_size;
	} u;

	unsigned char data[0];
};

#define XT_TARGET_INIT(__name, __size)					       \
{									       \
	.target.u.user = {						       \
		.target_size	= XT_ALIGN(__size),			       \
		.name		= __name,				       \
	},								       \
}

struct xt_standard_target {
	struct xt_entry_target target;
	int verdict;
};

struct xt_error_target {
	struct xt_entry_target target;
	char errorname[XT_FUNCTION_MAXNAMELEN];
};

/* The argument to IPT_SO_GET_REVISION_*.  Returns highest revision
 * kernel supports, if >= revision. */
struct xt_get_revision {
	char name[XT_EXTENSION_MAXNAMELEN];
	__u8 revision;
};

/* CONTINUE verdict for targets */
#define XT_CONTINUE 0xFFFFFFFF

/* For standard target */
#define XT_RETURN (-NF_REPEAT - 1)

/* this is a dummy structure to find out the alignment requirement for a struct
 * containing all the fundamental data types that are used in ipt_entry,
 * ip6t_entry and arpt_entry.  This sucks, and it is a hack.  It will be my
 * personal pleasure to remove it -HW
 */
struct _xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	__u64 u64;
};

#define XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _xt_align))

/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
/* Error verdict. */
#define XT_ERROR_TARGET "ERROR"

#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)

struct xt_counters {
	__u64 pcnt, bcnt;			/* Packet and byte counters */
};

/* The argument to IPT_SO_ADD_COUNTERS. */
struct xt_counters_info {
	/* Which table. */
	char name[XT_TABLE_MAXNAMELEN];

	unsigned int num_counters;

	/* The counters (actually `number' of these). */
	struct xt_counters counters[0];
};

#define XT_INV_PROTO		0x40	/* Invert the sense of PROTO. */

#ifndef __KERNEL__
/* fn returns 0 to continue iteration */
#define XT_MATCH_ITERATE(type, e, fn, args...)			\
({								\
	unsigned int __i;					\
	int __ret = 0;						\
	struct xt_entry_match *__m;				\
								\
	for (__i = sizeof(type);				\
	     __i < (e)->target_offset;				\
	     __i += __m->u.match_size) {			\
		__m = (void *)e + __i;				\
								\
		__ret = fn(__m , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})

/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
({								\
	unsigned int __i, __n;					\
	int __ret = 0;						\
	type *__entry;						\
								\
	for (__i = 0, __n = 0; __i < (size);			\
	     __i += __entry->next_offset, __n++) {		\
		__entry = (void *)(entries) + __i;		\
		if (__n < n)					\
			continue;				\
								\
		__ret = fn(__entry , ## args);			\
		if (__ret != 0)					\
			break;					\
	}							\
	__ret;							\
})

/* fn returns 0 to continue iteration */
#define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
	XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)

#endif /* !__KERNEL__ */

/* pos is normally a struct ipt_entry/ip6t_entry/etc. */
#define xt_entry_foreach(pos, ehead, esize) \
	for ((pos) = (typeof(pos))(ehead); \
	     (pos) < (typeof(pos))((char *)(ehead) + (esize)); \
	     (pos) = (typeof(pos))((char *)(pos) + (pos)->next_offset))

/* can only be xt_entry_match, so no use of typeof here */
#define xt_ematch_foreach(pos, entry) \
	for ((pos) = (struct xt_entry_match *)entry->elems; \
	     (pos) < (struct xt_entry_match *)((char *)(entry) + \
	             (entry)->target_offset); \
	     (pos) = (struct xt_entry_match *)((char *)(pos) + \
	             (pos)->u.match_size))


#endif /* _UAPI_X_TABLES_H */
+73
include/uapi/linux/netfilter/xt_hashlimit.h
#ifndef _UAPI_XT_HASHLIMIT_H
#define _UAPI_XT_HASHLIMIT_H
/* xt_hashlimit match: per-bucket rate limiting.  UAPI header; the two
   cfg revisions below are distinct ABI layouts and must not change. */

#include <linux/types.h>

/* timings are in milliseconds. */
#define XT_HASHLIMIT_SCALE 10000
/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
 * seconds, or one packet every 59 hours.
 */

/* packet length accounting is done in 16-byte steps */
#define XT_HASHLIMIT_BYTE_SHIFT 4

/* details of this structure hidden by the implementation */
struct xt_hashlimit_htable;

enum {
	XT_HASHLIMIT_HASH_DIP	= 1 << 0,
	XT_HASHLIMIT_HASH_DPT	= 1 << 1,
	XT_HASHLIMIT_HASH_SIP	= 1 << 2,
	XT_HASHLIMIT_HASH_SPT	= 1 << 3,
	XT_HASHLIMIT_INVERT	= 1 << 4,
	XT_HASHLIMIT_BYTES	= 1 << 5,
};

/* Revision 0 configuration. */
struct hashlimit_cfg {
	__u32 mode;	  /* bitmask of XT_HASHLIMIT_HASH_* */
	__u32 avg;	  /* Average secs between packets * scale */
	__u32 burst;	  /* Period multiplier for upper limit. */

	/* user specified */
	__u32 size;		/* how many buckets */
	__u32 max;		/* max number of entries */
	__u32 gc_interval;	/* gc interval */
	__u32 expire;		/* when do entries expire? */
};

/* Revision 0 match info. */
struct xt_hashlimit_info {
	char name [IFNAMSIZ];		/* name */
	struct hashlimit_cfg cfg;

	/* Used internally by the kernel */
	struct xt_hashlimit_htable *hinfo;
	union {
		void *ptr;
		struct xt_hashlimit_info *master;
	} u;
};

/* Revision 1 configuration: adds src/dst prefix masks. */
struct hashlimit_cfg1 {
	__u32 mode;	  /* bitmask of XT_HASHLIMIT_HASH_* */
	__u32 avg;	  /* Average secs between packets * scale */
	__u32 burst;	  /* Period multiplier for upper limit. */

	/* user specified */
	__u32 size;		/* how many buckets */
	__u32 max;		/* max number of entries */
	__u32 gc_interval;	/* gc interval */
	__u32 expire;		/* when do entries expire? */

	__u8 srcmask, dstmask;
};

/* Revision 1 match info. */
struct xt_hashlimit_mtinfo1 {
	char name[IFNAMSIZ];
	struct hashlimit_cfg1 cfg;

	/* Used internally by the kernel; aligned(8) keeps the layout
	   identical between 32-bit userspace and 64-bit kernels. */
	struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
};

#endif /* _UAPI_XT_HASHLIMIT_H */
+23
include/uapi/linux/netfilter/xt_physdev.h
#ifndef _UAPI_XT_PHYSDEV_H
#define _UAPI_XT_PHYSDEV_H
/* xt_physdev match: match on bridge port (physical) in/out devices.
   NOTE(review): this header uses IFNAMSIZ but includes only
   <linux/types.h>; userspace must include <linux/if.h> (or <net/if.h>)
   first -- confirm whether an explicit include should be added. */

#include <linux/types.h>


#define XT_PHYSDEV_OP_IN		0x01
#define XT_PHYSDEV_OP_OUT		0x02
#define XT_PHYSDEV_OP_BRIDGED		0x04
#define XT_PHYSDEV_OP_ISIN		0x08
#define XT_PHYSDEV_OP_ISOUT		0x10
/* All valid XT_PHYSDEV_OP_* bits. */
#define XT_PHYSDEV_OP_MASK		(0x20 - 1)

struct xt_physdev_info {
	char physindev[IFNAMSIZ];
	char in_mask[IFNAMSIZ];
	char physoutdev[IFNAMSIZ];
	char out_mask[IFNAMSIZ];
	__u8 invert;	/* which checks to negate (XT_PHYSDEV_OP_* bits) */
	__u8 bitmask;	/* which checks are active (XT_PHYSDEV_OP_* bits) */
};

#endif /* _UAPI_XT_PHYSDEV_H */
+2
include/uapi/linux/netfilter_arp/Kbuild
# UAPI Header export list
# Headers below are installed to userspace via "make headers_install".
header-y += arp_tables.h
header-y += arpt_mangle.h
+206
include/uapi/linux/netfilter_arp/arp_tables.h
/*
 * Format of an ARP firewall descriptor
 *
 * src, tgt, src_mask, tgt_mask, arpop, arpop_mask are always stored in
 * network byte order.
 * flags are stored in host byte order (of course).
 */

#ifndef _UAPI_ARPTABLES_H
#define _UAPI_ARPTABLES_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/netfilter_arp.h>

#include <linux/netfilter/x_tables.h>

/* Backwards-compat aliases: arptables historically had its own names
   for what is now the shared xtables framing. */
#ifndef __KERNEL__
#define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
#define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
#define arpt_entry_target xt_entry_target
#define arpt_standard_target xt_standard_target
#define arpt_error_target xt_error_target
#define ARPT_CONTINUE XT_CONTINUE
#define ARPT_RETURN XT_RETURN
#define arpt_counters_info xt_counters_info
#define arpt_counters xt_counters
#define ARPT_STANDARD_TARGET XT_STANDARD_TARGET
#define ARPT_ERROR_TARGET XT_ERROR_TARGET
#define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \
	XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args)
#endif

#define ARPT_DEV_ADDR_LEN_MAX 16

struct arpt_devaddr_info {
	char addr[ARPT_DEV_ADDR_LEN_MAX];
	char mask[ARPT_DEV_ADDR_LEN_MAX];
};

/* Yes, Virginia, you have to zero the padding. */
struct arpt_arp {
	/* Source and target IP addr */
	struct in_addr src, tgt;
	/* Mask for src and target IP addr */
	struct in_addr smsk, tmsk;

	/* Device hw address length, src+target device addresses */
	__u8 arhln, arhln_mask;
	struct arpt_devaddr_info src_devaddr;
	struct arpt_devaddr_info tgt_devaddr;

	/* ARP operation code. */
	__be16 arpop, arpop_mask;

	/* ARP hardware address and protocol address format. */
	__be16 arhrd, arhrd_mask;
	__be16 arpro, arpro_mask;

	/* The protocol address length is only accepted if it is 4
	 * so there is no use in offering a way to do filtering on it.
	 */

	char iniface[IFNAMSIZ], outiface[IFNAMSIZ];
	unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ];

	/* Flags word */
	__u8 flags;
	/* Inverse flags; 16 bits because ten ARPT_INV_* bits exist
	   (see ARPT_INV_MASK below). */
	__u16 invflags;
};

/* Values for "flag" field in struct arpt_ip (general arp structure).
 * No flags defined yet.
 */
#define ARPT_F_MASK		0x00	/* All possible flag bits mask. */

/* Values for "inv" field in struct arpt_arp. */
#define ARPT_INV_VIA_IN		0x0001	/* Invert the sense of IN IFACE. */
#define ARPT_INV_VIA_OUT	0x0002	/* Invert the sense of OUT IFACE */
#define ARPT_INV_SRCIP		0x0004	/* Invert the sense of SRC IP. */
#define ARPT_INV_TGTIP		0x0008	/* Invert the sense of TGT IP. */
#define ARPT_INV_SRCDEVADDR	0x0010	/* Invert the sense of SRC DEV ADDR. */
#define ARPT_INV_TGTDEVADDR	0x0020	/* Invert the sense of TGT DEV ADDR. */
#define ARPT_INV_ARPOP		0x0040	/* Invert the sense of ARP OP. */
#define ARPT_INV_ARPHRD		0x0080	/* Invert the sense of ARP HRD. */
#define ARPT_INV_ARPPRO		0x0100	/* Invert the sense of ARP PRO. */
#define ARPT_INV_ARPHLN		0x0200	/* Invert the sense of ARP HLN. */
#define ARPT_INV_MASK		0x03FF	/* All possible flag bits mask. */

/* This structure defines each of the firewall rules.  Consists of 3
   parts which are 1) general ARP header stuff 2) match specific
   stuff 3) the target to perform if the rule matches */
struct arpt_entry
{
	struct arpt_arp arp;

	/* Size of arpt_entry + matches */
	__u16 target_offset;
	/* Size of arpt_entry + matches + target */
	__u16 next_offset;

	/* Back pointer */
	unsigned int comefrom;

	/* Packet and byte counters. */
	struct xt_counters counters;

	/* The matches (if any), then the target. */
	unsigned char elems[0];
};

/*
 * New IP firewall options for [gs]etsockopt at the RAW IP level.
 * Unlike BSD Linux inherits IP options so you don't have to use a raw
 * socket for this. Instead we check rights in the calls.
 *
 * ATTENTION: check linux/in.h before adding new number here.
 */
#define ARPT_BASE_CTL		96

#define ARPT_SO_SET_REPLACE		(ARPT_BASE_CTL)
#define ARPT_SO_SET_ADD_COUNTERS	(ARPT_BASE_CTL + 1)
#define ARPT_SO_SET_MAX			ARPT_SO_SET_ADD_COUNTERS

#define ARPT_SO_GET_INFO		(ARPT_BASE_CTL)
#define ARPT_SO_GET_ENTRIES		(ARPT_BASE_CTL + 1)
/* #define ARPT_SO_GET_REVISION_MATCH	(APRT_BASE_CTL + 2) */
#define ARPT_SO_GET_REVISION_TARGET	(ARPT_BASE_CTL + 3)
#define ARPT_SO_GET_MAX			(ARPT_SO_GET_REVISION_TARGET)

/* The argument to ARPT_SO_GET_INFO */
struct arpt_getinfo {
	/* Which table: caller fills this in. */
	char name[XT_TABLE_MAXNAMELEN];

	/* Kernel fills these in. */
	/* Which hook entry points are valid: bitmask */
	unsigned int valid_hooks;

	/* Hook entry points: one per netfilter hook. */
	unsigned int hook_entry[NF_ARP_NUMHOOKS];

	/* Underflow points. */
	unsigned int underflow[NF_ARP_NUMHOOKS];

	/* Number of entries */
	unsigned int num_entries;

	/* Size of entries. */
	unsigned int size;
};

/* The argument to ARPT_SO_SET_REPLACE. */
struct arpt_replace {
	/* Which table. */
	char name[XT_TABLE_MAXNAMELEN];

	/* Which hook entry points are valid: bitmask.  You can't
	   change this. */
	unsigned int valid_hooks;

	/* Number of entries */
	unsigned int num_entries;

	/* Total size of new entries */
	unsigned int size;

	/* Hook entry points. */
	unsigned int hook_entry[NF_ARP_NUMHOOKS];

	/* Underflow points. */
	unsigned int underflow[NF_ARP_NUMHOOKS];

	/* Information about old entries: */
	/* Number of counters (must be equal to current number of entries). */
	unsigned int num_counters;
	/* The old entries' counters. */
	struct xt_counters __user *counters;

	/* The entries (hang off end: not really an array). */
	struct arpt_entry entries[0];
};

/* The argument to ARPT_SO_GET_ENTRIES. */
struct arpt_get_entries {
	/* Which table: user fills this in. */
	char name[XT_TABLE_MAXNAMELEN];

	/* User fills this in: total entry size. */
	unsigned int size;

	/* The entries. */
	struct arpt_entry entrytable[0];
};

/* Helper functions */
/* Return the target record, which sits target_offset bytes into the entry. */
static __inline__ struct xt_entry_target *arpt_get_target(struct arpt_entry *e)
{
	return (void *)e + e->target_offset;
}

/*
 * Main firewall chains definitions and global var's definitions.
 */
#endif /* _UAPI_ARPTABLES_H */
+18
include/uapi/linux/netfilter_bridge/Kbuild
# UAPI Header export list
# Headers below are installed to userspace via "make headers_install".
header-y += ebt_802_3.h
header-y += ebt_among.h
header-y += ebt_arp.h
header-y += ebt_arpreply.h
header-y += ebt_ip.h
header-y += ebt_ip6.h
header-y += ebt_limit.h
header-y += ebt_log.h
header-y += ebt_mark_m.h
header-y += ebt_mark_t.h
header-y += ebt_nat.h
header-y += ebt_nflog.h
header-y += ebt_pkttype.h
header-y += ebt_redirect.h
header-y += ebt_stp.h
header-y += ebt_ulog.h
header-y += ebt_vlan.h
header-y += ebtables.h
+62
include/uapi/linux/netfilter_bridge/ebt_802_3.h
#ifndef _UAPI__LINUX_BRIDGE_EBT_802_3_H
#define _UAPI__LINUX_BRIDGE_EBT_802_3_H
/* ebtables 802.3 (LLC/SNAP) match.  UAPI header -- struct layouts are ABI. */

#include <linux/types.h>

#define EBT_802_3_SAP 0x01
#define EBT_802_3_TYPE 0x02

#define EBT_802_3_MATCH "802_3"

/*
 * If frame has DSAP/SSAP value 0xaa you must check the SNAP type
 * to discover what kind of packet we're carrying.
 */
#define CHECK_TYPE 0xaa

/*
 * Control field may be one or two bytes.  If the first byte has
 * the value 0x03 then the entire length is one byte, otherwise it is two.
 * One byte controls are used in Unnumbered Information frames.
 * Two byte controls are used in Numbered Information frames.
 */
#define IS_UI 0x03

/* NOTE(review): EBT_802_3 is not defined in this header -- it comes from
 * netfilter_bridge/ebtables.h (0x04 there); userspace needs that header
 * included first for this mask to expand.  Confirm whether EBT_802_3
 * should be defined locally. */
#define EBT_802_3_MASK (EBT_802_3_SAP | EBT_802_3_TYPE | EBT_802_3)

/* ui has one byte ctrl, ni has two */
struct hdr_ui {
	__u8 dsap;
	__u8 ssap;
	__u8 ctrl;
	__u8 orig[3];
	__be16 type;
};

struct hdr_ni {
	__u8 dsap;
	__u8 ssap;
	__be16 ctrl;
	__u8  orig[3];
	__be16 type;
};

struct ebt_802_3_hdr {
	__u8  daddr[6];
	__u8  saddr[6];
	__be16 len;
	union {
		struct hdr_ui ui;
		struct hdr_ni ni;
	} llc;
};


/* Match configuration passed from userspace. */
struct ebt_802_3_info {
	__u8  sap;
	__be16 type;
	__u8  bitmask;	/* EBT_802_3_SAP and/or EBT_802_3_TYPE */
	__u8  invflags;
};

#endif /* _UAPI__LINUX_BRIDGE_EBT_802_3_H */
+268
include/uapi/linux/netfilter_bridge/ebtables.h
/*
 * ebtables
 *
 * Authors:
 * Bart De Schuymer <bdschuym@pandora.be>
 *
 * ebtables.c,v 2.0, April, 2002
 *
 * This code is strongly inspired on the iptables code which is
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 */

#ifndef _UAPI__LINUX_BRIDGE_EFF_H
#define _UAPI__LINUX_BRIDGE_EFF_H
#include <linux/if.h>
#include <linux/netfilter_bridge.h>
#include <linux/if_ether.h>

#define EBT_TABLE_MAXNAMELEN 32
#define EBT_CHAIN_MAXNAMELEN EBT_TABLE_MAXNAMELEN
#define EBT_FUNCTION_MAXNAMELEN EBT_TABLE_MAXNAMELEN

/* verdicts >0 are "branches" */
#define EBT_ACCEPT   -1
#define EBT_DROP     -2
#define EBT_CONTINUE -3
#define EBT_RETURN   -4
#define NUM_STANDARD_TARGETS   4
/* ebtables target modules store the verdict inside an int. We can
 * reclaim a part of this int for backwards compatible extensions.
 * The 4 lsb are more than enough to store the verdict. */
#define EBT_VERDICT_BITS 0x0000000F

struct xt_match;
struct xt_target;

struct ebt_counter {
	uint64_t pcnt;
	uint64_t bcnt;
};

/* Table replacement request as seen from userspace (EBT_SO_SET_ENTRIES). */
struct ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	unsigned int valid_hooks;
	/* nr of rules in the table */
	unsigned int nentries;
	/* total size of the entries */
	unsigned int entries_size;
	/* start of the chains */
	struct ebt_entries __user *hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	unsigned int num_counters;
	/* where the kernel will put the old counters */
	struct ebt_counter __user *counters;
	char __user *entries;
};

/* Same layout with kernel pointers, used for built-in table definitions. */
struct ebt_replace_kernel {
	char name[EBT_TABLE_MAXNAMELEN];
	unsigned int valid_hooks;
	/* nr of rules in the table */
	unsigned int nentries;
	/* total size of the entries */
	unsigned int entries_size;
	/* start of the chains */
	struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	unsigned int num_counters;
	/* where the kernel will put the old counters */
	struct ebt_counter *counters;
	char *entries;
};

/* Chain header preceding the rules of one chain. */
struct ebt_entries {
	/* this field is always set to zero
	 * See EBT_ENTRY_OR_ENTRIES.
	 * Must be same size as ebt_entry.bitmask */
	unsigned int distinguisher;
	/* the chain name */
	char name[EBT_CHAIN_MAXNAMELEN];
	/* counter offset for this chain */
	unsigned int counter_offset;
	/* one standard (accept, drop, return) per hook */
	int policy;
	/* nr. of entries */
	unsigned int nentries;
	/* entry list */
	char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};

/* used for the bitmask of struct ebt_entry */

/* This is a hack to make a difference between an ebt_entry struct and an
 * ebt_entries struct when traversing the entries from start to end.
 * Using this simplifies the code a lot, while still being able to use
 * ebt_entries.
 * Contrary, iptables doesn't use something like ebt_entries and therefore uses
 * different techniques for naming the policy and such. So, iptables doesn't
 * need a hack like this.
 */
#define EBT_ENTRY_OR_ENTRIES 0x01
/* these are the normal masks */
#define EBT_NOPROTO 0x02
#define EBT_802_3 0x04
#define EBT_SOURCEMAC 0x08
#define EBT_DESTMAC 0x10
#define EBT_F_MASK (EBT_NOPROTO | EBT_802_3 | EBT_SOURCEMAC | EBT_DESTMAC \
   | EBT_ENTRY_OR_ENTRIES)

/* Inversion flags for struct ebt_entry.invflags. */
#define EBT_IPROTO 0x01
#define EBT_IIN 0x02
#define EBT_IOUT 0x04
#define EBT_ISOURCE 0x8
#define EBT_IDEST 0x10
#define EBT_ILOGICALIN 0x20
#define EBT_ILOGICALOUT 0x40
#define EBT_INV_MASK (EBT_IPROTO | EBT_IIN | EBT_IOUT | EBT_ILOGICALIN \
   | EBT_ILOGICALOUT | EBT_ISOURCE | EBT_IDEST)

struct ebt_entry_match {
	union {
		/* userspace fills in the name; kernel resolves it to a pointer */
		char name[EBT_FUNCTION_MAXNAMELEN];
		struct xt_match *match;
	} u;
	/* size of data */
	unsigned int match_size;
	unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};

struct ebt_entry_watcher {
	union {
		/* userspace fills in the name; kernel resolves it to a pointer */
		char name[EBT_FUNCTION_MAXNAMELEN];
		struct xt_target *watcher;
	} u;
	/* size of data */
	unsigned int watcher_size;
	unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};

struct ebt_entry_target {
	union {
		/* userspace fills in the name; kernel resolves it to a pointer */
		char name[EBT_FUNCTION_MAXNAMELEN];
		struct xt_target *target;
	} u;
	/* size of data */
	unsigned int target_size;
	unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};

#define EBT_STANDARD_TARGET "standard"
struct ebt_standard_target {
	struct ebt_entry_target target;
	int verdict;
};

/* one entry */
struct ebt_entry {
	/* this needs to be the first field */
	unsigned int bitmask;
	unsigned int invflags;
	__be16 ethproto;
	/* the physical in-dev */
	char in[IFNAMSIZ];
	/* the logical in-dev */
	char logical_in[IFNAMSIZ];
	/* the physical out-dev */
	char out[IFNAMSIZ];
	/* the logical out-dev */
	char logical_out[IFNAMSIZ];
	unsigned char sourcemac[ETH_ALEN];
	unsigned char sourcemsk[ETH_ALEN];
	unsigned char destmac[ETH_ALEN];
	unsigned char destmsk[ETH_ALEN];
	/* sizeof ebt_entry + matches */
	unsigned int watchers_offset;
	/* sizeof ebt_entry + matches + watchers */
	unsigned int target_offset;
	/* sizeof ebt_entry + matches + watchers + target */
	unsigned int next_offset;
	unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};

/* {g,s}etsockopt numbers */
#define EBT_BASE_CTL            128

#define EBT_SO_SET_ENTRIES      (EBT_BASE_CTL)
#define EBT_SO_SET_COUNTERS     (EBT_SO_SET_ENTRIES+1)
#define EBT_SO_SET_MAX          (EBT_SO_SET_COUNTERS+1)

#define EBT_SO_GET_INFO         (EBT_BASE_CTL)
#define EBT_SO_GET_ENTRIES      (EBT_SO_GET_INFO+1)
#define EBT_SO_GET_INIT_INFO    (EBT_SO_GET_ENTRIES+1)
#define EBT_SO_GET_INIT_ENTRIES (EBT_SO_GET_INIT_INFO+1)
#define EBT_SO_GET_MAX          (EBT_SO_GET_INIT_ENTRIES+1)


/* blatantly stolen from ip_tables.h
 * fn returns 0 to continue iteration */
#define EBT_MATCH_ITERATE(e, fn, args...)                   \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct ebt_entry_match *__match;                    \
	                                                    \
	for (__i = sizeof(struct ebt_entry);                \
	     __i < (e)->watchers_offset;                    \
	     __i += __match->match_size +                   \
	     sizeof(struct ebt_entry_match)) {              \
		__match = (void *)(e) + __i;                \
		                                            \
		__ret = fn(__match , ## args);              \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->watchers_offset)            \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})

#define EBT_WATCHER_ITERATE(e, fn, args...)                 \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct ebt_entry_watcher *__watcher;                \
	                                                    \
	for (__i = e->watchers_offset;                      \
	     __i < (e)->target_offset;                      \
	     __i += __watcher->watcher_size +               \
	     sizeof(struct ebt_entry_watcher)) {            \
		__watcher = (void *)(e) + __i;              \
		                                            \
		__ret = fn(__watcher , ## args);            \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->target_offset)              \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})

#define EBT_ENTRY_ITERATE(entries, size, fn, args...)       \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct ebt_entry *__entry;                          \
	                                                    \
	for (__i = 0; __i < (size);) {                      \
		__entry = (void *)(entries) + __i;          \
		__ret = fn(__entry , ## args);              \
		if (__ret != 0)                             \
			break;                              \
		if (__entry->bitmask != 0)                  \
			__i += __entry->next_offset;        \
		else                                        \
			__i += sizeof(struct ebt_entries);  \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (size))                          \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})

#endif /* _UAPI__LINUX_BRIDGE_EFF_H */
+10
include/uapi/linux/netfilter_ipv4/Kbuild
# UAPI Header export list
# Headers below are installed to userspace via "make headers_install".
header-y += ip_tables.h
header-y += ipt_CLUSTERIP.h
header-y += ipt_ECN.h
header-y += ipt_LOG.h
header-y += ipt_REJECT.h
header-y += ipt_TTL.h
header-y += ipt_ULOG.h
header-y += ipt_ah.h
header-y += ipt_ecn.h
header-y += ipt_ttl.h
+229
include/uapi/linux/netfilter_ipv4/ip_tables.h
··· 1 + /* 2 + * 25-Jul-1998 Major changes to allow for ip chain table 3 + * 4 + * 3-Jan-2000 Named tables to allow packet selection for different uses. 5 + */ 6 + 7 + /* 8 + * Format of an IP firewall descriptor 9 + * 10 + * src, dst, src_mask, dst_mask are always stored in network byte order. 11 + * flags are stored in host byte order (of course). 12 + * Port numbers are stored in HOST byte order. 13 + */ 14 + 15 + #ifndef _UAPI_IPTABLES_H 16 + #define _UAPI_IPTABLES_H 17 + 18 + #include <linux/types.h> 19 + #include <linux/compiler.h> 20 + #include <linux/netfilter_ipv4.h> 21 + 22 + #include <linux/netfilter/x_tables.h> 23 + 24 + #ifndef __KERNEL__ 25 + #define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN 26 + #define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN 27 + #define ipt_match xt_match 28 + #define ipt_target xt_target 29 + #define ipt_table xt_table 30 + #define ipt_get_revision xt_get_revision 31 + #define ipt_entry_match xt_entry_match 32 + #define ipt_entry_target xt_entry_target 33 + #define ipt_standard_target xt_standard_target 34 + #define ipt_error_target xt_error_target 35 + #define ipt_counters xt_counters 36 + #define IPT_CONTINUE XT_CONTINUE 37 + #define IPT_RETURN XT_RETURN 38 + 39 + /* This group is older than old (iptables < v1.4.0-rc1~89) */ 40 + #include <linux/netfilter/xt_tcpudp.h> 41 + #define ipt_udp xt_udp 42 + #define ipt_tcp xt_tcp 43 + #define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT 44 + #define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT 45 + #define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS 46 + #define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION 47 + #define IPT_TCP_INV_MASK XT_TCP_INV_MASK 48 + #define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT 49 + #define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT 50 + #define IPT_UDP_INV_MASK XT_UDP_INV_MASK 51 + 52 + /* The argument to IPT_SO_ADD_COUNTERS. */ 53 + #define ipt_counters_info xt_counters_info 54 + /* Standard return verdict, or do jump. */ 55 + #define IPT_STANDARD_TARGET XT_STANDARD_TARGET 56 + /* Error verdict. 
*/ 57 + #define IPT_ERROR_TARGET XT_ERROR_TARGET 58 + 59 + /* fn returns 0 to continue iteration */ 60 + #define IPT_MATCH_ITERATE(e, fn, args...) \ 61 + XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) 62 + 63 + /* fn returns 0 to continue iteration */ 64 + #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ 65 + XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) 66 + #endif 67 + 68 + /* Yes, Virginia, you have to zero the padding. */ 69 + struct ipt_ip { 70 + /* Source and destination IP addr */ 71 + struct in_addr src, dst; 72 + /* Mask for src and dest IP addr */ 73 + struct in_addr smsk, dmsk; 74 + char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; 75 + unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; 76 + 77 + /* Protocol, 0 = ANY */ 78 + __u16 proto; 79 + 80 + /* Flags word */ 81 + __u8 flags; 82 + /* Inverse flags */ 83 + __u8 invflags; 84 + }; 85 + 86 + /* Values for "flag" field in struct ipt_ip (general ip structure). */ 87 + #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ 88 + #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ 89 + #define IPT_F_MASK 0x03 /* All possible flag bits mask. */ 90 + 91 + /* Values for "inv" field in struct ipt_ip. */ 92 + #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ 93 + #define IPT_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */ 94 + #define IPT_INV_TOS 0x04 /* Invert the sense of TOS. */ 95 + #define IPT_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */ 96 + #define IPT_INV_DSTIP 0x10 /* Invert the sense of DST OP. */ 97 + #define IPT_INV_FRAG 0x20 /* Invert the sense of FRAG. */ 98 + #define IPT_INV_PROTO XT_INV_PROTO 99 + #define IPT_INV_MASK 0x7F /* All possible flag bits mask. */ 100 + 101 + /* This structure defines each of the firewall rules. 
Consists of 3 102 + parts which are 1) general IP header stuff 2) match specific 103 + stuff 3) the target to perform if the rule matches */ 104 + struct ipt_entry { 105 + struct ipt_ip ip; 106 + 107 + /* Mark with fields that we care about. */ 108 + unsigned int nfcache; 109 + 110 + /* Size of ipt_entry + matches */ 111 + __u16 target_offset; 112 + /* Size of ipt_entry + matches + target */ 113 + __u16 next_offset; 114 + 115 + /* Back pointer */ 116 + unsigned int comefrom; 117 + 118 + /* Packet and byte counters. */ 119 + struct xt_counters counters; 120 + 121 + /* The matches (if any), then the target. */ 122 + unsigned char elems[0]; 123 + }; 124 + 125 + /* 126 + * New IP firewall options for [gs]etsockopt at the RAW IP level. 127 + * Unlike BSD Linux inherits IP options so you don't have to use a raw 128 + * socket for this. Instead we check rights in the calls. 129 + * 130 + * ATTENTION: check linux/in.h before adding new number here. 131 + */ 132 + #define IPT_BASE_CTL 64 133 + 134 + #define IPT_SO_SET_REPLACE (IPT_BASE_CTL) 135 + #define IPT_SO_SET_ADD_COUNTERS (IPT_BASE_CTL + 1) 136 + #define IPT_SO_SET_MAX IPT_SO_SET_ADD_COUNTERS 137 + 138 + #define IPT_SO_GET_INFO (IPT_BASE_CTL) 139 + #define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1) 140 + #define IPT_SO_GET_REVISION_MATCH (IPT_BASE_CTL + 2) 141 + #define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3) 142 + #define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET 143 + 144 + /* ICMP matching stuff */ 145 + struct ipt_icmp { 146 + __u8 type; /* type to match */ 147 + __u8 code[2]; /* range of code */ 148 + __u8 invflags; /* Inverse flags */ 149 + }; 150 + 151 + /* Values for "inv" field for struct ipt_icmp. */ 152 + #define IPT_ICMP_INV 0x01 /* Invert the sense of type/code test */ 153 + 154 + /* The argument to IPT_SO_GET_INFO */ 155 + struct ipt_getinfo { 156 + /* Which table: caller fills this in. */ 157 + char name[XT_TABLE_MAXNAMELEN]; 158 + 159 + /* Kernel fills these in. 
*/ 160 + /* Which hook entry points are valid: bitmask */ 161 + unsigned int valid_hooks; 162 + 163 + /* Hook entry points: one per netfilter hook. */ 164 + unsigned int hook_entry[NF_INET_NUMHOOKS]; 165 + 166 + /* Underflow points. */ 167 + unsigned int underflow[NF_INET_NUMHOOKS]; 168 + 169 + /* Number of entries */ 170 + unsigned int num_entries; 171 + 172 + /* Size of entries. */ 173 + unsigned int size; 174 + }; 175 + 176 + /* The argument to IPT_SO_SET_REPLACE. */ 177 + struct ipt_replace { 178 + /* Which table. */ 179 + char name[XT_TABLE_MAXNAMELEN]; 180 + 181 + /* Which hook entry points are valid: bitmask. You can't 182 + change this. */ 183 + unsigned int valid_hooks; 184 + 185 + /* Number of entries */ 186 + unsigned int num_entries; 187 + 188 + /* Total size of new entries */ 189 + unsigned int size; 190 + 191 + /* Hook entry points. */ 192 + unsigned int hook_entry[NF_INET_NUMHOOKS]; 193 + 194 + /* Underflow points. */ 195 + unsigned int underflow[NF_INET_NUMHOOKS]; 196 + 197 + /* Information about old entries: */ 198 + /* Number of counters (must be equal to current number of entries). */ 199 + unsigned int num_counters; 200 + /* The old entries' counters. */ 201 + struct xt_counters __user *counters; 202 + 203 + /* The entries (hang off end: not really an array). */ 204 + struct ipt_entry entries[0]; 205 + }; 206 + 207 + /* The argument to IPT_SO_GET_ENTRIES. */ 208 + struct ipt_get_entries { 209 + /* Which table: user fills this in. */ 210 + char name[XT_TABLE_MAXNAMELEN]; 211 + 212 + /* User fills this in: total entry size. */ 213 + unsigned int size; 214 + 215 + /* The entries. */ 216 + struct ipt_entry entrytable[0]; 217 + }; 218 + 219 + /* Helper functions */ 220 + static __inline__ struct xt_entry_target * 221 + ipt_get_target(struct ipt_entry *e) 222 + { 223 + return (void *)e + e->target_offset; 224 + } 225 + 226 + /* 227 + * Main firewall chains definitions and global var's definitions. 228 + */ 229 + #endif /* _UAPI_IPTABLES_H */
+12
include/uapi/linux/netfilter_ipv6/Kbuild
··· 1 1 # UAPI Header export list 2 + header-y += ip6_tables.h 3 + header-y += ip6t_HL.h 4 + header-y += ip6t_LOG.h 5 + header-y += ip6t_NPT.h 6 + header-y += ip6t_REJECT.h 7 + header-y += ip6t_ah.h 8 + header-y += ip6t_frag.h 9 + header-y += ip6t_hl.h 10 + header-y += ip6t_ipv6header.h 11 + header-y += ip6t_mh.h 12 + header-y += ip6t_opts.h 13 + header-y += ip6t_rt.h
+267
include/uapi/linux/netfilter_ipv6/ip6_tables.h
··· 1 + /* 2 + * 25-Jul-1998 Major changes to allow for ip chain table 3 + * 4 + * 3-Jan-2000 Named tables to allow packet selection for different uses. 5 + */ 6 + 7 + /* 8 + * Format of an IP6 firewall descriptor 9 + * 10 + * src, dst, src_mask, dst_mask are always stored in network byte order. 11 + * flags are stored in host byte order (of course). 12 + * Port numbers are stored in HOST byte order. 13 + */ 14 + 15 + #ifndef _UAPI_IP6_TABLES_H 16 + #define _UAPI_IP6_TABLES_H 17 + 18 + #include <linux/types.h> 19 + #include <linux/compiler.h> 20 + #include <linux/netfilter_ipv6.h> 21 + 22 + #include <linux/netfilter/x_tables.h> 23 + 24 + #ifndef __KERNEL__ 25 + #define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN 26 + #define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN 27 + #define ip6t_match xt_match 28 + #define ip6t_target xt_target 29 + #define ip6t_table xt_table 30 + #define ip6t_get_revision xt_get_revision 31 + #define ip6t_entry_match xt_entry_match 32 + #define ip6t_entry_target xt_entry_target 33 + #define ip6t_standard_target xt_standard_target 34 + #define ip6t_error_target xt_error_target 35 + #define ip6t_counters xt_counters 36 + #define IP6T_CONTINUE XT_CONTINUE 37 + #define IP6T_RETURN XT_RETURN 38 + 39 + /* Pre-iptables-1.4.0 */ 40 + #include <linux/netfilter/xt_tcpudp.h> 41 + #define ip6t_tcp xt_tcp 42 + #define ip6t_udp xt_udp 43 + #define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT 44 + #define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT 45 + #define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS 46 + #define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION 47 + #define IP6T_TCP_INV_MASK XT_TCP_INV_MASK 48 + #define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT 49 + #define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT 50 + #define IP6T_UDP_INV_MASK XT_UDP_INV_MASK 51 + 52 + #define ip6t_counters_info xt_counters_info 53 + #define IP6T_STANDARD_TARGET XT_STANDARD_TARGET 54 + #define IP6T_ERROR_TARGET XT_ERROR_TARGET 55 + #define IP6T_MATCH_ITERATE(e, fn, args...) 
\ 56 + XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) 57 + #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ 58 + XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) 59 + #endif 60 + 61 + /* Yes, Virginia, you have to zero the padding. */ 62 + struct ip6t_ip6 { 63 + /* Source and destination IP6 addr */ 64 + struct in6_addr src, dst; 65 + /* Mask for src and dest IP6 addr */ 66 + struct in6_addr smsk, dmsk; 67 + char iniface[IFNAMSIZ], outiface[IFNAMSIZ]; 68 + unsigned char iniface_mask[IFNAMSIZ], outiface_mask[IFNAMSIZ]; 69 + 70 + /* Upper protocol number 71 + * - The allowed value is 0 (any) or protocol number of last parsable 72 + * header, which is 50 (ESP), 59 (No Next Header), 135 (MH), or 73 + * the non IPv6 extension headers. 74 + * - The protocol numbers of IPv6 extension headers except of ESP and 75 + * MH do not match any packets. 76 + * - You also need to set IP6T_FLAGS_PROTO to "flags" to check protocol. 77 + */ 78 + __u16 proto; 79 + /* TOS to match iff flags & IP6T_F_TOS */ 80 + __u8 tos; 81 + 82 + /* Flags word */ 83 + __u8 flags; 84 + /* Inverse flags */ 85 + __u8 invflags; 86 + }; 87 + 88 + /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */ 89 + #define IP6T_F_PROTO 0x01 /* Set if rule cares about upper 90 + protocols */ 91 + #define IP6T_F_TOS 0x02 /* Match the TOS. */ 92 + #define IP6T_F_GOTO 0x04 /* Set if jump is a goto */ 93 + #define IP6T_F_MASK 0x07 /* All possible flag bits mask. */ 94 + 95 + /* Values for "inv" field in struct ip6t_ip6. */ 96 + #define IP6T_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ 97 + #define IP6T_INV_VIA_OUT 0x02 /* Invert the sense of OUT IFACE */ 98 + #define IP6T_INV_TOS 0x04 /* Invert the sense of TOS. */ 99 + #define IP6T_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */ 100 + #define IP6T_INV_DSTIP 0x10 /* Invert the sense of DST OP. */ 101 + #define IP6T_INV_FRAG 0x20 /* Invert the sense of FRAG. 
*/ 102 + #define IP6T_INV_PROTO XT_INV_PROTO 103 + #define IP6T_INV_MASK 0x7F /* All possible flag bits mask. */ 104 + 105 + /* This structure defines each of the firewall rules. Consists of 3 106 + parts which are 1) general IP header stuff 2) match specific 107 + stuff 3) the target to perform if the rule matches */ 108 + struct ip6t_entry { 109 + struct ip6t_ip6 ipv6; 110 + 111 + /* Mark with fields that we care about. */ 112 + unsigned int nfcache; 113 + 114 + /* Size of ipt_entry + matches */ 115 + __u16 target_offset; 116 + /* Size of ipt_entry + matches + target */ 117 + __u16 next_offset; 118 + 119 + /* Back pointer */ 120 + unsigned int comefrom; 121 + 122 + /* Packet and byte counters. */ 123 + struct xt_counters counters; 124 + 125 + /* The matches (if any), then the target. */ 126 + unsigned char elems[0]; 127 + }; 128 + 129 + /* Standard entry */ 130 + struct ip6t_standard { 131 + struct ip6t_entry entry; 132 + struct xt_standard_target target; 133 + }; 134 + 135 + struct ip6t_error { 136 + struct ip6t_entry entry; 137 + struct xt_error_target target; 138 + }; 139 + 140 + #define IP6T_ENTRY_INIT(__size) \ 141 + { \ 142 + .target_offset = sizeof(struct ip6t_entry), \ 143 + .next_offset = (__size), \ 144 + } 145 + 146 + #define IP6T_STANDARD_INIT(__verdict) \ 147 + { \ 148 + .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ 149 + .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ 150 + sizeof(struct xt_standard_target)), \ 151 + .target.verdict = -(__verdict) - 1, \ 152 + } 153 + 154 + #define IP6T_ERROR_INIT \ 155 + { \ 156 + .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ 157 + .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ 158 + sizeof(struct xt_error_target)), \ 159 + .target.errorname = "ERROR", \ 160 + } 161 + 162 + /* 163 + * New IP firewall options for [gs]etsockopt at the RAW IP level. 164 + * Unlike BSD Linux inherits IP options so you don't have to use 165 + * a raw socket for this. Instead we check rights in the calls. 
166 + * 167 + * ATTENTION: check linux/in6.h before adding new number here. 168 + */ 169 + #define IP6T_BASE_CTL 64 170 + 171 + #define IP6T_SO_SET_REPLACE (IP6T_BASE_CTL) 172 + #define IP6T_SO_SET_ADD_COUNTERS (IP6T_BASE_CTL + 1) 173 + #define IP6T_SO_SET_MAX IP6T_SO_SET_ADD_COUNTERS 174 + 175 + #define IP6T_SO_GET_INFO (IP6T_BASE_CTL) 176 + #define IP6T_SO_GET_ENTRIES (IP6T_BASE_CTL + 1) 177 + #define IP6T_SO_GET_REVISION_MATCH (IP6T_BASE_CTL + 4) 178 + #define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5) 179 + #define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET 180 + 181 + /* ICMP matching stuff */ 182 + struct ip6t_icmp { 183 + __u8 type; /* type to match */ 184 + __u8 code[2]; /* range of code */ 185 + __u8 invflags; /* Inverse flags */ 186 + }; 187 + 188 + /* Values for "inv" field for struct ipt_icmp. */ 189 + #define IP6T_ICMP_INV 0x01 /* Invert the sense of type/code test */ 190 + 191 + /* The argument to IP6T_SO_GET_INFO */ 192 + struct ip6t_getinfo { 193 + /* Which table: caller fills this in. */ 194 + char name[XT_TABLE_MAXNAMELEN]; 195 + 196 + /* Kernel fills these in. */ 197 + /* Which hook entry points are valid: bitmask */ 198 + unsigned int valid_hooks; 199 + 200 + /* Hook entry points: one per netfilter hook. */ 201 + unsigned int hook_entry[NF_INET_NUMHOOKS]; 202 + 203 + /* Underflow points. */ 204 + unsigned int underflow[NF_INET_NUMHOOKS]; 205 + 206 + /* Number of entries */ 207 + unsigned int num_entries; 208 + 209 + /* Size of entries. */ 210 + unsigned int size; 211 + }; 212 + 213 + /* The argument to IP6T_SO_SET_REPLACE. */ 214 + struct ip6t_replace { 215 + /* Which table. */ 216 + char name[XT_TABLE_MAXNAMELEN]; 217 + 218 + /* Which hook entry points are valid: bitmask. You can't 219 + change this. */ 220 + unsigned int valid_hooks; 221 + 222 + /* Number of entries */ 223 + unsigned int num_entries; 224 + 225 + /* Total size of new entries */ 226 + unsigned int size; 227 + 228 + /* Hook entry points. 
*/ 229 + unsigned int hook_entry[NF_INET_NUMHOOKS]; 230 + 231 + /* Underflow points. */ 232 + unsigned int underflow[NF_INET_NUMHOOKS]; 233 + 234 + /* Information about old entries: */ 235 + /* Number of counters (must be equal to current number of entries). */ 236 + unsigned int num_counters; 237 + /* The old entries' counters. */ 238 + struct xt_counters __user *counters; 239 + 240 + /* The entries (hang off end: not really an array). */ 241 + struct ip6t_entry entries[0]; 242 + }; 243 + 244 + /* The argument to IP6T_SO_GET_ENTRIES. */ 245 + struct ip6t_get_entries { 246 + /* Which table: user fills this in. */ 247 + char name[XT_TABLE_MAXNAMELEN]; 248 + 249 + /* User fills this in: total entry size. */ 250 + unsigned int size; 251 + 252 + /* The entries. */ 253 + struct ip6t_entry entrytable[0]; 254 + }; 255 + 256 + /* Helper functions */ 257 + static __inline__ struct xt_entry_target * 258 + ip6t_get_target(struct ip6t_entry *e) 259 + { 260 + return (void *)e + e->target_offset; 261 + } 262 + 263 + /* 264 + * Main firewall chains definitions and global var's definitions. 265 + */ 266 + 267 + #endif /* _UAPI_IP6_TABLES_H */
+7
include/uapi/linux/tc_act/Kbuild
··· 1 1 # UAPI Header export list 2 + header-y += tc_csum.h 3 + header-y += tc_gact.h 4 + header-y += tc_ipt.h 5 + header-y += tc_mirred.h 6 + header-y += tc_nat.h 7 + header-y += tc_pedit.h 8 + header-y += tc_skbedit.h
+4
include/uapi/linux/tc_ematch/Kbuild
··· 1 1 # UAPI Header export list 2 + header-y += tc_em_cmp.h 3 + header-y += tc_em_meta.h 4 + header-y += tc_em_nbyte.h 5 + header-y += tc_em_text.h
+2 -8
net/8021q/vlan_core.c
··· 5 5 #include <linux/export.h> 6 6 #include "vlan.h" 7 7 8 - bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) 8 + bool vlan_do_receive(struct sk_buff **skbp) 9 9 { 10 10 struct sk_buff *skb = *skbp; 11 11 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; ··· 13 13 struct vlan_pcpu_stats *rx_stats; 14 14 15 15 vlan_dev = vlan_find_dev(skb->dev, vlan_id); 16 - if (!vlan_dev) { 17 - /* Only the last call to vlan_do_receive() should change 18 - * pkt_type to PACKET_OTHERHOST 19 - */ 20 - if (vlan_id && last_handler) 21 - skb->pkt_type = PACKET_OTHERHOST; 16 + if (!vlan_dev) 22 17 return false; 23 - } 24 18 25 19 skb = *skbp = skb_share_check(skb, GFP_ATOMIC); 26 20 if (unlikely(!skb))
+46 -17
net/core/dev.c
··· 3300 3300 && !skb_pfmemalloc_protocol(skb)) 3301 3301 goto drop; 3302 3302 3303 - rx_handler = rcu_dereference(skb->dev->rx_handler); 3304 3303 if (vlan_tx_tag_present(skb)) { 3305 3304 if (pt_prev) { 3306 3305 ret = deliver_skb(skb, pt_prev, orig_dev); 3307 3306 pt_prev = NULL; 3308 3307 } 3309 - if (vlan_do_receive(&skb, !rx_handler)) 3308 + if (vlan_do_receive(&skb)) 3310 3309 goto another_round; 3311 3310 else if (unlikely(!skb)) 3312 3311 goto unlock; 3313 3312 } 3314 3313 3314 + rx_handler = rcu_dereference(skb->dev->rx_handler); 3315 3315 if (rx_handler) { 3316 3316 if (pt_prev) { 3317 3317 ret = deliver_skb(skb, pt_prev, orig_dev); ··· 3330 3330 BUG(); 3331 3331 } 3332 3332 } 3333 + 3334 + if (vlan_tx_nonzero_tag_present(skb)) 3335 + skb->pkt_type = PACKET_OTHERHOST; 3333 3336 3334 3337 /* deliver only exact match when indicated */ 3335 3338 null_or_dev = deliver_exact ? skb->dev : NULL; ··· 3474 3471 return netif_receive_skb(skb); 3475 3472 } 3476 3473 3477 - inline void napi_gro_flush(struct napi_struct *napi) 3474 + /* napi->gro_list contains packets ordered by age. 3475 + * youngest packets at the head of it. 3476 + * Complete skbs in reverse order to reduce latencies. 
3477 + */ 3478 + void napi_gro_flush(struct napi_struct *napi, bool flush_old) 3478 3479 { 3479 - struct sk_buff *skb, *next; 3480 + struct sk_buff *skb, *prev = NULL; 3480 3481 3481 - for (skb = napi->gro_list; skb; skb = next) { 3482 - next = skb->next; 3483 - skb->next = NULL; 3484 - napi_gro_complete(skb); 3482 + /* scan list and build reverse chain */ 3483 + for (skb = napi->gro_list; skb != NULL; skb = skb->next) { 3484 + skb->prev = prev; 3485 + prev = skb; 3485 3486 } 3486 3487 3487 - napi->gro_count = 0; 3488 + for (skb = prev; skb; skb = prev) { 3489 + skb->next = NULL; 3490 + 3491 + if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) 3492 + return; 3493 + 3494 + prev = skb->prev; 3495 + napi_gro_complete(skb); 3496 + napi->gro_count--; 3497 + } 3498 + 3488 3499 napi->gro_list = NULL; 3489 3500 } 3490 3501 EXPORT_SYMBOL(napi_gro_flush); ··· 3559 3542 3560 3543 napi->gro_count++; 3561 3544 NAPI_GRO_CB(skb)->count = 1; 3545 + NAPI_GRO_CB(skb)->age = jiffies; 3562 3546 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3563 3547 skb->next = napi->gro_list; 3564 3548 napi->gro_list = skb; ··· 3649 3631 } 3650 3632 EXPORT_SYMBOL(napi_skb_finish); 3651 3633 3652 - void skb_gro_reset_offset(struct sk_buff *skb) 3634 + static void skb_gro_reset_offset(struct sk_buff *skb) 3653 3635 { 3636 + const struct skb_shared_info *pinfo = skb_shinfo(skb); 3637 + const skb_frag_t *frag0 = &pinfo->frags[0]; 3638 + 3654 3639 NAPI_GRO_CB(skb)->data_offset = 0; 3655 3640 NAPI_GRO_CB(skb)->frag0 = NULL; 3656 3641 NAPI_GRO_CB(skb)->frag0_len = 0; 3657 3642 3658 3643 if (skb->mac_header == skb->tail && 3659 - !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) { 3660 - NAPI_GRO_CB(skb)->frag0 = 3661 - skb_frag_address(&skb_shinfo(skb)->frags[0]); 3662 - NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]); 3644 + pinfo->nr_frags && 3645 + !PageHighMem(skb_frag_page(frag0))) { 3646 + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); 3647 + 
NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); 3663 3648 } 3664 3649 } 3665 - EXPORT_SYMBOL(skb_gro_reset_offset); 3666 3650 3667 3651 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3668 3652 { ··· 3896 3876 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) 3897 3877 return; 3898 3878 3899 - napi_gro_flush(n); 3879 + napi_gro_flush(n, false); 3900 3880 local_irq_save(flags); 3901 3881 __napi_complete(n); 3902 3882 local_irq_restore(flags); ··· 4001 3981 local_irq_enable(); 4002 3982 napi_complete(n); 4003 3983 local_irq_disable(); 4004 - } else 3984 + } else { 3985 + if (n->gro_list) { 3986 + /* flush too old packets 3987 + * If HZ < 1000, flush all packets. 3988 + */ 3989 + local_irq_enable(); 3990 + napi_gro_flush(n, HZ >= 1000); 3991 + local_irq_disable(); 3992 + } 4005 3993 list_move_tail(&n->poll_list, &sd->poll_list); 3994 + } 4006 3995 } 4007 3996 4008 3997 netpoll_poll_unlock(have);
+2 -4
net/core/neighbour.c
··· 1301 1301 if (!dst) 1302 1302 goto discard; 1303 1303 1304 - __skb_pull(skb, skb_network_offset(skb)); 1305 - 1306 1304 if (!neigh_event_send(neigh, skb)) { 1307 1305 int err; 1308 1306 struct net_device *dev = neigh->dev; ··· 1310 1312 neigh_hh_init(neigh, dst); 1311 1313 1312 1314 do { 1315 + __skb_pull(skb, skb_network_offset(skb)); 1313 1316 seq = read_seqbegin(&neigh->ha_lock); 1314 1317 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 1315 1318 neigh->ha, NULL, skb->len); ··· 1341 1342 unsigned int seq; 1342 1343 int err; 1343 1344 1344 - __skb_pull(skb, skb_network_offset(skb)); 1345 - 1346 1345 do { 1346 + __skb_pull(skb, skb_network_offset(skb)); 1347 1347 seq = read_seqbegin(&neigh->ha_lock); 1348 1348 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 1349 1349 neigh->ha, NULL, skb->len);
-47
net/core/skbuff.c
··· 655 655 } 656 656 EXPORT_SYMBOL(consume_skb); 657 657 658 - /** 659 - * skb_recycle - clean up an skb for reuse 660 - * @skb: buffer 661 - * 662 - * Recycles the skb to be reused as a receive buffer. This 663 - * function does any necessary reference count dropping, and 664 - * cleans up the skbuff as if it just came from __alloc_skb(). 665 - */ 666 - void skb_recycle(struct sk_buff *skb) 667 - { 668 - struct skb_shared_info *shinfo; 669 - 670 - skb_release_head_state(skb); 671 - 672 - shinfo = skb_shinfo(skb); 673 - memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 674 - atomic_set(&shinfo->dataref, 1); 675 - 676 - memset(skb, 0, offsetof(struct sk_buff, tail)); 677 - skb->data = skb->head + NET_SKB_PAD; 678 - skb_reset_tail_pointer(skb); 679 - } 680 - EXPORT_SYMBOL(skb_recycle); 681 - 682 - /** 683 - * skb_recycle_check - check if skb can be reused for receive 684 - * @skb: buffer 685 - * @skb_size: minimum receive buffer size 686 - * 687 - * Checks that the skb passed in is not shared or cloned, and 688 - * that it is linear and its head portion at least as large as 689 - * skb_size so that it can be recycled as a receive buffer. 690 - * If these conditions are met, this function does any necessary 691 - * reference count dropping and cleans up the skbuff as if it 692 - * just came from __alloc_skb(). 693 - */ 694 - bool skb_recycle_check(struct sk_buff *skb, int skb_size) 695 - { 696 - if (!skb_is_recycleable(skb, skb_size)) 697 - return false; 698 - 699 - skb_recycle(skb); 700 - 701 - return true; 702 - } 703 - EXPORT_SYMBOL(skb_recycle_check); 704 - 705 658 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 706 659 { 707 660 new->tstamp = old->tstamp;
+2 -1
net/ipv4/fib_frontend.c
··· 322 322 { 323 323 int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev); 324 324 325 - if (!r && !fib_num_tclassid_users(dev_net(dev))) { 325 + if (!r && !fib_num_tclassid_users(dev_net(dev)) && 326 + (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) { 326 327 *itag = 0; 327 328 return 0; 328 329 }
+2
net/ipv4/fib_semantics.c
··· 840 840 change_nexthops(fi) { 841 841 nexthop_nh->nh_parent = fi; 842 842 nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *); 843 + if (!nexthop_nh->nh_pcpu_rth_output) 844 + goto failure; 843 845 } endfor_nexthops(fi) 844 846 845 847 if (cfg->fc_mx) {
+2 -2
net/ipv4/inet_connection_sock.c
··· 406 406 rt = ip_route_output_flow(net, fl4, sk); 407 407 if (IS_ERR(rt)) 408 408 goto no_route; 409 - if (opt && opt->opt.is_strictroute && rt->rt_gateway) 409 + if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) 410 410 goto route_err; 411 411 return &rt->dst; 412 412 ··· 442 442 rt = ip_route_output_flow(net, fl4, sk); 443 443 if (IS_ERR(rt)) 444 444 goto no_route; 445 - if (opt && opt->opt.is_strictroute && rt->rt_gateway) 445 + if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) 446 446 goto route_err; 447 447 rcu_read_unlock(); 448 448 return &rt->dst;
+1 -1
net/ipv4/ip_forward.c
··· 85 85 86 86 rt = skb_rtable(skb); 87 87 88 - if (opt->is_strictroute && opt->nexthop != rt->rt_gateway) 88 + if (opt->is_strictroute && rt->rt_uses_gateway) 89 89 goto sr_failed; 90 90 91 91 if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
+2 -2
net/ipv4/ip_output.c
··· 193 193 } 194 194 195 195 rcu_read_lock_bh(); 196 - nexthop = rt->rt_gateway ? rt->rt_gateway : ip_hdr(skb)->daddr; 196 + nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr); 197 197 neigh = __ipv4_neigh_lookup_noref(dev, nexthop); 198 198 if (unlikely(!neigh)) 199 199 neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); ··· 371 371 skb_dst_set_noref(skb, &rt->dst); 372 372 373 373 packet_routed: 374 - if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gateway) 374 + if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway) 375 375 goto no_route; 376 376 377 377 /* OK, we know where to send it, allocate and build IP header. */
+84 -62
net/ipv4/route.c
··· 802 802 net = dev_net(rt->dst.dev); 803 803 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1); 804 804 if (!peer) { 805 - icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); 805 + icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, 806 + rt_nexthop(rt, ip_hdr(skb)->daddr)); 806 807 return; 807 808 } 808 809 ··· 828 827 time_after(jiffies, 829 828 (peer->rate_last + 830 829 (ip_rt_redirect_load << peer->rate_tokens)))) { 831 - icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); 830 + __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); 831 + 832 + icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); 832 833 peer->rate_last = jiffies; 833 834 ++peer->rate_tokens; 834 835 #ifdef CONFIG_IP_ROUTE_VERBOSE ··· 838 835 peer->rate_tokens == ip_rt_redirect_number) 839 836 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", 840 837 &ip_hdr(skb)->saddr, inet_iif(skb), 841 - &ip_hdr(skb)->daddr, &rt->rt_gateway); 838 + &ip_hdr(skb)->daddr, &gw); 842 839 #endif 843 840 } 844 841 out_put_peer: ··· 907 904 return 0; 908 905 } 909 906 910 - static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) 907 + static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) 911 908 { 909 + struct dst_entry *dst = &rt->dst; 912 910 struct fib_result res; 911 + 912 + if (dst->dev->mtu < mtu) 913 + return; 913 914 914 915 if (mtu < ip_rt_min_pmtu) 915 916 mtu = ip_rt_min_pmtu; 916 917 918 + if (!rt->rt_pmtu) { 919 + dst->obsolete = DST_OBSOLETE_KILL; 920 + } else { 921 + rt->rt_pmtu = mtu; 922 + dst->expires = max(1UL, jiffies + ip_rt_mtu_expires); 923 + } 924 + 917 925 rcu_read_lock(); 918 - if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { 926 + if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) { 919 927 struct fib_nh *nh = &FIB_RES_NH(res); 920 928 921 929 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, 922 930 jiffies + ip_rt_mtu_expires); 923 931 } 924 932 rcu_read_unlock(); 925 - return mtu; 
926 933 } 927 934 928 935 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, ··· 942 929 struct flowi4 fl4; 943 930 944 931 ip_rt_build_flow_key(&fl4, sk, skb); 945 - mtu = __ip_rt_update_pmtu(rt, &fl4, mtu); 946 - 947 - if (!rt->rt_pmtu) { 948 - dst->obsolete = DST_OBSOLETE_KILL; 949 - } else { 950 - rt->rt_pmtu = mtu; 951 - rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires); 952 - } 932 + __ip_rt_update_pmtu(rt, &fl4, mtu); 953 933 } 954 934 955 935 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, ··· 1126 1120 mtu = dst->dev->mtu; 1127 1121 1128 1122 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { 1129 - if (rt->rt_gateway && mtu > 576) 1123 + if (rt->rt_uses_gateway && mtu > 576) 1130 1124 mtu = 576; 1131 1125 } 1132 1126 ··· 1177 1171 if (fnhe->fnhe_gw) { 1178 1172 rt->rt_flags |= RTCF_REDIRECTED; 1179 1173 rt->rt_gateway = fnhe->fnhe_gw; 1180 - } 1174 + rt->rt_uses_gateway = 1; 1175 + } else if (!rt->rt_gateway) 1176 + rt->rt_gateway = daddr; 1181 1177 1182 1178 orig = rcu_dereference(fnhe->fnhe_rth); 1183 1179 rcu_assign_pointer(fnhe->fnhe_rth, rt); ··· 1188 1180 1189 1181 fnhe->fnhe_stamp = jiffies; 1190 1182 ret = true; 1191 - } else { 1192 - /* Routes we intend to cache in nexthop exception have 1193 - * the DST_NOCACHE bit clear. However, if we are 1194 - * unsuccessful at storing this route into the cache 1195 - * we really need to set it. 1196 - */ 1197 - rt->dst.flags |= DST_NOCACHE; 1198 1183 } 1199 1184 spin_unlock_bh(&fnhe_lock); 1200 1185 ··· 1202 1201 if (rt_is_input_route(rt)) { 1203 1202 p = (struct rtable **)&nh->nh_rth_input; 1204 1203 } else { 1205 - if (!nh->nh_pcpu_rth_output) 1206 - goto nocache; 1207 1204 p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output); 1208 1205 } 1209 1206 orig = *p; ··· 1210 1211 if (prev == orig) { 1211 1212 if (orig) 1212 1213 rt_free(orig); 1213 - } else { 1214 - /* Routes we intend to cache in the FIB nexthop have 1215 - * the DST_NOCACHE bit clear. 
However, if we are 1216 - * unsuccessful at storing this route into the cache 1217 - * we really need to set it. 1218 - */ 1219 - nocache: 1220 - rt->dst.flags |= DST_NOCACHE; 1214 + } else 1221 1215 ret = false; 1222 - } 1223 1216 1224 1217 return ret; 1225 1218 } ··· 1272 1281 if (fi) { 1273 1282 struct fib_nh *nh = &FIB_RES_NH(*res); 1274 1283 1275 - if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) 1284 + if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) { 1276 1285 rt->rt_gateway = nh->nh_gw; 1286 + rt->rt_uses_gateway = 1; 1287 + } 1277 1288 dst_init_metrics(&rt->dst, fi->fib_metrics, true); 1278 1289 #ifdef CONFIG_IP_ROUTE_CLASSID 1279 1290 rt->dst.tclassid = nh->nh_tclassid; ··· 1284 1291 cached = rt_bind_exception(rt, fnhe, daddr); 1285 1292 else if (!(rt->dst.flags & DST_NOCACHE)) 1286 1293 cached = rt_cache_route(nh, rt); 1287 - } 1288 - if (unlikely(!cached)) 1294 + if (unlikely(!cached)) { 1295 + /* Routes we intend to cache in nexthop exception or 1296 + * FIB nexthop have the DST_NOCACHE bit clear. 1297 + * However, if we are unsuccessful at storing this 1298 + * route into the cache we really need to set it. 
1299 + */ 1300 + rt->dst.flags |= DST_NOCACHE; 1301 + if (!rt->rt_gateway) 1302 + rt->rt_gateway = daddr; 1303 + rt_add_uncached_list(rt); 1304 + } 1305 + } else 1289 1306 rt_add_uncached_list(rt); 1290 1307 1291 1308 #ifdef CONFIG_IP_ROUTE_CLASSID ··· 1363 1360 rth->rt_iif = 0; 1364 1361 rth->rt_pmtu = 0; 1365 1362 rth->rt_gateway = 0; 1363 + rth->rt_uses_gateway = 0; 1366 1364 INIT_LIST_HEAD(&rth->rt_uncached); 1367 1365 if (our) { 1368 1366 rth->dst.input= ip_local_deliver; ··· 1433 1429 return -EINVAL; 1434 1430 } 1435 1431 1436 - 1437 1432 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res), 1438 1433 in_dev->dev, in_dev, &itag); 1439 1434 if (err < 0) { ··· 1442 1439 goto cleanup; 1443 1440 } 1444 1441 1445 - if (out_dev == in_dev && err && 1442 + do_cache = res->fi && !itag; 1443 + if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && 1446 1444 (IN_DEV_SHARED_MEDIA(out_dev) || 1447 - inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) 1445 + inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { 1448 1446 flags |= RTCF_DOREDIRECT; 1447 + do_cache = false; 1448 + } 1449 1449 1450 1450 if (skb->protocol != htons(ETH_P_IP)) { 1451 1451 /* Not IP (i.e. ARP). 
Do not create route, if it is ··· 1465 1459 } 1466 1460 } 1467 1461 1468 - do_cache = false; 1469 - if (res->fi) { 1470 - if (!itag) { 1471 - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1472 - if (rt_cache_valid(rth)) { 1473 - skb_dst_set_noref(skb, &rth->dst); 1474 - goto out; 1475 - } 1476 - do_cache = true; 1462 + if (do_cache) { 1463 + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1464 + if (rt_cache_valid(rth)) { 1465 + skb_dst_set_noref(skb, &rth->dst); 1466 + goto out; 1477 1467 } 1478 1468 } 1479 1469 ··· 1488 1486 rth->rt_iif = 0; 1489 1487 rth->rt_pmtu = 0; 1490 1488 rth->rt_gateway = 0; 1489 + rth->rt_uses_gateway = 0; 1491 1490 INIT_LIST_HEAD(&rth->rt_uncached); 1492 1491 1493 1492 rth->dst.input = ip_forward; ··· 1659 1656 rth->rt_iif = 0; 1660 1657 rth->rt_pmtu = 0; 1661 1658 rth->rt_gateway = 0; 1659 + rth->rt_uses_gateway = 0; 1662 1660 INIT_LIST_HEAD(&rth->rt_uncached); 1663 1661 if (res.type == RTN_UNREACHABLE) { 1664 1662 rth->dst.input= ip_error; ··· 1762 1758 struct in_device *in_dev; 1763 1759 u16 type = res->type; 1764 1760 struct rtable *rth; 1761 + bool do_cache; 1765 1762 1766 1763 in_dev = __in_dev_get_rcu(dev_out); 1767 1764 if (!in_dev) ··· 1799 1794 } 1800 1795 1801 1796 fnhe = NULL; 1797 + do_cache = fi != NULL; 1802 1798 if (fi) { 1803 1799 struct rtable __rcu **prth; 1800 + struct fib_nh *nh = &FIB_RES_NH(*res); 1804 1801 1805 - fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr); 1802 + fnhe = find_exception(nh, fl4->daddr); 1806 1803 if (fnhe) 1807 1804 prth = &fnhe->fnhe_rth; 1808 - else 1809 - prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output); 1805 + else { 1806 + if (unlikely(fl4->flowi4_flags & 1807 + FLOWI_FLAG_KNOWN_NH && 1808 + !(nh->nh_gw && 1809 + nh->nh_scope == RT_SCOPE_LINK))) { 1810 + do_cache = false; 1811 + goto add; 1812 + } 1813 + prth = __this_cpu_ptr(nh->nh_pcpu_rth_output); 1814 + } 1810 1815 rth = rcu_dereference(*prth); 1811 1816 if (rt_cache_valid(rth)) { 1812 1817 
dst_hold(&rth->dst); 1813 1818 return rth; 1814 1819 } 1815 1820 } 1821 + 1822 + add: 1816 1823 rth = rt_dst_alloc(dev_out, 1817 1824 IN_DEV_CONF_GET(in_dev, NOPOLICY), 1818 1825 IN_DEV_CONF_GET(in_dev, NOXFRM), 1819 - fi); 1826 + do_cache); 1820 1827 if (!rth) 1821 1828 return ERR_PTR(-ENOBUFS); 1822 1829 ··· 1841 1824 rth->rt_iif = orig_oif ? : 0; 1842 1825 rth->rt_pmtu = 0; 1843 1826 rth->rt_gateway = 0; 1827 + rth->rt_uses_gateway = 0; 1844 1828 INIT_LIST_HEAD(&rth->rt_uncached); 1845 1829 1846 1830 RT_CACHE_STAT_INC(out_slow_tot); ··· 2120 2102 rt->rt_flags = ort->rt_flags; 2121 2103 rt->rt_type = ort->rt_type; 2122 2104 rt->rt_gateway = ort->rt_gateway; 2105 + rt->rt_uses_gateway = ort->rt_uses_gateway; 2123 2106 2124 2107 INIT_LIST_HEAD(&rt->rt_uncached); 2125 2108 ··· 2199 2180 if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr)) 2200 2181 goto nla_put_failure; 2201 2182 } 2202 - if (rt->rt_gateway && 2183 + if (rt->rt_uses_gateway && 2203 2184 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) 2204 2185 goto nla_put_failure; 2205 2186 2187 + expires = rt->dst.expires; 2188 + if (expires) { 2189 + unsigned long now = jiffies; 2190 + 2191 + if (time_before(now, expires)) 2192 + expires -= now; 2193 + else 2194 + expires = 0; 2195 + } 2196 + 2206 2197 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 2207 - if (rt->rt_pmtu) 2198 + if (rt->rt_pmtu && expires) 2208 2199 metrics[RTAX_MTU - 1] = rt->rt_pmtu; 2209 2200 if (rtnetlink_put_metrics(skb, metrics) < 0) 2210 2201 goto nla_put_failure; ··· 2224 2195 goto nla_put_failure; 2225 2196 2226 2197 error = rt->dst.error; 2227 - expires = rt->dst.expires; 2228 - if (expires) { 2229 - if (time_before(jiffies, expires)) 2230 - expires -= jiffies; 2231 - else 2232 - expires = 0; 2233 - } 2234 2198 2235 2199 if (rt_is_input_route(rt)) { 2236 2200 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+1
net/ipv4/xfrm4_policy.c
··· 91 91 RTCF_LOCAL); 92 92 xdst->u.rt.rt_type = rt->rt_type; 93 93 xdst->u.rt.rt_gateway = rt->rt_gateway; 94 + xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; 94 95 xdst->u.rt.rt_pmtu = rt->rt_pmtu; 95 96 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); 96 97
+9 -13
net/ipv6/af_inet6.c
··· 822 822 return segs; 823 823 } 824 824 825 - struct ipv6_gro_cb { 826 - struct napi_gro_cb napi; 827 - int proto; 828 - }; 829 - 830 - #define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb) 831 - 832 825 static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, 833 826 struct sk_buff *skb) 834 827 { ··· 867 874 iph = ipv6_hdr(skb); 868 875 } 869 876 870 - IPV6_GRO_CB(skb)->proto = proto; 877 + NAPI_GRO_CB(skb)->proto = proto; 871 878 872 879 flush--; 873 880 nlen = skb_network_header_len(skb); 874 881 875 882 for (p = *head; p; p = p->next) { 876 - struct ipv6hdr *iph2; 883 + const struct ipv6hdr *iph2; 884 + __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ 877 885 878 886 if (!NAPI_GRO_CB(p)->same_flow) 879 887 continue; 880 888 881 889 iph2 = ipv6_hdr(p); 890 + first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ; 882 891 883 - /* All fields must match except length. */ 892 + /* All fields must match except length and Traffic Class. */ 884 893 if (nlen != skb_network_header_len(p) || 885 - memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) || 894 + (first_word & htonl(0xF00FFFFF)) || 886 895 memcmp(&iph->nexthdr, &iph2->nexthdr, 887 896 nlen - offsetof(struct ipv6hdr, nexthdr))) { 888 897 NAPI_GRO_CB(p)->same_flow = 0; 889 898 continue; 890 899 } 891 - 900 + /* flush if Traffic Class fields are different */ 901 + NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); 892 902 NAPI_GRO_CB(p)->flush |= flush; 893 903 } 894 904 ··· 923 927 sizeof(*iph)); 924 928 925 929 rcu_read_lock(); 926 - ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]); 930 + ops = rcu_dereference(inet6_protos[NAPI_GRO_CB(skb)->proto]); 927 931 if (WARN_ON(!ops || !ops->gro_complete)) 928 932 goto out_unlock; 929 933
+5 -1
net/netfilter/ipvs/ip_vs_xmit.c
··· 50 50 * local 51 51 */ 52 52 IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */ 53 + IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */ 53 54 }; 54 55 55 56 /* ··· 114 113 fl4.daddr = daddr; 115 114 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0; 116 115 fl4.flowi4_tos = rtos; 116 + fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? 117 + FLOWI_FLAG_KNOWN_NH : 0; 117 118 118 119 retry: 119 120 rt = ip_route_output_key(net, &fl4); ··· 1064 1061 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1065 1062 RT_TOS(iph->tos), 1066 1063 IP_VS_RT_MODE_LOCAL | 1067 - IP_VS_RT_MODE_NON_LOCAL, NULL))) 1064 + IP_VS_RT_MODE_NON_LOCAL | 1065 + IP_VS_RT_MODE_KNOWN_NH, NULL))) 1068 1066 goto tx_error_icmp; 1069 1067 if (rt->rt_flags & RTCF_LOCAL) { 1070 1068 ip_rt_put(rt);
+21 -8
net/netlink/af_netlink.c
··· 169 169 if (nlk->cb) { 170 170 if (nlk->cb->done) 171 171 nlk->cb->done(nlk->cb); 172 + 173 + module_put(nlk->cb->module); 172 174 netlink_destroy_callback(nlk->cb); 173 175 } 174 176 ··· 1760 1758 nlk->cb = NULL; 1761 1759 mutex_unlock(nlk->cb_mutex); 1762 1760 1761 + module_put(cb->module); 1763 1762 netlink_consume_callback(cb); 1764 1763 return 0; 1765 1764 ··· 1770 1767 return err; 1771 1768 } 1772 1769 1773 - int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 1774 - const struct nlmsghdr *nlh, 1775 - struct netlink_dump_control *control) 1770 + int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 1771 + const struct nlmsghdr *nlh, 1772 + struct netlink_dump_control *control) 1776 1773 { 1777 1774 struct netlink_callback *cb; 1778 1775 struct sock *sk; ··· 1787 1784 cb->done = control->done; 1788 1785 cb->nlh = nlh; 1789 1786 cb->data = control->data; 1787 + cb->module = control->module; 1790 1788 cb->min_dump_alloc = control->min_dump_alloc; 1791 1789 atomic_inc(&skb->users); 1792 1790 cb->skb = skb; ··· 1798 1794 return -ECONNREFUSED; 1799 1795 } 1800 1796 nlk = nlk_sk(sk); 1801 - /* A dump is in progress... */ 1797 + 1802 1798 mutex_lock(nlk->cb_mutex); 1799 + /* A dump is in progress... 
*/ 1803 1800 if (nlk->cb) { 1804 1801 mutex_unlock(nlk->cb_mutex); 1805 1802 netlink_destroy_callback(cb); 1806 - sock_put(sk); 1807 - return -EBUSY; 1803 + ret = -EBUSY; 1804 + goto out; 1808 1805 } 1806 + /* add reference of module which cb->dump belongs to */ 1807 + if (!try_module_get(cb->module)) { 1808 + mutex_unlock(nlk->cb_mutex); 1809 + netlink_destroy_callback(cb); 1810 + ret = -EPROTONOSUPPORT; 1811 + goto out; 1812 + } 1813 + 1809 1814 nlk->cb = cb; 1810 1815 mutex_unlock(nlk->cb_mutex); 1811 1816 1812 1817 ret = netlink_dump(sk); 1813 - 1818 + out: 1814 1819 sock_put(sk); 1815 1820 1816 1821 if (ret) ··· 1830 1817 */ 1831 1818 return -EINTR; 1832 1819 } 1833 - EXPORT_SYMBOL(netlink_dump_start); 1820 + EXPORT_SYMBOL(__netlink_dump_start); 1834 1821 1835 1822 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) 1836 1823 {
+1 -1
net/rds/send.c
··· 1122 1122 rds_stats_inc(s_send_pong); 1123 1123 1124 1124 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) 1125 - rds_send_xmit(conn); 1125 + queue_delayed_work(rds_wq, &conn->c_send_w, 0); 1126 1126 1127 1127 rds_message_put(rm); 1128 1128 return 0;