Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (29 commits)
  drivers/net: Correct NULL test
  MAINTAINERS: networking drivers - Add git net-next tree
  net/sched: Fix module name in Kconfig
  cxgb3: fix GRO checksum check
  dst: call cond_resched() in dst_gc_task()
  netfilter: nf_conntrack: fix hash resizing with namespaces
  netfilter: xtables: compat out of scope fix
  netfilter: nf_conntrack: restrict runtime expect hashsize modifications
  netfilter: nf_conntrack: per netns nf_conntrack_cachep
  netfilter: nf_conntrack: fix memory corruption with multiple namespaces
  Bluetooth: Keep a copy of each HID device's report descriptor
  pktgen: Fix freezing problem
  igb: make certain to reassign legacy interrupt vectors after reset
  irda: add missing BKL in irnet_ppp ioctl
  irda: unbalanced lock_kernel in irnet_ppp
  ixgbe: Fix return of invalid txq
  ixgbe: Fix ixgbe_tx_map error path
  netxen: protect resource cleanup by rtnl lock
  netxen: fix tx timeout recovery for NX2031 chip
  Bluetooth: Enter active mode before establishing a SCO link.
  ...

+197 -164
+1
MAINTAINERS
··· 3836 L: netdev@vger.kernel.org 3837 W: http://www.linuxfoundation.org/en/Net 3838 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git 3839 S: Odd Fixes 3840 F: drivers/net/ 3841 F: include/linux/if_*
··· 3836 L: netdev@vger.kernel.org 3837 W: http://www.linuxfoundation.org/en/Net 3838 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git 3839 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git 3840 S: Odd Fixes 3841 F: drivers/net/ 3842 F: include/linux/if_*
+1
drivers/bluetooth/btmrvl_sdio.c
··· 808 809 exit: 810 sdio_release_host(card->func); 811 812 return ret; 813 }
··· 808 809 exit: 810 sdio_release_host(card->func); 811 + kfree(tmpbuf); 812 813 return ret; 814 }
+1 -1
drivers/net/ax88796.c
··· 921 size = (res->end - res->start) + 1; 922 923 ax->mem2 = request_mem_region(res->start, size, pdev->name); 924 - if (ax->mem == NULL) { 925 dev_err(&pdev->dev, "cannot reserve registers\n"); 926 ret = -ENXIO; 927 goto exit_mem1;
··· 921 size = (res->end - res->start) + 1; 922 923 ax->mem2 = request_mem_region(res->start, size, pdev->name); 924 + if (ax->mem2 == NULL) { 925 dev_err(&pdev->dev, "cannot reserve registers\n"); 926 ret = -ENXIO; 927 goto exit_mem1;
+12 -8
drivers/net/cxgb3/sge.c
··· 2079 struct sge_fl *fl, int len, int complete) 2080 { 2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2082 struct sk_buff *skb = NULL; 2083 struct cpl_rx_pkt *cpl; 2084 struct skb_frag_struct *rx_frag; ··· 2117 2118 if (!nr_frags) { 2119 offset = 2 + sizeof(struct cpl_rx_pkt); 2120 - qs->lro_va = sd->pg_chunk.va + 2; 2121 - } 2122 - len -= offset; 2123 2124 - prefetch(qs->lro_va); 2125 2126 rx_frag += nr_frags; 2127 rx_frag->page = sd->pg_chunk.page; ··· 2144 return; 2145 2146 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2147 - skb->ip_summed = CHECKSUM_UNNECESSARY; 2148 - cpl = qs->lro_va; 2149 2150 if (unlikely(cpl->vlan_valid)) { 2151 - struct net_device *dev = qs->netdev; 2152 - struct port_info *pi = netdev_priv(dev); 2153 struct vlan_group *grp = pi->vlan_grp; 2154 2155 if (likely(grp != NULL)) {
··· 2079 struct sge_fl *fl, int len, int complete) 2080 { 2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2082 + struct port_info *pi = netdev_priv(qs->netdev); 2083 struct sk_buff *skb = NULL; 2084 struct cpl_rx_pkt *cpl; 2085 struct skb_frag_struct *rx_frag; ··· 2116 2117 if (!nr_frags) { 2118 offset = 2 + sizeof(struct cpl_rx_pkt); 2119 + cpl = qs->lro_va = sd->pg_chunk.va + 2; 2120 2121 + if ((pi->rx_offload & T3_RX_CSUM) && 2122 + cpl->csum_valid && cpl->csum == htons(0xffff)) { 2123 + skb->ip_summed = CHECKSUM_UNNECESSARY; 2124 + qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2125 + } else 2126 + skb->ip_summed = CHECKSUM_NONE; 2127 + } else 2128 + cpl = qs->lro_va; 2129 + 2130 + len -= offset; 2131 2132 rx_frag += nr_frags; 2133 rx_frag->page = sd->pg_chunk.page; ··· 2136 return; 2137 2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2139 2140 if (unlikely(cpl->vlan_valid)) { 2141 struct vlan_group *grp = pi->vlan_grp; 2142 2143 if (likely(grp != NULL)) {
+5 -15
drivers/net/igb/igb_main.c
··· 421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 422 if (tx_queue > IGB_N0_QUEUE) 423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 424 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 425 q_vector->eims_value = msixbm; 426 break; ··· 879 { 880 struct net_device *netdev = adapter->netdev; 881 struct pci_dev *pdev = adapter->pdev; 882 - struct e1000_hw *hw = &adapter->hw; 883 int err = 0; 884 885 if (adapter->msix_entries) { ··· 910 igb_setup_all_tx_resources(adapter); 911 igb_setup_all_rx_resources(adapter); 912 } else { 913 - switch (hw->mac.type) { 914 - case e1000_82575: 915 - wr32(E1000_MSIXBM(0), 916 - (E1000_EICR_RX_QUEUE0 | 917 - E1000_EICR_TX_QUEUE0 | 918 - E1000_EIMS_OTHER)); 919 - break; 920 - case e1000_82580: 921 - case e1000_82576: 922 - wr32(E1000_IVAR0, E1000_IVAR_VALID); 923 - break; 924 - default: 925 - break; 926 - } 927 } 928 929 if (adapter->flags & IGB_FLAG_HAS_MSI) { ··· 1128 } 1129 if (adapter->msix_entries) 1130 igb_configure_msix(adapter); 1131 1132 /* Clear any pending interrupts. */ 1133 rd32(E1000_ICR);
··· 421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 422 if (tx_queue > IGB_N0_QUEUE) 423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 424 + if (!adapter->msix_entries && msix_vector == 0) 425 + msixbm |= E1000_EIMS_OTHER; 426 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 427 q_vector->eims_value = msixbm; 428 break; ··· 877 { 878 struct net_device *netdev = adapter->netdev; 879 struct pci_dev *pdev = adapter->pdev; 880 int err = 0; 881 882 if (adapter->msix_entries) { ··· 909 igb_setup_all_tx_resources(adapter); 910 igb_setup_all_rx_resources(adapter); 911 } else { 912 + igb_assign_vector(adapter->q_vector[0], 0); 913 } 914 915 if (adapter->flags & IGB_FLAG_HAS_MSI) { ··· 1140 } 1141 if (adapter->msix_entries) 1142 igb_configure_msix(adapter); 1143 + else 1144 + igb_assign_vector(adapter->q_vector[0], 0); 1145 1146 /* Clear any pending interrupts. */ 1147 rd32(E1000_ICR);
+5 -2
drivers/net/ixgbe/ixgbe_main.c
··· 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5180 } 5181 5182 - return count; 5183 } 5184 5185 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, ··· 5329 struct ixgbe_adapter *adapter = netdev_priv(dev); 5330 int txq = smp_processor_id(); 5331 5332 - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) 5333 return txq; 5334 5335 #ifdef IXGBE_FCOE 5336 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
··· 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5180 } 5181 5182 + return 0; 5183 } 5184 5185 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, ··· 5329 struct ixgbe_adapter *adapter = netdev_priv(dev); 5330 int txq = smp_processor_id(); 5331 5332 + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 5333 + while (unlikely(txq >= dev->real_num_tx_queues)) 5334 + txq -= dev->real_num_tx_queues; 5335 return txq; 5336 + } 5337 5338 #ifdef IXGBE_FCOE 5339 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+3 -1
drivers/net/netxen/netxen_nic_main.c
··· 1941 netif_wake_queue(adapter->netdev); 1942 1943 clear_bit(__NX_RESETTING, &adapter->state); 1944 - 1945 } else { 1946 clear_bit(__NX_RESETTING, &adapter->state); 1947 if (!netxen_nic_reset_context(adapter)) { ··· 2240 2241 netxen_nic_down(adapter, netdev); 2242 2243 netxen_nic_detach(adapter); 2244 2245 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 2246
··· 1941 netif_wake_queue(adapter->netdev); 1942 1943 clear_bit(__NX_RESETTING, &adapter->state); 1944 + return; 1945 } else { 1946 clear_bit(__NX_RESETTING, &adapter->state); 1947 if (!netxen_nic_reset_context(adapter)) { ··· 2240 2241 netxen_nic_down(adapter, netdev); 2242 2243 + rtnl_lock(); 2244 netxen_nic_detach(adapter); 2245 + rtnl_unlock(); 2246 2247 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 2248
+3 -5
drivers/net/sky2.c
··· 1025 static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) 1026 { 1027 struct sky2_tx_le *le = sky2->tx_le + *slot; 1028 - struct tx_ring_info *re = sky2->tx_ring + *slot; 1029 1030 *slot = RING_NEXT(*slot, sky2->tx_ring_size); 1031 - re->flags = 0; 1032 - re->skb = NULL; 1033 le->ctrl = 0; 1034 return le; 1035 } ··· 1619 return count; 1620 } 1621 1622 - static void sky2_tx_unmap(struct pci_dev *pdev, 1623 - const struct tx_ring_info *re) 1624 { 1625 if (re->flags & TX_MAP_SINGLE) 1626 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), ··· 1629 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1630 pci_unmap_len(re, maplen), 1631 PCI_DMA_TODEVICE); 1632 } 1633 1634 /* ··· 1836 dev->stats.tx_packets++; 1837 dev->stats.tx_bytes += skb->len; 1838 1839 dev_kfree_skb_any(skb); 1840 1841 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
··· 1025 static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) 1026 { 1027 struct sky2_tx_le *le = sky2->tx_le + *slot; 1028 1029 *slot = RING_NEXT(*slot, sky2->tx_ring_size); 1030 le->ctrl = 0; 1031 return le; 1032 } ··· 1622 return count; 1623 } 1624 1625 + static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) 1626 { 1627 if (re->flags & TX_MAP_SINGLE) 1628 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), ··· 1633 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1634 pci_unmap_len(re, maplen), 1635 PCI_DMA_TODEVICE); 1636 + re->flags = 0; 1637 } 1638 1639 /* ··· 1839 dev->stats.tx_packets++; 1840 dev->stats.tx_bytes += skb->len; 1841 1842 + re->skb = NULL; 1843 dev_kfree_skb_any(skb); 1844 1845 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
+3
include/net/netns/conntrack.h
··· 11 struct netns_ct { 12 atomic_t count; 13 unsigned int expect_count; 14 struct hlist_nulls_head *hash; 15 struct hlist_head *expect_hash; 16 struct hlist_nulls_head unconfirmed; ··· 30 #endif 31 int hash_vmalloc; 32 int expect_vmalloc; 33 }; 34 #endif
··· 11 struct netns_ct { 12 atomic_t count; 13 unsigned int expect_count; 14 + unsigned int htable_size; 15 + struct kmem_cache *nf_conntrack_cachep; 16 struct hlist_nulls_head *hash; 17 struct hlist_head *expect_hash; 18 struct hlist_nulls_head unconfirmed; ··· 28 #endif 29 int hash_vmalloc; 30 int expect_vmalloc; 31 + char *slabname; 32 }; 33 #endif
+1
include/net/netns/ipv4.h
··· 40 struct xt_table *iptable_security; 41 struct xt_table *nat_table; 42 struct hlist_head *nat_bysource; 43 int nat_vmalloced; 44 #endif 45
··· 40 struct xt_table *iptable_security; 41 struct xt_table *nat_table; 42 struct hlist_head *nat_bysource; 43 + unsigned int nat_htable_size; 44 int nat_vmalloced; 45 #endif 46
+3
net/bluetooth/hci_conn.c
··· 377 378 if (acl->state == BT_CONNECTED && 379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 380 if (lmp_esco_capable(hdev)) 381 hci_setup_sync(sco, acl->handle); 382 else
··· 377 378 if (acl->state == BT_CONNECTED && 379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 380 + acl->power_save = 1; 381 + hci_conn_enter_active_mode(acl); 382 + 383 if (lmp_esco_capable(hdev)) 384 hci_setup_sync(sco, acl->handle); 385 else
+1
net/bluetooth/hci_event.c
··· 1699 break; 1700 1701 case 0x1c: /* SCO interval rejected */ 1702 case 0x1f: /* Unspecified error */ 1703 if (conn->out && conn->attempt < 2) { 1704 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
··· 1699 break; 1700 1701 case 0x1c: /* SCO interval rejected */ 1702 + case 0x1a: /* Unsupported Remote Feature */ 1703 case 0x1f: /* Unspecified error */ 1704 if (conn->out && conn->attempt < 2) { 1705 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
+24 -25
net/bluetooth/hidp/core.c
··· 703 static int hidp_parse(struct hid_device *hid) 704 { 705 struct hidp_session *session = hid->driver_data; 706 - struct hidp_connadd_req *req = session->req; 707 - unsigned char *buf; 708 - int ret; 709 710 - buf = kmalloc(req->rd_size, GFP_KERNEL); 711 - if (!buf) 712 - return -ENOMEM; 713 - 714 - if (copy_from_user(buf, req->rd_data, req->rd_size)) { 715 - kfree(buf); 716 - return -EFAULT; 717 - } 718 - 719 - ret = hid_parse_report(session->hid, buf, req->rd_size); 720 - 721 - kfree(buf); 722 - 723 - if (ret) 724 - return ret; 725 - 726 - session->req = NULL; 727 - 728 - return 0; 729 } 730 731 static int hidp_start(struct hid_device *hid) ··· 750 bdaddr_t src, dst; 751 int err; 752 753 hid = hid_allocate_device(); 754 - if (IS_ERR(hid)) 755 - return PTR_ERR(hid); 756 757 session->hid = hid; 758 - session->req = req; 759 hid->driver_data = session; 760 761 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); ··· 797 failed: 798 hid_destroy_device(hid); 799 session->hid = NULL; 800 801 return err; 802 } ··· 895 hid_destroy_device(session->hid); 896 session->hid = NULL; 897 } 898 899 purge: 900 skb_queue_purge(&session->ctrl_transmit);
··· 703 static int hidp_parse(struct hid_device *hid) 704 { 705 struct hidp_session *session = hid->driver_data; 706 707 + return hid_parse_report(session->hid, session->rd_data, 708 + session->rd_size); 709 } 710 711 static int hidp_start(struct hid_device *hid) ··· 770 bdaddr_t src, dst; 771 int err; 772 773 + session->rd_data = kzalloc(req->rd_size, GFP_KERNEL); 774 + if (!session->rd_data) 775 + return -ENOMEM; 776 + 777 + if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) { 778 + err = -EFAULT; 779 + goto fault; 780 + } 781 + session->rd_size = req->rd_size; 782 + 783 hid = hid_allocate_device(); 784 + if (IS_ERR(hid)) { 785 + err = PTR_ERR(hid); 786 + goto fault; 787 + } 788 789 session->hid = hid; 790 + 791 hid->driver_data = session; 792 793 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); ··· 805 failed: 806 hid_destroy_device(hid); 807 session->hid = NULL; 808 + 809 + fault: 810 + kfree(session->rd_data); 811 + session->rd_data = NULL; 812 813 return err; 814 } ··· 899 hid_destroy_device(session->hid); 900 session->hid = NULL; 901 } 902 + 903 + kfree(session->rd_data); 904 + session->rd_data = NULL; 905 906 purge: 907 skb_queue_purge(&session->ctrl_transmit);
+3 -1
net/bluetooth/hidp/hidp.h
··· 154 struct sk_buff_head ctrl_transmit; 155 struct sk_buff_head intr_transmit; 156 157 - struct hidp_connadd_req *req; 158 }; 159 160 static inline void hidp_schedule(struct hidp_session *session)
··· 154 struct sk_buff_head ctrl_transmit; 155 struct sk_buff_head intr_transmit; 156 157 + /* Report descriptor */ 158 + __u8 *rd_data; 159 + uint rd_size; 160 }; 161 162 static inline void hidp_schedule(struct hidp_session *session)
+6 -2
net/bluetooth/rfcomm/core.c
··· 252 BT_DBG("session %p state %ld", s, s->state); 253 254 set_bit(RFCOMM_TIMED_OUT, &s->flags); 255 - rfcomm_session_put(s); 256 rfcomm_schedule(RFCOMM_SCHED_TIMEO); 257 } 258 ··· 1150 break; 1151 1152 case BT_DISCONN: 1153 - rfcomm_session_put(s); 1154 break; 1155 } 1156 } ··· 1923 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { 1924 s->state = BT_DISCONN; 1925 rfcomm_send_disc(s, 0); 1926 continue; 1927 } 1928
··· 252 BT_DBG("session %p state %ld", s, s->state); 253 254 set_bit(RFCOMM_TIMED_OUT, &s->flags); 255 rfcomm_schedule(RFCOMM_SCHED_TIMEO); 256 } 257 ··· 1151 break; 1152 1153 case BT_DISCONN: 1154 + /* When socket is closed and we are not RFCOMM 1155 + * initiator rfcomm_process_rx already calls 1156 + * rfcomm_session_put() */ 1157 + if (s->sock->sk->sk_state != BT_CLOSED) 1158 + rfcomm_session_put(s); 1159 break; 1160 } 1161 } ··· 1920 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { 1921 s->state = BT_DISCONN; 1922 rfcomm_send_disc(s, 0); 1923 + rfcomm_session_put(s); 1924 continue; 1925 } 1926
+2
net/core/dst.c
··· 17 #include <linux/string.h> 18 #include <linux/types.h> 19 #include <net/net_namespace.h> 20 21 #include <net/dst.h> 22 ··· 80 while ((dst = next) != NULL) { 81 next = dst->next; 82 prefetch(&next->next); 83 if (likely(atomic_read(&dst->__refcnt))) { 84 last->next = dst; 85 last = dst;
··· 17 #include <linux/string.h> 18 #include <linux/types.h> 19 #include <net/net_namespace.h> 20 + #include <linux/sched.h> 21 22 #include <net/dst.h> 23 ··· 79 while ((dst = next) != NULL) { 80 next = dst->next; 81 prefetch(&next->next); 82 + cond_resched(); 83 if (likely(atomic_read(&dst->__refcnt))) { 84 last->next = dst; 85 last = dst;
+1
net/core/pktgen.c
··· 3524 wait_event_interruptible_timeout(t->queue, 3525 t->control != 0, 3526 HZ/10); 3527 continue; 3528 } 3529
··· 3524 wait_event_interruptible_timeout(t->queue, 3525 t->control != 0, 3526 HZ/10); 3527 + try_to_freeze(); 3528 continue; 3529 } 3530
+1 -1
net/dccp/ccid.c
··· 83 va_list args; 84 85 va_start(args, fmt); 86 - vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); 87 va_end(args); 88 89 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
··· 83 va_list args; 84 85 va_start(args, fmt); 86 + vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args); 87 va_end(args); 88 89 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
+5 -3
net/dccp/ccid.h
··· 19 #include <linux/list.h> 20 #include <linux/module.h> 21 22 - #define CCID_MAX 255 23 24 struct tcp_info; 25 ··· 51 const char *ccid_name; 52 struct kmem_cache *ccid_hc_rx_slab, 53 *ccid_hc_tx_slab; 54 - char ccid_hc_rx_slab_name[32]; 55 - char ccid_hc_tx_slab_name[32]; 56 __u32 ccid_hc_rx_obj_size, 57 ccid_hc_tx_obj_size; 58 /* Interface Routines */
··· 19 #include <linux/list.h> 20 #include <linux/module.h> 21 22 + /* maximum value for a CCID (RFC 4340, 19.5) */ 23 + #define CCID_MAX 255 24 + #define CCID_SLAB_NAME_LENGTH 32 25 26 struct tcp_info; 27 ··· 49 const char *ccid_name; 50 struct kmem_cache *ccid_hc_rx_slab, 51 *ccid_hc_tx_slab; 52 + char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH]; 53 + char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH]; 54 __u32 ccid_hc_rx_obj_size, 55 ccid_hc_tx_obj_size; 56 /* Interface Routines */
+2 -2
net/dccp/probe.c
··· 161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) 162 goto err0; 163 164 - ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0), 165 - "dccp"); 166 if (ret) 167 goto err1; 168
··· 161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) 162 goto err0; 163 164 + try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0, 165 + "dccp"); 166 if (ret) 167 goto err1; 168
+2 -2
net/ipv4/netfilter/arp_tables.c
··· 925 if (t && !IS_ERR(t)) { 926 struct arpt_getinfo info; 927 const struct xt_table_info *private = t->private; 928 - 929 #ifdef CONFIG_COMPAT 930 if (compat) { 931 - struct xt_table_info tmp; 932 ret = compat_table_info(private, &tmp); 933 xt_compat_flush_offsets(NFPROTO_ARP); 934 private = &tmp;
··· 925 if (t && !IS_ERR(t)) { 926 struct arpt_getinfo info; 927 const struct xt_table_info *private = t->private; 928 #ifdef CONFIG_COMPAT 929 + struct xt_table_info tmp; 930 + 931 if (compat) { 932 ret = compat_table_info(private, &tmp); 933 xt_compat_flush_offsets(NFPROTO_ARP); 934 private = &tmp;
+2 -2
net/ipv4/netfilter/ip_tables.c
··· 1132 if (t && !IS_ERR(t)) { 1133 struct ipt_getinfo info; 1134 const struct xt_table_info *private = t->private; 1135 - 1136 #ifdef CONFIG_COMPAT 1137 if (compat) { 1138 - struct xt_table_info tmp; 1139 ret = compat_table_info(private, &tmp); 1140 xt_compat_flush_offsets(AF_INET); 1141 private = &tmp;
··· 1132 if (t && !IS_ERR(t)) { 1133 struct ipt_getinfo info; 1134 const struct xt_table_info *private = t->private; 1135 #ifdef CONFIG_COMPAT 1136 + struct xt_table_info tmp; 1137 + 1138 if (compat) { 1139 ret = compat_table_info(private, &tmp); 1140 xt_compat_flush_offsets(AF_INET); 1141 private = &tmp;
+1 -1
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
··· 210 }, 211 { 212 .procname = "ip_conntrack_buckets", 213 - .data = &nf_conntrack_htable_size, 214 .maxlen = sizeof(unsigned int), 215 .mode = 0444, 216 .proc_handler = proc_dointvec,
··· 210 }, 211 { 212 .procname = "ip_conntrack_buckets", 213 + .data = &init_net.ct.htable_size, 214 .maxlen = sizeof(unsigned int), 215 .mode = 0444, 216 .proc_handler = proc_dointvec,
+2 -2
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
··· 32 struct hlist_nulls_node *n; 33 34 for (st->bucket = 0; 35 - st->bucket < nf_conntrack_htable_size; 36 st->bucket++) { 37 n = rcu_dereference(net->ct.hash[st->bucket].first); 38 if (!is_a_nulls(n)) ··· 50 head = rcu_dereference(head->next); 51 while (is_a_nulls(head)) { 52 if (likely(get_nulls_value(head) == st->bucket)) { 53 - if (++st->bucket >= nf_conntrack_htable_size) 54 return NULL; 55 } 56 head = rcu_dereference(net->ct.hash[st->bucket].first);
··· 32 struct hlist_nulls_node *n; 33 34 for (st->bucket = 0; 35 + st->bucket < net->ct.htable_size; 36 st->bucket++) { 37 n = rcu_dereference(net->ct.hash[st->bucket].first); 38 if (!is_a_nulls(n)) ··· 50 head = rcu_dereference(head->next); 51 while (is_a_nulls(head)) { 52 if (likely(get_nulls_value(head) == st->bucket)) { 53 + if (++st->bucket >= net->ct.htable_size) 54 return NULL; 55 } 56 head = rcu_dereference(net->ct.hash[st->bucket].first);
+9 -13
net/ipv4/netfilter/nf_nat_core.c
··· 35 36 static struct nf_conntrack_l3proto *l3proto __read_mostly; 37 38 - /* Calculated at init based on memory size */ 39 - static unsigned int nf_nat_htable_size __read_mostly; 40 - 41 #define MAX_IP_NAT_PROTO 256 42 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] 43 __read_mostly; ··· 69 70 /* We keep an extra hash for each conntrack, for fast searching. */ 71 static inline unsigned int 72 - hash_by_src(const struct nf_conntrack_tuple *tuple) 73 { 74 unsigned int hash; 75 ··· 77 hash = jhash_3words((__force u32)tuple->src.u3.ip, 78 (__force u32)tuple->src.u.all, 79 tuple->dst.protonum, 0); 80 - return ((u64)hash * nf_nat_htable_size) >> 32; 81 } 82 83 /* Is this tuple already taken? (not by us) */ ··· 144 struct nf_conntrack_tuple *result, 145 const struct nf_nat_range *range) 146 { 147 - unsigned int h = hash_by_src(tuple); 148 const struct nf_conn_nat *nat; 149 const struct nf_conn *ct; 150 const struct hlist_node *n; ··· 327 if (have_to_hash) { 328 unsigned int srchash; 329 330 - srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 331 spin_lock_bh(&nf_nat_lock); 332 /* nf_conntrack_alter_reply might re-allocate exntension aera */ 333 nat = nfct_nat(ct); ··· 676 677 static int __net_init nf_nat_net_init(struct net *net) 678 { 679 - net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 680 - &net->ipv4.nat_vmalloced, 0); 681 if (!net->ipv4.nat_bysource) 682 return -ENOMEM; 683 return 0; ··· 702 nf_ct_iterate_cleanup(net, &clean_nat, NULL); 703 synchronize_rcu(); 704 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, 705 - nf_nat_htable_size); 706 } 707 708 static struct pernet_operations nf_nat_net_ops = { ··· 722 printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); 723 return ret; 724 } 725 - 726 - /* Leave them the same for the moment. */ 727 - nf_nat_htable_size = nf_conntrack_htable_size; 728 729 ret = register_pernet_subsys(&nf_nat_net_ops); 730 if (ret < 0)
··· 35 36 static struct nf_conntrack_l3proto *l3proto __read_mostly; 37 38 #define MAX_IP_NAT_PROTO 256 39 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] 40 __read_mostly; ··· 72 73 /* We keep an extra hash for each conntrack, for fast searching. */ 74 static inline unsigned int 75 + hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) 76 { 77 unsigned int hash; 78 ··· 80 hash = jhash_3words((__force u32)tuple->src.u3.ip, 81 (__force u32)tuple->src.u.all, 82 tuple->dst.protonum, 0); 83 + return ((u64)hash * net->ipv4.nat_htable_size) >> 32; 84 } 85 86 /* Is this tuple already taken? (not by us) */ ··· 147 struct nf_conntrack_tuple *result, 148 const struct nf_nat_range *range) 149 { 150 + unsigned int h = hash_by_src(net, tuple); 151 const struct nf_conn_nat *nat; 152 const struct nf_conn *ct; 153 const struct hlist_node *n; ··· 330 if (have_to_hash) { 331 unsigned int srchash; 332 333 + srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 334 spin_lock_bh(&nf_nat_lock); 335 /* nf_conntrack_alter_reply might re-allocate exntension aera */ 336 nat = nfct_nat(ct); ··· 679 680 static int __net_init nf_nat_net_init(struct net *net) 681 { 682 + /* Leave them the same for the moment. */ 683 + net->ipv4.nat_htable_size = net->ct.htable_size; 684 + net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 685 + &net->ipv4.nat_vmalloced, 0); 686 if (!net->ipv4.nat_bysource) 687 return -ENOMEM; 688 return 0; ··· 703 nf_ct_iterate_cleanup(net, &clean_nat, NULL); 704 synchronize_rcu(); 705 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, 706 + net->ipv4.nat_htable_size); 707 } 708 709 static struct pernet_operations nf_nat_net_ops = { ··· 723 printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); 724 return ret; 725 } 726 727 ret = register_pernet_subsys(&nf_nat_net_ops); 728 if (ret < 0)
+2 -2
net/ipv6/netfilter/ip6_tables.c
··· 1164 if (t && !IS_ERR(t)) { 1165 struct ip6t_getinfo info; 1166 const struct xt_table_info *private = t->private; 1167 - 1168 #ifdef CONFIG_COMPAT 1169 if (compat) { 1170 - struct xt_table_info tmp; 1171 ret = compat_table_info(private, &tmp); 1172 xt_compat_flush_offsets(AF_INET6); 1173 private = &tmp;
··· 1164 if (t && !IS_ERR(t)) { 1165 struct ip6t_getinfo info; 1166 const struct xt_table_info *private = t->private; 1167 #ifdef CONFIG_COMPAT 1168 + struct xt_table_info tmp; 1169 + 1170 if (compat) { 1171 ret = compat_table_info(private, &tmp); 1172 xt_compat_flush_offsets(AF_INET6); 1173 private = &tmp;
+4 -1
net/irda/irnet/irnet_ppp.c
··· 698 699 /* Query PPP channel and unit number */ 700 case PPPIOCGCHAN: 701 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), 702 (int __user *)argp)) 703 err = 0; 704 break; 705 case PPPIOCGUNIT: 706 lock_kernel(); 707 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), 708 (int __user *)argp)) 709 - err = 0; 710 break; 711 712 /* All these ioctls can be passed both directly and from ppp_generic,
··· 698 699 /* Query PPP channel and unit number */ 700 case PPPIOCGCHAN: 701 + lock_kernel(); 702 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), 703 (int __user *)argp)) 704 err = 0; 705 + unlock_kernel(); 706 break; 707 case PPPIOCGUNIT: 708 lock_kernel(); 709 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), 710 (int __user *)argp)) 711 + err = 0; 712 + unlock_kernel(); 713 break; 714 715 /* All these ioctls can be passed both directly and from ppp_generic,
+8 -7
net/key/af_key.c
··· 3794 3795 static void __exit ipsec_pfkey_exit(void) 3796 { 3797 - unregister_pernet_subsys(&pfkey_net_ops); 3798 xfrm_unregister_km(&pfkeyv2_mgr); 3799 sock_unregister(PF_KEY); 3800 proto_unregister(&key_proto); 3801 } 3802 ··· 3807 if (err != 0) 3808 goto out; 3809 3810 - err = sock_register(&pfkey_family_ops); 3811 if (err != 0) 3812 goto out_unregister_key_proto; 3813 err = xfrm_register_km(&pfkeyv2_mgr); 3814 if (err != 0) 3815 goto out_sock_unregister; 3816 - err = register_pernet_subsys(&pfkey_net_ops); 3817 - if (err != 0) 3818 - goto out_xfrm_unregister_km; 3819 out: 3820 return err; 3821 - out_xfrm_unregister_km: 3822 - xfrm_unregister_km(&pfkeyv2_mgr); 3823 out_sock_unregister: 3824 sock_unregister(PF_KEY); 3825 out_unregister_key_proto: 3826 proto_unregister(&key_proto); 3827 goto out;
··· 3794 3795 static void __exit ipsec_pfkey_exit(void) 3796 { 3797 xfrm_unregister_km(&pfkeyv2_mgr); 3798 sock_unregister(PF_KEY); 3799 + unregister_pernet_subsys(&pfkey_net_ops); 3800 proto_unregister(&key_proto); 3801 } 3802 ··· 3807 if (err != 0) 3808 goto out; 3809 3810 + err = register_pernet_subsys(&pfkey_net_ops); 3811 if (err != 0) 3812 goto out_unregister_key_proto; 3813 + err = sock_register(&pfkey_family_ops); 3814 + if (err != 0) 3815 + goto out_unregister_pernet; 3816 err = xfrm_register_km(&pfkeyv2_mgr); 3817 if (err != 0) 3818 goto out_sock_unregister; 3819 out: 3820 return err; 3821 + 3822 out_sock_unregister: 3823 sock_unregister(PF_KEY); 3824 + out_unregister_pernet: 3825 + unregister_pernet_subsys(&pfkey_net_ops); 3826 out_unregister_key_proto: 3827 proto_unregister(&key_proto); 3828 goto out;
+63 -53
net/netfilter/nf_conntrack_core.c
··· 30 #include <linux/netdevice.h> 31 #include <linux/socket.h> 32 #include <linux/mm.h> 33 #include <linux/rculist_nulls.h> 34 35 #include <net/netfilter/nf_conntrack.h> ··· 64 struct nf_conn nf_conntrack_untracked __read_mostly; 65 EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 66 67 - static struct kmem_cache *nf_conntrack_cachep __read_mostly; 68 - 69 static int nf_conntrack_hash_rnd_initted; 70 static unsigned int nf_conntrack_hash_rnd; 71 ··· 85 return ((u64)h * size) >> 32; 86 } 87 88 - static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) 89 { 90 - return __hash_conntrack(tuple, nf_conntrack_htable_size, 91 nf_conntrack_hash_rnd); 92 } 93 ··· 296 { 297 struct nf_conntrack_tuple_hash *h; 298 struct hlist_nulls_node *n; 299 - unsigned int hash = hash_conntrack(tuple); 300 301 /* Disable BHs the entire time since we normally need to disable them 302 * at least once for the stats anyway. ··· 366 367 void nf_conntrack_hash_insert(struct nf_conn *ct) 368 { 369 unsigned int hash, repl_hash; 370 371 - hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 372 - repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 373 374 __nf_conntrack_hash_insert(ct, hash, repl_hash); 375 } ··· 398 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 399 return NF_ACCEPT; 400 401 - hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 402 - repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 403 404 /* We're not in hash table, and we refuse to set up related 405 connections for unconfirmed conns. But packet copies and ··· 469 struct net *net = nf_ct_net(ignored_conntrack); 470 struct nf_conntrack_tuple_hash *h; 471 struct hlist_nulls_node *n; 472 - unsigned int hash = hash_conntrack(tuple); 473 474 /* Disable BHs the entire time since we need to disable them at 475 * least once for the stats anyway. ··· 504 int dropped = 0; 505 506 rcu_read_lock(); 507 - for (i = 0; i < nf_conntrack_htable_size; i++) { 508 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], 509 hnnode) { 510 tmp = nf_ct_tuplehash_to_ctrack(h); ··· 524 if (cnt >= NF_CT_EVICTION_RANGE) 525 break; 526 527 - hash = (hash + 1) % nf_conntrack_htable_size; 528 } 529 rcu_read_unlock(); 530 ··· 558 559 if (nf_conntrack_max && 560 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 561 - unsigned int hash = hash_conntrack(orig); 562 if (!early_drop(net, hash)) { 563 atomic_dec(&net->ct.count); 564 if (net_ratelimit()) ··· 573 * Do not use kmem_cache_zalloc(), as this cache uses 574 * SLAB_DESTROY_BY_RCU. 
575 */ 576 - ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); 577 if (ct == NULL) { 578 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); 579 atomic_dec(&net->ct.count); ··· 612 nf_ct_ext_destroy(ct); 613 atomic_dec(&net->ct.count); 614 nf_ct_ext_free(ct); 615 - kmem_cache_free(nf_conntrack_cachep, ct); 616 } 617 EXPORT_SYMBOL_GPL(nf_conntrack_free); 618 ··· 1015 struct hlist_nulls_node *n; 1016 1017 spin_lock_bh(&nf_conntrack_lock); 1018 - for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 1019 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { 1020 ct = nf_ct_tuplehash_to_ctrack(h); 1021 if (iter(ct, data)) ··· 1114 1115 static void nf_conntrack_cleanup_init_net(void) 1116 { 1117 nf_conntrack_helper_fini(); 1118 nf_conntrack_proto_fini(); 1119 - kmem_cache_destroy(nf_conntrack_cachep); 1120 } 1121 1122 static void nf_conntrack_cleanup_net(struct net *net) ··· 1131 schedule(); 1132 goto i_see_dead_people; 1133 } 1134 - /* wait until all references to nf_conntrack_untracked are dropped */ 1135 - while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) 1136 - schedule(); 1137 1138 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1139 - nf_conntrack_htable_size); 1140 nf_conntrack_ecache_fini(net); 1141 nf_conntrack_acct_fini(net); 1142 nf_conntrack_expect_fini(net); 1143 free_percpu(net->ct.stat); 1144 } 1145 ··· 1193 { 1194 int i, bucket, vmalloced, old_vmalloced; 1195 unsigned int hashsize, old_size; 1196 - int rnd; 1197 struct hlist_nulls_head *hash, *old_hash; 1198 struct nf_conntrack_tuple_hash *h; 1199 1200 /* On boot, we can set this without any fancy locking. */ 1201 if (!nf_conntrack_htable_size) ··· 1211 if (!hash) 1212 return -ENOMEM; 1213 1214 - /* We have to rehahs for the new table anyway, so we also can 1215 - * use a newrandom seed */ 1216 - get_random_bytes(&rnd, sizeof(rnd)); 1217 - 1218 /* Lookups in the old hash might happen in parallel, which means we 1219 * might get false negatives during connection lookup. New connections 1220 * created because of a false negative won't make it into the hash 1221 * though since that required taking the lock. 
1222 */ 1223 spin_lock_bh(&nf_conntrack_lock); 1224 - for (i = 0; i < nf_conntrack_htable_size; i++) { 1225 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1226 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1227 struct nf_conntrack_tuple_hash, hnnode); 1228 hlist_nulls_del_rcu(&h->hnnode); 1229 - bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1230 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1231 } 1232 } 1233 - old_size = nf_conntrack_htable_size; 1234 old_vmalloced = init_net.ct.hash_vmalloc; 1235 old_hash = init_net.ct.hash; 1236 1237 - nf_conntrack_htable_size = hashsize; 1238 init_net.ct.hash_vmalloc = vmalloced; 1239 init_net.ct.hash = hash; 1240 - nf_conntrack_hash_rnd = rnd; 1241 spin_unlock_bh(&nf_conntrack_lock); 1242 1243 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); ··· 1272 NF_CONNTRACK_VERSION, nf_conntrack_htable_size, 1273 nf_conntrack_max); 1274 1275 - nf_conntrack_cachep = kmem_cache_create("nf_conntrack", 1276 - sizeof(struct nf_conn), 1277 - 0, SLAB_DESTROY_BY_RCU, NULL); 1278 - if (!nf_conntrack_cachep) { 1279 - printk(KERN_ERR "Unable to create nf_conn slab cache\n"); 1280 - ret = -ENOMEM; 1281 - goto err_cache; 1282 - } 1283 - 1284 ret = nf_conntrack_proto_init(); 1285 if (ret < 0) 1286 goto err_proto; ··· 1280 if (ret < 0) 1281 goto err_helper; 1282 1283 return 0; 1284 1285 err_helper: 1286 nf_conntrack_proto_fini(); 1287 err_proto: 1288 - kmem_cache_destroy(nf_conntrack_cachep); 1289 - err_cache: 1290 return ret; 1291 } 1292 ··· 1314 ret = -ENOMEM; 1315 goto err_stat; 1316 } 1317 - net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1318 &net->ct.hash_vmalloc, 1); 1319 if (!net->ct.hash) { 1320 ret = -ENOMEM; ··· 1348 if (ret < 0) 1349 goto err_ecache; 1350 1351 - /* Set up fake conntrack: 1352 - - to never be deleted, not in any hashes */ 1353 - #ifdef CONFIG_NET_NS 1354 - nf_conntrack_untracked.ct_net = &init_net; 1355 - #endif 1356 - atomic_set(&nf_conntrack_untracked.ct_general.use, 1); 1357 - /* - and look it like as a confirmed connection */ 1358 - set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); 1359 - 1360 return 0; 1361 1362 err_ecache: ··· 1356 nf_conntrack_expect_fini(net); 1357 err_expect: 1358 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1359 - nf_conntrack_htable_size); 1360 err_hash: 1361 free_percpu(net->ct.stat); 1362 err_stat: 1363 return ret;
··· 30 #include <linux/netdevice.h> 31 #include <linux/socket.h> 32 #include <linux/mm.h> 33 + #include <linux/nsproxy.h> 34 #include <linux/rculist_nulls.h> 35 36 #include <net/netfilter/nf_conntrack.h> ··· 63 struct nf_conn nf_conntrack_untracked __read_mostly; 64 EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 65 66 static int nf_conntrack_hash_rnd_initted; 67 static unsigned int nf_conntrack_hash_rnd; 68 ··· 86 return ((u64)h * size) >> 32; 87 } 88 89 + static inline u_int32_t hash_conntrack(const struct net *net, 90 + const struct nf_conntrack_tuple *tuple) 91 { 92 + return __hash_conntrack(tuple, net->ct.htable_size, 93 nf_conntrack_hash_rnd); 94 } 95 ··· 296 { 297 struct nf_conntrack_tuple_hash *h; 298 struct hlist_nulls_node *n; 299 + unsigned int hash = hash_conntrack(net, tuple); 300 301 /* Disable BHs the entire time since we normally need to disable them 302 * at least once for the stats anyway. ··· 366 367 void nf_conntrack_hash_insert(struct nf_conn *ct) 368 { 369 + struct net *net = nf_ct_net(ct); 370 unsigned int hash, repl_hash; 371 372 + hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 373 + repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 374 375 __nf_conntrack_hash_insert(ct, hash, repl_hash); 376 } ··· 397 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 398 return NF_ACCEPT; 399 400 + hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 401 + repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 402 403 /* We're not in hash table, and we refuse to set up related 404 connections for unconfirmed conns. But packet copies and ··· 468 struct net *net = nf_ct_net(ignored_conntrack); 469 struct nf_conntrack_tuple_hash *h; 470 struct hlist_nulls_node *n; 471 + unsigned int hash = hash_conntrack(net, tuple); 472 473 /* Disable BHs the entire time since we need to disable them at 474 * least once for the stats anyway. ··· 503 int dropped = 0; 504 505 rcu_read_lock(); 506 + for (i = 0; i < net->ct.htable_size; i++) { 507 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], 508 hnnode) { 509 tmp = nf_ct_tuplehash_to_ctrack(h); ··· 523 if (cnt >= NF_CT_EVICTION_RANGE) 524 break; 525 526 + hash = (hash + 1) % net->ct.htable_size; 527 } 528 rcu_read_unlock(); 529 ··· 557 558 if (nf_conntrack_max && 559 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 560 + unsigned int hash = hash_conntrack(net, orig); 561 if (!early_drop(net, hash)) { 562 atomic_dec(&net->ct.count); 563 if (net_ratelimit()) ··· 572 * Do not use kmem_cache_zalloc(), as this cache uses 573 * SLAB_DESTROY_BY_RCU. 
574 */ 575 + ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); 576 if (ct == NULL) { 577 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); 578 atomic_dec(&net->ct.count); ··· 611 nf_ct_ext_destroy(ct); 612 atomic_dec(&net->ct.count); 613 nf_ct_ext_free(ct); 614 + kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 615 } 616 EXPORT_SYMBOL_GPL(nf_conntrack_free); 617 ··· 1014 struct hlist_nulls_node *n; 1015 1016 spin_lock_bh(&nf_conntrack_lock); 1017 + for (; *bucket < net->ct.htable_size; (*bucket)++) { 1018 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { 1019 ct = nf_ct_tuplehash_to_ctrack(h); 1020 if (iter(ct, data)) ··· 1113 1114 static void nf_conntrack_cleanup_init_net(void) 1115 { 1116 + /* wait until all references to nf_conntrack_untracked are dropped */ 1117 + while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) 1118 + schedule(); 1119 + 1120 nf_conntrack_helper_fini(); 1121 nf_conntrack_proto_fini(); 1122 } 1123 1124 static void nf_conntrack_cleanup_net(struct net *net) ··· 1127 schedule(); 1128 goto i_see_dead_people; 1129 } 1130 1131 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1132 + net->ct.htable_size); 1133 nf_conntrack_ecache_fini(net); 1134 nf_conntrack_acct_fini(net); 1135 nf_conntrack_expect_fini(net); 1136 + kmem_cache_destroy(net->ct.nf_conntrack_cachep); 1137 + kfree(net->ct.slabname); 1138 free_percpu(net->ct.stat); 1139 } 1140 ··· 1190 { 1191 int i, bucket, vmalloced, old_vmalloced; 1192 unsigned int hashsize, old_size; 1193 struct hlist_nulls_head *hash, *old_hash; 1194 struct nf_conntrack_tuple_hash *h; 1195 + 1196 + if (current->nsproxy->net_ns != &init_net) 1197 + return -EOPNOTSUPP; 1198 1199 /* On boot, we can set this without any fancy locking. */ 1200 if (!nf_conntrack_htable_size) ··· 1206 if (!hash) 1207 return -ENOMEM; 1208 1209 /* Lookups in the old hash might happen in parallel, which means we 1210 * might get false negatives during connection lookup. New connections 1211 * created because of a false negative won't make it into the hash 1212 * though since that required taking the lock. 
1213 */ 1214 spin_lock_bh(&nf_conntrack_lock); 1215 + for (i = 0; i < init_net.ct.htable_size; i++) { 1216 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1217 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1218 struct nf_conntrack_tuple_hash, hnnode); 1219 hlist_nulls_del_rcu(&h->hnnode); 1220 + bucket = __hash_conntrack(&h->tuple, hashsize, 1221 + nf_conntrack_hash_rnd); 1222 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1223 } 1224 } 1225 + old_size = init_net.ct.htable_size; 1226 old_vmalloced = init_net.ct.hash_vmalloc; 1227 old_hash = init_net.ct.hash; 1228 1229 + init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; 1230 init_net.ct.hash_vmalloc = vmalloced; 1231 init_net.ct.hash = hash; 1232 spin_unlock_bh(&nf_conntrack_lock); 1233 1234 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); ··· 1271 NF_CONNTRACK_VERSION, nf_conntrack_htable_size, 1272 nf_conntrack_max); 1273 1274 ret = nf_conntrack_proto_init(); 1275 if (ret < 0) 1276 goto err_proto; ··· 1288 if (ret < 0) 1289 goto err_helper; 1290 1291 + /* Set up fake conntrack: to never be deleted, not in any hashes */ 1292 + #ifdef CONFIG_NET_NS 1293 + nf_conntrack_untracked.ct_net = &init_net; 1294 + #endif 1295 + atomic_set(&nf_conntrack_untracked.ct_general.use, 1); 1296 + /* - and look it like as a confirmed connection */ 1297 + set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); 1298 + 1299 return 0; 1300 1301 err_helper: 1302 nf_conntrack_proto_fini(); 1303 err_proto: 1304 return ret; 1305 } 1306 ··· 1316 ret = -ENOMEM; 1317 goto err_stat; 1318 } 1319 + 1320 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); 1321 + if (!net->ct.slabname) { 1322 + ret = -ENOMEM; 1323 + goto err_slabname; 1324 + } 1325 + 1326 + net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, 1327 + sizeof(struct nf_conn), 0, 1328 + SLAB_DESTROY_BY_RCU, NULL); 1329 + if (!net->ct.nf_conntrack_cachep) { 1330 + printk(KERN_ERR "Unable to create nf_conn slab cache\n"); 1331 + ret = -ENOMEM; 1332 + goto err_cache; 1333 + } 1334 + 1335 + net->ct.htable_size = nf_conntrack_htable_size; 1336 + net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1337 &net->ct.hash_vmalloc, 1); 1338 if (!net->ct.hash) { 1339 ret = -ENOMEM; ··· 1333 if (ret < 0) 1334 goto err_ecache; 1335 1336 return 0; 1337 1338 err_ecache: ··· 1350 nf_conntrack_expect_fini(net); 1351 err_expect: 1352 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1353 + net->ct.htable_size); 1354 err_hash: 1355 + kmem_cache_destroy(net->ct.nf_conntrack_cachep); 1356 + err_cache: 1357 + kfree(net->ct.slabname); 1358 + err_slabname: 1359 free_percpu(net->ct.stat); 1360 err_stat: 1361 return ret;
+2 -2
net/netfilter/nf_conntrack_expect.c
··· 569 #endif /* CONFIG_PROC_FS */ 570 } 571 572 - module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); 573 574 int nf_conntrack_expect_init(struct net *net) 575 { ··· 577 578 if (net_eq(net, &init_net)) { 579 if (!nf_ct_expect_hsize) { 580 - nf_ct_expect_hsize = nf_conntrack_htable_size / 256; 581 if (!nf_ct_expect_hsize) 582 nf_ct_expect_hsize = 1; 583 }
··· 569 #endif /* CONFIG_PROC_FS */ 570 } 571 572 + module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); 573 574 int nf_conntrack_expect_init(struct net *net) 575 { ··· 577 578 if (net_eq(net, &init_net)) { 579 if (!nf_ct_expect_hsize) { 580 + nf_ct_expect_hsize = net->ct.htable_size / 256; 581 if (!nf_ct_expect_hsize) 582 nf_ct_expect_hsize = 1; 583 }
+1 -1
net/netfilter/nf_conntrack_helper.c
··· 192 /* Get rid of expecteds, set helpers to NULL. */ 193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) 194 unhelp(h, me); 195 - for (i = 0; i < nf_conntrack_htable_size; i++) { 196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 197 unhelp(h, me); 198 }
··· 192 /* Get rid of expecteds, set helpers to NULL. */ 193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) 194 unhelp(h, me); 195 + for (i = 0; i < net->ct.htable_size; i++) { 196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 197 unhelp(h, me); 198 }
+1 -1
net/netfilter/nf_conntrack_netlink.c
··· 594 595 rcu_read_lock(); 596 last = (struct nf_conn *)cb->args[1]; 597 - for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 598 restart: 599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 600 hnnode) {
··· 594 595 rcu_read_lock(); 596 last = (struct nf_conn *)cb->args[1]; 597 + for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { 598 restart: 599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 600 hnnode) {
+4 -3
net/netfilter/nf_conntrack_standalone.c
··· 51 struct hlist_nulls_node *n; 52 53 for (st->bucket = 0; 54 - st->bucket < nf_conntrack_htable_size; 55 st->bucket++) { 56 n = rcu_dereference(net->ct.hash[st->bucket].first); 57 if (!is_a_nulls(n)) ··· 69 head = rcu_dereference(head->next); 70 while (is_a_nulls(head)) { 71 if (likely(get_nulls_value(head) == st->bucket)) { 72 - if (++st->bucket >= nf_conntrack_htable_size) 73 return NULL; 74 } 75 head = rcu_dereference(net->ct.hash[st->bucket].first); ··· 355 }, 356 { 357 .procname = "nf_conntrack_buckets", 358 - .data = &nf_conntrack_htable_size, 359 .maxlen = sizeof(unsigned int), 360 .mode = 0444, 361 .proc_handler = proc_dointvec, ··· 421 goto out_kmemdup; 422 423 table[1].data = &net->ct.count; 424 table[3].data = &net->ct.sysctl_checksum; 425 table[4].data = &net->ct.sysctl_log_invalid; 426
··· 51 struct hlist_nulls_node *n; 52 53 for (st->bucket = 0; 54 + st->bucket < net->ct.htable_size; 55 st->bucket++) { 56 n = rcu_dereference(net->ct.hash[st->bucket].first); 57 if (!is_a_nulls(n)) ··· 69 head = rcu_dereference(head->next); 70 while (is_a_nulls(head)) { 71 if (likely(get_nulls_value(head) == st->bucket)) { 72 + if (++st->bucket >= net->ct.htable_size) 73 return NULL; 74 } 75 head = rcu_dereference(net->ct.hash[st->bucket].first); ··· 355 }, 356 { 357 .procname = "nf_conntrack_buckets", 358 + .data = &init_net.ct.htable_size, 359 .maxlen = sizeof(unsigned int), 360 .mode = 0444, 361 .proc_handler = proc_dointvec, ··· 421 goto out_kmemdup; 422 423 table[1].data = &net->ct.count; 424 + table[2].data = &net->ct.htable_size; 425 table[3].data = &net->ct.sysctl_checksum; 426 table[4].data = &net->ct.sysctl_log_invalid; 427
+5
net/netlink/af_netlink.c
··· 455 if (nl_table[protocol].registered && 456 try_module_get(nl_table[protocol].module)) 457 module = nl_table[protocol].module; 458 cb_mutex = nl_table[protocol].cb_mutex; 459 netlink_unlock_table(); 460 461 err = __netlink_create(net, sock, cb_mutex, protocol); 462 if (err < 0)
··· 455 if (nl_table[protocol].registered && 456 try_module_get(nl_table[protocol].module)) 457 module = nl_table[protocol].module; 458 + else 459 + err = -EPROTONOSUPPORT; 460 cb_mutex = nl_table[protocol].cb_mutex; 461 netlink_unlock_table(); 462 + 463 + if (err < 0) 464 + goto out; 465 466 err = __netlink_create(net, sock, cb_mutex, protocol); 467 if (err < 0)
+8 -8
net/sched/Kconfig
··· 433 module. 434 435 To compile this code as a module, choose M here: the 436 - module will be called police. 437 438 config NET_ACT_GACT 439 tristate "Generic actions" ··· 443 accepting packets. 444 445 To compile this code as a module, choose M here: the 446 - module will be called gact. 447 448 config GACT_PROB 449 bool "Probability support" ··· 459 other devices. 460 461 To compile this code as a module, choose M here: the 462 - module will be called mirred. 463 464 config NET_ACT_IPT 465 tristate "IPtables targets" ··· 469 classification. 470 471 To compile this code as a module, choose M here: the 472 - module will be called ipt. 473 474 config NET_ACT_NAT 475 tristate "Stateless NAT" ··· 479 netfilter for NAT unless you know what you are doing. 480 481 To compile this code as a module, choose M here: the 482 - module will be called nat. 483 484 config NET_ACT_PEDIT 485 tristate "Packet Editing" ··· 488 Say Y here if you want to mangle the content of packets. 489 490 To compile this code as a module, choose M here: the 491 - module will be called pedit. 492 493 config NET_ACT_SIMP 494 tristate "Simple Example (Debug)" ··· 502 If unsure, say N. 503 504 To compile this code as a module, choose M here: the 505 - module will be called simple. 506 507 config NET_ACT_SKBEDIT 508 tristate "SKB Editing" ··· 513 If unsure, say N. 514 515 To compile this code as a module, choose M here: the 516 - module will be called skbedit. 517 518 config NET_CLS_IND 519 bool "Incoming device classification"
··· 433 module. 434 435 To compile this code as a module, choose M here: the 436 + module will be called act_police. 437 438 config NET_ACT_GACT 439 tristate "Generic actions" ··· 443 accepting packets. 444 445 To compile this code as a module, choose M here: the 446 + module will be called act_gact. 447 448 config GACT_PROB 449 bool "Probability support" ··· 459 other devices. 460 461 To compile this code as a module, choose M here: the 462 + module will be called act_mirred. 463 464 config NET_ACT_IPT 465 tristate "IPtables targets" ··· 469 classification. 470 471 To compile this code as a module, choose M here: the 472 + module will be called act_ipt. 473 474 config NET_ACT_NAT 475 tristate "Stateless NAT" ··· 479 netfilter for NAT unless you know what you are doing. 480 481 To compile this code as a module, choose M here: the 482 + module will be called act_nat. 483 484 config NET_ACT_PEDIT 485 tristate "Packet Editing" ··· 488 Say Y here if you want to mangle the content of packets. 489 490 To compile this code as a module, choose M here: the 491 + module will be called act_pedit. 492 493 config NET_ACT_SIMP 494 tristate "Simple Example (Debug)" ··· 502 If unsure, say N. 503 504 To compile this code as a module, choose M here: the 505 + module will be called act_simple. 506 507 config NET_ACT_SKBEDIT 508 tristate "SKB Editing" ··· 513 If unsure, say N. 514 515 To compile this code as a module, choose M here: the 516 + module will be called act_skbedit. 517 518 config NET_CLS_IND 519 bool "Incoming device classification"