Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[VLAN]: Avoid a 4-order allocation.
[HDLC] Fix dev->header_cache_update having a random value.
[NetLabel]: Verify sensitivity level has a valid CIPSO mapping
[PPPOE]: Key connections properly on local device.
[AF_UNIX]: Test against sk_max_ack_backlog properly.
[NET]: Fix bugs in "Whether sock accept queue is full" checking

+165 -130
+1 -2
drivers/net/8139cp.c
···
        spin_lock_irqsave(&cp->lock, flags);
        cp->cpcmd &= ~RxVlanOn;
        cpw16(CpCmd, cp->cpcmd);
-       if (cp->vlgrp)
-               cp->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(cp->vlgrp, vid, NULL);
        spin_unlock_irqrestore(&cp->lock, flags);
 }
 #endif /* CP_VLAN_TAG_USED */
+1 -4
drivers/net/acenic.c
···
 
        local_irq_save(flags);
        ace_mask_irq(dev);
-
-       if (ap->vlgrp)
-               ap->vlgrp->vlan_devices[vid] = NULL;
-
+       vlan_group_set_device(ap->vlgrp, vid, NULL);
        ace_unmask_irq(dev);
        local_irq_restore(flags);
 }
+1 -2
drivers/net/amd8111e.c
···
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        spin_lock_irq(&lp->lock);
-       if (lp->vlgrp)
-               lp->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(lp->vlgrp, vid, NULL);
        spin_unlock_irq(&lp->lock);
 }
 #endif
+2 -3
drivers/net/atl1/atl1_main.c
···
 
        spin_lock_irqsave(&adapter->lock, flags);
        /* atl1_irq_disable(adapter); */
-       if (adapter->vlgrp)
-               adapter->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(adapter->vlgrp, vid, NULL);
        /* atl1_irq_enable(adapter); */
        spin_unlock_irqrestore(&adapter->lock, flags);
        /* We don't do Vlan filtering */
···
        if (adapter->vlgrp) {
                u16 vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-                       if (!adapter->vlgrp->vlan_devices[vid])
+                       if (!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        atl1_vlan_rx_add_vid(adapter->netdev, vid);
                }
+1 -3
drivers/net/bnx2.c
···
        struct bnx2 *bp = netdev_priv(dev);
 
        bnx2_netif_stop(bp);
-
-       if (bp->vlgrp)
-               bp->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(bp->vlgrp, vid, NULL);
        bnx2_set_rx_mode(dev);
 
        bnx2_netif_start(bp);
+7 -7
drivers/net/bonding/bond_main.c
···
                /* Save and then restore vlan_dev in the grp array,
                 * since the slave's driver might clear it.
                 */
-               vlan_dev = bond->vlgrp->vlan_devices[vid];
+               vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
                slave_dev->vlan_rx_kill_vid(slave_dev, vid);
-               bond->vlgrp->vlan_devices[vid] = vlan_dev;
+               vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
        }
 }
···
                /* Save and then restore vlan_dev in the grp array,
                 * since the slave's driver might clear it.
                 */
-               vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+               vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
                slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
-               bond->vlgrp->vlan_devices[vlan->vlan_id] = vlan_dev;
+               vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
        }
 
 unreg:
···
        vlan_id = 0;
        list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
                                 vlan_list) {
-               vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+               vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
                if (vlan_dev == rt->u.dst.dev) {
                        vlan_id = vlan->vlan_id;
                        dprintk("basa: vlan match on %s %d\n",
···
        }
 
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-               vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+               vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
                if (vlan->vlan_ip) {
                        bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
                                      vlan->vlan_ip, vlan->vlan_id);
···
 
        list_for_each_entry_safe(vlan, vlan_next, &bond->vlan_list,
                                 vlan_list) {
-               vlan_dev = bond->vlgrp->vlan_devices[vlan->vlan_id];
+               vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
                if (vlan_dev == event_dev) {
                        switch (event) {
                        case NETDEV_UP:
+1 -2
drivers/net/chelsio/cxgb2.c
···
        struct adapter *adapter = dev->priv;
 
        spin_lock_irq(&adapter->async_lock);
-       if (adapter->vlan_grp)
-               adapter->vlan_grp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(adapter->vlan_grp, vid, NULL);
        spin_unlock_irq(&adapter->async_lock);
 }
 #endif
+4 -2
drivers/net/cxgb3/cxgb3_offload.c
···
        int i;
 
        for_each_port(adapter, i) {
-               const struct vlan_group *grp;
+               struct vlan_group *grp;
                struct net_device *dev = adapter->port[i];
                const struct port_info *p = netdev_priv(dev);
 
                if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
                        if (vlan && vlan != VLAN_VID_MASK) {
                                grp = p->vlan_grp;
-                               dev = grp ? grp->vlan_devices[vlan] : NULL;
+                               dev = NULL;
+                               if (grp)
+                                       dev = vlan_group_get_device(grp, vlan);
                        } else
                                while (dev->master)
                                        dev = dev->master;
+5 -8
drivers/net/e1000/e1000_main.c
···
        uint16_t vid = adapter->hw.mng_cookie.vlan_id;
        uint16_t old_vid = adapter->mng_vlan_id;
        if (adapter->vlgrp) {
-               if (!adapter->vlgrp->vlan_devices[vid]) {
+               if (!vlan_group_get_device(adapter->vlgrp, vid)) {
                        if (adapter->hw.mng_cookie.status &
                                E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
                                e1000_vlan_rx_add_vid(netdev, vid);
···
 
                if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
                                (vid != old_vid) &&
-                   !adapter->vlgrp->vlan_devices[old_vid])
+                   !vlan_group_get_device(adapter->vlgrp, old_vid))
                        e1000_vlan_rx_kill_vid(netdev, old_vid);
        } else
                adapter->mng_vlan_id = vid;
···
        if ((adapter->hw.mng_cookie.status &
                        E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
             !(adapter->vlgrp &&
-                       adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
+              vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
        }
···
        uint32_t vfta, index;
 
        e1000_irq_disable(adapter);
-
-       if (adapter->vlgrp)
-               adapter->vlgrp->vlan_devices[vid] = NULL;
-
+       vlan_group_set_device(adapter->vlgrp, vid, NULL);
        e1000_irq_enable(adapter);
 
        if ((adapter->hw.mng_cookie.status &
···
        if (adapter->vlgrp) {
                uint16_t vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-                       if (!adapter->vlgrp->vlan_devices[vid])
+                       if (!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        e1000_vlan_rx_add_vid(adapter->netdev, vid);
                }
+1 -2
drivers/net/ehea/ehea_main.c
···
        int index;
        u64 hret;
 
-       if (port->vgrp)
-               port->vgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(port->vgrp, vid, NULL);
 
        cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb1) {
+1 -2
drivers/net/gianfar.c
···
 
        spin_lock_irqsave(&priv->rxlock, flags);
 
-       if (priv->vlgrp)
-               priv->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(priv->vlgrp, vid, NULL);
 
        spin_unlock_irqrestore(&priv->rxlock, flags);
 }
+2 -3
drivers/net/ixgb/ixgb_main.c
···
 
        ixgb_irq_disable(adapter);
 
-       if(adapter->vlgrp)
-               adapter->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(adapter->vlgrp, vid, NULL);
 
        ixgb_irq_enable(adapter);
 
···
        if(adapter->vlgrp) {
                uint16_t vid;
                for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-                       if(!adapter->vlgrp->vlan_devices[vid])
+                       if(!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        ixgb_vlan_rx_add_vid(adapter->netdev, vid);
                }
+1 -2
drivers/net/ns83820.c
···
 
        spin_lock_irq(&dev->misc_lock);
        spin_lock(&dev->tx_lock);
-       if (dev->vlgrp)
-               dev->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(dev->vlgrp, vid, NULL);
        spin_unlock(&dev->tx_lock);
        spin_unlock_irq(&dev->misc_lock);
 }
+32 -20
drivers/net/pppoe.c
···
  *
  * Version:    0.7.0
  *
+ * 070228 :   Fix to allow multiple sessions with same remote MAC and same
+ *            session id by including the local device ifindex in the
+ *            tuple identifying a session. This also ensures packets can't
+ *            be injected into a session from interfaces other than the one
+ *            specified by userspace. Florian Zumbiehl <florz@florz.de>
+ *            (Oh, BTW, this one is YYMMDD, in case you were wondering ...)
  * 220102 :   Fix module use count on failure in pppoe_create, pppox_sk -acme
  * 030700 :   Fixed connect logic to allow for disconnect.
  * 270700 :   Fixed potential SMP problems; we must protect against
···
  * Set/get/delete/rehash items (internal versions)
  *
  **********************************************************************/
-static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr)
+static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex)
 {
        int hash = hash_item(sid, addr);
        struct pppox_sock *ret;
 
        ret = item_hash_table[hash];
 
-       while (ret && !cmp_addr(&ret->pppoe_pa, sid, addr))
+       while (ret && !(cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex))
                ret = ret->next;
 
        return ret;
···
 
        ret = item_hash_table[hash];
        while (ret) {
-               if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa))
+               if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_dev->ifindex == po->pppoe_dev->ifindex)
                        return -EALREADY;
 
                ret = ret->next;
        }
 
-       if (!ret) {
-               po->next = item_hash_table[hash];
-               item_hash_table[hash] = po;
-       }
+       po->next = item_hash_table[hash];
+       item_hash_table[hash] = po;
 
        return 0;
 }
 
-static struct pppox_sock *__delete_item(unsigned long sid, char *addr)
+static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifindex)
 {
        int hash = hash_item(sid, addr);
        struct pppox_sock *ret, **src;
···
        src = &item_hash_table[hash];
 
        while (ret) {
-               if (cmp_addr(&ret->pppoe_pa, sid, addr)) {
+               if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_dev->ifindex == ifindex) {
                        *src = ret->next;
                        break;
                }
···
  *
  **********************************************************************/
 static inline struct pppox_sock *get_item(unsigned long sid,
-                                        unsigned char *addr)
+                                        unsigned char *addr, int ifindex)
 {
        struct pppox_sock *po;
 
        read_lock_bh(&pppoe_hash_lock);
-       po = __get_item(sid, addr);
+       po = __get_item(sid, addr, ifindex);
        if (po)
                sock_hold(sk_pppox(po));
        read_unlock_bh(&pppoe_hash_lock);
···
 
 static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp)
 {
-       return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote);
+       struct net_device *dev = NULL;
+       int ifindex;
+
+       dev = dev_get_by_name(sp->sa_addr.pppoe.dev);
+       if(!dev)
+               return NULL;
+       ifindex = dev->ifindex;
+       dev_put(dev);
+       return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex);
 }
···
        return i;
 }
 
-static inline struct pppox_sock *delete_item(unsigned long sid, char *addr)
+static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex)
 {
        struct pppox_sock *ret;
 
        write_lock_bh(&pppoe_hash_lock);
-       ret = __delete_item(sid, addr);
+       ret = __delete_item(sid, addr, ifindex);
        write_unlock_bh(&pppoe_hash_lock);
 
        return ret;
···
 
        ph = (struct pppoe_hdr *) skb->nh.raw;
 
-       po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
+       po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
        if (po != NULL)
                return sk_receive_skb(sk_pppox(po), skb, 0);
 drop:
···
        if (ph->code != PADT_CODE)
                goto abort;
 
-       po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source);
+       po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
        if (po) {
                struct sock *sk = sk_pppox(po);
 
···
 
        po = pppox_sk(sk);
        if (po->pppoe_pa.sid) {
-               delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+               delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_dev->ifindex);
        }
 
        if (po->pppoe_dev)
···
                  int sockaddr_len, int flags)
 {
        struct sock *sk = sock->sk;
-       struct net_device *dev = NULL;
+       struct net_device *dev;
        struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
        struct pppox_sock *po = pppox_sk(sk);
        int error;
···
                pppox_unbind_sock(sk);
 
                /* Delete the old binding */
-               delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote);
+               delete_item(po->pppoe_pa.sid,po->pppoe_pa.remote,po->pppoe_dev->ifindex);
 
                if(po->pppoe_dev)
                        dev_put(po->pppoe_dev);
···
                break;
 
        /* PPPoE address from the user specifies an outbound
-          PPPoE address to which frames are forwarded to */
+          PPPoE address which frames are forwarded to */
        err = -EFAULT;
        if (copy_from_user(&po->pppoe_relay,
                           (void __user *)arg,
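
For context on the keying change above, here is a minimal user-space sketch (the struct and names are illustrative, not the kernel's pppox_sock) of the three-field tuple the hash lookup now compares. Before the change, two access concentrators reusing the same session id and source MAC on different local devices collided in the hash chain, and a session or PADT frame arriving on the wrong interface could still be matched to the session:

#include <stdio.h>
#include <string.h>

/* Illustrative session key: the tuple the patched lookup compares. */
struct pppoe_key {
        unsigned long sid;       /* PPPoE session id */
        unsigned char remote[6]; /* peer MAC address */
        int ifindex;             /* local device the session is bound to */
};

/* A session only matches if all three fields agree; before the patch
 * the ifindex comparison was missing. */
static int key_match(const struct pppoe_key *a, const struct pppoe_key *b)
{
        return a->sid == b->sid &&
               memcmp(a->remote, b->remote, 6) == 0 &&
               a->ifindex == b->ifindex;
}

int main(void)
{
        unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        struct pppoe_key on_eth0 = { 0x1234, {0}, 2 };
        struct pppoe_key on_eth1 = { 0x1234, {0}, 3 };

        memcpy(on_eth0.remote, mac, 6);
        memcpy(on_eth1.remote, mac, 6);

        /* Same sid and MAC, different local device: no longer one session. */
        printf("match: %d\n", key_match(&on_eth0, &on_eth1)); /* prints 0 */
        return 0;
}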
+1 -2
drivers/net/r8169.c
···
        unsigned long flags;
 
        spin_lock_irqsave(&tp->lock, flags);
-       if (tp->vlgrp)
-               tp->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(tp->vlgrp, vid, NULL);
        spin_unlock_irqrestore(&tp->lock, flags);
 }
+1 -2
drivers/net/s2io.c
···
        unsigned long flags;
 
        spin_lock_irqsave(&nic->tx_lock, flags);
-       if (nic->vlgrp)
-               nic->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(nic->vlgrp, vid, NULL);
        spin_unlock_irqrestore(&nic->tx_lock, flags);
 }
+1 -2
drivers/net/sky2.c
···
 
        sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
-       if (sky2->vlgrp)
-               sky2->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(sky2->vlgrp, vid, NULL);
 
        netif_tx_unlock_bh(dev);
 }
+2 -3
drivers/net/starfire.c
···
        spin_lock(&np->lock);
        if (debug > 1)
                printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
-       if (np->vlgrp)
-               np->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(np->vlgrp, vid, NULL);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
 }
···
                int vlan_count = 0;
                void __iomem *filter_addr = ioaddr + HashTable + 8;
                for (i = 0; i < VLAN_VID_MASK; i++) {
-                       if (np->vlgrp->vlan_devices[i]) {
+                       if (vlan_group_get_device(np->vlgrp, i)) {
                                if (vlan_count >= 32)
                                        break;
                                writew(cpu_to_be16(i), filter_addr);
+1 -2
drivers/net/tg3.c
···
        tg3_netif_stop(tp);
 
        tg3_full_lock(tp, 0);
-       if (tp->vlgrp)
-               tp->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(tp->vlgrp, vid, NULL);
        tg3_full_unlock(tp);
 
        if (netif_running(dev))
+1 -2
drivers/net/typhoon.c
···
 {
        struct typhoon *tp = netdev_priv(dev);
        spin_lock_bh(&tp->state_lock);
-       if(tp->vlgrp)
-               tp->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(tp->vlgrp, vid, NULL);
        spin_unlock_bh(&tp->state_lock);
 }
+23 -10
drivers/net/wan/hdlc.c
···
 #include <linux/hdlc.h>
 
 
-static const char* version = "HDLC support module revision 1.20";
+static const char* version = "HDLC support module revision 1.21";
 
 #undef DEBUG_LINK
 
···
        return -EINVAL;
 }
 
+static void hdlc_setup_dev(struct net_device *dev)
+{
+       /* Re-init all variables changed by HDLC protocol drivers,
+        * including ether_setup() called from hdlc_raw_eth.c.
+        */
+       dev->get_stats = hdlc_get_stats;
+       dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+       dev->mtu = HDLC_MAX_MTU;
+       dev->type = ARPHRD_RAWHDLC;
+       dev->hard_header_len = 16;
+       dev->addr_len = 0;
+       dev->hard_header = NULL;
+       dev->rebuild_header = NULL;
+       dev->set_mac_address = NULL;
+       dev->hard_header_cache = NULL;
+       dev->header_cache_update = NULL;
+       dev->change_mtu = hdlc_change_mtu;
+       dev->hard_header_parse = NULL;
+}
+
 static void hdlc_setup(struct net_device *dev)
 {
        hdlc_device *hdlc = dev_to_hdlc(dev);
 
-       dev->get_stats = hdlc_get_stats;
-       dev->change_mtu = hdlc_change_mtu;
-       dev->mtu = HDLC_MAX_MTU;
-
-       dev->type = ARPHRD_RAWHDLC;
-       dev->hard_header_len = 16;
-
-       dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-
+       hdlc_setup_dev(dev);
        hdlc->carrier = 1;
        hdlc->open = 0;
        spin_lock_init(&hdlc->state_lock);
···
        }
        kfree(hdlc->state);
        hdlc->state = NULL;
+       hdlc_setup_dev(dev);
 }
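
The shape of this fix, sketched in user space (a toy ops table, not the kernel's net_device): protocol attach fills in function pointers, and detach must restore every one of them to the HDLC defaults, otherwise a field such as header_cache_update keeps whatever the previous protocol (for instance ether_setup() from hdlc_raw_eth) left behind:

#include <stdio.h>

/* Illustrative device-ops struct, standing in for net_device fields. */
struct dev_ops {
        void (*header_cache_update)(void);
        void (*hard_header)(void);
};

static void eth_cache_update(void) { puts("eth cache update"); }

/* Mimics hdlc_setup_dev(): reset every op a protocol may have set,
 * so nothing stale survives a protocol detach. */
static void reset_to_hdlc_defaults(struct dev_ops *ops)
{
        ops->header_cache_update = NULL;
        ops->hard_header = NULL;
}

int main(void)
{
        struct dev_ops ops = { 0 };

        /* A protocol attach (e.g. raw-eth) fills in ops... */
        ops.header_cache_update = eth_cache_update;

        /* ...and detach must clear them again; before the fix, the next
         * protocol saw whatever the previous one left behind. */
        reset_to_hdlc_defaults(&ops);
        printf("cache update is %s\n",
               ops.header_cache_update ? "set" : "NULL");
        return 0;
}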
-3
drivers/net/wan/hdlc_cisco.c
···
                memcpy(&state(hdlc)->settings, &new_settings, size);
                dev->hard_start_xmit = hdlc->xmit;
                dev->hard_header = cisco_hard_header;
-               dev->hard_header_cache = NULL;
                dev->type = ARPHRD_CISCO;
-               dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-               dev->addr_len = 0;
                netif_dormant_on(dev);
                return 0;
        }
-3
drivers/net/wan/hdlc_fr.c
···
                memcpy(&state(hdlc)->settings, &new_settings, size);
 
                dev->hard_start_xmit = hdlc->xmit;
-               dev->hard_header = NULL;
                dev->type = ARPHRD_FRAD;
-               dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-               dev->addr_len = 0;
                return 0;
 
        case IF_PROTO_FR_ADD_PVC:
-2
drivers/net/wan/hdlc_ppp.c
···
                if (result)
                        return result;
                dev->hard_start_xmit = hdlc->xmit;
-               dev->hard_header = NULL;
                dev->type = ARPHRD_PPP;
-               dev->addr_len = 0;
                netif_dormant_off(dev);
                return 0;
        }
-3
drivers/net/wan/hdlc_raw.c
···
                        return result;
                memcpy(hdlc->state, &new_settings, size);
                dev->hard_start_xmit = hdlc->xmit;
-               dev->hard_header = NULL;
                dev->type = ARPHRD_RAWHDLC;
-               dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-               dev->addr_len = 0;
                netif_dormant_off(dev);
                return 0;
        }
-2
drivers/net/wan/hdlc_x25.c
···
                                       x25_rx, 0)) != 0)
                        return result;
                dev->hard_start_xmit = x25_xmit;
-               dev->hard_header = NULL;
                dev->type = ARPHRD_X25;
-               dev->addr_len = 0;
                netif_dormant_off(dev);
                return 0;
        }
+13 -12
drivers/s390/net/qeth_main.c
···
                return rc;
 
        for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){
-               if (vg->vlan_devices[i] == dev){
+               if (vlan_group_get_device(vg, i) == dev){
                        rc = QETH_VLAN_CARD;
                        break;
                }
···
        QETH_DBF_TEXT(trace, 4, "frvaddr4");
 
        rcu_read_lock();
-       in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
+       in_dev = __in_dev_get_rcu(vlan_group_get_device(card->vlangrp, vid));
        if (!in_dev)
                goto out;
        for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
···
 
        QETH_DBF_TEXT(trace, 4, "frvaddr6");
 
-       in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]);
+       in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
        if (!in6_dev)
                return;
        for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){
···
        if (!card->vlangrp)
                return;
        for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-               if (card->vlangrp->vlan_devices[i] == NULL)
+               if (vlan_group_get_device(card->vlangrp, i) == NULL)
                        continue;
                if (clear)
                        qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN);
···
        spin_lock_irqsave(&card->vlanlock, flags);
        /* unregister IP addresses of vlan device */
        qeth_free_vlan_addresses(card, vid);
-       if (card->vlangrp)
-               card->vlangrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(card->vlangrp, vid, NULL);
        spin_unlock_irqrestore(&card->vlanlock, flags);
        if (card->options.layer2)
                qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
···
 
        vg = card->vlangrp;
        for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-               if (vg->vlan_devices[i] == NULL ||
-                   !(vg->vlan_devices[i]->flags & IFF_UP))
+               struct net_device *netdev = vlan_group_get_device(vg, i);
+               if (netdev == NULL ||
+                   !(netdev->flags & IFF_UP))
                        continue;
-               in_dev = in_dev_get(vg->vlan_devices[i]);
+               in_dev = in_dev_get(netdev);
                if (!in_dev)
                        continue;
                read_lock(&in_dev->mc_list_lock);
···
 
        vg = card->vlangrp;
        for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-               if (vg->vlan_devices[i] == NULL ||
-                   !(vg->vlan_devices[i]->flags & IFF_UP))
+               struct net_device *netdev = vlan_group_get_device(vg, i);
+               if (netdev == NULL ||
+                   !(netdev->flags & IFF_UP))
                        continue;
-               in_dev = in6_dev_get(vg->vlan_devices[i]);
+               in_dev = in6_dev_get(netdev);
                if (!in_dev)
                        continue;
                read_lock(&in_dev->lock);
+22 -3
include/linux/if_vlan.h
···
  * depends on completely exhausting the VLAN identifier space.  Thus
  * it gives constant time look-up, but in many cases it wastes memory.
  */
-#define VLAN_GROUP_ARRAY_LEN 4096
+#define VLAN_GROUP_ARRAY_LEN          4096
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
+#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS)
 
 struct vlan_group {
        int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */
        struct hlist_node hlist;        /* linked list */
-       struct net_device *vlan_devices[VLAN_GROUP_ARRAY_LEN];
+       struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
        struct rcu_head rcu;
 };
 
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, int vlan_id)
+{
+       struct net_device **array;
+       array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+       return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN];
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+                                        struct net_device *dev)
+{
+       struct net_device **array;
+       if (!vg)
+               return;
+       array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+       array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
+}
+
 struct vlan_priority_tci_mapping {
        unsigned long priority;
···
                return NET_RX_DROP;
        }
 
-       skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
+       skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
        if (skb->dev == NULL) {
                dev_kfree_skb_any(skb);
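
The arithmetic behind the split, modeled in user space (assuming 8-byte pointers and 4 KiB pages; the group_* names are illustrative stand-ins for vlan_group_get_device()/vlan_group_set_device()): 4096 pointers in a single kmalloc() is 32 KiB plus the enclosing struct, which slab rounding turns into the high-order allocation the commit title refers to, while each of the 8 parts holds 512 pointers, exactly one order-0 page. Note also that the set helper tolerates a NULL group, which is what lets the per-driver "if (vlgrp)" checks above disappear.

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_LEN   4096  /* VLAN_GROUP_ARRAY_LEN */
#define SPLIT_PARTS 8     /* VLAN_GROUP_ARRAY_SPLIT_PARTS */
#define PART_LEN    (ARRAY_LEN / SPLIT_PARTS)  /* 512 entries per part */

/* User-space model of the split group: 8 separately allocated parts of
 * 512 pointers (4 KiB each on 64-bit) instead of one 32 KiB block. */
struct group {
        void **parts[SPLIT_PARTS];
};

static void *group_get(struct group *g, int id)
{
        return g->parts[id / PART_LEN][id % PART_LEN];
}

static void group_set(struct group *g, int id, void *p)
{
        g->parts[id / PART_LEN][id % PART_LEN] = p;
}

int main(void)
{
        struct group g;
        int i, dummy;

        for (i = 0; i < SPLIT_PARTS; i++)
                g.parts[i] = calloc(PART_LEN, sizeof(void *));

        group_set(&g, 4000, &dummy);  /* lands in part 7, slot 416 */
        printf("id %d -> part %d slot %d, hit=%d\n",
               4000, 4000 / PART_LEN, 4000 % PART_LEN,
               group_get(&g, 4000) == &dummy);

        for (i = 0; i < SPLIT_PARTS; i++)
                free(g.parts[i]);
        return 0;
}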
+1 -1
include/net/sock.h
···
 
 static inline int sk_acceptq_is_full(struct sock *sk)
 {
-       return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+       return sk->sk_ack_backlog >= sk->sk_max_ack_backlog;
 }
 
 /*
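
A trivial boundary check on this comparison (plain arithmetic, nothing kernel-specific): with a backlog limit of N, the old '>' test only reports the queue full once N+1 sockets are already queued, so a listener with a backlog of 0 still admitted one connection; '>=' makes the limit mean what it says.

#include <stdio.h>

int main(void)
{
        unsigned int max_backlog = 5, queued;

        /* The queue should read as full once 'queued' reaches the
         * limit; the old '>' test is off by one at the boundary. */
        for (queued = 4; queued <= 6; queued++)
                printf("queued=%u  old(>):%d  new(>=):%d\n", queued,
                       queued > max_backlog, queued >= max_backlog);
        return 0;
}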
+32 -10
net/8021q/vlan.c
···
        struct vlan_group *grp = __vlan_find_group(real_dev->ifindex);
 
        if (grp)
-               return grp->vlan_devices[VID];
+               return vlan_group_get_device(grp, VID);
 
        return NULL;
 }
 
+static void vlan_group_free(struct vlan_group *grp)
+{
+       int i;
+
+       for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+               kfree(grp->vlan_devices_arrays[i]);
+       kfree(grp);
+}
+
 static void vlan_rcu_free(struct rcu_head *rcu)
 {
-       kfree(container_of(rcu, struct vlan_group, rcu));
+       vlan_group_free(container_of(rcu, struct vlan_group, rcu));
 }
 
···
        ret = 0;
 
        if (grp) {
-               dev = grp->vlan_devices[vlan_id];
+               dev = vlan_group_get_device(grp, vlan_id);
                if (dev) {
                        /* Remove proc entry */
                        vlan_proc_rem_dev(dev);
···
                        real_dev->vlan_rx_kill_vid(real_dev, vlan_id);
                }
 
-               grp->vlan_devices[vlan_id] = NULL;
+               vlan_group_set_device(grp, vlan_id, NULL);
                synchronize_net();
 
···
                 * group.
                 */
                for (i = 0; i < VLAN_VID_MASK; i++)
-                       if (grp->vlan_devices[i])
+                       if (vlan_group_get_device(grp, i))
                                break;
 
                if (i == VLAN_VID_MASK) {
···
        struct net_device *new_dev;
        struct net_device *real_dev; /* the ethernet device */
        char name[IFNAMSIZ];
+       int i;
 
 #ifdef VLAN_DEBUG
        printk(VLAN_DBG "%s: if_name -:%s:- vid: %i\n",
···
        if (!grp)
                goto out_free_unregister;
 
+       for (i=0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
+               grp->vlan_devices_arrays[i] = kzalloc(
+                       sizeof(struct net_device *)*VLAN_GROUP_ARRAY_PART_LEN,
+                       GFP_KERNEL);
+
+               if (!grp->vlan_devices_arrays[i])
+                       goto out_free_arrays;
+       }
+
        /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */
        grp->real_dev_ifindex = real_dev->ifindex;
 
···
                real_dev->vlan_rx_register(real_dev, grp);
        }
 
-       grp->vlan_devices[VLAN_ID] = new_dev;
+       vlan_group_set_device(grp, VLAN_ID, new_dev);
 
        if (vlan_proc_add_dev(new_dev)<0)/* create it's proc entry */
                printk(KERN_WARNING "VLAN: failed to add proc entry for %s\n",
···
        printk(VLAN_DBG "Allocated new device successfully, returning.\n");
 #endif
        return new_dev;
+
+out_free_arrays:
+       vlan_group_free(grp);
 
 out_free_unregister:
        unregister_netdev(new_dev);
···
        case NETDEV_CHANGE:
                /* Propagate real device state to vlan devices */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-                       vlandev = grp->vlan_devices[i];
+                       vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;
 
···
        case NETDEV_DOWN:
                /* Put all VLANs for this dev in the down state too. */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-                       vlandev = grp->vlan_devices[i];
+                       vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;
 
···
        case NETDEV_UP:
                /* Put all VLANs for this dev in the up state too. */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
-                       vlandev = grp->vlan_devices[i];
+                       vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;
 
···
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        int ret;
 
-                       vlandev = grp->vlan_devices[i];
+                       vlandev = vlan_group_get_device(grp, i);
                        if (!vlandev)
                                continue;
 
+4 -3
net/ipv4/cipso_ipv4.c
···
                *net_lvl = host_lvl;
                return 0;
        case CIPSO_V4_MAP_STD:
-               if (host_lvl < doi_def->map.std->lvl.local_size) {
+               if (host_lvl < doi_def->map.std->lvl.local_size &&
+                   doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
                        *net_lvl = doi_def->map.std->lvl.local[host_lvl];
                        return 0;
                }
-               break;
+               return -EPERM;
        }
 
        return -EINVAL;
···
                        *host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
                        return 0;
                }
-               break;
+               return -EPERM;
        }
 
        return -EINVAL;
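
What the added condition guards against, sketched in user space (the table contents and sentinel value are illustrative; the kernel marks unmapped slots with CIPSO_V4_INV_LVL): a host level can be inside the table bounds yet hold the invalid-mapping sentinel, and the old code handed that sentinel back as if it were a real level, while the patched check rejects it with -EPERM.

#include <stdio.h>

#define INV_LVL 0x80000000u  /* stands in for CIPSO_V4_INV_LVL */

/* Illustrative host->network level map: slot 2 has no CIPSO mapping. */
static const unsigned int local_map[] = { 10, 11, INV_LVL, 13 };
#define MAP_SIZE (sizeof(local_map) / sizeof(local_map[0]))

/* Mirrors the patched check: the bounds test alone is not enough,
 * the table entry itself must also be a valid (non-sentinel) level. */
static int host_to_net(unsigned int host_lvl, unsigned int *net_lvl)
{
        if (host_lvl < MAP_SIZE && local_map[host_lvl] < INV_LVL) {
                *net_lvl = local_map[host_lvl];
                return 0;
        }
        return -1;  /* the kernel code returns -EPERM here */
}

int main(void)
{
        unsigned int net;

        printf("lvl 1: %s\n", host_to_net(1, &net) ? "rejected" : "mapped");
        printf("lvl 2: %s\n", host_to_net(2, &net) ? "rejected" : "mapped");
        return 0;
}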
+3 -3
net/unix/af_unix.c
···
 
        sched = !sock_flag(other, SOCK_DEAD) &&
                !(other->sk_shutdown & RCV_SHUTDOWN) &&
-               (skb_queue_len(&other->sk_receive_queue) >
+               (skb_queue_len(&other->sk_receive_queue) >=
                 other->sk_max_ack_backlog);
 
        unix_state_runlock(other);
···
        if (other->sk_state != TCP_LISTEN)
                goto out_unlock;
 
-       if (skb_queue_len(&other->sk_receive_queue) >
+       if (skb_queue_len(&other->sk_receive_queue) >=
            other->sk_max_ack_backlog) {
                err = -EAGAIN;
                if (!timeo)
···
        }
 
        if (unix_peer(other) != sk &&
-           (skb_queue_len(&other->sk_receive_queue) >
+           (skb_queue_len(&other->sk_receive_queue) >=
             other->sk_max_ack_backlog)) {
                if (!timeo) {
                        err = -EAGAIN;