Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
drivers/net/sfc/net_driver.h
drivers/net/sfc/siena.c

+324 -271
+3 -3
drivers/isdn/capi/kcapi.c
··· 1020 1020 if (cmd == AVMB1_ADDCARD) { 1021 1021 if ((retval = copy_from_user(&cdef, data, 1022 1022 sizeof(avmb1_carddef)))) 1023 - return retval; 1023 + return -EFAULT; 1024 1024 cdef.cardtype = AVM_CARDTYPE_B1; 1025 1025 } else { 1026 1026 if ((retval = copy_from_user(&cdef, data, 1027 1027 sizeof(avmb1_extcarddef)))) 1028 - return retval; 1028 + return -EFAULT; 1029 1029 } 1030 1030 cparams.port = cdef.port; 1031 1031 cparams.irq = cdef.irq; ··· 1218 1218 kcapi_carddef cdef; 1219 1219 1220 1220 if ((retval = copy_from_user(&cdef, data, sizeof(cdef)))) 1221 - return retval; 1221 + return -EFAULT; 1222 1222 1223 1223 cparams.port = cdef.port; 1224 1224 cparams.irq = cdef.irq;
+2 -2
drivers/isdn/hardware/mISDN/netjet.c
··· 320 320 return -ENOMEM; 321 321 } 322 322 for (i = 0; i < 2; i++) { 323 - card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL); 323 + card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC); 324 324 if (!card->bc[i].hsbuf) { 325 325 pr_info("%s: no B%d send buffer\n", card->name, i + 1); 326 326 return -ENOMEM; 327 327 } 328 - card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL); 328 + card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC); 329 329 if (!card->bc[i].hrbuf) { 330 330 pr_info("%s: no B%d recv buffer\n", card->name, i + 1); 331 331 return -ENOMEM;
+1 -1
drivers/net/benet/be_cmds.c
··· 1592 1592 1593 1593 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 1594 1594 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); 1595 - req->hdr.timeout = 4; 1595 + req->hdr.timeout = cpu_to_le32(4); 1596 1596 1597 1597 req->pattern = cpu_to_le64(pattern); 1598 1598 req->src_port = cpu_to_le32(port_num);
+13 -1
drivers/net/bnx2.c
··· 247 247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); 248 248 249 249 static void bnx2_init_napi(struct bnx2 *bp); 250 + static void bnx2_del_napi(struct bnx2 *bp); 250 251 251 252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) 252 253 { ··· 6271 6270 bnx2_free_skbs(bp); 6272 6271 bnx2_free_irq(bp); 6273 6272 bnx2_free_mem(bp); 6273 + bnx2_del_napi(bp); 6274 6274 return rc; 6275 6275 } 6276 6276 ··· 6539 6537 bnx2_free_irq(bp); 6540 6538 bnx2_free_skbs(bp); 6541 6539 bnx2_free_mem(bp); 6540 + bnx2_del_napi(bp); 6542 6541 bp->link_up = 0; 6543 6542 netif_carrier_off(bp->dev); 6544 6543 bnx2_set_power_state(bp, PCI_D3hot); ··· 8230 8227 return str; 8231 8228 } 8232 8229 8233 - static void __devinit 8230 + static void 8231 + bnx2_del_napi(struct bnx2 *bp) 8232 + { 8233 + int i; 8234 + 8235 + for (i = 0; i < bp->irq_nvecs; i++) 8236 + netif_napi_del(&bp->bnx2_napi[i].napi); 8237 + } 8238 + 8239 + static void 8234 8240 bnx2_init_napi(struct bnx2 *bp) 8235 8241 { 8236 8242 int i;
+5 -5
drivers/net/can/mscan/mpc5xxx_can.c
··· 73 73 else 74 74 *mscan_clksrc = MSCAN_CLKSRC_XTAL; 75 75 76 - freq = mpc5xxx_get_bus_frequency(ofdev->node); 76 + freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); 77 77 if (!freq) 78 78 return 0; 79 79 ··· 152 152 } 153 153 154 154 /* Determine the MSCAN device index from the physical address */ 155 - pval = of_get_property(ofdev->node, "reg", &plen); 155 + pval = of_get_property(ofdev->dev.of_node, "reg", &plen); 156 156 BUG_ON(!pval || plen < sizeof(*pval)); 157 157 clockidx = (*pval & 0x80) ? 1 : 0; 158 158 if (*pval & 0x2000) ··· 168 168 */ 169 169 if (clock_name && !strcmp(clock_name, "ip")) { 170 170 *mscan_clksrc = MSCAN_CLKSRC_IPS; 171 - freq = mpc5xxx_get_bus_frequency(ofdev->node); 171 + freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); 172 172 } else { 173 173 *mscan_clksrc = MSCAN_CLKSRC_BUS; 174 174 175 - pval = of_get_property(ofdev->node, 175 + pval = of_get_property(ofdev->dev.of_node, 176 176 "fsl,mscan-clock-divider", &plen); 177 177 if (pval && plen == sizeof(*pval)) 178 178 clockdiv = *pval; ··· 251 251 const struct of_device_id *id) 252 252 { 253 253 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data; 254 - struct device_node *np = ofdev->node; 254 + struct device_node *np = ofdev->dev.of_node; 255 255 struct net_device *dev; 256 256 struct mscan_priv *priv; 257 257 void __iomem *base;
+1 -1
drivers/net/e1000e/netdev.c
··· 2554 2554 mdef = er32(MDEF(i)); 2555 2555 2556 2556 /* Ignore filters with anything other than IPMI ports */ 2557 - if (mdef & !(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2557 + if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2558 2558 continue; 2559 2559 2560 2560 /* Enable this decision filter in MANC2H */
+7
drivers/net/enic/enic.h
··· 74 74 void *devid; 75 75 }; 76 76 77 + #define ENIC_SET_APPLIED (1 << 0) 78 + #define ENIC_SET_REQUEST (1 << 1) 79 + #define ENIC_SET_NAME (1 << 2) 80 + #define ENIC_SET_INSTANCE (1 << 3) 81 + #define ENIC_SET_HOST (1 << 4) 82 + 77 83 struct enic_port_profile { 84 + u32 set; 78 85 u8 request; 79 86 char name[PORT_PROFILE_MAX]; 80 87 u8 instance_uuid[PORT_UUID_MAX];
+100 -106
drivers/net/enic/enic_main.c
··· 1029 1029 return err; 1030 1030 } 1031 1031 1032 - static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac, 1033 - char *name, u8 *instance_uuid, u8 *host_uuid) 1032 + static int enic_set_port_profile(struct enic *enic, u8 *mac) 1034 1033 { 1035 1034 struct vic_provinfo *vp; 1036 1035 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; ··· 1039 1040 "%02X%02X-%02X%02X%02X%02X%0X%02X"; 1040 1041 int err; 1041 1042 1042 - if (!name) 1043 - return -EINVAL; 1044 - 1045 - if (!is_valid_ether_addr(mac)) 1046 - return -EADDRNOTAVAIL; 1047 - 1048 - vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE); 1049 - if (!vp) 1050 - return -ENOMEM; 1051 - 1052 - vic_provinfo_add_tlv(vp, 1053 - VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR, 1054 - strlen(name) + 1, name); 1055 - 1056 - vic_provinfo_add_tlv(vp, 1057 - VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR, 1058 - ETH_ALEN, mac); 1059 - 1060 - if (instance_uuid) { 1061 - uuid = instance_uuid; 1062 - sprintf(uuid_str, uuid_fmt, 1063 - uuid[0], uuid[1], uuid[2], uuid[3], 1064 - uuid[4], uuid[5], uuid[6], uuid[7], 1065 - uuid[8], uuid[9], uuid[10], uuid[11], 1066 - uuid[12], uuid[13], uuid[14], uuid[15]); 1067 - vic_provinfo_add_tlv(vp, 1068 - VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1069 - sizeof(uuid_str), uuid_str); 1070 - } 1071 - 1072 - if (host_uuid) { 1073 - uuid = host_uuid; 1074 - sprintf(uuid_str, uuid_fmt, 1075 - uuid[0], uuid[1], uuid[2], uuid[3], 1076 - uuid[4], uuid[5], uuid[6], uuid[7], 1077 - uuid[8], uuid[9], uuid[10], uuid[11], 1078 - uuid[12], uuid[13], uuid[14], uuid[15]); 1079 - vic_provinfo_add_tlv(vp, 1080 - VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1081 - sizeof(uuid_str), uuid_str); 1082 - } 1083 - 1084 1043 err = enic_vnic_dev_deinit(enic); 1085 1044 if (err) 1086 - goto err_out; 1045 + return err; 1087 1046 1088 - memset(&enic->pp, 0, sizeof(enic->pp)); 1047 + switch (enic->pp.request) { 1089 1048 1090 - err = enic_dev_init_prov(enic, vp); 1091 - if (err) 1092 - goto err_out; 1049 + case PORT_REQUEST_ASSOCIATE: 
1093 1050 1094 - enic->pp.request = request; 1095 - memcpy(enic->pp.name, name, PORT_PROFILE_MAX); 1096 - if (instance_uuid) 1097 - memcpy(enic->pp.instance_uuid, 1098 - instance_uuid, PORT_UUID_MAX); 1099 - if (host_uuid) 1100 - memcpy(enic->pp.host_uuid, 1101 - host_uuid, PORT_UUID_MAX); 1051 + if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name)) 1052 + return -EINVAL; 1102 1053 1103 - err_out: 1104 - vic_provinfo_free(vp); 1054 + if (!is_valid_ether_addr(mac)) 1055 + return -EADDRNOTAVAIL; 1105 1056 1106 - return err; 1107 - } 1057 + vp = vic_provinfo_alloc(GFP_KERNEL, oui, 1058 + VIC_PROVINFO_LINUX_TYPE); 1059 + if (!vp) 1060 + return -ENOMEM; 1108 1061 1109 - static int enic_unset_port_profile(struct enic *enic) 1110 - { 1111 - memset(&enic->pp, 0, sizeof(enic->pp)); 1112 - return enic_vnic_dev_deinit(enic); 1062 + vic_provinfo_add_tlv(vp, 1063 + VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR, 1064 + strlen(enic->pp.name) + 1, enic->pp.name); 1065 + 1066 + vic_provinfo_add_tlv(vp, 1067 + VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR, 1068 + ETH_ALEN, mac); 1069 + 1070 + if (enic->pp.set & ENIC_SET_INSTANCE) { 1071 + uuid = enic->pp.instance_uuid; 1072 + sprintf(uuid_str, uuid_fmt, 1073 + uuid[0], uuid[1], uuid[2], uuid[3], 1074 + uuid[4], uuid[5], uuid[6], uuid[7], 1075 + uuid[8], uuid[9], uuid[10], uuid[11], 1076 + uuid[12], uuid[13], uuid[14], uuid[15]); 1077 + vic_provinfo_add_tlv(vp, 1078 + VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1079 + sizeof(uuid_str), uuid_str); 1080 + } 1081 + 1082 + if (enic->pp.set & ENIC_SET_HOST) { 1083 + uuid = enic->pp.host_uuid; 1084 + sprintf(uuid_str, uuid_fmt, 1085 + uuid[0], uuid[1], uuid[2], uuid[3], 1086 + uuid[4], uuid[5], uuid[6], uuid[7], 1087 + uuid[8], uuid[9], uuid[10], uuid[11], 1088 + uuid[12], uuid[13], uuid[14], uuid[15]); 1089 + vic_provinfo_add_tlv(vp, 1090 + VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1091 + sizeof(uuid_str), uuid_str); 1092 + } 1093 + 1094 + err = enic_dev_init_prov(enic, vp); 1095 + vic_provinfo_free(vp); 1096 + 
if (err) 1097 + return err; 1098 + break; 1099 + 1100 + case PORT_REQUEST_DISASSOCIATE: 1101 + break; 1102 + 1103 + default: 1104 + return -EINVAL; 1105 + } 1106 + 1107 + enic->pp.set |= ENIC_SET_APPLIED; 1108 + return 0; 1113 1109 } 1114 1110 1115 1111 static int enic_set_vf_port(struct net_device *netdev, int vf, 1116 1112 struct nlattr *port[]) 1117 1113 { 1118 1114 struct enic *enic = netdev_priv(netdev); 1119 - char *name = NULL; 1120 - u8 *instance_uuid = NULL; 1121 - u8 *host_uuid = NULL; 1122 - u8 request = PORT_REQUEST_DISASSOCIATE; 1115 + 1116 + memset(&enic->pp, 0, sizeof(enic->pp)); 1117 + 1118 + if (port[IFLA_PORT_REQUEST]) { 1119 + enic->pp.set |= ENIC_SET_REQUEST; 1120 + enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]); 1121 + } 1122 + 1123 + if (port[IFLA_PORT_PROFILE]) { 1124 + enic->pp.set |= ENIC_SET_NAME; 1125 + memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]), 1126 + PORT_PROFILE_MAX); 1127 + } 1128 + 1129 + if (port[IFLA_PORT_INSTANCE_UUID]) { 1130 + enic->pp.set |= ENIC_SET_INSTANCE; 1131 + memcpy(enic->pp.instance_uuid, 1132 + nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); 1133 + } 1134 + 1135 + if (port[IFLA_PORT_HOST_UUID]) { 1136 + enic->pp.set |= ENIC_SET_HOST; 1137 + memcpy(enic->pp.host_uuid, 1138 + nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); 1139 + } 1123 1140 1124 1141 /* don't support VFs, yet */ 1125 1142 if (vf != PORT_SELF_VF) 1126 1143 return -EOPNOTSUPP; 1127 1144 1128 - if (port[IFLA_PORT_REQUEST]) 1129 - request = nla_get_u8(port[IFLA_PORT_REQUEST]); 1145 + if (!(enic->pp.set & ENIC_SET_REQUEST)) 1146 + return -EOPNOTSUPP; 1130 1147 1131 - switch (request) { 1132 - case PORT_REQUEST_ASSOCIATE: 1148 + if (enic->pp.request == PORT_REQUEST_ASSOCIATE) { 1133 1149 1134 1150 /* If the interface mac addr hasn't been assigned, 1135 1151 * assign a random mac addr before setting port- ··· 1153 1139 1154 1140 if (is_zero_ether_addr(netdev->dev_addr)) 1155 1141 random_ether_addr(netdev->dev_addr); 1156 - 
1157 - if (port[IFLA_PORT_PROFILE]) 1158 - name = nla_data(port[IFLA_PORT_PROFILE]); 1159 - 1160 - if (port[IFLA_PORT_INSTANCE_UUID]) 1161 - instance_uuid = 1162 - nla_data(port[IFLA_PORT_INSTANCE_UUID]); 1163 - 1164 - if (port[IFLA_PORT_HOST_UUID]) 1165 - host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]); 1166 - 1167 - return enic_set_port_profile(enic, request, 1168 - netdev->dev_addr, name, 1169 - instance_uuid, host_uuid); 1170 - 1171 - case PORT_REQUEST_DISASSOCIATE: 1172 - 1173 - return enic_unset_port_profile(enic); 1174 - 1175 - default: 1176 - break; 1177 1142 } 1178 1143 1179 - return -EOPNOTSUPP; 1144 + return enic_set_port_profile(enic, netdev->dev_addr); 1180 1145 } 1181 1146 1182 1147 static int enic_get_vf_port(struct net_device *netdev, int vf, ··· 1165 1172 int err, error, done; 1166 1173 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1167 1174 1168 - /* don't support VFs, yet */ 1169 - if (vf != PORT_SELF_VF) 1170 - return -EOPNOTSUPP; 1175 + if (!(enic->pp.set & ENIC_SET_APPLIED)) 1176 + return -ENODATA; 1171 1177 1172 1178 err = enic_dev_init_done(enic, &done, &error); 1173 - 1174 1179 if (err) 1175 - return err; 1180 + error = err; 1176 1181 1177 1182 switch (error) { 1178 1183 case ERR_SUCCESS: ··· 1193 1202 1194 1203 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request); 1195 1204 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); 1196 - NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, 1197 - enic->pp.name); 1198 - NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 1199 - enic->pp.instance_uuid); 1200 - NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, 1201 - enic->pp.host_uuid); 1205 + if (enic->pp.set & ENIC_SET_NAME) 1206 + NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, 1207 + enic->pp.name); 1208 + if (enic->pp.set & ENIC_SET_INSTANCE) 1209 + NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 1210 + enic->pp.instance_uuid); 1211 + if (enic->pp.set & ENIC_SET_HOST) 1212 + NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, 1213 + 
enic->pp.host_uuid); 1202 1214 1203 1215 return 0; 1204 1216
+4 -3
drivers/net/epic100.c
··· 87 87 #include <linux/bitops.h> 88 88 #include <asm/io.h> 89 89 #include <asm/uaccess.h> 90 + #include <asm/byteorder.h> 90 91 91 92 /* These identify the driver base version and may not be removed. */ 92 93 static char version[] __devinitdata = ··· 231 230 * The EPIC100 Rx and Tx buffer descriptors. Note that these 232 231 * really ARE host-endian; it's not a misannotation. We tell 233 232 * the card to byteswap them internally on big-endian hosts - 234 - * look for #ifdef CONFIG_BIG_ENDIAN in epic_open(). 233 + * look for #ifdef __BIG_ENDIAN in epic_open(). 235 234 */ 236 235 237 236 struct epic_tx_desc { ··· 691 690 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 692 691 693 692 /* Tell the chip to byteswap descriptors on big-endian hosts */ 694 - #ifdef CONFIG_BIG_ENDIAN 693 + #ifdef __BIG_ENDIAN 695 694 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 696 695 inl(ioaddr + GENCTL); 697 696 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); ··· 807 806 for (i = 16; i > 0; i--) 808 807 outl(0x0008, ioaddr + TEST1); 809 808 810 - #ifdef CONFIG_BIG_ENDIAN 809 + #ifdef __BIG_ENDIAN 811 810 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 812 811 #else 813 812 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+8 -8
drivers/net/fec.c
··· 1369 1369 1370 1370 if (ndev) { 1371 1371 fep = netdev_priv(ndev); 1372 - if (netif_running(ndev)) { 1373 - netif_device_detach(ndev); 1374 - fec_stop(ndev); 1375 - } 1372 + if (netif_running(ndev)) 1373 + fec_enet_close(ndev); 1374 + clk_disable(fep->clk); 1376 1375 } 1377 1376 return 0; 1378 1377 } ··· 1380 1381 fec_resume(struct platform_device *dev) 1381 1382 { 1382 1383 struct net_device *ndev = platform_get_drvdata(dev); 1384 + struct fec_enet_private *fep; 1383 1385 1384 1386 if (ndev) { 1385 - if (netif_running(ndev)) { 1386 - fec_enet_init(ndev, 0); 1387 - netif_device_attach(ndev); 1388 - } 1387 + fep = netdev_priv(ndev); 1388 + clk_enable(fep->clk); 1389 + if (netif_running(ndev)) 1390 + fec_enet_open(ndev); 1389 1391 } 1390 1392 return 0; 1391 1393 }
+5 -6
drivers/net/greth.c
··· 1607 1607 MODULE_DEVICE_TABLE(of, greth_of_match); 1608 1608 1609 1609 static struct of_platform_driver greth_of_driver = { 1610 - .name = "grlib-greth", 1611 - .match_table = greth_of_match, 1610 + .driver = { 1611 + .name = "grlib-greth", 1612 + .owner = THIS_MODULE, 1613 + .of_match_table = greth_of_match, 1614 + }, 1612 1615 .probe = greth_of_probe, 1613 1616 .remove = __devexit_p(greth_of_remove), 1614 - .driver = { 1615 - .owner = THIS_MODULE, 1616 - .name = "grlib-greth", 1617 - }, 1618 1617 }; 1619 1618 1620 1619 static int __init greth_init(void)
+2
drivers/net/ixgbe/ixgbe_common.c
··· 1188 1188 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1189 1189 } else { 1190 1190 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1191 + return IXGBE_ERR_RAR_INDEX; 1191 1192 } 1192 1193 1193 1194 return 0; ··· 1220 1219 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1221 1220 } else { 1222 1221 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1222 + return IXGBE_ERR_RAR_INDEX; 1223 1223 } 1224 1224 1225 1225 /* clear VMDq pool/queue selection for this RAR */
+1 -1
drivers/net/ixgbe/ixgbe_main.c
··· 642 642 u32 txoff = IXGBE_TFCS_TXOFF; 643 643 644 644 #ifdef CONFIG_IXGBE_DCB 645 - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 645 + if (adapter->dcb_cfg.pfc_mode_enable) { 646 646 int tc; 647 647 int reg_idx = tx_ring->reg_idx; 648 648 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+1
drivers/net/ixgbe/ixgbe_type.h
··· 2609 2609 #define IXGBE_ERR_EEPROM_VERSION -24 2610 2610 #define IXGBE_ERR_NO_SPACE -25 2611 2611 #define IXGBE_ERR_OVERTEMP -26 2612 + #define IXGBE_ERR_RAR_INDEX -27 2612 2613 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2613 2614 2614 2615 #endif /* _IXGBE_TYPE_H_ */
+15 -17
drivers/net/korina.c
··· 135 135 struct napi_struct napi; 136 136 struct timer_list media_check_timer; 137 137 struct mii_if_info mii_if; 138 + struct work_struct restart_task; 138 139 struct net_device *dev; 139 140 int phy_addr; 140 141 }; ··· 376 375 if (devcs & ETH_RX_LE) 377 376 dev->stats.rx_length_errors++; 378 377 if (devcs & ETH_RX_OVR) 379 - dev->stats.rx_over_errors++; 378 + dev->stats.rx_fifo_errors++; 380 379 if (devcs & ETH_RX_CV) 381 380 dev->stats.rx_frame_errors++; 382 381 if (devcs & ETH_RX_CES) ··· 765 764 766 765 /* Initialize the receive descriptors */ 767 766 for (i = 0; i < KORINA_NUM_RDS; i++) { 768 - skb = dev_alloc_skb(KORINA_RBSIZE + 2); 767 + skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); 769 768 if (!skb) 770 769 return -ENOMEM; 771 - skb_reserve(skb, 2); 772 770 lp->rx_skb[i] = skb; 773 771 lp->rd_ring[i].control = DMA_DESC_IOD | 774 772 DMA_COUNT(KORINA_RBSIZE); ··· 890 890 891 891 /* 892 892 * Restart the RC32434 ethernet controller. 893 - * FIXME: check the return status where we call it 894 893 */ 895 - static int korina_restart(struct net_device *dev) 894 + static void korina_restart_task(struct work_struct *work) 896 895 { 897 - struct korina_private *lp = netdev_priv(dev); 898 - int ret; 896 + struct korina_private *lp = container_of(work, 897 + struct korina_private, restart_task); 898 + struct net_device *dev = lp->dev; 899 899 900 900 /* 901 901 * Disable interrupts ··· 916 916 917 917 napi_disable(&lp->napi); 918 918 919 - ret = korina_init(dev); 920 - if (ret < 0) { 919 + if (korina_init(dev) < 0) { 921 920 printk(KERN_ERR "%s: cannot restart device\n", dev->name); 922 - return ret; 921 + return; 923 922 } 924 923 korina_multicast_list(dev); 925 924 ··· 926 927 enable_irq(lp->ovr_irq); 927 928 enable_irq(lp->tx_irq); 928 929 enable_irq(lp->rx_irq); 929 - 930 - return ret; 931 930 } 932 931 933 932 static void korina_clear_and_restart(struct net_device *dev, u32 value) ··· 934 937 935 938 netif_stop_queue(dev); 936 939 writel(value, 
&lp->eth_regs->ethintfc); 937 - korina_restart(dev); 940 + schedule_work(&lp->restart_task); 938 941 } 939 942 940 943 /* Ethernet Tx Underflow interrupt */ ··· 959 962 static void korina_tx_timeout(struct net_device *dev) 960 963 { 961 964 struct korina_private *lp = netdev_priv(dev); 962 - unsigned long flags; 963 965 964 - spin_lock_irqsave(&lp->lock, flags); 965 - korina_restart(dev); 966 - spin_unlock_irqrestore(&lp->lock, flags); 966 + schedule_work(&lp->restart_task); 967 967 } 968 968 969 969 /* Ethernet Rx Overflow interrupt */ ··· 1080 1086 1081 1087 napi_disable(&lp->napi); 1082 1088 1089 + cancel_work_sync(&lp->restart_task); 1090 + 1083 1091 free_irq(lp->rx_irq, dev); 1084 1092 free_irq(lp->tx_irq, dev); 1085 1093 free_irq(lp->ovr_irq, dev); ··· 1193 1197 goto probe_err_register; 1194 1198 } 1195 1199 setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev); 1200 + 1201 + INIT_WORK(&lp->restart_task, korina_restart_task); 1196 1202 1197 1203 printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n", 1198 1204 dev->name);
+2 -1
drivers/net/ksz884x.c
··· 4854 4854 * 4855 4855 * Return 0 if successful; otherwise an error code indicating failure. 4856 4856 */ 4857 - static int netdev_tx(struct sk_buff *skb, struct net_device *dev) 4857 + static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev) 4858 4858 { 4859 4859 struct dev_priv *priv = netdev_priv(dev); 4860 4860 struct dev_info *hw_priv = priv->adapter; ··· 6863 6863 .ndo_tx_timeout = netdev_tx_timeout, 6864 6864 .ndo_change_mtu = netdev_change_mtu, 6865 6865 .ndo_set_mac_address = netdev_set_mac_address, 6866 + .ndo_validate_addr = eth_validate_addr, 6866 6867 .ndo_do_ioctl = netdev_ioctl, 6867 6868 .ndo_set_rx_mode = netdev_set_rx_mode, 6868 6869 #ifdef CONFIG_NET_POLL_CONTROLLER
+1 -1
drivers/net/ppp_generic.c
··· 1416 1416 flen = len; 1417 1417 if (nfree > 0) { 1418 1418 if (pch->speed == 0) { 1419 - flen = totlen/nfree; 1419 + flen = len/nfree; 1420 1420 if (nbigger > 0) { 1421 1421 flen++; 1422 1422 nbigger--;
+5
drivers/net/r8169.c
··· 559 559 break; 560 560 udelay(25); 561 561 } 562 + /* 563 + * Some configurations require a small delay even after the write 564 + * completed indication or the next write might fail. 565 + */ 566 + udelay(25); 562 567 } 563 568 564 569 static int mdio_read(void __iomem *ioaddr, int reg_addr)
+1 -1
drivers/net/sfc/net_driver.h
··· 836 836 837 837 static inline unsigned int efx_port_num(struct efx_nic *efx) 838 838 { 839 - return efx->port_num; 839 + return efx->net_dev->dev_id; 840 840 } 841 841 842 842 /**
+1 -1
drivers/net/sfc/siena.c
··· 222 222 } 223 223 224 224 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 225 - efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 225 + efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 226 226 227 227 efx_mcdi_init(efx); 228 228
+2 -2
drivers/net/tehuti.c
··· 646 646 error = copy_from_user(data, ifr->ifr_data, sizeof(data)); 647 647 if (error) { 648 648 pr_err("cant copy from user\n"); 649 - RET(error); 649 + RET(-EFAULT); 650 650 } 651 651 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); 652 652 } ··· 665 665 data[2]); 666 666 error = copy_to_user(ifr->ifr_data, data, sizeof(data)); 667 667 if (error) 668 - RET(error); 668 + RET(-EFAULT); 669 669 break; 670 670 671 671 case BDX_OP_WRITE:
+4 -4
drivers/net/virtio_net.c
··· 340 340 341 341 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len); 342 342 343 - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb); 343 + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp); 344 344 if (err < 0) 345 345 dev_kfree_skb(skb); 346 346 ··· 385 385 386 386 /* chain first in list head */ 387 387 first->private = (unsigned long)list; 388 - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, 389 - first); 388 + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, 389 + first, gfp); 390 390 if (err < 0) 391 391 give_pages(vi, first); 392 392 ··· 404 404 405 405 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE); 406 406 407 - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page); 407 + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp); 408 408 if (err < 0) 409 409 give_pages(vi, page); 410 410
-2
drivers/net/wan/x25_asy.c
··· 37 37 #include <net/x25device.h> 38 38 #include "x25_asy.h" 39 39 40 - #include <net/x25device.h> 41 - 42 40 static struct net_device **x25_asy_devs; 43 41 static int x25_asy_maxdev = SL_NRUNIT; 44 42
+3 -14
drivers/net/wireless/ath/ath5k/base.c
··· 222 222 static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 223 223 struct ath5k_txq *txq); 224 224 static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan); 225 - static int ath5k_reset_wake(struct ath5k_softc *sc); 226 225 static int ath5k_start(struct ieee80211_hw *hw); 227 226 static void ath5k_stop(struct ieee80211_hw *hw); 228 227 static int ath5k_add_interface(struct ieee80211_hw *hw, ··· 2769 2770 { 2770 2771 struct ath5k_softc *sc = (void *)data; 2771 2772 2772 - ath5k_reset_wake(sc); 2773 + ath5k_reset(sc, sc->curchan); 2773 2774 } 2774 2775 2775 2776 /* ··· 2940 2941 ath5k_beacon_config(sc); 2941 2942 /* intrs are enabled by ath5k_beacon_config */ 2942 2943 2944 + ieee80211_wake_queues(sc->hw); 2945 + 2943 2946 return 0; 2944 2947 err: 2945 - return ret; 2946 - } 2947 - 2948 - static int 2949 - ath5k_reset_wake(struct ath5k_softc *sc) 2950 - { 2951 - int ret; 2952 - 2953 - ret = ath5k_reset(sc, sc->curchan); 2954 - if (!ret) 2955 - ieee80211_wake_queues(sc->hw); 2956 - 2957 2948 return ret; 2958 2949 } 2959 2950
+1 -1
include/linux/netfilter/x_tables.h
··· 397 397 * @stacksize jumps (number of user chains) can possibly be made. 398 398 */ 399 399 unsigned int stacksize; 400 - unsigned int *stackptr; 400 + unsigned int __percpu *stackptr; 401 401 void ***jumpstack; 402 402 /* ipt_entry tables: one per CPU */ 403 403 /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
+3 -3
include/net/dst.h
··· 250 250 * Linux networking. Thus, destinations are stackable. 251 251 */ 252 252 253 - static inline struct dst_entry *dst_pop(struct dst_entry *dst) 253 + static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb) 254 254 { 255 - struct dst_entry *child = dst_clone(dst->child); 255 + struct dst_entry *child = skb_dst(skb)->child; 256 256 257 - dst_release(dst); 257 + skb_dst_drop(skb); 258 258 return child; 259 259 } 260 260
+1 -14
include/net/sock.h
··· 1524 1524 1525 1525 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 1526 1526 1527 - static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 1528 - { 1529 - /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 1530 - number of warnings when compiling with -W --ANK 1531 - */ 1532 - if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 1533 - (unsigned)sk->sk_rcvbuf) 1534 - return -ENOMEM; 1535 - skb_set_owner_r(skb, sk); 1536 - skb_queue_tail(&sk->sk_error_queue, skb); 1537 - if (!sock_flag(sk, SOCK_DEAD)) 1538 - sk->sk_data_ready(sk, skb->len); 1539 - return 0; 1540 - } 1527 + extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); 1541 1528 1542 1529 /* 1543 1530 * Recover an error report and clear atomically
+2 -1
net/8021q/vlan_dev.c
··· 708 708 netif_carrier_off(dev); 709 709 710 710 /* IFF_BROADCAST|IFF_MULTICAST; ??? */ 711 - dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI); 711 + dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | 712 + IFF_MASTER | IFF_SLAVE); 712 713 dev->iflink = real_dev->ifindex; 713 714 dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | 714 715 (1<<__LINK_STATE_DORMANT))) |
+4 -4
net/core/dev.c
··· 2805 2805 struct net_device *orig_dev; 2806 2806 struct net_device *master; 2807 2807 struct net_device *null_or_orig; 2808 - struct net_device *null_or_bond; 2808 + struct net_device *orig_or_bond; 2809 2809 int ret = NET_RX_DROP; 2810 2810 __be16 type; 2811 2811 ··· 2882 2882 * device that may have registered for a specific ptype. The 2883 2883 * handler may have to adjust skb->dev and orig_dev. 2884 2884 */ 2885 - null_or_bond = NULL; 2885 + orig_or_bond = orig_dev; 2886 2886 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && 2887 2887 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { 2888 - null_or_bond = vlan_dev_real_dev(skb->dev); 2888 + orig_or_bond = vlan_dev_real_dev(skb->dev); 2889 2889 } 2890 2890 2891 2891 type = skb->protocol; ··· 2893 2893 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2894 2894 if (ptype->type == type && (ptype->dev == null_or_orig || 2895 2895 ptype->dev == skb->dev || ptype->dev == orig_dev || 2896 - ptype->dev == null_or_bond)) { 2896 + ptype->dev == orig_or_bond)) { 2897 2897 if (pt_prev) 2898 2898 ret = deliver_skb(skb, pt_prev, orig_dev); 2899 2899 pt_prev = ptype;
+28 -2
net/core/skbuff.c
··· 2965 2965 } 2966 2966 EXPORT_SYMBOL_GPL(skb_cow_data); 2967 2967 2968 + static void sock_rmem_free(struct sk_buff *skb) 2969 + { 2970 + struct sock *sk = skb->sk; 2971 + 2972 + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 2973 + } 2974 + 2975 + /* 2976 + * Note: We dont mem charge error packets (no sk_forward_alloc changes) 2977 + */ 2978 + int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 2979 + { 2980 + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 2981 + (unsigned)sk->sk_rcvbuf) 2982 + return -ENOMEM; 2983 + 2984 + skb_orphan(skb); 2985 + skb->sk = sk; 2986 + skb->destructor = sock_rmem_free; 2987 + atomic_add(skb->truesize, &sk->sk_rmem_alloc); 2988 + 2989 + skb_queue_tail(&sk->sk_error_queue, skb); 2990 + if (!sock_flag(sk, SOCK_DEAD)) 2991 + sk->sk_data_ready(sk, skb->len); 2992 + return 0; 2993 + } 2994 + EXPORT_SYMBOL(sock_queue_err_skb); 2995 + 2968 2996 void skb_tstamp_tx(struct sk_buff *orig_skb, 2969 2997 struct skb_shared_hwtstamps *hwtstamps) 2970 2998 { ··· 3025 2997 serr->ee.ee_errno = ENOMSG; 3026 2998 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3027 2999 3028 - bh_lock_sock(sk); 3029 3000 err = sock_queue_err_skb(sk, skb); 3030 - bh_unlock_sock(sk); 3031 3001 3032 3002 if (err) 3033 3003 kfree_skb(skb);
+5 -5
net/ipv4/Kconfig
··· 303 303 If unsure, say N. 304 304 305 305 config SYN_COOKIES 306 - bool "IP: TCP syncookie support (disabled per default)" 306 + bool "IP: TCP syncookie support" 307 307 ---help--- 308 308 Normal TCP/IP networking is open to an attack known as "SYN 309 309 flooding". This denial-of-service attack prevents legitimate remote ··· 328 328 server is really overloaded. If this happens frequently better turn 329 329 them off. 330 330 331 - If you say Y here, note that SYN cookies aren't enabled by default; 332 - you can enable them by saying Y to "/proc file system support" and 331 + If you say Y here, you can disable SYN cookies at run time by 332 + saying Y to "/proc file system support" and 333 333 "Sysctl support" below and executing the command 334 334 335 - echo 1 >/proc/sys/net/ipv4/tcp_syncookies 335 + echo 0 > /proc/sys/net/ipv4/tcp_syncookies 336 336 337 - at boot time after the /proc file system has been mounted. 337 + after the /proc file system has been mounted. 338 338 339 339 If unsure, say N. 340 340
+1 -1
net/ipv4/netfilter/ip_tables.c
··· 336 336 cpu = smp_processor_id(); 337 337 table_base = private->entries[cpu]; 338 338 jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; 339 - stackptr = &private->stackptr[cpu]; 339 + stackptr = per_cpu_ptr(private->stackptr, cpu); 340 340 origptr = *stackptr; 341 341 342 342 e = get_entry(table_base, private->hook_entry[hook]);
+1 -1
net/ipv4/syncookies.c
··· 347 347 { .sport = th->dest, 348 348 .dport = th->source } } }; 349 349 security_req_classify_flow(req, &fl); 350 - if (ip_route_output_key(&init_net, &rt, &fl)) { 350 + if (ip_route_output_key(sock_net(sk), &rt, &fl)) { 351 351 reqsk_free(req); 352 352 goto out; 353 353 }
+2 -2
net/ipv4/tcp_hybla.c
··· 126 126 * calculate 2^fract in a <<7 value. 127 127 */ 128 128 is_slowstart = 1; 129 - increment = ((1 << ca->rho) * hybla_fraction(rho_fractions)) 130 - - 128; 129 + increment = ((1 << min(ca->rho, 16U)) * 130 + hybla_fraction(rho_fractions)) - 128; 131 131 } else { 132 132 /* 133 133 * congestion avoidance
+2 -2
net/ipv4/tcp_input.c
··· 2639 2639 if (sk->sk_family == AF_INET) { 2640 2640 printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", 2641 2641 msg, 2642 - &inet->daddr, ntohs(inet->dport), 2642 + &inet->inet_daddr, ntohs(inet->inet_dport), 2643 2643 tp->snd_cwnd, tcp_left_out(tp), 2644 2644 tp->snd_ssthresh, tp->prior_ssthresh, 2645 2645 tp->packets_out); ··· 2649 2649 struct ipv6_pinfo *np = inet6_sk(sk); 2650 2650 printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2651 2651 msg, 2652 - &np->daddr, ntohs(inet->dport), 2652 + &np->daddr, ntohs(inet->inet_dport), 2653 2653 tp->snd_cwnd, tcp_left_out(tp), 2654 2654 tp->snd_ssthresh, tp->prior_ssthresh, 2655 2655 tp->packets_out);
+4 -3
net/ipv4/tcp_ipv4.c
··· 1557 1557 #endif 1558 1558 1559 1559 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1560 + sock_rps_save_rxhash(sk, skb->rxhash); 1560 1561 TCP_CHECK_TIMER(sk); 1561 1562 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { 1562 1563 rsk = sk; ··· 1582 1581 } 1583 1582 return 0; 1584 1583 } 1585 - } 1584 + } else 1585 + sock_rps_save_rxhash(sk, skb->rxhash); 1586 + 1586 1587 1587 1588 TCP_CHECK_TIMER(sk); 1588 1589 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { ··· 1676 1673 goto discard_and_relse; 1677 1674 1678 1675 skb->dev = NULL; 1679 - 1680 - sock_rps_save_rxhash(sk, skb->rxhash); 1681 1676 1682 1677 bh_lock_sock_nested(sk); 1683 1678 ret = 0;
+2 -4
net/ipv4/udp.c
··· 633 633 if (!inet->recverr) { 634 634 if (!harderr || sk->sk_state != TCP_ESTABLISHED) 635 635 goto out; 636 - } else { 637 - bh_lock_sock(sk); 636 + } else 638 637 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); 639 - bh_unlock_sock(sk); 640 - } 638 + 641 639 sk->sk_err = err; 642 640 sk->sk_error_report(sk); 643 641 out:
+1 -1
net/ipv6/ip6mr.c
··· 120 120 static void ipmr_expire_process(unsigned long arg); 121 121 122 122 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES 123 - #define ip6mr_for_each_table(mrt, met) \ 123 + #define ip6mr_for_each_table(mrt, net) \ 124 124 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) 125 125 126 126 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
+4 -1
net/ipv6/mcast.c
··· 1356 1356 IPV6_TLV_PADN, 0 }; 1357 1357 1358 1358 /* we assume size > sizeof(ra) here */ 1359 - skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err); 1359 + size += LL_ALLOCATED_SPACE(dev); 1360 + /* limit our allocations to order-0 page */ 1361 + size = min_t(int, size, SKB_MAX_ORDER(0, 0)); 1362 + skb = sock_alloc_send_skb(sk, size, 1, &err); 1360 1363 1361 1364 if (!skb) 1362 1365 return NULL;
+1 -1
net/ipv6/netfilter/ip6_tables.c
··· 363 363 cpu = smp_processor_id(); 364 364 table_base = private->entries[cpu]; 365 365 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; 366 - stackptr = &private->stackptr[cpu]; 366 + stackptr = per_cpu_ptr(private->stackptr, cpu); 367 367 origptr = *stackptr; 368 368 369 369 e = get_entry(table_base, private->hook_entry[hook]);
+2 -4
net/ipv6/udp.c
··· 466 466 if (sk->sk_state != TCP_ESTABLISHED && !np->recverr) 467 467 goto out; 468 468 469 - if (np->recverr) { 470 - bh_lock_sock(sk); 469 + if (np->recverr) 471 470 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); 472 - bh_unlock_sock(sk); 473 - } 471 + 474 472 sk->sk_err = err; 475 473 sk->sk_error_report(sk); 476 474 out:
+4 -2
net/mac80211/agg-tx.c
··· 332 332 IEEE80211_QUEUE_STOP_REASON_AGGREGATION); 333 333 334 334 spin_unlock(&local->ampdu_lock); 335 - spin_unlock_bh(&sta->lock); 336 335 337 - /* send an addBA request */ 336 + /* prepare tid data */ 338 337 sta->ampdu_mlme.dialog_token_allocator++; 339 338 sta->ampdu_mlme.tid_tx[tid]->dialog_token = 340 339 sta->ampdu_mlme.dialog_token_allocator; 341 340 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; 342 341 342 + spin_unlock_bh(&sta->lock); 343 + 344 + /* send AddBA request */ 343 345 ieee80211_send_addba_request(sdata, pubsta->addr, tid, 344 346 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 345 347 sta->ampdu_mlme.tid_tx[tid]->ssn,
+11 -2
net/mac80211/rx.c
··· 1818 1818 return RX_CONTINUE; 1819 1819 1820 1820 if (ieee80211_is_back_req(bar->frame_control)) { 1821 + struct { 1822 + __le16 control, start_seq_num; 1823 + } __packed bar_data; 1824 + 1821 1825 if (!rx->sta) 1822 1826 return RX_DROP_MONITOR; 1827 + 1828 + if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 1829 + &bar_data, sizeof(bar_data))) 1830 + return RX_DROP_MONITOR; 1831 + 1823 1832 spin_lock(&rx->sta->lock); 1824 - tid = le16_to_cpu(bar->control) >> 12; 1833 + tid = le16_to_cpu(bar_data.control) >> 12; 1825 1834 if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { 1826 1835 spin_unlock(&rx->sta->lock); 1827 1836 return RX_DROP_MONITOR; 1828 1837 } 1829 1838 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; 1830 1839 1831 - start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; 1840 + start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 1832 1841 1833 1842 /* reset session timer */ 1834 1843 if (tid_agg_rx->timeout)
+3 -14
net/netfilter/x_tables.c
··· 699 699 vfree(info->jumpstack); 700 700 else 701 701 kfree(info->jumpstack); 702 - if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE) 703 - vfree(info->stackptr); 704 - else 705 - kfree(info->stackptr); 702 + 703 + free_percpu(info->stackptr); 706 704 707 705 kfree(info); 708 706 } ··· 751 753 unsigned int size; 752 754 int cpu; 753 755 754 - size = sizeof(unsigned int) * nr_cpu_ids; 755 - if (size > PAGE_SIZE) 756 - i->stackptr = vmalloc(size); 757 - else 758 - i->stackptr = kmalloc(size, GFP_KERNEL); 756 + i->stackptr = alloc_percpu(unsigned int); 759 757 if (i->stackptr == NULL) 760 758 return -ENOMEM; 761 - memset(i->stackptr, 0, size); 762 759 763 760 size = sizeof(void **) * nr_cpu_ids; 764 761 if (size > PAGE_SIZE) ··· 836 843 int ret; 837 844 struct xt_table_info *private; 838 845 struct xt_table *t, *table; 839 - 840 - ret = xt_jumpstack_alloc(newinfo); 841 - if (ret < 0) 842 - return ERR_PTR(ret); 843 846 844 847 /* Don't add one object to multiple lists. */ 845 848 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
+4
net/sched/act_nat.c
··· 159 159 iph->daddr = new_addr; 160 160 161 161 csum_replace4(&iph->check, addr, new_addr); 162 + } else if ((iph->frag_off & htons(IP_OFFSET)) || 163 + iph->protocol != IPPROTO_ICMP) { 164 + goto out; 162 165 } 163 166 164 167 ihl = iph->ihl * 4; ··· 250 247 break; 251 248 } 252 249 250 + out: 253 251 return action; 254 252 255 253 drop:
+14 -10
net/sched/act_pedit.c
··· 125 125 { 126 126 struct tcf_pedit *p = a->priv; 127 127 int i, munged = 0; 128 - u8 *pptr; 128 + unsigned int off; 129 129 130 130 if (!(skb->tc_verd & TC_OK2MUNGE)) { 131 131 /* should we set skb->cloned? */ ··· 134 134 } 135 135 } 136 136 137 - pptr = skb_network_header(skb); 137 + off = skb_network_offset(skb); 138 138 139 139 spin_lock(&p->tcf_lock); 140 140 ··· 144 144 struct tc_pedit_key *tkey = p->tcfp_keys; 145 145 146 146 for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { 147 - u32 *ptr; 147 + u32 *ptr, _data; 148 148 int offset = tkey->off; 149 149 150 150 if (tkey->offmask) { 151 - if (skb->len > tkey->at) { 152 - char *j = pptr + tkey->at; 153 - offset += ((*j & tkey->offmask) >> 154 - tkey->shift); 155 - } else { 151 + char *d, _d; 152 + 153 + d = skb_header_pointer(skb, off + tkey->at, 1, 154 + &_d); 155 + if (!d) 156 156 goto bad; 157 - } 157 + offset += (*d & tkey->offmask) >> tkey->shift; 158 158 } 159 159 160 160 if (offset % 4) { ··· 169 169 goto bad; 170 170 } 171 171 172 - ptr = (u32 *)(pptr+offset); 172 + ptr = skb_header_pointer(skb, off + offset, 4, &_data); 173 + if (!ptr) 174 + goto bad; 173 175 /* just do it, baby */ 174 176 *ptr = ((*ptr & tkey->mask) ^ tkey->val); 177 + if (ptr == &_data) 178 + skb_store_bits(skb, off + offset, ptr, 4); 175 179 munged++; 176 180 } 177 181
+34 -11
net/sched/cls_u32.c
··· 98 98 { 99 99 struct { 100 100 struct tc_u_knode *knode; 101 - u8 *ptr; 101 + unsigned int off; 102 102 } stack[TC_U32_MAXDEPTH]; 103 103 104 104 struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; 105 - u8 *ptr = skb_network_header(skb); 105 + unsigned int off = skb_network_offset(skb); 106 106 struct tc_u_knode *n; 107 107 int sdepth = 0; 108 108 int off2 = 0; ··· 134 134 #endif 135 135 136 136 for (i = n->sel.nkeys; i>0; i--, key++) { 137 + unsigned int toff; 138 + __be32 *data, _data; 137 139 138 - if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) { 140 + toff = off + key->off + (off2 & key->offmask); 141 + data = skb_header_pointer(skb, toff, 4, &_data); 142 + if (!data) 143 + goto out; 144 + if ((*data ^ key->val) & key->mask) { 139 145 n = n->next; 140 146 goto next_knode; 141 147 } ··· 180 174 if (sdepth >= TC_U32_MAXDEPTH) 181 175 goto deadloop; 182 176 stack[sdepth].knode = n; 183 - stack[sdepth].ptr = ptr; 177 + stack[sdepth].off = off; 184 178 sdepth++; 185 179 186 180 ht = n->ht_down; 187 181 sel = 0; 188 - if (ht->divisor) 189 - sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift); 182 + if (ht->divisor) { 183 + __be32 *data, _data; 190 184 185 + data = skb_header_pointer(skb, off + n->sel.hoff, 4, 186 + &_data); 187 + if (!data) 188 + goto out; 189 + sel = ht->divisor & u32_hash_fold(*data, &n->sel, 190 + n->fshift); 191 + } 191 192 if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) 192 193 goto next_ht; 193 194 194 195 if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { 195 196 off2 = n->sel.off + 3; 196 - if (n->sel.flags&TC_U32_VAROFFSET) 197 - off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift; 197 + if (n->sel.flags & TC_U32_VAROFFSET) { 198 + __be16 *data, _data; 199 + 200 + data = skb_header_pointer(skb, 201 + off + n->sel.offoff, 202 + 2, &_data); 203 + if (!data) 204 + goto out; 205 + off2 += ntohs(n->sel.offmask & *data) >> 206 + n->sel.offshift; 207 + } 198 208 off2 &= ~3; 199 209 } 200 210 if (n->sel.flags&TC_U32_EAT) { 201 - ptr += off2; 211 + off += off2; 202 212 off2 = 0; 203 213 } 204 214 205 - if (ptr < skb_tail_pointer(skb)) 215 + if (off < skb->len) 206 216 goto next_ht; 207 217 } ··· 226 204 if (sdepth--) { 227 205 n = stack[sdepth].knode; 228 206 ht = n->ht_up; 229 - ptr = stack[sdepth].ptr; 207 + off = stack[sdepth].off; 230 208 goto check_terminal; 231 209 } 210 + out: 232 211 return -1; 233 212 234 213 deadloop:
+2 -2
net/xfrm/xfrm_output.c
··· 95 95 goto error_nolock; 96 96 } 97 97 98 - dst = dst_pop(dst); 98 + dst = skb_dst_pop(skb); 99 99 if (!dst) { 100 100 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); 101 101 err = -EHOSTUNREACH; 102 102 goto error_nolock; 103 103 } 104 - skb_dst_set(skb, dst); 104 + skb_dst_set_noref(skb, dst); 105 105 x = dst->xfrm; 106 106 } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); 107 107
+1
net/xfrm/xfrm_policy.c
··· 2153 2153 return 0; 2154 2154 } 2155 2155 2156 + skb_dst_force(skb); 2156 2157 dst = skb_dst(skb); 2157 2158 2158 2159 res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;