Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (63 commits)
drivers/net/usb/asix.c: Fix pointer cast.
be2net: Bug fix to avoid disabling bottom half during firmware upgrade.
proc_dointvec: write a single value
hso: add support for new products
Phonet: fix potential use-after-free in pep_sock_close()
ath9k: remove VEOL support for ad-hoc
ath9k: change beacon allocation to prefer the first beacon slot
sock.h: fix kernel-doc warning
cls_cgroup: Fix build error when built-in
macvlan: do proper cleanup in macvlan_common_newlink() V2
be2net: Bug fix in init code in probe
net/dccp: expansion of error code size
ath9k: Fix rx of mcast/bcast frames in PS mode with auto sleep
wireless: fix sta_info.h kernel-doc warnings
wireless: fix mac80211.h kernel-doc warnings
iwlwifi: testing the wrong variable in iwl_add_bssid_station()
ath9k_htc: rare leak in ath9k_hif_usb_alloc_tx_urbs()
ath9k_htc: dereferencing before check in hif_usb_tx_cb()
rt2x00: Fix rt2800usb TX descriptor writing.
rt2x00: Fix failed SLEEP->AWAKE and AWAKE->SLEEP transitions.
...

+682 -363
+6
drivers/isdn/capi/kcapi.c
··· 1147 if (ctr->state == CAPI_CTR_DETECTED) 1148 goto reset_unlock_out; 1149 1150 ctr->reset_ctr(ctr); 1151 1152 retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
··· 1147 if (ctr->state == CAPI_CTR_DETECTED) 1148 goto reset_unlock_out; 1149 1150 + if (ctr->reset_ctr == NULL) { 1151 + printk(KERN_DEBUG "kcapi: reset: no reset function\n"); 1152 + retval = -ESRCH; 1153 + goto reset_unlock_out; 1154 + } 1155 + 1156 ctr->reset_ctr(ctr); 1157 1158 retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
+2 -26
drivers/isdn/gigaset/capi.c
··· 922 */ 923 924 /* 925 - * load firmware 926 - */ 927 - static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data) 928 - { 929 - struct cardstate *cs = ctr->driverdata; 930 - 931 - /* AVM specific operation, not needed for Gigaset -- ignore */ 932 - dev_notice(cs->dev, "load_firmware ignored\n"); 933 - 934 - return 0; 935 - } 936 - 937 - /* 938 - * reset (deactivate) controller 939 - */ 940 - static void gigaset_reset_ctr(struct capi_ctr *ctr) 941 - { 942 - struct cardstate *cs = ctr->driverdata; 943 - 944 - /* AVM specific operation, not needed for Gigaset -- ignore */ 945 - dev_notice(cs->dev, "reset_ctr ignored\n"); 946 - } 947 - 948 - /* 949 * register CAPI application 950 */ 951 static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl, ··· 2178 iif->ctr.driverdata = cs; 2179 strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name)); 2180 iif->ctr.driver_name = "gigaset"; 2181 - iif->ctr.load_firmware = gigaset_load_firmware; 2182 - iif->ctr.reset_ctr = gigaset_reset_ctr; 2183 iif->ctr.register_appl = gigaset_register_appl; 2184 iif->ctr.release_appl = gigaset_release_appl; 2185 iif->ctr.send_message = gigaset_send_message;
··· 922 */ 923 924 /* 925 * register CAPI application 926 */ 927 static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl, ··· 2202 iif->ctr.driverdata = cs; 2203 strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name)); 2204 iif->ctr.driver_name = "gigaset"; 2205 + iif->ctr.load_firmware = NULL; 2206 + iif->ctr.reset_ctr = NULL; 2207 iif->ctr.register_appl = gigaset_register_appl; 2208 iif->ctr.release_appl = gigaset_release_appl; 2209 iif->ctr.send_message = gigaset_send_message;
+2
drivers/net/benet/be.h
··· 283 u8 port_type; 284 u8 transceiver; 285 u8 generation; /* BladeEngine ASIC generation */ 286 287 bool sriov_enabled; 288 u32 vf_if_handle[BE_MAX_VF];
··· 283 u8 port_type; 284 u8 transceiver; 285 u8 generation; /* BladeEngine ASIC generation */ 286 + u32 flash_status; 287 + struct completion flash_compl; 288 289 bool sriov_enabled; 290 u32 vf_if_handle[BE_MAX_VF];
+17 -2
drivers/net/benet/be_cmds.c
··· 59 60 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 61 CQE_STATUS_COMPL_MASK; 62 if (compl_status == MCC_STATUS_SUCCESS) { 63 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { 64 struct be_cmd_resp_get_stats *resp = ··· 1424 int status; 1425 1426 spin_lock_bh(&adapter->mcc_lock); 1427 1428 wrb = wrb_from_mccq(adapter); 1429 if (!wrb) { ··· 1436 1437 be_wrb_hdr_prepare(wrb, cmd->size, false, 1, 1438 OPCODE_COMMON_WRITE_FLASHROM); 1439 1440 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1441 OPCODE_COMMON_WRITE_FLASHROM, cmd->size); ··· 1448 req->params.op_code = cpu_to_le32(flash_opcode); 1449 req->params.data_buf_size = cpu_to_le32(buf_size); 1450 1451 - status = be_mcc_notify_wait(adapter); 1452 1453 err: 1454 - spin_unlock_bh(&adapter->mcc_lock); 1455 return status; 1456 } 1457
··· 59 60 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 61 CQE_STATUS_COMPL_MASK; 62 + 63 + if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) && 64 + (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { 65 + adapter->flash_status = compl_status; 66 + complete(&adapter->flash_compl); 67 + } 68 + 69 if (compl_status == MCC_STATUS_SUCCESS) { 70 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { 71 struct be_cmd_resp_get_stats *resp = ··· 1417 int status; 1418 1419 spin_lock_bh(&adapter->mcc_lock); 1420 + adapter->flash_status = 0; 1421 1422 wrb = wrb_from_mccq(adapter); 1423 if (!wrb) { ··· 1428 1429 be_wrb_hdr_prepare(wrb, cmd->size, false, 1, 1430 OPCODE_COMMON_WRITE_FLASHROM); 1431 + wrb->tag1 = CMD_SUBSYSTEM_COMMON; 1432 1433 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1434 OPCODE_COMMON_WRITE_FLASHROM, cmd->size); ··· 1439 req->params.op_code = cpu_to_le32(flash_opcode); 1440 req->params.data_buf_size = cpu_to_le32(buf_size); 1441 1442 + be_mcc_notify(adapter); 1443 + spin_unlock_bh(&adapter->mcc_lock); 1444 + 1445 + if (!wait_for_completion_timeout(&adapter->flash_compl, 1446 + msecs_to_jiffies(12000))) 1447 + status = -1; 1448 + else 1449 + status = adapter->flash_status; 1450 1451 err: 1452 return status; 1453 } 1454
+7 -4
drivers/net/benet/be_main.c
··· 2319 spin_lock_init(&adapter->mcc_lock); 2320 spin_lock_init(&adapter->mcc_cq_lock); 2321 2322 pci_save_state(adapter->pdev); 2323 return 0; 2324 ··· 2488 status = be_cmd_POST(adapter); 2489 if (status) 2490 goto ctrl_clean; 2491 - 2492 - status = be_cmd_reset_function(adapter); 2493 - if (status) 2494 - goto ctrl_clean; 2495 } 2496 2497 /* tell fw we're ready to fire cmds */ 2498 status = be_cmd_fw_init(adapter); 2499 if (status) 2500 goto ctrl_clean; 2501 2502 status = be_stats_init(adapter); 2503 if (status)
··· 2319 spin_lock_init(&adapter->mcc_lock); 2320 spin_lock_init(&adapter->mcc_cq_lock); 2321 2322 + init_completion(&adapter->flash_compl); 2323 pci_save_state(adapter->pdev); 2324 return 0; 2325 ··· 2487 status = be_cmd_POST(adapter); 2488 if (status) 2489 goto ctrl_clean; 2490 } 2491 2492 /* tell fw we're ready to fire cmds */ 2493 status = be_cmd_fw_init(adapter); 2494 if (status) 2495 goto ctrl_clean; 2496 + 2497 + if (be_physfn(adapter)) { 2498 + status = be_cmd_reset_function(adapter); 2499 + if (status) 2500 + goto ctrl_clean; 2501 + } 2502 2503 status = be_stats_init(adapter); 2504 if (status)
+2
drivers/net/bfin_mac.c
··· 1626 return 0; 1627 1628 out_err_mdiobus_register: 1629 mdiobus_free(miibus); 1630 out_err_alloc: 1631 peripheral_free_list(pin_req); ··· 1639 struct mii_bus *miibus = platform_get_drvdata(pdev); 1640 platform_set_drvdata(pdev, NULL); 1641 mdiobus_unregister(miibus); 1642 mdiobus_free(miibus); 1643 peripheral_free_list(pin_req); 1644 return 0;
··· 1626 return 0; 1627 1628 out_err_mdiobus_register: 1629 + kfree(miibus->irq); 1630 mdiobus_free(miibus); 1631 out_err_alloc: 1632 peripheral_free_list(pin_req); ··· 1638 struct mii_bus *miibus = platform_get_drvdata(pdev); 1639 platform_set_drvdata(pdev, NULL); 1640 mdiobus_unregister(miibus); 1641 + kfree(miibus->irq); 1642 mdiobus_free(miibus); 1643 peripheral_free_list(pin_req); 1644 return 0;
+2
drivers/net/can/sja1000/sja1000.c
··· 599 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | 600 CAN_CTRLMODE_BERR_REPORTING; 601 602 if (sizeof_priv) 603 priv->priv = (void *)priv + sizeof(struct sja1000_priv); 604
··· 599 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | 600 CAN_CTRLMODE_BERR_REPORTING; 601 602 + spin_lock_init(&priv->cmdreg_lock); 603 + 604 if (sizeof_priv) 605 priv->priv = (void *)priv + sizeof(struct sja1000_priv); 606
+21 -8
drivers/net/enic/enic_main.c
··· 1034 { 1035 struct vic_provinfo *vp; 1036 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1037 - unsigned short *uuid; 1038 char uuid_str[38]; 1039 - static char *uuid_fmt = "%04X%04X-%04X-%04X-%04X-%04X%04X%04X"; 1040 int err; 1041 1042 if (!name) ··· 1059 ETH_ALEN, mac); 1060 1061 if (instance_uuid) { 1062 - uuid = (unsigned short *)instance_uuid; 1063 sprintf(uuid_str, uuid_fmt, 1064 - uuid[0], uuid[1], uuid[2], uuid[3], 1065 - uuid[4], uuid[5], uuid[6], uuid[7]); 1066 vic_provinfo_add_tlv(vp, 1067 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1068 sizeof(uuid_str), uuid_str); 1069 } 1070 1071 if (host_uuid) { 1072 - uuid = (unsigned short *)host_uuid; 1073 sprintf(uuid_str, uuid_fmt, 1074 - uuid[0], uuid[1], uuid[2], uuid[3], 1075 - uuid[4], uuid[5], uuid[6], uuid[7]); 1076 vic_provinfo_add_tlv(vp, 1077 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1078 sizeof(uuid_str), uuid_str); ··· 1131 1132 switch (request) { 1133 case PORT_REQUEST_ASSOCIATE: 1134 1135 if (port[IFLA_PORT_PROFILE]) 1136 name = nla_data(port[IFLA_PORT_PROFILE]);
··· 1034 { 1035 struct vic_provinfo *vp; 1036 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1037 + u8 *uuid; 1038 char uuid_str[38]; 1039 + static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-" 1040 + "%02X%02X-%02X%02X%02X%02X%02X%02X"; 1041 int err; 1042 1043 if (!name) ··· 1058 ETH_ALEN, mac); 1059 1060 if (instance_uuid) { 1061 + uuid = instance_uuid; 1062 sprintf(uuid_str, uuid_fmt, 1063 + uuid[0], uuid[1], uuid[2], uuid[3], 1064 + uuid[4], uuid[5], uuid[6], uuid[7], 1065 + uuid[8], uuid[9], uuid[10], uuid[11], 1066 + uuid[12], uuid[13], uuid[14], uuid[15]); 1067 vic_provinfo_add_tlv(vp, 1068 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1069 sizeof(uuid_str), uuid_str); 1070 } 1071 1072 if (host_uuid) { 1073 + uuid = host_uuid; 1074 sprintf(uuid_str, uuid_fmt, 1075 + uuid[0], uuid[1], uuid[2], uuid[3], 1076 + uuid[4], uuid[5], uuid[6], uuid[7], 1077 + uuid[8], uuid[9], uuid[10], uuid[11], 1078 + uuid[12], uuid[13], uuid[14], uuid[15]); 1079 vic_provinfo_add_tlv(vp, 1080 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1081 sizeof(uuid_str), uuid_str); ··· 1126 1127 switch (request) { 1128 case PORT_REQUEST_ASSOCIATE: 1129 + 1130 + /* If the interface mac addr hasn't been assigned, 1131 + * assign a random mac addr before setting port- 1132 + * profile. 1133 + */ 1134 + 1135 + if (is_zero_ether_addr(netdev->dev_addr)) 1136 + random_ether_addr(netdev->dev_addr); 1137 1138 if (port[IFLA_PORT_PROFILE]) 1139 name = nla_data(port[IFLA_PORT_PROFILE]);
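The point of the enic change above is that a raw 16-byte UUID has to be printed byte by byte; the old code viewed the buffer as unsigned short, which byte-swaps every group on little-endian hosts and only covers half of the UUID. A minimal standalone sketch of the corrected pattern (the helper name and sample bytes below are invented for illustration):

    #include <stdio.h>

    /* Format 16 raw UUID bytes as the canonical 8-4-4-4-12 hex groups.
     * Indexing as bytes gives identical output on little- and big-endian
     * hosts; a u16 view would swap each pair on little-endian. */
    static void format_uuid(const unsigned char *uuid, char *out)
    {
            sprintf(out, "%02X%02X%02X%02X-%02X%02X-%02X%02X-"
                         "%02X%02X-%02X%02X%02X%02X%02X%02X",
                    uuid[0], uuid[1], uuid[2], uuid[3],
                    uuid[4], uuid[5], uuid[6], uuid[7],
                    uuid[8], uuid[9], uuid[10], uuid[11],
                    uuid[12], uuid[13], uuid[14], uuid[15]);
    }

    int main(void)
    {
            unsigned char uuid[16] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc,
                                       0xde, 0xf0, 0x01, 0x23, 0x45, 0x67,
                                       0x89, 0xab, 0xcd, 0xef };
            char buf[37];   /* 32 hex digits + 4 dashes + NUL */

            format_uuid(uuid, buf);
            printf("%s\n", buf);    /* 12345678-9ABC-DEF0-0123-456789ABCDEF */
            return 0;
    }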
+30 -4
drivers/net/ethoc.c
··· 174 * @iobase: pointer to I/O memory region 175 * @membase: pointer to buffer memory region 176 * @dma_alloc: dma allocated buffer size 177 * @num_tx: number of send buffers 178 * @cur_tx: last send buffer written 179 * @dty_tx: last buffer actually sent ··· 194 void __iomem *iobase; 195 void __iomem *membase; 196 int dma_alloc; 197 198 unsigned int num_tx; 199 unsigned int cur_tx; ··· 945 priv = netdev_priv(netdev); 946 priv->netdev = netdev; 947 priv->dma_alloc = 0; 948 949 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 950 resource_size(mmio)); ··· 1050 ret = register_netdev(netdev); 1051 if (ret < 0) { 1052 dev_err(&netdev->dev, "failed to register interface\n"); 1053 - goto error; 1054 } 1055 1056 goto out; 1057 1058 error: 1059 mdiobus_unregister(priv->mdio); 1060 free_mdio: 1061 kfree(priv->mdio->irq); 1062 mdiobus_free(priv->mdio); 1063 free: 1064 - if (priv->dma_alloc) 1065 - dma_free_coherent(NULL, priv->dma_alloc, priv->membase, 1066 - netdev->mem_start); 1067 free_netdev(netdev); 1068 out: 1069 return ret; ··· 1095 platform_set_drvdata(pdev, NULL); 1096 1097 if (netdev) { 1098 phy_disconnect(priv->phy); 1099 priv->phy = NULL; 1100 ··· 1107 if (priv->dma_alloc) 1108 dma_free_coherent(NULL, priv->dma_alloc, priv->membase, 1109 netdev->mem_start); 1110 unregister_netdev(netdev); 1111 free_netdev(netdev); 1112 }
··· 174 * @iobase: pointer to I/O memory region 175 * @membase: pointer to buffer memory region 176 * @dma_alloc: dma allocated buffer size 177 + * @io_region_size: I/O memory region size 178 * @num_tx: number of send buffers 179 * @cur_tx: last send buffer written 180 * @dty_tx: last buffer actually sent ··· 193 void __iomem *iobase; 194 void __iomem *membase; 195 int dma_alloc; 196 + resource_size_t io_region_size; 197 198 unsigned int num_tx; 199 unsigned int cur_tx; ··· 943 priv = netdev_priv(netdev); 944 priv->netdev = netdev; 945 priv->dma_alloc = 0; 946 + priv->io_region_size = mmio->end - mmio->start + 1; 947 948 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 949 resource_size(mmio)); ··· 1047 ret = register_netdev(netdev); 1048 if (ret < 0) { 1049 dev_err(&netdev->dev, "failed to register interface\n"); 1050 + goto error2; 1051 } 1052 1053 goto out; 1054 1055 + error2: 1056 + netif_napi_del(&priv->napi); 1057 error: 1058 mdiobus_unregister(priv->mdio); 1059 free_mdio: 1060 kfree(priv->mdio->irq); 1061 mdiobus_free(priv->mdio); 1062 free: 1063 + if (priv) { 1064 + if (priv->dma_alloc) 1065 + dma_free_coherent(NULL, priv->dma_alloc, priv->membase, 1066 + netdev->mem_start); 1067 + else if (priv->membase) 1068 + devm_iounmap(&pdev->dev, priv->membase); 1069 + if (priv->iobase) 1070 + devm_iounmap(&pdev->dev, priv->iobase); 1071 + } 1072 + if (mem) 1073 + devm_release_mem_region(&pdev->dev, mem->start, 1074 + mem->end - mem->start + 1); 1075 + if (mmio) 1076 + devm_release_mem_region(&pdev->dev, mmio->start, 1077 + mmio->end - mmio->start + 1); 1078 free_netdev(netdev); 1079 out: 1080 return ret; ··· 1078 platform_set_drvdata(pdev, NULL); 1079 1080 if (netdev) { 1081 + netif_napi_del(&priv->napi); 1082 phy_disconnect(priv->phy); 1083 priv->phy = NULL; 1084 ··· 1089 if (priv->dma_alloc) 1090 dma_free_coherent(NULL, priv->dma_alloc, priv->membase, 1091 netdev->mem_start); 1092 + else { 1093 + devm_iounmap(&pdev->dev, priv->membase); 1094 + devm_release_mem_region(&pdev->dev, netdev->mem_start, 1095 + netdev->mem_end - netdev->mem_start + 1); 1096 + } 1097 + devm_iounmap(&pdev->dev, priv->iobase); 1098 + devm_release_mem_region(&pdev->dev, netdev->base_addr, 1099 + priv->io_region_size); 1100 unregister_netdev(netdev); 1101 free_netdev(netdev); 1102 }
+22
drivers/net/fec.c
··· 41 #include <linux/clk.h> 42 #include <linux/platform_device.h> 43 #include <linux/phy.h> 44 45 #include <asm/cacheflush.h> 46 ··· 183 struct phy_device *phy_dev; 184 int mii_timeout; 185 uint phy_speed; 186 int index; 187 int link; 188 int full_duplex; ··· 1193 /* Set MII speed */ 1194 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1195 1196 /* And last, enable the transmit and receive processing */ 1197 writel(2, fep->hwp + FEC_ECNTRL); 1198 writel(0, fep->hwp + FEC_R_DES_ACTIVE); ··· 1243 fec_probe(struct platform_device *pdev) 1244 { 1245 struct fec_enet_private *fep; 1246 struct net_device *ndev; 1247 int i, irq, ret = 0; 1248 struct resource *r; ··· 1276 } 1277 1278 platform_set_drvdata(pdev, ndev); 1279 1280 /* This device has up to three irqs on some platforms */ 1281 for (i = 0; i < 3; i++) {
··· 41 #include <linux/clk.h> 42 #include <linux/platform_device.h> 43 #include <linux/phy.h> 44 + #include <linux/fec.h> 45 46 #include <asm/cacheflush.h> 47 ··· 182 struct phy_device *phy_dev; 183 int mii_timeout; 184 uint phy_speed; 185 + phy_interface_t phy_interface; 186 int index; 187 int link; 188 int full_duplex; ··· 1191 /* Set MII speed */ 1192 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1193 1194 + #ifdef FEC_MIIGSK_ENR 1195 + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { 1196 + /* disable the gasket and wait */ 1197 + writel(0, fep->hwp + FEC_MIIGSK_ENR); 1198 + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1199 + udelay(1); 1200 + 1201 + /* configure the gasket: RMII, 50 MHz, no loopback, no echo */ 1202 + writel(1, fep->hwp + FEC_MIIGSK_CFGR); 1203 + 1204 + /* re-enable the gasket */ 1205 + writel(2, fep->hwp + FEC_MIIGSK_ENR); 1206 + } 1207 + #endif 1208 + 1209 /* And last, enable the transmit and receive processing */ 1210 writel(2, fep->hwp + FEC_ECNTRL); 1211 writel(0, fep->hwp + FEC_R_DES_ACTIVE); ··· 1226 fec_probe(struct platform_device *pdev) 1227 { 1228 struct fec_enet_private *fep; 1229 + struct fec_platform_data *pdata; 1230 struct net_device *ndev; 1231 int i, irq, ret = 0; 1232 struct resource *r; ··· 1258 } 1259 1260 platform_set_drvdata(pdev, ndev); 1261 + 1262 + pdata = pdev->dev.platform_data; 1263 + if (pdata) 1264 + fep->phy_interface = pdata->phy; 1265 1266 /* This device has up to three irqs on some platforms */ 1267 for (i = 0; i < 3; i++) {
+2
drivers/net/fec.h
··· 43 #define FEC_R_DES_START 0x180 /* Receive descriptor ring */ 44 #define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ 45 #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ 46 47 #else 48
··· 43 #define FEC_R_DES_START 0x180 /* Receive descriptor ring */ 44 #define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ 45 #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ 46 + #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ 47 + #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ 48 49 #else 50
+6 -2
drivers/net/irda/bfin_sir.c
··· 107 case 57600: 108 case 115200: 109 110 - quot = (port->clk + (8 * speed)) / (16 * speed)\ 111 - - ANOMALY_05000230; 112 113 do { 114 udelay(utime);
··· 107 case 57600: 108 case 115200: 109 110 + /* 111 + * IRDA is not affected by anomaly 05000230, so there is no 112 + * need to tweak the divisor like the UART driver (which will 113 + * slightly speed up the baud rate on us). 114 + */ 115 + quot = (port->clk + (8 * speed)) / (16 * speed); 116 117 do { 118 udelay(utime);
+3
drivers/net/ixgbe/ixgbe.h
··· 360 u32 flags2; 361 #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) 362 #define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) 363 /* default to trying for four seconds */ 364 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) 365 ··· 408 u16 eeprom_version; 409 410 int node; 411 412 /* SR-IOV */ 413 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
··· 360 u32 flags2; 361 #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) 362 #define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) 363 + #define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) 364 /* default to trying for four seconds */ 365 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) 366 ··· 407 u16 eeprom_version; 408 409 int node; 410 + struct work_struct check_overtemp_task; 411 + u32 interrupt_event; 412 413 /* SR-IOV */ 414 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+1
drivers/net/ixgbe/ixgbe_82598.c
··· 1236 .setup_link = &ixgbe_setup_phy_link_generic, 1237 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 1238 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, 1239 }; 1240 1241 struct ixgbe_info ixgbe_82598_info = {
··· 1236 .setup_link = &ixgbe_setup_phy_link_generic, 1237 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 1238 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, 1239 + .check_overtemp = &ixgbe_tn_check_overtemp, 1240 }; 1241 1242 struct ixgbe_info ixgbe_82598_info = {
+1
drivers/net/ixgbe/ixgbe_82599.c
··· 2395 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2396 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2397 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2398 }; 2399 2400 struct ixgbe_info ixgbe_82599_info = {
··· 2395 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2396 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2397 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2398 + .check_overtemp = &ixgbe_tn_check_overtemp, 2399 }; 2400 2401 struct ixgbe_info ixgbe_82599_info = {
+69
drivers/net/ixgbe/ixgbe_main.c
··· 108 board_82599 }, 109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 110 board_82599 }, 111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 112 board_82599 }, 113 ··· 1620 } 1621 } 1622 1623 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1624 { 1625 struct ixgbe_hw *hw = &adapter->hw; ··· 1733 1734 if (hw->mac.type == ixgbe_mac_82599EB) { 1735 ixgbe_check_sfp_event(adapter, eicr); 1736 1737 /* Handle Flow Director Full threshold interrupt */ 1738 if (eicr & IXGBE_EICR_FLOW_DIR) { ··· 2238 u32 mask; 2239 2240 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 2241 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2242 mask |= IXGBE_EIMS_GPI_SDP1; 2243 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ··· 2300 ixgbe_check_sfp_event(adapter, eicr); 2301 2302 ixgbe_check_fan_failure(adapter, eicr); 2303 2304 if (napi_schedule_prep(&(q_vector->napi))) { 2305 adapter->tx_ring[0]->total_packets = 0; ··· 3318 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3319 } 3320 3321 /* Enable fan failure interrupt if media type is copper */ 3322 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3323 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); ··· 3725 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3726 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3727 cancel_work_sync(&adapter->fdir_reinit_task); 3728 3729 /* disable transmits in the hardware now that interrupts are off */ 3730 for (i = 0; i < adapter->num_tx_queues; i++) { ··· 4708 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 4709 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4710 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 4711 if (dev->features & NETIF_F_NTUPLE) { 4712 /* Flow Director perfect filter enabled */ 4713 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ··· 6626 } 6627 6628 /* reset_hw fills in the perm_addr as well */ 6629 err = hw->mac.ops.reset_hw(hw); 6630 if (err == IXGBE_ERR_SFP_NOT_PRESENT && 6631 hw->mac.type == ixgbe_mac_82598EB) { 6632 /* ··· 6797 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 6798 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); 6799 6800 #ifdef CONFIG_IXGBE_DCA 6801 if (dca_add_requester(&pdev->dev) == 0) { 6802 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
··· 108 board_82599 }, 109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 110 board_82599 }, 111 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 112 + board_82599 }, 113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 114 board_82599 }, 115 ··· 1618 } 1619 } 1620 1621 + /** 1622 + * ixgbe_check_overtemp_task - worker thread to check over temperature 1623 + * @work: pointer to work_struct containing our data 1624 + **/ 1625 + static void ixgbe_check_overtemp_task(struct work_struct *work) 1626 + { 1627 + struct ixgbe_adapter *adapter = container_of(work, 1628 + struct ixgbe_adapter, 1629 + check_overtemp_task); 1630 + struct ixgbe_hw *hw = &adapter->hw; 1631 + u32 eicr = adapter->interrupt_event; 1632 + 1633 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 1634 + switch (hw->device_id) { 1635 + case IXGBE_DEV_ID_82599_T3_LOM: { 1636 + u32 autoneg; 1637 + bool link_up = false; 1638 + 1639 + if (hw->mac.ops.check_link) 1640 + hw->mac.ops.check_link(hw, &autoneg, &link_up, false); 1641 + 1642 + if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || 1643 + (eicr & IXGBE_EICR_LSC)) 1644 + /* Check if this is due to overtemp */ 1645 + if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) 1646 + break; 1647 + } 1648 + return; 1649 + default: 1650 + if (!(eicr & IXGBE_EICR_GPI_SDP0)) 1651 + return; 1652 + break; 1653 + } 1654 + DPRINTK(DRV, ERR, "Network adapter has been stopped because it " 1655 + "has over heated. Restart the computer. If the problem " 1656 + "persists, power off the system and replace the " 1657 + "adapter\n"); 1658 + /* write to clear the interrupt */ 1659 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); 1660 + } 1661 + } 1662 + 1663 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1664 { 1665 struct ixgbe_hw *hw = &adapter->hw; ··· 1689 1690 if (hw->mac.type == ixgbe_mac_82599EB) { 1691 ixgbe_check_sfp_event(adapter, eicr); 1692 + adapter->interrupt_event = eicr; 1693 + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 1694 + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) 1695 + schedule_work(&adapter->check_overtemp_task); 1696 1697 /* Handle Flow Director Full threshold interrupt */ 1698 if (eicr & IXGBE_EICR_FLOW_DIR) { ··· 2190 u32 mask; 2191 2192 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 2193 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 2194 + mask |= IXGBE_EIMS_GPI_SDP0; 2195 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2196 mask |= IXGBE_EIMS_GPI_SDP1; 2197 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ··· 2250 ixgbe_check_sfp_event(adapter, eicr); 2251 2252 ixgbe_check_fan_failure(adapter, eicr); 2253 + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 2254 + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) 2255 + schedule_work(&adapter->check_overtemp_task); 2256 2257 if (napi_schedule_prep(&(q_vector->napi))) { 2258 adapter->tx_ring[0]->total_packets = 0; ··· 3265 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3266 } 3267 3268 + /* Enable Thermal over heat sensor interrupt */ 3269 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 3270 + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3271 + gpie |= IXGBE_SDP0_GPIEN; 3272 + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3273 + } 3274 + 3275 /* Enable fan failure interrupt if media type is copper */ 3276 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3277 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); ··· 3665 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3666 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3667 cancel_work_sync(&adapter->fdir_reinit_task); 3668 + 3669 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 3670 + cancel_work_sync(&adapter->check_overtemp_task); 3671 3672 /* disable transmits in the hardware now that interrupts are off */ 3673 for (i = 0; i < adapter->num_tx_queues; i++) { ··· 4645 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 4646 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4647 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 4648 + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 4649 + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 4650 if (dev->features & NETIF_F_NTUPLE) { 4651 /* Flow Director perfect filter enabled */ 4652 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ··· 6561 } 6562 6563 /* reset_hw fills in the perm_addr as well */ 6564 + hw->phy.reset_if_overtemp = true; 6565 err = hw->mac.ops.reset_hw(hw); 6566 + hw->phy.reset_if_overtemp = false; 6567 if (err == IXGBE_ERR_SFP_NOT_PRESENT && 6568 hw->mac.type == ixgbe_mac_82598EB) { 6569 /* ··· 6730 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 6731 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); 6732 6733 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 6734 + INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task); 6735 #ifdef CONFIG_IXGBE_DCA 6736 if (dca_add_requester(&pdev->dev) == 0) { 6737 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+30
drivers/net/ixgbe/ixgbe_phy.c
··· 135 **/ 136 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 137 { 138 /* 139 * Perform soft PHY reset to the PHY_XS. 140 * This will cause a soft reset to the PHY ··· 1350 return status; 1351 } 1352
··· 135 **/ 136 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 137 { 138 + /* Don't reset PHY if it's shut down due to overtemp. */ 139 + if (!hw->phy.reset_if_overtemp && 140 + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 141 + return 0; 142 + 143 /* 144 * Perform soft PHY reset to the PHY_XS. 145 * This will cause a soft reset to the PHY ··· 1345 return status; 1346 } 1347 1348 + /** 1349 + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 1350 + * @hw: pointer to hardware structure 1351 + * 1352 + * Checks if the LASI temp alarm status was triggered due to overtemp 1353 + **/ 1354 + s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) 1355 + { 1356 + s32 status = 0; 1357 + u16 phy_data = 0; 1358 + 1359 + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) 1360 + goto out; 1361 + 1362 + /* Check that the LASI temp alarm status was triggered */ 1363 + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, 1364 + MDIO_MMD_PMAPMD, &phy_data); 1365 + 1366 + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) 1367 + goto out; 1368 + 1369 + status = IXGBE_ERR_OVERTEMP; 1370 + out: 1371 + return status; 1372 + }
+3
drivers/net/ixgbe/ixgbe_phy.h
··· 80 #define IXGBE_I2C_T_SU_STO 4 81 #define IXGBE_I2C_T_BUF 5 82 83 84 s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); 85 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); ··· 108 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 109 u16 *list_offset, 110 u16 *data_offset); 111 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 112 u8 dev_addr, u8 *data); 113 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
··· 80 #define IXGBE_I2C_T_SU_STO 4 81 #define IXGBE_I2C_T_BUF 5 82 83 + #define IXGBE_TN_LASI_STATUS_REG 0x9005 84 + #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 85 86 s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); 87 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); ··· 106 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 107 u16 *list_offset, 108 u16 *data_offset); 109 + s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); 110 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 111 u8 dev_addr, u8 *data); 112 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+4
drivers/net/ixgbe/ixgbe_type.h
··· 51 #define IXGBE_DEV_ID_82599_KX4 0x10F7 52 #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 53 #define IXGBE_DEV_ID_82599_KR 0x1517 54 #define IXGBE_DEV_ID_82599_CX4 0x10F9 55 #define IXGBE_DEV_ID_82599_SFP 0x10FB 56 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 ··· 2471 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); 2472 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); 2473 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); 2474 }; 2475 2476 struct ixgbe_eeprom_info { ··· 2520 enum ixgbe_smart_speed smart_speed; 2521 bool smart_speed_active; 2522 bool multispeed_fiber; 2523 }; 2524 2525 #include "ixgbe_mbx.h" ··· 2608 #define IXGBE_ERR_FDIR_REINIT_FAILED -23 2609 #define IXGBE_ERR_EEPROM_VERSION -24 2610 #define IXGBE_ERR_NO_SPACE -25 2611 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2612 2613 #endif /* _IXGBE_TYPE_H_ */
··· 51 #define IXGBE_DEV_ID_82599_KX4 0x10F7 52 #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 53 #define IXGBE_DEV_ID_82599_KR 0x1517 54 + #define IXGBE_DEV_ID_82599_T3_LOM 0x151C 55 #define IXGBE_DEV_ID_82599_CX4 0x10F9 56 #define IXGBE_DEV_ID_82599_SFP 0x10FB 57 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 ··· 2470 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); 2471 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); 2472 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); 2473 + s32 (*check_overtemp)(struct ixgbe_hw *); 2474 }; 2475 2476 struct ixgbe_eeprom_info { ··· 2518 enum ixgbe_smart_speed smart_speed; 2519 bool smart_speed_active; 2520 bool multispeed_fiber; 2521 + bool reset_if_overtemp; 2522 }; 2523 2524 #include "ixgbe_mbx.h" ··· 2605 #define IXGBE_ERR_FDIR_REINIT_FAILED -23 2606 #define IXGBE_ERR_EEPROM_VERSION -24 2607 #define IXGBE_ERR_NO_SPACE -25 2608 + #define IXGBE_ERR_OVERTEMP -26 2609 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2610 2611 #endif /* _IXGBE_TYPE_H_ */
+8 -1
drivers/net/macvlan.c
··· 634 635 err = register_netdevice(dev); 636 if (err < 0) 637 - return err; 638 639 list_add_tail(&vlan->list, &port->vlans); 640 netif_stacked_transfer_operstate(lowerdev, dev); 641 return 0; 642 } 643 EXPORT_SYMBOL_GPL(macvlan_common_newlink); 644
··· 634 635 err = register_netdevice(dev); 636 if (err < 0) 637 + goto destroy_port; 638 639 list_add_tail(&vlan->list, &port->vlans); 640 netif_stacked_transfer_operstate(lowerdev, dev); 641 + 642 return 0; 643 + 644 + destroy_port: 645 + if (list_empty(&port->vlans)) 646 + macvlan_port_destroy(lowerdev); 647 + 648 + return err; 649 } 650 EXPORT_SYMBOL_GPL(macvlan_common_newlink); 651
+1
drivers/net/pppoe.c
··· 289 struct pppoe_net *pn; 290 int i; 291 292 write_lock_bh(&pn->hash_lock); 293 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 294 struct pppox_sock *po = pn->hash_table[i];
··· 289 struct pppoe_net *pn; 290 int i; 291 292 + pn = pppoe_pernet(dev_net(dev)); 293 write_lock_bh(&pn->hash_lock); 294 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 295 struct pppox_sock *po = pn->hash_table[i];
+3
drivers/net/sh_eth.c
··· 1294 /* remove mdio bus info from net_device */ 1295 dev_set_drvdata(&ndev->dev, NULL); 1296 1297 /* free bitbang info */ 1298 free_mdio_bitbang(bus); 1299
··· 1294 /* remove mdio bus info from net_device */ 1295 dev_set_drvdata(&ndev->dev, NULL); 1296 1297 + /* free interrupts memory */ 1298 + kfree(bus->irq); 1299 + 1300 /* free bitbang info */ 1301 free_mdio_bitbang(bus); 1302
+2
drivers/net/tun.c
··· 526 struct sk_buff *skb; 527 int err; 528 529 /* Under a page? Don't bother with paged skb. */ 530 if (prepad + len < PAGE_SIZE || !linear) 531 linear = len;
··· 526 struct sk_buff *skb; 527 int err; 528 529 + sock_update_classid(sk); 530 + 531 /* Under a page? Don't bother with paged skb. */ 532 if (prepad + len < PAGE_SIZE || !linear) 533 linear = len;
+2 -2
drivers/net/usb/asix.c
··· 322 size = (u16) (header & 0x0000ffff); 323 324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) { 325 - u8 alignment = (u32)skb->data & 0x3; 326 if (alignment != 0x2) { 327 /* 328 * not 16bit aligned so use the room provided by ··· 351 } 352 ax_skb = skb_clone(skb, GFP_ATOMIC); 353 if (ax_skb) { 354 - u8 alignment = (u32)packet & 0x3; 355 ax_skb->len = size; 356 357 if (alignment != 0x2) {
··· 322 size = (u16) (header & 0x0000ffff); 323 324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) { 325 + u8 alignment = (unsigned long)skb->data & 0x3; 326 if (alignment != 0x2) { 327 /* 328 * not 16bit aligned so use the room provided by ··· 351 } 352 ax_skb = skb_clone(skb, GFP_ATOMIC); 353 if (ax_skb) { 354 + u8 alignment = (unsigned long)packet & 0x3; 355 ax_skb->len = size; 356 357 if (alignment != 0x2) {
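For context on the asix change above: casting a pointer to u32 truncates it on 64-bit targets, so the old idiom drew compiler warnings and was wrong in principle even though the low two bits happen to survive. A small standalone sketch of the portable form (variable names invented here; the kernel cast uses unsigned long, which uintptr_t matches on Linux):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            char buf[8];
            char *p = buf + 2;

            /* unsigned long (or uintptr_t) is wide enough to hold the
             * whole pointer value, so masking the low bits is a
             * well-defined alignment test on both 32- and 64-bit
             * targets; casting to u32 would discard the upper half
             * of a 64-bit address. */
            unsigned long alignment = (unsigned long)p & 0x3;

            printf("low two address bits: %lu\n", alignment);
            return 0;
    }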
+3
drivers/net/usb/hso.c
··· 475 {USB_DEVICE(0x0af0, 0x8302)}, 476 {USB_DEVICE(0x0af0, 0x8304)}, 477 {USB_DEVICE(0x0af0, 0x8400)}, 478 {USB_DEVICE(0x0af0, 0xd035)}, 479 {USB_DEVICE(0x0af0, 0xd055)}, 480 {USB_DEVICE(0x0af0, 0xd155)},
··· 475 {USB_DEVICE(0x0af0, 0x8302)}, 476 {USB_DEVICE(0x0af0, 0x8304)}, 477 {USB_DEVICE(0x0af0, 0x8400)}, 478 + {USB_DEVICE(0x0af0, 0x8600)}, 479 + {USB_DEVICE(0x0af0, 0x8800)}, 480 + {USB_DEVICE(0x0af0, 0x8900)}, 481 {USB_DEVICE(0x0af0, 0xd035)}, 482 {USB_DEVICE(0x0af0, 0xd055)}, 483 {USB_DEVICE(0x0af0, 0xd155)},
+2 -2
drivers/net/wimax/i2400m/rx.c
··· 1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; 1028 1029 spin_lock_irqsave(&i2400m->rx_lock, flags); 1030 - roq = &i2400m->rx_roq[ro_cin]; 1031 - if (roq == NULL) { 1032 kfree_skb(skb); /* rx_roq is already destroyed */ 1033 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1034 goto error; 1035 } 1036 kref_get(&i2400m->rx_roq_refcount); 1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1038
··· 1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; 1028 1029 spin_lock_irqsave(&i2400m->rx_lock, flags); 1030 + if (i2400m->rx_roq == NULL) { 1031 kfree_skb(skb); /* rx_roq is already destroyed */ 1032 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1033 goto error; 1034 } 1035 + roq = &i2400m->rx_roq[ro_cin]; 1036 kref_get(&i2400m->rx_roq_refcount); 1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1038
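The i2400m fix above deserves a note: computing &i2400m->rx_roq[ro_cin] first and then comparing the result against NULL can never detect a destroyed array, since element-address arithmetic on a null base is undefined behaviour and, in practice, nonzero for any nonzero index. A hedged standalone sketch of the check-base-then-index ordering (types and names invented for the example):

    #include <stdio.h>

    struct roq { int sn; };

    static int use_roq(struct roq *base, unsigned int idx)
    {
            /* Wrong: struct roq *q = &base[idx]; if (q == NULL) ...
             * That test can't fire for idx > 0, and the arithmetic on
             * a null base is already undefined behaviour. */

            /* Right: validate the base pointer before indexing. */
            if (base == NULL)
                    return -1;      /* array already destroyed */

            return base[idx].sn;
    }

    int main(void)
    {
            printf("%d\n", use_roq(NULL, 3));       /* prints -1 */
            return 0;
    }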
+4 -3
drivers/net/wireless/ath/ath5k/base.c
··· 1214 struct ath5k_hw *ah = sc->ah; 1215 struct sk_buff *skb = bf->skb; 1216 struct ath5k_desc *ds; 1217 1218 if (!skb) { 1219 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); ··· 1241 ds = bf->desc; 1242 ds->ds_link = bf->daddr; /* link to self */ 1243 ds->ds_data = bf->skbaddr; 1244 - ah->ah_setup_rx_desc(ah, ds, 1245 - skb_tailroom(skb), /* buffer size */ 1246 - 0); 1247 1248 if (sc->rxlink != NULL) 1249 *sc->rxlink = bf->daddr;
··· 1214 struct ath5k_hw *ah = sc->ah; 1215 struct sk_buff *skb = bf->skb; 1216 struct ath5k_desc *ds; 1217 + int ret; 1218 1219 if (!skb) { 1220 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); ··· 1240 ds = bf->desc; 1241 ds->ds_link = bf->daddr; /* link to self */ 1242 ds->ds_data = bf->skbaddr; 1243 + ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); 1244 + if (ret) 1245 + return ret; 1246 1247 if (sc->rxlink != NULL) 1248 *sc->rxlink = bf->daddr;
+13 -62
drivers/net/wireless/ath/ath9k/beacon.c
··· 76 ds = bf->bf_desc; 77 flags = ATH9K_TXDESC_NOACK; 78 79 - if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 80 - (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) && 81 - (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 82 - ds->ds_link = bf->bf_daddr; /* self-linked */ 83 - flags |= ATH9K_TXDESC_VEOL; 84 - /* Let hardware handle antenna switching. */ 85 - antenna = 0; 86 - } else { 87 - ds->ds_link = 0; 88 - /* 89 - * Switch antenna every beacon. 90 - * Should only switch every beacon period, not for every SWBA 91 - * XXX assumes two antennae 92 - */ 93 - antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1); 94 - } 95 96 sband = &sc->sbands[common->hw->conf.channel->band]; 97 rate = sband->bitrates[rateidx].hw_value; ··· 206 return bf; 207 } 208 209 - /* 210 - * Startup beacon transmission for adhoc mode when they are sent entirely 211 - * by the hardware using the self-linked descriptor + veol trick. 212 - */ 213 - static void ath_beacon_start_adhoc(struct ath_softc *sc, 214 - struct ieee80211_vif *vif) 215 - { 216 - struct ath_hw *ah = sc->sc_ah; 217 - struct ath_common *common = ath9k_hw_common(ah); 218 - struct ath_buf *bf; 219 - struct ath_vif *avp; 220 - struct sk_buff *skb; 221 - 222 - avp = (void *)vif->drv_priv; 223 - 224 - if (avp->av_bcbuf == NULL) 225 - return; 226 - 227 - bf = avp->av_bcbuf; 228 - skb = bf->bf_mpdu; 229 - 230 - ath_beacon_setup(sc, avp, bf, 0); 231 - 232 - /* NB: caller is known to have already stopped tx dma */ 233 - ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr); 234 - ath9k_hw_txstart(ah, sc->beacon.beaconq); 235 - ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n", 236 - sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc); 237 - } 238 - 239 int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) 240 { 241 struct ath_softc *sc = aphy->sc; ··· 226 list_del(&avp->av_bcbuf->list); 227 228 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || 229 - !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 230 int slot; 231 /* 232 * Assign the vif to a beacon xmit slot. As ··· 236 avp->av_bslot = 0; 237 for (slot = 0; slot < ATH_BCBUF; slot++) 238 if (sc->beacon.bslot[slot] == NULL) { 239 - /* 240 - * XXX hack, space out slots to better 241 - * deal with misses 242 - */ 243 - if (slot+1 < ATH_BCBUF && 244 - sc->beacon.bslot[slot+1] == NULL) { 245 - avp->av_bslot = slot+1; 246 - break; 247 - } 248 avp->av_bslot = slot; 249 /* NB: keep looking for a double slot */ 250 } 251 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); 252 sc->beacon.bslot[avp->av_bslot] = vif; ··· 677 * self-linked tx descriptor and let the hardware deal with things. 678 */ 679 intval |= ATH9K_BEACON_ENA; 680 - if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) 681 - ah->imask |= ATH9K_INT_SWBA; 682 683 ath_beaconq_config(sc); 684 ··· 687 ath9k_beacon_init(sc, nexttbtt, intval); 688 sc->beacon.bmisscnt = 0; 689 ath9k_hw_set_interrupts(ah, ah->imask); 690 - 691 - /* FIXME: Handle properly when vif is NULL */ 692 - if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL) 693 - ath_beacon_start_adhoc(sc, vif); 694 } 695 696 void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
··· 76 ds = bf->bf_desc; 77 flags = ATH9K_TXDESC_NOACK; 78 79 + ds->ds_link = 0; 80 + /* 81 + * Switch antenna every beacon. 82 + * Should only switch every beacon period, not for every SWBA 83 + * XXX assumes two antennae 84 + */ 85 + antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1); 86 87 sband = &sc->sbands[common->hw->conf.channel->band]; 88 rate = sband->bitrates[rateidx].hw_value; ··· 215 return bf; 216 } 217 218 int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) 219 { 220 struct ath_softc *sc = aphy->sc; ··· 265 list_del(&avp->av_bcbuf->list); 266 267 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || 268 + sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC || 269 + sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) { 270 int slot; 271 /* 272 * Assign the vif to a beacon xmit slot. As ··· 274 avp->av_bslot = 0; 275 for (slot = 0; slot < ATH_BCBUF; slot++) 276 if (sc->beacon.bslot[slot] == NULL) { 277 avp->av_bslot = slot; 278 + 279 /* NB: keep looking for a double slot */ 280 + if (slot == 0 || !sc->beacon.bslot[slot-1]) 281 + break; 282 } 283 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); 284 sc->beacon.bslot[avp->av_bslot] = vif; ··· 721 * self-linked tx descriptor and let the hardware deal with things. 722 */ 723 intval |= ATH9K_BEACON_ENA; 724 + ah->imask |= ATH9K_INT_SWBA; 725 726 ath_beaconq_config(sc); 727 ··· 732 ath9k_beacon_init(sc, nexttbtt, intval); 733 sc->beacon.bmisscnt = 0; 734 ath9k_hw_set_interrupts(ah, ah->imask); 735 } 736 737 void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
+8 -2
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 107 static void hif_usb_tx_cb(struct urb *urb) 108 { 109 struct tx_buf *tx_buf = (struct tx_buf *) urb->context; 110 - struct hif_device_usb *hif_dev = tx_buf->hif_dev; 111 struct sk_buff *skb; 112 113 - if (!hif_dev || !tx_buf) 114 return; 115 116 switch (urb->status) { 117 case 0: ··· 609 610 return 0; 611 err: 612 ath9k_hif_usb_dealloc_tx_urbs(hif_dev); 613 return -ENOMEM; 614 }
··· 107 static void hif_usb_tx_cb(struct urb *urb) 108 { 109 struct tx_buf *tx_buf = (struct tx_buf *) urb->context; 110 + struct hif_device_usb *hif_dev; 111 struct sk_buff *skb; 112 113 + if (!tx_buf || !tx_buf->hif_dev) 114 return; 115 + 116 + hif_dev = tx_buf->hif_dev; 117 118 switch (urb->status) { 119 case 0: ··· 607 608 return 0; 609 err: 610 + if (tx_buf) { 611 + kfree(tx_buf->buf); 612 + kfree(tx_buf); 613 + } 614 ath9k_hif_usb_dealloc_tx_urbs(hif_dev); 615 return -ENOMEM; 616 }
+1
drivers/net/wireless/ath/ath9k/htc.h
··· 23 #include <linux/skbuff.h> 24 #include <linux/netdevice.h> 25 #include <linux/leds.h> 26 #include <net/mac80211.h> 27 28 #include "common.h"
··· 23 #include <linux/skbuff.h> 24 #include <linux/netdevice.h> 25 #include <linux/leds.h> 26 + #include <linux/slab.h> 27 #include <net/mac80211.h> 28 29 #include "common.h"
-1
drivers/net/wireless/ath/ath9k/pci.c
··· 28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ 29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 31 - { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ 32 { 0 } 33 }; 34
··· 28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ 29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 31 { 0 } 32 }; 33
+12 -5
drivers/net/wireless/ath/ath9k/recv.c
··· 19 20 #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 21 22 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 23 struct ieee80211_hdr *hdr) 24 { ··· 622 hdr = (struct ieee80211_hdr *)skb->data; 623 624 /* Process Beacon and CAB receive in PS state */ 625 - if ((sc->ps_flags & PS_WAIT_FOR_BEACON) && 626 - ieee80211_is_beacon(hdr->frame_control)) 627 ath_rx_ps_beacon(sc, skb); 628 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 629 (ieee80211_is_data(hdr->frame_control) || ··· 938 sc->rx.rxotherant = 0; 939 } 940 941 - if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON | 942 - PS_WAIT_FOR_CAB | 943 - PS_WAIT_FOR_PSPOLL_DATA))) 944 ath_rx_ps(sc, skb); 945 946 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
··· 19 20 #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 21 22 + static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 23 + { 24 + return sc->ps_enabled && 25 + (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); 26 + } 27 + 28 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 29 struct ieee80211_hdr *hdr) 30 { ··· 616 hdr = (struct ieee80211_hdr *)skb->data; 617 618 /* Process Beacon and CAB receive in PS state */ 619 + if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 620 + && ieee80211_is_beacon(hdr->frame_control)) 621 ath_rx_ps_beacon(sc, skb); 622 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 623 (ieee80211_is_data(hdr->frame_control) || ··· 932 sc->rx.rxotherant = 0; 933 } 934 935 + if (unlikely(ath9k_check_auto_sleep(sc) || 936 + (sc->ps_flags & (PS_WAIT_FOR_BEACON | 937 + PS_WAIT_FOR_CAB | 938 + PS_WAIT_FOR_PSPOLL_DATA)))) 939 ath_rx_ps(sc, skb); 940 941 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
+1
drivers/net/wireless/iwlwifi/iwl-agn-ict.c
··· 30 #include <linux/module.h> 31 #include <linux/etherdevice.h> 32 #include <linux/sched.h> 33 #include <net/mac80211.h> 34 35 #include "iwl-dev.h"
··· 30 #include <linux/module.h> 31 #include <linux/etherdevice.h> 32 #include <linux/sched.h> 33 + #include <linux/gfp.h> 34 #include <net/mac80211.h> 35 36 #include "iwl-dev.h"
+18 -3
drivers/net/wireless/iwlwifi/iwl-scan.c
··· 376 377 mutex_lock(&priv->mutex); 378 379 if (!iwl_is_ready_rf(priv)) { 380 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); 381 goto unlock; ··· 502 { 503 struct iwl_priv *priv = 504 container_of(work, struct iwl_priv, scan_completed); 505 506 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); 507 508 cancel_delayed_work(&priv->scan_check); 509 510 - if (!priv->is_internal_short_scan) 511 - ieee80211_scan_completed(priv->hw, false); 512 - else { 513 priv->is_internal_short_scan = false; 514 IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); 515 } 516 517 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 518 return;
··· 376 377 mutex_lock(&priv->mutex); 378 379 + if (priv->is_internal_short_scan == true) { 380 + IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); 381 + goto unlock; 382 + } 383 + 384 if (!iwl_is_ready_rf(priv)) { 385 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); 386 goto unlock; ··· 497 { 498 struct iwl_priv *priv = 499 container_of(work, struct iwl_priv, scan_completed); 500 + bool internal = false; 501 502 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); 503 504 cancel_delayed_work(&priv->scan_check); 505 506 + mutex_lock(&priv->mutex); 507 + if (priv->is_internal_short_scan) { 508 priv->is_internal_short_scan = false; 509 IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); 510 + internal = true; 511 } 512 + mutex_unlock(&priv->mutex); 513 + 514 + /* 515 + * Do not hold mutex here since this will cause mac80211 to call 516 + * into driver again into functions that will attempt to take 517 + * mutex. 518 + */ 519 + if (!internal) 520 + ieee80211_scan_completed(priv->hw, false); 521 522 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 523 return;
+1 -1
drivers/net/wireless/iwlwifi/iwl-sta.c
··· 431 struct iwl_link_quality_cmd *link_cmd; 432 unsigned long flags; 433 434 - if (*sta_id_r) 435 *sta_id_r = IWL_INVALID_STATION; 436 437 ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
··· 431 struct iwl_link_quality_cmd *link_cmd; 432 unsigned long flags; 433 434 + if (sta_id_r) 435 *sta_id_r = IWL_INVALID_STATION; 436 437 ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
+10 -6
drivers/net/wireless/rndis_wlan.c
··· 2572 2573 static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) 2574 { 2575 - union iwreq_data evt; 2576 2577 netif_carrier_off(usbdev->net); 2578 - 2579 - evt.data.flags = 0; 2580 - evt.data.length = 0; 2581 - memset(evt.ap_addr.sa_data, 0, ETH_ALEN); 2582 - wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL); 2583 } 2584 2585 static void rndis_wlan_worker(struct work_struct *work)
··· 2572 2573 static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) 2574 { 2575 + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2576 + 2577 + if (priv->connected) { 2578 + priv->connected = false; 2579 + memset(priv->bssid, 0, ETH_ALEN); 2580 + 2581 + deauthenticate(usbdev); 2582 + 2583 + cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL); 2584 + } 2585 2586 netif_carrier_off(usbdev->net); 2587 } 2588 2589 static void rndis_wlan_worker(struct work_struct *work)
+5 -4
drivers/net/wireless/rt2x00/rt2400pci.c
··· 926 static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, 927 enum dev_state state) 928 { 929 - u32 reg; 930 unsigned int i; 931 char put_to_sleep; 932 char bbp_state; ··· 947 * device has entered the correct state. 948 */ 949 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 950 - rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); 951 - bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); 952 - rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); 953 if (bbp_state == state && rf_state == state) 954 return 0; 955 msleep(10); 956 } 957
··· 926 static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, 927 enum dev_state state) 928 { 929 + u32 reg, reg2; 930 unsigned int i; 931 char put_to_sleep; 932 char bbp_state; ··· 947 * device has entered the correct state. 948 */ 949 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 950 + rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2); 951 + bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); 952 + rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); 953 if (bbp_state == state && rf_state == state) 954 return 0; 955 + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); 956 msleep(10); 957 } 958
+5 -4
drivers/net/wireless/rt2x00/rt2500pci.c
··· 1084 static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, 1085 enum dev_state state) 1086 { 1087 - u32 reg; 1088 unsigned int i; 1089 char put_to_sleep; 1090 char bbp_state; ··· 1105 * device has entered the correct state. 1106 */ 1107 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1108 - rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); 1109 - bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); 1110 - rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); 1111 if (bbp_state == state && rf_state == state) 1112 return 0; 1113 msleep(10); 1114 } 1115
··· 1084 static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, 1085 enum dev_state state) 1086 { 1087 + u32 reg, reg2; 1088 unsigned int i; 1089 char put_to_sleep; 1090 char bbp_state; ··· 1105 * device has entered the correct state. 1106 */ 1107 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1108 + rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2); 1109 + bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); 1110 + rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); 1111 if (bbp_state == state && rf_state == state) 1112 return 0; 1113 + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); 1114 msleep(10); 1115 } 1116
+1 -1
drivers/net/wireless/rt2x00/rt2800usb.c
··· 413 */ 414 rt2x00_desc_read(txi, 0, &word); 415 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 416 - skb->len + TXWI_DESC_SIZE); 417 rt2x00_set_field32(&word, TXINFO_W0_WIV, 418 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 419 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
··· 413 */ 414 rt2x00_desc_read(txi, 0, &word); 415 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 416 + skb->len - TXINFO_DESC_SIZE); 417 rt2x00_set_field32(&word, TXINFO_W0_WIV, 418 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 419 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
+1 -1
drivers/net/wireless/rt2x00/rt2x00pci.c
··· 206 /* 207 * Free irq line. 208 */ 209 - free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev); 210 211 /* 212 * Free DMA
··· 206 /* 207 * Free irq line. 208 */ 209 + free_irq(rt2x00dev->irq, rt2x00dev); 210 211 /* 212 * Free DMA
+4 -3
drivers/net/wireless/rt2x00/rt61pci.c
··· 1689 1690 static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1691 { 1692 - u32 reg; 1693 unsigned int i; 1694 char put_to_sleep; 1695 ··· 1706 * device has entered the correct state. 1707 */ 1708 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1709 - rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg); 1710 - state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); 1711 if (state == !put_to_sleep) 1712 return 0; 1713 msleep(10); 1714 } 1715
··· 1689 1690 static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1691 { 1692 + u32 reg, reg2; 1693 unsigned int i; 1694 char put_to_sleep; 1695 ··· 1706 * device has entered the correct state. 1707 */ 1708 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1709 + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg2); 1710 + state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); 1711 if (state == !put_to_sleep) 1712 return 0; 1713 + rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg); 1714 msleep(10); 1715 } 1716
+4 -3
drivers/net/wireless/rt2x00/rt73usb.c
··· 1366 1367 static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1368 { 1369 - u32 reg; 1370 unsigned int i; 1371 char put_to_sleep; 1372 ··· 1383 * device has entered the correct state. 1384 */ 1385 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1386 - rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg); 1387 - state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); 1388 if (state == !put_to_sleep) 1389 return 0; 1390 msleep(10); 1391 } 1392
··· 1366 1367 static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1368 { 1369 + u32 reg, reg2; 1370 unsigned int i; 1371 char put_to_sleep; 1372 ··· 1383 * device has entered the correct state. 1384 */ 1385 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1386 + rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg2); 1387 + state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); 1388 if (state == !put_to_sleep) 1389 return 0; 1390 + rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg); 1391 msleep(10); 1392 } 1393
+2
drivers/net/wireless/wl12xx/wl1271_rx.c
··· 113 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, 114 beacon ? "beacon" : ""); 115 116 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 117 ieee80211_rx_ni(wl->hw, skb); 118 }
··· 113 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, 114 beacon ? "beacon" : ""); 115 116 + skb_trim(skb, skb->len - desc->pad_len); 117 + 118 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 119 ieee80211_rx_ni(wl->hw, skb); 120 }
+21
include/linux/fec.h
···
··· 1 + /* include/linux/fec.h 2 + * 3 + * Copyright (c) 2009 Orex Computed Radiography 4 + * Baruch Siach <baruch@tkos.co.il> 5 + * 6 + * Header file for the FEC platform data 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + #ifndef __LINUX_FEC_H__ 13 + #define __LINUX_FEC_H__ 14 + 15 + #include <linux/phy.h> 16 + 17 + struct fec_platform_data { 18 + phy_interface_t phy; 19 + }; 20 + 21 + #endif
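As a usage note for the new header: a board file hands this structure to the FEC driver through platform data, which is how fec_probe() above picks up the PHY mode for the RMII gasket setup. A hedged sketch of registration on an i.MX-style board (the device id and surrounding board code are illustrative, not part of this merge):

    #include <linux/fec.h>
    #include <linux/platform_device.h>

    /* Select the RMII gasket path added to fec.c above. */
    static struct fec_platform_data fec_pdata = {
            .phy    = PHY_INTERFACE_MODE_RMII,
    };

    static struct platform_device fec_device = {
            .name   = "fec",        /* matches the fec platform driver */
            .id     = 0,
            .dev    = {
                    .platform_data = &fec_pdata,
            },
    };

    /* In the board init path: platform_device_register(&fec_device); */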
+12 -4
include/linux/netdevice.h
··· 1407 struct softnet_data *rps_ipi_next; 1408 unsigned int cpu; 1409 unsigned int input_queue_head; 1410 #endif 1411 unsigned dropped; 1412 struct sk_buff_head input_pkt_queue; 1413 struct napi_struct backlog; 1414 }; 1415 1416 - static inline void input_queue_head_add(struct softnet_data *sd, 1417 - unsigned int len) 1418 { 1419 #ifdef CONFIG_RPS 1420 - sd->input_queue_head += len; 1421 #endif 1422 } 1423 ··· 2334 #define netif_vdbg(priv, type, dev, format, args...) \ 2335 ({ \ 2336 if (0) \ 2337 - netif_printk(KERN_DEBUG, dev, format, ##args); \ 2338 0; \ 2339 }) 2340 #endif
··· 1407 struct softnet_data *rps_ipi_next; 1408 unsigned int cpu; 1409 unsigned int input_queue_head; 1410 + unsigned int input_queue_tail; 1411 #endif 1412 unsigned dropped; 1413 struct sk_buff_head input_pkt_queue; 1414 struct napi_struct backlog; 1415 }; 1416 1417 + static inline void input_queue_head_incr(struct softnet_data *sd) 1418 { 1419 #ifdef CONFIG_RPS 1420 + sd->input_queue_head++; 1421 + #endif 1422 + } 1423 + 1424 + static inline void input_queue_tail_incr_save(struct softnet_data *sd, 1425 + unsigned int *qtail) 1426 + { 1427 + #ifdef CONFIG_RPS 1428 + *qtail = ++sd->input_queue_tail; 1429 #endif 1430 } 1431 ··· 2326 #define netif_vdbg(priv, type, dev, format, args...) \ 2327 ({ \ 2328 if (0) \ 2329 + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 2330 0; \ 2331 }) 2332 #endif
+1 -1
include/linux/netfilter/x_tables.h
··· 333 /* Called when user tries to insert an entry of this type: 334 hook_mask is a bitmask of hooks from which it can be 335 called. */ 336 - /* Should return true or false, or an error code (-Exxxx). */ 337 int (*checkentry)(const struct xt_tgchk_param *); 338 339 /* Called when entry of this type deleted. */
··· 333 /* Called when user tries to insert an entry of this type: 334 hook_mask is a bitmask of hooks from which it can be 335 called. */ 336 + /* Should return 0 on success or an error code otherwise (-Exxxx). */ 337 int (*checkentry)(const struct xt_tgchk_param *); 338 339 /* Called when entry of this type deleted. */
+2 -2
include/net/caif/cfctrl.h
··· 94 enum cfctrl_cmd cmd; 95 u8 channel_id; 96 struct cfctrl_link_param param; 97 - struct cfctrl_request_info *next; 98 struct cflayer *client_layer; 99 }; 100 101 struct cfctrl { ··· 103 struct cfctrl_rsp res; 104 atomic_t req_seq_no; 105 atomic_t rsp_seq_no; 106 - struct cfctrl_request_info *first_req; 107 /* Protects from simultaneous access to first_req list */ 108 spinlock_t info_list_lock; 109 #ifndef CAIF_NO_LOOP
··· 94 enum cfctrl_cmd cmd; 95 u8 channel_id; 96 struct cfctrl_link_param param; 97 struct cflayer *client_layer; 98 + struct list_head list; 99 }; 100 101 struct cfctrl { ··· 103 struct cfctrl_rsp res; 104 atomic_t req_seq_no; 105 atomic_t rsp_seq_no; 106 + struct list_head list; 107 /* Protects from simultaneous access to first_req list */ 108 spinlock_t info_list_lock; 109 #ifndef CAIF_NO_LOOP
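The header side of the cfctrl rework swaps a hand-rolled singly linked next pointer for an embedded struct list_head, the kernel's intrusive list node. If the idiom is unfamiliar: the node lives inside the object, and container_of() recovers the object from its node. A freestanding sketch, with the list primitives re-implemented minimally for the demo (the real definitions live in <linux/list.h>):

#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

/* The request embeds its own node, as cfctrl_request_info now does. */
struct request { int seq; struct list_head list; };

int main(void)
{
	struct list_head q;
	struct request a = { .seq = 1 }, b = { .seq = 2 };

	INIT_LIST_HEAD(&q);
	list_add_tail(&a.list, &q);
	list_add_tail(&b.list, &q);
	assert(container_of(q.next, struct request, list)->seq == 1);
	assert(container_of(q.prev, struct request, list)->seq == 2);
	return 0;
}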
+63
include/net/cls_cgroup.h
···
··· 1 + /* 2 + * cls_cgroup.h Control Group Classifier 3 + * 4 + * Authors: Thomas Graf <tgraf@suug.ch> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + 13 + #ifndef _NET_CLS_CGROUP_H 14 + #define _NET_CLS_CGROUP_H 15 + 16 + #include <linux/cgroup.h> 17 + #include <linux/hardirq.h> 18 + #include <linux/rcupdate.h> 19 + 20 + #ifdef CONFIG_CGROUPS 21 + struct cgroup_cls_state 22 + { 23 + struct cgroup_subsys_state css; 24 + u32 classid; 25 + }; 26 + 27 + #ifdef CONFIG_NET_CLS_CGROUP 28 + static inline u32 task_cls_classid(struct task_struct *p) 29 + { 30 + if (in_interrupt()) 31 + return 0; 32 + 33 + return container_of(task_subsys_state(p, net_cls_subsys_id), 34 + struct cgroup_cls_state, css)->classid; 35 + } 36 + #else 37 + extern int net_cls_subsys_id; 38 + 39 + static inline u32 task_cls_classid(struct task_struct *p) 40 + { 41 + int id; 42 + u32 classid; 43 + 44 + if (in_interrupt()) 45 + return 0; 46 + 47 + rcu_read_lock(); 48 + id = rcu_dereference(net_cls_subsys_id); 49 + if (id >= 0) 50 + classid = container_of(task_subsys_state(p, id), 51 + struct cgroup_cls_state, css)->classid; 52 + rcu_read_unlock(); 53 + 54 + return classid; 55 + } 56 + #endif 57 + #else 58 + static inline u32 task_cls_classid(struct task_struct *p) 59 + { 60 + return 0; 61 + } 62 + #endif 63 + #endif /* _NET_CLS_CGROUP_H */
+3
include/net/mac80211.h
··· 815 * encrypted in hardware. 816 * @alg: The key algorithm. 817 * @flags: key flags, see &enum ieee80211_key_flags. 818 * @keyidx: the key index (0-3) 819 * @keylen: key material length 820 * @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte) ··· 1637 * that TX/RX_STOP can pass NULL for this parameter. 1638 * Returns a negative error code on failure. 1639 * The callback must be atomic. 1640 * 1641 * @rfkill_poll: Poll rfkill hardware state. If you need this, you also 1642 * need to set wiphy->rfkill_poll to %true before registration,
··· 815 * encrypted in hardware. 816 * @alg: The key algorithm. 817 * @flags: key flags, see &enum ieee80211_key_flags. 818 + * @ap_addr: AP's MAC address 819 * @keyidx: the key index (0-3) 820 * @keylen: key material length 821 * @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte) ··· 1636 * that TX/RX_STOP can pass NULL for this parameter. 1637 * Returns a negative error code on failure. 1638 * The callback must be atomic. 1639 + * 1640 + * @get_survey: Return per-channel survey information 1641 * 1642 * @rfkill_poll: Poll rfkill hardware state. If you need this, you also 1643 * need to set wiphy->rfkill_poll to %true before registration,
+1 -1
include/net/netfilter/nf_conntrack_core.h
··· 61 int ret = NF_ACCEPT; 62 63 if (ct && ct != &nf_conntrack_untracked) { 64 - if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) 65 ret = __nf_conntrack_confirm(skb); 66 if (likely(ret == NF_ACCEPT)) 67 nf_ct_deliver_cached_events(ct);
··· 61 int ret = NF_ACCEPT; 62 63 if (ct && ct != &nf_conntrack_untracked) { 64 + if (!nf_ct_is_confirmed(ct)) 65 ret = __nf_conntrack_confirm(skb); 66 if (likely(ret == NF_ACCEPT)) 67 nf_ct_deliver_cached_events(ct);
+10 -2
include/net/sock.h
··· 312 void *sk_security; 313 #endif 314 __u32 sk_mark; 315 - /* XXX 4 bytes hole on 64 bit */ 316 void (*sk_state_change)(struct sock *sk); 317 void (*sk_data_ready)(struct sock *sk, int bytes); 318 void (*sk_write_space)(struct sock *sk); ··· 1074 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 1075 extern void sk_send_sigurg(struct sock *sk); 1076 1077 /* 1078 * Functions to fill in entries in struct proto_ops when a protocol 1079 * does not implement a particular function. ··· 1412 1413 /** 1414 * wq_has_sleeper - check if there are any waiting processes 1415 - * @sk: struct socket_wq 1416 * 1417 * Returns true if socket_wq has waiting processes 1418 *
··· 312 void *sk_security; 313 #endif 314 __u32 sk_mark; 315 + u32 sk_classid; 316 void (*sk_state_change)(struct sock *sk); 317 void (*sk_data_ready)(struct sock *sk, int bytes); 318 void (*sk_write_space)(struct sock *sk); ··· 1074 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 1075 extern void sk_send_sigurg(struct sock *sk); 1076 1077 + #ifdef CONFIG_CGROUPS 1078 + extern void sock_update_classid(struct sock *sk); 1079 + #else 1080 + static inline void sock_update_classid(struct sock *sk) 1081 + { 1082 + } 1083 + #endif 1084 + 1085 /* 1086 * Functions to fill in entries in struct proto_ops when a protocol 1087 * does not implement a particular function. ··· 1404 1405 /** 1406 * wq_has_sleeper - check if there are any waiting processes 1407 + * @wq: struct socket_wq 1408 * 1409 * Returns true if socket_wq has waiting processes 1410 *
+3 -1
kernel/sysctl.c
··· 2287 if (write) { 2288 left -= proc_skip_spaces(&kbuf); 2289 2290 err = proc_get_long(&kbuf, &left, &lval, &neg, 2291 proc_wspace_sep, 2292 sizeof(proc_wspace_sep), NULL); ··· 2315 2316 if (!write && !first && left && !err) 2317 err = proc_put_char(&buffer, &left, '\n'); 2318 - if (write && !err) 2319 left -= proc_skip_spaces(&kbuf); 2320 free: 2321 if (write) {
··· 2287 if (write) { 2288 left -= proc_skip_spaces(&kbuf); 2289 2290 + if (!left) 2291 + break; 2292 err = proc_get_long(&kbuf, &left, &lval, &neg, 2293 proc_wspace_sep, 2294 sizeof(proc_wspace_sep), NULL); ··· 2313 2314 if (!write && !first && left && !err) 2315 err = proc_put_char(&buffer, &left, '\n'); 2316 + if (write && !err && left) 2317 left -= proc_skip_spaces(&kbuf); 2318 free: 2319 if (write) {
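The sysctl fix is easiest to see as a parsing contract: writing "1" to a multi-element table must update exactly one slot, so the loop has to stop as soon as the input is exhausted instead of converting whatever follows. A userspace model of the corrected loop shape (strtol stands in for proc_get_long):

#include <assert.h>
#include <stdlib.h>

/* Skip whitespace first, and break *before* parsing when nothing is
 * left -- the added "if (!left) break;" above. */
static int parse_ints(const char *s, long *vals, int max)
{
	char *end;
	int n = 0;

	while (n < max) {
		while (*s == ' ' || *s == '\t' || *s == '\n')
			s++;			/* proc_skip_spaces() */
		if (*s == '\0')
			break;			/* input exhausted: stop */
		vals[n++] = strtol(s, &end, 10);
		if (end == s)
			break;			/* not a number: stop */
		s = end;
	}
	return n;
}

int main(void)
{
	long v[4] = { -1, -1, -1, -1 };

	assert(parse_ints("1\n", v, 4) == 1);	/* only v[0] is written */
	assert(v[0] == 1 && v[1] == -1);
	return 0;
}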
+1 -4
net/caif/Kconfig
··· 2 # CAIF net configurations 3 # 4 5 - #menu "CAIF Support" 6 - comment "CAIF Support" 7 menuconfig CAIF 8 - tristate "Enable CAIF support" 9 select CRC_CCITT 10 default n 11 ---help--- ··· 43 If unsure say Y. 44 45 endif 46 - #endmenu
··· 2 # CAIF net configurations 3 # 4 5 menuconfig CAIF 6 + tristate "CAIF support" 7 select CRC_CCITT 8 default n 9 ---help--- ··· 45 If unsure say Y. 46 47 endif
+36 -57
net/caif/caif_socket.c
··· 60 atomic_t num_rx_flow_off; 61 atomic_t num_rx_flow_on; 62 }; 63 - struct debug_fs_counter cnt; 64 #define dbfs_atomic_inc(v) atomic_inc(v) 65 #define dbfs_atomic_dec(v) atomic_dec(v) 66 #else ··· 128 mutex_unlock(&cf_sk->readlock); 129 } 130 131 - int sk_rcvbuf_lowwater(struct caifsock *cf_sk) 132 { 133 /* A quarter of full buffer is used a low water mark */ 134 return cf_sk->sk.sk_rcvbuf / 4; 135 } 136 137 - void caif_flow_ctrl(struct sock *sk, int mode) 138 { 139 struct caifsock *cf_sk; 140 cf_sk = container_of(sk, struct caifsock, sk); 141 - if (cf_sk->layer.dn) 142 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); 143 } 144 ··· 146 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 147 * not dropped, but CAIF is sending flow off instead. 148 */ 149 - int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 150 { 151 int err; 152 int skb_len; ··· 162 atomic_read(&cf_sk->sk.sk_rmem_alloc), 163 sk_rcvbuf_lowwater(cf_sk)); 164 set_rx_flow_off(cf_sk); 165 - if (cf_sk->layer.dn) 166 - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, 167 - CAIF_MODEMCMD_FLOW_OFF_REQ); 168 } 169 170 err = sk_filter(sk, skb); ··· 174 trace_printk("CAIF: %s():" 175 " sending flow OFF due to rmem_schedule\n", 176 __func__); 177 - if (cf_sk->layer.dn) 178 - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, 179 - CAIF_MODEMCMD_FLOW_OFF_REQ); 180 } 181 skb->dev = NULL; 182 skb_set_owner_r(skb, sk); ··· 283 { 284 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 285 286 - if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL) 287 - return; 288 if (rx_flow_is_on(cf_sk)) 289 return; 290 291 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { 292 dbfs_atomic_inc(&cnt.num_rx_flow_on); 293 set_rx_flow_on(cf_sk); 294 - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, 295 - CAIF_MODEMCMD_FLOW_ON_REQ); 296 } 297 } 298 - /* 299 - * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer 300 - * has sufficient size. 301 - */ 302 303 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, 304 - struct msghdr *m, size_t buf_len, int flags) 305 306 { 307 struct sock *sk = sock->sk; 308 struct sk_buff *skb; 309 - int ret = 0; 310 - int len; 311 312 - if (unlikely(!buf_len)) 313 - return -EINVAL; 314 315 skb = skb_recv_datagram(sk, flags, 0 , &ret); 316 if (!skb) 317 goto read_error; 318 - 319 - len = skb->len; 320 - 321 - if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) { 322 - len = buf_len; 323 - /* 324 - * Push skb back on receive queue if buffer too small. 325 - * This has a built-in race where multi-threaded receive 326 - * may get packet in wrong order, but multiple read does 327 - not really guarantee ordered delivery anyway. 328 - Let's optimize for speed without taking locks. 329 - */ 330 - 331 - skb_queue_head(&sk->sk_receive_queue, skb); 332 - ret = -EMSGSIZE; 333 - goto read_error; 334 } 335 336 - ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); 337 if (ret) 338 - goto read_error; 339 340 skb_free_datagram(sk, skb); 341 - 342 caif_check_flow_release(sk); 343 - 344 - return len; 345 346 read_error: 347 return ret; ··· 904 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); 905 906 release_sock(sk); 907 - err = wait_event_interruptible_timeout(*sk_sleep(sk), 908 sk->sk_state != CAIF_CONNECTING, 909 timeo); 910 lock_sock(sk); 911 - if (err < 0) 912 goto out; /* -ERESTARTSYS */ 913 - if (err == 0 && sk->sk_state != CAIF_CONNECTED) { 914 - err = -ETIMEDOUT; 915 - goto out; 916 - } 917 918 if (sk->sk_state != CAIF_CONNECTED) { 919 sock->state = SS_UNCONNECTED; 920 err = sock_error(sk); ··· 928 release_sock(sk); 929 return err; 930 } 931 - 932 933 /* 934 * caif_release() - Disconnect a CAIF Socket ··· 1001 if (!skb_queue_empty(&sk->sk_receive_queue) || 1002 (sk->sk_shutdown & RCV_SHUTDOWN)) 1003 mask |= POLLIN | POLLRDNORM; 1004 - 1005 - /* Connection-based need to check for termination and startup */ 1006 - if (sk->sk_state == CAIF_DISCONNECTED) 1007 - mask |= POLLHUP; 1008 1009 /* 1010 * we set writable also when the other side has shut down the ··· 1173 .owner = THIS_MODULE, 1174 }; 1175 1176 - int af_caif_init(void) 1177 { 1178 int err = sock_register(&caif_family_ops); 1179 if (!err)
··· 60 atomic_t num_rx_flow_off; 61 atomic_t num_rx_flow_on; 62 }; 63 + static struct debug_fs_counter cnt; 64 #define dbfs_atomic_inc(v) atomic_inc(v) 65 #define dbfs_atomic_dec(v) atomic_dec(v) 66 #else ··· 128 mutex_unlock(&cf_sk->readlock); 129 } 130 131 + static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) 132 { 133 /* A quarter of full buffer is used a low water mark */ 134 return cf_sk->sk.sk_rcvbuf / 4; 135 } 136 137 + static void caif_flow_ctrl(struct sock *sk, int mode) 138 { 139 struct caifsock *cf_sk; 140 cf_sk = container_of(sk, struct caifsock, sk); 141 + if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) 142 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); 143 } 144 ··· 146 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 147 * not dropped, but CAIF is sending flow off instead. 148 */ 149 + static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 150 { 151 int err; 152 int skb_len; ··· 162 atomic_read(&cf_sk->sk.sk_rmem_alloc), 163 sk_rcvbuf_lowwater(cf_sk)); 164 set_rx_flow_off(cf_sk); 165 + dbfs_atomic_inc(&cnt.num_rx_flow_off); 166 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 167 } 168 169 err = sk_filter(sk, skb); ··· 175 trace_printk("CAIF: %s():" 176 " sending flow OFF due to rmem_schedule\n", 177 __func__); 178 + dbfs_atomic_inc(&cnt.num_rx_flow_off); 179 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 180 } 181 skb->dev = NULL; 182 skb_set_owner_r(skb, sk); ··· 285 { 286 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 287 288 if (rx_flow_is_on(cf_sk)) 289 return; 290 291 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { 292 dbfs_atomic_inc(&cnt.num_rx_flow_on); 293 set_rx_flow_on(cf_sk); 294 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); 295 } 296 } 297 298 + /* 299 + * Copied from unix_dgram_recvmsg, but removed credit checks, 300 + * changed locking, address handling and added MSG_TRUNC. 301 + */ 302 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, 303 + struct msghdr *m, size_t len, int flags) 304 305 { 306 struct sock *sk = sock->sk; 307 struct sk_buff *skb; 308 + int ret; 309 + int copylen; 310 311 + ret = -EOPNOTSUPP; 312 + if (m->msg_flags&MSG_OOB) 313 + goto read_error; 314 315 skb = skb_recv_datagram(sk, flags, 0 , &ret); 316 if (!skb) 317 goto read_error; 318 + copylen = skb->len; 319 + if (len < copylen) { 320 + m->msg_flags |= MSG_TRUNC; 321 + copylen = len; 322 } 323 324 + ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); 325 if (ret) 326 + goto out_free; 327 328 + ret = (flags & MSG_TRUNC) ? skb->len : copylen; 329 + out_free: 330 skb_free_datagram(sk, skb); 331 caif_check_flow_release(sk); 332 + return ret; 333 334 read_error: 335 return ret; ··· 920 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); 921 922 release_sock(sk); 923 + err = -ERESTARTSYS; 924 + timeo = wait_event_interruptible_timeout(*sk_sleep(sk), 925 sk->sk_state != CAIF_CONNECTING, 926 timeo); 927 lock_sock(sk); 928 + if (timeo < 0) 929 goto out; /* -ERESTARTSYS */ 930 931 + err = -ETIMEDOUT; 932 + if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) 933 + goto out; 934 if (sk->sk_state != CAIF_CONNECTED) { 935 sock->state = SS_UNCONNECTED; 936 err = sock_error(sk); ··· 944 release_sock(sk); 945 return err; 946 } 947 948 /* 949 * caif_release() - Disconnect a CAIF Socket ··· 1018 if (!skb_queue_empty(&sk->sk_receive_queue) || 1019 (sk->sk_shutdown & RCV_SHUTDOWN)) 1020 mask |= POLLIN | POLLRDNORM; 1021 1022 /* 1023 * we set writable also when the other side has shut down the ··· 1194 .owner = THIS_MODULE, 1195 }; 1196 1197 + static int af_caif_init(void) 1198 { 1199 int err = sock_register(&caif_family_ops); 1200 if (!err)
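The recvmsg rewrite above adopts the standard datagram truncation contract: copy what fits, mark msg_flags with MSG_TRUNC, and let a MSG_TRUNC recv report the datagram's real length instead of requeueing the skb with -EMSGSIZE. The same contract can be observed from userspace on Linux; a quick demonstration (asserts carry the control flow for brevity, so build without -DNDEBUG; the MSG_TRUNC recv flag works on AF_UNIX datagram sockets on modern kernels):

#include <assert.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	int sv[2];
	char small[4];
	struct iovec iov = { .iov_base = small, .iov_len = sizeof(small) };
	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1 };

	assert(socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) == 0);

	/* Short buffer: 4 of 10 bytes are copied, the excess is dropped,
	 * and the kernel flags the truncation. */
	assert(send(sv[0], "0123456789", 10, 0) == 10);
	assert(recvmsg(sv[1], &m, 0) == 4);
	assert(m.msg_flags & MSG_TRUNC);

	/* Passing MSG_TRUNC as a flag asks for the real length instead. */
	assert(send(sv[0], "0123456789", 10, 0) == 10);
	assert(recv(sv[1], small, sizeof(small), MSG_TRUNC) == 10);
	return 0;
}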
+28 -68
net/caif/cfctrl.c
··· 44 dev_info.id = 0xff; 45 memset(this, 0, sizeof(*this)); 46 cfsrvl_init(&this->serv, 0, &dev_info); 47 - spin_lock_init(&this->info_list_lock); 48 atomic_set(&this->req_seq_no, 1); 49 atomic_set(&this->rsp_seq_no, 1); 50 this->serv.layer.receive = cfctrl_recv; 51 sprintf(this->serv.layer.name, "ctrl"); 52 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; 53 spin_lock_init(&this->loop_linkid_lock); 54 this->loop_linkid = 1; 55 return &this->serv.layer; 56 } ··· 113 void cfctrl_insert_req(struct cfctrl *ctrl, 114 struct cfctrl_request_info *req) 115 { 116 - struct cfctrl_request_info *p; 117 spin_lock(&ctrl->info_list_lock); 118 - req->next = NULL; 119 atomic_inc(&ctrl->req_seq_no); 120 req->sequence_no = atomic_read(&ctrl->req_seq_no); 121 - if (ctrl->first_req == NULL) { 122 - ctrl->first_req = req; 123 - spin_unlock(&ctrl->info_list_lock); 124 - return; 125 - } 126 - p = ctrl->first_req; 127 - while (p->next != NULL) 128 - p = p->next; 129 - p->next = req; 130 spin_unlock(&ctrl->info_list_lock); 131 } 132 ··· 124 struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 125 struct cfctrl_request_info *req) 126 { 127 - struct cfctrl_request_info *p; 128 - struct cfctrl_request_info *ret; 129 130 spin_lock(&ctrl->info_list_lock); 131 - if (ctrl->first_req == NULL) { 132 - spin_unlock(&ctrl->info_list_lock); 133 - return NULL; 134 - } 135 136 - if (cfctrl_req_eq(req, ctrl->first_req)) { 137 - ret = ctrl->first_req; 138 - caif_assert(ctrl->first_req); 139 - atomic_set(&ctrl->rsp_seq_no, 140 - ctrl->first_req->sequence_no); 141 - ctrl->first_req = ctrl->first_req->next; 142 - spin_unlock(&ctrl->info_list_lock); 143 - return ret; 144 - } 145 - 146 - p = ctrl->first_req; 147 - 148 - while (p->next != NULL) { 149 - if (cfctrl_req_eq(req, p->next)) { 150 - pr_warning("CAIF: %s(): Requests are not " 151 "received in order\n", 152 __func__); 153 - ret = p->next; 154 - atomic_set(&ctrl->rsp_seq_no, 155 - p->next->sequence_no); 156 - p->next = p->next->next; 157 - spin_unlock(&ctrl->info_list_lock); 158 - return ret; 159 - } 160 - p = p->next; 161 - } 162 - spin_unlock(&ctrl->info_list_lock); 163 164 - pr_warning("CAIF: %s(): Request does not match\n", 165 - __func__); 166 - return NULL; 167 } 168 169 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) ··· 361 362 void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) 363 { 364 - struct cfctrl_request_info *p, *req; 365 struct cfctrl *ctrl = container_obj(layr); 366 spin_lock(&ctrl->info_list_lock); 367 368 - if (ctrl->first_req == NULL) { 369 - spin_unlock(&ctrl->info_list_lock); 370 - return; 371 - } 372 - 373 - if (ctrl->first_req->client_layer == adap_layer) { 374 - 375 - req = ctrl->first_req; 376 - ctrl->first_req = ctrl->first_req->next; 377 - kfree(req); 378 - } 379 - 380 - p = ctrl->first_req; 381 - while (p != NULL && p->next != NULL) { 382 - if (p->next->client_layer == adap_layer) { 383 - 384 - req = p->next; 385 - p->next = p->next->next; 386 - kfree(p->next); 387 } 388 - p = p->next; 389 } 390 391 spin_unlock(&ctrl->info_list_lock); ··· 594 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: 595 case CAIF_CTRLCMD_FLOW_OFF_IND: 596 spin_lock(&this->info_list_lock); 597 - if (this->first_req != NULL) { 598 pr_debug("CAIF: %s(): Received flow off in " 599 "control layer", __func__); 600 }
··· 44 dev_info.id = 0xff; 45 memset(this, 0, sizeof(*this)); 46 cfsrvl_init(&this->serv, 0, &dev_info); 47 atomic_set(&this->req_seq_no, 1); 48 atomic_set(&this->rsp_seq_no, 1); 49 this->serv.layer.receive = cfctrl_recv; 50 sprintf(this->serv.layer.name, "ctrl"); 51 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; 52 spin_lock_init(&this->loop_linkid_lock); 53 + spin_lock_init(&this->info_list_lock); 54 + INIT_LIST_HEAD(&this->list); 55 this->loop_linkid = 1; 56 return &this->serv.layer; 57 } ··· 112 void cfctrl_insert_req(struct cfctrl *ctrl, 113 struct cfctrl_request_info *req) 114 { 115 spin_lock(&ctrl->info_list_lock); 116 atomic_inc(&ctrl->req_seq_no); 117 req->sequence_no = atomic_read(&ctrl->req_seq_no); 118 + list_add_tail(&req->list, &ctrl->list); 119 spin_unlock(&ctrl->info_list_lock); 120 } 121 ··· 133 struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 134 struct cfctrl_request_info *req) 135 { 136 + struct cfctrl_request_info *p, *tmp, *first; 137 138 spin_lock(&ctrl->info_list_lock); 139 + first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list); 140 141 + list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 142 + if (cfctrl_req_eq(req, p)) { 143 + if (p != first) 144 + pr_warning("CAIF: %s(): Requests are not " 145 "received in order\n", 146 __func__); 147 148 + atomic_set(&ctrl->rsp_seq_no, 149 + p->sequence_no); 150 + list_del(&p->list); 151 + goto out; 152 + } 153 + } 154 + p = NULL; 155 + out: 156 + spin_unlock(&ctrl->info_list_lock); 157 + return p; 158 } 159 160 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) ··· 388 389 void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) 390 { 391 + struct cfctrl_request_info *p, *tmp; 392 struct cfctrl *ctrl = container_obj(layr); 393 spin_lock(&ctrl->info_list_lock); 394 + pr_warning("CAIF: %s(): enter\n", __func__); 395 396 + list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 397 + if (p->client_layer == adap_layer) { 398 + pr_warning("CAIF: %s(): cancel req :%d\n", __func__, 399 + p->sequence_no); 400 + list_del(&p->list); 401 + kfree(p); 402 } 403 } 404 405 spin_unlock(&ctrl->info_list_lock); ··· 634 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: 635 case CAIF_CTRLCMD_FLOW_OFF_IND: 636 spin_lock(&this->info_list_lock); 637 + if (!list_empty(&this->list)) { 638 pr_debug("CAIF: %s(): Received flow off in " 639 "control layer", __func__); 640 }
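Worth spelling out why the list conversion above is also a correctness fix: the removed open-coded walk in cfctrl_cancel_req() relinked first and then called kfree(p->next), freeing the successor rather than the removed node. list_for_each_entry_safe() exists precisely to make delete-while-iterating safe by caching the next pointer before the body runs. A freestanding model of that caching pattern (plain pointers instead of list_head, free() instead of kfree()):

#include <assert.h>
#include <stdlib.h>

struct req { int client; struct req *next; };

/* Remove and free every request owned by 'client'. p->next must be read
 * into 'tmp' before the node is freed -- the whole point of the "safe"
 * iterator variant. */
static void cancel_reqs(struct req **head, int client)
{
	struct req *p, *tmp, **link = head;

	for (p = *head; p != NULL; p = tmp) {
		tmp = p->next;		/* cache before any free() */
		if (p->client == client) {
			*link = tmp;	/* unlink, then free the right node */
			free(p);
		} else {
			link = &p->next;
		}
	}
}

int main(void)
{
	struct req *head = malloc(sizeof(*head));
	struct req *second = malloc(sizeof(*second));

	head->client = 1; head->next = second;
	second->client = 2; second->next = NULL;
	cancel_reqs(&head, 1);
	assert(head == second && head->client == 2);
	free(head);
	return 0;
}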
+2 -1
net/caif/cfmuxl.c
··· 174 spin_lock(&muxl->receive_lock); 175 up = get_up(muxl, id); 176 if (up == NULL) 177 - return NULL; 178 memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); 179 list_del(&up->node); 180 cfsrvl_put(up); 181 spin_unlock(&muxl->receive_lock); 182 return up; 183 }
··· 174 spin_lock(&muxl->receive_lock); 175 up = get_up(muxl, id); 176 if (up == NULL) 177 + goto out; 178 memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); 179 list_del(&up->node); 180 cfsrvl_put(up); 181 + out: 182 spin_unlock(&muxl->receive_lock); 183 return up; 184 }
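The cfmuxl change is the classic single-exit locking repair: the early return leaked receive_lock, and routing every path through an out: label guarantees the unlock. In miniature, with a pthread mutex standing in for the spinlock (build with -pthread):

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int table[4];

static int *remove_entry(int id)
{
	int *up = NULL;

	pthread_mutex_lock(&lock);
	if (id < 0 || id >= 4)
		goto out;	/* an early 'return NULL' would leak the lock */
	up = &table[id];
out:
	pthread_mutex_unlock(&lock);
	return up;
}

int main(void)
{
	assert(remove_entry(9) == NULL);
	assert(remove_entry(1) == &table[1]);
	assert(pthread_mutex_trylock(&lock) == 0); /* lock was released */
	pthread_mutex_unlock(&lock);
	return 0;
}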
+17 -8
net/caif/cfpkt_skbuff.c
··· 238 struct sk_buff *lastskb; 239 u8 *to; 240 const u8 *data = data2; 241 if (unlikely(is_erronous(pkt))) 242 return -EPROTO; 243 if (unlikely(skb_headroom(skb) < len)) { ··· 247 } 248 249 /* Make sure data is writable */ 250 - if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { 251 PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); 252 - return -EPROTO; 253 } 254 255 to = skb_push(skb, len); ··· 318 struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) 319 { 320 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); 321 if (unlikely(data != NULL)) 322 cfpkt_add_body(pkt, data, len); 323 return pkt; ··· 348 349 if (dst->tail + neededtailspace > dst->end) { 350 /* Create a dumplicate of 'dst' with more tail space */ 351 dstlen = skb_headlen(dst); 352 createlen = dstlen + neededtailspace; 353 - tmp = pkt_to_skb( 354 - cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX)); 355 - if (!tmp) 356 return NULL; 357 skb_set_tail_pointer(tmp, dstlen); 358 tmp->len = dstlen; 359 memcpy(tmp->data, dst->data, dstlen); ··· 373 { 374 struct sk_buff *skb2; 375 struct sk_buff *skb = pkt_to_skb(pkt); 376 u8 *split = skb->data + pos; 377 u16 len2nd = skb_tail_pointer(skb) - split; 378 ··· 387 } 388 389 /* Create a new packet for the second part of the data */ 390 - skb2 = pkt_to_skb( 391 - cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, 392 - PKT_PREFIX)); 393 394 if (skb2 == NULL) 395 return NULL;
··· 238 struct sk_buff *lastskb; 239 u8 *to; 240 const u8 *data = data2; 241 + int ret; 242 if (unlikely(is_erronous(pkt))) 243 return -EPROTO; 244 if (unlikely(skb_headroom(skb) < len)) { ··· 246 } 247 248 /* Make sure data is writable */ 249 + ret = skb_cow_data(skb, 0, &lastskb); 250 + if (unlikely(ret < 0)) { 251 PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); 252 + return ret; 253 } 254 255 to = skb_push(skb, len); ··· 316 struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) 317 { 318 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); 319 + if (!pkt) 320 + return NULL; 321 if (unlikely(data != NULL)) 322 cfpkt_add_body(pkt, data, len); 323 return pkt; ··· 344 345 if (dst->tail + neededtailspace > dst->end) { 346 /* Create a dumplicate of 'dst' with more tail space */ 347 + struct cfpkt *tmppkt; 348 dstlen = skb_headlen(dst); 349 createlen = dstlen + neededtailspace; 350 + tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX); 351 + if (tmppkt == NULL) 352 return NULL; 353 + tmp = pkt_to_skb(tmppkt); 354 skb_set_tail_pointer(tmp, dstlen); 355 tmp->len = dstlen; 356 memcpy(tmp->data, dst->data, dstlen); ··· 368 { 369 struct sk_buff *skb2; 370 struct sk_buff *skb = pkt_to_skb(pkt); 371 + struct cfpkt *tmppkt; 372 u8 *split = skb->data + pos; 373 u16 len2nd = skb_tail_pointer(skb) - split; 374 ··· 381 } 382 383 /* Create a new packet for the second part of the data */ 384 + tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, 385 + PKT_PREFIX); 386 + if (tmppkt == NULL) 387 + return NULL; 388 + skb2 = pkt_to_skb(tmppkt); 389 + 390 391 if (skb2 == NULL) 392 return NULL;
+2 -1
net/caif/cfserl.c
··· 67 layr->incomplete_frm = 68 cfpkt_append(layr->incomplete_frm, newpkt, expectlen); 69 pkt = layr->incomplete_frm; 70 } else { 71 pkt = newpkt; 72 } ··· 156 if (layr->usestx) { 157 if (tail_pkt != NULL) 158 pkt = cfpkt_append(pkt, tail_pkt, 0); 159 - 160 /* Start search for next STX if frame failed */ 161 continue; 162 } else {
··· 67 layr->incomplete_frm = 68 cfpkt_append(layr->incomplete_frm, newpkt, expectlen); 69 pkt = layr->incomplete_frm; 70 + if (pkt == NULL) 71 + return -ENOMEM; 72 } else { 73 pkt = newpkt; 74 } ··· 154 if (layr->usestx) { 155 if (tail_pkt != NULL) 156 pkt = cfpkt_append(pkt, tail_pkt, 0); 157 /* Start search for next STX if frame failed */ 158 continue; 159 } else {
+6
net/caif/cfsrvl.c
··· 123 struct caif_payload_info *info; 124 u8 flow_off = SRVL_FLOW_OFF; 125 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 126 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { 127 pr_err("CAIF: %s(): Packet is erroneous!\n", 128 __func__);
··· 123 struct caif_payload_info *info; 124 u8 flow_off = SRVL_FLOW_OFF; 125 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 126 + if (!pkt) { 127 + pr_warning("CAIF: %s(): Out of memory\n", 128 + __func__); 129 + return -ENOMEM; 130 + } 131 + 132 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { 133 pr_err("CAIF: %s(): Packet is erroneous!\n", 134 __func__);
+27 -21
net/core/dev.c
··· 954 } 955 EXPORT_SYMBOL(dev_alloc_name); 956 957 - static int dev_get_valid_name(struct net *net, const char *name, char *buf, 958 - bool fmt) 959 { 960 if (!dev_valid_name(name)) 961 return -EINVAL; 962 963 if (fmt && strchr(name, '%')) 964 - return __dev_alloc_name(net, name, buf); 965 else if (__dev_get_by_name(net, name)) 966 return -EEXIST; 967 - else if (buf != name) 968 - strlcpy(buf, name, IFNAMSIZ); 969 970 return 0; 971 } ··· 1001 1002 memcpy(oldname, dev->name, IFNAMSIZ); 1003 1004 - err = dev_get_valid_name(net, newname, dev->name, 1); 1005 if (err < 0) 1006 return err; 1007 ··· 2425 if (skb_queue_len(&sd->input_pkt_queue)) { 2426 enqueue: 2427 __skb_queue_tail(&sd->input_pkt_queue, skb); 2428 - #ifdef CONFIG_RPS 2429 - *qtail = sd->input_queue_head + 2430 - skb_queue_len(&sd->input_pkt_queue); 2431 - #endif 2432 rps_unlock(sd); 2433 local_irq_restore(flags); 2434 return NET_RX_SUCCESS; ··· 2960 if (skb->dev == dev) { 2961 __skb_unlink(skb, &sd->input_pkt_queue); 2962 kfree_skb(skb); 2963 - input_queue_head_add(sd, 1); 2964 } 2965 } 2966 rps_unlock(sd); ··· 2969 if (skb->dev == dev) { 2970 __skb_unlink(skb, &sd->process_queue); 2971 kfree_skb(skb); 2972 } 2973 } 2974 } ··· 3325 while ((skb = __skb_dequeue(&sd->process_queue))) { 3326 local_irq_enable(); 3327 __netif_receive_skb(skb); 3328 - if (++work >= quota) 3329 - return work; 3330 local_irq_disable(); 3331 } 3332 3333 rps_lock(sd); 3334 qlen = skb_queue_len(&sd->input_pkt_queue); 3335 - if (qlen) { 3336 - input_queue_head_add(sd, qlen); 3337 skb_queue_splice_tail_init(&sd->input_pkt_queue, 3338 &sd->process_queue); 3339 - } 3340 if (qlen < quota - work) { 3341 /* 3342 * Inline a custom version of __napi_complete(). ··· 4964 } 4965 } 4966 4967 - ret = dev_get_valid_name(net, dev->name, dev->name, 0); 4968 if (ret) 4969 goto err_uninit; 4970 ··· 5562 /* We get here if we can't use the current device name */ 5563 if (!pat) 5564 goto out; 5565 - if (dev_get_valid_name(net, pat, dev->name, 1)) 5566 goto out; 5567 } 5568 ··· 5665 local_irq_enable(); 5666 5667 /* Process offline CPU's input_pkt_queue */ 5668 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { 5669 netif_rx(skb); 5670 - input_queue_head_add(oldsd, 1); 5671 } 5672 - while ((skb = __skb_dequeue(&oldsd->process_queue))) 5673 - netif_rx(skb); 5674 5675 return NOTIFY_OK; 5676 }
··· 954 } 955 EXPORT_SYMBOL(dev_alloc_name); 956 957 + static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) 958 { 959 + struct net *net; 960 + 961 + BUG_ON(!dev_net(dev)); 962 + net = dev_net(dev); 963 + 964 if (!dev_valid_name(name)) 965 return -EINVAL; 966 967 if (fmt && strchr(name, '%')) 968 + return dev_alloc_name(dev, name); 969 else if (__dev_get_by_name(net, name)) 970 return -EEXIST; 971 + else if (dev->name != name) 972 + strlcpy(dev->name, name, IFNAMSIZ); 973 974 return 0; 975 } ··· 997 998 memcpy(oldname, dev->name, IFNAMSIZ); 999 1000 + err = dev_get_valid_name(dev, newname, 1); 1001 if (err < 0) 1002 return err; 1003 ··· 2421 if (skb_queue_len(&sd->input_pkt_queue)) { 2422 enqueue: 2423 __skb_queue_tail(&sd->input_pkt_queue, skb); 2424 + input_queue_tail_incr_save(sd, qtail); 2425 rps_unlock(sd); 2426 local_irq_restore(flags); 2427 return NET_RX_SUCCESS; ··· 2959 if (skb->dev == dev) { 2960 __skb_unlink(skb, &sd->input_pkt_queue); 2961 kfree_skb(skb); 2962 + input_queue_head_incr(sd); 2963 } 2964 } 2965 rps_unlock(sd); ··· 2968 if (skb->dev == dev) { 2969 __skb_unlink(skb, &sd->process_queue); 2970 kfree_skb(skb); 2971 + input_queue_head_incr(sd); 2972 } 2973 } 2974 } ··· 3323 while ((skb = __skb_dequeue(&sd->process_queue))) { 3324 local_irq_enable(); 3325 __netif_receive_skb(skb); 3326 local_irq_disable(); 3327 + input_queue_head_incr(sd); 3328 + if (++work >= quota) { 3329 + local_irq_enable(); 3330 + return work; 3331 + } 3332 } 3333 3334 rps_lock(sd); 3335 qlen = skb_queue_len(&sd->input_pkt_queue); 3336 + if (qlen) 3337 skb_queue_splice_tail_init(&sd->input_pkt_queue, 3338 &sd->process_queue); 3339 + 3340 if (qlen < quota - work) { 3341 /* 3342 * Inline a custom version of __napi_complete(). ··· 4960 } 4961 } 4962 4963 + ret = dev_get_valid_name(dev, dev->name, 0); 4964 if (ret) 4965 goto err_uninit; 4966 ··· 5558 /* We get here if we can't use the current device name */ 5559 if (!pat) 5560 goto out; 5561 + if (dev_get_valid_name(dev, pat, 1)) 5562 goto out; 5563 } 5564 ··· 5661 local_irq_enable(); 5662 5663 /* Process offline CPU's input_pkt_queue */ 5664 + while ((skb = __skb_dequeue(&oldsd->process_queue))) { 5665 + netif_rx(skb); 5666 + input_queue_head_incr(oldsd); 5667 + } 5668 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { 5669 netif_rx(skb); 5670 + input_queue_head_incr(oldsd); 5671 } 5672 5673 return NOTIFY_OK; 5674 }
+3 -1
net/core/rtnetlink.c
··· 1199 struct nlattr *attr; 1200 int rem; 1201 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 1202 - if (nla_type(attr) != IFLA_VF_INFO) 1203 goto errout; 1204 err = do_setvfinfo(dev, attr); 1205 if (err < 0) 1206 goto errout;
··· 1199 struct nlattr *attr; 1200 int rem; 1201 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 1202 + if (nla_type(attr) != IFLA_VF_INFO) { 1203 + err = -EINVAL; 1204 goto errout; 1205 + } 1206 err = do_setvfinfo(dev, attr); 1207 if (err < 0) 1208 goto errout;
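The rtnetlink hunk fixes a quiet failure mode: jumping to errout without assigning err first returns whatever err last held, which can be 0, so a malformed attribute looked like success. The corrected shape reduced to a compilable sketch (struct attr and the constant are invented for the demo):

#include <errno.h>

struct attr { int type; };

#define ATTR_VF_INFO 1

static int do_set(const struct attr *a) { (void)a; return 0; }

static int set_all(const struct attr *attrs, int n)
{
	int err = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (attrs[i].type != ATTR_VF_INFO) {
			err = -EINVAL;	/* must be set before the jump */
			goto errout;
		}
		err = do_set(&attrs[i]);
		if (err < 0)
			goto errout;
	}
errout:
	return err;
}

int main(void)
{
	struct attr bad = { .type = 2 };
	return set_all(&bad, 1) == -EINVAL ? 0 : 1;
}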
+1
net/core/skbuff.c
··· 2722 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2723 skb_shinfo(nskb)->frag_list = p; 2724 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2725 skb_header_release(p); 2726 nskb->prev = p; 2727
··· 2722 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2723 skb_shinfo(nskb)->frag_list = p; 2724 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2725 + pinfo->gso_size = 0; 2726 skb_header_release(p); 2727 nskb->prev = p; 2728
+19
net/core/sock.c
··· 123 #include <linux/net_tstamp.h> 124 #include <net/xfrm.h> 125 #include <linux/ipsec.h> 126 127 #include <linux/filter.h> 128 ··· 217 /* Maximal space eaten by iovec or ancilliary data plus some space */ 218 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 219 EXPORT_SYMBOL(sysctl_optmem_max); 220 221 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 222 { ··· 1056 module_put(owner); 1057 } 1058 1059 /** 1060 * sk_alloc - All socket objects are allocated here 1061 * @net: the applicable net namespace ··· 1090 sock_lock_init(sk); 1091 sock_net_set(sk, get_net(net)); 1092 atomic_set(&sk->sk_wmem_alloc, 1); 1093 } 1094 1095 return sk;
··· 123 #include <linux/net_tstamp.h> 124 #include <net/xfrm.h> 125 #include <linux/ipsec.h> 126 + #include <net/cls_cgroup.h> 127 128 #include <linux/filter.h> 129 ··· 216 /* Maximal space eaten by iovec or ancilliary data plus some space */ 217 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 218 EXPORT_SYMBOL(sysctl_optmem_max); 219 + 220 + #if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) 221 + int net_cls_subsys_id = -1; 222 + EXPORT_SYMBOL_GPL(net_cls_subsys_id); 223 + #endif 224 225 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 226 { ··· 1050 module_put(owner); 1051 } 1052 1053 + #ifdef CONFIG_CGROUPS 1054 + void sock_update_classid(struct sock *sk) 1055 + { 1056 + u32 classid = task_cls_classid(current); 1057 + 1058 + if (classid && classid != sk->sk_classid) 1059 + sk->sk_classid = classid; 1060 + } 1061 + EXPORT_SYMBOL(sock_update_classid); 1062 + #endif 1063 + 1064 /** 1065 * sk_alloc - All socket objects are allocated here 1066 * @net: the applicable net namespace ··· 1073 sock_lock_init(sk); 1074 sock_net_set(sk, get_net(net)); 1075 atomic_set(&sk->sk_wmem_alloc, 1); 1076 + 1077 + sock_update_classid(sk); 1078 } 1079 1080 return sk;
+3 -3
net/dccp/input.c
··· 124 return queued; 125 } 126 127 - static u8 dccp_reset_code_convert(const u8 code) 128 { 129 - const u8 error_code[] = { 130 [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ 131 [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ 132 [DCCP_RESET_CODE_ABORTED] = ECONNRESET, ··· 148 149 static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) 150 { 151 - u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); 152 153 sk->sk_err = err; 154
··· 124 return queued; 125 } 126 127 + static u16 dccp_reset_code_convert(const u8 code) 128 { 129 + const u16 error_code[] = { 130 [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ 131 [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ 132 [DCCP_RESET_CODE_ABORTED] = ECONNRESET, ··· 148 149 static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) 150 { 151 + u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); 152 153 sk->sk_err = err; 154
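The u8-to-u16 widening matters because errno constants are not bounded by 255 on every architecture; MIPS defines EDQUOT as 1133, for example, so a u8 table entry would silently truncate it into a different error. Demonstrably (the MIPS value is hard-coded only to keep the demo portable):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const int mips_edquot = 1133;	/* EDQUOT in asm-mips errno.h */
	uint8_t narrow = (uint8_t)mips_edquot;
	uint16_t wide = (uint16_t)mips_edquot;

	assert(narrow == 109);	/* 1133 & 0xff: reports the wrong errno */
	assert(wide == 1133);	/* u16 preserves the code intact */
	return 0;
}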
+6 -1
net/ieee802154/wpan-class.c
··· 147 struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, 148 GFP_KERNEL); 149 150 mutex_lock(&wpan_phy_mutex); 151 phy->idx = wpan_phy_idx++; 152 if (unlikely(!wpan_phy_idx_valid(phy->idx))) { 153 wpan_phy_idx--; 154 mutex_unlock(&wpan_phy_mutex); 155 kfree(phy); 156 - return NULL; 157 } 158 mutex_unlock(&wpan_phy_mutex); 159 ··· 170 phy->current_page = 0; /* for compatibility */ 171 172 return phy; 173 } 174 EXPORT_SYMBOL(wpan_phy_alloc); 175
··· 147 struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, 148 GFP_KERNEL); 149 150 + if (!phy) 151 + goto out; 152 mutex_lock(&wpan_phy_mutex); 153 phy->idx = wpan_phy_idx++; 154 if (unlikely(!wpan_phy_idx_valid(phy->idx))) { 155 wpan_phy_idx--; 156 mutex_unlock(&wpan_phy_mutex); 157 kfree(phy); 158 + goto out; 159 } 160 mutex_unlock(&wpan_phy_mutex); 161 ··· 168 phy->current_page = 0; /* for compatibility */ 169 170 return phy; 171 + 172 + out: 173 + return NULL; 174 } 175 EXPORT_SYMBOL(wpan_phy_alloc); 176
+1 -1
net/mac80211/sta_info.h
··· 145 /** 146 * struct sta_ampdu_mlme - STA aggregation information. 147 * 148 - * @tid_state_rx: TID's state in Rx session state machine. 149 * @tid_rx: aggregation info for Rx per TID 150 * @tid_state_tx: TID's state in Tx session state machine. 151 * @tid_tx: aggregation info for Tx per TID
··· 145 /** 146 * struct sta_ampdu_mlme - STA aggregation information. 147 * 148 + * @tid_active_rx: TID's state in Rx session state machine. 149 * @tid_rx: aggregation info for Rx per TID 150 * @tid_state_tx: TID's state in Tx session state machine. 151 * @tid_tx: aggregation info for Tx per TID
+10
net/netfilter/nf_conntrack_core.c
··· 424 425 spin_lock_bh(&nf_conntrack_lock); 426 427 /* See if there's one in the list already, including reverse: 428 NAT could have grabbed it without realizing, since we're 429 not in the hash. If there is, we lost race. */
··· 424 425 spin_lock_bh(&nf_conntrack_lock); 426 427 + /* We have to check the DYING flag inside the lock to prevent 428 + a race against nf_ct_get_next_corpse() possibly called from 429 + user context, else we insert an already 'dead' hash, blocking 430 + further use of that particular connection -JM */ 431 + 432 + if (unlikely(nf_ct_is_dying(ct))) { 433 + spin_unlock_bh(&nf_conntrack_lock); 434 + return NF_ACCEPT; 435 + } 436 + 437 /* See if there's one in the list already, including reverse: 438 NAT could have grabbed it without realizing, since we're 439 not in the hash. If there is, we lost race. */
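The comment in the hunk tells the story; the general shape deserves a sketch: a flag tested before a lock is taken can flip before the lock is acquired, so the insert decision must re-read it under the same lock the killer holds. Miniature version with a pthread mutex in place of nf_conntrack_lock (build with -pthread):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct conn {
	pthread_mutex_t lock;
	bool dying;
	bool hashed;
};

static int confirm(struct conn *ct)
{
	pthread_mutex_lock(&ct->lock);
	if (ct->dying) {		/* re-check inside the lock */
		pthread_mutex_unlock(&ct->lock);
		return -1;		/* lost the race; do not insert */
	}
	ct->hashed = true;		/* safe: no one can kill us here */
	pthread_mutex_unlock(&ct->lock);
	return 0;
}

int main(void)
{
	struct conn ct = { PTHREAD_MUTEX_INITIALIZER, false, false };

	ct.dying = true;	/* the killer won before we got the lock */
	assert(confirm(&ct) == -1 && !ct.hashed);
	return 0;
}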
+4 -8
net/netfilter/nf_conntrack_sip.c
··· 1393 1394 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1395 1396 - if (skb_is_nonlinear(skb)) { 1397 - pr_debug("Copy of skbuff not supported yet.\n"); 1398 - return NF_ACCEPT; 1399 - } 1400 1401 dptr = skb->data + dataoff; 1402 datalen = skb->len - dataoff; ··· 1453 1454 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1455 1456 - if (skb_is_nonlinear(skb)) { 1457 - pr_debug("Copy of skbuff not supported yet.\n"); 1458 - return NF_ACCEPT; 1459 - } 1460 1461 dptr = skb->data + dataoff; 1462 datalen = skb->len - dataoff;
··· 1393 1394 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1395 1396 + if (unlikely(skb_linearize(skb))) 1397 + return NF_DROP; 1398 1399 dptr = skb->data + dataoff; 1400 datalen = skb->len - dataoff; ··· 1455 1456 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1457 1458 + if (unlikely(skb_linearize(skb))) 1459 + return NF_DROP; 1460 1461 dptr = skb->data + dataoff; 1462 datalen = skb->len - dataoff;
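Replacing the bail-out with skb_linearize() is the right call for a text protocol: the SIP parser needs one contiguous byte range, and a paged skb previously escaped inspection entirely. A userspace analogue of linearizing scattered chunks before parsing (struct chunk is a stand-in for skb fragments; failing the allocation drops the packet, matching NF_DROP):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct chunk { const char *data; size_t len; };

/* Copy every fragment into one contiguous allocation. */
static char *linearize(const struct chunk *c, int n, size_t *out_len)
{
	size_t total = 0, off = 0;
	char *flat;
	int i;

	for (i = 0; i < n; i++)
		total += c[i].len;
	flat = malloc(total + 1);
	if (!flat)
		return NULL;	/* caller drops the packet */
	for (i = 0; i < n; i++) {
		memcpy(flat + off, c[i].data, c[i].len);
		off += c[i].len;
	}
	flat[total] = '\0';
	*out_len = total;
	return flat;
}

int main(void)
{
	struct chunk frags[2] = { { "INVITE sip:", 11 }, { "a@b SIP/2.0", 11 } };
	size_t len;
	char *msg = linearize(frags, 2, &len);

	assert(msg && len == 22 && strncmp(msg, "INVITE", 6) == 0);
	free(msg);
	return 0;
}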
+2
net/phonet/pep.c
··· 626 struct pep_sock *pn = pep_sk(sk); 627 int ifindex = 0; 628 629 sk_common_release(sk); 630 631 lock_sock(sk); ··· 645 646 if (ifindex) 647 gprs_detach(sk); 648 } 649 650 static int pep_wait_connreq(struct sock *sk, int noblock)
··· 626 struct pep_sock *pn = pep_sk(sk); 627 int ifindex = 0; 628 629 + sock_hold(sk); /* keep a reference after sk_common_release() */ 630 sk_common_release(sk); 631 632 lock_sock(sk); ··· 644 645 if (ifindex) 646 gprs_detach(sk); 647 + sock_put(sk); 648 } 649 650 static int pep_wait_connreq(struct sock *sk, int noblock)
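The Phonet fix is the canonical hold-across-release pattern: sk_common_release() may drop the last reference, so the function pins the sock first and drops its own reference only after the final field accesses. Reduced to reference-count arithmetic (plain counters stand in for sock_hold()/sock_put()):

#include <assert.h>

struct obj { int refcnt; int freed; };

static void hold(struct obj *o) { o->refcnt++; }

static void put(struct obj *o)
{
	if (--o->refcnt == 0)
		o->freed = 1;	/* the real code would free here */
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	hold(&o);	/* sock_hold(): keep the object across release */
	put(&o);	/* sk_common_release() drops its reference... */
	assert(!o.freed);	/* ...but ours still pins the object */
	/* safe to touch o here, e.g. the ifindex/gprs_detach() work */
	put(&o);	/* sock_put(): now it can really go away */
	assert(o.freed);
	return 0;
}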
+34 -16
net/sched/cls_cgroup.c
··· 16 #include <linux/errno.h> 17 #include <linux/skbuff.h> 18 #include <linux/cgroup.h> 19 #include <net/rtnetlink.h> 20 #include <net/pkt_cls.h> 21 - 22 - struct cgroup_cls_state 23 - { 24 - struct cgroup_subsys_state css; 25 - u32 classid; 26 - }; 27 28 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 29 struct cgroup *cgrp); ··· 109 struct cls_cgroup_head *head = tp->root; 110 u32 classid; 111 112 /* 113 * Due to the nature of the classifier it is required to ignore all 114 * packets originating from softirq context as accessing `current' ··· 123 * calls by looking at the number of nested bh disable calls because 124 * softirqs always disables bh. 125 */ 126 - if (softirq_count() != SOFTIRQ_OFFSET) 127 - return -1; 128 - 129 - rcu_read_lock(); 130 - classid = task_cls_state(current)->classid; 131 - rcu_read_unlock(); 132 133 if (!classid) 134 return -1; ··· 290 291 static int __init init_cgroup_cls(void) 292 { 293 - int ret = register_tcf_proto_ops(&cls_cgroup_ops); 294 - if (ret) 295 - return ret; 296 ret = cgroup_load_subsys(&net_cls_subsys); 297 if (ret) 298 - unregister_tcf_proto_ops(&cls_cgroup_ops); 299 return ret; 300 } 301 302 static void __exit exit_cgroup_cls(void) 303 { 304 unregister_tcf_proto_ops(&cls_cgroup_ops); 305 cgroup_unload_subsys(&net_cls_subsys); 306 } 307
··· 16 #include <linux/errno.h> 17 #include <linux/skbuff.h> 18 #include <linux/cgroup.h> 19 + #include <linux/rcupdate.h> 20 #include <net/rtnetlink.h> 21 #include <net/pkt_cls.h> 22 + #include <net/sock.h> 23 + #include <net/cls_cgroup.h> 24 25 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 26 struct cgroup *cgrp); ··· 112 struct cls_cgroup_head *head = tp->root; 113 u32 classid; 114 115 + rcu_read_lock(); 116 + classid = task_cls_state(current)->classid; 117 + rcu_read_unlock(); 118 + 119 /* 120 * Due to the nature of the classifier it is required to ignore all 121 * packets originating from softirq context as accessing `current' ··· 122 * calls by looking at the number of nested bh disable calls because 123 * softirqs always disables bh. 124 */ 125 + if (softirq_count() != SOFTIRQ_OFFSET) { 126 + /* If there is an sk_classid we'll use that. */ 127 + if (!skb->sk) 128 + return -1; 129 + classid = skb->sk->sk_classid; 130 + } 131 132 if (!classid) 133 return -1; ··· 289 290 static int __init init_cgroup_cls(void) 291 { 292 + int ret; 293 + 294 ret = cgroup_load_subsys(&net_cls_subsys); 295 if (ret) 296 + goto out; 297 + 298 + #ifndef CONFIG_NET_CLS_CGROUP 299 + /* We can't use rcu_assign_pointer because this is an int. */ 300 + smp_wmb(); 301 + net_cls_subsys_id = net_cls_subsys.subsys_id; 302 + #endif 303 + 304 + ret = register_tcf_proto_ops(&cls_cgroup_ops); 305 + if (ret) 306 + cgroup_unload_subsys(&net_cls_subsys); 307 + 308 + out: 309 return ret; 310 } 311 312 static void __exit exit_cgroup_cls(void) 313 { 314 unregister_tcf_proto_ops(&cls_cgroup_ops); 315 + 316 + #ifndef CONFIG_NET_CLS_CGROUP 317 + net_cls_subsys_id = -1; 318 + synchronize_rcu(); 319 + #endif 320 + 321 cgroup_unload_subsys(&net_cls_subsys); 322 } 323
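The classifier now has a two-source lookup: use the task's classid when current is trustworthy, but from softirq context fall back to the value sock_update_classid() cached on the socket at send time. The decision logic, stripped of the RCU and cgroup plumbing (function and parameters are illustrative only):

#include <assert.h>

/* 0 means "no classid"; the caller reports no match (-1) in that case. */
static unsigned int pick_classid(int in_softirq,
				 unsigned int task_classid,
				 unsigned int cached_sk_classid)
{
	if (in_softirq)
		return cached_sk_classid; /* 'current' is unreliable here */
	return task_classid;
}

int main(void)
{
	assert(pick_classid(0, 7, 3) == 7); /* process context: live value */
	assert(pick_classid(1, 7, 3) == 3); /* softirq: cached at send time */
	return 0;
}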
+7 -7
net/sched/sch_api.c
··· 1195 return -1; 1196 } 1197 1198 static int qdisc_notify(struct net *net, struct sk_buff *oskb, 1199 struct nlmsghdr *n, u32 clid, 1200 struct Qdisc *old, struct Qdisc *new) ··· 1211 if (!skb) 1212 return -ENOBUFS; 1213 1214 - if (old && old->handle) { 1215 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) 1216 goto err_out; 1217 } 1218 - if (new) { 1219 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) 1220 goto err_out; 1221 } ··· 1226 err_out: 1227 kfree_skb(skb); 1228 return -EINVAL; 1229 - } 1230 - 1231 - static bool tc_qdisc_dump_ignore(struct Qdisc *q) 1232 - { 1233 - return (q->flags & TCQ_F_BUILTIN) ? true : false; 1234 } 1235 1236 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
··· 1195 return -1; 1196 } 1197 1198 + static bool tc_qdisc_dump_ignore(struct Qdisc *q) 1199 + { 1200 + return (q->flags & TCQ_F_BUILTIN) ? true : false; 1201 + } 1202 + 1203 static int qdisc_notify(struct net *net, struct sk_buff *oskb, 1204 struct nlmsghdr *n, u32 clid, 1205 struct Qdisc *old, struct Qdisc *new) ··· 1206 if (!skb) 1207 return -ENOBUFS; 1208 1209 + if (old && !tc_qdisc_dump_ignore(old)) { 1210 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) 1211 goto err_out; 1212 } 1213 + if (new && !tc_qdisc_dump_ignore(new)) { 1214 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) 1215 goto err_out; 1216 } ··· 1221 err_out: 1222 kfree_skb(skb); 1223 return -EINVAL; 1224 } 1225 1226 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
+9
net/socket.c
··· 94 95 #include <net/compat.h> 96 #include <net/wext.h> 97 98 #include <net/sock.h> 99 #include <linux/netfilter.h> ··· 559 struct sock_iocb *si = kiocb_to_siocb(iocb); 560 int err; 561 562 si->sock = sock; 563 si->scm = NULL; 564 si->msg = msg; ··· 687 { 688 struct sock_iocb *si = kiocb_to_siocb(iocb); 689 690 si->sock = sock; 691 si->scm = NULL; 692 si->msg = msg; ··· 781 782 if (unlikely(!sock->ops->splice_read)) 783 return -EINVAL; 784 785 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 786 } ··· 3076 int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3077 size_t size, int flags) 3078 { 3079 if (sock->ops->sendpage) 3080 return sock->ops->sendpage(sock, page, offset, size, flags); 3081
··· 94 95 #include <net/compat.h> 96 #include <net/wext.h> 97 + #include <net/cls_cgroup.h> 98 99 #include <net/sock.h> 100 #include <linux/netfilter.h> ··· 558 struct sock_iocb *si = kiocb_to_siocb(iocb); 559 int err; 560 561 + sock_update_classid(sock->sk); 562 + 563 si->sock = sock; 564 si->scm = NULL; 565 si->msg = msg; ··· 684 { 685 struct sock_iocb *si = kiocb_to_siocb(iocb); 686 687 + sock_update_classid(sock->sk); 688 + 689 si->sock = sock; 690 si->scm = NULL; 691 si->msg = msg; ··· 776 777 if (unlikely(!sock->ops->splice_read)) 778 return -EINVAL; 779 + 780 + sock_update_classid(sock->sk); 781 782 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 783 } ··· 3069 int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3070 size_t size, int flags) 3071 { 3072 + sock_update_classid(sock->sk); 3073 + 3074 if (sock->ops->sendpage) 3075 return sock->ops->sendpage(sock, page, offset, size, flags); 3076
+1 -1
net/wireless/chan.c
··· 50 struct ieee80211_channel *chan; 51 int result; 52 53 - if (wdev->iftype == NL80211_IFTYPE_MONITOR) 54 wdev = NULL; 55 56 if (wdev) {
··· 50 struct ieee80211_channel *chan; 51 int result; 52 53 + if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR) 54 wdev = NULL; 55 56 if (wdev) {
+4 -2
net/wireless/nl80211.c
··· 4443 if (channel_type != NL80211_CHAN_NO_HT && 4444 channel_type != NL80211_CHAN_HT20 && 4445 channel_type != NL80211_CHAN_HT40PLUS && 4446 - channel_type != NL80211_CHAN_HT40MINUS) 4447 err = -EINVAL; 4448 goto out; 4449 } 4450 4451 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); ··· 4718 if (channel_type != NL80211_CHAN_NO_HT && 4719 channel_type != NL80211_CHAN_HT20 && 4720 channel_type != NL80211_CHAN_HT40PLUS && 4721 - channel_type != NL80211_CHAN_HT40MINUS) 4722 err = -EINVAL; 4723 goto out; 4724 } 4725 4726 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
··· 4443 if (channel_type != NL80211_CHAN_NO_HT && 4444 channel_type != NL80211_CHAN_HT20 && 4445 channel_type != NL80211_CHAN_HT40PLUS && 4446 + channel_type != NL80211_CHAN_HT40MINUS) { 4447 err = -EINVAL; 4448 goto out; 4449 + } 4450 } 4451 4452 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); ··· 4717 if (channel_type != NL80211_CHAN_NO_HT && 4718 channel_type != NL80211_CHAN_HT20 && 4719 channel_type != NL80211_CHAN_HT40PLUS && 4720 + channel_type != NL80211_CHAN_HT40MINUS) { 4721 err = -EINVAL; 4722 goto out; 4723 + } 4724 } 4725 4726 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
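The nl80211 hunks fix a missing-braces bug: as originally written, only err = -EINVAL was governed by the channel_type test while the goto out ran unconditionally, so every frequency request bailed out early. The corrected control flow in minimal compilable form (the constants are invented for the demo):

#include <errno.h>

static int check(int channel_type)
{
	int err = 0;

	if (channel_type != 0 &&
	    channel_type != 1) {	/* braces guard both statements */
		err = -EINVAL;
		goto out;
	}
	/* ... the freq handling the unbraced goto used to skip ... */
out:
	return err;
}

int main(void)
{
	return (check(0) == 0 && check(5) == -EINVAL) ? 0 : 1;
}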
+2 -2
net/wireless/scan.c
··· 515 516 privsz = wiphy->bss_priv_size; 517 518 - if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && 519 (signal < 0 || signal > 100))) 520 return NULL; 521 ··· 571 u.probe_resp.variable); 572 size_t privsz = wiphy->bss_priv_size; 573 574 - if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && 575 (signal < 0 || signal > 100))) 576 return NULL; 577
··· 515 516 privsz = wiphy->bss_priv_size; 517 518 + if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && 519 (signal < 0 || signal > 100))) 520 return NULL; 521 ··· 571 u.probe_resp.variable); 572 size_t privsz = wiphy->bss_priv_size; 573 574 + if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && 575 (signal < 0 || signal > 100))) 576 return NULL; 577