Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (63 commits)
drivers/net/usb/asix.c: Fix pointer cast.
be2net: Bug fix to avoid disabling bottom half during firmware upgrade.
proc_dointvec: write a single value
hso: add support for new products
Phonet: fix potential use-after-free in pep_sock_close()
ath9k: remove VEOL support for ad-hoc
ath9k: change beacon allocation to prefer the first beacon slot
sock.h: fix kernel-doc warning
cls_cgroup: Fix build error when built-in
macvlan: do proper cleanup in macvlan_common_newlink() V2
be2net: Bug fix in init code in probe
net/dccp: expansion of error code size
ath9k: Fix rx of mcast/bcast frames in PS mode with auto sleep
wireless: fix sta_info.h kernel-doc warnings
wireless: fix mac80211.h kernel-doc warnings
iwlwifi: testing the wrong variable in iwl_add_bssid_station()
ath9k_htc: rare leak in ath9k_hif_usb_alloc_tx_urbs()
ath9k_htc: dereferencing before check in hif_usb_tx_cb()
rt2x00: Fix rt2800usb TX descriptor writing.
rt2x00: Fix failed SLEEP->AWAKE and AWAKE->SLEEP transitions.
...

+682 -363
+6
drivers/isdn/capi/kcapi.c
··· 1147 1147 if (ctr->state == CAPI_CTR_DETECTED) 1148 1148 goto reset_unlock_out; 1149 1149 1150 + if (ctr->reset_ctr == NULL) { 1151 + printk(KERN_DEBUG "kcapi: reset: no reset function\n"); 1152 + retval = -ESRCH; 1153 + goto reset_unlock_out; 1154 + } 1155 + 1150 1156 ctr->reset_ctr(ctr); 1151 1157 1152 1158 retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
+2 -26
drivers/isdn/gigaset/capi.c
··· 922 922 */ 923 923 924 924 /* 925 - * load firmware 926 - */ 927 - static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data) 928 - { 929 - struct cardstate *cs = ctr->driverdata; 930 - 931 - /* AVM specific operation, not needed for Gigaset -- ignore */ 932 - dev_notice(cs->dev, "load_firmware ignored\n"); 933 - 934 - return 0; 935 - } 936 - 937 - /* 938 - * reset (deactivate) controller 939 - */ 940 - static void gigaset_reset_ctr(struct capi_ctr *ctr) 941 - { 942 - struct cardstate *cs = ctr->driverdata; 943 - 944 - /* AVM specific operation, not needed for Gigaset -- ignore */ 945 - dev_notice(cs->dev, "reset_ctr ignored\n"); 946 - } 947 - 948 - /* 949 925 * register CAPI application 950 926 */ 951 927 static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl, ··· 2178 2202 iif->ctr.driverdata = cs; 2179 2203 strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name)); 2180 2204 iif->ctr.driver_name = "gigaset"; 2181 - iif->ctr.load_firmware = gigaset_load_firmware; 2182 - iif->ctr.reset_ctr = gigaset_reset_ctr; 2205 + iif->ctr.load_firmware = NULL; 2206 + iif->ctr.reset_ctr = NULL; 2183 2207 iif->ctr.register_appl = gigaset_register_appl; 2184 2208 iif->ctr.release_appl = gigaset_release_appl; 2185 2209 iif->ctr.send_message = gigaset_send_message;
+2
drivers/net/benet/be.h
··· 283 283 u8 port_type; 284 284 u8 transceiver; 285 285 u8 generation; /* BladeEngine ASIC generation */ 286 + u32 flash_status; 287 + struct completion flash_compl; 286 288 287 289 bool sriov_enabled; 288 290 u32 vf_if_handle[BE_MAX_VF];
+17 -2
drivers/net/benet/be_cmds.c
··· 59 59 60 60 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 61 61 CQE_STATUS_COMPL_MASK; 62 + 63 + if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) && 64 + (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { 65 + adapter->flash_status = compl_status; 66 + complete(&adapter->flash_compl); 67 + } 68 + 62 69 if (compl_status == MCC_STATUS_SUCCESS) { 63 70 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { 64 71 struct be_cmd_resp_get_stats *resp = ··· 1424 1417 int status; 1425 1418 1426 1419 spin_lock_bh(&adapter->mcc_lock); 1420 + adapter->flash_status = 0; 1427 1421 1428 1422 wrb = wrb_from_mccq(adapter); 1429 1423 if (!wrb) { ··· 1436 1428 1437 1429 be_wrb_hdr_prepare(wrb, cmd->size, false, 1, 1438 1430 OPCODE_COMMON_WRITE_FLASHROM); 1431 + wrb->tag1 = CMD_SUBSYSTEM_COMMON; 1439 1432 1440 1433 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1441 1434 OPCODE_COMMON_WRITE_FLASHROM, cmd->size); ··· 1448 1439 req->params.op_code = cpu_to_le32(flash_opcode); 1449 1440 req->params.data_buf_size = cpu_to_le32(buf_size); 1450 1441 1451 - status = be_mcc_notify_wait(adapter); 1442 + be_mcc_notify(adapter); 1443 + spin_unlock_bh(&adapter->mcc_lock); 1444 + 1445 + if (!wait_for_completion_timeout(&adapter->flash_compl, 1446 + msecs_to_jiffies(12000))) 1447 + status = -1; 1448 + else 1449 + status = adapter->flash_status; 1452 1450 1453 1451 err: 1454 - spin_unlock_bh(&adapter->mcc_lock); 1455 1452 return status; 1456 1453 } 1457 1454
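
The be_cmds.c hunk above stops sleeping inside spin_lock_bh() during a flash write: the command is posted asynchronously, the lock is dropped, and the caller then blocks on a completion that the MCC completion handler signals. A rough, generic sketch of that pattern (all names here are illustrative, not the be2net API):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct my_adapter {
            struct completion flash_compl;  /* signalled by the completion path */
            int flash_status;               /* result captured by that path */
    };

    static void my_adapter_init(struct my_adapter *ad)
    {
            init_completion(&ad->flash_compl);      /* once, at probe time */
    }

    /* Runs in the command-completion handler, not in the sleeping caller. */
    static void my_cmd_done(struct my_adapter *ad, int status)
    {
            ad->flash_status = status;
            complete(&ad->flash_compl);
    }

    /* Caller: post the command, drop all locks, then sleep with a timeout. */
    static int my_cmd_wait(struct my_adapter *ad)
    {
            if (!wait_for_completion_timeout(&ad->flash_compl,
                                             msecs_to_jiffies(12000)))
                    return -ETIMEDOUT;      /* firmware never answered */
            return ad->flash_status;
    }
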
+7 -4
drivers/net/benet/be_main.c
··· 2319 2319 spin_lock_init(&adapter->mcc_lock); 2320 2320 spin_lock_init(&adapter->mcc_cq_lock); 2321 2321 2322 + init_completion(&adapter->flash_compl); 2322 2323 pci_save_state(adapter->pdev); 2323 2324 return 0; 2324 2325 ··· 2488 2487 status = be_cmd_POST(adapter); 2489 2488 if (status) 2490 2489 goto ctrl_clean; 2491 - 2492 - status = be_cmd_reset_function(adapter); 2493 - if (status) 2494 - goto ctrl_clean; 2495 2490 } 2496 2491 2497 2492 /* tell fw we're ready to fire cmds */ 2498 2493 status = be_cmd_fw_init(adapter); 2499 2494 if (status) 2500 2495 goto ctrl_clean; 2496 + 2497 + if (be_physfn(adapter)) { 2498 + status = be_cmd_reset_function(adapter); 2499 + if (status) 2500 + goto ctrl_clean; 2501 + } 2501 2502 2502 2503 status = be_stats_init(adapter); 2503 2504 if (status)
+2
drivers/net/bfin_mac.c
··· 1626 1626 return 0; 1627 1627 1628 1628 out_err_mdiobus_register: 1629 + kfree(miibus->irq); 1629 1630 mdiobus_free(miibus); 1630 1631 out_err_alloc: 1631 1632 peripheral_free_list(pin_req); ··· 1639 1638 struct mii_bus *miibus = platform_get_drvdata(pdev); 1640 1639 platform_set_drvdata(pdev, NULL); 1641 1640 mdiobus_unregister(miibus); 1641 + kfree(miibus->irq); 1642 1642 mdiobus_free(miibus); 1643 1643 peripheral_free_list(pin_req); 1644 1644 return 0;
+2
drivers/net/can/sja1000/sja1000.c
··· 599 599 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | 600 600 CAN_CTRLMODE_BERR_REPORTING; 601 601 602 + spin_lock_init(&priv->cmdreg_lock); 603 + 602 604 if (sizeof_priv) 603 605 priv->priv = (void *)priv + sizeof(struct sja1000_priv); 604 606
+21 -8
drivers/net/enic/enic_main.c
··· 1034 1034 { 1035 1035 struct vic_provinfo *vp; 1036 1036 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1037 - unsigned short *uuid; 1037 + u8 *uuid; 1038 1038 char uuid_str[38]; 1039 - static char *uuid_fmt = "%04X%04X-%04X-%04X-%04X-%04X%04X%04X"; 1039 + static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-" 1040 + "%02X%02X-%02X%02X%02X%02X%02X%02X"; 1040 1041 int err; 1041 1042 1042 1043 if (!name) ··· 1059 1058 ETH_ALEN, mac); 1060 1059 1061 1060 if (instance_uuid) { 1062 - uuid = (unsigned short *)instance_uuid; 1061 + uuid = instance_uuid; 1063 1062 sprintf(uuid_str, uuid_fmt, 1064 - uuid[0], uuid[1], uuid[2], uuid[3], 1065 - uuid[4], uuid[5], uuid[6], uuid[7]); 1063 + uuid[0], uuid[1], uuid[2], uuid[3], 1064 + uuid[4], uuid[5], uuid[6], uuid[7], 1065 + uuid[8], uuid[9], uuid[10], uuid[11], 1066 + uuid[12], uuid[13], uuid[14], uuid[15]); 1066 1067 vic_provinfo_add_tlv(vp, 1067 1068 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1068 1069 sizeof(uuid_str), uuid_str); 1069 1070 } 1070 1071 1071 1072 if (host_uuid) { 1072 - uuid = (unsigned short *)host_uuid; 1073 + uuid = host_uuid; 1073 1074 sprintf(uuid_str, uuid_fmt, 1074 - uuid[0], uuid[1], uuid[2], uuid[3], 1075 - uuid[4], uuid[5], uuid[6], uuid[7]); 1075 + uuid[0], uuid[1], uuid[2], uuid[3], 1076 + uuid[4], uuid[5], uuid[6], uuid[7], 1077 + uuid[8], uuid[9], uuid[10], uuid[11], 1078 + uuid[12], uuid[13], uuid[14], uuid[15]); 1076 1079 vic_provinfo_add_tlv(vp, 1077 1080 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1078 1081 sizeof(uuid_str), uuid_str); ··· 1131 1126 1132 1127 switch (request) { 1133 1128 case PORT_REQUEST_ASSOCIATE: 1129 + 1130 + /* If the interface mac addr hasn't been assigned, 1131 + * assign a random mac addr before setting port- 1132 + * profile. 1133 + */ 1134 + 1135 + if (is_zero_ether_addr(netdev->dev_addr)) 1136 + random_ether_addr(netdev->dev_addr); 1134 1137 1135 1138 if (port[IFLA_PORT_PROFILE]) 1136 1139 name = nla_data(port[IFLA_PORT_PROFILE]);
+30 -4
drivers/net/ethoc.c
··· 174 174 * @iobase: pointer to I/O memory region 175 175 * @membase: pointer to buffer memory region 176 176 * @dma_alloc: dma allocated buffer size 177 + * @io_region_size: I/O memory region size 177 178 * @num_tx: number of send buffers 178 179 * @cur_tx: last send buffer written 179 180 * @dty_tx: last buffer actually sent ··· 194 193 void __iomem *iobase; 195 194 void __iomem *membase; 196 195 int dma_alloc; 196 + resource_size_t io_region_size; 197 197 198 198 unsigned int num_tx; 199 199 unsigned int cur_tx; ··· 945 943 priv = netdev_priv(netdev); 946 944 priv->netdev = netdev; 947 945 priv->dma_alloc = 0; 946 + priv->io_region_size = mmio->end - mmio->start + 1; 948 947 949 948 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 950 949 resource_size(mmio)); ··· 1050 1047 ret = register_netdev(netdev); 1051 1048 if (ret < 0) { 1052 1049 dev_err(&netdev->dev, "failed to register interface\n"); 1053 - goto error; 1050 + goto error2; 1054 1051 } 1055 1052 1056 1053 goto out; 1057 1054 1055 + error2: 1056 + netif_napi_del(&priv->napi); 1058 1057 error: 1059 1058 mdiobus_unregister(priv->mdio); 1060 1059 free_mdio: 1061 1060 kfree(priv->mdio->irq); 1062 1061 mdiobus_free(priv->mdio); 1063 1062 free: 1064 - if (priv->dma_alloc) 1065 - dma_free_coherent(NULL, priv->dma_alloc, priv->membase, 1066 - netdev->mem_start); 1063 + if (priv) { 1064 + if (priv->dma_alloc) 1065 + dma_free_coherent(NULL, priv->dma_alloc, priv->membase, 1066 + netdev->mem_start); 1067 + else if (priv->membase) 1068 + devm_iounmap(&pdev->dev, priv->membase); 1069 + if (priv->iobase) 1070 + devm_iounmap(&pdev->dev, priv->iobase); 1071 + } 1072 + if (mem) 1073 + devm_release_mem_region(&pdev->dev, mem->start, 1074 + mem->end - mem->start + 1); 1075 + if (mmio) 1076 + devm_release_mem_region(&pdev->dev, mmio->start, 1077 + mmio->end - mmio->start + 1); 1067 1078 free_netdev(netdev); 1068 1079 out: 1069 1080 return ret; ··· 1095 1078 platform_set_drvdata(pdev, NULL); 1096 1079 1097 1080 if (netdev) { 1081 + netif_napi_del(&priv->napi); 1098 1082 phy_disconnect(priv->phy); 1099 1083 priv->phy = NULL; 1100 1084 ··· 1107 1089 if (priv->dma_alloc) 1108 1090 dma_free_coherent(NULL, priv->dma_alloc, priv->membase, 1109 1091 netdev->mem_start); 1092 + else { 1093 + devm_iounmap(&pdev->dev, priv->membase); 1094 + devm_release_mem_region(&pdev->dev, netdev->mem_start, 1095 + netdev->mem_end - netdev->mem_start + 1); 1096 + } 1097 + devm_iounmap(&pdev->dev, priv->iobase); 1098 + devm_release_mem_region(&pdev->dev, netdev->base_addr, 1099 + priv->io_region_size); 1110 1100 unregister_netdev(netdev); 1111 1101 free_netdev(netdev); 1112 1102 }
+22
drivers/net/fec.c
··· 41 41 #include <linux/clk.h> 42 42 #include <linux/platform_device.h> 43 43 #include <linux/phy.h> 44 + #include <linux/fec.h> 44 45 45 46 #include <asm/cacheflush.h> 46 47 ··· 183 182 struct phy_device *phy_dev; 184 183 int mii_timeout; 185 184 uint phy_speed; 185 + phy_interface_t phy_interface; 186 186 int index; 187 187 int link; 188 188 int full_duplex; ··· 1193 1191 /* Set MII speed */ 1194 1192 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1195 1193 1194 + #ifdef FEC_MIIGSK_ENR 1195 + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { 1196 + /* disable the gasket and wait */ 1197 + writel(0, fep->hwp + FEC_MIIGSK_ENR); 1198 + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1199 + udelay(1); 1200 + 1201 + /* configure the gasket: RMII, 50 MHz, no loopback, no echo */ 1202 + writel(1, fep->hwp + FEC_MIIGSK_CFGR); 1203 + 1204 + /* re-enable the gasket */ 1205 + writel(2, fep->hwp + FEC_MIIGSK_ENR); 1206 + } 1207 + #endif 1208 + 1196 1209 /* And last, enable the transmit and receive processing */ 1197 1210 writel(2, fep->hwp + FEC_ECNTRL); 1198 1211 writel(0, fep->hwp + FEC_R_DES_ACTIVE); ··· 1243 1226 fec_probe(struct platform_device *pdev) 1244 1227 { 1245 1228 struct fec_enet_private *fep; 1229 + struct fec_platform_data *pdata; 1246 1230 struct net_device *ndev; 1247 1231 int i, irq, ret = 0; 1248 1232 struct resource *r; ··· 1276 1258 } 1277 1259 1278 1260 platform_set_drvdata(pdev, ndev); 1261 + 1262 + pdata = pdev->dev.platform_data; 1263 + if (pdata) 1264 + fep->phy_interface = pdata->phy; 1279 1265 1280 1266 /* This device has up to three irqs on some platforms */ 1281 1267 for (i = 0; i < 3; i++) {
+2
drivers/net/fec.h
··· 43 43 #define FEC_R_DES_START 0x180 /* Receive descriptor ring */ 44 44 #define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ 45 45 #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ 46 + #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ 47 + #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ 46 48 47 49 #else 48 50
+6 -2
drivers/net/irda/bfin_sir.c
··· 107 107 case 57600: 108 108 case 115200: 109 109 110 - quot = (port->clk + (8 * speed)) / (16 * speed)\ 111 - - ANOMALY_05000230; 110 + /* 111 + * IRDA is not affected by anomaly 05000230, so there is no 112 + * need to tweak the divisor like the UART driver (which will 113 + * slightly speed up the baud rate on us). 114 + */ 115 + quot = (port->clk + (8 * speed)) / (16 * speed); 112 116 113 117 do { 114 118 udelay(utime);
+3
drivers/net/ixgbe/ixgbe.h
··· 360 360 u32 flags2; 361 361 #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) 362 362 #define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) 363 + #define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) 363 364 /* default to trying for four seconds */ 364 365 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) 365 366 ··· 408 407 u16 eeprom_version; 409 408 410 409 int node; 410 + struct work_struct check_overtemp_task; 411 + u32 interrupt_event; 411 412 412 413 /* SR-IOV */ 413 414 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+1
drivers/net/ixgbe/ixgbe_82598.c
··· 1236 1236 .setup_link = &ixgbe_setup_phy_link_generic, 1237 1237 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 1238 1238 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, 1239 + .check_overtemp = &ixgbe_tn_check_overtemp, 1239 1240 }; 1240 1241 1241 1242 struct ixgbe_info ixgbe_82598_info = {
+1
drivers/net/ixgbe/ixgbe_82599.c
··· 2395 2395 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2396 2396 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2397 2397 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2398 + .check_overtemp = &ixgbe_tn_check_overtemp, 2398 2399 }; 2399 2400 2400 2401 struct ixgbe_info ixgbe_82599_info = {
+69
drivers/net/ixgbe/ixgbe_main.c
··· 108 108 board_82599 }, 109 109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 110 110 board_82599 }, 111 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 112 + board_82599 }, 111 113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 112 114 board_82599 }, 113 115 ··· 1620 1618 } 1621 1619 } 1622 1620 1621 + /** 1622 + * ixgbe_check_overtemp_task - worker thread to check over temperature 1623 + * @work: pointer to work_struct containing our data 1624 + **/ 1625 + static void ixgbe_check_overtemp_task(struct work_struct *work) 1626 + { 1627 + struct ixgbe_adapter *adapter = container_of(work, 1628 + struct ixgbe_adapter, 1629 + check_overtemp_task); 1630 + struct ixgbe_hw *hw = &adapter->hw; 1631 + u32 eicr = adapter->interrupt_event; 1632 + 1633 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 1634 + switch (hw->device_id) { 1635 + case IXGBE_DEV_ID_82599_T3_LOM: { 1636 + u32 autoneg; 1637 + bool link_up = false; 1638 + 1639 + if (hw->mac.ops.check_link) 1640 + hw->mac.ops.check_link(hw, &autoneg, &link_up, false); 1641 + 1642 + if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || 1643 + (eicr & IXGBE_EICR_LSC)) 1644 + /* Check if this is due to overtemp */ 1645 + if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) 1646 + break; 1647 + } 1648 + return; 1649 + default: 1650 + if (!(eicr & IXGBE_EICR_GPI_SDP0)) 1651 + return; 1652 + break; 1653 + } 1654 + DPRINTK(DRV, ERR, "Network adapter has been stopped because it " 1655 + "has overheated. Restart the computer. If the problem " 1656 + "persists, power off the system and replace the " 1657 + "adapter\n"); 1658 + /* write to clear the interrupt */ 1659 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); 1660 + } 1661 + } 1662 + 1623 1663 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1624 1664 { 1625 1665 struct ixgbe_hw *hw = &adapter->hw; ··· 1733 1689 1734 1690 if (hw->mac.type == ixgbe_mac_82599EB) { 1735 1691 ixgbe_check_sfp_event(adapter, eicr); 1692 + adapter->interrupt_event = eicr; 1693 + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 1694 + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) 1695 + schedule_work(&adapter->check_overtemp_task); 1736 1696 1737 1697 /* Handle Flow Director Full threshold interrupt */ 1738 1698 if (eicr & IXGBE_EICR_FLOW_DIR) { ··· 2238 2190 u32 mask; 2239 2191 2240 2192 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 2193 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 2194 + mask |= IXGBE_EIMS_GPI_SDP0; 2241 2195 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2242 2196 mask |= IXGBE_EIMS_GPI_SDP1; 2243 2197 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ··· 2300 2250 ixgbe_check_sfp_event(adapter, eicr); 2301 2251 2302 2252 ixgbe_check_fan_failure(adapter, eicr); 2253 + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 2254 + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) 2255 + schedule_work(&adapter->check_overtemp_task); 2303 2256 2304 2257 if (napi_schedule_prep(&(q_vector->napi))) { 2305 2258 adapter->tx_ring[0]->total_packets = 0; ··· 3318 3265 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3319 3266 } 3320 3267 3268 + /* Enable Thermal over heat sensor interrupt */ 3269 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 3270 + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3271 + gpie |= IXGBE_SDP0_GPIEN; 3272 + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3273 + } 3274 + 3321 3275 /* Enable fan failure interrupt if media type is copper */ 3322 3276 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3323 3277 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); ··· 3725 3665 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3726 3666 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3727 3667 cancel_work_sync(&adapter->fdir_reinit_task); 3668 + 3669 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 3670 + cancel_work_sync(&adapter->check_overtemp_task); 3728 3671 3729 3672 /* disable transmits in the hardware now that interrupts are off */ 3730 3673 for (i = 0; i < adapter->num_tx_queues; i++) { ··· 4708 4645 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 4709 4646 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4710 4647 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 4648 + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 4649 + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 4711 4650 if (dev->features & NETIF_F_NTUPLE) { 4712 4651 /* Flow Director perfect filter enabled */ 4713 4652 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ··· 6626 6561 } 6627 6562 6628 6563 /* reset_hw fills in the perm_addr as well */ 6564 + hw->phy.reset_if_overtemp = true; 6629 6565 err = hw->mac.ops.reset_hw(hw); 6566 + hw->phy.reset_if_overtemp = false; 6630 6567 if (err == IXGBE_ERR_SFP_NOT_PRESENT && 6631 6568 hw->mac.type == ixgbe_mac_82598EB) { 6632 6569 /* ··· 6797 6730 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 6798 6731 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); 6799 6732 6733 + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) 6734 + INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task); 6800 6735 #ifdef CONFIG_IXGBE_DCA 6801 6736 if (dca_add_requester(&pdev->dev) == 0) { 6802 6737 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
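
The ixgbe interrupt handlers above stash the cause bits in adapter->interrupt_event and punt the over-temperature check to process context, since link polling is far too slow for an ISR. The general shape of that deferral, as a hedged sketch with made-up names:

    #include <linux/interrupt.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_adapter {
            struct work_struct check_task;
            u32 interrupt_event;            /* cause bits saved for the worker */
    };

    /* Hypothetical helpers, declared only to keep the sketch self-contained. */
    void handle_slow_event(struct my_adapter *ad, u32 cause);
    u32 read_cause_register(struct my_adapter *ad);

    /* Process context: sleeping and slow register polling are safe here. */
    static void my_check_task(struct work_struct *work)
    {
            struct my_adapter *ad =
                    container_of(work, struct my_adapter, check_task);

            handle_slow_event(ad, ad->interrupt_event);
    }

    static irqreturn_t my_isr(int irq, void *data)
    {
            struct my_adapter *ad = data;

            ad->interrupt_event = read_cause_register(ad);
            schedule_work(&ad->check_task); /* defer; ISRs must not sleep */
            return IRQ_HANDLED;
    }

    /* Pair INIT_WORK() at probe with cancel_work_sync() in the down path,
     * exactly as the ixgbe_probe()/ixgbe_down() hunks above do. */
    static void my_init(struct my_adapter *ad)
    {
            INIT_WORK(&ad->check_task, my_check_task);
    }

    static void my_down(struct my_adapter *ad)
    {
            cancel_work_sync(&ad->check_task);
    }
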
+30
drivers/net/ixgbe/ixgbe_phy.c
··· 135 135 **/ 136 136 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 137 137 { 138 + /* Don't reset PHY if it's shut down due to overtemp. */ 139 + if (!hw->phy.reset_if_overtemp && 140 + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 141 + return 0; 142 + 138 143 /* 139 144 * Perform soft PHY reset to the PHY_XS. 140 145 * This will cause a soft reset to the PHY ··· 1350 1345 return status; 1351 1346 } 1352 1347 1348 + /** 1349 + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 1350 + * @hw: pointer to hardware structure 1351 + * 1352 + * Checks if the LASI temp alarm status was triggered due to overtemp 1353 + **/ 1354 + s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) 1355 + { 1356 + s32 status = 0; 1357 + u16 phy_data = 0; 1358 + 1359 + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) 1360 + goto out; 1361 + 1362 + /* Check that the LASI temp alarm status was triggered */ 1363 + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, 1364 + MDIO_MMD_PMAPMD, &phy_data); 1365 + 1366 + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) 1367 + goto out; 1368 + 1369 + status = IXGBE_ERR_OVERTEMP; 1370 + out: 1371 + return status; 1372 + }
+3
drivers/net/ixgbe/ixgbe_phy.h
··· 80 80 #define IXGBE_I2C_T_SU_STO 4 81 81 #define IXGBE_I2C_T_BUF 5 82 82 83 + #define IXGBE_TN_LASI_STATUS_REG 0x9005 84 + #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 83 85 84 86 s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); 85 87 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); ··· 108 106 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 109 107 u16 *list_offset, 110 108 u16 *data_offset); 109 + s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); 111 110 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 112 111 u8 dev_addr, u8 *data); 113 112 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+4
drivers/net/ixgbe/ixgbe_type.h
··· 51 51 #define IXGBE_DEV_ID_82599_KX4 0x10F7 52 52 #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 53 53 #define IXGBE_DEV_ID_82599_KR 0x1517 54 + #define IXGBE_DEV_ID_82599_T3_LOM 0x151C 54 55 #define IXGBE_DEV_ID_82599_CX4 0x10F9 55 56 #define IXGBE_DEV_ID_82599_SFP 0x10FB 56 57 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 ··· 2471 2470 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); 2472 2471 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); 2473 2472 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); 2473 + s32 (*check_overtemp)(struct ixgbe_hw *); 2474 2474 }; 2475 2475 2476 2476 struct ixgbe_eeprom_info { ··· 2520 2518 enum ixgbe_smart_speed smart_speed; 2521 2519 bool smart_speed_active; 2522 2520 bool multispeed_fiber; 2521 + bool reset_if_overtemp; 2523 2522 }; 2524 2523 2525 2524 #include "ixgbe_mbx.h" ··· 2608 2605 #define IXGBE_ERR_FDIR_REINIT_FAILED -23 2609 2606 #define IXGBE_ERR_EEPROM_VERSION -24 2610 2607 #define IXGBE_ERR_NO_SPACE -25 2608 + #define IXGBE_ERR_OVERTEMP -26 2611 2609 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2612 2610 2613 2611 #endif /* _IXGBE_TYPE_H_ */
+8 -1
drivers/net/macvlan.c
··· 634 634 635 635 err = register_netdevice(dev); 636 636 if (err < 0) 637 - return err; 637 + goto destroy_port; 638 638 639 639 list_add_tail(&vlan->list, &port->vlans); 640 640 netif_stacked_transfer_operstate(lowerdev, dev); 641 + 641 642 return 0; 643 + 644 + destroy_port: 645 + if (list_empty(&port->vlans)) 646 + macvlan_port_destroy(lowerdev); 647 + 648 + return err; 642 649 } 643 650 EXPORT_SYMBOL_GPL(macvlan_common_newlink); 644 651
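
The macvlan fix routes the register_netdevice() failure through a label that destroys the port created earlier in the same call, instead of returning and leaking it. Kernel error paths conventionally unwind this way, with one label per step that needs undoing. A minimal generic sketch of the idiom (illustrative names only):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_ctx {
            void *a;
            void *b;
    };

    static int my_setup(struct my_ctx *ctx)
    {
            int err = -ENOMEM;

            ctx->a = kzalloc(64, GFP_KERNEL);
            if (!ctx->a)
                    return -ENOMEM;

            ctx->b = kzalloc(64, GFP_KERNEL);
            if (!ctx->b)
                    goto free_a;    /* undo only the steps that succeeded */

            return 0;

    free_a:
            kfree(ctx->a);
            ctx->a = NULL;
            return err;
    }

In the macvlan case the label additionally checks list_empty(&port->vlans) so the port is torn down only when this failed link was its sole prospective user.
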
+1
drivers/net/pppoe.c
··· 289 289 struct pppoe_net *pn; 290 290 int i; 291 291 292 + pn = pppoe_pernet(dev_net(dev)); 292 293 write_lock_bh(&pn->hash_lock); 293 294 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 294 295 struct pppox_sock *po = pn->hash_table[i];
+3
drivers/net/sh_eth.c
··· 1294 1294 /* remove mdio bus info from net_device */ 1295 1295 dev_set_drvdata(&ndev->dev, NULL); 1296 1296 1297 + /* free interrupts memory */ 1298 + kfree(bus->irq); 1299 + 1297 1300 /* free bitbang info */ 1298 1301 free_mdio_bitbang(bus); 1299 1302
+2
drivers/net/tun.c
··· 526 526 struct sk_buff *skb; 527 527 int err; 528 528 529 + sock_update_classid(sk); 530 + 529 531 /* Under a page? Don't bother with paged skb. */ 530 532 if (prepad + len < PAGE_SIZE || !linear) 531 533 linear = len;
+2 -2
drivers/net/usb/asix.c
··· 322 322 size = (u16) (header & 0x0000ffff); 323 323 324 324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) { 325 - u8 alignment = (u32)skb->data & 0x3; 325 + u8 alignment = (unsigned long)skb->data & 0x3; 326 326 if (alignment != 0x2) { 327 327 /* 328 328 * not 16bit aligned so use the room provided by ··· 351 351 } 352 352 ax_skb = skb_clone(skb, GFP_ATOMIC); 353 353 if (ax_skb) { 354 - u8 alignment = (u32)packet & 0x3; 354 + u8 alignment = (unsigned long)packet & 0x3; 355 355 ax_skb->len = size; 356 356 357 357 if (alignment != 0x2) {
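
The asix change matters on 64-bit hosts: skb->data is a 64-bit pointer there, so casting it to u32 truncates it and at minimum triggers a pointer-to-integer-cast warning; the low bits happen to survive the truncation, but (unsigned long), or uintptr_t in userspace, is the portable way to inspect pointer alignment. A stand-alone illustration (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            char buf[8];
            char *p = buf + 2;

            /* uintptr_t (the kernel uses unsigned long) is guaranteed wide
             * enough to hold a pointer value; u32 is not on LP64 targets. */
            unsigned int alignment = (uintptr_t)p & 0x3;

            printf("low two address bits: %u\n", alignment);
            return 0;
    }
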
+3
drivers/net/usb/hso.c
··· 475 475 {USB_DEVICE(0x0af0, 0x8302)}, 476 476 {USB_DEVICE(0x0af0, 0x8304)}, 477 477 {USB_DEVICE(0x0af0, 0x8400)}, 478 + {USB_DEVICE(0x0af0, 0x8600)}, 479 + {USB_DEVICE(0x0af0, 0x8800)}, 480 + {USB_DEVICE(0x0af0, 0x8900)}, 478 481 {USB_DEVICE(0x0af0, 0xd035)}, 479 482 {USB_DEVICE(0x0af0, 0xd055)}, 480 483 {USB_DEVICE(0x0af0, 0xd155)},
+2 -2
drivers/net/wimax/i2400m/rx.c
··· 1027 1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; 1028 1028 1029 1029 spin_lock_irqsave(&i2400m->rx_lock, flags); 1030 - roq = &i2400m->rx_roq[ro_cin]; 1031 - if (roq == NULL) { 1030 + if (i2400m->rx_roq == NULL) { 1032 1031 kfree_skb(skb); /* rx_roq is already destroyed */ 1033 1032 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1034 1033 goto error; 1035 1034 } 1035 + roq = &i2400m->rx_roq[ro_cin]; 1036 1036 kref_get(&i2400m->rx_roq_refcount); 1037 1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1038 1038
+4 -3
drivers/net/wireless/ath/ath5k/base.c
··· 1214 1214 struct ath5k_hw *ah = sc->ah; 1215 1215 struct sk_buff *skb = bf->skb; 1216 1216 struct ath5k_desc *ds; 1217 + int ret; 1217 1218 1218 1219 if (!skb) { 1219 1220 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); ··· 1241 1240 ds = bf->desc; 1242 1241 ds->ds_link = bf->daddr; /* link to self */ 1243 1242 ds->ds_data = bf->skbaddr; 1244 - ah->ah_setup_rx_desc(ah, ds, 1245 - skb_tailroom(skb), /* buffer size */ 1246 - 0); 1243 + ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); 1244 + if (ret) 1245 + return ret; 1247 1246 1248 1247 if (sc->rxlink != NULL) 1249 1248 *sc->rxlink = bf->daddr;
+13 -62
drivers/net/wireless/ath/ath9k/beacon.c
··· 76 76 ds = bf->bf_desc; 77 77 flags = ATH9K_TXDESC_NOACK; 78 78 79 - if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 80 - (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) && 81 - (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 82 - ds->ds_link = bf->bf_daddr; /* self-linked */ 83 - flags |= ATH9K_TXDESC_VEOL; 84 - /* Let hardware handle antenna switching. */ 85 - antenna = 0; 86 - } else { 87 - ds->ds_link = 0; 88 - /* 89 - * Switch antenna every beacon. 90 - * Should only switch every beacon period, not for every SWBA 91 - * XXX assumes two antennae 92 - */ 93 - antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1); 94 - } 79 + ds->ds_link = 0; 80 + /* 81 + * Switch antenna every beacon. 82 + * Should only switch every beacon period, not for every SWBA 83 + * XXX assumes two antennae 84 + */ 85 + antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1); 95 86 96 87 sband = &sc->sbands[common->hw->conf.channel->band]; 97 88 rate = sband->bitrates[rateidx].hw_value; ··· 206 215 return bf; 207 216 } 208 217 209 - /* 210 - * Startup beacon transmission for adhoc mode when they are sent entirely 211 - * by the hardware using the self-linked descriptor + veol trick. 212 - */ 213 - static void ath_beacon_start_adhoc(struct ath_softc *sc, 214 - struct ieee80211_vif *vif) 215 - { 216 - struct ath_hw *ah = sc->sc_ah; 217 - struct ath_common *common = ath9k_hw_common(ah); 218 - struct ath_buf *bf; 219 - struct ath_vif *avp; 220 - struct sk_buff *skb; 221 - 222 - avp = (void *)vif->drv_priv; 223 - 224 - if (avp->av_bcbuf == NULL) 225 - return; 226 - 227 - bf = avp->av_bcbuf; 228 - skb = bf->bf_mpdu; 229 - 230 - ath_beacon_setup(sc, avp, bf, 0); 231 - 232 - /* NB: caller is known to have already stopped tx dma */ 233 - ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr); 234 - ath9k_hw_txstart(ah, sc->beacon.beaconq); 235 - ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n", 236 - sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc); 237 - } 238 - 239 218 int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) 240 219 { 241 220 struct ath_softc *sc = aphy->sc; ··· 226 265 list_del(&avp->av_bcbuf->list); 227 266 228 267 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || 229 - !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 268 + sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC || 269 + sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) { 230 270 int slot; 231 271 /* 232 272 * Assign the vif to a beacon xmit slot. As ··· 236 274 avp->av_bslot = 0; 237 275 for (slot = 0; slot < ATH_BCBUF; slot++) 238 276 if (sc->beacon.bslot[slot] == NULL) { 239 - /* 240 - * XXX hack, space out slots to better 241 - * deal with misses 242 - */ 243 - if (slot+1 < ATH_BCBUF && 244 - sc->beacon.bslot[slot+1] == NULL) { 245 - avp->av_bslot = slot+1; 246 - break; 247 - } 248 277 avp->av_bslot = slot; 278 + 249 279 /* NB: keep looking for a double slot */ 280 + if (slot == 0 || !sc->beacon.bslot[slot-1]) 281 + break; 250 282 } 251 283 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); 252 284 sc->beacon.bslot[avp->av_bslot] = vif; ··· 677 721 * self-linked tx descriptor and let the hardware deal with things. 678 722 */ 679 723 intval |= ATH9K_BEACON_ENA; 680 - if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) 681 - ah->imask |= ATH9K_INT_SWBA; 724 + ah->imask |= ATH9K_INT_SWBA; 682 725 683 726 ath_beaconq_config(sc); 684 727 ··· 687 732 ath9k_beacon_init(sc, nexttbtt, intval); 688 733 sc->beacon.bmisscnt = 0; 689 734 ath9k_hw_set_interrupts(ah, ah->imask); 690 - 691 - /* FIXME: Handle properly when vif is NULL */ 692 - if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL) 693 - ath_beacon_start_adhoc(sc, vif); 694 735 } 695 736 696 737 void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
+8 -2
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 107 107 static void hif_usb_tx_cb(struct urb *urb) 108 108 { 109 109 struct tx_buf *tx_buf = (struct tx_buf *) urb->context; 110 - struct hif_device_usb *hif_dev = tx_buf->hif_dev; 110 + struct hif_device_usb *hif_dev; 111 111 struct sk_buff *skb; 112 112 113 - if (!hif_dev || !tx_buf) 113 + if (!tx_buf || !tx_buf->hif_dev) 114 114 return; 115 + 116 + hif_dev = tx_buf->hif_dev; 115 117 116 118 switch (urb->status) { 117 119 case 0: ··· 609 607 610 608 return 0; 611 609 err: 610 + if (tx_buf) { 611 + kfree(tx_buf->buf); 612 + kfree(tx_buf); 613 + } 612 614 ath9k_hif_usb_dealloc_tx_urbs(hif_dev); 613 615 return -ENOMEM; 614 616 }
+1
drivers/net/wireless/ath/ath9k/htc.h
··· 23 23 #include <linux/skbuff.h> 24 24 #include <linux/netdevice.h> 25 25 #include <linux/leds.h> 26 + #include <linux/slab.h> 26 27 #include <net/mac80211.h> 27 28 28 29 #include "common.h"
-1
drivers/net/wireless/ath/ath9k/pci.c
··· 28 28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ 29 29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 30 30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 31 - { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ 32 31 { 0 } 33 32 }; 34 33
+12 -5
drivers/net/wireless/ath/ath9k/recv.c
··· 19 19 20 20 #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 21 21 22 + static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 23 + { 24 + return sc->ps_enabled && 25 + (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); 26 + } 27 + 22 28 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 23 29 struct ieee80211_hdr *hdr) 24 30 { ··· 622 616 hdr = (struct ieee80211_hdr *)skb->data; 623 617 624 618 /* Process Beacon and CAB receive in PS state */ 625 - if ((sc->ps_flags & PS_WAIT_FOR_BEACON) && 626 - ieee80211_is_beacon(hdr->frame_control)) 619 + if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 620 + && ieee80211_is_beacon(hdr->frame_control)) 627 621 ath_rx_ps_beacon(sc, skb); 628 622 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 629 623 (ieee80211_is_data(hdr->frame_control) || ··· 938 932 sc->rx.rxotherant = 0; 939 933 } 940 934 941 - if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON | 942 - PS_WAIT_FOR_CAB | 943 - PS_WAIT_FOR_PSPOLL_DATA))) 935 + if (unlikely(ath9k_check_auto_sleep(sc) || 936 + (sc->ps_flags & (PS_WAIT_FOR_BEACON | 937 + PS_WAIT_FOR_CAB | 938 + PS_WAIT_FOR_PSPOLL_DATA)))) 944 939 ath_rx_ps(sc, skb); 945 940 946 941 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
+1
drivers/net/wireless/iwlwifi/iwl-agn-ict.c
··· 30 30 #include <linux/module.h> 31 31 #include <linux/etherdevice.h> 32 32 #include <linux/sched.h> 33 + #include <linux/gfp.h> 33 34 #include <net/mac80211.h> 34 35 35 36 #include "iwl-dev.h"
+18 -3
drivers/net/wireless/iwlwifi/iwl-scan.c
··· 376 376 377 377 mutex_lock(&priv->mutex); 378 378 379 + if (priv->is_internal_short_scan == true) { 380 + IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); 381 + goto unlock; 382 + } 383 + 379 384 if (!iwl_is_ready_rf(priv)) { 380 385 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); 381 386 goto unlock; ··· 502 497 { 503 498 struct iwl_priv *priv = 504 499 container_of(work, struct iwl_priv, scan_completed); 500 + bool internal = false; 505 501 506 502 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); 507 503 508 504 cancel_delayed_work(&priv->scan_check); 509 505 510 - if (!priv->is_internal_short_scan) 511 - ieee80211_scan_completed(priv->hw, false); 512 - else { 506 + mutex_lock(&priv->mutex); 507 + if (priv->is_internal_short_scan) { 513 508 priv->is_internal_short_scan = false; 514 509 IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); 510 + internal = true; 515 511 } 512 + mutex_unlock(&priv->mutex); 513 + 514 + /* 515 + * Do not hold mutex here since this will cause mac80211 to call 516 + * into driver again into functions that will attempt to take 517 + * mutex. 518 + */ 519 + if (!internal) 520 + ieee80211_scan_completed(priv->hw, false); 516 521 517 522 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 518 523 return;
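
The iwl-scan rework reads and clears the internal-scan flag under priv->mutex, releases the mutex, and only then calls ieee80211_scan_completed(), because mac80211 may call straight back into driver entry points that take the same mutex. The shape of that fix, sketched with placeholder names:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct my_priv {
            struct mutex lock;
            bool internal_scan;
    };

    /* notify() may re-enter code that takes priv->lock, so decide what to
     * do while locked, then make the call-out unlocked. */
    static void my_scan_completed(struct my_priv *priv,
                                  void (*notify)(struct my_priv *))
    {
            bool internal;

            mutex_lock(&priv->lock);
            internal = priv->internal_scan;
            priv->internal_scan = false;
            mutex_unlock(&priv->lock);

            if (!internal)
                    notify(priv);   /* no lock held across the call-out */
    }
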
+1 -1
drivers/net/wireless/iwlwifi/iwl-sta.c
··· 431 431 struct iwl_link_quality_cmd *link_cmd; 432 432 unsigned long flags; 433 433 434 - if (*sta_id_r) 434 + if (sta_id_r) 435 435 *sta_id_r = IWL_INVALID_STATION; 436 436 437 437 ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
+10 -6
drivers/net/wireless/rndis_wlan.c
··· 2572 2572 2573 2573 static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) 2574 2574 { 2575 - union iwreq_data evt; 2575 + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2576 + 2577 + if (priv->connected) { 2578 + priv->connected = false; 2579 + memset(priv->bssid, 0, ETH_ALEN); 2580 + 2581 + deauthenticate(usbdev); 2582 + 2583 + cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL); 2584 + } 2576 2585 2577 2586 netif_carrier_off(usbdev->net); 2578 - 2579 - evt.data.flags = 0; 2580 - evt.data.length = 0; 2581 - memset(evt.ap_addr.sa_data, 0, ETH_ALEN); 2582 - wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL); 2583 2587 } 2584 2588 2585 2589 static void rndis_wlan_worker(struct work_struct *work)
+5 -4
drivers/net/wireless/rt2x00/rt2400pci.c
··· 926 926 static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, 927 927 enum dev_state state) 928 928 { 929 - u32 reg; 929 + u32 reg, reg2; 930 930 unsigned int i; 931 931 char put_to_sleep; 932 932 char bbp_state; ··· 947 947 * device has entered the correct state. 948 948 */ 949 949 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 950 - rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); 951 - bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); 952 - rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); 950 + rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2); 951 + bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); 952 + rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); 953 953 if (bbp_state == state && rf_state == state) 954 954 return 0; 955 + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); 955 956 msleep(10); 956 957 } 957 958
+5 -4
drivers/net/wireless/rt2x00/rt2500pci.c
··· 1084 1084 static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, 1085 1085 enum dev_state state) 1086 1086 { 1087 - u32 reg; 1087 + u32 reg, reg2; 1088 1088 unsigned int i; 1089 1089 char put_to_sleep; 1090 1090 char bbp_state; ··· 1105 1105 * device has entered the correct state. 1106 1106 */ 1107 1107 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1108 - rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); 1109 - bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); 1110 - rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); 1108 + rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2); 1109 + bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); 1110 + rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); 1111 1111 if (bbp_state == state && rf_state == state) 1112 1112 return 0; 1113 + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); 1113 1114 msleep(10); 1114 1115 } 1115 1116
+1 -1
drivers/net/wireless/rt2x00/rt2800usb.c
··· 413 413 */ 414 414 rt2x00_desc_read(txi, 0, &word); 415 415 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 416 - skb->len + TXWI_DESC_SIZE); 416 + skb->len - TXINFO_DESC_SIZE); 417 417 rt2x00_set_field32(&word, TXINFO_W0_WIV, 418 418 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 419 419 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
+1 -1
drivers/net/wireless/rt2x00/rt2x00pci.c
··· 206 206 /* 207 207 * Free irq line. 208 208 */ 209 - free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev); 209 + free_irq(rt2x00dev->irq, rt2x00dev); 210 210 211 211 /* 212 212 * Free DMA
+4 -3
drivers/net/wireless/rt2x00/rt61pci.c
··· 1689 1689 1690 1690 static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1691 1691 { 1692 - u32 reg; 1692 + u32 reg, reg2; 1693 1693 unsigned int i; 1694 1694 char put_to_sleep; 1695 1695 ··· 1706 1706 * device has entered the correct state. 1707 1707 */ 1708 1708 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1709 - rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg); 1710 - state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); 1709 + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg2); 1710 + state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); 1711 1711 if (state == !put_to_sleep) 1712 1712 return 0; 1713 + rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg); 1713 1714 msleep(10); 1714 1715 } 1715 1716
+4 -3
drivers/net/wireless/rt2x00/rt73usb.c
··· 1366 1366 1367 1367 static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1368 1368 { 1369 - u32 reg; 1369 + u32 reg, reg2; 1370 1370 unsigned int i; 1371 1371 char put_to_sleep; 1372 1372 ··· 1383 1383 * device has entered the correct state. 1384 1384 */ 1385 1385 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1386 - rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg); 1387 - state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); 1386 + rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg2); 1387 + state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); 1388 1388 if (state == !put_to_sleep) 1389 1389 return 0; 1390 + rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg); 1390 1391 msleep(10); 1391 1392 } 1392 1393
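
All four rt2x00 sleep/awake fixes above (rt2400pci, rt2500pci, rt61pci, rt73usb) address the same bug: the poll overwrote the register variable still holding the sleep command, so each retry polled without ever re-issuing the command. Keeping the command and the polled status in separate variables and rewriting the command each iteration looks roughly like this (illustrative register names and stub accessors, not the rt2x00 API):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define REGISTER_BUSY_COUNT     5

    struct my_dev;  /* opaque; the accessors below are illustrative stubs */
    void my_register_read(struct my_dev *dev, unsigned int reg, u32 *val);
    void my_register_write(struct my_dev *dev, unsigned int reg, u32 val);

    static int my_set_state(struct my_dev *dev, unsigned int reg,
                            u32 cmd, u32 wanted)
    {
            u32 status;     /* separate from 'cmd' so the command survives */
            unsigned int i;

            my_register_write(dev, reg, cmd);
            for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                    my_register_read(dev, reg, &status);
                    if ((status & 0x3) == wanted)
                            return 0;
                    my_register_write(dev, reg, cmd); /* re-issue each retry */
                    msleep(10);
            }
            return -EBUSY;
    }
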
+2
drivers/net/wireless/wl12xx/wl1271_rx.c
··· 113 113 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, 114 114 beacon ? "beacon" : ""); 115 115 116 + skb_trim(skb, skb->len - desc->pad_len); 117 + 116 118 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 117 119 ieee80211_rx_ni(wl->hw, skb); 118 120 }
+21
include/linux/fec.h
··· 1 + /* include/linux/fec.h 2 + * 3 + * Copyright (c) 2009 Orex Computed Radiography 4 + * Baruch Siach <baruch@tkos.co.il> 5 + * 6 + * Header file for the FEC platform data 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + #ifndef __LINUX_FEC_H__ 13 + #define __LINUX_FEC_H__ 14 + 15 + #include <linux/phy.h> 16 + 17 + struct fec_platform_data { 18 + phy_interface_t phy; 19 + }; 20 + 21 + #endif
+12 -4
include/linux/netdevice.h
··· 1407 1407 struct softnet_data *rps_ipi_next; 1408 1408 unsigned int cpu; 1409 1409 unsigned int input_queue_head; 1410 + unsigned int input_queue_tail; 1410 1411 #endif 1411 1412 unsigned dropped; 1412 1413 struct sk_buff_head input_pkt_queue; 1413 1414 struct napi_struct backlog; 1414 1415 }; 1415 1416 1416 - static inline void input_queue_head_add(struct softnet_data *sd, 1417 - unsigned int len) 1417 + static inline void input_queue_head_incr(struct softnet_data *sd) 1418 1418 { 1419 1419 #ifdef CONFIG_RPS 1420 - sd->input_queue_head += len; 1420 + sd->input_queue_head++; 1421 + #endif 1422 + } 1423 + 1424 + static inline void input_queue_tail_incr_save(struct softnet_data *sd, 1425 + unsigned int *qtail) 1426 + { 1427 + #ifdef CONFIG_RPS 1428 + *qtail = ++sd->input_queue_tail; 1421 1429 #endif 1422 1430 } 1423 1431 ··· 2334 2326 #define netif_vdbg(priv, type, dev, format, args...) \ 2335 2327 ({ \ 2336 2328 if (0) \ 2337 - netif_printk(KERN_DEBUG, dev, format, ##args); \ 2329 + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 2338 2330 0; \ 2339 2331 }) 2340 2332 #endif
+1 -1
include/linux/netfilter/x_tables.h
··· 333 333 /* Called when user tries to insert an entry of this type: 334 334 hook_mask is a bitmask of hooks from which it can be 335 335 called. */ 336 - /* Should return true or false, or an error code (-Exxxx). */ 336 + /* Should return 0 on success or an error code otherwise (-Exxxx). */ 337 337 int (*checkentry)(const struct xt_tgchk_param *); 338 338 339 339 /* Called when entry of this type deleted. */
+2 -2
include/net/caif/cfctrl.h
··· 94 94 enum cfctrl_cmd cmd; 95 95 u8 channel_id; 96 96 struct cfctrl_link_param param; 97 - struct cfctrl_request_info *next; 98 97 struct cflayer *client_layer; 98 + struct list_head list; 99 99 }; 100 100 101 101 struct cfctrl { ··· 103 103 struct cfctrl_rsp res; 104 104 atomic_t req_seq_no; 105 105 atomic_t rsp_seq_no; 106 - struct cfctrl_request_info *first_req; 106 + struct list_head list; 107 107 /* Protects from simultaneous access to first_req list */ 108 108 spinlock_t info_list_lock; 109 109 #ifndef CAIF_NO_LOOP
+63
include/net/cls_cgroup.h
··· 1 + /* 2 + * cls_cgroup.h Control Group Classifier 3 + * 4 + * Authors: Thomas Graf <tgraf@suug.ch> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the Free 8 + * Software Foundation; either version 2 of the License, or (at your option) 9 + * any later version. 10 + * 11 + */ 12 + 13 + #ifndef _NET_CLS_CGROUP_H 14 + #define _NET_CLS_CGROUP_H 15 + 16 + #include <linux/cgroup.h> 17 + #include <linux/hardirq.h> 18 + #include <linux/rcupdate.h> 19 + 20 + #ifdef CONFIG_CGROUPS 21 + struct cgroup_cls_state 22 + { 23 + struct cgroup_subsys_state css; 24 + u32 classid; 25 + }; 26 + 27 + #ifdef CONFIG_NET_CLS_CGROUP 28 + static inline u32 task_cls_classid(struct task_struct *p) 29 + { 30 + if (in_interrupt()) 31 + return 0; 32 + 33 + return container_of(task_subsys_state(p, net_cls_subsys_id), 34 + struct cgroup_cls_state, css)->classid; 35 + } 36 + #else 37 + extern int net_cls_subsys_id; 38 + 39 + static inline u32 task_cls_classid(struct task_struct *p) 40 + { 41 + int id; 42 + u32 classid; 43 + 44 + if (in_interrupt()) 45 + return 0; 46 + 47 + rcu_read_lock(); 48 + id = rcu_dereference(net_cls_subsys_id); 49 + if (id >= 0) 50 + classid = container_of(task_subsys_state(p, id), 51 + struct cgroup_cls_state, css)->classid; 52 + rcu_read_unlock(); 53 + 54 + return classid; 55 + } 56 + #endif 57 + #else 58 + static inline u32 task_cls_classid(struct task_struct *p) 59 + { 60 + return 0; 61 + } 62 + #endif 63 + #endif /* _NET_CLS_CGROUP_H */
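
A consumer of the new header would typically look like the hypothetical helper below, shown only to make the built-in versus module split above concrete (the module branch resolves net_cls_subsys_id under RCU internally):

    #include <linux/sched.h>
    #include <net/cls_cgroup.h>

    /* Returns the cgroup classid of the current task, or 0 when no class
     * is assigned or we are running in interrupt context. */
    static u32 my_classify_current(void)
    {
            return task_cls_classid(current);
    }
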
+3
include/net/mac80211.h
··· 815 815 * encrypted in hardware. 816 816 * @alg: The key algorithm. 817 817 * @flags: key flags, see &enum ieee80211_key_flags. 818 + * @ap_addr: AP's MAC address 818 819 * @keyidx: the key index (0-3) 819 820 * @keylen: key material length 820 821 * @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte) ··· 1637 1636 * that TX/RX_STOP can pass NULL for this parameter. 1638 1637 * Returns a negative error code on failure. 1639 1638 * The callback must be atomic. 1639 + * 1640 + * @get_survey: Return per-channel survey information 1640 1641 * 1641 1642 * @rfkill_poll: Poll rfkill hardware state. If you need this, you also 1642 1643 * need to set wiphy->rfkill_poll to %true before registration,
+1 -1
include/net/netfilter/nf_conntrack_core.h
··· 61 61 int ret = NF_ACCEPT; 62 62 63 63 if (ct && ct != &nf_conntrack_untracked) { 64 - if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) 64 + if (!nf_ct_is_confirmed(ct)) 65 65 ret = __nf_conntrack_confirm(skb); 66 66 if (likely(ret == NF_ACCEPT)) 67 67 nf_ct_deliver_cached_events(ct);
+10 -2
include/net/sock.h
··· 312 312 void *sk_security; 313 313 #endif 314 314 __u32 sk_mark; 315 - /* XXX 4 bytes hole on 64 bit */ 315 + u32 sk_classid; 316 316 void (*sk_state_change)(struct sock *sk); 317 317 void (*sk_data_ready)(struct sock *sk, int bytes); 318 318 void (*sk_write_space)(struct sock *sk); ··· 1074 1074 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 1075 1075 extern void sk_send_sigurg(struct sock *sk); 1076 1076 1077 + #ifdef CONFIG_CGROUPS 1078 + extern void sock_update_classid(struct sock *sk); 1079 + #else 1080 + static inline void sock_update_classid(struct sock *sk) 1081 + { 1082 + } 1083 + #endif 1084 + 1077 1085 /* 1078 1086 * Functions to fill in entries in struct proto_ops when a protocol 1079 1087 * does not implement a particular function. ··· 1412 1404 1413 1405 /** 1414 1406 * wq_has_sleeper - check if there are any waiting processes 1415 - * @sk: struct socket_wq 1407 + * @wq: struct socket_wq 1416 1408 * 1417 1409 * Returns true if socket_wq has waiting processes 1418 1410 *
+3 -1
kernel/sysctl.c
··· 2287 2287 if (write) { 2288 2288 left -= proc_skip_spaces(&kbuf); 2289 2289 2290 + if (!left) 2291 + break; 2290 2292 err = proc_get_long(&kbuf, &left, &lval, &neg, 2291 2293 proc_wspace_sep, 2292 2294 sizeof(proc_wspace_sep), NULL); ··· 2315 2313 2316 2314 if (!write && !first && left && !err) 2317 2315 err = proc_put_char(&buffer, &left, '\n'); 2318 - if (write && !err) 2316 + if (write && !err && left) 2319 2317 left -= proc_skip_spaces(&kbuf); 2320 2318 free: 2321 2319 if (write) {
+1 -4
net/caif/Kconfig
··· 2 2 # CAIF net configurations 3 3 # 4 4 5 - #menu "CAIF Support" 6 - comment "CAIF Support" 7 5 menuconfig CAIF 8 - tristate "Enable CAIF support" 6 + tristate "CAIF support" 9 7 select CRC_CCITT 10 8 default n 11 9 ---help--- ··· 43 45 If unsure say Y. 44 46 45 47 endif 46 - #endmenu
+36 -57
net/caif/caif_socket.c
··· 60 60 atomic_t num_rx_flow_off; 61 61 atomic_t num_rx_flow_on; 62 62 }; 63 - struct debug_fs_counter cnt; 63 + static struct debug_fs_counter cnt; 64 64 #define dbfs_atomic_inc(v) atomic_inc(v) 65 65 #define dbfs_atomic_dec(v) atomic_dec(v) 66 66 #else ··· 128 128 mutex_unlock(&cf_sk->readlock); 129 129 } 130 130 131 - int sk_rcvbuf_lowwater(struct caifsock *cf_sk) 131 + static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) 132 132 { 133 133 /* A quarter of full buffer is used as a low water mark */ 134 134 return cf_sk->sk.sk_rcvbuf / 4; 135 135 } 136 136 137 - void caif_flow_ctrl(struct sock *sk, int mode) 137 + static void caif_flow_ctrl(struct sock *sk, int mode) 138 138 { 139 139 struct caifsock *cf_sk; 140 140 cf_sk = container_of(sk, struct caifsock, sk); 141 - if (cf_sk->layer.dn) 141 + if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) 142 142 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); 143 143 } 144 144 ··· 146 146 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 147 147 * not dropped, but CAIF is sending flow off instead. 148 148 */ 149 - int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 149 + static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 150 150 { 151 151 int err; 152 152 int skb_len; ··· 162 162 atomic_read(&cf_sk->sk.sk_rmem_alloc), 163 163 sk_rcvbuf_lowwater(cf_sk)); 164 164 set_rx_flow_off(cf_sk); 165 - if (cf_sk->layer.dn) 166 - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, 167 - CAIF_MODEMCMD_FLOW_OFF_REQ); 165 + dbfs_atomic_inc(&cnt.num_rx_flow_off); 166 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 168 167 } 169 168 170 169 err = sk_filter(sk, skb); ··· 174 175 trace_printk("CAIF: %s():" 175 176 " sending flow OFF due to rmem_schedule\n", 176 177 __func__); 177 - if (cf_sk->layer.dn) 178 - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, 179 - CAIF_MODEMCMD_FLOW_OFF_REQ); 178 + dbfs_atomic_inc(&cnt.num_rx_flow_off); 179 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 180 180 } 181 181 skb->dev = NULL; 182 182 skb_set_owner_r(skb, sk); ··· 283 285 { 284 286 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 285 287 286 - if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL) 287 - return; 288 288 if (rx_flow_is_on(cf_sk)) 289 289 return; 290 290 291 291 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { 292 292 dbfs_atomic_inc(&cnt.num_rx_flow_on); 293 293 set_rx_flow_on(cf_sk); 294 - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, 295 - CAIF_MODEMCMD_FLOW_ON_REQ); 294 + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); 296 295 } 297 296 } 298 - /* 299 - * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer 300 - * has sufficient size. 301 - */ 302 297 298 + /* 299 + * Copied from unix_dgram_recvmsg, but removed credit checks, changed locking, address handling and added MSG_TRUNC. 301 + */ 303 302 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, 304 - struct msghdr *m, size_t buf_len, int flags) 303 + struct msghdr *m, size_t len, int flags) 305 304 306 305 { 307 306 struct sock *sk = sock->sk; 308 307 struct sk_buff *skb; 309 - int ret = 0; 310 - int len; 308 + int ret; 309 + int copylen; 311 310 312 - if (unlikely(!buf_len)) 313 - return -EINVAL; 311 + ret = -EOPNOTSUPP; 312 + if (m->msg_flags&MSG_OOB) 313 + goto read_error; 314 314 315 315 skb = skb_recv_datagram(sk, flags, 0 , &ret); 316 316 if (!skb) 317 317 goto read_error; 318 - 319 - len = skb->len; 320 - 321 - if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) { 322 - len = buf_len; 323 - /* 324 - * Push skb back on receive queue if buffer too small. 325 - * This has a built-in race where multi-threaded receive 326 - * may get packet in wrong order, but multiple read does 327 - * not really guarantee ordered delivery anyway. 328 - * Let's optimize for speed without taking locks. 329 - */ 330 - 331 - skb_queue_head(&sk->sk_receive_queue, skb); 332 - ret = -EMSGSIZE; 333 - goto read_error; 318 + copylen = skb->len; 319 + if (len < copylen) { 320 + m->msg_flags |= MSG_TRUNC; 321 + copylen = len; 334 322 } 335 323 336 - ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); 324 + ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); 337 325 if (ret) 338 - goto read_error; 326 + goto out_free; 339 327 328 + ret = (flags & MSG_TRUNC) ? skb->len : copylen; 329 + out_free: 340 330 skb_free_datagram(sk, skb); 341 - 342 331 caif_check_flow_release(sk); 343 - 344 - return len; 332 + return ret; 345 333 346 334 read_error: 347 335 return ret; ··· 904 920 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); 905 921 906 922 release_sock(sk); 907 - err = wait_event_interruptible_timeout(*sk_sleep(sk), 923 + err = -ERESTARTSYS; 924 + timeo = wait_event_interruptible_timeout(*sk_sleep(sk), 908 925 sk->sk_state != CAIF_CONNECTING, 909 926 timeo); 910 927 lock_sock(sk); 911 - if (err < 0) 928 + if (timeo < 0) 912 929 goto out; /* -ERESTARTSYS */ 913 - if (err == 0 && sk->sk_state != CAIF_CONNECTED) { 914 - err = -ETIMEDOUT; 915 - goto out; 916 - } 917 930 931 + err = -ETIMEDOUT; 932 + if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) 933 + goto out; 918 934 if (sk->sk_state != CAIF_CONNECTED) { 919 935 sock->state = SS_UNCONNECTED; 920 936 err = sock_error(sk); ··· 928 944 release_sock(sk); 929 945 return err; 930 946 } 931 - 932 947 933 948 /* 934 949 * caif_release() - Disconnect a CAIF Socket ··· 1001 1018 if (!skb_queue_empty(&sk->sk_receive_queue) || 1002 1019 (sk->sk_shutdown & RCV_SHUTDOWN)) 1003 1020 mask |= POLLIN | POLLRDNORM; 1004 - 1005 - /* Connection-based need to check for termination and startup */ 1006 - if (sk->sk_state == CAIF_DISCONNECTED) 1007 - mask |= POLLHUP; 1008 1021 1009 1022 /* 1010 1023 * we set writable also when the other side has shut down the ··· 1173 1194 .owner = THIS_MODULE, 1174 1195 }; 1175 1196 1176 - int af_caif_init(void) 1197 + static int af_caif_init(void) 1177 1198 { 1178 1199 int err = sock_register(&caif_family_ops); 1179 1200 if (!err)
+28 -68
net/caif/cfctrl.c
··· 44 44 dev_info.id = 0xff; 45 45 memset(this, 0, sizeof(*this)); 46 46 cfsrvl_init(&this->serv, 0, &dev_info); 47 - spin_lock_init(&this->info_list_lock); 48 47 atomic_set(&this->req_seq_no, 1); 49 48 atomic_set(&this->rsp_seq_no, 1); 50 49 this->serv.layer.receive = cfctrl_recv; 51 50 sprintf(this->serv.layer.name, "ctrl"); 52 51 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; 53 52 spin_lock_init(&this->loop_linkid_lock); 53 + spin_lock_init(&this->info_list_lock); 54 + INIT_LIST_HEAD(&this->list); 54 55 this->loop_linkid = 1; 55 56 return &this->serv.layer; 56 57 } ··· 113 112 void cfctrl_insert_req(struct cfctrl *ctrl, 114 113 struct cfctrl_request_info *req) 115 114 { 116 - struct cfctrl_request_info *p; 117 115 spin_lock(&ctrl->info_list_lock); 118 - req->next = NULL; 119 116 atomic_inc(&ctrl->req_seq_no); 120 117 req->sequence_no = atomic_read(&ctrl->req_seq_no); 121 - if (ctrl->first_req == NULL) { 122 - ctrl->first_req = req; 123 - spin_unlock(&ctrl->info_list_lock); 124 - return; 125 - } 126 - p = ctrl->first_req; 127 - while (p->next != NULL) 128 - p = p->next; 129 - p->next = req; 118 + list_add_tail(&req->list, &ctrl->list); 130 119 spin_unlock(&ctrl->info_list_lock); 131 120 } 132 121 ··· 124 133 struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 125 134 struct cfctrl_request_info *req) 126 135 { 127 - struct cfctrl_request_info *p; 128 - struct cfctrl_request_info *ret; 136 + struct cfctrl_request_info *p, *tmp, *first; 129 137 130 138 spin_lock(&ctrl->info_list_lock); 131 - if (ctrl->first_req == NULL) { 132 - spin_unlock(&ctrl->info_list_lock); 133 - return NULL; 134 - } 139 + first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list); 135 140 136 - if (cfctrl_req_eq(req, ctrl->first_req)) { 137 - ret = ctrl->first_req; 138 - caif_assert(ctrl->first_req); 139 - atomic_set(&ctrl->rsp_seq_no, 140 - ctrl->first_req->sequence_no); 141 - ctrl->first_req = ctrl->first_req->next; 142 - spin_unlock(&ctrl->info_list_lock); 143 - return ret; 144 - } 145 - 146 - p = ctrl->first_req; 147 - 148 - while (p->next != NULL) { 149 - if (cfctrl_req_eq(req, p->next)) { 150 - pr_warning("CAIF: %s(): Requests are not " 141 + list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 142 + if (cfctrl_req_eq(req, p)) { 143 + if (p != first) 144 + pr_warning("CAIF: %s(): Requests are not " 151 145 "received in order\n", 152 146 __func__); 153 - ret = p->next; 154 - atomic_set(&ctrl->rsp_seq_no, 155 - p->next->sequence_no); 156 - p->next = p->next->next; 157 - spin_unlock(&ctrl->info_list_lock); 158 - return ret; 159 - } 160 - p = p->next; 161 - } 162 - spin_unlock(&ctrl->info_list_lock); 163 147 164 - pr_warning("CAIF: %s(): Request does not match\n", 165 - __func__); 166 - return NULL; 148 + atomic_set(&ctrl->rsp_seq_no, 149 + p->sequence_no); 150 + list_del(&p->list); 151 + goto out; 152 + } 153 + } 154 + p = NULL; 155 + out: 156 + spin_unlock(&ctrl->info_list_lock); 157 + return p; 167 158 } 168 159 169 160 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) ··· 361 388 362 389 void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) 363 390 { 364 - struct cfctrl_request_info *p, *req; 391 + struct cfctrl_request_info *p, *tmp; 365 392 struct cfctrl *ctrl = container_obj(layr); 366 393 spin_lock(&ctrl->info_list_lock); 394 + pr_warning("CAIF: %s(): enter\n", __func__); 367 395 368 - if (ctrl->first_req == NULL) { 369 - spin_unlock(&ctrl->info_list_lock); 370 - return; 371 - } 372 - 373 - if (ctrl->first_req->client_layer == adap_layer) { 374 - 375 - req = ctrl->first_req; 376 - ctrl->first_req = ctrl->first_req->next; 377 - kfree(req); 378 - } 379 - 380 - p = ctrl->first_req; 381 - while (p != NULL && p->next != NULL) { 382 - if (p->next->client_layer == adap_layer) { 383 - 384 - req = p->next; 385 - p->next = p->next->next; 386 - kfree(p->next); 396 + list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 397 + if (p->client_layer == adap_layer) { 398 + pr_warning("CAIF: %s(): cancel req :%d\n", __func__, 399 + p->sequence_no); 400 + list_del(&p->list); 401 + kfree(p); 387 402 } 388 - p = p->next; 389 403 } 390 404 391 405 spin_unlock(&ctrl->info_list_lock); ··· 594 634 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: 595 635 case CAIF_CTRLCMD_FLOW_OFF_IND: 596 636 spin_lock(&this->info_list_lock); 597 - if (this->first_req != NULL) { 637 + if (!list_empty(&this->list)) { 598 638 pr_debug("CAIF: %s(): Received flow off in " 599 639 "control layer", __func__); 600 640 }
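
The cfctrl rewrite above replaces a hand-rolled singly linked list (->next pointers walked from first_req) with the standard struct list_head primitives, which shrink the traversal and deletion code considerably and also fixed a kfree-after-unlink bug in the old cancel path. The core of that idiom, as a small sketch:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct request_info {
            int sequence_no;
            struct list_head list;  /* replaces the open-coded ->next pointer */
    };

    static LIST_HEAD(req_list);
    static DEFINE_SPINLOCK(req_lock);

    static void insert_req(struct request_info *req)
    {
            spin_lock(&req_lock);
            list_add_tail(&req->list, &req_list);
            spin_unlock(&req_lock);
    }

    /* The _safe variant keeps a lookahead cursor so entries can be
     * unlinked and freed while iterating. */
    static void cancel_reqs_for(int seq)
    {
            struct request_info *p, *tmp;

            spin_lock(&req_lock);
            list_for_each_entry_safe(p, tmp, &req_list, list) {
                    if (p->sequence_no == seq) {
                            list_del(&p->list);
                            kfree(p);
                    }
            }
            spin_unlock(&req_lock);
    }
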
+2 -1
net/caif/cfmuxl.c
··· 174 174 spin_lock(&muxl->receive_lock); 175 175 up = get_up(muxl, id); 176 176 if (up == NULL) 177 - return NULL; 177 + goto out; 178 178 memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); 179 179 list_del(&up->node); 180 180 cfsrvl_put(up); 181 + out: 181 182 spin_unlock(&muxl->receive_lock); 182 183 return up; 183 184 }
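Note: the net/caif/cfmuxl.c hunk fixes a locked-return bug: the early `return NULL` exited with receive_lock still held. The idiomatic shape is a single unlock site that every path funnels through; a self-contained sketch (names hypothetical):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct table {
            spinlock_t lock;
            struct list_head items;
    };

    struct item {
            struct list_head node;
            int id;
    };

    static struct item *lookup_and_unlink(struct table *t, int id)
    {
            struct item *it;

            spin_lock(&t->lock);
            list_for_each_entry(it, &t->items, node)
                    if (it->id == id)
                            goto found;
            it = NULL;
            goto out;
    found:
            list_del(&it->node);
    out:
            spin_unlock(&t->lock);      /* reached from every exit path */
            return it;
    }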
+17 -8
net/caif/cfpkt_skbuff.c
··· 238 238 struct sk_buff *lastskb; 239 239 u8 *to; 240 240 const u8 *data = data2; 241 + int ret; 241 242 if (unlikely(is_erronous(pkt))) 242 243 return -EPROTO; 243 244 if (unlikely(skb_headroom(skb) < len)) { ··· 247 246 } 248 247 249 248 /* Make sure data is writable */ 250 - if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { 249 + ret = skb_cow_data(skb, 0, &lastskb); 250 + if (unlikely(ret < 0)) { 251 251 PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); 252 - return -EPROTO; 252 + return ret; 253 253 } 254 254 255 255 to = skb_push(skb, len); ··· 318 316 struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) 319 317 { 320 318 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); 319 + if (!pkt) 320 + return NULL; 321 321 if (unlikely(data != NULL)) 322 322 cfpkt_add_body(pkt, data, len); 323 323 return pkt; ··· 348 344 349 345 if (dst->tail + neededtailspace > dst->end) { 350 346 /* Create a dumplicate of 'dst' with more tail space */ 347 + struct cfpkt *tmppkt; 351 348 dstlen = skb_headlen(dst); 352 349 createlen = dstlen + neededtailspace; 353 - tmp = pkt_to_skb( 354 - cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX)); 355 - if (!tmp) 350 + tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX); 351 + if (tmppkt == NULL) 356 352 return NULL; 353 + tmp = pkt_to_skb(tmppkt); 357 354 skb_set_tail_pointer(tmp, dstlen); 358 355 tmp->len = dstlen; 359 356 memcpy(tmp->data, dst->data, dstlen); ··· 373 368 { 374 369 struct sk_buff *skb2; 375 370 struct sk_buff *skb = pkt_to_skb(pkt); 371 + struct cfpkt *tmppkt; 376 372 u8 *split = skb->data + pos; 377 373 u16 len2nd = skb_tail_pointer(skb) - split; 378 374 ··· 387 381 } 388 382 389 383 /* Create a new packet for the second part of the data */ 390 - skb2 = pkt_to_skb( 391 - cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, 392 - PKT_PREFIX)); 384 + tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, 385 + PKT_PREFIX); 386 + if (tmppkt == NULL) 387 + return NULL; 388 + skb2 = pkt_to_skb(tmppkt); 389 + 393 390 394 391 if (skb2 == NULL) 395 392 return NULL;
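Note: two fix patterns in net/caif/cfpkt_skbuff.c. First, the real (negative) return value of skb_cow_data() is now propagated instead of being flattened to -EPROTO. Second, allocation results are checked before conversion: cfpkt_create() and cfpkt_create_pfx() can return NULL, which the old code passed straight into pkt_to_skb(). (The pre-existing `skb2 == NULL` test surviving after the new check is redundant but harmless.) The check-then-convert rule, sketched with this file's own names:

    /* sketch: validate the allocation before converting the handle */
    static struct sk_buff *new_skb_for(unsigned int len)
    {
            struct cfpkt *tmppkt = cfpkt_create(len);  /* NULL on allocation failure */

            if (tmppkt == NULL)
                    return NULL;            /* never feed NULL to pkt_to_skb() */
            return pkt_to_skb(tmppkt);
    }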
+2 -1
net/caif/cfserl.c
··· 67 67 layr->incomplete_frm = 68 68 cfpkt_append(layr->incomplete_frm, newpkt, expectlen); 69 69 pkt = layr->incomplete_frm; 70 + if (pkt == NULL) 71 + return -ENOMEM; 70 72 } else { 71 73 pkt = newpkt; 72 74 } ··· 156 154 if (layr->usestx) { 157 155 if (tail_pkt != NULL) 158 156 pkt = cfpkt_append(pkt, tail_pkt, 0); 159 - 160 157 /* Start search for next STX if frame failed */ 161 158 continue; 162 159 } else {
+6
net/caif/cfsrvl.c
··· 123 123 struct caif_payload_info *info; 124 124 u8 flow_off = SRVL_FLOW_OFF; 125 125 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 126 + if (!pkt) { 127 + pr_warning("CAIF: %s(): Out of memory\n", 128 + __func__); 129 + return -ENOMEM; 130 + } 131 + 126 132 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { 127 133 pr_err("CAIF: %s(): Packet is erroneous!\n", 128 134 __func__);
+27 -21
net/core/dev.c
··· 954 954 } 955 955 EXPORT_SYMBOL(dev_alloc_name); 956 956 957 - static int dev_get_valid_name(struct net *net, const char *name, char *buf, 958 - bool fmt) 957 + static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) 959 958 { 959 + struct net *net; 960 + 961 + BUG_ON(!dev_net(dev)); 962 + net = dev_net(dev); 963 + 960 964 if (!dev_valid_name(name)) 961 965 return -EINVAL; 962 966 963 967 if (fmt && strchr(name, '%')) 964 - return __dev_alloc_name(net, name, buf); 968 + return dev_alloc_name(dev, name); 965 969 else if (__dev_get_by_name(net, name)) 966 970 return -EEXIST; 967 - else if (buf != name) 968 - strlcpy(buf, name, IFNAMSIZ); 971 + else if (dev->name != name) 972 + strlcpy(dev->name, name, IFNAMSIZ); 969 973 970 974 return 0; 971 975 } ··· 1001 997 1002 998 memcpy(oldname, dev->name, IFNAMSIZ); 1003 999 1004 - err = dev_get_valid_name(net, newname, dev->name, 1); 1000 + err = dev_get_valid_name(dev, newname, 1); 1005 1001 if (err < 0) 1006 1002 return err; 1007 1003 ··· 2425 2421 if (skb_queue_len(&sd->input_pkt_queue)) { 2426 2422 enqueue: 2427 2423 __skb_queue_tail(&sd->input_pkt_queue, skb); 2428 - #ifdef CONFIG_RPS 2429 - *qtail = sd->input_queue_head + 2430 - skb_queue_len(&sd->input_pkt_queue); 2431 - #endif 2424 + input_queue_tail_incr_save(sd, qtail); 2432 2425 rps_unlock(sd); 2433 2426 local_irq_restore(flags); 2434 2427 return NET_RX_SUCCESS; ··· 2960 2959 if (skb->dev == dev) { 2961 2960 __skb_unlink(skb, &sd->input_pkt_queue); 2962 2961 kfree_skb(skb); 2963 - input_queue_head_add(sd, 1); 2962 + input_queue_head_incr(sd); 2964 2963 } 2965 2964 } 2966 2965 rps_unlock(sd); ··· 2969 2968 if (skb->dev == dev) { 2970 2969 __skb_unlink(skb, &sd->process_queue); 2971 2970 kfree_skb(skb); 2971 + input_queue_head_incr(sd); 2972 2972 } 2973 2973 } 2974 2974 } ··· 3325 3323 while ((skb = __skb_dequeue(&sd->process_queue))) { 3326 3324 local_irq_enable(); 3327 3325 __netif_receive_skb(skb); 3328 - if (++work >= quota) 3329 - return work; 3330 3326 local_irq_disable(); 3327 + input_queue_head_incr(sd); 3328 + if (++work >= quota) { 3329 + local_irq_enable(); 3330 + return work; 3331 + } 3331 3332 } 3332 3333 3333 3334 rps_lock(sd); 3334 3335 qlen = skb_queue_len(&sd->input_pkt_queue); 3335 - if (qlen) { 3336 - input_queue_head_add(sd, qlen); 3336 + if (qlen) 3337 3337 skb_queue_splice_tail_init(&sd->input_pkt_queue, 3338 3338 &sd->process_queue); 3339 - } 3339 + 3340 3340 if (qlen < quota - work) { 3341 3341 /* 3342 3342 * Inline a custom version of __napi_complete(). ··· 4964 4960 } 4965 4961 } 4966 4962 4967 - ret = dev_get_valid_name(net, dev->name, dev->name, 0); 4963 + ret = dev_get_valid_name(dev, dev->name, 0); 4968 4964 if (ret) 4969 4965 goto err_uninit; 4970 4966 ··· 5562 5558 /* We get here if we can't use the current device name */ 5563 5559 if (!pat) 5564 5560 goto out; 5565 - if (dev_get_valid_name(net, pat, dev->name, 1)) 5561 + if (dev_get_valid_name(dev, pat, 1)) 5566 5562 goto out; 5567 5563 } 5568 5564 ··· 5665 5661 local_irq_enable(); 5666 5662 5667 5663 /* Process offline CPU's input_pkt_queue */ 5664 + while ((skb = __skb_dequeue(&oldsd->process_queue))) { 5665 + netif_rx(skb); 5666 + input_queue_head_incr(oldsd); 5667 + } 5668 5668 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { 5669 5669 netif_rx(skb); 5670 - input_queue_head_add(oldsd, 1); 5670 + input_queue_head_incr(oldsd); 5671 5671 } 5672 - while ((skb = __skb_dequeue(&oldsd->process_queue))) 5673 - netif_rx(skb); 5674 5672 5675 5673 return NOTIFY_OK; 5676 5674 }
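Note: two independent changes in net/core/dev.c. dev_get_valid_name() now takes the net_device and derives the namespace itself, which also lets the '%d'-template case go through dev_alloc_name(). The RPS bookkeeping moves behind helpers so input_queue_head advances exactly once per skb leaving input_pkt_queue or process_queue, including the flush and CPU-offline paths (note dev_cpu_callback() now drains process_queue before input_pkt_queue and counts both). The helpers themselves are not in this hunk; presumably, in netdevice.h from the same series, they look roughly like:

    /* sketch of the accounting helpers used above; not shown in this diff */
    static inline void input_queue_head_incr(struct softnet_data *sd)
    {
    #ifdef CONFIG_RPS
            sd->input_queue_head++;
    #endif
    }

    static inline void input_queue_tail_incr_save(struct softnet_data *sd,
                                                  unsigned int *qtail)
    {
    #ifdef CONFIG_RPS
            *qtail = ++sd->input_queue_tail;
    #endif
    }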
+3 -1
net/core/rtnetlink.c
··· 1199 1199 struct nlattr *attr; 1200 1200 int rem; 1201 1201 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 1202 - if (nla_type(attr) != IFLA_VF_INFO) 1202 + if (nla_type(attr) != IFLA_VF_INFO) { 1203 + err = -EINVAL; 1203 1204 goto errout; 1205 + } 1204 1206 err = do_setvfinfo(dev, attr); 1205 1207 if (err < 0) 1206 1208 goto errout;
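Note: in the net/core/rtnetlink.c hunk, the old `goto errout` jumped with whatever value err happened to hold, so a malformed IFLA_VF_INFO list could be reported as success. Setting the code immediately before the jump is the discipline being restored; a runnable userspace sketch:

    #include <errno.h>

    /* returns 0, or -EINVAL on the first unexpected attribute type */
    static int parse_attrs(const int *type, int n, int expected)
    {
            int err = 0;

            for (int i = 0; i < n; i++) {
                    if (type[i] != expected) {
                            err = -EINVAL;  /* set the code before jumping */
                            goto errout;
                    }
                    /* ... handle attribute i ... */
            }
    errout:
            return err;
    }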
+1
net/core/skbuff.c
··· 2722 2722 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2723 2723 skb_shinfo(nskb)->frag_list = p; 2724 2724 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2725 + pinfo->gso_size = 0; 2725 2726 skb_header_release(p); 2726 2727 nskb->prev = p; 2727 2728
+19
net/core/sock.c
··· 123 123 #include <linux/net_tstamp.h> 124 124 #include <net/xfrm.h> 125 125 #include <linux/ipsec.h> 126 + #include <net/cls_cgroup.h> 126 127 127 128 #include <linux/filter.h> 128 129 ··· 217 216 /* Maximal space eaten by iovec or ancilliary data plus some space */ 218 217 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 219 218 EXPORT_SYMBOL(sysctl_optmem_max); 219 + 220 + #if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) 221 + int net_cls_subsys_id = -1; 222 + EXPORT_SYMBOL_GPL(net_cls_subsys_id); 223 + #endif 220 224 221 225 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) 222 226 { ··· 1056 1050 module_put(owner); 1057 1051 } 1058 1052 1053 + #ifdef CONFIG_CGROUPS 1054 + void sock_update_classid(struct sock *sk) 1055 + { 1056 + u32 classid = task_cls_classid(current); 1057 + 1058 + if (classid && classid != sk->sk_classid) 1059 + sk->sk_classid = classid; 1060 + } 1061 + EXPORT_SYMBOL(sock_update_classid); 1062 + #endif 1063 + 1059 1064 /** 1060 1065 * sk_alloc - All socket objects are allocated here 1061 1066 * @net: the applicable net namespace ··· 1090 1073 sock_lock_init(sk); 1091 1074 sock_net_set(sk, get_net(net)); 1092 1075 atomic_set(&sk->sk_wmem_alloc, 1); 1076 + 1077 + sock_update_classid(sk); 1093 1078 } 1094 1079 1095 1080 return sk;
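Note: sock_update_classid() stamps the calling task's cgroup classid onto the socket at sk_alloc() time (and again on the I/O paths patched in net/socket.c below), so the cls_cgroup classifier can fall back to skb->sk->sk_classid in softirq context, where `current` is unrelated to the skb. The compare-before-store keeps the common unchanged case write-free. The reader side is not part of this diff; a rough sketch, assuming the cls_cgroup.h definitions this series relies on:

    /* sketch only; the real helper lives in net/cls_cgroup.h */
    static inline u32 task_cls_classid(struct task_struct *p)
    {
            u32 classid = 0;

            if (in_interrupt())     /* 'current' means nothing for this skb */
                    return 0;

            rcu_read_lock();
            classid = container_of(task_subsys_state(p, net_cls_subsys_id),
                                   struct cgroup_cls_state, css)->classid;
            rcu_read_unlock();

            return classid;
    }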
+3 -3
net/dccp/input.c
··· 124 124 return queued; 125 125 } 126 126 127 - static u8 dccp_reset_code_convert(const u8 code) 127 + static u16 dccp_reset_code_convert(const u8 code) 128 128 { 129 - const u8 error_code[] = { 129 + const u16 error_code[] = { 130 130 [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ 131 131 [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ 132 132 [DCCP_RESET_CODE_ABORTED] = ECONNRESET, ··· 148 148 149 149 static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) 150 150 { 151 - u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); 151 + u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); 152 152 153 153 sk->sk_err = err; 154 154
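Note: the net/dccp/input.c widening exists because errno values are architecture-specific ints with no guarantee of fitting in a byte (on MIPS, for example, EDQUOT is 1133), so returning the converted reset code as u8 could silently truncate it. A small userspace demonstration of the truncation:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t code = 1133;           /* e.g. EDQUOT on MIPS */
            uint8_t  as_u8  = code;         /* truncates to 109 */
            uint16_t as_u16 = code;         /* preserved */

            printf("u8: %u, u16: %u\n", as_u8, as_u16);
            return 0;
    }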
+6 -1
net/ieee802154/wpan-class.c
··· 147 147 struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, 148 148 GFP_KERNEL); 149 149 150 + if (!phy) 151 + goto out; 150 152 mutex_lock(&wpan_phy_mutex); 151 153 phy->idx = wpan_phy_idx++; 152 154 if (unlikely(!wpan_phy_idx_valid(phy->idx))) { 153 155 wpan_phy_idx--; 154 156 mutex_unlock(&wpan_phy_mutex); 155 157 kfree(phy); 156 - return NULL; 158 + goto out; 157 159 } 158 160 mutex_unlock(&wpan_phy_mutex); 159 161 ··· 170 168 phy->current_page = 0; /* for compatibility */ 171 169 172 170 return phy; 171 + 172 + out: 173 + return NULL; 173 174 } 174 175 EXPORT_SYMBOL(wpan_phy_alloc); 175 176
+1 -1
net/mac80211/sta_info.h
··· 145 145 /** 146 146 * struct sta_ampdu_mlme - STA aggregation information. 147 147 * 148 - * @tid_state_rx: TID's state in Rx session state machine. 148 + * @tid_active_rx: TID's state in Rx session state machine. 149 149 * @tid_rx: aggregation info for Rx per TID 150 150 * @tid_state_tx: TID's state in Tx session state machine. 151 151 * @tid_tx: aggregation info for Tx per TID
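Note: the net/mac80211/sta_info.h change is a pure kernel-doc repair: every @tag must name an existing struct member, and the field had been renamed tid_active_rx while the comment still said @tid_state_rx, which is what triggered the warning mentioned in the merge log. The convention, on a hypothetical struct:

    #include <linux/types.h>

    /**
     * struct foo_stats - per-queue counters
     * @rx_ok: frames received intact
     * @rx_drop: frames dropped at the queue
     *
     * Each @tag above must match a member below, or scripts/kernel-doc warns.
     */
    struct foo_stats {
            u32 rx_ok;
            u32 rx_drop;
    };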
+10
net/netfilter/nf_conntrack_core.c
··· 424 424 425 425 spin_lock_bh(&nf_conntrack_lock); 426 426 427 + /* We have to check the DYING flag inside the lock to prevent 428 + a race against nf_ct_get_next_corpse() possibly called from 429 + user context, else we insert an already 'dead' hash, blocking 430 + further use of that particular connection -JM */ 431 + 432 + if (unlikely(nf_ct_is_dying(ct))) { 433 + spin_unlock_bh(&nf_conntrack_lock); 434 + return NF_ACCEPT; 435 + } 436 + 427 437 /* See if there's one in the list already, including reverse: 428 438 NAT could have grabbed it without realizing, since we're 429 439 not in the hash. If there is, we lost race. */
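Note: the net/netfilter/nf_conntrack_core.c fix is the classic "re-check under the lock" pattern: nf_ct_get_next_corpse() can mark the conntrack DYING between the caller's unlocked path and the acquisition of nf_conntrack_lock, and hashing a dying entry would block that tuple from further use. A compilable userspace analog with a pthread mutex:

    #include <pthread.h>
    #include <stdbool.h>

    struct obj {
            pthread_mutex_t lock;
            bool dying;
            bool hashed;
    };

    /* returns false if the object died while we raced for the lock */
    static bool hash_insert(struct obj *o)
    {
            bool ok = true;

            pthread_mutex_lock(&o->lock);
            if (o->dying)           /* must be tested inside the lock */
                    ok = false;
            else
                    o->hashed = true;
            pthread_mutex_unlock(&o->lock);
            return ok;
    }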
+4 -8
net/netfilter/nf_conntrack_sip.c
··· 1393 1393 1394 1394 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1395 1395 1396 - if (skb_is_nonlinear(skb)) { 1397 - pr_debug("Copy of skbuff not supported yet.\n"); 1398 - return NF_ACCEPT; 1399 - } 1396 + if (unlikely(skb_linearize(skb))) 1397 + return NF_DROP; 1400 1398 1401 1399 dptr = skb->data + dataoff; 1402 1400 datalen = skb->len - dataoff; ··· 1453 1455 1454 1456 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1455 1457 1456 - if (skb_is_nonlinear(skb)) { 1457 - pr_debug("Copy of skbuff not supported yet.\n"); 1458 - return NF_ACCEPT; 1459 - } 1458 + if (unlikely(skb_linearize(skb))) 1459 + return NF_DROP; 1460 1460 1461 1461 dptr = skb->data + dataoff; 1462 1462 datalen = skb->len - dataoff;
+2
net/phonet/pep.c
··· 626 626 struct pep_sock *pn = pep_sk(sk); 627 627 int ifindex = 0; 628 628 629 + sock_hold(sk); /* keep a reference after sk_common_release() */ 629 630 sk_common_release(sk); 630 631 631 632 lock_sock(sk); ··· 645 644 646 645 if (ifindex) 647 646 gprs_detach(sk); 647 + sock_put(sk); 648 648 } 649 649 650 650 static int pep_wait_connreq(struct sock *sk, int noblock)
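Note: the net/phonet/pep.c fix pins the socket across sk_common_release(), which may drop the last reference; without the extra hold, the lock_sock()/gprs_detach() that follows would touch freed memory. The shape of the fix, as a self-contained refcount toy (single-threaded, for illustration only):

    #include <stdlib.h>

    struct ref { int count; };

    static void hold(struct ref *r) { r->count++; }
    static void put(struct ref *r)  { if (--r->count == 0) free(r); }

    static void close_obj(struct ref *r)
    {
            hold(r);        /* mirrors sock_hold(): pin across the release  */
            put(r);         /* mirrors sk_common_release() dropping its ref */
            /* ... teardown that still dereferences r is safe here ...      */
            put(r);         /* mirrors the final sock_put(); may free now   */
    }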
+34 -16
net/sched/cls_cgroup.c
··· 16 16 #include <linux/errno.h> 17 17 #include <linux/skbuff.h> 18 18 #include <linux/cgroup.h> 19 + #include <linux/rcupdate.h> 19 20 #include <net/rtnetlink.h> 20 21 #include <net/pkt_cls.h> 21 - 22 - struct cgroup_cls_state 23 - { 24 - struct cgroup_subsys_state css; 25 - u32 classid; 26 - }; 22 + #include <net/sock.h> 23 + #include <net/cls_cgroup.h> 27 24 28 25 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, 29 26 struct cgroup *cgrp); ··· 109 112 struct cls_cgroup_head *head = tp->root; 110 113 u32 classid; 111 114 115 + rcu_read_lock(); 116 + classid = task_cls_state(current)->classid; 117 + rcu_read_unlock(); 118 + 112 119 /* 113 120 * Due to the nature of the classifier it is required to ignore all 114 121 * packets originating from softirq context as accessing `current' ··· 123 122 * calls by looking at the number of nested bh disable calls because 124 123 * softirqs always disables bh. 125 124 */ 126 - if (softirq_count() != SOFTIRQ_OFFSET) 127 - return -1; 128 - 129 - rcu_read_lock(); 130 - classid = task_cls_state(current)->classid; 131 - rcu_read_unlock(); 125 + if (softirq_count() != SOFTIRQ_OFFSET) { 126 + /* If there is an sk_classid we'll use that. */ 127 + if (!skb->sk) 128 + return -1; 129 + classid = skb->sk->sk_classid; 130 + } 132 131 133 132 if (!classid) 134 133 return -1; ··· 290 289 291 290 static int __init init_cgroup_cls(void) 292 291 { 293 - int ret = register_tcf_proto_ops(&cls_cgroup_ops); 294 - if (ret) 295 - return ret; 292 + int ret; 293 + 296 294 ret = cgroup_load_subsys(&net_cls_subsys); 297 295 if (ret) 298 - unregister_tcf_proto_ops(&cls_cgroup_ops); 296 + goto out; 297 + 298 + #ifndef CONFIG_NET_CLS_CGROUP 299 + /* We can't use rcu_assign_pointer because this is an int. */ 300 + smp_wmb(); 301 + net_cls_subsys_id = net_cls_subsys.subsys_id; 302 + #endif 303 + 304 + ret = register_tcf_proto_ops(&cls_cgroup_ops); 305 + if (ret) 306 + cgroup_unload_subsys(&net_cls_subsys); 307 + 308 + out: 299 309 return ret; 300 310 } 301 311 302 312 static void __exit exit_cgroup_cls(void) 303 313 { 304 314 unregister_tcf_proto_ops(&cls_cgroup_ops); 315 + 316 + #ifndef CONFIG_NET_CLS_CGROUP 317 + net_cls_subsys_id = -1; 318 + synchronize_rcu(); 319 + #endif 320 + 305 321 cgroup_unload_subsys(&net_cls_subsys); 306 322 } 307 323
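Note: the net/sched/cls_cgroup.c rework does two things. cls_cgroup_classify() gains a softirq fallback (use skb->sk->sk_classid, stamped by sock_update_classid() in net/core/sock.c above, when `current` is not the transmitting task), and the init/exit order is fixed so net_cls_subsys_id is published before the classifier ops can run. As the comment in the hunk says, an int cannot go through rcu_assign_pointer(), hence the bare smp_wmb(); a userspace analog of publishing an index with release/acquire ordering:

    #include <stdatomic.h>

    static _Atomic int subsys_id = -1;      /* -1 means "not registered yet" */

    void publish(int id)
    {
            /* all initialization above happens-before the id becomes visible */
            atomic_store_explicit(&subsys_id, id, memory_order_release);
    }

    int lookup(void)
    {
            /* a non-negative result implies the initialized state is visible */
            return atomic_load_explicit(&subsys_id, memory_order_acquire);
    }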
+7 -7
net/sched/sch_api.c
··· 1195 1195 return -1; 1196 1196 } 1197 1197 1198 + static bool tc_qdisc_dump_ignore(struct Qdisc *q) 1199 + { 1200 + return (q->flags & TCQ_F_BUILTIN) ? true : false; 1201 + } 1202 + 1198 1203 static int qdisc_notify(struct net *net, struct sk_buff *oskb, 1199 1204 struct nlmsghdr *n, u32 clid, 1200 1205 struct Qdisc *old, struct Qdisc *new) ··· 1211 1206 if (!skb) 1212 1207 return -ENOBUFS; 1213 1208 1214 - if (old && old->handle) { 1209 + if (old && !tc_qdisc_dump_ignore(old)) { 1215 1210 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) 1216 1211 goto err_out; 1217 1212 } 1218 - if (new) { 1213 + if (new && !tc_qdisc_dump_ignore(new)) { 1219 1214 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) 1220 1215 goto err_out; 1221 1216 } ··· 1226 1221 err_out: 1227 1222 kfree_skb(skb); 1228 1223 return -EINVAL; 1229 - } 1230 - 1231 - static bool tc_qdisc_dump_ignore(struct Qdisc *q) 1232 - { 1233 - return (q->flags & TCQ_F_BUILTIN) ? true : false; 1234 1224 } 1235 1225 1236 1226 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
+9
net/socket.c
··· 94 94 95 95 #include <net/compat.h> 96 96 #include <net/wext.h> 97 + #include <net/cls_cgroup.h> 97 98 98 99 #include <net/sock.h> 99 100 #include <linux/netfilter.h> ··· 559 558 struct sock_iocb *si = kiocb_to_siocb(iocb); 560 559 int err; 561 560 561 + sock_update_classid(sock->sk); 562 + 562 563 si->sock = sock; 563 564 si->scm = NULL; 564 565 si->msg = msg; ··· 687 684 { 688 685 struct sock_iocb *si = kiocb_to_siocb(iocb); 689 686 687 + sock_update_classid(sock->sk); 688 + 690 689 si->sock = sock; 691 690 si->scm = NULL; 692 691 si->msg = msg; ··· 781 776 782 777 if (unlikely(!sock->ops->splice_read)) 783 778 return -EINVAL; 779 + 780 + sock_update_classid(sock->sk); 784 781 785 782 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 786 783 } ··· 3076 3069 int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3077 3070 size_t size, int flags) 3078 3071 { 3072 + sock_update_classid(sock->sk); 3073 + 3079 3074 if (sock->ops->sendpage) 3080 3075 return sock->ops->sendpage(sock, page, offset, size, flags); 3081 3076
+1 -1
net/wireless/chan.c
··· 50 50 struct ieee80211_channel *chan; 51 51 int result; 52 52 53 - if (wdev->iftype == NL80211_IFTYPE_MONITOR) 53 + if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR) 54 54 wdev = NULL; 55 55 56 56 if (wdev) {
+4 -2
net/wireless/nl80211.c
··· 4443 4443 if (channel_type != NL80211_CHAN_NO_HT && 4444 4444 channel_type != NL80211_CHAN_HT20 && 4445 4445 channel_type != NL80211_CHAN_HT40PLUS && 4446 - channel_type != NL80211_CHAN_HT40MINUS) 4446 + channel_type != NL80211_CHAN_HT40MINUS) { 4447 4447 err = -EINVAL; 4448 4448 goto out; 4449 + } 4449 4450 } 4450 4451 4451 4452 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); ··· 4718 4717 if (channel_type != NL80211_CHAN_NO_HT && 4719 4718 channel_type != NL80211_CHAN_HT20 && 4720 4719 channel_type != NL80211_CHAN_HT40PLUS && 4721 - channel_type != NL80211_CHAN_HT40MINUS) 4720 + channel_type != NL80211_CHAN_HT40MINUS) { 4722 4721 err = -EINVAL; 4723 4722 goto out; 4723 + } 4724 4724 } 4725 4725 4726 4726 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
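Note: both net/wireless/nl80211.c hunks fix the same misleading-indentation bug: without braces, only `err = -EINVAL;` was guarded by the four-way channel_type test, while the indented `goto out;` ran unconditionally, bailing out of every request before NL80211_ATTR_WIPHY_FREQ was even read. A runnable sketch of the corrected control flow:

    #include <errno.h>
    #include <stdio.h>

    static int check_channel(int channel_type)
    {
            int err = 0;

            if (channel_type < 0 || channel_type > 3) {  /* braces scope BOTH statements */
                    err = -EINVAL;
                    goto out;
            }
            printf("channel type %d accepted\n", channel_type);
    out:
            return err;
    }

    int main(void) { return check_channel(2); }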
+2 -2
net/wireless/scan.c
··· 515 515 516 516 privsz = wiphy->bss_priv_size; 517 517 518 - if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && 518 + if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && 519 519 (signal < 0 || signal > 100))) 520 520 return NULL; 521 521 ··· 571 571 u.probe_resp.variable); 572 572 size_t privsz = wiphy->bss_priv_size; 573 573 574 - if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && 574 + if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && 575 575 (signal < 0 || signal > 100))) 576 576 return NULL; 577 577
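Note: the net/wireless/scan.c fix swaps in the right constant: wiphy->signal_type holds enum cfg80211_signal_type values, but the WARN_ON compared it against NL80211_BSS_SIGNAL_UNSPEC from the unrelated netlink attribute enum. C happily compares values of different enum types, so nothing flagged the mismatch at build time. A compile-clean demonstration of the pitfall (enum values below are made up):

    #include <stdio.h>

    enum signal_type { SIGNAL_NONE, SIGNAL_MBM, SIGNAL_UNSPEC };
    enum bss_attr    { BSS_BSSID = 1, BSS_SIGNAL_UNSPEC = 8 };

    int main(void)
    {
            enum signal_type t = SIGNAL_UNSPEC;

            if (t == BSS_SIGNAL_UNSPEC)     /* wrong enum: never true here */
                    puts("matched");
            else
                    puts("compiled fine, but tested the wrong constant");
            return 0;
    }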