Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (51 commits)
netfilter: ipset: Fix the order of listing of sets
ip6_pol_route panic: Do not allow VLAN on loopback
bnx2x: Fix port identification problem
r8169: add Realtek as maintainer.
ip: ip_options_compile() resilient to NULL skb route
bna: fix memory leak during RX path cleanup
bna: fix for clean fw re-initialization
usbnet: Fix up 'FLAG_POINTTOPOINT' and 'FLAG_MULTI_PACKET' overlaps.
iwlegacy: fix tx_power initialization
Revert "tcp: disallow bind() to reuse addr/port"
qlcnic: limit skb frags for non tso packet
net: can: mscan: fix build breakage in mpc5xxx_can
netfilter: ipset: set match and SET target fixes
netfilter: ipset: bitmap:ip,mac type requires "src" for MAC
sctp: fix oops while removed transport still using as retran path
sctp: fix oops when updating retransmit path with DEBUG on
net: Disable NETIF_F_TSO_ECN when TSO is disabled
net: Disable all TSO features when SG is disabled
sfc: Use rmb() to ensure reads occur in order
ieee802154: Remove hacked CFLAGS in net/ieee802154/Makefile
...

+330 -203
+1
MAINTAINERS
···
 F:	drivers/net/hamradio/6pack.c

 8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M:	Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
+1
drivers/connector/connector.c
···
     cbq->callback(msg, nsp);
     kfree_skb(skb);
     cn_queue_release_callback(cbq);
+    err = 0;
   }

   return err;
+18 -13
drivers/net/bna/bfa_ioc.c
···
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc) \
     ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+    ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc) \
     ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc) \
···
   switch (event) {
   case IOCPF_E_SEMLOCKED:
     if (bfa_ioc_firmware_lock(ioc)) {
-      if (bfa_ioc_sync_complete(ioc)) {
+      if (bfa_ioc_sync_start(ioc)) {
         iocpf->retry_count = 0;
         bfa_ioc_sync_join(ioc);
         bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
···
  * execution context (driver/bios) must match.
  */
 static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 {
   struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

···
   if (fwhdr.signature != drv_fwhdr->signature)
     return false;

-  if (fwhdr.exec != drv_fwhdr->exec)
+  if (swab32(fwhdr.param) != boot_env)
     return false;

   return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
···
 {
   enum bfi_ioc_state ioc_fwstate;
   bool fwvalid;
+  u32 boot_env;

   ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+  boot_env = BFI_BOOT_LOADER_OS;

   if (force)
     ioc_fwstate = BFI_IOC_UNINIT;
···
    * check if firmware is valid
    */
   fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-    false : bfa_ioc_fwver_valid(ioc);
+    false : bfa_ioc_fwver_valid(ioc, boot_env);

   if (!fwvalid) {
-    bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+    bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
     return;
   }
···
   /**
    * Initialize the h/w for any other states.
    */
-  bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+  bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 }

 void
···
  */
 static void
 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
-        u32 boot_param)
+        u32 boot_env)
 {
   u32 *fwimg;
   u32 pgnum, pgoff;
···
   /*
    * Set boot type and boot param at the end.
    */
-  writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+  writel(boot_type, ((ioc->ioc_regs.smem_page_start)
       + (BFI_BOOT_TYPE_OFF)));
-  writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
-      + (BFI_BOOT_PARAM_OFF)));
+  writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+      + (BFI_BOOT_LOADER_OFF)));
 }

 static void
···
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
 {
   void __iomem *rb;

···
    * Initialize IOC state of all functions on a chip reset.
    */
   rb = ioc->pcidev.pci_bar_kva;
-  if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+  if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
     writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
     writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
   } else {
···
   }

   bfa_ioc_msgflush(ioc);
-  bfa_ioc_download_fw(ioc, boot_type, boot_param);
+  bfa_ioc_download_fw(ioc, boot_type, boot_env);

   /**
    * Enable interrupts just before starting LPU
+1
drivers/net/bna/bfa_ioc.h
···
         bool msix);
   void (*ioc_notify_fail) (struct bfa_ioc *ioc);
   void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+  bool (*ioc_sync_start) (struct bfa_ioc *ioc);
   void (*ioc_sync_join) (struct bfa_ioc *ioc);
   void (*ioc_sync_leave) (struct bfa_ioc *ioc);
   void (*ioc_sync_ack) (struct bfa_ioc *ioc);
+28
drivers/net/bna/bfa_ioc_ct.c
···
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
 static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
···
   nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
   nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
   nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+  nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
   nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
   nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
   nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
···
   bfa_nw_ioc_hw_sem_release(ioc);
 }

+/**
+ * Synchronized IOC failure processing routines
+ */
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+  u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+  u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+  /*
+   * Driver load time. If the sync required bit for this PCI fn
+   * is set, it is due to an unclean exit by the driver for this
+   * PCI fn in the previous incarnation. Whoever comes here first
+   * should clean it up, no matter which PCI fn.
+   */
+
+  if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+    writel(0, ioc->ioc_regs.ioc_fail_sync);
+    writel(1, ioc->ioc_regs.ioc_usage_reg);
+    writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+    writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+    return true;
+  }
+
+  return bfa_ioc_ct_sync_complete(ioc);
+}
 /**
  * Synchronized IOC failure processing routines
  */
+4 -2
drivers/net/bna/bfi.h
···
 #define BFI_IOC_MSGLEN_MAX	32	/* 32 bytes */

 #define BFI_BOOT_TYPE_OFF	8
-#define BFI_BOOT_PARAM_OFF	12
+#define BFI_BOOT_LOADER_OFF	12

-#define BFI_BOOT_TYPE_NORMAL	0	/* param is device id */
+#define BFI_BOOT_TYPE_NORMAL	0
 #define BFI_BOOT_TYPE_FLASH	1
 #define BFI_BOOT_TYPE_MEMTEST	2
+
+#define BFI_BOOT_LOADER_OS	0

 #define BFI_BOOT_MEMTEST_RES_ADDR   0x900
 #define BFI_BOOT_MEMTEST_RES_SIG    0xA0A1A2A3
-1
drivers/net/bna/bnad.c
···
   /* Initialize the Rx event handlers */
   rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
   rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
-  rx_cbfn.rcb_destroy_cbfn = NULL;
   rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
   rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
   rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
+4 -5
drivers/net/bnx2x/bnx2x_ethtool.c
···
   for (i = 0; i < (data * 2); i++) {
     if ((i % 2) == 0)
       bnx2x_set_led(&bp->link_params, &bp->link_vars,
-              LED_MODE_OPER, SPEED_1000);
+              LED_MODE_ON, SPEED_1000);
     else
       bnx2x_set_led(&bp->link_params, &bp->link_vars,
-              LED_MODE_OFF, 0);
+              LED_MODE_FRONT_PANEL_OFF, 0);

     msleep_interruptible(500);
     if (signal_pending(current))
       break;
   }

-  if (bp->link_vars.link_up)
-    bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
-            bp->link_vars.line_speed);
+  bnx2x_set_led(&bp->link_params, &bp->link_vars,
+          LED_MODE_OPER, bp->link_vars.line_speed);

   return 0;
 }
+3 -3
drivers/net/bonding/bond_alb.c
···
   bond_info->tx_hashtbl = new_hashtbl;

   for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
-    tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
+    tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
   }

   _unlock_tx_hashtbl(bond);
···
    */
   rlb_choose_channel(skb, bond);

-  /* The ARP relpy packets must be delayed so that
+  /* The ARP reply packets must be delayed so that
    * they can cancel out the influence of the ARP request.
    */
   bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
···
  *
  * If the permanent hw address of @slave is @bond's hw address, we need to
  * find a different hw address to give @slave, that isn't in use by any other
- * slave in the bond. This address must be, of course, one of the premanent
+ * slave in the bond. This address must be, of course, one of the permanent
  * addresses of the other slaves.
  *
  * We go over the slave list, and for each slave there we compare its
+1 -3
drivers/net/bonding/bond_alb.h
···
      * gave this entry index.
      */
   u32 tx_bytes;   /* Each Client accumulates the BytesTx that
-       * were tranmitted to it, and after each
+       * were transmitted to it, and after each
        * CallBack the LoadHistory is divided
        * by the balance interval
        */
···
 };

 struct alb_bond_info {
-  struct timer_list alb_timer;
   struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
   spinlock_t tx_hashtbl_lock;
   u32 unbalanced_load;
···
   struct slave *next_rx_slave;/* next slave to be assigned
             * to a new rx client for
             */
-  u32 rlb_interval_counter;
   u8 primary_is_promisc;    /* boolean */
   u32 rlb_promisc_timeout_counter;/* counts primary
           * promiscuity time
+1 -1
drivers/net/can/mscan/mpc5xxx_can.c
···

   if (!ofdev->dev.of_match)
     return -EINVAL;
-  data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
+  data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;

   base = of_iomap(np, 0);
   if (!base) {
+2 -1
drivers/net/loopback.c
···
     | NETIF_F_RXCSUM
     | NETIF_F_HIGHDMA
     | NETIF_F_LLTX
-    | NETIF_F_NETNS_LOCAL;
+    | NETIF_F_NETNS_LOCAL
+    | NETIF_F_VLAN_CHALLENGED;
   dev->ethtool_ops = &loopback_ethtool_ops;
   dev->header_ops = &eth_header_ops;
   dev->netdev_ops = &loopback_ops;
+3
drivers/net/natsemi.c
···
     prev_eedata = eedata;
   }

+  /* Store MAC Address in perm_addr */
+  memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
   dev->base_addr = (unsigned long __force) ioaddr;
   dev->irq = irq;

+2 -2
drivers/net/netxen/netxen_nic.h
···

 #define MAX_NUM_CARDS		4

-#define MAX_BUFFERS_PER_CMD	32
+#define NETXEN_MAX_FRAGS_PER_TX	14
 #define MAX_TSO_HEADER_DESC	2
 #define MGMT_CMD_DESC_RESV	4
 #define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
···
  */
 struct netxen_cmd_buffer {
   struct sk_buff *skb;
-  struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+  struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
   u32 frag_count;
 };

+17
drivers/net/netxen/netxen_nic_main.c
···
   struct cmd_desc_type0 *hwdesc, *first_desc;
   struct pci_dev *pdev;
   int i, k;
+  int delta = 0;
+  struct skb_frag_struct *frag;

   u32 producer;
   int frag_count, no_of_desc;
···

   frag_count = skb_shinfo(skb)->nr_frags + 1;

+  /* 14 frags supported for normal packet and
+   * 32 frags supported for TSO packet
+   */
+  if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+    for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+      frag = &skb_shinfo(skb)->frags[i];
+      delta += frag->size;
+    }
+
+    if (!__pskb_pull_tail(skb, delta))
+      goto drop_packet;
+
+    frag_count = 1 + skb_shinfo(skb)->nr_frags;
+  }
   /* 4 fragments per cmd des */
   no_of_desc = (frag_count + 3) >> 2;
+1
drivers/net/qlcnic/qlcnic.h
···
 #define TX_UDPV6_PKT	0x0c

 /* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX	14
 #define MAX_TSO_HEADER_DESC	2
 #define MGMT_CMD_DESC_RESV	4
 #define TX_STOP_THRESH		((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+14
drivers/net/qlcnic/qlcnic_main.c
···
   struct cmd_desc_type0 *hwdesc, *first_desc;
   struct pci_dev *pdev;
   struct ethhdr *phdr;
+  int delta = 0;
   int i, k;

   u32 producer;
···
   }

   frag_count = skb_shinfo(skb)->nr_frags + 1;
+  /* 14 frags supported for normal packet and
+   * 32 frags supported for TSO packet
+   */
+  if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+
+    for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+      delta += skb_shinfo(skb)->frags[i].size;
+
+    if (!__pskb_pull_tail(skb, delta))
+      goto drop_packet;
+
+    frag_count = 1 + skb_shinfo(skb)->nr_frags;
+  }

   /* 4 fragments per cmd des */
   no_of_desc = (frag_count + 3) >> 2;
+4 -2
drivers/net/sfc/efx.c
···
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test.  It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
···

   BUG_ON(channel->channel >= efx->n_channels);
   BUG_ON(!channel->enabled);
+  BUG_ON(!efx->loopback_selftest);

   /* Disable interrupts and wait for ISRs to complete */
   efx_nic_disable_interrupts(efx);
···
    * restart the transmit interface early so the watchdog timer stops */
   efx_start_port(efx);

-  if (efx_dev_registered(efx))
+  if (efx_dev_registered(efx) && !efx->port_inhibited)
     netif_tx_wake_all_queues(efx->net_dev);

   efx_for_each_channel(channel, efx)
+2
drivers/net/sfc/io.h
···

   spin_lock_irqsave(&efx->biu_lock, flags);
   value->u32[0] = _efx_readd(efx, reg + 0);
+  rmb();
   value->u32[1] = _efx_readd(efx, reg + 4);
   value->u32[2] = _efx_readd(efx, reg + 8);
   value->u32[3] = _efx_readd(efx, reg + 12);
···
   value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
   value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+  rmb();
   value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
   spin_unlock_irqrestore(&efx->biu_lock, flags);
-2
drivers/net/sfc/net_driver.h
···
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
···
   unsigned int eventq_mask;
   unsigned int eventq_read_ptr;
   unsigned int last_eventq_read_ptr;
-  unsigned int magic_count;

   unsigned int irq_count;
   unsigned int irq_mod_score;
+15 -7
drivers/net/sfc/nic.c
···
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
              unsigned int index)
 {
-  return ((efx_qword_t *) (channel->eventq.addr)) + index;
+  return ((efx_qword_t *) (channel->eventq.addr)) +
+    (index & channel->eventq_mask);
 }

 /* See if an event is present
···
   efx_dword_t reg;
   struct efx_nic *efx = channel->efx;

-  EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+  EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+           channel->eventq_read_ptr & channel->eventq_mask);
   efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
        channel->channel);
 }
···

   code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
   if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-    ++channel->magic_count;
+    ; /* ignore */
   else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
     /* The queue must be empty, so we won't receive any rx
      * events, so efx_process_channel() won't refill the
···
     /* Clear this event by marking it all ones */
     EFX_SET_QWORD(*p_event);

-    /* Increment read pointer */
-    read_ptr = (read_ptr + 1) & channel->eventq_mask;
+    ++read_ptr;

     ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
···
   return spent;
 }

+/* Check whether an event is present in the eventq at the current
+ * read pointer.  Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+  return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}

 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)
···
   struct efx_tx_queue *tx_queue;
   struct efx_rx_queue *rx_queue;
   unsigned int read_ptr = channel->eventq_read_ptr;
-  unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+  unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

   do {
     efx_qword_t *event = efx_event(channel, read_ptr);
···
      * it's ok to throw away every non-flush event */
     EFX_SET_QWORD(*event);

-    read_ptr = (read_ptr + 1) & channel->eventq_mask;
+    ++read_ptr;
   } while (read_ptr != end_ptr);

   channel->eventq_read_ptr = read_ptr;
+1
drivers/net/sfc/nic.h
···
 extern void efx_nic_remove_eventq(struct efx_channel *channel);
 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);

 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
+6 -19
drivers/net/sfc/selftest.c
···
 static int efx_test_interrupts(struct efx_nic *efx,
              struct efx_self_tests *tests)
 {
-  struct efx_channel *channel;
-
   netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
   tests->interrupt = -1;

   /* Reset interrupt flag */
   efx->last_irq_cpu = -1;
   smp_wmb();

-  /* ACK each interrupting event queue. Receiving an interrupt due to
-   * traffic before a test event is raised is considered a pass */
-  efx_for_each_channel(channel, efx) {
-    if (channel->work_pending)
-      efx_process_channel_now(channel);
-    if (efx->last_irq_cpu >= 0)
-      goto success;
-  }

   efx_nic_generate_interrupt(efx);
···
         struct efx_self_tests *tests)
 {
   struct efx_nic *efx = channel->efx;
-  unsigned int magic_count, count;
+  unsigned int read_ptr, count;

   tests->eventq_dma[channel->channel] = -1;
   tests->eventq_int[channel->channel] = -1;
   tests->eventq_poll[channel->channel] = -1;

-  magic_count = channel->magic_count;
+  read_ptr = channel->eventq_read_ptr;
   channel->efx->last_irq_cpu = -1;
   smp_wmb();
···
   do {
     schedule_timeout_uninterruptible(HZ / 100);

-    if (channel->work_pending)
-      efx_process_channel_now(channel);
-
-    if (channel->magic_count != magic_count)
+    if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
       goto eventq_ok;
   } while (++count < 2);
···
   }

   /* Check to see if event was received even if interrupt wasn't */
-  efx_process_channel_now(channel);
-  if (channel->magic_count != magic_count) {
+  if (efx_nic_event_present(channel)) {
     netif_err(efx, drv, efx->net_dev,
         "channel %d event was generated, but "
         "failed to trigger an interrupt\n", channel->channel);
···
   efx->loopback_mode = loopback_mode;
   __efx_reconfigure_port(efx);
   mutex_unlock(&efx->mac_lock);
+
+  netif_tx_wake_all_queues(efx->net_dev);

   return rc_test;
 }
+2 -1
drivers/net/sfc/tx.c
···
    * queue state. */
   smp_mb();
   if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-      likely(efx->port_enabled)) {
+      likely(efx->port_enabled) &&
+      likely(!efx->port_inhibited)) {
     fill_level = tx_queue->insert_count - tx_queue->read_count;
     if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
       EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
+19 -4
drivers/net/sis900.c
···
  *	@net_dev: the net device to get address for
  *
  *	Older SiS900 and friends, use EEPROM to store MAC address.
- *	MAC address is read from read_eeprom() into @net_dev->dev_addr.
+ *	MAC address is read from read_eeprom() into @net_dev->dev_addr and
+ *	@net_dev->perm_addr.
  */

 static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
···
   for (i = 0; i < 3; i++)
     ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);

+  /* Store MAC Address in perm_addr */
+  memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
   return 1;
 }
···
  *
  *	SiS630E model, use APC CMOS RAM to store MAC address.
  *	APC CMOS RAM is accessed through ISA bridge.
- *	MAC address is read into @net_dev->dev_addr.
+ *	MAC address is read into @net_dev->dev_addr and
+ *	@net_dev->perm_addr.
  */

 static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
···
     outb(0x09 + i, 0x70);
     ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
   }
+
+  /* Store MAC Address in perm_addr */
+  memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
   pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
   pci_dev_put(isa_bridge);

···
  *
  *	SiS635 model, set MAC Reload Bit to load Mac address from APC
  *	to rfdr. rfdr is accessed through rfcr. MAC address is read into
- *	@net_dev->dev_addr.
+ *	@net_dev->dev_addr and @net_dev->perm_addr.
  */

 static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
···
     *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
   }

+  /* Store MAC Address in perm_addr */
+  memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
   /* enable packet filtering */
   outl(rfcrSave | RFEN, rfcr + ioaddr);

···
  *	EEDONE signal to refuse EEPROM access by LAN.
  *	The EEPROM map of SiS962 or SiS963 is different to SiS900.
  *	The signature field in SiS962 or SiS963 spec is meaningless.
- *	MAC address is read into @net_dev->dev_addr.
+ *	MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
  */

 static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
···
   /* get MAC address from EEPROM */
   for (i = 0; i < 3; i++)
     ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+
+  /* Store MAC Address in perm_addr */
+  memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);

   outl(EEDONE, ee_addr);
   return 1;
+14 -14
drivers/net/stmmac/dwmac_lib.c
···

 #undef DWMAC_DMA_DEBUG
 #ifdef DWMAC_DMA_DEBUG
-#define DBG(fmt, args...)  printk(fmt, ## args)
+#define DWMAC_LIB_DBG(fmt, args...)  printk(fmt, ## args)
 #else
-#define DBG(fmt, args...)  do { } while (0)
+#define DWMAC_LIB_DBG(fmt, args...)  do { } while (0)
 #endif

 /* CSR1 enables the transmit DMA to check for new descriptor */
···
   /* read the status register (CSR5) */
   u32 intr_status = readl(ioaddr + DMA_STATUS);

-  DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+  DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
 #ifdef DWMAC_DMA_DEBUG
   /* It displays the DMA process states (CSR5 register) */
   show_tx_process_state(intr_status);
···
 #endif
   /* ABNORMAL interrupts */
   if (unlikely(intr_status & DMA_STATUS_AIS)) {
-    DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+    DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
     if (unlikely(intr_status & DMA_STATUS_UNF)) {
-      DBG(INFO, "transmit underflow\n");
+      DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
       ret = tx_hard_error_bump_tc;
       x->tx_undeflow_irq++;
     }
     if (unlikely(intr_status & DMA_STATUS_TJT)) {
-      DBG(INFO, "transmit jabber\n");
+      DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
       x->tx_jabber_irq++;
     }
     if (unlikely(intr_status & DMA_STATUS_OVF)) {
-      DBG(INFO, "recv overflow\n");
+      DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
       x->rx_overflow_irq++;
     }
     if (unlikely(intr_status & DMA_STATUS_RU)) {
-      DBG(INFO, "receive buffer unavailable\n");
+      DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
       x->rx_buf_unav_irq++;
     }
     if (unlikely(intr_status & DMA_STATUS_RPS)) {
-      DBG(INFO, "receive process stopped\n");
+      DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
       x->rx_process_stopped_irq++;
     }
     if (unlikely(intr_status & DMA_STATUS_RWT)) {
-      DBG(INFO, "receive watchdog\n");
+      DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
       x->rx_watchdog_irq++;
     }
     if (unlikely(intr_status & DMA_STATUS_ETI)) {
-      DBG(INFO, "transmit early interrupt\n");
+      DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
       x->tx_early_irq++;
     }
     if (unlikely(intr_status & DMA_STATUS_TPS)) {
-      DBG(INFO, "transmit process stopped\n");
+      DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
       x->tx_process_stopped_irq++;
       ret = tx_hard_error;
     }
     if (unlikely(intr_status & DMA_STATUS_FBI)) {
-      DBG(INFO, "fatal bus error\n");
+      DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
       x->fatal_bus_error_irq++;
       ret = tx_hard_error;
     }
···
   /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
   writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);

-  DBG(INFO, "\n\n");
+  DWMAC_LIB_DBG(KERN_INFO "\n\n");
   return ret;
 }
+28 -21
drivers/net/stmmac/stmmac_main.c
···
     priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
     priv->xstats.threshold = tc;
   }
-    stmmac_tx_err(priv);
   } else if (unlikely(status == tx_hard_error))
     stmmac_tx_err(priv);
 }
···

   stmmac_verify_args();

-  ret = stmmac_init_phy(dev);
-  if (unlikely(ret)) {
-    pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
-    return ret;
-  }
-
-  /* Request the IRQ lines */
-  ret = request_irq(dev->irq, stmmac_interrupt,
-        IRQF_SHARED, dev->name, dev);
-  if (unlikely(ret < 0)) {
-    pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
-           __func__, dev->irq, ret);
-    return ret;
-  }
-
 #ifdef CONFIG_STMMAC_TIMER
   priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
   if (unlikely(priv->tm == NULL)) {
···
   } else
     priv->tm->enable = 1;
 #endif
+  ret = stmmac_init_phy(dev);
+  if (unlikely(ret)) {
+    pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+    goto open_error;
+  }

   /* Create and initialize the TX/RX descriptors chains. */
   priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
···
   init_dma_desc_rings(dev);

   /* DMA initialization and SW reset */
-  if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
-           priv->dma_tx_phy,
-           priv->dma_rx_phy) < 0)) {
-
+  ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+          priv->dma_tx_phy, priv->dma_rx_phy);
+  if (ret < 0) {
     pr_err("%s: DMA initialization failed\n", __func__);
-    return -1;
+    goto open_error;
   }

   /* Copy the MAC addr into the HW */
···
   /* Initialise the MMC (if present) to disable all interrupts. */
   writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
   writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
+
+  /* Request the IRQ lines */
+  ret = request_irq(dev->irq, stmmac_interrupt,
+        IRQF_SHARED, dev->name, dev);
+  if (unlikely(ret < 0)) {
+    pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+           __func__, dev->irq, ret);
+    goto open_error;
+  }

   /* Enable the MAC Rx/Tx */
   stmmac_enable_mac(priv->ioaddr);
···
   napi_enable(&priv->napi);
   skb_queue_head_init(&priv->rx_recycle);
   netif_start_queue(dev);
+
   return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+  kfree(priv->tm);
+#endif
+  if (priv->phydev)
+    phy_disconnect(priv->phydev);
+
+  return ret;
 }

 /**
+2 -2
drivers/net/tokenring/3c359.c
···
 /*
  * The NIC has told us that a packet has been downloaded onto the card, we must
  * find out which packet it has done, clear the skb and information for the packet
- * then advance around the ring for all tranmitted packets
+ * then advance around the ring for all transmitted packets
  */

 static void xl_dn_comp(struct net_device *dev)
···
   if (lan_status_diff & LSC_SOFT_ERR)
     printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
   if (lan_status_diff & LSC_TRAN_BCN)
-    printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+    printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
   if (lan_status_diff & LSC_SS)
     printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
   if (lan_status_diff & LSC_RING_REC)
+1 -1
drivers/net/tokenring/lanstreamer.c
···
   if (lan_status_diff & LSC_SOFT_ERR)
     printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
   if (lan_status_diff & LSC_TRAN_BCN)
-    printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
+    printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
   if (lan_status_diff & LSC_SS)
     printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
   if (lan_status_diff & LSC_RING_REC)
+1 -1
drivers/net/tokenring/olympic.c
···
   if (lan_status_diff & LSC_SOFT_ERR)
     printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
   if (lan_status_diff & LSC_TRAN_BCN)
-    printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+    printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
   if (lan_status_diff & LSC_SS)
     printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
   if (lan_status_diff & LSC_RING_REC)
+2 -2
drivers/net/wireless/ath/ath9k/hif_usb.c
···
   }

   ret = ath9k_htc_hw_init(hif_dev->htc_handle,
-        &hif_dev->udev->dev, hif_dev->device_id,
+        &interface->dev, hif_dev->device_id,
         hif_dev->udev->product, id->driver_info);
   if (ret) {
     ret = -EINVAL;
···
 #endif

 static struct usb_driver ath9k_hif_usb_driver = {
-  .name = "ath9k_hif_usb",
+  .name = KBUILD_MODNAME,
   .probe = ath9k_hif_usb_probe,
   .disconnect = ath9k_hif_usb_disconnect,
 #ifdef CONFIG_PM
-9
drivers/net/wireless/ath/ath9k/hw.c
···
   ah->txchainmask = common->tx_chainmask;
   ah->rxchainmask = common->rx_chainmask;

-  if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
-    ath9k_hw_abortpcurecv(ah);
-    if (!ath9k_hw_stopdmarecv(ah)) {
-      ath_dbg(common, ATH_DBG_XMIT,
-        "Failed to stop receive dma\n");
-      bChannelChange = false;
-    }
-  }
-
   if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
     return -EIO;
+22 -3
drivers/net/wireless/ath/ath9k/mac.c
···
 }
 EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

-bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
 {
 #define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
 #define AH_RX_TIME_QUANTUM     100     /* usec */
   struct ath_common *common = ath9k_hw_common(ah);
+  u32 mac_status, last_mac_status = 0;
   int i;
+
+  /* Enable access to the DMA observation bus */
+  REG_WRITE(ah, AR_MACMISC,
+      ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+       (AR_MACMISC_MISC_OBS_BUS_1 <<
+        AR_MACMISC_MISC_OBS_BUS_MSB_S)));

   REG_WRITE(ah, AR_CR, AR_CR_RXD);
···
   for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
     if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
       break;
+
+    if (!AR_SREV_9300_20_OR_LATER(ah)) {
+      mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
+      if (mac_status == 0x1c0 && mac_status == last_mac_status) {
+        *reset = true;
+        break;
+      }
+
+      last_mac_status = mac_status;
+    }
+
     udelay(AH_TIME_QUANTUM);
   }

   if (i == 0) {
     ath_err(common,
-      "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+      "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
       AH_RX_STOP_DMA_TIMEOUT / 1000,
       REG_READ(ah, AR_CR),
-      REG_READ(ah, AR_DIAG_SW));
+      REG_READ(ah, AR_DIAG_SW),
+      REG_READ(ah, AR_DMADBG_7));
     return false;
   } else {
     return true;
+1 -1
drivers/net/wireless/ath/ath9k/mac.h
···
 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
 void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
 void ath9k_hw_abortpcurecv(struct ath_hw *ah);
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
 int ath9k_hw_beaconq_setup(struct ath_hw *ah);

 /* Interrupt Handling */
+10 -2
drivers/net/wireless/ath/ath9k/main.c
···

   ath9k_calculate_iter_data(hw, vif, &iter_data);

-  ath9k_ps_wakeup(sc);
   /* Set BSSID mask. */
   memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
   ath_hw_setbssidmask(common);
···
   }

   ath9k_hw_set_interrupts(ah, ah->imask);
-  ath9k_ps_restore(sc);

   /* Set up ANI */
   if ((iter_data.naps + iter_data.nadhocs) > 0) {
···
   struct ath_vif *avp = (void *)vif->drv_priv;
   int ret = 0;

+  ath9k_ps_wakeup(sc);
   mutex_lock(&sc->mutex);

   switch (vif->type) {
···
   ath9k_do_vif_add_setup(hw, vif);
 out:
   mutex_unlock(&sc->mutex);
+  ath9k_ps_restore(sc);
   return ret;
 }

···

   ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
   mutex_lock(&sc->mutex);
+  ath9k_ps_wakeup(sc);

   /* See if new interface type is valid. */
   if ((new_type == NL80211_IFTYPE_ADHOC) &&
···

   ath9k_do_vif_add_setup(hw, vif);
 out:
+  ath9k_ps_restore(sc);
   mutex_unlock(&sc->mutex);
   return ret;
 }
···

   ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");

+  ath9k_ps_wakeup(sc);
   mutex_lock(&sc->mutex);

   sc->nvifs--;
···
   ath9k_calculate_summary_state(hw, NULL);

   mutex_unlock(&sc->mutex);
+  ath9k_ps_restore(sc);
 }

 static void ath9k_enable_ps(struct ath_softc *sc)
···

   txq = sc->tx.txq_map[queue];

+  ath9k_ps_wakeup(sc);
   mutex_lock(&sc->mutex);

   memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
···
   ath_beaconq_config(sc);

   mutex_unlock(&sc->mutex);
+  ath9k_ps_restore(sc);

   return ret;
 }
···
   int slottime;
   int error;

+  ath9k_ps_wakeup(sc);
   mutex_lock(&sc->mutex);

   if (changed & BSS_CHANGED_BSSID) {
···
   }

   mutex_unlock(&sc->mutex);
+  ath9k_ps_restore(sc);
 }

 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
+3 -3
drivers/net/wireless/ath/ath9k/recv.c
···
 bool ath_stoprecv(struct ath_softc *sc)
 {
   struct ath_hw *ah = sc->sc_ah;
-  bool stopped;
+  bool stopped, reset = false;

   spin_lock_bh(&sc->rx.rxbuflock);
   ath9k_hw_abortpcurecv(ah);
   ath9k_hw_setrxfilter(ah, 0);
-  stopped = ath9k_hw_stopdmarecv(ah);
+  stopped = ath9k_hw_stopdmarecv(ah, &reset);

   if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
     ath_edma_stop_recv(sc);
···
       "confusing the DMA engine when we start RX up\n");
     ATH_DBG_WARN_ON_ONCE(!stopped);
   }
-  return stopped;
+  return stopped || reset;
 }

 void ath_flushrecv(struct ath_softc *sc)
+1
drivers/net/wireless/ath/regd_common.h
···
   {APL9_WORLD, CTL_ETSI, CTL_ETSI},

   {APL3_FCCA, CTL_FCC, CTL_FCC},
+  {APL7_FCCA, CTL_FCC, CTL_FCC},
   {APL1_ETSIC, CTL_FCC, CTL_ETSI},
   {APL2_ETSIC, CTL_FCC, CTL_ETSI},
   {APL2_APLD, CTL_FCC, NO_CTL},
+5 -4
drivers/net/wireless/iwlegacy/Kconfig
···
 config IWLWIFI_LEGACY
-	tristate "Intel Wireless Wifi legacy devices"
-	depends on PCI && MAC80211
+	tristate
 	select FW_LOADER
 	select NEW_LEDS
 	select LEDS_CLASS
···

 config IWL4965
 	tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
-	depends on IWLWIFI_LEGACY
+	depends on PCI && MAC80211
+	select IWLWIFI_LEGACY
 	---help---
 	  This option enables support for

···

 config IWL3945
 	tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
-	depends on IWLWIFI_LEGACY
+	depends on PCI && MAC80211
+	select IWLWIFI_LEGACY
 	---help---
 	  Select to build the driver supporting the:

-2
drivers/net/wireless/iwlegacy/iwl-3945-hw.h
···
 /* RSSI to dBm */
 #define IWL39_RSSI_OFFSET	95

-#define IWL_DEFAULT_TX_POWER	0x0F
-
 /*
  * EEPROM related constants, enums, and structures.
  */
-3
drivers/net/wireless/iwlegacy/iwl-4965-hw.h
···

 #define IWL4965_DEFAULT_TX_RETRY  15

-/* Limit range of txpower output target to be between these values */
-#define IWL4965_TX_POWER_TARGET_POWER_MIN	(0)	/* 0 dBm: 1 milliwatt */
-
 /* EEPROM */
 #define IWL4965_FIRST_AMPDU_QUEUE	10

+11 -6
drivers/net/wireless/iwlegacy/iwl-core.c
···
   struct ieee80211_channel *geo_ch;
   struct ieee80211_rate *rates;
   int i = 0;
+  s8 max_tx_power = 0;

   if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
       priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
···

       geo_ch->flags |= ch->ht40_extension_channel;

-      if (ch->max_power_avg > priv->tx_power_device_lmt)
-        priv->tx_power_device_lmt = ch->max_power_avg;
+      if (ch->max_power_avg > max_tx_power)
+        max_tx_power = ch->max_power_avg;
     } else {
       geo_ch->flags |= IEEE80211_CHAN_DISABLED;
     }
···
         "restricted" : "valid",
          geo_ch->flags);
   }
+
+  priv->tx_power_device_lmt = max_tx_power;
+  priv->tx_power_user_lmt = max_tx_power;
+  priv->tx_power_next = max_tx_power;

   if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
        priv->cfg->sku & IWL_SKU_A) {
···
   if (!priv->cfg->ops->lib->send_tx_power)
     return -EOPNOTSUPP;

-  if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+  /* 0 dBm mean 1 milliwatt */
+  if (tx_power < 0) {
     IWL_WARN(priv,
-       "Requested user TXPOWER %d below lower limit %d.\n",
-       tx_power,
-       IWL4965_TX_POWER_TARGET_POWER_MIN);
+       "Requested user TXPOWER %d below 1 mW.\n",
+       tx_power);
     return -EINVAL;
   }

-7
drivers/net/wireless/iwlegacy/iwl-eeprom.c
···
             flags & EEPROM_CHANNEL_RADAR))
            ? "" : "not ");

-      /* Set the tx_power_user_lmt to the highest power
-       * supported by any channel */
-      if (eeprom_ch_info[ch].max_power_avg >
-            priv->tx_power_user_lmt)
-        priv->tx_power_user_lmt =
-            eeprom_ch_info[ch].max_power_avg;
-
       ch_info++;
     }
   }
-4
drivers/net/wireless/iwlegacy/iwl3945-base.c
···
   priv->force_reset[IWL_FW_RESET].reset_duration =
     IWL_DELAY_NEXT_FORCE_FW_RELOAD;

-
-  priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
-  priv->tx_power_next = IWL_DEFAULT_TX_POWER;
-
   if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
     IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
        eeprom->version);
-6
drivers/net/wireless/iwlegacy/iwl4965-base.c
···

   iwl_legacy_init_scan_params(priv);

-  /* Set the tx_power_user_lmt to the lowest power level
-   * this value will get overwritten by channel max power avg
-   * from eeprom */
-  priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
-  priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
-
   ret = iwl_legacy_init_channel_map(priv);
   if (ret) {
     IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
+3
drivers/net/wireless/iwlwifi/iwl-5000.c
···
 struct iwl_cfg iwl5300_agn_cfg = {
   .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
   IWL_DEVICE_5000,
+  /* at least EEPROM 0x11A has wrong info */
+  .valid_tx_ant = ANT_ABC,	/* .cfg overwrite */
+  .valid_rx_ant = ANT_ABC,	/* .cfg overwrite */
   .ht_params = &iwl5000_ht_params,
 };

+8 -1
drivers/net/wireless/mwl8k.c
···
 struct mwl8k_priv {
   struct ieee80211_hw *hw;
   struct pci_dev *pdev;
+  int irq;

   struct mwl8k_device_info *device_info;

···
   rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
        IRQF_SHARED, MWL8K_NAME, hw);
   if (rc) {
+    priv->irq = -1;
     wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
     return -EIO;
   }
+  priv->irq = priv->pdev->irq;

   /* Enable TX reclaim and RX tasklets.  */
   tasklet_enable(&priv->poll_tx_task);
···
   if (rc) {
     iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
     free_irq(priv->pdev->irq, hw);
+    priv->irq = -1;
     tasklet_disable(&priv->poll_tx_task);
     tasklet_disable(&priv->poll_rx_task);
   }
···

   /* Disable interrupts */
   iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
-  free_irq(priv->pdev->irq, hw);
+  if (priv->irq != -1) {
+    free_irq(priv->pdev->irq, hw);
+    priv->irq = -1;
+  }

   /* Stop finalize join worker */
   cancel_work_sync(&priv->finalize_join_worker);
+1 -1
drivers/net/wireless/p54/txrx.c
···
   struct p54_tx_info *p54info;
   struct p54_hdr *hdr;
   struct p54_tx_data *txhdr;
-  unsigned int padding, len, extra_len;
+  unsigned int padding, len, extra_len = 0;
   int i, j, ridx;
   u16 hdr_flags = 0, aid = 0;
   u8 rate, queue = 0, crypt_offset = 0;
+2 -2
include/linux/usb/usbnet.h
···
  * Indicates to usbnet, that USB driver accumulates multiple IP packets.
  * Affects statistic (counters) and short packet handling.
  */
-#define FLAG_MULTI_PACKET	0x1000
-#define FLAG_RX_ASSEMBLE	0x2000	/* rx packets may span >1 frames */
+#define FLAG_MULTI_PACKET	0x2000
+#define FLAG_RX_ASSEMBLE	0x4000	/* rx packets may span >1 frames */

 /* init device ... can sleep, or cause probe() failure */
   int	(*bind)(struct usbnet *, struct usb_interface *);
+2 -4
net/bridge/br_netfilter.c
···
     goto drop;
   }

-  /* Zero out the CB buffer if no options present */
-  if (iph->ihl == 5) {
-    memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+  memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+  if (iph->ihl == 5)
     return 0;
-  }

   opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
   if (ip_options_compile(dev_net(dev), opt, skb))
+5 -1
net/caif/cfdgml.c
···
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>

+
 #define container_obj(layr) ((struct cfsrvl *) layr)

 #define DGM_CMD_BIT  0x80
···

 static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
+  u8 packet_type;
   u32 zero = 0;
   struct caif_payload_info *info;
   struct cfsrvl *service = container_obj(layr);
···
   if (cfpkt_getlen(pkt) > DGM_MTU)
     return -EMSGSIZE;

-  cfpkt_add_head(pkt, &zero, 4);
+  cfpkt_add_head(pkt, &zero, 3);
+  packet_type = 0x08; /* B9 set - UNCLASSIFIED */
+  cfpkt_add_head(pkt, &packet_type, 1);

   /* Add info for MUX-layer to route the packet out. */
   info = cfpkt_info(pkt);
+2 -2
net/caif/cfmuxl.c
···
         int phyid)
 {
   struct cfmuxl *muxl = container_obj(layr);
-  struct list_head *node;
+  struct list_head *node, *next;
   struct cflayer *layer;
-  list_for_each(node, &muxl->srvl_list) {
+  list_for_each_safe(node, next, &muxl->srvl_list) {
     layer = list_entry(node, struct cflayer, node);
     if (cfsrvl_phyid_match(layer, phyid))
       layer->ctrlcmd(layer, ctrl, phyid);
+7 -3
net/core/dev.c
···
   }

   /* TSO requires that SG is present as well. */
-  if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
-    netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
-    features &= ~NETIF_F_TSO;
+  if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
+    netdev_info(dev, "Dropping TSO features since no SG feature.\n");
+    features &= ~NETIF_F_ALL_TSO;
   }
+
+  /* TSO ECN requires that TSO is present as well. */
+  if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
+    features &= ~NETIF_F_TSO_ECN;

   /* Software GSO depends on SG. */
   if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
-2
net/ieee802154/Makefile
···
 obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
 ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
 af_802154-y := af_ieee802154.o raw.o dgram.o
-
-ccflags-y += -Wall -DDEBUG
+2 -3
net/ipv4/inet_connection_sock.c
···
        !sk2->sk_bound_dev_if ||
        sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
       if (!reuse || !sk2->sk_reuse ||
-          ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
+          sk2->sk_state == TCP_LISTEN) {
         const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
         if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
             sk2_rcv_saddr == sk_rcv_saddr(sk))
···
             (tb->num_owners < smallest_size || smallest_size == -1)) {
           smallest_size = tb->num_owners;
           smallest_rover = rover;
-          if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
-              !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+          if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
             spin_unlock(&head->lock);
             snum = smallest_rover;
             goto have_snum;
+7 -6
net/ipv4/inetpeer.c
···
 }

 /* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+           struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
   int do_free;

···
    * We use refcnt=-1 to alert lockless readers this entry is deleted.
    */
   if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-    struct inet_peer __rcu **stack[PEER_MAXDEPTH];
     struct inet_peer __rcu ***stackptr, ***delp;
     if (lookup(&p->daddr, stack, base) != p)
       BUG();
···
 }

 /* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
   struct inet_peer *p = NULL;

···
      * happen because of entry limits in route cache. */
     return -1;

-  unlink_from_pool(p, peer_to_base(p));
+  unlink_from_pool(p, peer_to_base(p), stack);
   return 0;
 }
···

   if (base->total >= inet_peer_threshold)
     /* Remove one less-recently-used entry. */
-    cleanup_once(0);
+    cleanup_once(0, stack);

   return p;
 }
···
 {
   unsigned long now = jiffies;
   int ttl, total;
+  struct inet_peer __rcu **stack[PEER_MAXDEPTH];

   total = compute_total();
   if (total >= inet_peer_threshold)
···
     ttl = inet_peer_maxttl
         - (inet_peer_maxttl - inet_peer_minttl) / HZ *
           total / inet_peer_threshold * HZ;
-  while (!cleanup_once(ttl)) {
+  while (!cleanup_once(ttl, stack)) {
     if (jiffies != now)
       break;
   }
+3 -3
net/ipv4/ip_options.c
···
         pp_ptr = optptr + 2;
         goto error;
       }
-      if (skb) {
+      if (rt) {
         memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
         opt->is_changed = 1;
       }
···
           goto error;
         }
         opt->ts = optptr - iph;
-        if (skb) {
+        if (rt)  {
           memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
           timeptr = (__be32*)&optptr[optptr[2]+3];
         }
···
   unsigned long orefdst;
   int err;

-  if (!opt->srr)
+  if (!opt->srr || !rt)
     return 0;

   if (skb->pkt_type != PACKET_HOST)
-3
net/ipv4/sysctl_net_ipv4.c
···
     .mode		= 0644,
     .proc_handler	= proc_do_large_bitmap,
   },
-#ifdef CONFIG_IP_MULTICAST
   {
     .procname	= "igmp_max_memberships",
     .data		= &sysctl_igmp_max_memberships,
···
     .mode		= 0644,
     .proc_handler	= proc_dointvec
   },
-
-#endif
   {
     .procname	= "igmp_max_msf",
     .data		= &sysctl_igmp_max_msf,
+1 -1
net/ipv6/inet6_connection_sock.c
···
          !sk2->sk_bound_dev_if ||
          sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
         (!sk->sk_reuse || !sk2->sk_reuse ||
-         ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
+         sk2->sk_state == TCP_LISTEN) &&
          ipv6_rcv_saddr_equal(sk, sk2))
       break;
   }
+1 -2
net/irda/af_irda.c
···
   /* Note : socket.c set MSG_EOR on SEQPACKET sockets */
   if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
            MSG_NOSIGNAL)) {
-    err = -EINVAL;
-    goto out;
+    return -EINVAL;
   }

   lock_sock(sk);
+1 -2
net/llc/llc_input.c
···
     s32 data_size = ntohs(pdulen) - llc_len;

     if (data_size < 0 ||
-        ((skb_tail_pointer(skb) -
-          (u8 *)pdu) - llc_len) < data_size)
+        !pskb_may_pull(skb, data_size))
       return 0;
     if (unlikely(pskb_trim_rcsum(skb, data_size)))
       return 0;
+4
net/netfilter/ipset/ip_set_bitmap_ipmac.c
···
   ipset_adtfn adtfn = set->variant->adt[adt];
   struct ipmac data;

+  /* MAC can be src only */
+  if (!(flags & IPSET_DIM_TWO_SRC))
+    return 0;
+
   data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
   if (data.id < map->first_ip || data.id > map->last_ip)
     return -IPSET_ERR_BITMAP_RANGE;
+10 -8
net/netfilter/ipset/ip_set_core.c
···
   if (cb->args[1] >= ip_set_max)
     goto out;

-  pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
   max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+dump_last:
+  pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
   for (; cb->args[1] < max; cb->args[1]++) {
     index = (ip_set_id_t) cb->args[1];
     set = ip_set_list[index];
···
      * so that lists (unions of sets) are dumped last.
      */
     if (cb->args[0] != DUMP_ONE &&
-        !((cb->args[0] == DUMP_ALL) ^
-          (set->type->features & IPSET_DUMP_LAST)))
+        ((cb->args[0] == DUMP_ALL) ==
+         !!(set->type->features & IPSET_DUMP_LAST)))
       continue;
     pr_debug("List set: %s\n", set->name);
     if (!cb->args[2]) {
···
         goto release_refcount;
       }
     }
+  /* If we dump all sets, continue with dumping last ones */
+  if (cb->args[0] == DUMP_ALL) {
+    cb->args[0] = DUMP_LAST;
+    cb->args[1] = 0;
+    goto dump_last;
+  }
   goto out;

 nla_put_failure:
···
     pr_debug("release set %s\n", ip_set_list[index]->name);
     ip_set_put_byindex(index);
   }
-
-  /* If we dump all sets, continue with dumping last ones */
-  if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
-    cb->args[0] = DUMP_LAST;
-
 out:
   if (nlh) {
     nlmsg_end(skb, nlh);
+16 -2
net/netfilter/xt_set.c
···
   if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
     pr_warning("Protocol error: set match dimension "
          "is over the limit!\n");
+    ip_set_nfnl_put(info->match_set.index);
     return -ERANGE;
   }

···
     if (index == IPSET_INVALID_ID) {
       pr_warning("Cannot find del_set index %u as target\n",
            info->del_set.index);
+      if (info->add_set.index != IPSET_INVALID_ID)
+        ip_set_nfnl_put(info->add_set.index);
       return -ENOENT;
     }
   }
···
       info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
     pr_warning("Protocol error: SET target dimension "
          "is over the limit!\n");
+    if (info->add_set.index != IPSET_INVALID_ID)
+      ip_set_nfnl_put(info->add_set.index);
+    if (info->del_set.index != IPSET_INVALID_ID)
+      ip_set_nfnl_put(info->del_set.index);
     return -ERANGE;
   }

···
   if (info->match_set.dim > IPSET_DIM_MAX) {
     pr_warning("Protocol error: set match dimension "
          "is over the limit!\n");
+    ip_set_nfnl_put(info->match_set.index);
     return -ERANGE;
   }

···
   if (info->del_set.index != IPSET_INVALID_ID)
     ip_set_del(info->del_set.index,
          skb, par->family,
-         info->add_set.dim,
+         info->del_set.dim,
          info->del_set.flags);

   return XT_CONTINUE;
···
     if (index == IPSET_INVALID_ID) {
       pr_warning("Cannot find del_set index %u as target\n",
            info->del_set.index);
+      if (info->add_set.index != IPSET_INVALID_ID)
+        ip_set_nfnl_put(info->add_set.index);
       return -ENOENT;
     }
   }
   if (info->add_set.dim > IPSET_DIM_MAX ||
-      info->del_set.flags > IPSET_DIM_MAX) {
+      info->del_set.dim > IPSET_DIM_MAX) {
     pr_warning("Protocol error: SET target dimension "
          "is over the limit!\n");
+    if (info->add_set.index != IPSET_INVALID_ID)
+      ip_set_nfnl_put(info->add_set.index);
+    if (info->del_set.index != IPSET_INVALID_ID)
+      ip_set_nfnl_put(info->del_set.index);
     return -ERANGE;
   }

+4
net/sctp/associola.c
···
     sctp_assoc_set_primary(asoc, transport);
   if (asoc->peer.active_path == peer)
     asoc->peer.active_path = transport;
+  if (asoc->peer.retran_path == peer)
+    asoc->peer.retran_path = transport;
   if (asoc->peer.last_data_from == peer)
     asoc->peer.last_data_from = transport;

···

   if (t)
     asoc->peer.retran_path = t;
+  else
+    t = asoc->peer.retran_path;

   SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
          " %p addr: ",