Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (47 commits)
forcedeth: fix a few sparse warnings (variable shadowing)
forcedeth: Improve stats counters
forcedeth: remove unneeded stats updates
forcedeth: Acknowledge only interrupts that are being processed
forcedeth: fix race when unloading module
MAINTAINERS/rds: update maintainer
wanrouter: Remove kernel_lock annotations
usbnet: fix oops in usbnet_start_xmit
ixgbe: Fix compile for kernel without CONFIG_PCI_IOV defined
etherh: Add MAINTAINERS entry for etherh
bonding: comparing a u8 with -1 is always false
sky2: fix regression on Yukon Optima
netlink: clarify attribute length check documentation
netlink: validate NLA_MSECS length
i825xx:xscale:8390:freescale: Fix Kconfig dependencies
macvlan: receive multicast with local address
tg3: Update version to 3.121
tg3: Eliminate timer race with reset_task
tg3: Schedule at most one tg3_reset_task run
tg3: Obtain PCI function number from device
...

+325 -267
+2 -1
MAINTAINERS
··· 1032 1032 F: arch/arm/include/asm/hardware/iomd.h 1033 1033 F: arch/arm/include/asm/hardware/memc.h 1034 1034 F: arch/arm/mach-rpc/ 1035 + F: drivers/net/ethernet/8390/etherh.c 1035 1036 F: drivers/net/ethernet/i825xx/ether1* 1036 1037 F: drivers/net/ethernet/seeq/ether3* 1037 1038 F: drivers/scsi/arm/ ··· 5471 5470 F: drivers/net/ethernet/rdc/r6040.c 5472 5471 5473 5472 RDS - RELIABLE DATAGRAM SOCKETS 5474 - M: Andy Grover <andy.grover@oracle.com> 5473 + M: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com> 5475 5474 L: rds-devel@oss.oracle.com (moderated for non-subscribers) 5476 5475 S: Supported 5477 5476 F: net/rds/
+2 -2
drivers/bluetooth/ath3k.c
··· 105 105 106 106 pipe = usb_sndctrlpipe(udev, 0); 107 107 108 - send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); 108 + send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); 109 109 if (!send_buf) { 110 110 BT_ERR("Can't allocate memory chunk for firmware"); 111 111 return -ENOMEM; ··· 176 176 177 177 count = firmware->size; 178 178 179 - send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); 179 + send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); 180 180 if (!send_buf) { 181 181 BT_ERR("Can't allocate memory chunk for firmware"); 182 182 return -ENOMEM;
+11 -1
drivers/bluetooth/bcm203x.c
··· 24 24 25 25 #include <linux/module.h> 26 26 27 + #include <linux/atomic.h> 27 28 #include <linux/kernel.h> 28 29 #include <linux/init.h> 29 30 #include <linux/slab.h> ··· 66 65 unsigned long state; 67 66 68 67 struct work_struct work; 68 + atomic_t shutdown; 69 69 70 70 struct urb *urb; 71 71 unsigned char *buffer; ··· 99 97 100 98 data->state = BCM203X_SELECT_MEMORY; 101 99 100 + /* use workqueue to have a small delay */ 102 101 schedule_work(&data->work); 103 102 break; 104 103 ··· 158 155 struct bcm203x_data *data = 159 156 container_of(work, struct bcm203x_data, work); 160 157 161 - if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) 158 + if (atomic_read(&data->shutdown)) 159 + return; 160 + 161 + if (usb_submit_urb(data->urb, GFP_KERNEL) < 0) 162 162 BT_ERR("Can't submit URB"); 163 163 } 164 164 ··· 249 243 250 244 usb_set_intfdata(intf, data); 251 245 246 + /* use workqueue to have a small delay */ 252 247 schedule_work(&data->work); 253 248 254 249 return 0; ··· 260 253 struct bcm203x_data *data = usb_get_intfdata(intf); 261 254 262 255 BT_DBG("intf %p", intf); 256 + 257 + atomic_inc(&data->shutdown); 258 + cancel_work_sync(&data->work); 263 259 264 260 usb_kill_urb(data->urb); 265 261
+7 -6
drivers/bluetooth/bfusb.c
··· 568 568 569 569 BT_INFO("BlueFRITZ! USB loading firmware"); 570 570 571 + buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_KERNEL); 572 + if (!buf) { 573 + BT_ERR("Can't allocate memory chunk for firmware"); 574 + return -ENOMEM; 575 + } 576 + 571 577 pipe = usb_sndctrlpipe(data->udev, 0); 572 578 573 579 if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, 574 580 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) { 575 581 BT_ERR("Can't change to loading configuration"); 582 + kfree(buf); 576 583 return -EBUSY; 577 584 } 578 585 579 586 data->udev->toggle[0] = data->udev->toggle[1] = 0; 580 - 581 - buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_ATOMIC); 582 - if (!buf) { 583 - BT_ERR("Can't allocate memory chunk for firmware"); 584 - return -ENOMEM; 585 - } 586 587 587 588 pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); 588 589
+2 -2
drivers/net/bonding/bond_main.c
··· 560 560 u32 slave_speed; 561 561 int res; 562 562 563 - slave->speed = -1; 564 - slave->duplex = -1; 563 + slave->speed = SPEED_UNKNOWN; 564 + slave->duplex = DUPLEX_UNKNOWN; 565 565 566 566 res = __ethtool_get_settings(slave_dev, &ecmd); 567 567 if (res < 0)
+2 -2
drivers/net/bonding/bond_procfs.c
··· 158 158 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); 159 159 seq_printf(seq, "MII Status: %s\n", 160 160 (slave->link == BOND_LINK_UP) ? "up" : "down"); 161 - if (slave->speed == -1) 161 + if (slave->speed == SPEED_UNKNOWN) 162 162 seq_printf(seq, "Speed: %s\n", "Unknown"); 163 163 else 164 164 seq_printf(seq, "Speed: %d Mbps\n", slave->speed); 165 165 166 - if (slave->duplex == -1) 166 + if (slave->duplex == DUPLEX_UNKNOWN) 167 167 seq_printf(seq, "Duplex: %s\n", "Unknown"); 168 168 else 169 169 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
+110 -85
drivers/net/ethernet/broadcom/tg3.c
··· 89 89 90 90 #define DRV_MODULE_NAME "tg3" 91 91 #define TG3_MAJ_NUM 3 92 - #define TG3_MIN_NUM 120 92 + #define TG3_MIN_NUM 121 93 93 #define DRV_MODULE_VERSION \ 94 94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 95 - #define DRV_MODULE_RELDATE "August 18, 2011" 95 + #define DRV_MODULE_RELDATE "November 2, 2011" 96 96 97 97 #define RESET_KIND_SHUTDOWN 0 98 98 #define RESET_KIND_INIT 1 ··· 628 628 regbase = TG3_APE_PER_LOCK_GRANT; 629 629 630 630 /* Make sure the driver hasn't any stale locks. */ 631 - for (i = 0; i < 8; i++) { 632 - if (i == TG3_APE_LOCK_GPIO) 633 - continue; 634 - tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); 631 + for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) { 632 + switch (i) { 633 + case TG3_APE_LOCK_PHY0: 634 + case TG3_APE_LOCK_PHY1: 635 + case TG3_APE_LOCK_PHY2: 636 + case TG3_APE_LOCK_PHY3: 637 + bit = APE_LOCK_GRANT_DRIVER; 638 + break; 639 + default: 640 + if (!tp->pci_fn) 641 + bit = APE_LOCK_GRANT_DRIVER; 642 + else 643 + bit = 1 << tp->pci_fn; 644 + } 645 + tg3_ape_write32(tp, regbase + 4 * i, bit); 635 646 } 636 647 637 - /* Clear the correct bit of the GPIO lock too. 
*/ 638 - if (!tp->pci_fn) 639 - bit = APE_LOCK_GRANT_DRIVER; 640 - else 641 - bit = 1 << tp->pci_fn; 642 - 643 - tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit); 644 648 } 645 649 646 650 static int tg3_ape_lock(struct tg3 *tp, int locknum) ··· 662 658 return 0; 663 659 case TG3_APE_LOCK_GRC: 664 660 case TG3_APE_LOCK_MEM: 661 + if (!tp->pci_fn) 662 + bit = APE_LOCK_REQ_DRIVER; 663 + else 664 + bit = 1 << tp->pci_fn; 665 665 break; 666 666 default: 667 667 return -EINVAL; ··· 680 672 } 681 673 682 674 off = 4 * locknum; 683 - 684 - if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) 685 - bit = APE_LOCK_REQ_DRIVER; 686 - else 687 - bit = 1 << tp->pci_fn; 688 675 689 676 tg3_ape_write32(tp, req + off, bit); 690 677 ··· 713 710 return; 714 711 case TG3_APE_LOCK_GRC: 715 712 case TG3_APE_LOCK_MEM: 713 + if (!tp->pci_fn) 714 + bit = APE_LOCK_GRANT_DRIVER; 715 + else 716 + bit = 1 << tp->pci_fn; 716 717 break; 717 718 default: 718 719 return; ··· 726 719 gnt = TG3_APE_LOCK_GRANT; 727 720 else 728 721 gnt = TG3_APE_PER_LOCK_GRANT; 729 - 730 - if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) 731 - bit = APE_LOCK_GRANT_DRIVER; 732 - else 733 - bit = 1 << tp->pci_fn; 734 722 735 723 tg3_ape_write32(tp, gnt + 4 * locknum, bit); 736 724 } ··· 5929 5927 return work_done; 5930 5928 } 5931 5929 5930 + static inline void tg3_reset_task_schedule(struct tg3 *tp) 5931 + { 5932 + if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) 5933 + schedule_work(&tp->reset_task); 5934 + } 5935 + 5936 + static inline void tg3_reset_task_cancel(struct tg3 *tp) 5937 + { 5938 + cancel_work_sync(&tp->reset_task); 5939 + tg3_flag_clear(tp, RESET_TASK_PENDING); 5940 + } 5941 + 5932 5942 static int tg3_poll_msix(struct napi_struct *napi, int budget) 5933 5943 { 5934 5944 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); ··· 5981 5967 tx_recovery: 5982 5968 /* work_done is guaranteed to be less than budget. 
*/ 5983 5969 napi_complete(napi); 5984 - schedule_work(&tp->reset_task); 5970 + tg3_reset_task_schedule(tp); 5985 5971 return work_done; 5986 5972 } 5987 5973 ··· 6016 6002 tg3_dump_state(tp); 6017 6003 6018 6004 tg3_flag_set(tp, ERROR_PROCESSED); 6019 - schedule_work(&tp->reset_task); 6005 + tg3_reset_task_schedule(tp); 6020 6006 } 6021 6007 6022 6008 static int tg3_poll(struct napi_struct *napi, int budget) ··· 6063 6049 tx_recovery: 6064 6050 /* work_done is guaranteed to be less than budget. */ 6065 6051 napi_complete(napi); 6066 - schedule_work(&tp->reset_task); 6052 + tg3_reset_task_schedule(tp); 6067 6053 return work_done; 6068 6054 } 6069 6055 ··· 6352 6338 { 6353 6339 struct tg3 *tp = container_of(work, struct tg3, reset_task); 6354 6340 int err; 6355 - unsigned int restart_timer; 6356 6341 6357 6342 tg3_full_lock(tp, 0); 6358 6343 6359 6344 if (!netif_running(tp->dev)) { 6345 + tg3_flag_clear(tp, RESET_TASK_PENDING); 6360 6346 tg3_full_unlock(tp); 6361 6347 return; 6362 6348 } ··· 6368 6354 tg3_netif_stop(tp); 6369 6355 6370 6356 tg3_full_lock(tp, 1); 6371 - 6372 - restart_timer = tg3_flag(tp, RESTART_TIMER); 6373 - tg3_flag_clear(tp, RESTART_TIMER); 6374 6357 6375 6358 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 6376 6359 tp->write32_tx_mbox = tg3_write32_tx_mbox; ··· 6383 6372 6384 6373 tg3_netif_start(tp); 6385 6374 6386 - if (restart_timer) 6387 - mod_timer(&tp->timer, jiffies + 1); 6388 - 6389 6375 out: 6390 6376 tg3_full_unlock(tp); 6391 6377 6392 6378 if (!err) 6393 6379 tg3_phy_start(tp); 6380 + 6381 + tg3_flag_clear(tp, RESET_TASK_PENDING); 6394 6382 } 6395 6383 6396 6384 static void tg3_tx_timeout(struct net_device *dev) ··· 6401 6391 tg3_dump_state(tp); 6402 6392 } 6403 6393 6404 - schedule_work(&tp->reset_task); 6394 + tg3_reset_task_schedule(tp); 6405 6395 } 6406 6396 6407 6397 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ ··· 6452 6442 hwbug = 1; 6453 6443 6454 6444 if (tg3_flag(tp, 4K_FIFO_LIMIT)) { 6445 + u32 prvidx = 
*entry; 6455 6446 u32 tmp_flag = flags & ~TXD_FLAG_END; 6456 - while (len > TG3_TX_BD_DMA_MAX) { 6447 + while (len > TG3_TX_BD_DMA_MAX && *budget) { 6457 6448 u32 frag_len = TG3_TX_BD_DMA_MAX; 6458 6449 len -= TG3_TX_BD_DMA_MAX; 6459 6450 6460 - if (len) { 6461 - tnapi->tx_buffers[*entry].fragmented = true; 6462 - /* Avoid the 8byte DMA problem */ 6463 - if (len <= 8) { 6464 - len += TG3_TX_BD_DMA_MAX / 2; 6465 - frag_len = TG3_TX_BD_DMA_MAX / 2; 6466 - } 6467 - } else 6468 - tmp_flag = flags; 6469 - 6470 - if (*budget) { 6471 - tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 6472 - frag_len, tmp_flag, mss, vlan); 6473 - (*budget)--; 6474 - *entry = NEXT_TX(*entry); 6475 - } else { 6476 - hwbug = 1; 6477 - break; 6451 + /* Avoid the 8byte DMA problem */ 6452 + if (len <= 8) { 6453 + len += TG3_TX_BD_DMA_MAX / 2; 6454 + frag_len = TG3_TX_BD_DMA_MAX / 2; 6478 6455 } 6456 + 6457 + tnapi->tx_buffers[*entry].fragmented = true; 6458 + 6459 + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 6460 + frag_len, tmp_flag, mss, vlan); 6461 + *budget -= 1; 6462 + prvidx = *entry; 6463 + *entry = NEXT_TX(*entry); 6479 6464 6480 6465 map += frag_len; 6481 6466 } ··· 6479 6474 if (*budget) { 6480 6475 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 6481 6476 len, flags, mss, vlan); 6482 - (*budget)--; 6477 + *budget -= 1; 6483 6478 *entry = NEXT_TX(*entry); 6484 6479 } else { 6485 6480 hwbug = 1; 6481 + tnapi->tx_buffers[prvidx].fragmented = false; 6486 6482 } 6487 6483 } 6488 6484 } else { ··· 6515 6509 txb = &tnapi->tx_buffers[entry]; 6516 6510 } 6517 6511 6518 - for (i = 0; i < last; i++) { 6512 + for (i = 0; i <= last; i++) { 6519 6513 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6520 6514 6521 6515 entry = NEXT_TX(entry); ··· 6565 6559 dev_kfree_skb(new_skb); 6566 6560 ret = -1; 6567 6561 } else { 6562 + u32 save_entry = *entry; 6563 + 6568 6564 base_flags |= TXD_FLAG_END; 6569 6565 6570 6566 tnapi->tx_buffers[*entry].skb = new_skb; ··· 6576 6568 if (tg3_tx_frag_set(tnapi, entry, 
budget, new_addr, 6577 6569 new_skb->len, base_flags, 6578 6570 mss, vlan)) { 6579 - tg3_tx_skb_unmap(tnapi, *entry, 0); 6571 + tg3_tx_skb_unmap(tnapi, save_entry, -1); 6580 6572 dev_kfree_skb(new_skb); 6581 6573 ret = -1; 6582 6574 } ··· 6766 6758 6767 6759 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 6768 6760 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), 6769 - mss, vlan)) 6761 + mss, vlan)) { 6770 6762 would_hit_hwbug = 1; 6771 - 6772 6763 /* Now loop through additional data fragments, and queue them. */ 6773 - if (skb_shinfo(skb)->nr_frags > 0) { 6764 + } else if (skb_shinfo(skb)->nr_frags > 0) { 6774 6765 u32 tmp_mss = mss; 6775 6766 6776 6767 if (!tg3_flag(tp, HW_TSO_1) && ··· 6791 6784 if (dma_mapping_error(&tp->pdev->dev, mapping)) 6792 6785 goto dma_error; 6793 6786 6794 - if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 6787 + if (!budget || 6788 + tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 6795 6789 len, base_flags | 6796 6790 ((i == last) ? 
TXD_FLAG_END : 0), 6797 - tmp_mss, vlan)) 6791 + tmp_mss, vlan)) { 6798 6792 would_hit_hwbug = 1; 6793 + break; 6794 + } 6799 6795 } 6800 6796 } 6801 6797 ··· 6838 6828 return NETDEV_TX_OK; 6839 6829 6840 6830 dma_error: 6841 - tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 6831 + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); 6842 6832 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 6843 6833 drop: 6844 6834 dev_kfree_skb(skb); ··· 7291 7281 if (!skb) 7292 7282 continue; 7293 7283 7294 - tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); 7284 + tg3_tx_skb_unmap(tnapi, i, 7285 + skb_shinfo(skb)->nr_frags - 1); 7295 7286 7296 7287 dev_kfree_skb_any(skb); 7297 7288 } ··· 9211 9200 { 9212 9201 struct tg3 *tp = (struct tg3 *) __opaque; 9213 9202 9214 - if (tp->irq_sync) 9203 + if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) 9215 9204 goto restart_timer; 9216 9205 9217 9206 spin_lock(&tp->lock); ··· 9234 9223 } 9235 9224 9236 9225 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 9237 - tg3_flag_set(tp, RESTART_TIMER); 9238 9226 spin_unlock(&tp->lock); 9239 - schedule_work(&tp->reset_task); 9240 - return; 9227 + tg3_reset_task_schedule(tp); 9228 + goto restart_timer; 9241 9229 } 9242 9230 } 9243 9231 ··· 9684 9674 struct tg3_napi *tnapi = &tp->napi[i]; 9685 9675 err = tg3_request_irq(tp, i); 9686 9676 if (err) { 9687 - for (i--; i >= 0; i--) 9677 + for (i--; i >= 0; i--) { 9678 + tnapi = &tp->napi[i]; 9688 9679 free_irq(tnapi->irq_vec, tnapi); 9689 - break; 9680 + } 9681 + goto err_out2; 9690 9682 } 9691 9683 } 9692 - 9693 - if (err) 9694 - goto err_out2; 9695 9684 9696 9685 tg3_full_lock(tp, 0); 9697 9686 ··· 9792 9783 struct tg3 *tp = netdev_priv(dev); 9793 9784 9794 9785 tg3_napi_disable(tp); 9795 - cancel_work_sync(&tp->reset_task); 9786 + tg3_reset_task_cancel(tp); 9796 9787 9797 9788 netif_tx_stop_all_queues(dev); 9798 9789 ··· 11529 11520 break; 11530 11521 } 11531 11522 11532 - tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); 11523 + tg3_tx_skb_unmap(tnapi, 
tnapi->tx_prod - 1, -1); 11533 11524 dev_kfree_skb(skb); 11534 11525 11535 11526 if (tx_idx != tnapi->tx_prod) ··· 14237 14228 val = tr32(MEMARB_MODE); 14238 14229 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 14239 14230 14240 - if (tg3_flag(tp, PCIX_MODE)) { 14241 - pci_read_config_dword(tp->pdev, 14242 - tp->pcix_cap + PCI_X_STATUS, &val); 14243 - tp->pci_fn = val & 0x7; 14244 - } else { 14245 - tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 14231 + tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 14232 + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 14233 + tg3_flag(tp, 5780_CLASS)) { 14234 + if (tg3_flag(tp, PCIX_MODE)) { 14235 + pci_read_config_dword(tp->pdev, 14236 + tp->pcix_cap + PCI_X_STATUS, 14237 + &val); 14238 + tp->pci_fn = val & 0x7; 14239 + } 14240 + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 14241 + tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 14242 + if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == 14243 + NIC_SRAM_CPMUSTAT_SIG) { 14244 + tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717; 14245 + tp->pci_fn = tp->pci_fn ? 1 : 0; 14246 + } 14247 + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 14248 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { 14249 + tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); 14250 + if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == 14251 + NIC_SRAM_CPMUSTAT_SIG) { 14252 + tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> 14253 + TG3_CPMU_STATUS_FSHFT_5719; 14254 + } 14246 14255 } 14247 14256 14248 14257 /* Get eeprom hw config before calling tg3_set_power_state(). 
··· 15692 15665 if (tp->fw) 15693 15666 release_firmware(tp->fw); 15694 15667 15695 - cancel_work_sync(&tp->reset_task); 15668 + tg3_reset_task_cancel(tp); 15696 15669 15697 15670 if (tg3_flag(tp, USE_PHYLIB)) { 15698 15671 tg3_phy_fini(tp); ··· 15726 15699 if (!netif_running(dev)) 15727 15700 return 0; 15728 15701 15729 - flush_work_sync(&tp->reset_task); 15702 + tg3_reset_task_cancel(tp); 15730 15703 tg3_phy_stop(tp); 15731 15704 tg3_netif_stop(tp); 15732 15705 ··· 15839 15812 tg3_netif_stop(tp); 15840 15813 15841 15814 del_timer_sync(&tp->timer); 15842 - tg3_flag_clear(tp, RESTART_TIMER); 15843 15815 15844 15816 /* Want to make sure that the reset task doesn't run */ 15845 - cancel_work_sync(&tp->reset_task); 15817 + tg3_reset_task_cancel(tp); 15846 15818 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 15847 - tg3_flag_clear(tp, RESTART_TIMER); 15848 15819 15849 15820 netif_device_detach(netdev); 15850 15821
+17 -4
drivers/net/ethernet/broadcom/tg3.h
··· 1095 1095 #define TG3_CPMU_CLCK_ORIDE 0x00003624 1096 1096 #define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 1097 1097 1098 + #define TG3_CPMU_STATUS 0x0000362c 1099 + #define TG3_CPMU_STATUS_FMSK_5717 0x20000000 1100 + #define TG3_CPMU_STATUS_FMSK_5719 0xc0000000 1101 + #define TG3_CPMU_STATUS_FSHFT_5719 30 1102 + 1098 1103 #define TG3_CPMU_CLCK_STAT 0x00003630 1099 1104 #define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 1100 1105 #define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 ··· 2133 2128 #define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 2134 2129 #define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 2135 2130 2131 + #define NIC_SRAM_CPMU_STATUS 0x00000e00 2132 + #define NIC_SRAM_CPMUSTAT_SIG 0x0000362c 2133 + #define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff 2134 + 2136 2135 #define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 2137 2136 2138 2137 #define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 ··· 2353 2344 #define APE_PER_LOCK_GRANT_DRIVER 0x00001000 2354 2345 2355 2346 /* APE convenience enumerations. */ 2356 - #define TG3_APE_LOCK_GRC 1 2357 - #define TG3_APE_LOCK_MEM 4 2358 - #define TG3_APE_LOCK_GPIO 7 2347 + #define TG3_APE_LOCK_PHY0 0 2348 + #define TG3_APE_LOCK_GRC 1 2349 + #define TG3_APE_LOCK_PHY1 2 2350 + #define TG3_APE_LOCK_PHY2 3 2351 + #define TG3_APE_LOCK_MEM 4 2352 + #define TG3_APE_LOCK_PHY3 5 2353 + #define TG3_APE_LOCK_GPIO 7 2359 2354 2360 2355 #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 2361 2356 ··· 2879 2866 TG3_FLAG_JUMBO_CAPABLE, 2880 2867 TG3_FLAG_CHIP_RESETTING, 2881 2868 TG3_FLAG_INIT_COMPLETE, 2882 - TG3_FLAG_RESTART_TIMER, 2883 2869 TG3_FLAG_TSO_BUG, 2884 2870 TG3_FLAG_IS_5788, 2885 2871 TG3_FLAG_MAX_RXPEND_64, ··· 2921 2909 TG3_FLAG_APE_HAS_NCSI, 2922 2910 TG3_FLAG_5717_PLUS, 2923 2911 TG3_FLAG_4K_FIFO_LIMIT, 2912 + TG3_FLAG_RESET_TASK_PENDING, 2924 2913 2925 2914 /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ 2926 2915 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
+1 -2
drivers/net/ethernet/freescale/Kconfig
··· 7 7 default y 8 8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ 9 9 M523x || M527x || M5272 || M528x || M520x || M532x || \ 10 - ARCH_MXC || ARCH_MXS || \ 11 - (PPC_MPC52xx && PPC_BESTCOMM) 10 + ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) 12 11 ---help--- 13 12 If you have a network (Ethernet) card belonging to this class, say Y 14 13 and read the Ethernet-HOWTO, available from
+5 -1
drivers/net/ethernet/intel/Kconfig
··· 5 5 config NET_VENDOR_INTEL 6 6 bool "Intel devices" 7 7 default y 8 - depends on PCI || PCI_MSI 8 + depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \ 9 + ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ 10 + GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \ 11 + (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \ 12 + EXPERIMENTAL 9 13 ---help--- 10 14 If you have a network (Ethernet) card belonging to this class, say Y 11 15 and read the Ethernet-HOWTO, available from
+2
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
··· 442 442 443 443 int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) 444 444 { 445 + #ifdef CONFIG_PCI_IOV 445 446 int i; 446 447 for (i = 0; i < adapter->num_vfs; i++) { 447 448 if (adapter->vfinfo[i].vfdev->dev_flags & 448 449 PCI_DEV_FLAGS_ASSIGNED) 449 450 return true; 450 451 } 452 + #endif 451 453 return false; 452 454 } 453 455
+2 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
··· 42 42 int ixgbe_ndo_get_vf_config(struct net_device *netdev, 43 43 int vf, struct ifla_vf_info *ivi); 44 44 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); 45 - #ifdef CONFIG_PCI_IOV 46 45 void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); 46 + int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter); 47 + #ifdef CONFIG_PCI_IOV 47 48 void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, 48 49 const struct ixgbe_info *ii); 49 - int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter); 50 50 #endif 51 51 52 52
-11
drivers/net/ethernet/marvell/sky2.c
··· 366 366 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); 367 367 } 368 368 } else { 369 - if (hw->chip_id >= CHIP_ID_YUKON_OPT) { 370 - u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2); 371 - 372 - /* enable PHY Reverse Auto-Negotiation */ 373 - ctrl2 |= 1u << 13; 374 - 375 - /* Write PHY changes (SW-reset must follow) */ 376 - gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2); 377 - } 378 - 379 - 380 369 /* disable energy detect */ 381 370 ctrl &= ~PHY_M_PC_EN_DET_MSK; 382 371
+4 -1
drivers/net/ethernet/natsemi/Kconfig
··· 5 5 config NET_VENDOR_NATSEMI 6 6 bool "National Semi-conductor devices" 7 7 default y 8 - depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000 8 + depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \ 9 + ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \ 10 + MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \ 11 + XTENSA_PLATFORM_XT2000 || ZORRO 9 12 ---help--- 10 13 If you have a network (Ethernet) card belonging to this class, say Y 11 14 and read the Ethernet-HOWTO, available from
+31 -57
drivers/net/ethernet/nvidia/forcedeth.c
··· 1682 1682 np->estats.tx_pause += readl(base + NvRegTxPause); 1683 1683 np->estats.rx_pause += readl(base + NvRegRxPause); 1684 1684 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1685 + np->estats.rx_errors_total += np->estats.rx_drop_frame; 1685 1686 } 1686 1687 1687 1688 if (np->driver_data & DEV_HAS_STATISTICS_V3) { ··· 1707 1706 nv_get_hw_stats(dev); 1708 1707 1709 1708 /* copy to net_device stats */ 1709 + dev->stats.tx_packets = np->estats.tx_packets; 1710 + dev->stats.rx_bytes = np->estats.rx_bytes; 1710 1711 dev->stats.tx_bytes = np->estats.tx_bytes; 1711 1712 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1712 1713 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1713 1714 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1714 1715 dev->stats.rx_over_errors = np->estats.rx_over_errors; 1716 + dev->stats.rx_fifo_errors = np->estats.rx_drop_frame; 1715 1717 dev->stats.rx_errors = np->estats.rx_errors_total; 1716 1718 dev->stats.tx_errors = np->estats.tx_errors_total; 1717 1719 } ··· 2103 2099 2104 2100 /* add fragments to entries count */ 2105 2101 for (i = 0; i < fragments; i++) { 2106 - u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2102 + u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2107 2103 2108 - entries += (size >> NV_TX2_TSO_MAX_SHIFT) + 2109 - ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2104 + entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + 2105 + ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2110 2106 } 2111 2107 2112 2108 spin_lock_irqsave(&np->lock, flags); ··· 2145 2141 /* setup the fragments */ 2146 2142 for (i = 0; i < fragments; i++) { 2147 2143 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2148 - u32 size = skb_frag_size(frag); 2144 + u32 frag_size = skb_frag_size(frag); 2149 2145 offset = 0; 2150 2146 2151 2147 do { 2152 2148 prev_tx = put_tx; 2153 2149 prev_tx_ctx = np->put_tx_ctx; 2154 - bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; 2150 + bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; 2155 2151 np->put_tx_ctx->dma = skb_frag_dma_map( 2156 2152 &np->pci_dev->dev, 2157 2153 frag, offset, ··· 2163 2159 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2164 2160 2165 2161 offset += bcnt; 2166 - size -= bcnt; 2162 + frag_size -= bcnt; 2167 2163 if (unlikely(put_tx++ == np->last_tx.orig)) 2168 2164 put_tx = np->first_tx.orig; 2169 2165 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2170 2166 np->put_tx_ctx = np->first_tx_ctx; 2171 - } while (size); 2167 + } while (frag_size); 2172 2168 } 2173 2169 2174 2170 /* set last fragment flag */ ··· 2217 2213 2218 2214 /* add fragments to entries count */ 2219 2215 for (i = 0; i < fragments; i++) { 2220 - u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2216 + u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2221 2217 2222 - entries += (size >> NV_TX2_TSO_MAX_SHIFT) + 2223 - ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2218 + entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + 2219 + ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2224 2220 } 2225 2221 2226 2222 spin_lock_irqsave(&np->lock, flags); ··· 2261 2257 /* setup the fragments */ 2262 2258 for (i = 0; i < fragments; i++) { 2263 2259 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2264 - u32 size = skb_frag_size(frag); 2260 + u32 frag_size = skb_frag_size(frag); 2265 2261 offset = 0; 2266 2262 2267 2263 do { 2268 2264 prev_tx = put_tx; 2269 2265 prev_tx_ctx = np->put_tx_ctx; 2270 - bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2266 + bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : frag_size; 2271 2267 np->put_tx_ctx->dma = skb_frag_dma_map( 2272 2268 &np->pci_dev->dev, 2273 2269 frag, offset, ··· 2280 2276 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2281 2277 2282 2278 offset += bcnt; 2283 - size -= bcnt; 2279 + frag_size -= bcnt; 2284 2280 if (unlikely(put_tx++ == np->last_tx.ex)) 2285 2281 put_tx = np->first_tx.ex; 2286 2282 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2287 2283 np->put_tx_ctx = np->first_tx_ctx; 2288 - } while (size); 2284 + } while (frag_size); 2289 2285 } 2290 2286 2291 2287 /* set last fragment flag */ ··· 2378 2374 if (np->desc_ver == DESC_VER_1) { 2379 2375 if (flags & NV_TX_LASTPACKET) { 2380 2376 if (flags & NV_TX_ERROR) { 2381 - if (flags & NV_TX_UNDERFLOW) 2382 - dev->stats.tx_fifo_errors++; 2383 - if (flags & NV_TX_CARRIERLOST) 2384 - dev->stats.tx_carrier_errors++; 2385 2377 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2386 2378 nv_legacybackoff_reseed(dev); 2387 - dev->stats.tx_errors++; 2388 - } else { 2389 - dev->stats.tx_packets++; 2390 - dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2391 2379 } 2392 2380 dev_kfree_skb_any(np->get_tx_ctx->skb); 2393 2381 np->get_tx_ctx->skb = NULL; ··· 2388 2392 } else { 2389 2393 if (flags & NV_TX2_LASTPACKET) { 2390 2394 if (flags & NV_TX2_ERROR) { 2391 - if (flags & NV_TX2_UNDERFLOW) 2392 - dev->stats.tx_fifo_errors++; 2393 - if (flags & NV_TX2_CARRIERLOST) 2394 - dev->stats.tx_carrier_errors++; 2395 2395 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2396 2396 nv_legacybackoff_reseed(dev); 2397 - dev->stats.tx_errors++; 2398 - } else { 2399 - dev->stats.tx_packets++; 2400 - dev->stats.tx_bytes += np->get_tx_ctx->skb->len; 2401 2397 } 2402 2398 dev_kfree_skb_any(np->get_tx_ctx->skb); 2403 2399 np->get_tx_ctx->skb = NULL; ··· 2422 2434 nv_unmap_txskb(np, np->get_tx_ctx); 2423 2435 2424 2436 if (flags & NV_TX2_LASTPACKET) { 2425 - if (!(flags & NV_TX2_ERROR)) 2426 - 
dev->stats.tx_packets++; 2427 - else { 2437 + if (flags & NV_TX2_ERROR) { 2428 2438 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2429 2439 if (np->driver_data & DEV_HAS_GEAR_MODE) 2430 2440 nv_gear_backoff_reseed(dev); ··· 2622 2636 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { 2623 2637 len = nv_getlen(dev, skb->data, len); 2624 2638 if (len < 0) { 2625 - dev->stats.rx_errors++; 2626 2639 dev_kfree_skb(skb); 2627 2640 goto next_pkt; 2628 2641 } ··· 2635 2650 else { 2636 2651 if (flags & NV_RX_MISSEDFRAME) 2637 2652 dev->stats.rx_missed_errors++; 2638 - if (flags & NV_RX_CRCERR) 2639 - dev->stats.rx_crc_errors++; 2640 - if (flags & NV_RX_OVERFLOW) 2641 - dev->stats.rx_over_errors++; 2642 - dev->stats.rx_errors++; 2643 2653 dev_kfree_skb(skb); 2644 2654 goto next_pkt; 2645 2655 } ··· 2650 2670 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2651 2671 len = nv_getlen(dev, skb->data, len); 2652 2672 if (len < 0) { 2653 - dev->stats.rx_errors++; 2654 2673 dev_kfree_skb(skb); 2655 2674 goto next_pkt; 2656 2675 } ··· 2661 2682 } 2662 2683 /* the rest are hard errors */ 2663 2684 else { 2664 - if (flags & NV_RX2_CRCERR) 2665 - dev->stats.rx_crc_errors++; 2666 - if (flags & NV_RX2_OVERFLOW) 2667 - dev->stats.rx_over_errors++; 2668 - dev->stats.rx_errors++; 2669 2685 dev_kfree_skb(skb); 2670 2686 goto next_pkt; 2671 2687 } ··· 2678 2704 skb->protocol = eth_type_trans(skb, dev); 2679 2705 napi_gro_receive(&np->napi, skb); 2680 2706 dev->stats.rx_packets++; 2681 - dev->stats.rx_bytes += len; 2682 2707 next_pkt: 2683 2708 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2684 2709 np->get_rx.orig = np->first_rx.orig; ··· 2760 2787 __vlan_hwaccel_put_tag(skb, vid); 2761 2788 } 2762 2789 napi_gro_receive(&np->napi, skb); 2763 - 2764 2790 dev->stats.rx_packets++; 2765 - dev->stats.rx_bytes += len; 2766 2791 } else { 2767 2792 dev_kfree_skb(skb); 2768 2793 } ··· 2933 2962 struct netdev_hw_addr *ha; 2934 2963 2935 2964 
netdev_for_each_mc_addr(ha, dev) { 2936 - unsigned char *addr = ha->addr; 2965 + unsigned char *hw_addr = ha->addr; 2937 2966 u32 a, b; 2938 2967 2939 - a = le32_to_cpu(*(__le32 *) addr); 2940 - b = le16_to_cpu(*(__le16 *) (&addr[4])); 2968 + a = le32_to_cpu(*(__le32 *) hw_addr); 2969 + b = le16_to_cpu(*(__le16 *) (&hw_addr[4])); 2941 2970 alwaysOn[0] &= a; 2942 2971 alwaysOff[0] &= ~a; 2943 2972 alwaysOn[1] &= b; ··· 3369 3398 3370 3399 for (i = 0;; i++) { 3371 3400 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3372 - writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3401 + writel(events, base + NvRegMSIXIrqStatus); 3402 + netdev_dbg(dev, "tx irq events: %08x\n", events); 3373 3403 if (!(events & np->irqmask)) 3374 3404 break; 3375 3405 ··· 3481 3509 3482 3510 for (i = 0;; i++) { 3483 3511 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3484 - writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3512 + writel(events, base + NvRegMSIXIrqStatus); 3513 + netdev_dbg(dev, "rx irq events: %08x\n", events); 3485 3514 if (!(events & np->irqmask)) 3486 3515 break; 3487 3516 ··· 3526 3553 3527 3554 for (i = 0;; i++) { 3528 3555 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3529 - writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3556 + writel(events, base + NvRegMSIXIrqStatus); 3557 + netdev_dbg(dev, "irq events: %08x\n", events); 3530 3558 if (!(events & np->irqmask)) 3531 3559 break; 3532 3560 ··· 3591 3617 3592 3618 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3593 3619 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3594 - writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3620 + writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3595 3621 } else { 3596 3622 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3597 - writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3623 + writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3598 3624 } 3599 3625 pci_push(base); 3600 3626 if (!(events & 
NVREG_IRQ_TIMER)) ··· 4540 4566 struct fe_priv *np = netdev_priv(dev); 4541 4567 4542 4568 /* update stats */ 4543 - nv_do_stats_poll((unsigned long)dev); 4569 + nv_get_hw_stats(dev); 4544 4570 4545 4571 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4546 4572 }
+7
drivers/net/macvlan.c
··· 192 192 */ 193 193 macvlan_broadcast(skb, port, src->dev, 194 194 MACVLAN_MODE_VEPA); 195 + else { 196 + /* forward to original port. */ 197 + vlan = src; 198 + ret = macvlan_broadcast_one(skb, vlan, eth, 0); 199 + goto out; 200 + } 201 + 195 202 return RX_HANDLER_PASS; 196 203 } 197 204
+2 -1
drivers/net/usb/usbnet.c
··· 1057 1057 unsigned long flags; 1058 1058 int retval; 1059 1059 1060 - skb_tx_timestamp(skb); 1060 + if (skb) 1061 + skb_tx_timestamp(skb); 1061 1062 1062 1063 // some devices want funky USB-level framing, for 1063 1064 // win32 driver (usually) and/or hardware quirks
-4
drivers/net/wireless/ath/ath9k/ar9002_calib.c
··· 868 868 /* Do PA Calibration */ 869 869 ar9002_hw_pa_cal(ah, true); 870 870 871 - /* Do NF Calibration after DC offset and other calibrations */ 872 - ath9k_hw_loadnf(ah, chan); 873 - ath9k_hw_start_nfcal(ah, true); 874 - 875 871 if (ah->caldata) 876 872 ah->caldata->nfcal_pending = true; 877 873
+6 -5
drivers/net/wireless/ath/ath9k/ar9003_calib.c
··· 908 908 int i; 909 909 bool restore; 910 910 911 - if (!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT) || !ah->caldata) 911 + if (!ah->caldata) 912 912 return false; 913 913 914 914 hist = &ah->caldata->rtt_hist; 915 + if (!hist->num_readings) 916 + return false; 917 + 915 918 ar9003_hw_rtt_enable(ah); 916 - ar9003_hw_rtt_set_mask(ah, 0x10); 919 + ar9003_hw_rtt_set_mask(ah, 0x00); 917 920 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 918 921 if (!(ah->rxchainmask & (1 << i))) 919 922 continue; ··· 1073 1070 if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) { 1074 1071 u32 *table; 1075 1072 1073 + hist->num_readings++; 1076 1074 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 1077 1075 if (!(ah->rxchainmask & (1 << i))) 1078 1076 continue; ··· 1084 1080 1085 1081 ar9003_hw_rtt_disable(ah); 1086 1082 } 1087 - 1088 - ath9k_hw_loadnf(ah, chan); 1089 - ath9k_hw_start_nfcal(ah, true); 1090 1083 1091 1084 /* Initialize list pointers */ 1092 1085 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+17 -17
drivers/net/wireless/ath/ath9k/ar9003_phy.h
··· 572 572 573 573 #define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300) 574 574 575 - #define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + AR_SREV_9485(ah) ? \ 576 - 0x3c4 : 0x444) 577 - #define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + AR_SREV_9485(ah) ? \ 578 - 0x3c8 : 0x448) 579 - #define AR_PHY_TX_IQCAL_START (AR_SM_BASE + AR_SREV_9485(ah) ? \ 580 - 0x3c4 : 0x440) 581 - #define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + AR_SREV_9485(ah) ? \ 582 - 0x3f0 : 0x48c) 575 + #define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ 576 + 0x3c4 : 0x444)) 577 + #define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ 578 + 0x3c8 : 0x448)) 579 + #define AR_PHY_TX_IQCAL_START (AR_SM_BASE + (AR_SREV_9485(ah) ? \ 580 + 0x3c4 : 0x440)) 581 + #define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ 582 + 0x3f0 : 0x48c)) 583 583 #define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \ 584 584 (AR_SREV_9485(ah) ? \ 585 585 0x3d0 : 0x450) + ((_i) << 2)) ··· 651 651 #define AR_SWITCH_TABLE_ALL_S (0) 652 652 653 653 #define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ 654 - (AR_SREV_9485(ah) ? 0x1628c : 0x16294)) 654 + (AR_SREV_9462(ah) ? 0x16294 : 0x1628c)) 655 655 656 656 #define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 657 657 #define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 ··· 668 668 #define AR_PHY_65NM_CH2_RXTX2 0x16904 669 669 670 670 #define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ 671 - (AR_SREV_9485(ah) ? 0x16284 : 0x16290)) 671 + (AR_SREV_9462(ah) ? 0x16290 : 0x16284)) 672 672 #define AR_CH0_TOP2_XPABIASLVL 0xf000 673 673 #define AR_CH0_TOP2_XPABIASLVL_S 12 674 674 675 675 #define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ 676 - (AR_SREV_9485(ah) ? 0x16290 : 0x16298)) 676 + (AR_SREV_9462(ah) ? 
0x16298 : 0x16290)) 677 677 #define AR_CH0_XTAL_CAPINDAC 0x7f000000 678 678 #define AR_CH0_XTAL_CAPINDAC_S 24 679 679 #define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000 ··· 908 908 #define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208) 909 909 #define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c) 910 910 #define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) 911 - #define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9300(ah) ? \ 912 - 0x240 : 0x280)) 911 + #define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9462(ah) ? \ 912 + 0x280 : 0x240)) 913 913 #define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240) 914 914 #define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff 915 915 #define AR_PHY_TPC_19_B1_ALPHA_THERM_S 0 ··· 931 931 #define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0) 932 932 #define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4) 933 933 934 - #define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + (i) ? \ 935 - AR_SM1_BASE : AR_SM_BASE) 936 - #define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + (i) ? \ 937 - AR_SM1_BASE : AR_SM_BASE) 934 + #define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \ 935 + AR_SM1_BASE : AR_SM_BASE)) 936 + #define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + ((i) ? \ 937 + AR_SM1_BASE : AR_SM_BASE)) 938 938 /* 939 939 * Channel 2 Register Map 940 940 */
+5 -5
drivers/net/wireless/ath/ath9k/ar9485_initvals.h
··· 521 521 {0x000160ac, 0x24611800}, 522 522 {0x000160b0, 0x03284f3e}, 523 523 {0x0001610c, 0x00170000}, 524 - {0x00016140, 0x10804008}, 524 + {0x00016140, 0x50804008}, 525 525 }; 526 526 527 527 static const u32 ar9485_1_1_mac_postamble[][5] = { ··· 603 603 604 604 static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { 605 605 /* Addr allmodes */ 606 - {0x00018c00, 0x10052e5e}, 606 + {0x00018c00, 0x18052e5e}, 607 607 {0x00018c04, 0x000801d8}, 608 608 {0x00018c08, 0x0000080c}, 609 609 }; ··· 776 776 777 777 static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { 778 778 /* Addr allmodes */ 779 - {0x00018c00, 0x10013e5e}, 779 + {0x00018c00, 0x18013e5e}, 780 780 {0x00018c04, 0x000801d8}, 781 781 {0x00018c08, 0x0000080c}, 782 782 }; ··· 882 882 883 883 static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { 884 884 /* Addr allmodes */ 885 - {0x00018c00, 0x10012e5e}, 885 + {0x00018c00, 0x18012e5e}, 886 886 {0x00018c04, 0x000801d8}, 887 887 {0x00018c08, 0x0000080c}, 888 888 }; ··· 1021 1021 1022 1022 static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { 1023 1023 /* Addr allmodes */ 1024 - {0x00018c00, 0x10053e5e}, 1024 + {0x00018c00, 0x18053e5e}, 1025 1025 {0x00018c04, 0x000801d8}, 1026 1026 {0x00018c08, 0x0000080c}, 1027 1027 };
+3
drivers/net/wireless/ath/ath9k/hw.c
··· 1725 1725 if (!ath9k_hw_init_cal(ah, chan)) 1726 1726 return -EIO; 1727 1727 1728 + ath9k_hw_loadnf(ah, chan); 1729 + ath9k_hw_start_nfcal(ah, true); 1730 + 1728 1731 ENABLE_REGWRITE_BUFFER(ah); 1729 1732 1730 1733 ath9k_hw_restore_chainmask(ah);
+7 -4
drivers/net/wireless/ath/carl9170/tx.c
··· 296 296 super = (void *)skb->data; 297 297 txinfo->status.ampdu_len = super->s.rix; 298 298 txinfo->status.ampdu_ack_len = super->s.cnt; 299 - } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) { 299 + } else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) && 300 + !(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) { 300 301 /* 301 302 * drop redundant tx_status reports: 302 303 * ··· 309 308 * 310 309 * 3. minstrel_ht is picky, it only accepts 311 310 * reports of frames with the TX_STATUS_AMPDU flag. 311 + * 312 + * 4. mac80211 is not particularly interested in 313 + * feedback either [CTL_REQ_TX_STATUS not set] 312 314 */ 313 315 314 316 dev_kfree_skb_any(skb); 315 317 return; 316 318 } else { 317 319 /* 318 - * Frame has failed, but we want to keep it in 319 - * case it was lost due to a power-state 320 - * transition. 320 + * Either the frame transmission has failed or 321 + * mac80211 requested tx status. 321 322 */ 322 323 } 323 324 }
-1
drivers/net/wireless/b43/xmit.c
··· 827 827 #endif 828 828 return; 829 829 drop: 830 - b43dbg(dev->wl, "RX: Packet dropped\n"); 831 830 dev_kfree_skb_any(skb); 832 831 } 833 832
-10
drivers/net/wireless/iwlwifi/iwl-core.c
··· 1755 1755 { 1756 1756 if (iwl_trans_check_stuck_queue(trans(priv), txq)) { 1757 1757 int ret; 1758 - if (txq == priv->shrd->cmd_queue) { 1759 - /* 1760 - * validate command queue still working 1761 - * by sending "ECHO" command 1762 - */ 1763 - if (!iwl_cmd_echo_test(priv)) 1764 - return 0; 1765 - else 1766 - IWL_DEBUG_HC(priv, "echo testing fail\n"); 1767 - } 1768 1758 ret = iwl_force_reset(priv, IWL_FW_RESET, false); 1769 1759 return (ret == -EAGAIN) ? 0 : 1; 1770 1760 }
+3 -5
drivers/net/wireless/iwlwifi/iwl-pci.c
··· 446 446 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 447 447 448 448 err = pci_enable_msi(pdev); 449 - if (err) { 450 - dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed"); 451 - goto out_iounmap; 452 - } 449 + if (err) 450 + dev_printk(KERN_ERR, &pdev->dev, 451 + "pci_enable_msi failed(0X%x)", err); 453 452 454 453 /* TODO: Move this away, not needed if not MSI */ 455 454 /* enable rfkill interrupt: hw bug w/a */ ··· 469 470 470 471 out_disable_msi: 471 472 pci_disable_msi(pdev); 472 - out_iounmap: 473 473 pci_iounmap(pdev, pci_bus->hw_base); 474 474 out_pci_release_regions: 475 475 pci_set_drvdata(pdev, NULL);
+8 -4
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
··· 407 407 struct iwl_queue *q = &txq->q; 408 408 enum dma_data_direction dma_dir; 409 409 unsigned long flags; 410 + spinlock_t *lock; 410 411 411 412 if (!q->n_bd) 412 413 return; ··· 415 414 /* In the command queue, all the TBs are mapped as BIDI 416 415 * so unmap them as such. 417 416 */ 418 - if (txq_id == trans->shrd->cmd_queue) 417 + if (txq_id == trans->shrd->cmd_queue) { 419 418 dma_dir = DMA_BIDIRECTIONAL; 420 - else 419 + lock = &trans->hcmd_lock; 420 + } else { 421 421 dma_dir = DMA_TO_DEVICE; 422 + lock = &trans->shrd->sta_lock; 423 + } 422 424 423 - spin_lock_irqsave(&trans->shrd->sta_lock, flags); 425 + spin_lock_irqsave(lock, flags); 424 426 while (q->write_ptr != q->read_ptr) { 425 427 /* The read_ptr needs to bound by q->n_window */ 426 428 iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr), 427 429 dma_dir); 428 430 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 429 431 } 430 - spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); 432 + spin_unlock_irqrestore(lock, flags); 431 433 } 432 434 433 435 /**
+17 -8
drivers/net/wireless/libertas/cfg.c
··· 728 728 le16_to_cpu(scan_cmd->hdr.size), 729 729 lbs_ret_scan, 0); 730 730 731 - if (priv->scan_channel >= priv->scan_req->n_channels) { 731 + if (priv->scan_channel >= priv->scan_req->n_channels) 732 732 /* Mark scan done */ 733 - if (priv->internal_scan) 734 - kfree(priv->scan_req); 735 - else 736 - cfg80211_scan_done(priv->scan_req, false); 737 - 738 - priv->scan_req = NULL; 739 - } 733 + lbs_scan_done(priv); 740 734 741 735 /* Restart network */ 742 736 if (carrier) ··· 766 772 priv->internal_scan = internal; 767 773 768 774 lbs_deb_leave(LBS_DEB_CFG80211); 775 + } 776 + 777 + /* 778 + * Clean up priv->scan_req. Should be used to handle the allocation details. 779 + */ 780 + void lbs_scan_done(struct lbs_private *priv) 781 + { 782 + WARN_ON(!priv->scan_req); 783 + 784 + if (priv->internal_scan) 785 + kfree(priv->scan_req); 786 + else 787 + cfg80211_scan_done(priv->scan_req, false); 788 + 789 + priv->scan_req = NULL; 769 790 } 770 791 771 792 static int lbs_cfg_scan(struct wiphy *wiphy,
+1
drivers/net/wireless/libertas/cfg.h
··· 16 16 void lbs_send_disconnect_notification(struct lbs_private *priv); 17 17 void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); 18 18 19 + void lbs_scan_done(struct lbs_private *priv); 19 20 void lbs_scan_deinit(struct lbs_private *priv); 20 21 int lbs_disconnect(struct lbs_private *priv, u16 reason); 21 22
+2 -4
drivers/net/wireless/libertas/main.c
··· 255 255 256 256 lbs_update_mcast(priv); 257 257 cancel_delayed_work_sync(&priv->scan_work); 258 - if (priv->scan_req) { 259 - cfg80211_scan_done(priv->scan_req, false); 260 - priv->scan_req = NULL; 261 - } 258 + if (priv->scan_req) 259 + lbs_scan_done(priv); 262 260 263 261 netif_carrier_off(priv->dev); 264 262
+2
include/linux/ethtool.h
··· 1097 1097 #define SPEED_1000 1000 1098 1098 #define SPEED_2500 2500 1099 1099 #define SPEED_10000 10000 1100 + #define SPEED_UNKNOWN -1 1100 1101 1101 1102 /* Duplex, half or full. */ 1102 1103 #define DUPLEX_HALF 0x00 1103 1104 #define DUPLEX_FULL 0x01 1105 + #define DUPLEX_UNKNOWN 0xff 1104 1106 1105 1107 /* Which connector port. */ 1106 1108 #define PORT_TP 0x00
+1
include/net/bluetooth/rfcomm.h
··· 211 211 #define RFCOMM_AUTH_ACCEPT 6 212 212 #define RFCOMM_AUTH_REJECT 7 213 213 #define RFCOMM_DEFER_SETUP 8 214 + #define RFCOMM_ENC_DROP 9 214 215 215 216 /* Scheduling flags and events */ 216 217 #define RFCOMM_SCHED_WAKEUP 31
+2 -1
include/net/mac80211.h
··· 3567 3567 return i; 3568 3568 3569 3569 /* warn when we cannot find a rate. */ 3570 - WARN_ON(1); 3570 + WARN_ON_ONCE(1); 3571 3571 3572 + /* and return 0 (the lowest index) */ 3572 3573 return 0; 3573 3574 } 3574 3575
+9 -2
include/net/netlink.h
··· 192 192 * NLA_NUL_STRING Maximum length of string (excluding NUL) 193 193 * NLA_FLAG Unused 194 194 * NLA_BINARY Maximum length of attribute payload 195 - * NLA_NESTED_COMPAT Exact length of structure payload 196 - * All other Exact length of attribute payload 195 + * NLA_NESTED Don't use `len' field -- length verification is 196 + * done by checking len of nested header (or empty) 197 + * NLA_NESTED_COMPAT Minimum length of structure payload 198 + * NLA_U8, NLA_U16, 199 + * NLA_U32, NLA_U64, 200 + * NLA_MSECS Leaving the length field zero will verify the 201 + * given type fits, using it verifies minimum length 202 + * just like "All other" 203 + * All other Minimum length of attribute payload 197 204 * 198 205 * Example: 199 206 * static const struct nla_policy my_policy[ATTR_MAX+1] = {
+1
lib/nlattr.c
··· 20 20 [NLA_U16] = sizeof(u16), 21 21 [NLA_U32] = sizeof(u32), 22 22 [NLA_U64] = sizeof(u64), 23 + [NLA_MSECS] = sizeof(u64), 23 24 [NLA_NESTED] = NLA_HDRLEN, 24 25 }; 25 26
+1 -1
net/bluetooth/hci_core.c
··· 613 613 if (!test_bit(HCI_RAW, &hdev->flags)) { 614 614 set_bit(HCI_INIT, &hdev->flags); 615 615 __hci_request(hdev, hci_reset_req, 0, 616 - msecs_to_jiffies(250)); 616 + msecs_to_jiffies(HCI_INIT_TIMEOUT)); 617 617 clear_bit(HCI_INIT, &hdev->flags); 618 618 } 619 619
-2
net/bluetooth/mgmt.c
··· 148 148 149 149 hci_del_off_timer(d); 150 150 151 - set_bit(HCI_MGMT, &d->flags); 152 - 153 151 if (test_bit(HCI_SETUP, &d->flags)) 154 152 continue; 155 153
+7 -2
net/bluetooth/rfcomm/core.c
··· 1802 1802 continue; 1803 1803 } 1804 1804 1805 + if (test_bit(RFCOMM_ENC_DROP, &d->flags)) { 1806 + __rfcomm_dlc_close(d, ECONNREFUSED); 1807 + continue; 1808 + } 1809 + 1805 1810 if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) { 1806 1811 rfcomm_dlc_clear_timer(d); 1807 1812 if (d->out) { ··· 2082 2077 if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) { 2083 2078 rfcomm_dlc_clear_timer(d); 2084 2079 if (status || encrypt == 0x00) { 2085 - __rfcomm_dlc_close(d, ECONNREFUSED); 2080 + set_bit(RFCOMM_ENC_DROP, &d->flags); 2086 2081 continue; 2087 2082 } 2088 2083 } ··· 2093 2088 rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); 2094 2089 continue; 2095 2090 } else if (d->sec_level == BT_SECURITY_HIGH) { 2096 - __rfcomm_dlc_close(d, ECONNREFUSED); 2091 + set_bit(RFCOMM_ENC_DROP, &d->flags); 2097 2092 continue; 2098 2093 } 2099 2094 }
+6 -6
net/mac80211/cfg.c
··· 832 832 if (is_multicast_ether_addr(mac)) 833 833 return -EINVAL; 834 834 835 + /* Only TDLS-supporting stations can add TDLS peers */ 836 + if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) && 837 + !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) && 838 + sdata->vif.type == NL80211_IFTYPE_STATION)) 839 + return -ENOTSUPP; 840 + 835 841 sta = sta_info_alloc(sdata, mac, GFP_KERNEL); 836 842 if (!sta) 837 843 return -ENOMEM; ··· 846 840 set_sta_flag(sta, WLAN_STA_ASSOC); 847 841 848 842 sta_apply_parameters(local, sta, params); 849 - 850 - /* Only TDLS-supporting stations can add TDLS peers */ 851 - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && 852 - !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) && 853 - sdata->vif.type == NL80211_IFTYPE_STATION)) 854 - return -ENOTSUPP; 855 843 856 844 rate_control_rate_init(sta); 857 845
+1
net/mac80211/ieee80211_i.h
··· 389 389 390 390 unsigned long timers_running; /* used for quiesce/restart */ 391 391 bool powersave; /* powersave requested for this iface */ 392 + bool broken_ap; /* AP is broken -- turn off powersave */ 392 393 enum ieee80211_smps_mode req_smps, /* requested smps mode */ 393 394 ap_smps, /* smps mode AP thinks we're in */ 394 395 driver_smps_mode; /* smps mode request */
+16 -2
net/mac80211/mlme.c
··· 639 639 if (!mgd->powersave) 640 640 return false; 641 641 642 + if (mgd->broken_ap) 643 + return false; 644 + 642 645 if (!mgd->associated) 643 646 return false; 644 647 ··· 1494 1491 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); 1495 1492 1496 1493 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1497 - printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1498 - "set\n", sdata->name, aid); 1494 + printk(KERN_DEBUG 1495 + "%s: invalid AID value 0x%x; bits 15:14 not set\n", 1496 + sdata->name, aid); 1499 1497 aid &= ~(BIT(15) | BIT(14)); 1498 + 1499 + ifmgd->broken_ap = false; 1500 + 1501 + if (aid == 0 || aid > IEEE80211_MAX_AID) { 1502 + printk(KERN_DEBUG 1503 + "%s: invalid AID value %d (out of range), turn off PS\n", 1504 + sdata->name, aid); 1505 + aid = 0; 1506 + ifmgd->broken_ap = true; 1507 + } 1500 1508 1501 1509 pos = mgmt->u.assoc_resp.variable; 1502 1510 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+3 -4
net/mac80211/work.c
··· 1084 1084 continue; 1085 1085 if (wk->chan != local->tmp_channel) 1086 1086 continue; 1087 - if (ieee80211_work_ct_coexists(wk->chan_type, 1088 - local->tmp_channel_type)) 1087 + if (!ieee80211_work_ct_coexists(wk->chan_type, 1088 + local->tmp_channel_type)) 1089 1089 continue; 1090 1090 remain_off_channel = true; 1091 1091 } 1092 1092 1093 1093 if (!remain_off_channel && local->tmp_channel) { 1094 - bool on_oper_chan = ieee80211_cfg_on_oper_channel(local); 1095 1094 local->tmp_channel = NULL; 1096 1095 /* If tmp_channel wasn't operating channel, then 1097 1096 * we need to go back on-channel. ··· 1100 1101 * we still need to do a hardware config. Currently, 1101 1102 * we cannot be here while scanning, however. 1102 1103 */ 1103 - if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan) 1104 + if (!ieee80211_cfg_on_oper_channel(local)) 1104 1105 ieee80211_hw_config(local, 0); 1105 1106 1106 1107 /* At the least, we need to disable offchannel_ps,
-2
net/wanrouter/wanproc.c
··· 81 81 * Iterator 82 82 */ 83 83 static void *r_start(struct seq_file *m, loff_t *pos) 84 - __acquires(kernel_lock) 85 84 { 86 85 struct wan_device *wandev; 87 86 loff_t l = *pos; ··· 102 103 } 103 104 104 105 static void r_stop(struct seq_file *m, void *v) 105 - __releases(kernel_lock) 106 106 { 107 107 mutex_unlock(&config_mutex); 108 108 }