Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (37 commits)
net: deinit automatic LIST_HEAD
net: dont leave active on stack LIST_HEAD
net: provide default_advmss() methods to blackhole dst_ops
tg3: Restrict phy ioctl access
drivers/net: Call netif_carrier_off at the end of the probe
ixgbe: work around for DDP last buffer size
ixgbe: fix panic due to uninitialised pointer
e1000e: flush all writebacks before unload
e1000e: check down flag in tasks
isdn: hisax: Use l2headersize() instead of dup (and buggy) func.
arp_notify: unconditionally send gratuitous ARP for NETDEV_NOTIFY_PEERS.
cxgb4vf: Use defined Mailbox Timeout
cxgb4vf: Quiesce Virtual Interfaces on shutdown ...
cxgb4vf: Behave properly when CONFIG_DEBUG_FS isn't defined ...
cxgb4vf: Check driver parameters in the right place ...
pch_gbe: Fix the MAC Address load issue.
iwlwifi: Delete iwl3945_good_plcp_health.
net/can/softing: make CAN_SOFTING_CS depend on CAN_SOFTING
netfilter: nf_iterate: fix incorrect RCU usage
pch_gbe: Fix the issue that the receiving data is not normal.
...

+328 -202
+2
Documentation/networking/Makefile
@@ -4,6 +4,8 @@
 # List of programs to build
 hostprogs-y := ifenslave
 
+HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
+
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
+3 -2
drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@
 	}
 
 	skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
-	if (!skb && net_ratelimit()) {
-		dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+	if (!skb) {
+		if (net_ratelimit())
+			dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
 		return -ENOMEM;
 	}
 	header = (void *)skb_put(skb, sizeof(*header));
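Note on the solos-pci change: net_ratelimit() only throttles the log message, so it must never gate the error handling itself — the old code skipped the -ENOMEM return (and went on to dereference the NULL skb) whenever the printk was rate-limited out. A minimal sketch of the corrected shape (the function and device names are illustrative, not from the driver):

    #include <linux/skbuff.h>
    #include <linux/net.h>
    #include <linux/device.h>

    /* Sketch: handle the failure unconditionally; rate-limit only the log. */
    static int example_send(struct device *dev, size_t len)
    {
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb) {
            if (net_ratelimit())
                dev_warn(dev, "allocation failed\n");
            return -ENOMEM;   /* always taken on failure */
        }
        /* ... fill and transmit skb ... */
        return 0;
    }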
+2
drivers/bluetooth/ath3k.c
@@ -39,6 +39,8 @@
 	/* Atheros AR3011 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3002) },
 
+	/* Atheros AR9285 Malbec with sflash firmware */
+	{ USB_DEVICE(0x03F0, 0x311D) },
 	{ }	/* Terminating entry */
 };
 
+3
drivers/bluetooth/btusb.c
@@ -102,6 +102,9 @@
 	/* Atheros 3011 with sflash firmware */
 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
 
+	/* Atheros AR9285 Malbec with sflash firmware */
+	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
 	/* Broadcom BCM2035 */
 	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
 	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
+13 -15
drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
-	struct sk_buff *skb, *oskb;
+	struct sk_buff *skb;
 	struct Layer2 *l2 = &st->l2;
 	u_char header[MAX_HEADER_LEN];
-	int i;
+	int i, hdr_space_needed;
 	int unsigned p1;
 	u_long flags;
 
@@ -1261,6 +1261,16 @@
 	if (!skb)
 		return;
 
+	hdr_space_needed = l2headersize(l2, 0);
+	if (hdr_space_needed > skb_headroom(skb)) {
+		struct sk_buff *orig_skb = skb;
+
+		skb = skb_realloc_headroom(skb, hdr_space_needed);
+		if (!skb) {
+			dev_kfree_skb(orig_skb);
+			return;
+		}
+	}
 	spin_lock_irqsave(&l2->lock, flags);
 	if(test_bit(FLG_MOD128, &l2->flag))
 		p1 = (l2->vs - l2->va) % 128;
@@ -1295,19 +1285,7 @@
 		l2->vs = (l2->vs + 1) % 8;
 	}
 	spin_unlock_irqrestore(&l2->lock, flags);
-	p1 = skb->data - skb->head;
-	if (p1 >= i)
-		memcpy(skb_push(skb, i), header, i);
-	else {
-		printk(KERN_WARNING
-		"isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
-		oskb = skb;
-		skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
-		memcpy(skb_put(skb, i), header, i);
-		skb_copy_from_linear_data(oskb,
-			skb_put(skb, oskb->len), oskb->len);
-		dev_kfree_skb(oskb);
-	}
+	memcpy(skb_push(skb, i), header, i);
 	st->l2.l2l1(st, PH_PULL | INDICATION, skb);
 	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
 	if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
+1 -1
drivers/net/can/softing/Kconfig
@@ -18,7 +18,7 @@
 config CAN_SOFTING_CS
 	tristate "Softing Gmbh CAN pcmcia cards"
 	depends on PCMCIA
-	select CAN_SOFTING
+	depends on CAN_SOFTING
 	---help---
 	  Support for PCMCIA cards from Softing Gmbh & some cards
 	  from Vector Gmbh.
+60 -20
drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@
 {
 	int i;
 
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@
  */
 static void cleanup_debugfs(struct adapter *adapter)
 {
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@
 	struct net_device *netdev;
 
 	/*
-	 * Vet our module parameters.
-	 */
-	if (msi != MSI_MSIX && msi != MSI_MSI) {
-		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-			MSI_MSI);
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	/*
 	 * Print our driver banner the first time we're called to initialize a
 	 * device.
 	 */
@@ -2700,11 +2711,11 @@
 	/*
 	 * Set up our debugfs entries.
 	 */
-	if (cxgb4vf_debugfs_root) {
+	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
 		adapter->debugfs_root =
 			debugfs_create_dir(pci_name(pdev),
 					   cxgb4vf_debugfs_root);
-		if (adapter->debugfs_root == NULL)
+		if (IS_ERR_OR_NULL(adapter->debugfs_root))
 			dev_warn(&pdev->dev, "could not create debugfs"
 				 " directory");
 		else
@@ -2759,7 +2770,7 @@
 	 */
 
 err_free_debugfs:
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2791,7 +2802,6 @@
 err_disable_device:
 	pci_disable_device(pdev);
 
-err_out:
 	return err;
 }
 
@@ -2828,7 +2840,7 @@
 	/*
 	 * Tear down our debugfs entries.
 	 */
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2859,6 +2871,46 @@
 	pci_disable_device(pdev);
 	pci_clear_master(pdev);
 	pci_release_regions(pdev);
+}
+
+/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+	struct adapter *adapter;
+	int pidx;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	/*
+	 * Disable all Virtual Interfaces.  This will shut down the
+	 * delivery of all ingress packets into the chip for these
+	 * Virtual Interfaces.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *netdev;
+		struct port_info *pi;
+
+		if (!test_bit(pidx, &adapter->registered_device_map))
+			continue;
+
+		netdev = adapter->port[pidx];
+		if (!netdev)
+			continue;
+
+		pi = netdev_priv(netdev);
+		t4vf_enable_vi(adapter, pi->viid, false, false);
+	}
+
+	/*
+	 * Free up all Queues which will prevent further DMA and
+	 * Interrupts allowing various internal pathways to drain.
+	 */
+	t4vf_free_sge_resources(adapter);
 }
 
 /*
@@ -2934,6 +2906,7 @@
 	.id_table = cxgb4vf_pci_tbl,
 	.probe = cxgb4vf_pci_probe,
 	.remove = __devexit_p(cxgb4vf_pci_remove),
+	.shutdown = __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
@@ -2944,14 +2915,25 @@
 {
 	int ret;
 
+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		printk(KERN_WARNING KBUILD_MODNAME
+		       ": bad module parameter msi=%d; must be %d"
+		       " (MSI-X or MSI) or %d (MSI)\n",
+		       msi, MSI_MSIX, MSI_MSI);
+		return -EINVAL;
+	}
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!cxgb4vf_debugfs_root)
+	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
 		       " debugfs entry, continuing\n");
 
 	ret = pci_register_driver(&cxgb4vf_driver);
-	if (ret < 0)
+	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		debugfs_remove(cxgb4vf_debugfs_root);
 	return ret;
 }
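Note on the new shutdown method: it is the standard PCI driver quiesce pattern — fetch the driver state with pci_get_drvdata(), bail out if probe never attached anything, then silence ingress DMA and interrupt sources so the chip is idle across reboot or kexec. Reduced to its skeleton (all example_* names are placeholders, not from the driver):

    #include <linux/pci.h>

    static void example_shutdown(struct pci_dev *pdev)
    {
        struct example_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)          /* probe failed or never ran */
            return;

        example_disable_rx(adapter);   /* stop ingress traffic */
        example_free_queues(adapter);  /* stop DMA and interrupt sources */
    }

    static struct pci_driver example_driver = {
        .name     = "example",
        .probe    = example_probe,
        .remove   = example_remove,
        .shutdown = example_shutdown,  /* invoked at reboot/kexec */
    };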
+1 -1
drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@
 	delay_idx = 0;
 	ms = delay[0];
 
-	for (i = 0; i < 500; i += ms) {
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
 		if (sleep_ok) {
 			ms = delay[delay_idx];
 			if (delay_idx < ARRAY_SIZE(delay) - 1)
+42 -10
drivers/net/e1000e/netdev.c
@@ -937,6 +937,9 @@
 	u16 phy_status, phy_1000t_status, phy_ext_status;
 	u16 pci_status;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1e_rphy(hw, PHY_STATUS, &phy_status);
 	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
 	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1508,6 +1505,9 @@
 {
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, downshift_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
 
 	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
 }
@@ -3344,6 +3338,21 @@
 	return 0;
 }
 
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags2 & FLAG2_DMA_BURST))
+		return;
+
+	/* flush pending descriptor writebacks to memory */
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
+}
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -3398,6 +3377,9 @@
 
 	if (!pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
+
+	e1000e_flush_descriptors(adapter);
+
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
@@ -3789,5 +3765,9 @@
 {
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, update_phy_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000_get_phy_info(&adapter->hw);
 }
@@ -3803,5 +3775,9 @@
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	schedule_work(&adapter->update_phy_task);
 }
@@ -4181,6 +4149,9 @@
 	u32 link, tctl;
 	int tx_pending = 0;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
 		/* Cancel scheduled suspend requests. */
@@ -4372,18 +4337,11 @@
 	else
 		ew32(ICS, E1000_ICS_RXDMT0);
 
+	/* flush pending descriptors to memory before detecting Tx hang */
+	e1000e_flush_descriptors(adapter);
+
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
-
-	/* flush partial descriptors to memory before detecting Tx hang */
-	if (adapter->flags2 & FLAG2_DMA_BURST) {
-		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
-		ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-		/*
-		 * no need to flush the writes because the timeout code does
-		 * an er32 first thing
-		 */
-	}
 
 	/*
 	 * With 82571 controllers, LAA may be overwritten due to controller
@@ -4914,6 +4886,10 @@
 {
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
+
+	/* don't run the task if already down */
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
 
 	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
 	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
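Note on the e1000e hunks: they all apply one rule — deferred work (timers and work items) may fire after e1000e_down() has begun tearing the interface down, so each handler re-checks the adapter's down bit before touching hardware. The guard, sketched with placeholder names:

    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    static void example_task(struct work_struct *work)
    {
        struct example_adapter *adapter =
            container_of(work, struct example_adapter, some_task);

        /* the work may have been queued before the interface went down */
        if (test_bit(__EXAMPLE_DOWN, &adapter->state))
            return;

        /* ... now safe to touch adapter and hardware state ... */
    }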
+2
drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@
 		goto out_error;
 	}
 
+	netif_carrier_off(dev);
+
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
+50 -1
drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,7 +159,7 @@
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
-	static const unsigned int bufflen = 4096;
+	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
 	unsigned int firstoff = 0;
 	unsigned int lastsize;
 	unsigned int thisoff = 0;
@@ -253,6 +253,24 @@
 	}
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
+
+	/*
+	 * lastsize can not be buffer len.
+	 * If it is then adding another buffer with lastsize = 1.
+	 */
+	if (lastsize == bufflen) {
+		if (j >= IXGBE_BUFFCNT_MAX) {
+			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+			      "not enough user buffers. We need an extra "
+			      "buffer because lastsize is bufflen.\n",
+			      xid, i, j, dmacount, (u64)addr);
+			goto out_noddp_free;
+		}
+
+		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+		j++;
+		lastsize = 1;
+	}
 
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
@@ -550,6 +532,24 @@
 		e_err(drv, "failed to allocated FCoE DDP pool\n");
 
 		spin_lock_init(&fcoe->lock);
+
+		/* Extra buffer to be shared by all DDPs for HW work around */
+		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+		if (fcoe->extra_ddp_buffer == NULL) {
+			e_err(drv, "failed to allocated extra DDP buffer\n");
+			goto out_extra_ddp_buffer_alloc;
+		}
+
+		fcoe->extra_ddp_buffer_dma =
+			dma_map_single(&adapter->pdev->dev,
+				       fcoe->extra_ddp_buffer,
+				       IXGBE_FCBUFF_MIN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev,
+				      fcoe->extra_ddp_buffer_dma)) {
+			e_err(drv, "failed to map extra DDP buffer\n");
+			goto out_extra_ddp_buffer_dma;
+		}
 	}
 
 	/* Enable L2 eth type filter for FCoE */
@@ -617,4 +581,12 @@
 		}
 	}
 #endif
+
+	return;
+
+out_extra_ddp_buffer_dma:
+	kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+	pci_pool_destroy(fcoe->pool);
+	fcoe->pool = NULL;
 }
@@ -644,6 +600,11 @@
 	if (fcoe->pool) {
 		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 			ixgbe_fcoe_ddp_put(adapter->netdev, i);
+		dma_unmap_single(&adapter->pdev->dev,
+				 fcoe->extra_ddp_buffer_dma,
+				 IXGBE_FCBUFF_MIN,
+				 DMA_FROM_DEVICE);
+		kfree(fcoe->extra_ddp_buffer);
 		pci_pool_destroy(fcoe->pool);
 		fcoe->pool = NULL;
 	}
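Note on the ixgbe extra-buffer setup: it follows the obligatory streaming-DMA shape — every dma_map_single() is checked with dma_mapping_error(), and each failure label unwinds exactly what was acquired before it. In outline (the ctx/buf names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    static int example_setup_buffer(struct device *dev,
                                    struct example_ctx *ctx, size_t len)
    {
        ctx->buf = kmalloc(len, GFP_ATOMIC);
        if (!ctx->buf)
            return -ENOMEM;

        ctx->buf_dma = dma_map_single(dev, ctx->buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, ctx->buf_dma)) {
            kfree(ctx->buf);   /* unwind only the allocation */
            return -ENOMEM;
        }
        /* teardown mirrors this: dma_unmap_single(), then kfree() */
        return 0;
    }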
+2
drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@
 	spinlock_t lock;
 	struct pci_pool *pool;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+	unsigned char *extra_ddp_buffer;
+	dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
+4 -2
drivers/net/ixgbe/ixgbe_main.c
@@ -3728,7 +3728,8 @@
 		 * We need to try and force an autonegotiation
 		 * session, then bring up link.
 		 */
-		hw->mac.ops.setup_sfp(hw);
+		if (hw->mac.ops.setup_sfp)
+			hw->mac.ops.setup_sfp(hw);
 		if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 			schedule_work(&adapter->multispeed_fiber_task);
 	} else {
@@ -5969,7 +5968,8 @@
 		unregister_netdev(adapter->netdev);
 		return;
 	}
-	hw->mac.ops.setup_sfp(hw);
+	if (hw->mac.ops.setup_sfp)
+		hw->mac.ops.setup_sfp(hw);
 
 	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 		/* This will also work for DA Twinax connections */
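Note on the setup_sfp guard (the "panic due to uninitialised pointer" fix in the log): hooks in a per-MAC ops table may be left NULL on hardware that does not need them, so every indirect call through such a hook needs a test first. The pattern in isolation (the example types are illustrative):

    /* Sketch: an ops hook that only some hardware variants implement. */
    struct example_mac_ops {
        s32 (*setup_sfp)(struct example_hw *hw);   /* may be NULL */
    };

    static void example_config_link(struct example_hw *hw)
    {
        if (hw->ops.setup_sfp)        /* absent on non-SFP parts */
            hw->ops.setup_sfp(hw);
    }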
+1 -1
drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@
 	struct pch_gbe_regs_mac_adr mac_adr[16];
 	u32 ADDR_MASK;
 	u32 MIIM;
-	u32 reserve2;
+	u32 MAC_ADDR_LOAD;
 	u32 RGMII_ST;
 	u32 RGMII_CTRL;
 	u32 reserve3[3];
+63 -43
drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@
 #define PCH_GBE_SHORT_PKT		64
 #define DSC_INIT16			0xC000
 #define PCH_GBE_DMA_ALIGN		0
+#define PCH_GBE_DMA_PADDING		2
 #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT	256
 #define PCH_GBE_PCI_BAR			1
@@ -89,6 +88,12 @@
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 			       int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw: Pointer to the HW structure
@@ -1372,16 +1365,13 @@
 	struct pch_gbe_buffer *buffer_info;
 	struct pch_gbe_rx_desc *rx_desc;
 	u32 length;
-	unsigned char tmp_packet[ETH_HLEN];
 	unsigned int i;
 	unsigned int cleaned_count = 0;
 	bool cleaned = false;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *new_skb;
 	u8 dma_status;
 	u16 gbec_status;
 	u32 tcp_ip_status;
-	u8 skb_copy_flag = 0;
-	u8 skb_padding_flag = 0;
 
 	i = rx_ring->next_to_clean;
 
@@ -1422,55 +1418,70 @@
 			pr_err("Receive CRC Error\n");
 		} else {
 			/* get receive length */
-			/* length convert[-3], padding[-2] */
-			length = (rx_desc->rx_words_eob) - 3 - 2;
+			/* length convert[-3] */
+			length = (rx_desc->rx_words_eob) - 3;
 
 			/* Decide the data conversion method */
 			if (!adapter->rx_csum) {
 				/* [Header:14][payload] */
-				skb_padding_flag = 0;
-				skb_copy_flag = 1;
-			} else {
-				/* [Header:14][padding:2][payload] */
-				skb_padding_flag = 1;
-				if (length < copybreak)
-					skb_copy_flag = 1;
-				else
-					skb_copy_flag = 0;
-			}
-
-			/* Data conversion */
-			if (skb_copy_flag) {	/* recycle skb */
-				struct sk_buff *new_skb;
-				new_skb =
-					netdev_alloc_skb(netdev,
-							 length + NET_IP_ALIGN);
-				if (new_skb) {
-					if (!skb_padding_flag) {
-						skb_reserve(new_skb,
-							    NET_IP_ALIGN);
+				if (NET_IP_ALIGN) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.*/
+					new_skb = netdev_alloc_skb(netdev,
+						length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
 					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
 					memcpy(new_skb->data, skb->data,
-					       length);
-					/* save the skb
-					 * in buffer_info as good */
+						length);
 					skb = new_skb;
-				} else if (!skb_padding_flag) {
-					/* dorrop error */
-					pr_err("New skb allocation Error\n");
-					goto dorrop;
+				} else {
+					/* DMA buffer is used as SKB as it is.*/
+					buffer_info->skb = NULL;
 				}
 			} else {
-				buffer_info->skb = NULL;
+				/* [Header:14][padding:2][payload] */
+				/* The length includes padding length */
+				length = length - PCH_GBE_DMA_PADDING;
+				if ((length < copybreak) ||
+				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.
+					 * Padding data is deleted
+					 * at the time of a copy.*/
+					new_skb = netdev_alloc_skb(netdev,
+						length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
+					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
+					memcpy(new_skb->data, skb->data,
+						ETH_HLEN);
+					memcpy(&new_skb->data[ETH_HLEN],
+						&skb->data[ETH_HLEN +
+						PCH_GBE_DMA_PADDING],
+						length - ETH_HLEN);
+					skb = new_skb;
+				} else {
+					/* Padding data is deleted
+					 * by moving header data.*/
+					memmove(&skb->data[PCH_GBE_DMA_PADDING],
+						&skb->data[0], ETH_HLEN);
+					skb_reserve(skb, NET_IP_ALIGN);
+					buffer_info->skb = NULL;
+				}
 			}
-			if (skb_padding_flag) {
-				memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-				memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-				       ETH_HLEN);
-				skb_reserve(skb, NET_IP_ALIGN);
-
-			}
-
+			/* The length includes FCS length */
+			length = length - ETH_FCS_LEN;
 			/* update status of driver */
 			adapter->stats.rx_bytes += length;
 			adapter->stats.rx_packets++;
@@ -2337,6 +2318,7 @@
 	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
 	pch_gbe_set_ethtool_ops(netdev);
 
+	pch_gbe_mac_load_mac_addr(&adapter->hw);
 	pch_gbe_mac_reset_hw(&adapter->hw);
 
 	/* setup the private structure */
+2
drivers/net/r8169.c
@@ -3190,6 +3190,8 @@
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_put_noidle(&pdev->dev);
 
+	netif_carrier_off(dev);
+
 out:
 	return rc;
 
+3 -1
drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@
 
 	priv->hw = device;
 
-	if (device_can_wakeup(priv->device))
+	if (device_can_wakeup(priv->device)) {
 		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+		enable_irq_wake(dev->irq);
+	}
 
 	return 0;
 }
+6 -2
drivers/net/tg3.c
@@ -11158,7 +11158,9 @@
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -11176,7 +11174,9 @@
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
+6 -6
drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@
 
 static void hso_free_tiomget(struct hso_serial *serial)
 {
-	struct hso_tiocmget *tiocmget = serial->tiocmget;
+	struct hso_tiocmget *tiocmget;
+	if (!serial)
+		return;
+	tiocmget = serial->tiocmget;
 	if (tiocmget) {
-		if (tiocmget->urb) {
-			usb_free_urb(tiocmget->urb);
-			tiocmget->urb = NULL;
-		}
+		usb_free_urb(tiocmget->urb);
+		tiocmget->urb = NULL;
 		serial->tiocmget = NULL;
 		kfree(tiocmget);
-
 	}
 }
 
+3 -1
drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@
 	if (urb != NULL) {
 		clear_bit (EVENT_RX_MEMORY, &dev->flags);
 		status = usb_autopm_get_interface(dev->intf);
-		if (status < 0)
+		if (status < 0) {
+			usb_free_urb(urb);
 			goto fail_lowmem;
+		}
 		if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
 			resched = 0;
 		usb_autopm_put_interface(dev->intf);
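Note on the usbnet fix: once usb_autopm_get_interface() fails, the freshly allocated URB can no longer reach rx_submit(), so the error path has to release it itself — the general rule that each early exit frees everything acquired since the last checkpoint. A hedged sketch of that shape (function and field names here are placeholders):

    #include <linux/usb.h>

    static int example_refill(struct usb_interface *intf)
    {
        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
        int status;

        if (!urb)
            return -ENOMEM;

        status = usb_autopm_get_interface(intf);
        if (status < 0) {
            usb_free_urb(urb);   /* nothing will consume it now */
            return status;
        }
        /* ... submit urb: on success, ownership passes to the USB core ... */
        usb_autopm_put_interface(intf);
        return 0;
    }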
-67
drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@
 }
 #endif
 
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
-				struct iwl_rx_packet *pkt)
-{
-	bool rc = true;
-	struct iwl3945_notif_statistics current_stat;
-	int combined_plcp_delta;
-	unsigned int plcp_msec;
-	unsigned long plcp_received_jiffies;
-
-	if (priv->cfg->base_params->plcp_delta_threshold ==
-	    IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-		return rc;
-	}
-	memcpy(&current_stat, pkt->u.raw, sizeof(struct
-			iwl3945_notif_statistics));
-	/*
-	 * check for plcp_err and trigger radio reset if it exceeds
-	 * the plcp error threshold plcp_delta.
-	 */
-	plcp_received_jiffies = jiffies;
-	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-					(long) priv->plcp_jiffies);
-	priv->plcp_jiffies = plcp_received_jiffies;
-	/*
-	 * check to make sure plcp_msec is not 0 to prevent division
-	 * by zero.
-	 */
-	if (plcp_msec) {
-		combined_plcp_delta =
-			(le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
-			le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
-		if ((combined_plcp_delta > 0) &&
-			((combined_plcp_delta * 100) / plcp_msec) >
-			priv->cfg->base_params->plcp_delta_threshold) {
-			/*
-			 * if plcp_err exceed the threshold, the following
-			 * data is printed in csv format:
-			 *    Text: plcp_err exceeded %d,
-			 *    Received ofdm.plcp_err,
-			 *    Current ofdm.plcp_err,
-			 *    combined_plcp_delta,
-			 *    plcp_msec
-			 */
-			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-				"%u, %d, %u mSecs\n",
-				priv->cfg->base_params->plcp_delta_threshold,
-				le32_to_cpu(current_stat.rx.ofdm.plcp_err),
-				combined_plcp_delta, plcp_msec);
-			/*
-			 * Reset the RF radio due to the high plcp
-			 * error rate
-			 */
-			rc = false;
-		}
-	}
-	return rc;
-}
-
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
 			      struct iwl_rx_mem_buffer *rxb)
 {
@@ -2668,7 +2734,6 @@
 	.isr_ops = {
 		.isr = iwl_isr_legacy,
 	},
-	.check_plcp_health = iwl3945_good_plcp_health,
 
 	.debugfs_ops = {
 		.rx_stats_read = iwl3945_ucode_rx_stats_read,
+1
net/bluetooth/l2cap.c
@@ -859,6 +859,7 @@
 		result = L2CAP_CR_SEC_BLOCK;
 	else
 		result = L2CAP_CR_BAD_PSM;
+	sk->sk_state = BT_DISCONN;
 
 	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
 	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+1 -1
net/bridge/br_input.c
@@ -80,7 +80,7 @@
 	if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-			if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
 			br_multicast_forward(mdst, skb, skb2);
+9 -10
net/bridge/br_multicast.c
@@ -232,8 +232,7 @@
 	if (!netif_running(br->dev) || timer_pending(&mp->timer))
 		goto out;
 
-	if (!hlist_unhashed(&mp->mglist))
-		hlist_del_init(&mp->mglist);
+	mp->mglist = false;
 
 	if (mp->ports)
 		goto out;
@@ -275,7 +276,7 @@
 	del_timer(&p->query_timer);
 	call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-	if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+	if (!mp->ports && !mp->mglist &&
 	    netif_running(br->dev))
 		mod_timer(&mp->timer, jiffies);
 
@@ -527,7 +528,7 @@
 	struct net_bridge *br = mp->br;
 
 	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+	if (!netif_running(br->dev) || !mp->mglist ||
 	    mp->queries_sent >= br->multicast_last_member_count)
 		goto out;
 
@@ -718,7 +719,7 @@
 		goto err;
 
 	if (!port) {
-		hlist_add_head(&mp->mglist, &br->mglist);
+		mp->mglist = true;
 		mod_timer(&mp->timer, now + br->multicast_membership_interval);
 		goto out;
 	}
@@ -1164,7 +1165,7 @@
 
 	max_delay *= br->multicast_last_member_count;
 
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1176,7 +1177,7 @@
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1235,7 +1236,7 @@
 		goto out;
 
 	max_delay *= br->multicast_last_member_count;
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1247,7 +1248,7 @@
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1282,7 +1283,7 @@
 			br->multicast_last_member_interval;
 
 	if (!port) {
-		if (!hlist_unhashed(&mp->mglist) &&
+		if (mp->mglist &&
 		    (timer_pending(&mp->timer) ?
 		     time_after(mp->timer.expires, time) :
 		     try_to_del_timer_sync(&mp->timer) >= 0)) {
+1 -2
net/bridge/br_private.h
@@ -84,13 +84,13 @@
 struct net_bridge_mdb_entry
 {
 	struct hlist_node hlist[2];
-	struct hlist_node mglist;
 	struct net_bridge *br;
 	struct net_bridge_port_group __rcu *ports;
 	struct rcu_head rcu;
 	struct timer_list timer;
 	struct timer_list query_timer;
 	struct br_ip addr;
+	bool mglist;
 	u32 queries_sent;
 };
 
@@ -238,7 +238,6 @@
 	spinlock_t multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head router_list;
-	struct hlist_head mglist;
 
 	struct timer_list multicast_router_timer;
 	struct timer_list multicast_querier_timer;
+7 -2
net/core/dev.c
@@ -1280,10 +1280,13 @@
 
 static int __dev_close(struct net_device *dev)
 {
+	int retval;
 	LIST_HEAD(single);
 
 	list_add(&dev->unreg_list, &single);
-	return __dev_close_many(&single);
+	retval = __dev_close_many(&single);
+	list_del(&single);
+	return retval;
 }
 
 int dev_close_many(struct list_head *head)
@@ -1328,7 +1325,7 @@
 
 	list_add(&dev->unreg_list, &single);
 	dev_close_many(&single);
-
+	list_del(&single);
 	return 0;
 }
 EXPORT_SYMBOL(dev_close);
@@ -5066,6 +5063,7 @@
 
 	list_add(&dev->unreg_list, &single);
 	rollback_registered_many(&single);
+	list_del(&single);
 }
 
 unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -6220,6 +6216,7 @@
 		}
 	}
 	unregister_netdevice_many(&dev_kill_list);
+	list_del(&dev_kill_list);
 	rtnl_unlock();
 }
 
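Note on the dev.c hunks (the two LIST_HEAD commits at the top of the log): an on-stack list head threaded through the *_many() helpers must be emptied with list_del() before the function returns, otherwise the device's unreg_list stays linked to a dead stack frame and the next list operation corrupts memory. The pattern in isolation (the example_* names are placeholders):

    #include <linux/list.h>

    static int example_close_one(struct example_dev *dev)
    {
        int retval;
        LIST_HEAD(single);   /* head lives only in this stack frame */

        list_add(&dev->unreg_list, &single);
        retval = example_close_many(&single);
        list_del(&single);   /* unlink before the frame is popped */
        return retval;
    }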
+8 -1
net/dcb/dcbnl.c
@@ -626,6 +626,9 @@
 	dcb->cmd = DCB_CMD_GAPP;
 
 	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+	if (!app_nest)
+		goto out_cancel;
+
 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
 	if (ret)
 		goto out_cancel;
@@ -1616,6 +1613,10 @@
 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
 	struct dcb_app_type *itr;
+	struct dcb_app_type event;
+
+	memcpy(&event.name, dev->name, sizeof(event.name));
+	memcpy(&event.app, new, sizeof(event.app));
 
 	spin_lock(&dcb_lock);
 	/* Search for existing match and replace */
@@ -1651,7 +1644,7 @@
 	}
 out:
 	spin_unlock(&dcb_lock);
-	call_dcbevent_notifiers(DCB_APP_EVENT, new);
+	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
 	return 0;
 }
 EXPORT_SYMBOL(dcb_setapp);
+20 -10
net/ipv4/devinet.c
@@ -1030,6 +1030,21 @@
 	return mtu >= 68;
 }
 
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+					struct in_device *in_dev)
+
+{
+	struct in_ifaddr *ifa = in_dev->ifa_list;
+
+	if (!ifa)
+		return;
+
+	arp_send(ARPOP_REQUEST, ETH_P_ARP,
+		 ifa->ifa_address, dev,
+		 ifa->ifa_address, NULL,
+		 dev->dev_addr, NULL);
+}
+
 /* Called only under RTNL semaphore */
 
 static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1097,18 +1082,13 @@
 		}
 		ip_mc_up(in_dev);
 		/* fall through */
-	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_CHANGEADDR:
+		if (!IN_DEV_ARP_NOTIFY(in_dev))
+			break;
+		/* fall through */
+	case NETDEV_NOTIFY_PEERS:
 		/* Send gratuitous ARP to notify of link change */
-		if (IN_DEV_ARP_NOTIFY(in_dev)) {
-			struct in_ifaddr *ifa = in_dev->ifa_list;
-
-			if (ifa)
-				arp_send(ARPOP_REQUEST, ETH_P_ARP,
-					 ifa->ifa_address, dev,
-					 ifa->ifa_address, NULL,
-					 dev->dev_addr, NULL);
-		}
+		inetdev_send_gratuitous_arp(dev, in_dev);
 		break;
 	case NETDEV_DOWN:
 		ip_mc_down(in_dev);
+1
net/ipv4/ip_gre.c
@@ -775,6 +775,7 @@
 		.fl4_dst = dst,
 		.fl4_src = tiph->saddr,
 		.fl4_tos = RT_TOS(tos),
+		.proto = IPPROTO_GRE,
 		.fl_gre_key = tunnel->parms.o_key
 	};
 	if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
+1
net/ipv4/route.c
@@ -2722,6 +2722,7 @@
 	.destroy = ipv4_dst_destroy,
 	.check = ipv4_blackhole_dst_check,
 	.default_mtu = ipv4_blackhole_default_mtu,
+	.default_advmss = ipv4_default_advmss,
 	.update_pmtu = ipv4_rt_blackhole_update_pmtu,
 };
 
+1
net/ipv6/route.c
@@ -128,6 +128,7 @@
 	.destroy = ip6_dst_destroy,
 	.check = ip6_dst_check,
 	.default_mtu = ip6_blackhole_default_mtu,
+	.default_advmss = ip6_default_advmss,
 	.update_pmtu = ip6_rt_blackhole_update_pmtu,
 };
 
+2
net/mac80211/util.c
@@ -1210,7 +1210,9 @@
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_STATION:
 		changed |= BSS_CHANGED_ASSOC;
+		mutex_lock(&sdata->u.mgd.mtx);
 		ieee80211_bss_info_change_notify(sdata, changed);
+		mutex_unlock(&sdata->u.mgd.mtx);
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		changed |= BSS_CHANGED_IBSS;
+2 -1
net/netfilter/core.c
@@ -133,6 +133,7 @@
 
 	/* Optimization: we don't need to hold module
 	   reference here, since function can't sleep. --RR */
+repeat:
 	verdict = elem->hook(hook, skb, indev, outdev, okfn);
 	if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
@@ -146,7 +145,7 @@
 #endif
 		if (verdict != NF_REPEAT)
 			return verdict;
-		*i = (*i)->prev;
+		goto repeat;
 	}
 	return NF_ACCEPT;
+5 -2
net/xfrm/xfrm_policy.c
@@ -1340,10 +1340,13 @@
 	default:
 		BUG();
 	}
-	xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
+	xdst = dst_alloc(dst_ops);
 	xfrm_policy_put_afinfo(afinfo);
 
-	xdst->flo.ops = &xfrm_bundle_fc_ops;
+	if (likely(xdst))
+		xdst->flo.ops = &xfrm_bundle_fc_ops;
+	else
+		xdst = ERR_PTR(-ENOBUFS);
 
 	return xdst;
 }
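Note on the xfrm change: the old `dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS)` substituted an ERR_PTR on allocation failure, yet the very next line dereferenced it through xdst->flo.ops — an oops waiting to happen. The corrected shape, generalized (the example_* names are placeholders):

    #include <linux/err.h>

    static struct example_obj *example_create(void)
    {
        struct example_obj *obj = example_alloc();   /* may return NULL */

        if (likely(obj))
            obj->ops = &example_ops;    /* touch fields only on success */
        else
            obj = ERR_PTR(-ENOBUFS);    /* callers check IS_ERR() */

        return obj;
    }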