Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (43 commits)
bnx2: Eliminate AER error messages on systems not supporting it
cnic: Fix big endian bug
xfrm6: Don't forget to propagate peer into ipsec route.
tg3: Use new VLAN code
bonding: update documentation - alternate configuration.
TCP: fix a bug that triggers large number of TCP RST by mistake
MAINTAINERS: remove Reinette Chatre as iwlwifi maintainer
rt2x00: add device id for windy31 usb device
mac80211: fix a crash in ieee80211_beacon_get_tim on change_interface
ipv6: Revert 'administrative down' address handling changes.
textsearch: doc - fix spelling in lib/textsearch.c.
USB NET KL5KUSB101: Fix mem leak in error path of kaweth_download_firmware()
pch_gbe: don't use flush_scheduled_work()
bnx2: Always set ETH_FLAG_TXVLAN
net: clear heap allocation for ethtool_get_regs()
ipv6: Always clone offlink routes.
dcbnl: make get_app handling symmetric for IEEE and CEE DCBx
tcp: fix bug in listening_get_next()
inetpeer: Use correct AVL tree base pointer in inet_getpeer().
GRO: fix merging a paged skb after non-paged skbs
...

+2656 -390
+25
Documentation/ABI/testing/sysfs-platform-at91
···
··· 1 + What: /sys/devices/platform/at91_can/net/<iface>/mb0_id 2 + Date: January 2011 3 + KernelVersion: 2.6.38 4 + Contact: Marc Kleine-Budde <kernel@pengutronix.de> 5 + Description: 6 + Value representing the can_id of mailbox 0. 7 + 8 + Default: 0x7ff (standard frame) 9 + 10 + Due to a chip bug (errata 50.2.6.3 & 50.3.5.3 in 11 + "AT91SAM9263 Preliminary 6249H-ATARM-27-Jul-09") the 12 + contents of mailbox 0 may be sent under certain 13 + conditions (even if disabled or in rx mode). 14 + 15 + The workaround in the errata suggests not to use the 16 + mailbox and load it with an unused identifier. 17 + 18 + In order to use an extended can_id add the 19 + CAN_EFF_FLAG (0x80000000U) to the can_id. Example: 20 + 21 + - standard id 0x7ff: 22 + echo 0x7ff > /sys/class/net/can0/mb0_id 23 + 24 + - extended id 0x1fffffff: 25 + echo 0x9fffffff > /sys/class/net/can0/mb0_id
+71 -12
Documentation/networking/bonding.txt
··· 49 3.3 Configuring Bonding Manually with Ifenslave 50 3.3.1 Configuring Multiple Bonds Manually 51 3.4 Configuring Bonding Manually via Sysfs 52 - 3.5 Overriding Configuration for Special Cases 53 54 4. Querying Bonding Configuration 55 4.1 Bonding Configuration ··· 162 default kernel source include directory. 163 164 SECOND IMPORTANT NOTE: 165 - If you plan to configure bonding using sysfs, you do not need 166 - to use ifenslave. 167 168 2. Bonding Driver Options 169 ========================= ··· 780 781 You can configure bonding using either your distro's network 782 initialization scripts, or manually using either ifenslave or the 783 - sysfs interface. Distros generally use one of two packages for the 784 - network initialization scripts: initscripts or sysconfig. Recent 785 - versions of these packages have support for bonding, while older 786 versions do not. 787 788 We will first describe the options for configuring bonding for 789 - distros using versions of initscripts and sysconfig with full or 790 - partial support for bonding, then provide information on enabling 791 bonding without support from the network initialization scripts (i.e., 792 older versions of initscripts or sysconfig). 793 794 - If you're unsure whether your distro uses sysconfig or 795 - initscripts, or don't know if it's new enough, have no fear. 796 Determining this is fairly straightforward. 797 798 - First, issue the command: 799 800 $ rpm -qf /sbin/ifup 801 ··· 1332 echo +eth2 > /sys/class/net/bond1/bonding/slaves 1333 echo +eth3 > /sys/class/net/bond1/bonding/slaves 1334 1335 - 3.5 Overriding Configuration for Special Cases 1336 ---------------------------------------------- 1337 When using the bonding driver, the physical port which transmits a frame is 1338 typically selected by the bonding driver, and is not relevant to the user or 1339 system administrator. The output port is simply selected using the policies of
··· 49 3.3 Configuring Bonding Manually with Ifenslave 50 3.3.1 Configuring Multiple Bonds Manually 51 3.4 Configuring Bonding Manually via Sysfs 52 + 3.5 Configuration with Interfaces Support 53 + 3.6 Overriding Configuration for Special Cases 54 55 4. Querying Bonding Configuration 56 4.1 Bonding Configuration ··· 161 default kernel source include directory. 162 163 SECOND IMPORTANT NOTE: 164 + If you plan to configure bonding using sysfs or using the 165 + /etc/network/interfaces file, you do not need to use ifenslave. 166 167 2. Bonding Driver Options 168 ========================= ··· 779 780 You can configure bonding using either your distro's network 781 initialization scripts, or manually using either ifenslave or the 782 + sysfs interface. Distros generally use one of three packages for the 783 + network initialization scripts: initscripts, sysconfig or interfaces. 784 + Recent versions of these packages have support for bonding, while older 785 versions do not. 786 787 We will first describe the options for configuring bonding for 788 + distros using versions of initscripts, sysconfig and interfaces with full 789 + or partial support for bonding, then provide information on enabling 790 bonding without support from the network initialization scripts (i.e., 791 older versions of initscripts or sysconfig). 792 793 + If you're unsure whether your distro uses sysconfig, 794 + initscripts or interfaces, or don't know if it's new enough, have no fear. 795 Determining this is fairly straightforward. 796 797 + First, look for a file called interfaces in /etc/network directory. 798 + If this file is present in your system, then your system uses interfaces. See 799 + Configuration with Interfaces Support. 
800 + 801 + Else, issue the command: 802 803 $ rpm -qf /sbin/ifup 804 ··· 1327 echo +eth2 > /sys/class/net/bond1/bonding/slaves 1328 echo +eth3 > /sys/class/net/bond1/bonding/slaves 1329 1330 + 3.5 Configuration with Interfaces Support 1331 + ----------------------------------------- 1332 + 1333 + This section applies to distros which use /etc/network/interfaces file 1334 + to describe network interface configuration, most notably Debian and its 1335 + derivatives. 1336 + 1337 + The ifup and ifdown commands on Debian don't support bonding out of 1338 + the box. The ifenslave-2.6 package should be installed to provide bonding 1339 + support. Once installed, this package will provide bond-* options to be used 1340 + into /etc/network/interfaces. 1341 + 1342 + Note that ifenslave-2.6 package will load the bonding module and use 1343 + the ifenslave command when appropriate. 1344 + 1345 + Example Configurations 1346 + ---------------------- 1347 + 1348 + In /etc/network/interfaces, the following stanza will configure bond0, in 1349 + active-backup mode, with eth0 and eth1 as slaves. 1350 + 1351 + auto bond0 1352 + iface bond0 inet dhcp 1353 + bond-slaves eth0 eth1 1354 + bond-mode active-backup 1355 + bond-miimon 100 1356 + bond-primary eth0 eth1 1357 + 1358 + If the above configuration doesn't work, you might have a system using 1359 + upstart for system startup. This is most notably true for recent 1360 + Ubuntu versions. The following stanza in /etc/network/interfaces will 1361 + produce the same result on those systems. 
1362 + 1363 + auto bond0 1364 + iface bond0 inet dhcp 1365 + bond-slaves none 1366 + bond-mode active-backup 1367 + bond-miimon 100 1368 + 1369 + auto eth0 1370 + iface eth0 inet manual 1371 + bond-master bond0 1372 + bond-primary eth0 eth1 1373 + 1374 + auto eth1 1375 + iface eth1 inet manual 1376 + bond-master bond0 1377 + bond-primary eth0 eth1 1378 + 1379 + For a full list of bond-* supported options in /etc/network/interfaces and some 1380 + more advanced examples tailored to your particular distros, see the files in 1381 + /usr/share/doc/ifenslave-2.6. 1382 + 1383 + 3.6 Overriding Configuration for Special Cases 1384 + ---------------------------------------------- 1385 + 1386 When using the bonding driver, the physical port which transmits a frame is 1387 typically selected by the bonding driver, and is not relevant to the user or 1388 system administrator. The output port is simply selected using the policies of
-1
MAINTAINERS
··· 3327 F: include/linux/wimax/i2400m.h 3328 3329 INTEL WIRELESS WIFI LINK (iwlwifi) 3330 - M: Reinette Chatre <reinette.chatre@intel.com> 3331 M: Wey-Yi Guy <wey-yi.w.guy@intel.com> 3332 M: Intel Linux Wireless <ilw@linux.intel.com> 3333 L: linux-wireless@vger.kernel.org
··· 3327 F: include/linux/wimax/i2400m.h 3328 3329 INTEL WIRELESS WIFI LINK (iwlwifi) 3330 M: Wey-Yi Guy <wey-yi.w.guy@intel.com> 3331 M: Intel Linux Wireless <ilw@linux.intel.com> 3332 L: linux-wireless@vger.kernel.org
+1 -1
drivers/atm/idt77105.c
··· 151 spin_unlock_irqrestore(&idt77105_priv_lock, flags); 152 if (arg == NULL) 153 return 0; 154 - return copy_to_user(arg, &PRIV(dev)->stats, 155 sizeof(struct idt77105_stats)) ? -EFAULT : 0; 156 } 157
··· 151 spin_unlock_irqrestore(&idt77105_priv_lock, flags); 152 if (arg == NULL) 153 return 0; 154 + return copy_to_user(arg, &stats, 155 sizeof(struct idt77105_stats)) ? -EFAULT : 0; 156 } 157
+21 -56
drivers/bluetooth/ath3k.c
··· 47 #define USB_REQ_DFU_DNLOAD 1 48 #define BULK_SIZE 4096 49 50 - struct ath3k_data { 51 - struct usb_device *udev; 52 - u8 *fw_data; 53 - u32 fw_size; 54 - u32 fw_sent; 55 - }; 56 - 57 - static int ath3k_load_firmware(struct ath3k_data *data, 58 - unsigned char *firmware, 59 - int count) 60 { 61 u8 *send_buf; 62 int err, pipe, len, size, sent = 0; 63 64 - BT_DBG("ath3k %p udev %p", data, data->udev); 65 66 - pipe = usb_sndctrlpipe(data->udev, 0); 67 - 68 - if ((usb_control_msg(data->udev, pipe, 69 - USB_REQ_DFU_DNLOAD, 70 - USB_TYPE_VENDOR, 0, 0, 71 - firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) { 72 - BT_ERR("Can't change to loading configuration err"); 73 - return -EBUSY; 74 - } 75 - sent += 20; 76 - count -= 20; 77 78 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); 79 if (!send_buf) { ··· 64 return -ENOMEM; 65 } 66 67 while (count) { 68 size = min_t(uint, count, BULK_SIZE); 69 - pipe = usb_sndbulkpipe(data->udev, 0x02); 70 - memcpy(send_buf, firmware + sent, size); 71 72 - err = usb_bulk_msg(data->udev, pipe, send_buf, size, 73 &len, 3000); 74 75 if (err || (len != size)) { ··· 106 { 107 const struct firmware *firmware; 108 struct usb_device *udev = interface_to_usbdev(intf); 109 - struct ath3k_data *data; 110 - int size; 111 112 BT_DBG("intf %p id %p", intf, id); 113 114 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 115 return -ENODEV; 116 117 - data = kzalloc(sizeof(*data), GFP_KERNEL); 118 - if (!data) 119 - return -ENOMEM; 120 - 121 - data->udev = udev; 122 - 123 if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { 124 - kfree(data); 125 return -EIO; 126 } 127 128 - size = max_t(uint, firmware->size, 4096); 129 - data->fw_data = kmalloc(size, GFP_KERNEL); 130 - if (!data->fw_data) { 131 release_firmware(firmware); 132 - kfree(data); 133 - return -ENOMEM; 134 - } 135 - 136 - memcpy(data->fw_data, firmware->data, firmware->size); 137 - data->fw_size = firmware->size; 138 - data->fw_sent = 0; 139 - release_firmware(firmware); 140 - 141 - 
usb_set_intfdata(intf, data); 142 - if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) { 143 - usb_set_intfdata(intf, NULL); 144 - kfree(data->fw_data); 145 - kfree(data); 146 return -EIO; 147 } 148 149 return 0; 150 } 151 152 static void ath3k_disconnect(struct usb_interface *intf) 153 { 154 - struct ath3k_data *data = usb_get_intfdata(intf); 155 - 156 BT_DBG("ath3k_disconnect intf %p", intf); 157 - 158 - kfree(data->fw_data); 159 - kfree(data); 160 } 161 162 static struct usb_driver ath3k_driver = {
··· 47 #define USB_REQ_DFU_DNLOAD 1 48 #define BULK_SIZE 4096 49 50 + static int ath3k_load_firmware(struct usb_device *udev, 51 + const struct firmware *firmware) 52 { 53 u8 *send_buf; 54 int err, pipe, len, size, sent = 0; 55 + int count = firmware->size; 56 57 + BT_DBG("udev %p", udev); 58 59 + pipe = usb_sndctrlpipe(udev, 0); 60 61 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); 62 if (!send_buf) { ··· 81 return -ENOMEM; 82 } 83 84 + memcpy(send_buf, firmware->data, 20); 85 + if ((err = usb_control_msg(udev, pipe, 86 + USB_REQ_DFU_DNLOAD, 87 + USB_TYPE_VENDOR, 0, 0, 88 + send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) { 89 + BT_ERR("Can't change to loading configuration err"); 90 + goto error; 91 + } 92 + sent += 20; 93 + count -= 20; 94 + 95 while (count) { 96 size = min_t(uint, count, BULK_SIZE); 97 + pipe = usb_sndbulkpipe(udev, 0x02); 98 + memcpy(send_buf, firmware->data + sent, size); 99 100 + err = usb_bulk_msg(udev, pipe, send_buf, size, 101 &len, 3000); 102 103 if (err || (len != size)) { ··· 112 { 113 const struct firmware *firmware; 114 struct usb_device *udev = interface_to_usbdev(intf); 115 116 BT_DBG("intf %p id %p", intf, id); 117 118 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 119 return -ENODEV; 120 121 if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { 122 return -EIO; 123 } 124 125 + if (ath3k_load_firmware(udev, firmware)) { 126 release_firmware(firmware); 127 return -EIO; 128 } 129 + release_firmware(firmware); 130 131 return 0; 132 } 133 134 static void ath3k_disconnect(struct usb_interface *intf) 135 { 136 BT_DBG("ath3k_disconnect intf %p", intf); 137 } 138 139 static struct usb_driver ath3k_driver = {
+13 -8
drivers/net/bnx2.c
··· 7553 !(data & ETH_FLAG_RXVLAN)) 7554 return -EINVAL; 7555 7556 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN | 7557 ETH_FLAG_TXVLAN); 7558 if (rc) ··· 7966 7967 /* AER (Advanced Error Reporting) hooks */ 7968 err = pci_enable_pcie_error_reporting(pdev); 7969 - if (err) { 7970 - dev_err(&pdev->dev, "pci_enable_pcie_error_reporting " 7971 - "failed 0x%x\n", err); 7972 - /* non-fatal, continue */ 7973 - } 7974 7975 } else { 7976 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); ··· 8230 return 0; 8231 8232 err_out_unmap: 8233 - if (bp->flags & BNX2_FLAG_PCIE) 8234 pci_disable_pcie_error_reporting(pdev); 8235 8236 if (bp->regview) { 8237 iounmap(bp->regview); ··· 8421 8422 kfree(bp->temp_stats_blk); 8423 8424 - if (bp->flags & BNX2_FLAG_PCIE) 8425 pci_disable_pcie_error_reporting(pdev); 8426 8427 free_netdev(dev); 8428 ··· 8540 } 8541 rtnl_unlock(); 8542 8543 - if (!(bp->flags & BNX2_FLAG_PCIE)) 8544 return result; 8545 8546 err = pci_cleanup_aer_uncorrect_error_status(pdev);
··· 7553 !(data & ETH_FLAG_RXVLAN)) 7554 return -EINVAL; 7555 7556 + /* TSO with VLAN tag won't work with current firmware */ 7557 + if (!(data & ETH_FLAG_TXVLAN)) 7558 + return -EINVAL; 7559 + 7560 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN | 7561 ETH_FLAG_TXVLAN); 7562 if (rc) ··· 7962 7963 /* AER (Advanced Error Reporting) hooks */ 7964 err = pci_enable_pcie_error_reporting(pdev); 7965 + if (!err) 7966 + bp->flags |= BNX2_FLAG_AER_ENABLED; 7967 7968 } else { 7969 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); ··· 8229 return 0; 8230 8231 err_out_unmap: 8232 + if (bp->flags & BNX2_FLAG_AER_ENABLED) { 8233 pci_disable_pcie_error_reporting(pdev); 8234 + bp->flags &= ~BNX2_FLAG_AER_ENABLED; 8235 + } 8236 8237 if (bp->regview) { 8238 iounmap(bp->regview); ··· 8418 8419 kfree(bp->temp_stats_blk); 8420 8421 + if (bp->flags & BNX2_FLAG_AER_ENABLED) { 8422 pci_disable_pcie_error_reporting(pdev); 8423 + bp->flags &= ~BNX2_FLAG_AER_ENABLED; 8424 + } 8425 8426 free_netdev(dev); 8427 ··· 8535 } 8536 rtnl_unlock(); 8537 8538 + if (!(bp->flags & BNX2_FLAG_AER_ENABLED)) 8539 return result; 8540 8541 err = pci_cleanup_aer_uncorrect_error_status(pdev);
+1
drivers/net/bnx2.h
··· 6741 #define BNX2_FLAG_JUMBO_BROKEN 0x00000800 6742 #define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 6743 #define BNX2_FLAG_BROKEN_STATS 0x00002000 6744 6745 struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; 6746
··· 6741 #define BNX2_FLAG_JUMBO_BROKEN 0x00000800 6742 #define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 6743 #define BNX2_FLAG_BROKEN_STATS 0x00002000 6744 + #define BNX2_FLAG_AER_ENABLED 0x00004000 6745 6746 struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; 6747
+4
drivers/net/bonding/bond_3ad.c
··· 2470 if (!(dev->flags & IFF_MASTER)) 2471 goto out; 2472 2473 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2474 goto out; 2475
··· 2470 if (!(dev->flags & IFF_MASTER)) 2471 goto out; 2472 2473 + skb = skb_share_check(skb, GFP_ATOMIC); 2474 + if (!skb) 2475 + goto out; 2476 + 2477 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2478 goto out; 2479
+4
drivers/net/bonding/bond_alb.c
··· 326 goto out; 327 } 328 329 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) 330 goto out; 331
··· 326 goto out; 327 } 328 329 + skb = skb_share_check(skb, GFP_ATOMIC); 330 + if (!skb) 331 + goto out; 332 + 333 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) 334 goto out; 335
+4
drivers/net/bonding/bond_main.c
··· 2733 if (!slave || !slave_do_arp_validate(bond, slave)) 2734 goto out_unlock; 2735 2736 if (!pskb_may_pull(skb, arp_hdr_len(dev))) 2737 goto out_unlock; 2738
··· 2733 if (!slave || !slave_do_arp_validate(bond, slave)) 2734 goto out_unlock; 2735 2736 + skb = skb_share_check(skb, GFP_ATOMIC); 2737 + if (!skb) 2738 + goto out_unlock; 2739 + 2740 if (!pskb_may_pull(skb, arp_hdr_len(dev))) 2741 goto out_unlock; 2742
+2
drivers/net/can/Kconfig
··· 117 118 source "drivers/net/can/usb/Kconfig" 119 120 config CAN_DEBUG_DEVICES 121 bool "CAN devices debugging messages" 122 depends on CAN
··· 117 118 source "drivers/net/can/usb/Kconfig" 119 120 + source "drivers/net/can/softing/Kconfig" 121 + 122 config CAN_DEBUG_DEVICES 123 bool "CAN devices debugging messages" 124 depends on CAN
+1
drivers/net/can/Makefile
··· 9 can-dev-y := dev.o 10 11 obj-y += usb/ 12 13 obj-$(CONFIG_CAN_SJA1000) += sja1000/ 14 obj-$(CONFIG_CAN_MSCAN) += mscan/
··· 9 can-dev-y := dev.o 10 11 obj-y += usb/ 12 + obj-y += softing/ 13 14 obj-$(CONFIG_CAN_SJA1000) += sja1000/ 15 obj-$(CONFIG_CAN_MSCAN) += mscan/
+112 -26
drivers/net/can/at91_can.c
··· 2 * at91_can.c - CAN network driver for AT91 SoC CAN controller 3 * 4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> 5 - * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de> 6 * 7 * This software may be distributed under the terms of the GNU General 8 * Public License ("GPL") version 2 as distributed in the 'COPYING' ··· 30 #include <linux/module.h> 31 #include <linux/netdevice.h> 32 #include <linux/platform_device.h> 33 #include <linux/skbuff.h> 34 #include <linux/spinlock.h> 35 #include <linux/string.h> ··· 41 42 #include <mach/board.h> 43 44 - #define AT91_NAPI_WEIGHT 12 45 46 /* 47 * RX/TX Mailbox split 48 * don't dare to touch 49 */ 50 - #define AT91_MB_RX_NUM 12 51 #define AT91_MB_TX_SHIFT 2 52 53 - #define AT91_MB_RX_FIRST 0 54 #define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) 55 56 #define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) 57 #define AT91_MB_RX_SPLIT 8 58 #define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) 59 - #define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT)) 60 61 #define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) 62 #define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) ··· 170 171 struct clk *clk; 172 struct at91_can_data *pdata; 173 }; 174 175 static struct can_bittiming_const at91_bittiming_const = { ··· 224 set_mb_mode_prio(priv, mb, mode, 0); 225 } 226 227 /* 228 * Swtich transceiver on or off 229 */ ··· 249 { 250 struct at91_priv *priv = netdev_priv(dev); 251 unsigned int i; 252 253 /* 254 - * The first 12 mailboxes are used as a reception FIFO. The 255 - * last mailbox is configured with overwrite option. The 256 - * overwrite flag indicates a FIFO overflow. 
257 */ 258 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) 259 set_mb_mode(priv, i, AT91_MB_MODE_RX); 260 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); ··· 280 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); 281 282 /* Reset tx and rx helper pointers */ 283 - priv->tx_next = priv->tx_echo = priv->rx_next = 0; 284 } 285 286 static int at91_set_bittiming(struct net_device *dev) ··· 399 netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); 400 return NETDEV_TX_BUSY; 401 } 402 - 403 - if (cf->can_id & CAN_EFF_FLAG) 404 - reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE; 405 - else 406 - reg_mid = (cf->can_id & CAN_SFF_MASK) << 18; 407 - 408 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | 409 (cf->can_dlc << 16) | AT91_MCR_MTCR; 410 ··· 561 * 562 * Theory of Operation: 563 * 564 - * 12 of the 16 mailboxes on the chip are reserved for RX. we split 565 - * them into 2 groups. The lower group holds 8 and upper 4 mailboxes. 566 * 567 * Like it or not, but the chip always saves a received CAN message 568 * into the first free mailbox it finds (starting with the 569 * lowest). This makes it very difficult to read the messages in the 570 * right order from the chip. This is how we work around that problem: 571 * 572 - * The first message goes into mb nr. 0 and issues an interrupt. All 573 * rx ints are disabled in the interrupt handler and a napi poll is 574 * scheduled. We read the mailbox, but do _not_ reenable the mb (to 575 * receive another message). 576 * 577 * lower mbxs upper 578 - * ______^______ __^__ 579 - * / \ / \ 580 * +-+-+-+-+-+-+-+-++-+-+-+-+ 581 - * |x|x|x|x|x|x|x|x|| | | | | 582 * +-+-+-+-+-+-+-+-++-+-+-+-+ 583 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail 584 * 0 1 2 3 4 5 6 7 8 9 0 1 / box 585 * 586 * The variable priv->rx_next points to the next mailbox to read a 587 * message from. 
As long we're in the lower mailboxes we just read the ··· 616 "order of incoming frames cannot be guaranteed\n"); 617 618 again: 619 - for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); 620 - mb < AT91_MB_RX_NUM && quota > 0; 621 reg_sr = at91_read(priv, AT91_SR), 622 - mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) { 623 at91_read_msg(dev, mb); 624 625 /* reactivate mailboxes */ ··· 636 637 /* upper group completed, look again in lower */ 638 if (priv->rx_next > AT91_MB_RX_LOW_LAST && 639 - quota > 0 && mb >= AT91_MB_RX_NUM) { 640 - priv->rx_next = 0; 641 goto again; 642 } 643 ··· 1063 .ndo_start_xmit = at91_start_xmit, 1064 }; 1065 1066 static int __devinit at91_can_probe(struct platform_device *pdev) 1067 { 1068 struct net_device *dev; ··· 1166 dev->netdev_ops = &at91_netdev_ops; 1167 dev->irq = irq; 1168 dev->flags |= IFF_ECHO; 1169 1170 priv = netdev_priv(dev); 1171 priv->can.clock.freq = clk_get_rate(clk); ··· 1178 priv->dev = dev; 1179 priv->clk = clk; 1180 priv->pdata = pdev->dev.platform_data; 1181 1182 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); 1183
··· 2 * at91_can.c - CAN network driver for AT91 SoC CAN controller 3 * 4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> 5 + * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de> 6 * 7 * This software may be distributed under the terms of the GNU General 8 * Public License ("GPL") version 2 as distributed in the 'COPYING' ··· 30 #include <linux/module.h> 31 #include <linux/netdevice.h> 32 #include <linux/platform_device.h> 33 + #include <linux/rtnetlink.h> 34 #include <linux/skbuff.h> 35 #include <linux/spinlock.h> 36 #include <linux/string.h> ··· 40 41 #include <mach/board.h> 42 43 + #define AT91_NAPI_WEIGHT 11 44 45 /* 46 * RX/TX Mailbox split 47 * don't dare to touch 48 */ 49 + #define AT91_MB_RX_NUM 11 50 #define AT91_MB_TX_SHIFT 2 51 52 + #define AT91_MB_RX_FIRST 1 53 #define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) 54 55 #define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) 56 #define AT91_MB_RX_SPLIT 8 57 #define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) 58 + #define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \ 59 + ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST)) 60 61 #define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) 62 #define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) ··· 168 169 struct clk *clk; 170 struct at91_can_data *pdata; 171 + 172 + canid_t mb0_id; 173 }; 174 175 static struct can_bittiming_const at91_bittiming_const = { ··· 220 set_mb_mode_prio(priv, mb, mode, 0); 221 } 222 223 + static inline u32 at91_can_id_to_reg_mid(canid_t can_id) 224 + { 225 + u32 reg_mid; 226 + 227 + if (can_id & CAN_EFF_FLAG) 228 + reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; 229 + else 230 + reg_mid = (can_id & CAN_SFF_MASK) << 18; 231 + 232 + return reg_mid; 233 + } 234 + 235 /* 236 * Swtich transceiver on or off 237 */ ··· 233 { 234 struct at91_priv *priv = netdev_priv(dev); 235 unsigned int i; 236 + u32 reg_mid; 237 238 /* 239 + * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first 240 + * mailbox is disabled. 
The next 11 mailboxes are used as a 241 + * reception FIFO. The last mailbox is configured with 242 + * overwrite option. The overwrite flag indicates a FIFO 243 + * overflow. 244 */ 245 + reg_mid = at91_can_id_to_reg_mid(priv->mb0_id); 246 + for (i = 0; i < AT91_MB_RX_FIRST; i++) { 247 + set_mb_mode(priv, i, AT91_MB_MODE_DISABLED); 248 + at91_write(priv, AT91_MID(i), reg_mid); 249 + at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */ 250 + } 251 + 252 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) 253 set_mb_mode(priv, i, AT91_MB_MODE_RX); 254 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); ··· 254 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); 255 256 /* Reset tx and rx helper pointers */ 257 + priv->tx_next = priv->tx_echo = 0; 258 + priv->rx_next = AT91_MB_RX_FIRST; 259 } 260 261 static int at91_set_bittiming(struct net_device *dev) ··· 372 netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); 373 return NETDEV_TX_BUSY; 374 } 375 + reg_mid = at91_can_id_to_reg_mid(cf->can_id); 376 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | 377 (cf->can_dlc << 16) | AT91_MCR_MTCR; 378 ··· 539 * 540 * Theory of Operation: 541 * 542 + * 11 of the 16 mailboxes on the chip are reserved for RX. we split 543 + * them into 2 groups. The lower group holds 7 and upper 4 mailboxes. 544 * 545 * Like it or not, but the chip always saves a received CAN message 546 * into the first free mailbox it finds (starting with the 547 * lowest). This makes it very difficult to read the messages in the 548 * right order from the chip. This is how we work around that problem: 549 * 550 + * The first message goes into mb nr. 1 and issues an interrupt. All 551 * rx ints are disabled in the interrupt handler and a napi poll is 552 * scheduled. We read the mailbox, but do _not_ reenable the mb (to 553 * receive another message). 
554 * 555 * lower mbxs upper 556 + * ____^______ __^__ 557 + * / \ / \ 558 * +-+-+-+-+-+-+-+-++-+-+-+-+ 559 + * | |x|x|x|x|x|x|x|| | | | | 560 * +-+-+-+-+-+-+-+-++-+-+-+-+ 561 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail 562 * 0 1 2 3 4 5 6 7 8 9 0 1 / box 563 + * ^ 564 + * | 565 + * \ 566 + * unused, due to chip bug 567 * 568 * The variable priv->rx_next points to the next mailbox to read a 569 * message from. As long we're in the lower mailboxes we just read the ··· 590 "order of incoming frames cannot be guaranteed\n"); 591 592 again: 593 + for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next); 594 + mb < AT91_MB_RX_LAST + 1 && quota > 0; 595 reg_sr = at91_read(priv, AT91_SR), 596 + mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) { 597 at91_read_msg(dev, mb); 598 599 /* reactivate mailboxes */ ··· 610 611 /* upper group completed, look again in lower */ 612 if (priv->rx_next > AT91_MB_RX_LOW_LAST && 613 + quota > 0 && mb > AT91_MB_RX_LAST) { 614 + priv->rx_next = AT91_MB_RX_FIRST; 615 goto again; 616 } 617 ··· 1037 .ndo_start_xmit = at91_start_xmit, 1038 }; 1039 1040 + static ssize_t at91_sysfs_show_mb0_id(struct device *dev, 1041 + struct device_attribute *attr, char *buf) 1042 + { 1043 + struct at91_priv *priv = netdev_priv(to_net_dev(dev)); 1044 + 1045 + if (priv->mb0_id & CAN_EFF_FLAG) 1046 + return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id); 1047 + else 1048 + return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id); 1049 + } 1050 + 1051 + static ssize_t at91_sysfs_set_mb0_id(struct device *dev, 1052 + struct device_attribute *attr, const char *buf, size_t count) 1053 + { 1054 + struct net_device *ndev = to_net_dev(dev); 1055 + struct at91_priv *priv = netdev_priv(ndev); 1056 + unsigned long can_id; 1057 + ssize_t ret; 1058 + int err; 1059 + 1060 + rtnl_lock(); 1061 + 1062 + if (ndev->flags & IFF_UP) { 1063 + ret = -EBUSY; 1064 + goto out; 1065 + } 1066 + 1067 + err = strict_strtoul(buf, 0, &can_id); 1068 + if (err) { 1069 + ret = 
err; 1070 + goto out; 1071 + } 1072 + 1073 + if (can_id & CAN_EFF_FLAG) 1074 + can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; 1075 + else 1076 + can_id &= CAN_SFF_MASK; 1077 + 1078 + priv->mb0_id = can_id; 1079 + ret = count; 1080 + 1081 + out: 1082 + rtnl_unlock(); 1083 + return ret; 1084 + } 1085 + 1086 + static DEVICE_ATTR(mb0_id, S_IWUGO | S_IRUGO, 1087 + at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); 1088 + 1089 + static struct attribute *at91_sysfs_attrs[] = { 1090 + &dev_attr_mb0_id.attr, 1091 + NULL, 1092 + }; 1093 + 1094 + static struct attribute_group at91_sysfs_attr_group = { 1095 + .attrs = at91_sysfs_attrs, 1096 + }; 1097 + 1098 static int __devinit at91_can_probe(struct platform_device *pdev) 1099 { 1100 struct net_device *dev; ··· 1082 dev->netdev_ops = &at91_netdev_ops; 1083 dev->irq = irq; 1084 dev->flags |= IFF_ECHO; 1085 + dev->sysfs_groups[0] = &at91_sysfs_attr_group; 1086 1087 priv = netdev_priv(dev); 1088 priv->can.clock.freq = clk_get_rate(clk); ··· 1093 priv->dev = dev; 1094 priv->clk = clk; 1095 priv->pdata = pdev->dev.platform_data; 1096 + priv->mb0_id = 0x7ff; 1097 1098 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); 1099
+30
drivers/net/can/softing/Kconfig
···
··· 1 + config CAN_SOFTING 2 + tristate "Softing Gmbh CAN generic support" 3 + depends on CAN_DEV 4 + ---help--- 5 + Support for CAN cards from Softing Gmbh & some cards 6 + from Vector Gmbh. 7 + Softing Gmbh CAN cards come with 1 or 2 physical busses. 8 + Those cards typically use Dual Port RAM to communicate 9 + with the host CPU. The interface is then identical for PCI 10 + and PCMCIA cards. This driver operates on a platform device, 11 + which has been created by softing_cs or softing_pci driver. 12 + Warning: 13 + The API of the card does not allow fine control per bus, but 14 + controls the 2 busses on the card together. 15 + As such, some actions (start/stop/busoff recovery) on 1 bus 16 + must bring down the other bus too temporarily. 17 + 18 + config CAN_SOFTING_CS 19 + tristate "Softing Gmbh CAN pcmcia cards" 20 + depends on PCMCIA 21 + select CAN_SOFTING 22 + ---help--- 23 + Support for PCMCIA cards from Softing Gmbh & some cards 24 + from Vector Gmbh. 25 + You need firmware for these, which you can get at 26 + http://developer.berlios.de/projects/socketcan/ 27 + This version of the driver is written against 28 + firmware version 4.6 (softing-fw-4.6-binaries.tar.gz) 29 + In order to use the card as CAN device, you need the Softing generic 30 + support too.
+6
drivers/net/can/softing/Makefile
···
··· 1 + 2 + softing-y := softing_main.o softing_fw.o 3 + obj-$(CONFIG_CAN_SOFTING) += softing.o 4 + obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o 5 + 6 + ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
+167
drivers/net/can/softing/softing.h
···
/*
 * softing common interfaces
 *
 * by Kurt Van Dijck, 2008-2010
 */

#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/can.h>
#include <linux/can/dev.h>

#include "softing_platform.h"

struct softing;

/* per-netdev (i.e. per-CAN-bus) private data */
struct softing_priv {
	struct can_priv can; /* must be the first member! */
	struct net_device *netdev;
	struct softing *card;	/* backpointer to the owning card */
	struct {
		int pending;	/* nr of frames handed to the card, not yet acked */
		/* variables which hold the circular buffer */
		int echo_put;
		int echo_get;
	} tx;
	struct can_bittiming_const btr_const;
	int index;		/* bus index on the card */
	uint8_t output;		/* output control byte, programmed into the chip */
	uint16_t chip;
};
#define netdev2softing(netdev)	((struct softing_priv *)netdev_priv(netdev))

/* per-card data, shared by both busses */
struct softing {
	const struct softing_platform_data *pdat;
	struct platform_device *pdev;
	struct net_device *net[2];
	spinlock_t spin; /* protect this structure & DPRAM access */
	ktime_t ts_ref;		/* host time matching card timestamp 0 */
	ktime_t ts_overflow;	/* timestamp overflow value, in ktime */

	struct {
		/* indication of firmware status */
		int up;
		/* protection of the 'up' variable */
		struct mutex lock;
	} fw;
	struct {
		int nr;			/* irq number */
		int requested;		/* request_irq() done? */
		int svc_count;
		unsigned int dpram_position;
	} irq;
	struct {
		int pending;	/* total frames pending on the card (both busses) */
		int last_bus;
		/*
		 * keep the bus that last tx'd a message,
		 * in order to let every netdev queue resume
		 */
	} tx;
	__iomem uint8_t *dpram;		/* ioremap'ed dual-port RAM */
	unsigned long dpram_phys;
	unsigned long dpram_size;
	struct {
		/* identification data, read from the card after poweron */
		uint16_t fw_version, hw_version, license, serial;
		uint16_t chip[2];
		unsigned int freq; /* remote cpu's operating frequency */
	} id;
};

extern int softing_default_output(struct net_device *netdev);

extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);

extern int softing_chip_poweron(struct softing *card);

extern int softing_bootloader_command(struct softing *card, int16_t cmd,
		const char *msg);

/* Load firmware after reset */
extern int softing_load_fw(const char *file, struct softing *card,
		__iomem uint8_t *virt, unsigned int size, int offset);

/* Load final application firmware after bootloader */
extern int softing_load_app_fw(const char *file, struct softing *card);

/*
 * enable or disable irq
 * only called with fw.lock locked
 */
extern int softing_enable_irq(struct softing *card, int enable);

/* start/stop 1 bus on card */
extern int softing_startstop(struct net_device *netdev, int up);

/* netif_rx() */
extern int softing_netdev_rx(struct net_device *netdev,
		const struct can_frame *msg, ktime_t ktime);

/* SOFTING DPRAM mappings (byte offsets into the dual-port RAM) */
#define DPRAM_RX		0x0000
#define DPRAM_RX_SIZE		32
#define DPRAM_RX_CNT		16
#define DPRAM_RX_RD		0x0201	/* uint8_t */
#define DPRAM_RX_WR		0x0205	/* uint8_t */
#define DPRAM_RX_LOST		0x0207	/* uint8_t */

#define DPRAM_FCT_PARAM		0x0300	/* int16_t [20] */
#define DPRAM_FCT_RESULT	0x0328	/* int16_t */
#define DPRAM_FCT_HOST		0x032b	/* uint16_t */

#define DPRAM_INFO_BUSSTATE	0x0331	/* uint16_t */
#define DPRAM_INFO_BUSSTATE2	0x0335	/* uint16_t */
#define DPRAM_INFO_ERRSTATE	0x0339	/* uint16_t */
#define DPRAM_INFO_ERRSTATE2	0x033d	/* uint16_t */
#define DPRAM_RESET		0x0341	/* uint16_t */
#define DPRAM_CLR_RECV_FIFO	0x0345	/* uint16_t */
#define DPRAM_RESET_TIME	0x034d	/* uint16_t */
#define DPRAM_TIME		0x0350	/* uint64_t */
#define DPRAM_WR_START		0x0358	/* uint8_t */
#define DPRAM_WR_END		0x0359	/* uint8_t */
#define DPRAM_RESET_RX_FIFO	0x0361	/* uint16_t */
#define DPRAM_RESET_TX_FIFO	0x0364	/* uint8_t */
#define DPRAM_READ_FIFO_LEVEL	0x0365	/* uint8_t */
/*
 * NOTE(review): RX & TX fifo level share offset 0x0366; presumably this
 * mirrors the card's register map — confirm against the hardware docs.
 */
#define DPRAM_RX_FIFO_LEVEL	0x0366	/* uint16_t */
#define DPRAM_TX_FIFO_LEVEL	0x0366	/* uint16_t */

#define DPRAM_TX		0x0400	/* uint16_t */
#define DPRAM_TX_SIZE		16
#define DPRAM_TX_CNT		32
#define DPRAM_TX_RD		0x0601	/* uint8_t */
#define DPRAM_TX_WR		0x0605	/* uint8_t */

#define DPRAM_COMMAND		0x07e0	/* uint16_t */
#define DPRAM_RECEIPT		0x07f0	/* uint16_t */
#define DPRAM_IRQ_TOHOST	0x07fe	/* uint8_t */
#define DPRAM_IRQ_TOCARD	0x07ff	/* uint8_t */

#define DPRAM_V2_RESET		0x0e00	/* uint8_t */
#define DPRAM_V2_IRQ_TOHOST	0x0e02	/* uint8_t */

/* max nr of tx frames in flight (one fifo slot kept free) */
#define TXMAX	(DPRAM_TX_CNT - 1)

/* DPRAM return codes */
#define RES_NONE	0
#define RES_OK		1
#define RES_NOK		2
#define RES_UNKNOWN	3
/* DPRAM flags */
#define CMD_TX		0x01
#define CMD_ACK		0x02
#define CMD_XTD		0x04
#define CMD_RTR		0x08
#define CMD_ERR		0x10
#define CMD_BUS2	0x80

/* returned fifo entry bus state masks */
#define SF_MASK_BUSOFF		0x80
#define SF_MASK_EPASSIVE	0x60

/* bus states */
#define STATE_BUSOFF	2
#define STATE_EPASSIVE	1
#define STATE_EACTIVE	0
+359
drivers/net/can/softing/softing_cs.c
···
/*
 * Copyright (C) 2008-2010
 *
 * - Kurt Van Dijck, EIA Electronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the version 2 of the GNU General Public License
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>

#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

#include "softing_platform.h"

/* monotonic platform-device id; protected by softingcs_index_lock */
static int softingcs_index;
static spinlock_t softingcs_index_lock;

static int softingcs_reset(struct platform_device *pdev, int v);
static int softingcs_enable_irq(struct platform_device *pdev, int v);

/*
 * platform_data descriptions
 * One entry per supported card model, matched on pcmcia manf/prod ids.
 * boot/load/app describe {dpram offset, card address, firmware file}.
 */
#define MHZ (1000*1000)
static const struct softing_platform_data softingcs_platform_data[] = {
	{
		.name = "CANcard",
		.manf = 0x0168, .prod = 0x001,
		.generation = 1,
		.nbus = 2,
		.freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
		.dpram_size = 0x0800,
		.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
		.reset = softingcs_reset,
		.enable_irq = softingcs_enable_irq,
	}, {
		.name = "CANcard-NEC",
		.manf = 0x0168, .prod = 0x002,
		.generation = 1,
		.nbus = 2,
		.freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
		.dpram_size = 0x0800,
		.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
		.reset = softingcs_reset,
		.enable_irq = softingcs_enable_irq,
	}, {
		.name = "CANcard-SJA",
		.manf = 0x0168, .prod = 0x004,
		.generation = 1,
		.nbus = 2,
		.freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
		.dpram_size = 0x0800,
		.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
		.reset = softingcs_reset,
		.enable_irq = softingcs_enable_irq,
	}, {
		/* generation 2 cards have bigger DPRAM and no irq callback */
		.name = "CANcard-2",
		.manf = 0x0168, .prod = 0x005,
		.generation = 2,
		.nbus = 2,
		.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
		.dpram_size = 0x1000,
		.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
		.reset = softingcs_reset,
		.enable_irq = NULL,
	}, {
		.name = "Vector-CANcard",
		.manf = 0x0168, .prod = 0x081,
		.generation = 1,
		.nbus = 2,
		.freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
		.dpram_size = 0x0800,
		.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
		.reset = softingcs_reset,
		.enable_irq = softingcs_enable_irq,
	}, {
		.name = "Vector-CANcard-SJA",
		.manf = 0x0168, .prod = 0x084,
		.generation = 1,
		.nbus = 2,
		.freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
		.dpram_size = 0x0800,
		.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
		.reset = softingcs_reset,
		.enable_irq = softingcs_enable_irq,
	}, {
		.name = "Vector-CANcard-2",
		.manf = 0x0168, .prod = 0x085,
		.generation = 2,
		.nbus = 2,
		.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
		.dpram_size = 0x1000,
		.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
		.reset = softingcs_reset,
		.enable_irq = NULL,
	}, {
		.name = "EDICcard-NEC",
		.manf = 0x0168, .prod = 0x102,
		.generation = 1,
		.nbus = 2,
		.freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
		.dpram_size = 0x0800,
		.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
		.reset = softingcs_reset,
		.enable_irq = softingcs_enable_irq,
	}, {
		.name = "EDICcard-2",
		.manf = 0x0168, .prod = 0x105,
		.generation = 2,
		.nbus = 2,
		.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
		.dpram_size = 0x1000,
		.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
		.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
		.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
		.reset = softingcs_reset,
		.enable_irq = NULL,
	}, {
		/* sentinel: manf == 0 terminates the lookup loop */
		0, 0,
	},
};

MODULE_FIRMWARE(fw_dir "bcard.bin");
MODULE_FIRMWARE(fw_dir "ldcard.bin");
MODULE_FIRMWARE(fw_dir "cancard.bin");
MODULE_FIRMWARE(fw_dir "cansja.bin");

MODULE_FIRMWARE(fw_dir "bcard2.bin");
MODULE_FIRMWARE(fw_dir "ldcard2.bin");
MODULE_FIRMWARE(fw_dir "cancrd2.bin");

/*
 * Look up the platform_data entry matching a pcmcia manf/prod id pair.
 * Returns NULL when the card is not one of ours.
 */
static __devinit const struct softing_platform_data
*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
{
	const struct softing_platform_data *lp;

	for (lp = softingcs_platform_data; lp->manf; ++lp) {
		if ((lp->manf == manf) && (lp->prod == prod))
			return lp;
	}
	return NULL;
}

/*
 * platformdata callbacks
 */
/* assert/deassert the card reset via pcmcia config register 2 */
static int softingcs_reset(struct platform_device *pdev, int v)
{
	struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);

	dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
	return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
}

/* gate the card's irq line via pcmcia config register 0 */
static int softingcs_enable_irq(struct platform_device *pdev, int v)
{
	struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);

	dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
	return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
}

/*
 * pcmcia check
 * Callback for pcmcia_loop_config(): validate & claim the iomem window.
 */
static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
		void *priv_data)
{
	struct softing_platform_data *pdat = priv_data;
	struct resource *pres;
	int memspeed = 0;

	WARN_ON(!pdat);
	pres = pcmcia->resource[PCMCIA_IOMEM_0];
	/* the card needs at least a 4k window */
	if (resource_size(pres) < 0x1000)
		return -ERANGE;

	pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
	if (pdat->generation < 2) {
		/* generation 1 cards: 8-bit access with wait states */
		pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
		memspeed = 3;
	} else {
		pres->flags |= WIN_DATA_WIDTH_16;
	}
	return pcmcia_request_window(pcmcia, pres, memspeed);
}

static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
{
	struct platform_device *pdev = pcmcia->priv;

	/* free bits */
	platform_device_unregister(pdev);
	/* release pcmcia stuff */
	pcmcia_disable_device(pcmcia);
}

/*
 * platform_device wrapper
 * pdev->resource has 2 entries: io & irq
 */
static void softingcs_pdev_release(struct device *dev)
{
	/* device release: the platform_device is embedded in a kzalloc'd
	 * wrapper (struct dev below), whose first member it is */
	struct platform_device *pdev = to_platform_device(dev);
	kfree(pdev);
}

/*
 * Bind a matching pcmcia card: configure it, then register a "softing"
 * platform device carrying the iomem & irq resources, which the softing
 * core driver binds to.
 */
static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
{
	int ret;
	struct platform_device *pdev;
	const struct softing_platform_data *pdat;
	struct resource *pres;
	struct dev {
		struct platform_device pdev;
		struct resource res[2];
	} *dev;

	/* find matching platform_data */
	pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
	if (!pdat)
		return -ENOTTY;

	/* setup pcmcia device */
	pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
		CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
	ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
	if (ret)
		goto pcmcia_failed;

	ret = pcmcia_enable_device(pcmcia);
	if (ret < 0)
		goto pcmcia_failed;

	pres = pcmcia->resource[PCMCIA_IOMEM_0];
	if (!pres) {
		ret = -EBADF;
		goto pcmcia_bad;
	}

	/* create softing platform device */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto mem_failed;
	}
	dev->pdev.resource = dev->res;
	dev->pdev.num_resources = ARRAY_SIZE(dev->res);
	dev->pdev.dev.release = softingcs_pdev_release;

	pdev = &dev->pdev;
	pdev->dev.platform_data = (void *)pdat;
	pdev->dev.parent = &pcmcia->dev;
	pcmcia->priv = pdev;

	/* platform device resources */
	pdev->resource[0].flags = IORESOURCE_MEM;
	pdev->resource[0].start = pres->start;
	pdev->resource[0].end = pres->end;

	pdev->resource[1].flags = IORESOURCE_IRQ;
	pdev->resource[1].start = pcmcia->irq;
	pdev->resource[1].end = pdev->resource[1].start;

	/* platform device setup */
	spin_lock(&softingcs_index_lock);
	pdev->id = softingcs_index++;
	spin_unlock(&softingcs_index_lock);
	pdev->name = "softing";
	dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
	ret = platform_device_register(pdev);
	if (ret < 0)
		goto platform_failed;

	dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
	return 0;

platform_failed:
	/*
	 * NOTE(review): after a failed platform_device_register() the
	 * device core may already hold a reference; plain kfree() here
	 * (instead of letting the release callback run via put) looks
	 * suspicious — confirm against the platform device lifecycle docs.
	 */
	kfree(dev);
mem_failed:
pcmcia_bad:
pcmcia_failed:
	pcmcia_disable_device(pcmcia);
	pcmcia->priv = NULL;
	return ret ?: -ENODEV;
}

static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
	/* softing */
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
	/* vector, manufacturer? */
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
	/* EDIC */
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
	PCMCIA_DEVICE_NULL,
};

MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);

static struct pcmcia_driver softingcs_driver = {
	.owner		= THIS_MODULE,
	.name		= "softingcs",
	.id_table	= softingcs_ids,
	.probe		= softingcs_probe,
	.remove		= __devexit_p(softingcs_remove),
};

static int __init softingcs_start(void)
{
	spin_lock_init(&softingcs_index_lock);
	return pcmcia_register_driver(&softingcs_driver);
}

static void __exit softingcs_stop(void)
{
	pcmcia_unregister_driver(&softingcs_driver);
}

module_init(softingcs_start);
module_exit(softingcs_stop);

MODULE_DESCRIPTION("softing CANcard driver"
		", links PCMCIA card to softing driver");
MODULE_LICENSE("GPL v2");
+691
drivers/net/can/softing/softing_fw.c
···
··· 1 + /* 2 + * Copyright (C) 2008-2010 3 + * 4 + * - Kurt Van Dijck, EIA Electronics 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the version 2 of the GNU General Public License 8 + * as published by the Free Software Foundation 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 + */ 19 + 20 + #include <linux/firmware.h> 21 + #include <linux/sched.h> 22 + #include <asm/div64.h> 23 + 24 + #include "softing.h" 25 + 26 + /* 27 + * low level DPRAM command. 28 + * Make sure that card->dpram[DPRAM_FCT_HOST] is preset 29 + */ 30 + static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector, 31 + const char *msg) 32 + { 33 + int ret; 34 + unsigned long stamp; 35 + 36 + iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]); 37 + iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]); 38 + iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]); 39 + /* be sure to flush this to the card */ 40 + wmb(); 41 + stamp = jiffies + 1 * HZ; 42 + /* wait for card */ 43 + do { 44 + /* DPRAM_FCT_HOST is _not_ aligned */ 45 + ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) + 46 + (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8); 47 + /* don't have any cached variables */ 48 + rmb(); 49 + if (ret == RES_OK) 50 + /* read return-value now */ 51 + return ioread16(&card->dpram[DPRAM_FCT_RESULT]); 52 + 53 + if ((ret != vector) || time_after(jiffies, stamp)) 54 + break; 55 + /* process context => relax */ 56 + usleep_range(500, 10000); 57 + } while (1); 58 + 59 + ret = (ret == RES_NONE) ? 
-ETIMEDOUT : -ECANCELED; 60 + dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret); 61 + return ret; 62 + } 63 + 64 + static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg) 65 + { 66 + int ret; 67 + 68 + ret = _softing_fct_cmd(card, cmd, 0, msg); 69 + if (ret > 0) { 70 + dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret); 71 + ret = -EIO; 72 + } 73 + return ret; 74 + } 75 + 76 + int softing_bootloader_command(struct softing *card, int16_t cmd, 77 + const char *msg) 78 + { 79 + int ret; 80 + unsigned long stamp; 81 + 82 + iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]); 83 + iowrite16(cmd, &card->dpram[DPRAM_COMMAND]); 84 + /* be sure to flush this to the card */ 85 + wmb(); 86 + stamp = jiffies + 3 * HZ; 87 + /* wait for card */ 88 + do { 89 + ret = ioread16(&card->dpram[DPRAM_RECEIPT]); 90 + /* don't have any cached variables */ 91 + rmb(); 92 + if (ret == RES_OK) 93 + return 0; 94 + if (time_after(jiffies, stamp)) 95 + break; 96 + /* process context => relax */ 97 + usleep_range(500, 10000); 98 + } while (!signal_pending(current)); 99 + 100 + ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED; 101 + dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret); 102 + return ret; 103 + } 104 + 105 + static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr, 106 + uint16_t *plen, const uint8_t **pdat) 107 + { 108 + uint16_t checksum[2]; 109 + const uint8_t *mem; 110 + const uint8_t *end; 111 + 112 + /* 113 + * firmware records are a binary, unaligned stream composed of: 114 + * uint16_t type; 115 + * uint32_t addr; 116 + * uint16_t len; 117 + * uint8_t dat[len]; 118 + * uint16_t checksum; 119 + * all values in little endian. 120 + * We could define a struct for this, with __attribute__((packed)), 121 + * but would that solve the alignment in _all_ cases (cfr. the 122 + * struct itself may be an odd address)? 
123 + * 124 + * I chose to use leXX_to_cpup() since this solves both 125 + * endianness & alignment. 126 + */ 127 + mem = *pmem; 128 + *ptype = le16_to_cpup((void *)&mem[0]); 129 + *paddr = le32_to_cpup((void *)&mem[2]); 130 + *plen = le16_to_cpup((void *)&mem[6]); 131 + *pdat = &mem[8]; 132 + /* verify checksum */ 133 + end = &mem[8 + *plen]; 134 + checksum[0] = le16_to_cpup((void *)end); 135 + for (checksum[1] = 0; mem < end; ++mem) 136 + checksum[1] += *mem; 137 + if (checksum[0] != checksum[1]) 138 + return -EINVAL; 139 + /* increment */ 140 + *pmem += 10 + *plen; 141 + return 0; 142 + } 143 + 144 + int softing_load_fw(const char *file, struct softing *card, 145 + __iomem uint8_t *dpram, unsigned int size, int offset) 146 + { 147 + const struct firmware *fw; 148 + int ret; 149 + const uint8_t *mem, *end, *dat; 150 + uint16_t type, len; 151 + uint32_t addr; 152 + uint8_t *buf = NULL; 153 + int buflen = 0; 154 + int8_t type_end = 0; 155 + 156 + ret = request_firmware(&fw, file, &card->pdev->dev); 157 + if (ret < 0) 158 + return ret; 159 + dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes" 160 + ", offset %c0x%04x\n", 161 + card->pdat->name, file, (unsigned int)fw->size, 162 + (offset >= 0) ? 
'+' : '-', (unsigned int)abs(offset)); 163 + /* parse the firmware */ 164 + mem = fw->data; 165 + end = &mem[fw->size]; 166 + /* look for header record */ 167 + ret = fw_parse(&mem, &type, &addr, &len, &dat); 168 + if (ret < 0) 169 + goto failed; 170 + if (type != 0xffff) 171 + goto failed; 172 + if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) { 173 + ret = -EINVAL; 174 + goto failed; 175 + } 176 + /* ok, we had a header */ 177 + while (mem < end) { 178 + ret = fw_parse(&mem, &type, &addr, &len, &dat); 179 + if (ret < 0) 180 + goto failed; 181 + if (type == 3) { 182 + /* start address, not used here */ 183 + continue; 184 + } else if (type == 1) { 185 + /* eof */ 186 + type_end = 1; 187 + break; 188 + } else if (type != 0) { 189 + ret = -EINVAL; 190 + goto failed; 191 + } 192 + 193 + if ((addr + len + offset) > size) 194 + goto failed; 195 + memcpy_toio(&dpram[addr + offset], dat, len); 196 + /* be sure to flush caches from IO space */ 197 + mb(); 198 + if (len > buflen) { 199 + /* align buflen */ 200 + buflen = (len + (1024-1)) & ~(1024-1); 201 + buf = krealloc(buf, buflen, GFP_KERNEL); 202 + if (!buf) { 203 + ret = -ENOMEM; 204 + goto failed; 205 + } 206 + } 207 + /* verify record data */ 208 + memcpy_fromio(buf, &dpram[addr + offset], len); 209 + if (memcmp(buf, dat, len)) { 210 + /* is not ok */ 211 + dev_alert(&card->pdev->dev, "DPRAM readback failed\n"); 212 + ret = -EIO; 213 + goto failed; 214 + } 215 + } 216 + if (!type_end) 217 + /* no end record seen */ 218 + goto failed; 219 + ret = 0; 220 + failed: 221 + kfree(buf); 222 + release_firmware(fw); 223 + if (ret < 0) 224 + dev_info(&card->pdev->dev, "firmware %s failed\n", file); 225 + return ret; 226 + } 227 + 228 + int softing_load_app_fw(const char *file, struct softing *card) 229 + { 230 + const struct firmware *fw; 231 + const uint8_t *mem, *end, *dat; 232 + int ret, j; 233 + uint16_t type, len; 234 + uint32_t addr, start_addr = 0; 235 + unsigned int sum, rx_sum; 236 + int8_t type_end 
= 0, type_entrypoint = 0; 237 + 238 + ret = request_firmware(&fw, file, &card->pdev->dev); 239 + if (ret) { 240 + dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n", 241 + file, ret); 242 + return ret; 243 + } 244 + dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n", 245 + file, (unsigned long)fw->size); 246 + /* parse the firmware */ 247 + mem = fw->data; 248 + end = &mem[fw->size]; 249 + /* look for header record */ 250 + ret = fw_parse(&mem, &type, &addr, &len, &dat); 251 + if (ret) 252 + goto failed; 253 + ret = -EINVAL; 254 + if (type != 0xffff) { 255 + dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n", 256 + type); 257 + goto failed; 258 + } 259 + if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) { 260 + dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n", 261 + len, dat); 262 + goto failed; 263 + } 264 + /* ok, we had a header */ 265 + while (mem < end) { 266 + ret = fw_parse(&mem, &type, &addr, &len, &dat); 267 + if (ret) 268 + goto failed; 269 + 270 + if (type == 3) { 271 + /* start address */ 272 + start_addr = addr; 273 + type_entrypoint = 1; 274 + continue; 275 + } else if (type == 1) { 276 + /* eof */ 277 + type_end = 1; 278 + break; 279 + } else if (type != 0) { 280 + dev_alert(&card->pdev->dev, 281 + "unknown record type 0x%04x\n", type); 282 + ret = -EINVAL; 283 + goto failed; 284 + } 285 + 286 + /* regualar data */ 287 + for (sum = 0, j = 0; j < len; ++j) 288 + sum += dat[j]; 289 + /* work in 16bit (target) */ 290 + sum &= 0xffff; 291 + 292 + memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len); 293 + iowrite32(card->pdat->app.offs + card->pdat->app.addr, 294 + &card->dpram[DPRAM_COMMAND + 2]); 295 + iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]); 296 + iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]); 297 + iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]); 298 + ret = softing_bootloader_command(card, 1, "loading app."); 299 + if (ret < 0) 300 + goto failed; 301 + /* verify checksum */ 302 + 
rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]); 303 + if (rx_sum != sum) { 304 + dev_alert(&card->pdev->dev, "SRAM seems to be damaged" 305 + ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum); 306 + ret = -EIO; 307 + goto failed; 308 + } 309 + } 310 + if (!type_end || !type_entrypoint) 311 + goto failed; 312 + /* start application in card */ 313 + iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]); 314 + iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]); 315 + ret = softing_bootloader_command(card, 3, "start app."); 316 + if (ret < 0) 317 + goto failed; 318 + ret = 0; 319 + failed: 320 + release_firmware(fw); 321 + if (ret < 0) 322 + dev_info(&card->pdev->dev, "firmware %s failed\n", file); 323 + return ret; 324 + } 325 + 326 + static int softing_reset_chip(struct softing *card) 327 + { 328 + int ret; 329 + 330 + do { 331 + /* reset chip */ 332 + iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]); 333 + iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]); 334 + iowrite8(1, &card->dpram[DPRAM_RESET]); 335 + iowrite8(0, &card->dpram[DPRAM_RESET+1]); 336 + 337 + ret = softing_fct_cmd(card, 0, "reset_can"); 338 + if (!ret) 339 + break; 340 + if (signal_pending(current)) 341 + /* don't wait any longer */ 342 + break; 343 + } while (1); 344 + card->tx.pending = 0; 345 + return ret; 346 + } 347 + 348 + int softing_chip_poweron(struct softing *card) 349 + { 350 + int ret; 351 + /* sync */ 352 + ret = _softing_fct_cmd(card, 99, 0x55, "sync-a"); 353 + if (ret < 0) 354 + goto failed; 355 + 356 + ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b"); 357 + if (ret < 0) 358 + goto failed; 359 + 360 + ret = softing_reset_chip(card); 361 + if (ret < 0) 362 + goto failed; 363 + /* get_serial */ 364 + ret = softing_fct_cmd(card, 43, "get_serial_number"); 365 + if (ret < 0) 366 + goto failed; 367 + card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]); 368 + /* get_version */ 369 + ret = softing_fct_cmd(card, 12, "get_version"); 370 + if (ret < 0) 371 + goto failed; 372 + card->id.fw_version 
= ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]); 373 + card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]); 374 + card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]); 375 + card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]); 376 + card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]); 377 + return 0; 378 + failed: 379 + return ret; 380 + } 381 + 382 + static void softing_initialize_timestamp(struct softing *card) 383 + { 384 + uint64_t ovf; 385 + 386 + card->ts_ref = ktime_get(); 387 + 388 + /* 16MHz is the reference */ 389 + ovf = 0x100000000ULL * 16; 390 + do_div(ovf, card->pdat->freq ?: 16); 391 + 392 + card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf); 393 + } 394 + 395 + ktime_t softing_raw2ktime(struct softing *card, u32 raw) 396 + { 397 + uint64_t rawl; 398 + ktime_t now, real_offset; 399 + ktime_t target; 400 + ktime_t tmp; 401 + 402 + now = ktime_get(); 403 + real_offset = ktime_sub(ktime_get_real(), now); 404 + 405 + /* find nsec from card */ 406 + rawl = raw * 16; 407 + do_div(rawl, card->pdat->freq ?: 16); 408 + target = ktime_add_us(card->ts_ref, rawl); 409 + /* test for overflows */ 410 + tmp = ktime_add(target, card->ts_overflow); 411 + while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) { 412 + card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow); 413 + target = tmp; 414 + tmp = ktime_add(target, card->ts_overflow); 415 + } 416 + return ktime_add(target, real_offset); 417 + } 418 + 419 + static inline int softing_error_reporting(struct net_device *netdev) 420 + { 421 + struct softing_priv *priv = netdev_priv(netdev); 422 + 423 + return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 424 + ? 
1 : 0; 425 + } 426 + 427 + int softing_startstop(struct net_device *dev, int up) 428 + { 429 + int ret; 430 + struct softing *card; 431 + struct softing_priv *priv; 432 + struct net_device *netdev; 433 + int bus_bitmask_start; 434 + int j, error_reporting; 435 + struct can_frame msg; 436 + const struct can_bittiming *bt; 437 + 438 + priv = netdev_priv(dev); 439 + card = priv->card; 440 + 441 + if (!card->fw.up) 442 + return -EIO; 443 + 444 + ret = mutex_lock_interruptible(&card->fw.lock); 445 + if (ret) 446 + return ret; 447 + 448 + bus_bitmask_start = 0; 449 + if (dev && up) 450 + /* prepare to start this bus as well */ 451 + bus_bitmask_start |= (1 << priv->index); 452 + /* bring netdevs down */ 453 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 454 + netdev = card->net[j]; 455 + if (!netdev) 456 + continue; 457 + priv = netdev_priv(netdev); 458 + 459 + if (dev != netdev) 460 + netif_stop_queue(netdev); 461 + 462 + if (netif_running(netdev)) { 463 + if (dev != netdev) 464 + bus_bitmask_start |= (1 << j); 465 + priv->tx.pending = 0; 466 + priv->tx.echo_put = 0; 467 + priv->tx.echo_get = 0; 468 + /* 469 + * this bus' may just have called open_candev() 470 + * which is rather stupid to call close_candev() 471 + * already 472 + * but we may come here from busoff recovery too 473 + * in which case the echo_skb _needs_ flushing too. 
474 + * just be sure to call open_candev() again 475 + */ 476 + close_candev(netdev); 477 + } 478 + priv->can.state = CAN_STATE_STOPPED; 479 + } 480 + card->tx.pending = 0; 481 + 482 + softing_enable_irq(card, 0); 483 + ret = softing_reset_chip(card); 484 + if (ret) 485 + goto failed; 486 + if (!bus_bitmask_start) 487 + /* no busses to be brought up */ 488 + goto card_done; 489 + 490 + if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2) 491 + && (softing_error_reporting(card->net[0]) 492 + != softing_error_reporting(card->net[1]))) { 493 + dev_alert(&card->pdev->dev, 494 + "err_reporting flag differs for busses\n"); 495 + goto invalid; 496 + } 497 + error_reporting = 0; 498 + if (bus_bitmask_start & 1) { 499 + netdev = card->net[0]; 500 + priv = netdev_priv(netdev); 501 + error_reporting += softing_error_reporting(netdev); 502 + /* init chip 1 */ 503 + bt = &priv->can.bittiming; 504 + iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); 505 + iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); 506 + iowrite16(bt->phase_seg1 + bt->prop_seg, 507 + &card->dpram[DPRAM_FCT_PARAM + 6]); 508 + iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); 509 + iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 : 0, 510 + &card->dpram[DPRAM_FCT_PARAM + 10]); 511 + ret = softing_fct_cmd(card, 1, "initialize_chip[0]"); 512 + if (ret < 0) 513 + goto failed; 514 + /* set mode */ 515 + iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); 516 + iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); 517 + ret = softing_fct_cmd(card, 3, "set_mode[0]"); 518 + if (ret < 0) 519 + goto failed; 520 + /* set filter */ 521 + /* 11bit id & mask */ 522 + iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); 523 + iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); 524 + /* 29bit id.lo & mask.lo & id.hi & mask.hi */ 525 + iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); 526 + iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); 527 + iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); 528 + iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); 529 + ret = softing_fct_cmd(card, 7, "set_filter[0]"); 530 + if (ret < 0) 531 + goto failed; 532 + /* set output control */ 533 + iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); 534 + ret = softing_fct_cmd(card, 5, "set_output[0]"); 535 + if (ret < 0) 536 + goto failed; 537 + } 538 + if (bus_bitmask_start & 2) { 539 + netdev = card->net[1]; 540 + priv = netdev_priv(netdev); 541 + error_reporting += softing_error_reporting(netdev); 542 + /* init chip2 */ 543 + bt = &priv->can.bittiming; 544 + iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); 545 + iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); 546 + iowrite16(bt->phase_seg1 + bt->prop_seg, 547 + &card->dpram[DPRAM_FCT_PARAM + 6]); 548 + iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); 549 + iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 : 0, 550 + &card->dpram[DPRAM_FCT_PARAM + 10]); 551 + ret = softing_fct_cmd(card, 2, "initialize_chip[1]"); 552 + if (ret < 0) 553 + goto failed; 554 + /* set mode2 */ 555 + iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); 556 + iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); 557 + ret = softing_fct_cmd(card, 4, "set_mode[1]"); 558 + if (ret < 0) 559 + goto failed; 560 + /* set filter2 */ 561 + /* 11bit id & mask */ 562 + iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); 563 + iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); 564 + /* 29bit id.lo & mask.lo & id.hi & mask.hi */ 565 + iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); 566 + iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); 567 + iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); 568 + iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); 569 + ret = softing_fct_cmd(card, 8, "set_filter[1]"); 570 + if (ret < 0) 571 + goto failed; 572 + /* set output control2 */ 573 + iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); 574 + ret = softing_fct_cmd(card, 6, "set_output[1]"); 575 + if (ret < 0) 576 + goto failed; 577 + } 578 + /* enable_error_frame */ 579 + /* 580 + * Error reporting is switched off at the moment since 581 + * the receiving of them is not yet 100% verified 582 + * This should be enabled sooner or later 583 + * 584 + if (error_reporting) { 585 + ret = softing_fct_cmd(card, 51, "enable_error_frame"); 586 + if (ret < 0) 587 + goto failed; 588 + } 589 + */ 590 + /* initialize interface */ 591 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); 592 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); 593 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]); 594 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]); 595 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]); 596 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]); 597 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]); 598 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]); 599 + iowrite16(1, 
&card->dpram[DPRAM_FCT_PARAM + 18]); 600 + iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]); 601 + ret = softing_fct_cmd(card, 17, "initialize_interface"); 602 + if (ret < 0) 603 + goto failed; 604 + /* enable_fifo */ 605 + ret = softing_fct_cmd(card, 36, "enable_fifo"); 606 + if (ret < 0) 607 + goto failed; 608 + /* enable fifo tx ack */ 609 + ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]"); 610 + if (ret < 0) 611 + goto failed; 612 + /* enable fifo tx ack2 */ 613 + ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]"); 614 + if (ret < 0) 615 + goto failed; 616 + /* start_chip */ 617 + ret = softing_fct_cmd(card, 11, "start_chip"); 618 + if (ret < 0) 619 + goto failed; 620 + iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]); 621 + iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]); 622 + if (card->pdat->generation < 2) { 623 + iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); 624 + /* flush the DPRAM caches */ 625 + wmb(); 626 + } 627 + 628 + softing_initialize_timestamp(card); 629 + 630 + /* 631 + * do socketcan notifications/status changes 632 + * from here, no errors should occur, or the failed: part 633 + * must be reviewed 634 + */ 635 + memset(&msg, 0, sizeof(msg)); 636 + msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED; 637 + msg.can_dlc = CAN_ERR_DLC; 638 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 639 + if (!(bus_bitmask_start & (1 << j))) 640 + continue; 641 + netdev = card->net[j]; 642 + if (!netdev) 643 + continue; 644 + priv = netdev_priv(netdev); 645 + priv->can.state = CAN_STATE_ERROR_ACTIVE; 646 + open_candev(netdev); 647 + if (dev != netdev) { 648 + /* notify other busses on the restart */ 649 + softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); 650 + ++priv->can.can_stats.restarts; 651 + } 652 + netif_wake_queue(netdev); 653 + } 654 + 655 + /* enable interrupts */ 656 + ret = softing_enable_irq(card, 1); 657 + if (ret) 658 + goto failed; 659 + card_done: 660 + mutex_unlock(&card->fw.lock); 661 + return 0; 662 + invalid: 663 + ret = -EINVAL; 664 + failed: 
665 + softing_enable_irq(card, 0); 666 + softing_reset_chip(card); 667 + mutex_unlock(&card->fw.lock); 668 + /* bring all other interfaces down */ 669 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 670 + netdev = card->net[j]; 671 + if (!netdev) 672 + continue; 673 + dev_close(netdev); 674 + } 675 + return ret; 676 + } 677 + 678 + int softing_default_output(struct net_device *netdev) 679 + { 680 + struct softing_priv *priv = netdev_priv(netdev); 681 + struct softing *card = priv->card; 682 + 683 + switch (priv->chip) { 684 + case 1000: 685 + return (card->pdat->generation < 2) ? 0xfb : 0xfa; 686 + case 5: 687 + return 0x60; 688 + default: 689 + return 0x40; 690 + } 691 + }
+893
drivers/net/can/softing/softing_main.c
···
··· 1 + /* 2 + * Copyright (C) 2008-2010 3 + * 4 + * - Kurt Van Dijck, EIA Electronics 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the version 2 of the GNU General Public License 8 + * as published by the Free Software Foundation 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 + */ 19 + 20 + #include <linux/version.h> 21 + #include <linux/module.h> 22 + #include <linux/init.h> 23 + #include <linux/interrupt.h> 24 + 25 + #include "softing.h" 26 + 27 + #define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1) 28 + 29 + /* 30 + * test is a specific CAN netdev 31 + * is online (ie. 
up 'n running, not sleeping, not busoff 32 + */ 33 + static inline int canif_is_active(struct net_device *netdev) 34 + { 35 + struct can_priv *can = netdev_priv(netdev); 36 + 37 + if (!netif_running(netdev)) 38 + return 0; 39 + return (can->state <= CAN_STATE_ERROR_PASSIVE); 40 + } 41 + 42 + /* reset DPRAM */ 43 + static inline void softing_set_reset_dpram(struct softing *card) 44 + { 45 + if (card->pdat->generation >= 2) { 46 + spin_lock_bh(&card->spin); 47 + iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1, 48 + &card->dpram[DPRAM_V2_RESET]); 49 + spin_unlock_bh(&card->spin); 50 + } 51 + } 52 + 53 + static inline void softing_clr_reset_dpram(struct softing *card) 54 + { 55 + if (card->pdat->generation >= 2) { 56 + spin_lock_bh(&card->spin); 57 + iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1, 58 + &card->dpram[DPRAM_V2_RESET]); 59 + spin_unlock_bh(&card->spin); 60 + } 61 + } 62 + 63 + /* trigger the tx queue-ing */ 64 + static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, 65 + struct net_device *dev) 66 + { 67 + struct softing_priv *priv = netdev_priv(dev); 68 + struct softing *card = priv->card; 69 + int ret; 70 + uint8_t *ptr; 71 + uint8_t fifo_wr, fifo_rd; 72 + struct can_frame *cf = (struct can_frame *)skb->data; 73 + uint8_t buf[DPRAM_TX_SIZE]; 74 + 75 + if (can_dropped_invalid_skb(dev, skb)) 76 + return NETDEV_TX_OK; 77 + 78 + spin_lock(&card->spin); 79 + 80 + ret = NETDEV_TX_BUSY; 81 + if (!card->fw.up || 82 + (card->tx.pending >= TXMAX) || 83 + (priv->tx.pending >= TX_ECHO_SKB_MAX)) 84 + goto xmit_done; 85 + fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]); 86 + fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]); 87 + if (fifo_wr == fifo_rd) 88 + /* fifo full */ 89 + goto xmit_done; 90 + memset(buf, 0, sizeof(buf)); 91 + ptr = buf; 92 + *ptr = CMD_TX; 93 + if (cf->can_id & CAN_RTR_FLAG) 94 + *ptr |= CMD_RTR; 95 + if (cf->can_id & CAN_EFF_FLAG) 96 + *ptr |= CMD_XTD; 97 + if (priv->index) 98 + *ptr |= CMD_BUS2; 99 + ++ptr; 100 + *ptr++ = 
cf->can_dlc; 101 + *ptr++ = (cf->can_id >> 0); 102 + *ptr++ = (cf->can_id >> 8); 103 + if (cf->can_id & CAN_EFF_FLAG) { 104 + *ptr++ = (cf->can_id >> 16); 105 + *ptr++ = (cf->can_id >> 24); 106 + } else { 107 + /* increment 1, not 2 as you might think */ 108 + ptr += 1; 109 + } 110 + if (!(cf->can_id & CAN_RTR_FLAG)) 111 + memcpy(ptr, &cf->data[0], cf->can_dlc); 112 + memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr], 113 + buf, DPRAM_TX_SIZE); 114 + if (++fifo_wr >= DPRAM_TX_CNT) 115 + fifo_wr = 0; 116 + iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]); 117 + card->tx.last_bus = priv->index; 118 + ++card->tx.pending; 119 + ++priv->tx.pending; 120 + can_put_echo_skb(skb, dev, priv->tx.echo_put); 121 + ++priv->tx.echo_put; 122 + if (priv->tx.echo_put >= TX_ECHO_SKB_MAX) 123 + priv->tx.echo_put = 0; 124 + /* can_put_echo_skb() saves the skb, safe to return TX_OK */ 125 + ret = NETDEV_TX_OK; 126 + xmit_done: 127 + spin_unlock(&card->spin); 128 + if (card->tx.pending >= TXMAX) { 129 + int j; 130 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 131 + if (card->net[j]) 132 + netif_stop_queue(card->net[j]); 133 + } 134 + } 135 + if (ret != NETDEV_TX_OK) 136 + netif_stop_queue(dev); 137 + 138 + return ret; 139 + } 140 + 141 + /* 142 + * shortcut for skb delivery 143 + */ 144 + int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg, 145 + ktime_t ktime) 146 + { 147 + struct sk_buff *skb; 148 + struct can_frame *cf; 149 + 150 + skb = alloc_can_skb(netdev, &cf); 151 + if (!skb) 152 + return -ENOMEM; 153 + memcpy(cf, msg, sizeof(*msg)); 154 + skb->tstamp = ktime; 155 + return netif_rx(skb); 156 + } 157 + 158 + /* 159 + * softing_handle_1 160 + * pop 1 entry from the DPRAM queue, and process 161 + */ 162 + static int softing_handle_1(struct softing *card) 163 + { 164 + struct net_device *netdev; 165 + struct softing_priv *priv; 166 + ktime_t ktime; 167 + struct can_frame msg; 168 + int cnt = 0, lost_msg; 169 + uint8_t fifo_rd, fifo_wr, cmd; 170 + 
uint8_t *ptr; 171 + uint32_t tmp_u32; 172 + uint8_t buf[DPRAM_RX_SIZE]; 173 + 174 + memset(&msg, 0, sizeof(msg)); 175 + /* test for lost msgs */ 176 + lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]); 177 + if (lost_msg) { 178 + int j; 179 + /* reset condition */ 180 + iowrite8(0, &card->dpram[DPRAM_RX_LOST]); 181 + /* prepare msg */ 182 + msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; 183 + msg.can_dlc = CAN_ERR_DLC; 184 + msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 185 + /* 186 + * service to all busses, we don't know which it was applicable 187 + * but only service busses that are online 188 + */ 189 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 190 + netdev = card->net[j]; 191 + if (!netdev) 192 + continue; 193 + if (!canif_is_active(netdev)) 194 + /* a dead bus has no overflows */ 195 + continue; 196 + ++netdev->stats.rx_over_errors; 197 + softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); 198 + } 199 + /* prepare for other use */ 200 + memset(&msg, 0, sizeof(msg)); 201 + ++cnt; 202 + } 203 + 204 + fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]); 205 + fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]); 206 + 207 + if (++fifo_rd >= DPRAM_RX_CNT) 208 + fifo_rd = 0; 209 + if (fifo_wr == fifo_rd) 210 + return cnt; 211 + 212 + memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd], 213 + DPRAM_RX_SIZE); 214 + mb(); 215 + /* trigger dual port RAM */ 216 + iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]); 217 + 218 + ptr = buf; 219 + cmd = *ptr++; 220 + if (cmd == 0xff) 221 + /* not quite usefull, probably the card has got out */ 222 + return 0; 223 + netdev = card->net[0]; 224 + if (cmd & CMD_BUS2) 225 + netdev = card->net[1]; 226 + priv = netdev_priv(netdev); 227 + 228 + if (cmd & CMD_ERR) { 229 + uint8_t can_state, state; 230 + 231 + state = *ptr++; 232 + 233 + msg.can_id = CAN_ERR_FLAG; 234 + msg.can_dlc = CAN_ERR_DLC; 235 + 236 + if (state & SF_MASK_BUSOFF) { 237 + can_state = CAN_STATE_BUS_OFF; 238 + msg.can_id |= CAN_ERR_BUSOFF; 239 + state = STATE_BUSOFF; 240 + } else 
if (state & SF_MASK_EPASSIVE) { 241 + can_state = CAN_STATE_ERROR_PASSIVE; 242 + msg.can_id |= CAN_ERR_CRTL; 243 + msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE; 244 + state = STATE_EPASSIVE; 245 + } else { 246 + can_state = CAN_STATE_ERROR_ACTIVE; 247 + msg.can_id |= CAN_ERR_CRTL; 248 + state = STATE_EACTIVE; 249 + } 250 + /* update DPRAM */ 251 + iowrite8(state, &card->dpram[priv->index ? 252 + DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]); 253 + /* timestamp */ 254 + tmp_u32 = le32_to_cpup((void *)ptr); 255 + ptr += 4; 256 + ktime = softing_raw2ktime(card, tmp_u32); 257 + 258 + ++netdev->stats.rx_errors; 259 + /* update internal status */ 260 + if (can_state != priv->can.state) { 261 + priv->can.state = can_state; 262 + if (can_state == CAN_STATE_ERROR_PASSIVE) 263 + ++priv->can.can_stats.error_passive; 264 + else if (can_state == CAN_STATE_BUS_OFF) { 265 + /* this calls can_close_cleanup() */ 266 + can_bus_off(netdev); 267 + netif_stop_queue(netdev); 268 + } 269 + /* trigger socketcan */ 270 + softing_netdev_rx(netdev, &msg, ktime); 271 + } 272 + 273 + } else { 274 + if (cmd & CMD_RTR) 275 + msg.can_id |= CAN_RTR_FLAG; 276 + msg.can_dlc = get_can_dlc(*ptr++); 277 + if (cmd & CMD_XTD) { 278 + msg.can_id |= CAN_EFF_FLAG; 279 + msg.can_id |= le32_to_cpup((void *)ptr); 280 + ptr += 4; 281 + } else { 282 + msg.can_id |= le16_to_cpup((void *)ptr); 283 + ptr += 2; 284 + } 285 + /* timestamp */ 286 + tmp_u32 = le32_to_cpup((void *)ptr); 287 + ptr += 4; 288 + ktime = softing_raw2ktime(card, tmp_u32); 289 + if (!(msg.can_id & CAN_RTR_FLAG)) 290 + memcpy(&msg.data[0], ptr, 8); 291 + ptr += 8; 292 + /* update socket */ 293 + if (cmd & CMD_ACK) { 294 + /* acknowledge, was tx msg */ 295 + struct sk_buff *skb; 296 + skb = priv->can.echo_skb[priv->tx.echo_get]; 297 + if (skb) 298 + skb->tstamp = ktime; 299 + can_get_echo_skb(netdev, priv->tx.echo_get); 300 + ++priv->tx.echo_get; 301 + if (priv->tx.echo_get >= TX_ECHO_SKB_MAX) 302 + priv->tx.echo_get = 0; 303 + if (priv->tx.pending) 304 
+ --priv->tx.pending; 305 + if (card->tx.pending) 306 + --card->tx.pending; 307 + ++netdev->stats.tx_packets; 308 + if (!(msg.can_id & CAN_RTR_FLAG)) 309 + netdev->stats.tx_bytes += msg.can_dlc; 310 + } else { 311 + int ret; 312 + 313 + ret = softing_netdev_rx(netdev, &msg, ktime); 314 + if (ret == NET_RX_SUCCESS) { 315 + ++netdev->stats.rx_packets; 316 + if (!(msg.can_id & CAN_RTR_FLAG)) 317 + netdev->stats.rx_bytes += msg.can_dlc; 318 + } else { 319 + ++netdev->stats.rx_dropped; 320 + } 321 + } 322 + } 323 + ++cnt; 324 + return cnt; 325 + } 326 + 327 + /* 328 + * real interrupt handler 329 + */ 330 + static irqreturn_t softing_irq_thread(int irq, void *dev_id) 331 + { 332 + struct softing *card = (struct softing *)dev_id; 333 + struct net_device *netdev; 334 + struct softing_priv *priv; 335 + int j, offset, work_done; 336 + 337 + work_done = 0; 338 + spin_lock_bh(&card->spin); 339 + while (softing_handle_1(card) > 0) { 340 + ++card->irq.svc_count; 341 + ++work_done; 342 + } 343 + spin_unlock_bh(&card->spin); 344 + /* resume tx queue's */ 345 + offset = card->tx.last_bus; 346 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 347 + if (card->tx.pending >= TXMAX) 348 + break; 349 + netdev = card->net[(j + offset + 1) % card->pdat->nbus]; 350 + if (!netdev) 351 + continue; 352 + priv = netdev_priv(netdev); 353 + if (!canif_is_active(netdev)) 354 + /* it makes no sense to wake dead busses */ 355 + continue; 356 + if (priv->tx.pending >= TX_ECHO_SKB_MAX) 357 + continue; 358 + ++work_done; 359 + netif_wake_queue(netdev); 360 + } 361 + return work_done ? IRQ_HANDLED : IRQ_NONE; 362 + } 363 + 364 + /* 365 + * interrupt routines: 366 + * schedule the 'real interrupt handler' 367 + */ 368 + static irqreturn_t softing_irq_v2(int irq, void *dev_id) 369 + { 370 + struct softing *card = (struct softing *)dev_id; 371 + uint8_t ir; 372 + 373 + ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]); 374 + iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); 375 + return (1 == ir) ? 
IRQ_WAKE_THREAD : IRQ_NONE; 376 + } 377 + 378 + static irqreturn_t softing_irq_v1(int irq, void *dev_id) 379 + { 380 + struct softing *card = (struct softing *)dev_id; 381 + uint8_t ir; 382 + 383 + ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]); 384 + iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]); 385 + return ir ? IRQ_WAKE_THREAD : IRQ_NONE; 386 + } 387 + 388 + /* 389 + * netdev/candev inter-operability 390 + */ 391 + static int softing_netdev_open(struct net_device *ndev) 392 + { 393 + int ret; 394 + 395 + /* check or determine and set bittime */ 396 + ret = open_candev(ndev); 397 + if (!ret) 398 + ret = softing_startstop(ndev, 1); 399 + return ret; 400 + } 401 + 402 + static int softing_netdev_stop(struct net_device *ndev) 403 + { 404 + int ret; 405 + 406 + netif_stop_queue(ndev); 407 + 408 + /* softing cycle does close_candev() */ 409 + ret = softing_startstop(ndev, 0); 410 + return ret; 411 + } 412 + 413 + static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode) 414 + { 415 + int ret; 416 + 417 + switch (mode) { 418 + case CAN_MODE_START: 419 + /* softing_startstop does close_candev() */ 420 + ret = softing_startstop(ndev, 1); 421 + return ret; 422 + case CAN_MODE_STOP: 423 + case CAN_MODE_SLEEP: 424 + return -EOPNOTSUPP; 425 + } 426 + return 0; 427 + } 428 + 429 + /* 430 + * Softing device management helpers 431 + */ 432 + int softing_enable_irq(struct softing *card, int enable) 433 + { 434 + int ret; 435 + 436 + if (!card->irq.nr) { 437 + return 0; 438 + } else if (card->irq.requested && !enable) { 439 + free_irq(card->irq.nr, card); 440 + card->irq.requested = 0; 441 + } else if (!card->irq.requested && enable) { 442 + ret = request_threaded_irq(card->irq.nr, 443 + (card->pdat->generation >= 2) ? 
444 + softing_irq_v2 : softing_irq_v1, 445 + softing_irq_thread, IRQF_SHARED, 446 + dev_name(&card->pdev->dev), card); 447 + if (ret) { 448 + dev_alert(&card->pdev->dev, 449 + "request_threaded_irq(%u) failed\n", 450 + card->irq.nr); 451 + return ret; 452 + } 453 + card->irq.requested = 1; 454 + } 455 + return 0; 456 + } 457 + 458 + static void softing_card_shutdown(struct softing *card) 459 + { 460 + int fw_up = 0; 461 + 462 + if (mutex_lock_interruptible(&card->fw.lock)) 463 + /* return -ERESTARTSYS */; 464 + fw_up = card->fw.up; 465 + card->fw.up = 0; 466 + 467 + if (card->irq.requested && card->irq.nr) { 468 + free_irq(card->irq.nr, card); 469 + card->irq.requested = 0; 470 + } 471 + if (fw_up) { 472 + if (card->pdat->enable_irq) 473 + card->pdat->enable_irq(card->pdev, 0); 474 + softing_set_reset_dpram(card); 475 + if (card->pdat->reset) 476 + card->pdat->reset(card->pdev, 1); 477 + } 478 + mutex_unlock(&card->fw.lock); 479 + } 480 + 481 + static __devinit int softing_card_boot(struct softing *card) 482 + { 483 + int ret, j; 484 + static const uint8_t stream[] = { 485 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }; 486 + unsigned char back[sizeof(stream)]; 487 + 488 + if (mutex_lock_interruptible(&card->fw.lock)) 489 + return -ERESTARTSYS; 490 + if (card->fw.up) { 491 + mutex_unlock(&card->fw.lock); 492 + return 0; 493 + } 494 + /* reset board */ 495 + if (card->pdat->enable_irq) 496 + card->pdat->enable_irq(card->pdev, 1); 497 + /* boot card */ 498 + softing_set_reset_dpram(card); 499 + if (card->pdat->reset) 500 + card->pdat->reset(card->pdev, 1); 501 + for (j = 0; (j + sizeof(stream)) < card->dpram_size; 502 + j += sizeof(stream)) { 503 + 504 + memcpy_toio(&card->dpram[j], stream, sizeof(stream)); 505 + /* flush IO cache */ 506 + mb(); 507 + memcpy_fromio(back, &card->dpram[j], sizeof(stream)); 508 + 509 + if (!memcmp(back, stream, sizeof(stream))) 510 + continue; 511 + /* memory is not equal */ 512 + dev_alert(&card->pdev->dev, "dpram failed at 
0x%04x\n", j); 513 + ret = -EIO; 514 + goto failed; 515 + } 516 + wmb(); 517 + /* load boot firmware */ 518 + ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram, 519 + card->dpram_size, 520 + card->pdat->boot.offs - card->pdat->boot.addr); 521 + if (ret < 0) 522 + goto failed; 523 + /* load loader firmware */ 524 + ret = softing_load_fw(card->pdat->load.fw, card, card->dpram, 525 + card->dpram_size, 526 + card->pdat->load.offs - card->pdat->load.addr); 527 + if (ret < 0) 528 + goto failed; 529 + 530 + if (card->pdat->reset) 531 + card->pdat->reset(card->pdev, 0); 532 + softing_clr_reset_dpram(card); 533 + ret = softing_bootloader_command(card, 0, "card boot"); 534 + if (ret < 0) 535 + goto failed; 536 + ret = softing_load_app_fw(card->pdat->app.fw, card); 537 + if (ret < 0) 538 + goto failed; 539 + 540 + ret = softing_chip_poweron(card); 541 + if (ret < 0) 542 + goto failed; 543 + 544 + card->fw.up = 1; 545 + mutex_unlock(&card->fw.lock); 546 + return 0; 547 + failed: 548 + card->fw.up = 0; 549 + if (card->pdat->enable_irq) 550 + card->pdat->enable_irq(card->pdev, 0); 551 + softing_set_reset_dpram(card); 552 + if (card->pdat->reset) 553 + card->pdat->reset(card->pdev, 1); 554 + mutex_unlock(&card->fw.lock); 555 + return ret; 556 + } 557 + 558 + /* 559 + * netdev sysfs 560 + */ 561 + static ssize_t show_channel(struct device *dev, struct device_attribute *attr, 562 + char *buf) 563 + { 564 + struct net_device *ndev = to_net_dev(dev); 565 + struct softing_priv *priv = netdev2softing(ndev); 566 + 567 + return sprintf(buf, "%i\n", priv->index); 568 + } 569 + 570 + static ssize_t show_chip(struct device *dev, struct device_attribute *attr, 571 + char *buf) 572 + { 573 + struct net_device *ndev = to_net_dev(dev); 574 + struct softing_priv *priv = netdev2softing(ndev); 575 + 576 + return sprintf(buf, "%i\n", priv->chip); 577 + } 578 + 579 + static ssize_t show_output(struct device *dev, struct device_attribute *attr, 580 + char *buf) 581 + { 582 + struct 
net_device *ndev = to_net_dev(dev); 583 + struct softing_priv *priv = netdev2softing(ndev); 584 + 585 + return sprintf(buf, "0x%02x\n", priv->output); 586 + } 587 + 588 + static ssize_t store_output(struct device *dev, struct device_attribute *attr, 589 + const char *buf, size_t count) 590 + { 591 + struct net_device *ndev = to_net_dev(dev); 592 + struct softing_priv *priv = netdev2softing(ndev); 593 + struct softing *card = priv->card; 594 + unsigned long val; 595 + int ret; 596 + 597 + ret = strict_strtoul(buf, 0, &val); 598 + if (ret < 0) 599 + return ret; 600 + val &= 0xFF; 601 + 602 + ret = mutex_lock_interruptible(&card->fw.lock); 603 + if (ret) 604 + return -ERESTARTSYS; 605 + if (netif_running(ndev)) { 606 + mutex_unlock(&card->fw.lock); 607 + return -EBUSY; 608 + } 609 + priv->output = val; 610 + mutex_unlock(&card->fw.lock); 611 + return count; 612 + } 613 + 614 + static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); 615 + static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL); 616 + static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output); 617 + 618 + static const struct attribute *const netdev_sysfs_attrs[] = { 619 + &dev_attr_channel.attr, 620 + &dev_attr_chip.attr, 621 + &dev_attr_output.attr, 622 + NULL, 623 + }; 624 + static const struct attribute_group netdev_sysfs_group = { 625 + .name = NULL, 626 + .attrs = (struct attribute **)netdev_sysfs_attrs, 627 + }; 628 + 629 + static const struct net_device_ops softing_netdev_ops = { 630 + .ndo_open = softing_netdev_open, 631 + .ndo_stop = softing_netdev_stop, 632 + .ndo_start_xmit = softing_netdev_start_xmit, 633 + }; 634 + 635 + static const struct can_bittiming_const softing_btr_const = { 636 + .tseg1_min = 1, 637 + .tseg1_max = 16, 638 + .tseg2_min = 1, 639 + .tseg2_max = 8, 640 + .sjw_max = 4, /* overruled */ 641 + .brp_min = 1, 642 + .brp_max = 32, /* overruled */ 643 + .brp_inc = 1, 644 + }; 645 + 646 + 647 + static __devinit struct net_device 
*softing_netdev_create(struct softing *card, 648 + uint16_t chip_id) 649 + { 650 + struct net_device *netdev; 651 + struct softing_priv *priv; 652 + 653 + netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX); 654 + if (!netdev) { 655 + dev_alert(&card->pdev->dev, "alloc_candev failed\n"); 656 + return NULL; 657 + } 658 + priv = netdev_priv(netdev); 659 + priv->netdev = netdev; 660 + priv->card = card; 661 + memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const)); 662 + priv->btr_const.brp_max = card->pdat->max_brp; 663 + priv->btr_const.sjw_max = card->pdat->max_sjw; 664 + priv->can.bittiming_const = &priv->btr_const; 665 + priv->can.clock.freq = 8000000; 666 + priv->chip = chip_id; 667 + priv->output = softing_default_output(netdev); 668 + SET_NETDEV_DEV(netdev, &card->pdev->dev); 669 + 670 + netdev->flags |= IFF_ECHO; 671 + netdev->netdev_ops = &softing_netdev_ops; 672 + priv->can.do_set_mode = softing_candev_set_mode; 673 + priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 674 + 675 + return netdev; 676 + } 677 + 678 + static __devinit int softing_netdev_register(struct net_device *netdev) 679 + { 680 + int ret; 681 + 682 + netdev->sysfs_groups[0] = &netdev_sysfs_group; 683 + ret = register_candev(netdev); 684 + if (ret) { 685 + dev_alert(&netdev->dev, "register failed\n"); 686 + return ret; 687 + } 688 + return 0; 689 + } 690 + 691 + static void softing_netdev_cleanup(struct net_device *netdev) 692 + { 693 + unregister_candev(netdev); 694 + free_candev(netdev); 695 + } 696 + 697 + /* 698 + * sysfs for Platform device 699 + */ 700 + #define DEV_ATTR_RO(name, member) \ 701 + static ssize_t show_##name(struct device *dev, \ 702 + struct device_attribute *attr, char *buf) \ 703 + { \ 704 + struct softing *card = platform_get_drvdata(to_platform_device(dev)); \ 705 + return sprintf(buf, "%u\n", card->member); \ 706 + } \ 707 + static DEVICE_ATTR(name, 0444, show_##name, NULL) 708 + 709 + #define DEV_ATTR_RO_STR(name, member) \ 710 + static 
ssize_t show_##name(struct device *dev, \ 711 + struct device_attribute *attr, char *buf) \ 712 + { \ 713 + struct softing *card = platform_get_drvdata(to_platform_device(dev)); \ 714 + return sprintf(buf, "%s\n", card->member); \ 715 + } \ 716 + static DEVICE_ATTR(name, 0444, show_##name, NULL) 717 + 718 + DEV_ATTR_RO(serial, id.serial); 719 + DEV_ATTR_RO_STR(firmware, pdat->app.fw); 720 + DEV_ATTR_RO(firmware_version, id.fw_version); 721 + DEV_ATTR_RO_STR(hardware, pdat->name); 722 + DEV_ATTR_RO(hardware_version, id.hw_version); 723 + DEV_ATTR_RO(license, id.license); 724 + DEV_ATTR_RO(frequency, id.freq); 725 + DEV_ATTR_RO(txpending, tx.pending); 726 + 727 + static struct attribute *softing_pdev_attrs[] = { 728 + &dev_attr_serial.attr, 729 + &dev_attr_firmware.attr, 730 + &dev_attr_firmware_version.attr, 731 + &dev_attr_hardware.attr, 732 + &dev_attr_hardware_version.attr, 733 + &dev_attr_license.attr, 734 + &dev_attr_frequency.attr, 735 + &dev_attr_txpending.attr, 736 + NULL, 737 + }; 738 + 739 + static const struct attribute_group softing_pdev_group = { 740 + .name = NULL, 741 + .attrs = softing_pdev_attrs, 742 + }; 743 + 744 + /* 745 + * platform driver 746 + */ 747 + static __devexit int softing_pdev_remove(struct platform_device *pdev) 748 + { 749 + struct softing *card = platform_get_drvdata(pdev); 750 + int j; 751 + 752 + /* first, disable card*/ 753 + softing_card_shutdown(card); 754 + 755 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 756 + if (!card->net[j]) 757 + continue; 758 + softing_netdev_cleanup(card->net[j]); 759 + card->net[j] = NULL; 760 + } 761 + sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group); 762 + 763 + iounmap(card->dpram); 764 + kfree(card); 765 + return 0; 766 + } 767 + 768 + static __devinit int softing_pdev_probe(struct platform_device *pdev) 769 + { 770 + const struct softing_platform_data *pdat = pdev->dev.platform_data; 771 + struct softing *card; 772 + struct net_device *netdev; 773 + struct softing_priv *priv; 774 + 
struct resource *pres; 775 + int ret; 776 + int j; 777 + 778 + if (!pdat) { 779 + dev_warn(&pdev->dev, "no platform data\n"); 780 + return -EINVAL; 781 + } 782 + if (pdat->nbus > ARRAY_SIZE(card->net)) { 783 + dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus); 784 + return -EINVAL; 785 + } 786 + 787 + card = kzalloc(sizeof(*card), GFP_KERNEL); 788 + if (!card) 789 + return -ENOMEM; 790 + card->pdat = pdat; 791 + card->pdev = pdev; 792 + platform_set_drvdata(pdev, card); 793 + mutex_init(&card->fw.lock); 794 + spin_lock_init(&card->spin); 795 + 796 + ret = -EINVAL; 797 + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); 798 + if (!pres) 799 + goto platform_resource_failed;; 800 + card->dpram_phys = pres->start; 801 + card->dpram_size = pres->end - pres->start + 1; 802 + card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size); 803 + if (!card->dpram) { 804 + dev_alert(&card->pdev->dev, "dpram ioremap failed\n"); 805 + goto ioremap_failed; 806 + } 807 + 808 + pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 809 + if (pres) 810 + card->irq.nr = pres->start; 811 + 812 + /* reset card */ 813 + ret = softing_card_boot(card); 814 + if (ret < 0) { 815 + dev_alert(&pdev->dev, "failed to boot\n"); 816 + goto boot_failed; 817 + } 818 + 819 + /* only now, the chip's are known */ 820 + card->id.freq = card->pdat->freq; 821 + 822 + ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group); 823 + if (ret < 0) { 824 + dev_alert(&card->pdev->dev, "sysfs failed\n"); 825 + goto sysfs_failed; 826 + } 827 + 828 + ret = -ENOMEM; 829 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 830 + card->net[j] = netdev = 831 + softing_netdev_create(card, card->id.chip[j]); 832 + if (!netdev) { 833 + dev_alert(&pdev->dev, "failed to make can[%i]", j); 834 + goto netdev_failed; 835 + } 836 + priv = netdev_priv(card->net[j]); 837 + priv->index = j; 838 + ret = softing_netdev_register(netdev); 839 + if (ret) { 840 + free_candev(netdev); 841 + card->net[j] = NULL; 842 + 
dev_alert(&card->pdev->dev, 843 + "failed to register can[%i]\n", j); 844 + goto netdev_failed; 845 + } 846 + } 847 + dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name); 848 + return 0; 849 + 850 + netdev_failed: 851 + for (j = 0; j < ARRAY_SIZE(card->net); ++j) { 852 + if (!card->net[j]) 853 + continue; 854 + softing_netdev_cleanup(card->net[j]); 855 + } 856 + sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group); 857 + sysfs_failed: 858 + softing_card_shutdown(card); 859 + boot_failed: 860 + iounmap(card->dpram); 861 + ioremap_failed: 862 + platform_resource_failed: 863 + kfree(card); 864 + return ret; 865 + } 866 + 867 + static struct platform_driver softing_driver = { 868 + .driver = { 869 + .name = "softing", 870 + .owner = THIS_MODULE, 871 + }, 872 + .probe = softing_pdev_probe, 873 + .remove = __devexit_p(softing_pdev_remove), 874 + }; 875 + 876 + MODULE_ALIAS("platform:softing"); 877 + 878 + static int __init softing_start(void) 879 + { 880 + return platform_driver_register(&softing_driver); 881 + } 882 + 883 + static void __exit softing_stop(void) 884 + { 885 + platform_driver_unregister(&softing_driver); 886 + } 887 + 888 + module_init(softing_start); 889 + module_exit(softing_stop); 890 + 891 + MODULE_DESCRIPTION("Softing DPRAM CAN driver"); 892 + MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>"); 893 + MODULE_LICENSE("GPL v2");
+40
drivers/net/can/softing/softing_platform.h
···
··· 1 + 2 + #include <linux/platform_device.h> 3 + 4 + #ifndef _SOFTING_DEVICE_H_ 5 + #define _SOFTING_DEVICE_H_ 6 + 7 + /* softing firmware directory prefix */ 8 + #define fw_dir "softing-4.6/" 9 + 10 + struct softing_platform_data { 11 + unsigned int manf; 12 + unsigned int prod; 13 + /* 14 + * generation 15 + * 1st with NEC or SJA1000 16 + * 8bit, exclusive interrupt, ... 17 + * 2nd only SJA1000 18 + * 16bit, shared interrupt 19 + */ 20 + int generation; 21 + int nbus; /* # busses on device */ 22 + unsigned int freq; /* operating frequency in Hz */ 23 + unsigned int max_brp; 24 + unsigned int max_sjw; 25 + unsigned long dpram_size; 26 + const char *name; 27 + struct { 28 + unsigned long offs; 29 + unsigned long addr; 30 + const char *fw; 31 + } boot, load, app; 32 + /* 33 + * reset() function 34 + * bring pdev in or out of reset, depending on value 35 + */ 36 + int (*reset)(struct platform_device *pdev, int value); 37 + int (*enable_irq)(struct platform_device *pdev, int value); 38 + }; 39 + 40 + #endif
+6 -6
drivers/net/cnic.c
··· 699 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 700 { 701 int i; 702 - u32 *page_table = dma->pgtbl; 703 704 for (i = 0; i < dma->num_pages; i++) { 705 /* Each entry needs to be in big endian format. */ 706 - *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 707 page_table++; 708 - *page_table = (u32) dma->pg_map_arr[i]; 709 page_table++; 710 } 711 } ··· 713 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 714 { 715 int i; 716 - u32 *page_table = dma->pgtbl; 717 718 for (i = 0; i < dma->num_pages; i++) { 719 /* Each entry needs to be in little endian format. */ 720 - *page_table = dma->pg_map_arr[i] & 0xffffffff; 721 page_table++; 722 - *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 723 page_table++; 724 } 725 }
··· 699 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 700 { 701 int i; 702 + __le32 *page_table = (__le32 *) dma->pgtbl; 703 704 for (i = 0; i < dma->num_pages; i++) { 705 /* Each entry needs to be in big endian format. */ 706 + *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); 707 page_table++; 708 + *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); 709 page_table++; 710 } 711 } ··· 713 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 714 { 715 int i; 716 + __le32 *page_table = (__le32 *) dma->pgtbl; 717 718 for (i = 0; i < dma->num_pages; i++) { 719 /* Each entry needs to be in little endian format. */ 720 + *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); 721 page_table++; 722 + *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); 723 page_table++; 724 } 725 }
+2 -1
drivers/net/cxgb4/cxgb4_main.c
··· 2710 struct port_info *pi = netdev_priv(dev); 2711 struct adapter *adapter = pi->adapter; 2712 2713 if (!(adapter->flags & FULL_INIT_DONE)) { 2714 err = cxgb_up(adapter); 2715 if (err < 0) ··· 3663 pi->xact_addr_filt = -1; 3664 pi->rx_offload = RX_CSO; 3665 pi->port_id = i; 3666 - netif_carrier_off(netdev); 3667 netdev->irq = pdev->irq; 3668 3669 netdev->features |= NETIF_F_SG | TSO_FLAGS;
··· 2710 struct port_info *pi = netdev_priv(dev); 2711 struct adapter *adapter = pi->adapter; 2712 2713 + netif_carrier_off(dev); 2714 + 2715 if (!(adapter->flags & FULL_INIT_DONE)) { 2716 err = cxgb_up(adapter); 2717 if (err < 0) ··· 3661 pi->xact_addr_filt = -1; 3662 pi->rx_offload = RX_CSO; 3663 pi->port_id = i; 3664 netdev->irq = pdev->irq; 3665 3666 netdev->features |= NETIF_F_SG | TSO_FLAGS;
+1 -1
drivers/net/pch_gbe/pch_gbe_main.c
··· 2247 struct net_device *netdev = pci_get_drvdata(pdev); 2248 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2249 2250 - flush_scheduled_work(); 2251 unregister_netdev(netdev); 2252 2253 pch_gbe_hal_phy_hw_reset(&adapter->hw);
··· 2247 struct net_device *netdev = pci_get_drvdata(pdev); 2248 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2249 2250 + cancel_work_sync(&adapter->reset_task); 2251 unregister_netdev(netdev); 2252 2253 pch_gbe_hal_phy_hw_reset(&adapter->hw);
+10 -85
drivers/net/tg3.c
··· 60 #define BAR_0 0 61 #define BAR_2 2 62 63 - #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 64 - #define TG3_VLAN_TAG_USED 1 65 - #else 66 - #define TG3_VLAN_TAG_USED 0 67 - #endif 68 - 69 #include "tg3.h" 70 71 #define DRV_MODULE_NAME "tg3" ··· 127 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ 128 TG3_TX_RING_SIZE) 129 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 130 - 131 - #define TG3_RX_DMA_ALIGN 16 132 - #define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN) 133 134 #define TG3_DMA_BYTE_ENAB 64 135 ··· 4713 struct sk_buff *skb; 4714 dma_addr_t dma_addr; 4715 u32 opaque_key, desc_idx, *post_ptr; 4716 - bool hw_vlan __maybe_unused = false; 4717 - u16 vtag __maybe_unused = 0; 4718 4719 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4720 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; ··· 4771 tg3_recycle_rx(tnapi, tpr, opaque_key, 4772 desc_idx, *post_ptr); 4773 4774 - copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + 4775 TG3_RAW_IP_ALIGN); 4776 if (copy_skb == NULL) 4777 goto drop_it_no_recycle; 4778 4779 - skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); 4780 skb_put(copy_skb, len); 4781 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4782 skb_copy_from_linear_data(skb, copy_skb->data, len); ··· 4803 } 4804 4805 if (desc->type_flags & RXD_FLAG_VLAN && 4806 - !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { 4807 - vtag = desc->err_vlan & RXD_VLAN_MASK; 4808 - #if TG3_VLAN_TAG_USED 4809 - if (tp->vlgrp) 4810 - hw_vlan = true; 4811 - else 4812 - #endif 4813 - { 4814 - struct vlan_ethhdr *ve = (struct vlan_ethhdr *) 4815 - __skb_push(skb, VLAN_HLEN); 4816 4817 - memmove(ve, skb->data + VLAN_HLEN, 4818 - ETH_ALEN * 2); 4819 - ve->h_vlan_proto = htons(ETH_P_8021Q); 4820 - ve->h_vlan_TCI = htons(vtag); 4821 - } 4822 - } 4823 - 4824 - #if TG3_VLAN_TAG_USED 4825 - if (hw_vlan) 4826 - vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb); 4827 - else 4828 - #endif 4829 - 
napi_gro_receive(&tnapi->napi, skb); 4830 4831 received++; 4832 budget--; ··· 5710 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5711 } 5712 5713 - #if TG3_VLAN_TAG_USED 5714 if (vlan_tx_tag_present(skb)) 5715 base_flags |= (TXD_FLAG_VLAN | 5716 (vlan_tx_tag_get(skb) << 16)); 5717 - #endif 5718 5719 len = skb_headlen(skb); 5720 ··· 5954 } 5955 } 5956 } 5957 - #if TG3_VLAN_TAG_USED 5958 if (vlan_tx_tag_present(skb)) 5959 base_flags |= (TXD_FLAG_VLAN | 5960 (vlan_tx_tag_get(skb) << 16)); 5961 - #endif 5962 5963 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5964 !mss && skb->len > VLAN_ETH_FRAME_LEN) ··· 9499 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9500 RX_MODE_KEEP_VLAN_TAG); 9501 9502 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9503 * flag clear. 9504 - */ 9505 - #if TG3_VLAN_TAG_USED 9506 - if (!tp->vlgrp && 9507 - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9508 - rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9509 - #else 9510 - /* By definition, VLAN is disabled always in this 9511 - * case. 9512 */ 9513 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9514 rx_mode |= RX_MODE_KEEP_VLAN_TAG; ··· 11189 } 11190 return -EOPNOTSUPP; 11191 } 11192 - 11193 - #if TG3_VLAN_TAG_USED 11194 - static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 11195 - { 11196 - struct tg3 *tp = netdev_priv(dev); 11197 - 11198 - if (!netif_running(dev)) { 11199 - tp->vlgrp = grp; 11200 - return; 11201 - } 11202 - 11203 - tg3_netif_stop(tp); 11204 - 11205 - tg3_full_lock(tp, 0); 11206 - 11207 - tp->vlgrp = grp; 11208 - 11209 - /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. 
*/ 11210 - __tg3_set_rx_mode(dev); 11211 - 11212 - tg3_netif_start(tp); 11213 - 11214 - tg3_full_unlock(tp); 11215 - } 11216 - #endif 11217 11218 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 11219 { ··· 13001 13002 static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 13003 { 13004 - #if TG3_VLAN_TAG_USED 13005 dev->vlan_features |= flags; 13006 - #endif 13007 } 13008 13009 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) ··· 13794 else 13795 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13796 13797 - tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; 13798 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 13799 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13800 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 13801 - tp->rx_offset -= NET_IP_ALIGN; 13802 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 13803 tp->rx_copy_thresh = ~(u16)0; 13804 #endif ··· 14562 .ndo_do_ioctl = tg3_ioctl, 14563 .ndo_tx_timeout = tg3_tx_timeout, 14564 .ndo_change_mtu = tg3_change_mtu, 14565 - #if TG3_VLAN_TAG_USED 14566 - .ndo_vlan_rx_register = tg3_vlan_rx_register, 14567 - #endif 14568 #ifdef CONFIG_NET_POLL_CONTROLLER 14569 .ndo_poll_controller = tg3_poll_controller, 14570 #endif ··· 14578 .ndo_do_ioctl = tg3_ioctl, 14579 .ndo_tx_timeout = tg3_tx_timeout, 14580 .ndo_change_mtu = tg3_change_mtu, 14581 - #if TG3_VLAN_TAG_USED 14582 - .ndo_vlan_rx_register = tg3_vlan_rx_register, 14583 - #endif 14584 #ifdef CONFIG_NET_POLL_CONTROLLER 14585 .ndo_poll_controller = tg3_poll_controller, 14586 #endif ··· 14627 14628 SET_NETDEV_DEV(dev, &pdev->dev); 14629 14630 - #if TG3_VLAN_TAG_USED 14631 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 14632 - #endif 14633 14634 tp = netdev_priv(dev); 14635 tp->pdev = pdev;
··· 60 #define BAR_0 0 61 #define BAR_2 2 62 63 #include "tg3.h" 64 65 #define DRV_MODULE_NAME "tg3" ··· 133 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ 134 TG3_TX_RING_SIZE) 135 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 136 137 #define TG3_DMA_BYTE_ENAB 64 138 ··· 4722 struct sk_buff *skb; 4723 dma_addr_t dma_addr; 4724 u32 opaque_key, desc_idx, *post_ptr; 4725 4726 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4727 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; ··· 4782 tg3_recycle_rx(tnapi, tpr, opaque_key, 4783 desc_idx, *post_ptr); 4784 4785 + copy_skb = netdev_alloc_skb(tp->dev, len + 4786 TG3_RAW_IP_ALIGN); 4787 if (copy_skb == NULL) 4788 goto drop_it_no_recycle; 4789 4790 + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); 4791 skb_put(copy_skb, len); 4792 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4793 skb_copy_from_linear_data(skb, copy_skb->data, len); ··· 4814 } 4815 4816 if (desc->type_flags & RXD_FLAG_VLAN && 4817 + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) 4818 + __vlan_hwaccel_put_tag(skb, 4819 + desc->err_vlan & RXD_VLAN_MASK); 4820 4821 + napi_gro_receive(&tnapi->napi, skb); 4822 4823 received++; 4824 budget--; ··· 5740 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5741 } 5742 5743 if (vlan_tx_tag_present(skb)) 5744 base_flags |= (TXD_FLAG_VLAN | 5745 (vlan_tx_tag_get(skb) << 16)); 5746 5747 len = skb_headlen(skb); 5748 ··· 5986 } 5987 } 5988 } 5989 + 5990 if (vlan_tx_tag_present(skb)) 5991 base_flags |= (TXD_FLAG_VLAN | 5992 (vlan_tx_tag_get(skb) << 16)); 5993 5994 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5995 !mss && skb->len > VLAN_ETH_FRAME_LEN) ··· 9532 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9533 RX_MODE_KEEP_VLAN_TAG); 9534 9535 + #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) 9536 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9537 * flag clear. 
9538 */ 9539 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9540 rx_mode |= RX_MODE_KEEP_VLAN_TAG; ··· 11229 } 11230 return -EOPNOTSUPP; 11231 } 11232 11233 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 11234 { ··· 13066 13067 static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 13068 { 13069 dev->vlan_features |= flags; 13070 } 13071 13072 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) ··· 13861 else 13862 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13863 13864 + tp->rx_offset = NET_IP_ALIGN; 13865 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 13866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13867 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 13868 + tp->rx_offset = 0; 13869 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 13870 tp->rx_copy_thresh = ~(u16)0; 13871 #endif ··· 14629 .ndo_do_ioctl = tg3_ioctl, 14630 .ndo_tx_timeout = tg3_tx_timeout, 14631 .ndo_change_mtu = tg3_change_mtu, 14632 #ifdef CONFIG_NET_POLL_CONTROLLER 14633 .ndo_poll_controller = tg3_poll_controller, 14634 #endif ··· 14648 .ndo_do_ioctl = tg3_ioctl, 14649 .ndo_tx_timeout = tg3_tx_timeout, 14650 .ndo_change_mtu = tg3_change_mtu, 14651 #ifdef CONFIG_NET_POLL_CONTROLLER 14652 .ndo_poll_controller = tg3_poll_controller, 14653 #endif ··· 14700 14701 SET_NETDEV_DEV(dev, &pdev->dev); 14702 14703 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 14704 14705 tp = netdev_priv(dev); 14706 tp->pdev = pdev;
-3
drivers/net/tg3.h
··· 2808 u32 rx_std_max_post; 2809 u32 rx_offset; 2810 u32 rx_pkt_map_sz; 2811 - #if TG3_VLAN_TAG_USED 2812 - struct vlan_group *vlgrp; 2813 - #endif 2814 2815 2816 /* begin "everything else" cacheline(s) section */
··· 2808 u32 rx_std_max_post; 2809 u32 rx_offset; 2810 u32 rx_pkt_map_sz; 2811 2812 2813 /* begin "everything else" cacheline(s) section */
+1
drivers/net/usb/kaweth.c
··· 406 407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { 408 err("Firmware too big: %zu", fw->size); 409 return -ENOSPC; 410 } 411 data_len = fw->size;
··· 406 407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { 408 err("Firmware too big: %zu", fw->size); 409 + release_firmware(fw); 410 return -ENOSPC; 411 } 412 data_len = fw->size;
+5 -1
drivers/net/wireless/ath/ath9k/hw.c
··· 369 else 370 ah->config.ht_enable = 0; 371 372 ah->config.rx_intr_mitigation = true; 373 ah->config.pcieSerDesWrite = true; 374 ··· 1936 pCap->rx_status_len = sizeof(struct ar9003_rxs); 1937 pCap->tx_desc_len = sizeof(struct ar9003_txc); 1938 pCap->txs_len = sizeof(struct ar9003_txs); 1939 - if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) 1940 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; 1941 } else { 1942 pCap->tx_desc_len = sizeof(struct ath_desc);
··· 369 else 370 ah->config.ht_enable = 0; 371 372 + /* PAPRD needs some more work to be enabled */ 373 + ah->config.paprd_disable = 1; 374 + 375 ah->config.rx_intr_mitigation = true; 376 ah->config.pcieSerDesWrite = true; 377 ··· 1933 pCap->rx_status_len = sizeof(struct ar9003_rxs); 1934 pCap->tx_desc_len = sizeof(struct ar9003_txc); 1935 pCap->txs_len = sizeof(struct ar9003_txs); 1936 + if (!ah->config.paprd_disable && 1937 + ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) 1938 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; 1939 } else { 1940 pCap->tx_desc_len = sizeof(struct ath_desc);
+1
drivers/net/wireless/ath/ath9k/hw.h
··· 225 u32 pcie_waen; 226 u8 analog_shiftreg; 227 u8 ht_enable; 228 u32 ofdm_trig_low; 229 u32 ofdm_trig_high; 230 u32 cck_trig_high;
··· 225 u32 pcie_waen; 226 u8 analog_shiftreg; 227 u8 ht_enable; 228 + u8 paprd_disable; 229 u32 ofdm_trig_low; 230 u32 ofdm_trig_high; 231 u32 cck_trig_high;
+5 -3
drivers/net/wireless/ath/ath9k/main.c
··· 592 u32 status = sc->intrstatus; 593 u32 rxmask; 594 595 - ath9k_ps_wakeup(sc); 596 - 597 if (status & ATH9K_INT_FATAL) { 598 ath_reset(sc, true); 599 - ath9k_ps_restore(sc); 600 return; 601 } 602 603 spin_lock(&sc->sc_pcu_lock); 604 605 if (!ath9k_hw_check_alive(ah)) ··· 967 /* Stop ANI */ 968 del_timer_sync(&common->ani.timer); 969 970 spin_lock_bh(&sc->sc_pcu_lock); 971 972 ieee80211_stop_queues(hw); ··· 1014 1015 /* Start ANI */ 1016 ath_start_ani(common); 1017 1018 return r; 1019 } ··· 1701 skip_chan_change: 1702 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1703 sc->config.txpowlimit = 2 * conf->power_level; 1704 ath_update_txpow(sc); 1705 } 1706 1707 spin_lock_bh(&sc->wiphy_lock);
··· 592 u32 status = sc->intrstatus; 593 u32 rxmask; 594 595 if (status & ATH9K_INT_FATAL) { 596 ath_reset(sc, true); 597 return; 598 } 599 600 + ath9k_ps_wakeup(sc); 601 spin_lock(&sc->sc_pcu_lock); 602 603 if (!ath9k_hw_check_alive(ah)) ··· 969 /* Stop ANI */ 970 del_timer_sync(&common->ani.timer); 971 972 + ath9k_ps_wakeup(sc); 973 spin_lock_bh(&sc->sc_pcu_lock); 974 975 ieee80211_stop_queues(hw); ··· 1015 1016 /* Start ANI */ 1017 ath_start_ani(common); 1018 + ath9k_ps_restore(sc); 1019 1020 return r; 1021 } ··· 1701 skip_chan_change: 1702 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1703 sc->config.txpowlimit = 2 * conf->power_level; 1704 + ath9k_ps_wakeup(sc); 1705 ath_update_txpow(sc); 1706 + ath9k_ps_restore(sc); 1707 } 1708 1709 spin_lock_bh(&sc->wiphy_lock);
-2
drivers/net/wireless/ath/ath9k/xmit.c
··· 2113 if (needreset) { 2114 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, 2115 "tx hung, resetting the chip\n"); 2116 - ath9k_ps_wakeup(sc); 2117 ath_reset(sc, true); 2118 - ath9k_ps_restore(sc); 2119 } 2120 2121 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
··· 2113 if (needreset) { 2114 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, 2115 "tx hung, resetting the chip\n"); 2116 ath_reset(sc, true); 2117 } 2118 2119 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+1
drivers/net/wireless/iwlwifi/iwl-4965.c
··· 2624 .fw_name_pre = IWL4965_FW_PRE, 2625 .ucode_api_max = IWL4965_UCODE_API_MAX, 2626 .ucode_api_min = IWL4965_UCODE_API_MIN, 2627 .valid_tx_ant = ANT_AB, 2628 .valid_rx_ant = ANT_ABC, 2629 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
··· 2624 .fw_name_pre = IWL4965_FW_PRE, 2625 .ucode_api_max = IWL4965_UCODE_API_MAX, 2626 .ucode_api_min = IWL4965_UCODE_API_MIN, 2627 + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 2628 .valid_tx_ant = ANT_AB, 2629 .valid_rx_ant = ANT_ABC, 2630 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+7 -4
drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
··· 152 153 eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); 154 155 - priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> 156 EEPROM_SKU_CAP_BAND_POS); 157 - if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) 158 - priv->cfg->sku |= IWL_SKU_N; 159 - 160 if (!priv->cfg->sku) { 161 IWL_ERR(priv, "Invalid device sku\n"); 162 return -EINVAL;
··· 152 153 eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); 154 155 + if (!priv->cfg->sku) { 156 + /* not using sku overwrite */ 157 + priv->cfg->sku = 158 + ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> 159 EEPROM_SKU_CAP_BAND_POS); 160 + if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) 161 + priv->cfg->sku |= IWL_SKU_N; 162 + } 163 if (!priv->cfg->sku) { 164 IWL_ERR(priv, "Invalid device sku\n"); 165 return -EINVAL;
+1
drivers/net/wireless/rt2x00/rt73usb.c
··· 2446 { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, 2447 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, 2448 { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, 2449 /* Qcom */ 2450 { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, 2451 { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
··· 2446 { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, 2447 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, 2448 { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, 2449 + { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) }, 2450 /* Qcom */ 2451 { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, 2452 { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
+9 -2
drivers/net/wireless/rtlwifi/pci.c
··· 619 struct sk_buff *uskb = NULL; 620 u8 *pdata; 621 uskb = dev_alloc_skb(skb->len + 128); 622 memcpy(IEEE80211_SKB_RXCB(uskb), 623 &rx_status, 624 sizeof(rx_status)); ··· 648 new_skb = dev_alloc_skb(rtlpci->rxbuffersize); 649 if (unlikely(!new_skb)) { 650 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), 651 - DBG_DMESG, 652 ("can't alloc skb for rx\n")); 653 goto done; 654 } ··· 1073 struct sk_buff *skb = 1074 dev_alloc_skb(rtlpci->rxbuffersize); 1075 u32 bufferaddress; 1076 - entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; 1077 if (!skb) 1078 return 0; 1079 1080 /*skb->dev = dev; */ 1081
··· 619 struct sk_buff *uskb = NULL; 620 u8 *pdata; 621 uskb = dev_alloc_skb(skb->len + 128); 622 + if (!uskb) { 623 + RT_TRACE(rtlpriv, 624 + (COMP_INTR | COMP_RECV), 625 + DBG_EMERG, 626 + ("can't alloc rx skb\n")); 627 + goto done; 628 + } 629 memcpy(IEEE80211_SKB_RXCB(uskb), 630 &rx_status, 631 sizeof(rx_status)); ··· 641 new_skb = dev_alloc_skb(rtlpci->rxbuffersize); 642 if (unlikely(!new_skb)) { 643 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), 644 + DBG_EMERG, 645 ("can't alloc skb for rx\n")); 646 goto done; 647 } ··· 1066 struct sk_buff *skb = 1067 dev_alloc_skb(rtlpci->rxbuffersize); 1068 u32 bufferaddress; 1069 if (!skb) 1070 return 0; 1071 + entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; 1072 1073 /*skb->dev = dev; */ 1074
+1
include/net/bluetooth/hci_core.h
··· 184 __u32 link_mode; 185 __u8 auth_type; 186 __u8 sec_level; 187 __u8 power_save; 188 __u16 disc_timeout; 189 unsigned long pend;
··· 184 __u32 link_mode; 185 __u8 auth_type; 186 __u8 sec_level; 187 + __u8 pending_sec_level; 188 __u8 power_save; 189 __u16 disc_timeout; 190 unsigned long pend;
+5 -3
include/net/sch_generic.h
··· 445 { 446 __skb_queue_tail(list, skb); 447 sch->qstats.backlog += qdisc_pkt_len(skb); 448 - qdisc_bstats_update(sch, skb); 449 450 return NET_XMIT_SUCCESS; 451 } ··· 459 { 460 struct sk_buff *skb = __skb_dequeue(list); 461 462 - if (likely(skb != NULL)) 463 sch->qstats.backlog -= qdisc_pkt_len(skb); 464 465 return skb; 466 } ··· 475 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, 476 struct sk_buff_head *list) 477 { 478 - struct sk_buff *skb = __qdisc_dequeue_head(sch, list); 479 480 if (likely(skb != NULL)) { 481 unsigned int len = qdisc_pkt_len(skb); 482 kfree_skb(skb); 483 return len; 484 }
··· 445 { 446 __skb_queue_tail(list, skb); 447 sch->qstats.backlog += qdisc_pkt_len(skb); 448 449 return NET_XMIT_SUCCESS; 450 } ··· 460 { 461 struct sk_buff *skb = __skb_dequeue(list); 462 463 + if (likely(skb != NULL)) { 464 sch->qstats.backlog -= qdisc_pkt_len(skb); 465 + qdisc_bstats_update(sch, skb); 466 + } 467 468 return skb; 469 } ··· 474 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, 475 struct sk_buff_head *list) 476 { 477 + struct sk_buff *skb = __skb_dequeue(list); 478 479 if (likely(skb != NULL)) { 480 unsigned int len = qdisc_pkt_len(skb); 481 + sch->qstats.backlog -= len; 482 kfree_skb(skb); 483 return len; 484 }
+5 -5
lib/textsearch.c
··· 13 * 14 * INTRODUCTION 15 * 16 - * The textsearch infrastructure provides text searching facitilies for 17 * both linear and non-linear data. Individual search algorithms are 18 * implemented in modules and chosen by the user. 19 * ··· 43 * to the algorithm to store persistent variables. 44 * (4) Core eventually resets the search offset and forwards the find() 45 * request to the algorithm. 46 - * (5) Algorithm calls get_next_block() provided by the user continously 47 * to fetch the data to be searched in block by block. 48 * (6) Algorithm invokes finish() after the last call to get_next_block 49 * to clean up any leftovers from get_next_block. (Optional) ··· 58 * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE 59 * to perform case insensitive matching. But it might slow down 60 * performance of algorithm, so you should use it at own your risk. 61 - * The returned configuration may then be used for an arbitary 62 * amount of times and even in parallel as long as a separate struct 63 * ts_state variable is provided to every instance. 64 * 65 * The actual search is performed by either calling textsearch_find_- 66 * continuous() for linear data or by providing an own get_next_block() 67 * implementation and calling textsearch_find(). Both functions return 68 - * the position of the first occurrence of the patern or UINT_MAX if 69 - * no match was found. Subsequent occurences can be found by calling 70 * textsearch_next() regardless of the linearity of the data. 71 * 72 * Once you're done using a configuration it must be given back via
··· 13 * 14 * INTRODUCTION 15 * 16 + * The textsearch infrastructure provides text searching facilities for 17 * both linear and non-linear data. Individual search algorithms are 18 * implemented in modules and chosen by the user. 19 * ··· 43 * to the algorithm to store persistent variables. 44 * (4) Core eventually resets the search offset and forwards the find() 45 * request to the algorithm. 46 + * (5) Algorithm calls get_next_block() provided by the user continuously 47 * to fetch the data to be searched in block by block. 48 * (6) Algorithm invokes finish() after the last call to get_next_block 49 * to clean up any leftovers from get_next_block. (Optional) ··· 58 * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE 59 * to perform case insensitive matching. But it might slow down 60 * performance of algorithm, so you should use it at own your risk. 61 + * The returned configuration may then be used for an arbitrary 62 * amount of times and even in parallel as long as a separate struct 63 * ts_state variable is provided to every instance. 64 * 65 * The actual search is performed by either calling textsearch_find_- 66 * continuous() for linear data or by providing an own get_next_block() 67 * implementation and calling textsearch_find(). Both functions return 68 + * the position of the first occurrence of the pattern or UINT_MAX if 69 + * no match was found. Subsequent occurrences can be found by calling 70 * textsearch_next() regardless of the linearity of the data. 71 * 72 * Once you're done using a configuration it must be given back via
+9 -7
net/bluetooth/hci_conn.c
··· 379 hci_conn_hold(acl); 380 381 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { 382 - acl->sec_level = sec_level; 383 acl->auth_type = auth_type; 384 hci_acl_connect(acl); 385 - } else { 386 - if (acl->sec_level < sec_level) 387 - acl->sec_level = sec_level; 388 - if (acl->auth_type < auth_type) 389 - acl->auth_type = auth_type; 390 } 391 392 if (type == ACL_LINK) ··· 438 { 439 BT_DBG("conn %p", conn); 440 441 if (sec_level > conn->sec_level) 442 - conn->sec_level = sec_level; 443 else if (conn->link_mode & HCI_LM_AUTH) 444 return 1; 445 446 conn->auth_type = auth_type; 447
··· 379 hci_conn_hold(acl); 380 381 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { 382 + acl->sec_level = BT_SECURITY_LOW; 383 + acl->pending_sec_level = sec_level; 384 acl->auth_type = auth_type; 385 hci_acl_connect(acl); 386 } 387 388 if (type == ACL_LINK) ··· 442 { 443 BT_DBG("conn %p", conn); 444 445 + if (conn->pending_sec_level > sec_level) 446 + sec_level = conn->pending_sec_level; 447 + 448 if (sec_level > conn->sec_level) 449 + conn->pending_sec_level = sec_level; 450 else if (conn->link_mode & HCI_LM_AUTH) 451 return 1; 452 + 453 + /* Make sure we preserve an existing MITM requirement*/ 454 + auth_type |= (conn->auth_type & 0x01); 455 456 conn->auth_type = auth_type; 457
+4
net/bluetooth/hci_core.c
··· 1011 1012 destroy_workqueue(hdev->workqueue); 1013 1014 __hci_dev_put(hdev); 1015 1016 return 0;
··· 1011 1012 destroy_workqueue(hdev->workqueue); 1013 1014 + hci_dev_lock_bh(hdev); 1015 + hci_blacklist_clear(hdev); 1016 + hci_dev_unlock_bh(hdev); 1017 + 1018 __hci_dev_put(hdev); 1019 1020 return 0;
+5 -4
net/bluetooth/hci_event.c
··· 692 if (conn->state != BT_CONFIG || !conn->out) 693 return 0; 694 695 - if (conn->sec_level == BT_SECURITY_SDP) 696 return 0; 697 698 /* Only request authentication for SSP connections or non-SSP 699 * devices with sec_level HIGH */ 700 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && 701 - conn->sec_level != BT_SECURITY_HIGH) 702 return 0; 703 704 return 1; ··· 1095 1096 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1097 if (conn) { 1098 - if (!ev->status) 1099 conn->link_mode |= HCI_LM_AUTH; 1100 - else 1101 conn->sec_level = BT_SECURITY_LOW; 1102 1103 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
··· 692 if (conn->state != BT_CONFIG || !conn->out) 693 return 0; 694 695 + if (conn->pending_sec_level == BT_SECURITY_SDP) 696 return 0; 697 698 /* Only request authentication for SSP connections or non-SSP 699 * devices with sec_level HIGH */ 700 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && 701 + conn->pending_sec_level != BT_SECURITY_HIGH) 702 return 0; 703 704 return 1; ··· 1095 1096 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1097 if (conn) { 1098 + if (!ev->status) { 1099 conn->link_mode |= HCI_LM_AUTH; 1100 + conn->sec_level = conn->pending_sec_level; 1101 + } else 1102 conn->sec_level = BT_SECURITY_LOW; 1103 1104 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+37 -57
net/bluetooth/l2cap.c
··· 305 } 306 } 307 308 /* Service level security */ 309 static inline int l2cap_check_security(struct sock *sk) 310 { 311 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 312 __u8 auth_type; 313 314 - if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { 315 - if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) 316 - auth_type = HCI_AT_NO_BONDING_MITM; 317 - else 318 - auth_type = HCI_AT_NO_BONDING; 319 - 320 - if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) 321 - l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; 322 - } else { 323 - switch (l2cap_pi(sk)->sec_level) { 324 - case BT_SECURITY_HIGH: 325 - auth_type = HCI_AT_GENERAL_BONDING_MITM; 326 - break; 327 - case BT_SECURITY_MEDIUM: 328 - auth_type = HCI_AT_GENERAL_BONDING; 329 - break; 330 - default: 331 - auth_type = HCI_AT_NO_BONDING; 332 - break; 333 - } 334 - } 335 336 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, 337 auth_type); ··· 1079 1080 err = -ENOMEM; 1081 1082 - if (sk->sk_type == SOCK_RAW) { 1083 - switch (l2cap_pi(sk)->sec_level) { 1084 - case BT_SECURITY_HIGH: 1085 - auth_type = HCI_AT_DEDICATED_BONDING_MITM; 1086 - break; 1087 - case BT_SECURITY_MEDIUM: 1088 - auth_type = HCI_AT_DEDICATED_BONDING; 1089 - break; 1090 - default: 1091 - auth_type = HCI_AT_NO_BONDING; 1092 - break; 1093 - } 1094 - } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { 1095 - if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) 1096 - auth_type = HCI_AT_NO_BONDING_MITM; 1097 - else 1098 - auth_type = HCI_AT_NO_BONDING; 1099 - 1100 - if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) 1101 - l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; 1102 - } else { 1103 - switch (l2cap_pi(sk)->sec_level) { 1104 - case BT_SECURITY_HIGH: 1105 - auth_type = HCI_AT_GENERAL_BONDING_MITM; 1106 - break; 1107 - case BT_SECURITY_MEDIUM: 1108 - auth_type = HCI_AT_GENERAL_BONDING; 1109 - break; 1110 - default: 1111 - auth_type = HCI_AT_NO_BONDING; 1112 - break; 1113 - } 1114 - } 1115 1116 hcon = hci_connect(hdev, ACL_LINK, dst, 1117 
l2cap_pi(sk)->sec_level, auth_type); ··· 1106 if (sk->sk_type != SOCK_SEQPACKET && 1107 sk->sk_type != SOCK_STREAM) { 1108 l2cap_sock_clear_timer(sk); 1109 - sk->sk_state = BT_CONNECTED; 1110 } else 1111 l2cap_do_start(sk); 1112 } ··· 1873 if (pi->mode == L2CAP_MODE_STREAMING) { 1874 l2cap_streaming_send(sk); 1875 } else { 1876 - if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && 1877 - pi->conn_state && L2CAP_CONN_WAIT_F) { 1878 err = len; 1879 break; 1880 }
··· 305 } 306 } 307 308 + static inline u8 l2cap_get_auth_type(struct sock *sk) 309 + { 310 + if (sk->sk_type == SOCK_RAW) { 311 + switch (l2cap_pi(sk)->sec_level) { 312 + case BT_SECURITY_HIGH: 313 + return HCI_AT_DEDICATED_BONDING_MITM; 314 + case BT_SECURITY_MEDIUM: 315 + return HCI_AT_DEDICATED_BONDING; 316 + default: 317 + return HCI_AT_NO_BONDING; 318 + } 319 + } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { 320 + if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) 321 + l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; 322 + 323 + if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) 324 + return HCI_AT_NO_BONDING_MITM; 325 + else 326 + return HCI_AT_NO_BONDING; 327 + } else { 328 + switch (l2cap_pi(sk)->sec_level) { 329 + case BT_SECURITY_HIGH: 330 + return HCI_AT_GENERAL_BONDING_MITM; 331 + case BT_SECURITY_MEDIUM: 332 + return HCI_AT_GENERAL_BONDING; 333 + default: 334 + return HCI_AT_NO_BONDING; 335 + } 336 + } 337 + } 338 + 339 /* Service level security */ 340 static inline int l2cap_check_security(struct sock *sk) 341 { 342 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 343 __u8 auth_type; 344 345 + auth_type = l2cap_get_auth_type(sk); 346 347 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, 348 auth_type); ··· 1068 1069 err = -ENOMEM; 1070 1071 + auth_type = l2cap_get_auth_type(sk); 1072 1073 hcon = hci_connect(hdev, ACL_LINK, dst, 1074 l2cap_pi(sk)->sec_level, auth_type); ··· 1127 if (sk->sk_type != SOCK_SEQPACKET && 1128 sk->sk_type != SOCK_STREAM) { 1129 l2cap_sock_clear_timer(sk); 1130 + if (l2cap_check_security(sk)) 1131 + sk->sk_state = BT_CONNECTED; 1132 } else 1133 l2cap_do_start(sk); 1134 } ··· 1893 if (pi->mode == L2CAP_MODE_STREAMING) { 1894 l2cap_streaming_send(sk); 1895 } else { 1896 + if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 1897 + (pi->conn_state & L2CAP_CONN_WAIT_F)) { 1898 err = len; 1899 break; 1900 }
+2 -1
net/bluetooth/rfcomm/core.c
··· 1164 * initiator rfcomm_process_rx already calls 1165 * rfcomm_session_put() */ 1166 if (s->sock->sk->sk_state != BT_CLOSED) 1167 - rfcomm_session_put(s); 1168 break; 1169 } 1170 }
··· 1164 * initiator rfcomm_process_rx already calls 1165 * rfcomm_session_put() */ 1166 if (s->sock->sk->sk_state != BT_CLOSED) 1167 + if (list_empty(&s->dlcs)) 1168 + rfcomm_session_put(s); 1169 break; 1170 } 1171 }
+2 -1
net/core/dev.c
··· 749 * @ha: hardware address 750 * 751 * Search for an interface by MAC address. Returns NULL if the device 752 - * is not found or a pointer to the device. The caller must hold RCU 753 * The returned device has not had its ref count increased 754 * and the caller must therefore be careful about locking 755 *
··· 749 * @ha: hardware address 750 * 751 * Search for an interface by MAC address. Returns NULL if the device 752 + * is not found or a pointer to the device. 753 + * The caller must hold RCU or RTNL. 754 * The returned device has not had its ref count increased 755 * and the caller must therefore be careful about locking 756 *
+1 -1
net/core/ethtool.c
··· 817 if (regs.len > reglen) 818 regs.len = reglen; 819 820 - regbuf = vmalloc(reglen); 821 if (!regbuf) 822 return -ENOMEM; 823
··· 817 if (regs.len > reglen) 818 regs.len = reglen; 819 820 + regbuf = vzalloc(reglen); 821 if (!regbuf) 822 return -ENOMEM; 823
+6 -2
net/core/skbuff.c
··· 2744 2745 merge: 2746 if (offset > headlen) { 2747 - skbinfo->frags[0].page_offset += offset - headlen; 2748 - skbinfo->frags[0].size -= offset - headlen; 2749 offset = headlen; 2750 } 2751
··· 2744 2745 merge: 2746 if (offset > headlen) { 2747 + unsigned int eat = offset - headlen; 2748 + 2749 + skbinfo->frags[0].page_offset += eat; 2750 + skbinfo->frags[0].size -= eat; 2751 + skb->data_len -= eat; 2752 + skb->len -= eat; 2753 offset = headlen; 2754 } 2755
+11 -2
net/dcb/dcbnl.c
··· 583 u8 up, idtype; 584 int ret = -EINVAL; 585 586 - if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp) 587 goto out; 588 589 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], ··· 604 goto out; 605 606 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 607 - up = netdev->dcbnl_ops->getapp(netdev, idtype, id); 608 609 /* send this back */ 610 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
··· 583 u8 up, idtype; 584 int ret = -EINVAL; 585 586 + if (!tb[DCB_ATTR_APP]) 587 goto out; 588 589 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], ··· 604 goto out; 605 606 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 607 + 608 + if (netdev->dcbnl_ops->getapp) { 609 + up = netdev->dcbnl_ops->getapp(netdev, idtype, id); 610 + } else { 611 + struct dcb_app app = { 612 + .selector = idtype, 613 + .protocol = id, 614 + }; 615 + up = dcb_getapp(netdev, &app); 616 + } 617 618 /* send this back */ 619 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+5 -6
net/ipv4/arp.c
··· 1017 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; 1018 return 0; 1019 } 1020 - if (__in_dev_get_rcu(dev)) { 1021 - IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on); 1022 return 0; 1023 } 1024 return -ENXIO; 1025 } 1026 1027 - /* must be called with rcu_read_lock() */ 1028 static int arp_req_set_public(struct net *net, struct arpreq *r, 1029 struct net_device *dev) 1030 { ··· 1232 if (!(r.arp_flags & ATF_NETMASK)) 1233 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr = 1234 htonl(0xFFFFFFFFUL); 1235 - rcu_read_lock(); 1236 if (r.arp_dev[0]) { 1237 err = -ENODEV; 1238 - dev = dev_get_by_name_rcu(net, r.arp_dev); 1239 if (dev == NULL) 1240 goto out; 1241 ··· 1262 break; 1263 } 1264 out: 1265 - rcu_read_unlock(); 1266 if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r))) 1267 err = -EFAULT; 1268 return err;
··· 1017 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; 1018 return 0; 1019 } 1020 + if (__in_dev_get_rtnl(dev)) { 1021 + IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on); 1022 return 0; 1023 } 1024 return -ENXIO; 1025 } 1026 1027 static int arp_req_set_public(struct net *net, struct arpreq *r, 1028 struct net_device *dev) 1029 { ··· 1233 if (!(r.arp_flags & ATF_NETMASK)) 1234 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr = 1235 htonl(0xFFFFFFFFUL); 1236 + rtnl_lock(); 1237 if (r.arp_dev[0]) { 1238 err = -ENODEV; 1239 + dev = __dev_get_by_name(net, r.arp_dev); 1240 if (dev == NULL) 1241 goto out; 1242 ··· 1263 break; 1264 } 1265 out: 1266 + rtnl_unlock(); 1267 if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r))) 1268 err = -EFAULT; 1269 return err;
+1 -1
net/ipv4/inetpeer.c
··· 475 struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) 476 { 477 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; 478 - struct inet_peer_base *base = family_to_base(AF_INET); 479 struct inet_peer *p; 480 481 /* Look up for the address quickly, lockless.
··· 475 struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) 476 { 477 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; 478 + struct inet_peer_base *base = family_to_base(daddr->family); 479 struct inet_peer *p; 480 481 /* Look up for the address quickly, lockless.
+1 -1
net/ipv4/tcp_input.c
··· 4399 if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 4400 tp->ucopy.len -= chunk; 4401 tp->copied_seq += chunk; 4402 - eaten = (chunk == skb->len && !th->fin); 4403 tcp_rcv_space_adjust(sk); 4404 } 4405 local_bh_disable();
··· 4399 if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 4400 tp->ucopy.len -= chunk; 4401 tp->copied_seq += chunk; 4402 + eaten = (chunk == skb->len); 4403 tcp_rcv_space_adjust(sk); 4404 } 4405 local_bh_disable();
-1
net/ipv4/tcp_ipv4.c
··· 1994 } 1995 req = req->dl_next; 1996 } 1997 - st->offset = 0; 1998 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) 1999 break; 2000 get_req:
··· 1994 } 1995 req = req->dl_next; 1996 } 1997 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) 1998 break; 1999 get_req:
+33 -48
net/ipv6/addrconf.c
··· 2661 struct net *net = dev_net(dev); 2662 struct inet6_dev *idev; 2663 struct inet6_ifaddr *ifa; 2664 - LIST_HEAD(keep_list); 2665 - int state; 2666 2667 ASSERT_RTNL(); 2668 2669 - /* Flush routes if device is being removed or it is not loopback */ 2670 - if (how || !(dev->flags & IFF_LOOPBACK)) 2671 - rt6_ifdown(net, dev); 2672 2673 idev = __in6_dev_get(dev); 2674 if (idev == NULL) ··· 2685 /* Step 1.5: remove snmp6 entry */ 2686 snmp6_unregister_dev(idev); 2687 2688 } 2689 2690 write_lock_bh(&idev->lock); ··· 2737 struct inet6_ifaddr, if_list); 2738 addrconf_del_timer(ifa); 2739 2740 - /* If just doing link down, and address is permanent 2741 - and not link-local, then retain it. */ 2742 - if (!how && 2743 - (ifa->flags&IFA_F_PERMANENT) && 2744 - !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { 2745 - list_move_tail(&ifa->if_list, &keep_list); 2746 2747 - /* If not doing DAD on this address, just keep it. */ 2748 - if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || 2749 - idev->cnf.accept_dad <= 0 || 2750 - (ifa->flags & IFA_F_NODAD)) 2751 - continue; 2752 2753 - /* If it was tentative already, no need to notify */ 2754 - if (ifa->flags & IFA_F_TENTATIVE) 2755 - continue; 2756 2757 - /* Flag it for later restoration when link comes up */ 2758 - ifa->flags |= IFA_F_TENTATIVE; 2759 - ifa->state = INET6_IFADDR_STATE_DAD; 2760 - } else { 2761 - list_del(&ifa->if_list); 2762 - 2763 - /* clear hash table */ 2764 - spin_lock_bh(&addrconf_hash_lock); 2765 - hlist_del_init_rcu(&ifa->addr_lst); 2766 - spin_unlock_bh(&addrconf_hash_lock); 2767 - 2768 - write_unlock_bh(&idev->lock); 2769 - spin_lock_bh(&ifa->state_lock); 2770 - state = ifa->state; 2771 - ifa->state = INET6_IFADDR_STATE_DEAD; 2772 - spin_unlock_bh(&ifa->state_lock); 2773 - 2774 - if (state != INET6_IFADDR_STATE_DEAD) { 2775 - __ipv6_ifa_notify(RTM_DELADDR, ifa); 2776 - atomic_notifier_call_chain(&inet6addr_chain, 2777 - NETDEV_DOWN, ifa); 2778 - } 2779 - 2780 - in6_ifa_put(ifa); 2781 - 
write_lock_bh(&idev->lock); 2782 } 2783 - } 2784 2785 - list_splice(&keep_list, &idev->addr_list); 2786 2787 write_unlock_bh(&idev->lock); 2788 ··· 4142 addrconf_leave_solict(ifp->idev, &ifp->addr); 4143 dst_hold(&ifp->rt->dst); 4144 4145 - if (ifp->state == INET6_IFADDR_STATE_DEAD && 4146 - ip6_del_rt(ifp->rt)) 4147 dst_free(&ifp->rt->dst); 4148 break; 4149 }
··· 2661 struct net *net = dev_net(dev); 2662 struct inet6_dev *idev; 2663 struct inet6_ifaddr *ifa; 2664 + int state, i; 2665 2666 ASSERT_RTNL(); 2667 2668 + rt6_ifdown(net, dev); 2669 + neigh_ifdown(&nd_tbl, dev); 2670 2671 idev = __in6_dev_get(dev); 2672 if (idev == NULL) ··· 2687 /* Step 1.5: remove snmp6 entry */ 2688 snmp6_unregister_dev(idev); 2689 2690 + } 2691 + 2692 + /* Step 2: clear hash table */ 2693 + for (i = 0; i < IN6_ADDR_HSIZE; i++) { 2694 + struct hlist_head *h = &inet6_addr_lst[i]; 2695 + struct hlist_node *n; 2696 + 2697 + spin_lock_bh(&addrconf_hash_lock); 2698 + restart: 2699 + hlist_for_each_entry_rcu(ifa, n, h, addr_lst) { 2700 + if (ifa->idev == idev) { 2701 + hlist_del_init_rcu(&ifa->addr_lst); 2702 + addrconf_del_timer(ifa); 2703 + goto restart; 2704 + } 2705 + } 2706 + spin_unlock_bh(&addrconf_hash_lock); 2707 } 2708 2709 write_lock_bh(&idev->lock); ··· 2722 struct inet6_ifaddr, if_list); 2723 addrconf_del_timer(ifa); 2724 2725 + list_del(&ifa->if_list); 2726 2727 + write_unlock_bh(&idev->lock); 2728 2729 + spin_lock_bh(&ifa->state_lock); 2730 + state = ifa->state; 2731 + ifa->state = INET6_IFADDR_STATE_DEAD; 2732 + spin_unlock_bh(&ifa->state_lock); 2733 2734 + if (state != INET6_IFADDR_STATE_DEAD) { 2735 + __ipv6_ifa_notify(RTM_DELADDR, ifa); 2736 + atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); 2737 } 2738 + in6_ifa_put(ifa); 2739 2740 + write_lock_bh(&idev->lock); 2741 + } 2742 2743 write_unlock_bh(&idev->lock); 2744 ··· 4156 addrconf_leave_solict(ifp->idev, &ifp->addr); 4157 dst_hold(&ifp->rt->dst); 4158 4159 + if (ip6_del_rt(ifp->rt)) 4160 dst_free(&ifp->rt->dst); 4161 break; 4162 }
+1 -8
net/ipv6/route.c
··· 72 #define RT6_TRACE(x...) do { ; } while (0) 73 #endif 74 75 - #define CLONE_OFFLINK_ROUTE 0 76 - 77 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort); 78 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 79 static unsigned int ip6_default_advmss(const struct dst_entry *dst); ··· 736 737 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) 738 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); 739 - else { 740 - #if CLONE_OFFLINK_ROUTE 741 nrt = rt6_alloc_clone(rt, &fl->fl6_dst); 742 - #else 743 - goto out2; 744 - #endif 745 - } 746 747 dst_release(&rt->dst); 748 rt = nrt ? : net->ipv6.ip6_null_entry;
··· 72 #define RT6_TRACE(x...) do { ; } while (0) 73 #endif 74 75 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort); 76 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 77 static unsigned int ip6_default_advmss(const struct dst_entry *dst); ··· 738 739 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) 740 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); 741 + else 742 nrt = rt6_alloc_clone(rt, &fl->fl6_dst); 743 744 dst_release(&rt->dst); 745 rt = nrt ? : net->ipv6.ip6_null_entry;
+6
net/ipv6/xfrm6_policy.c
··· 98 if (!xdst->u.rt6.rt6i_idev) 99 return -ENODEV; 100 101 /* Sheit... I remember I did this right. Apparently, 102 * it was magically lost, so this code needs audit */ 103 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | ··· 220 221 if (likely(xdst->u.rt6.rt6i_idev)) 222 in6_dev_put(xdst->u.rt6.rt6i_idev); 223 xfrm_dst_destroy(xdst); 224 } 225
··· 98 if (!xdst->u.rt6.rt6i_idev) 99 return -ENODEV; 100 101 + xdst->u.rt6.rt6i_peer = rt->rt6i_peer; 102 + if (rt->rt6i_peer) 103 + atomic_inc(&rt->rt6i_peer->refcnt); 104 + 105 /* Sheit... I remember I did this right. Apparently, 106 * it was magically lost, so this code needs audit */ 107 xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST | ··· 216 217 if (likely(xdst->u.rt6.rt6i_idev)) 218 in6_dev_put(xdst->u.rt6.rt6i_idev); 219 + if (likely(xdst->u.rt6.rt6i_peer)) 220 + inet_putpeer(xdst->u.rt6.rt6i_peer); 221 xfrm_dst_destroy(xdst); 222 } 223
+3
net/mac80211/tx.c
··· 2230 2231 sdata = vif_to_sdata(vif); 2232 2233 if (tim_offset) 2234 *tim_offset = 0; 2235 if (tim_length)
··· 2230 2231 sdata = vif_to_sdata(vif); 2232 2233 + if (!ieee80211_sdata_running(sdata)) 2234 + goto out; 2235 + 2236 if (tim_offset) 2237 *tim_offset = 0; 2238 if (tim_length)
+1 -2
net/sched/sch_cbq.c
··· 390 ret = qdisc_enqueue(skb, cl->q); 391 if (ret == NET_XMIT_SUCCESS) { 392 sch->q.qlen++; 393 - qdisc_bstats_update(sch, skb); 394 cbq_mark_toplevel(q, cl); 395 if (!cl->next_alive) 396 cbq_activate_class(cl); ··· 648 ret = qdisc_enqueue(skb, cl->q); 649 if (ret == NET_XMIT_SUCCESS) { 650 sch->q.qlen++; 651 - qdisc_bstats_update(sch, skb); 652 if (!cl->next_alive) 653 cbq_activate_class(cl); 654 return 0; ··· 969 970 skb = cbq_dequeue_1(sch); 971 if (skb) { 972 sch->q.qlen--; 973 sch->flags &= ~TCQ_F_THROTTLED; 974 return skb;
··· 390 ret = qdisc_enqueue(skb, cl->q); 391 if (ret == NET_XMIT_SUCCESS) { 392 sch->q.qlen++; 393 cbq_mark_toplevel(q, cl); 394 if (!cl->next_alive) 395 cbq_activate_class(cl); ··· 649 ret = qdisc_enqueue(skb, cl->q); 650 if (ret == NET_XMIT_SUCCESS) { 651 sch->q.qlen++; 652 if (!cl->next_alive) 653 cbq_activate_class(cl); 654 return 0; ··· 971 972 skb = cbq_dequeue_1(sch); 973 if (skb) { 974 + qdisc_bstats_update(sch, skb); 975 sch->q.qlen--; 976 sch->flags &= ~TCQ_F_THROTTLED; 977 return skb;
+1 -1
net/sched/sch_drr.c
··· 376 } 377 378 bstats_update(&cl->bstats, skb); 379 - qdisc_bstats_update(sch, skb); 380 381 sch->q.qlen++; 382 return err; ··· 402 skb = qdisc_dequeue_peeked(cl->qdisc); 403 if (cl->qdisc->q.qlen == 0) 404 list_del(&cl->alist); 405 sch->q.qlen--; 406 return skb; 407 }
··· 376 } 377 378 bstats_update(&cl->bstats, skb); 379 380 sch->q.qlen++; 381 return err; ··· 403 skb = qdisc_dequeue_peeked(cl->qdisc); 404 if (cl->qdisc->q.qlen == 0) 405 list_del(&cl->alist); 406 + qdisc_bstats_update(sch, skb); 407 sch->q.qlen--; 408 return skb; 409 }
+1 -1
net/sched/sch_dsmark.c
··· 260 return err; 261 } 262 263 - qdisc_bstats_update(sch, skb); 264 sch->q.qlen++; 265 266 return NET_XMIT_SUCCESS; ··· 282 if (skb == NULL) 283 return NULL; 284 285 sch->q.qlen--; 286 287 index = skb->tc_index & (p->indices - 1);
··· 260 return err; 261 } 262 263 sch->q.qlen++; 264 265 return NET_XMIT_SUCCESS; ··· 283 if (skb == NULL) 284 return NULL; 285 286 + qdisc_bstats_update(sch, skb); 287 sch->q.qlen--; 288 289 index = skb->tc_index & (p->indices - 1);
+1 -4
net/sched/sch_fifo.c
··· 46 47 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch) 48 { 49 - struct sk_buff *skb_head; 50 struct fifo_sched_data *q = qdisc_priv(sch); 51 52 if (likely(skb_queue_len(&sch->q) < q->limit)) 53 return qdisc_enqueue_tail(skb, sch); 54 55 /* queue full, remove one skb to fulfill the limit */ 56 - skb_head = qdisc_dequeue_head(sch); 57 sch->qstats.drops++; 58 - kfree_skb(skb_head); 59 - 60 qdisc_enqueue_tail(skb, sch); 61 62 return NET_XMIT_CN;
··· 46 47 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch) 48 { 49 struct fifo_sched_data *q = qdisc_priv(sch); 50 51 if (likely(skb_queue_len(&sch->q) < q->limit)) 52 return qdisc_enqueue_tail(skb, sch); 53 54 /* queue full, remove one skb to fulfill the limit */ 55 + __qdisc_queue_drop_head(sch, &sch->q); 56 sch->qstats.drops++; 57 qdisc_enqueue_tail(skb, sch); 58 59 return NET_XMIT_CN;
+1 -1
net/sched/sch_hfsc.c
··· 1600 set_active(cl, qdisc_pkt_len(skb)); 1601 1602 bstats_update(&cl->bstats, skb); 1603 - qdisc_bstats_update(sch, skb); 1604 sch->q.qlen++; 1605 1606 return NET_XMIT_SUCCESS; ··· 1665 } 1666 1667 sch->flags &= ~TCQ_F_THROTTLED; 1668 sch->q.qlen--; 1669 1670 return skb;
··· 1600 set_active(cl, qdisc_pkt_len(skb)); 1601 1602 bstats_update(&cl->bstats, skb); 1603 sch->q.qlen++; 1604 1605 return NET_XMIT_SUCCESS; ··· 1666 } 1667 1668 sch->flags &= ~TCQ_F_THROTTLED; 1669 + qdisc_bstats_update(sch, skb); 1670 sch->q.qlen--; 1671 1672 return skb;
+5 -7
net/sched/sch_htb.c
··· 574 } 575 576 sch->q.qlen++; 577 - qdisc_bstats_update(sch, skb); 578 return NET_XMIT_SUCCESS; 579 } 580 ··· 841 842 static struct sk_buff *htb_dequeue(struct Qdisc *sch) 843 { 844 - struct sk_buff *skb = NULL; 845 struct htb_sched *q = qdisc_priv(sch); 846 int level; 847 psched_time_t next_event; ··· 850 /* try to dequeue direct packets as high prio (!) to minimize cpu work */ 851 skb = __skb_dequeue(&q->direct_queue); 852 if (skb != NULL) { 853 sch->flags &= ~TCQ_F_THROTTLED; 854 sch->q.qlen--; 855 return skb; ··· 885 int prio = ffz(m); 886 m |= 1 << prio; 887 skb = htb_dequeue_tree(q, prio, level); 888 - if (likely(skb != NULL)) { 889 - sch->q.qlen--; 890 - sch->flags &= ~TCQ_F_THROTTLED; 891 - goto fin; 892 - } 893 } 894 } 895 sch->qstats.overlimits++;
··· 574 } 575 576 sch->q.qlen++; 577 return NET_XMIT_SUCCESS; 578 } 579 ··· 842 843 static struct sk_buff *htb_dequeue(struct Qdisc *sch) 844 { 845 + struct sk_buff *skb; 846 struct htb_sched *q = qdisc_priv(sch); 847 int level; 848 psched_time_t next_event; ··· 851 /* try to dequeue direct packets as high prio (!) to minimize cpu work */ 852 skb = __skb_dequeue(&q->direct_queue); 853 if (skb != NULL) { 854 + ok: 855 + qdisc_bstats_update(sch, skb); 856 sch->flags &= ~TCQ_F_THROTTLED; 857 sch->q.qlen--; 858 return skb; ··· 884 int prio = ffz(m); 885 m |= 1 << prio; 886 skb = htb_dequeue_tree(q, prio, level); 887 + if (likely(skb != NULL)) 888 + goto ok; 889 } 890 } 891 sch->qstats.overlimits++;
+1 -1
net/sched/sch_multiq.c
··· 83 84 ret = qdisc_enqueue(skb, qdisc); 85 if (ret == NET_XMIT_SUCCESS) { 86 - qdisc_bstats_update(sch, skb); 87 sch->q.qlen++; 88 return NET_XMIT_SUCCESS; 89 } ··· 111 qdisc = q->queues[q->curband]; 112 skb = qdisc->dequeue(qdisc); 113 if (skb) { 114 sch->q.qlen--; 115 return skb; 116 }
··· 83 84 ret = qdisc_enqueue(skb, qdisc); 85 if (ret == NET_XMIT_SUCCESS) { 86 sch->q.qlen++; 87 return NET_XMIT_SUCCESS; 88 } ··· 112 qdisc = q->queues[q->curband]; 113 skb = qdisc->dequeue(qdisc); 114 if (skb) { 115 + qdisc_bstats_update(sch, skb); 116 sch->q.qlen--; 117 return skb; 118 }
+1 -2
net/sched/sch_netem.c
··· 240 241 if (likely(ret == NET_XMIT_SUCCESS)) { 242 sch->q.qlen++; 243 - qdisc_bstats_update(sch, skb); 244 } else if (net_xmit_drop_count(ret)) { 245 sch->qstats.drops++; 246 } ··· 288 skb->tstamp.tv64 = 0; 289 #endif 290 pr_debug("netem_dequeue: return skb=%p\n", skb); 291 sch->q.qlen--; 292 return skb; 293 } ··· 476 __skb_queue_after(list, skb, nskb); 477 478 sch->qstats.backlog += qdisc_pkt_len(nskb); 479 - qdisc_bstats_update(sch, nskb); 480 481 return NET_XMIT_SUCCESS; 482 }
··· 240 241 if (likely(ret == NET_XMIT_SUCCESS)) { 242 sch->q.qlen++; 243 } else if (net_xmit_drop_count(ret)) { 244 sch->qstats.drops++; 245 } ··· 289 skb->tstamp.tv64 = 0; 290 #endif 291 pr_debug("netem_dequeue: return skb=%p\n", skb); 292 + qdisc_bstats_update(sch, skb); 293 sch->q.qlen--; 294 return skb; 295 } ··· 476 __skb_queue_after(list, skb, nskb); 477 478 sch->qstats.backlog += qdisc_pkt_len(nskb); 479 480 return NET_XMIT_SUCCESS; 481 }
+1 -1
net/sched/sch_prio.c
··· 84 85 ret = qdisc_enqueue(skb, qdisc); 86 if (ret == NET_XMIT_SUCCESS) { 87 - qdisc_bstats_update(sch, skb); 88 sch->q.qlen++; 89 return NET_XMIT_SUCCESS; 90 } ··· 115 struct Qdisc *qdisc = q->queues[prio]; 116 struct sk_buff *skb = qdisc->dequeue(qdisc); 117 if (skb) { 118 sch->q.qlen--; 119 return skb; 120 }
··· 84 85 ret = qdisc_enqueue(skb, qdisc); 86 if (ret == NET_XMIT_SUCCESS) { 87 sch->q.qlen++; 88 return NET_XMIT_SUCCESS; 89 } ··· 116 struct Qdisc *qdisc = q->queues[prio]; 117 struct sk_buff *skb = qdisc->dequeue(qdisc); 118 if (skb) { 119 + qdisc_bstats_update(sch, skb); 120 sch->q.qlen--; 121 return skb; 122 }
+6 -5
net/sched/sch_red.c
··· 94 95 ret = qdisc_enqueue(skb, child); 96 if (likely(ret == NET_XMIT_SUCCESS)) { 97 - qdisc_bstats_update(sch, skb); 98 sch->q.qlen++; 99 } else if (net_xmit_drop_count(ret)) { 100 q->stats.pdrop++; ··· 113 struct Qdisc *child = q->qdisc; 114 115 skb = child->dequeue(child); 116 - if (skb) 117 sch->q.qlen--; 118 - else if (!red_is_idling(&q->parms)) 119 - red_start_of_idle_period(&q->parms); 120 - 121 return skb; 122 } 123
··· 94 95 ret = qdisc_enqueue(skb, child); 96 if (likely(ret == NET_XMIT_SUCCESS)) { 97 sch->q.qlen++; 98 } else if (net_xmit_drop_count(ret)) { 99 q->stats.pdrop++; ··· 114 struct Qdisc *child = q->qdisc; 115 116 skb = child->dequeue(child); 117 + if (skb) { 118 + qdisc_bstats_update(sch, skb); 119 sch->q.qlen--; 120 + } else { 121 + if (!red_is_idling(&q->parms)) 122 + red_start_of_idle_period(&q->parms); 123 + } 124 return skb; 125 } 126
+2 -3
net/sched/sch_sfq.c
··· 402 q->tail = slot; 403 slot->allot = q->scaled_quantum; 404 } 405 - if (++sch->q.qlen <= q->limit) { 406 - qdisc_bstats_update(sch, skb); 407 return NET_XMIT_SUCCESS; 408 - } 409 410 sfq_drop(sch); 411 return NET_XMIT_CN; ··· 443 } 444 skb = slot_dequeue_head(slot); 445 sfq_dec(q, a); 446 sch->q.qlen--; 447 sch->qstats.backlog -= qdisc_pkt_len(skb); 448
··· 402 q->tail = slot; 403 slot->allot = q->scaled_quantum; 404 } 405 + if (++sch->q.qlen <= q->limit) 406 return NET_XMIT_SUCCESS; 407 408 sfq_drop(sch); 409 return NET_XMIT_CN; ··· 445 } 446 skb = slot_dequeue_head(slot); 447 sfq_dec(q, a); 448 + qdisc_bstats_update(sch, skb); 449 sch->q.qlen--; 450 sch->qstats.backlog -= qdisc_pkt_len(skb); 451
+1 -1
net/sched/sch_tbf.c
··· 134 } 135 136 sch->q.qlen++; 137 - qdisc_bstats_update(sch, skb); 138 return NET_XMIT_SUCCESS; 139 } 140 ··· 186 q->ptokens = ptoks; 187 sch->q.qlen--; 188 sch->flags &= ~TCQ_F_THROTTLED; 189 return skb; 190 } 191
··· 134 } 135 136 sch->q.qlen++; 137 return NET_XMIT_SUCCESS; 138 } 139 ··· 187 q->ptokens = ptoks; 188 sch->q.qlen--; 189 sch->flags &= ~TCQ_F_THROTTLED; 190 + qdisc_bstats_update(sch, skb); 191 return skb; 192 } 193
+2 -1
net/sched/sch_teql.c
··· 87 88 if (q->q.qlen < dev->tx_queue_len) { 89 __skb_queue_tail(&q->q, skb); 90 - qdisc_bstats_update(sch, skb); 91 return NET_XMIT_SUCCESS; 92 } 93 ··· 110 dat->m->slaves = sch; 111 netif_wake_queue(m); 112 } 113 } 114 sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen; 115 return skb;
··· 87 88 if (q->q.qlen < dev->tx_queue_len) { 89 __skb_queue_tail(&q->q, skb); 90 return NET_XMIT_SUCCESS; 91 } 92 ··· 111 dat->m->slaves = sch; 112 netif_wake_queue(m); 113 } 114 + } else { 115 + qdisc_bstats_update(sch, skb); 116 } 117 sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen; 118 return skb;