···
	static struct property *tmp = NULL;
	struct property *p;
	int len;
+	const char *name;

	if (tmp) {
		p = tmp;
···

	p->name = (char *) (p + 1);
	if (special_name) {
+		strcpy(p->name, special_name);
		p->length = special_len;
		p->value = prom_early_alloc(special_len);
		memcpy(p->value, special_val, special_len);
	} else {
		if (prev == NULL) {
+			name = prom_firstprop(node, NULL);
		} else {
+			name = prom_nextprop(node, prev, NULL);
		}
+		if (strlen(name) == 0) {
			tmp = p;
			return NULL;
		}
+		strcpy(p->name, name);
		p->length = prom_getproplen(node, p->name);
		if (p->length <= 0) {
			p->length = 0;
+88 -6
arch/sparc/kernel/smp.c

···
void __init smp_cpus_done(unsigned int max_cpus)
{
	extern void smp4m_smp_done(void);
+	extern void smp4d_smp_done(void);
	unsigned long bogosum = 0;
	int cpu, num;
···
	       num, bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);

-	BUG_ON(sparc_cpu_model != sun4m);
-	smp4m_smp_done();
+	switch(sparc_cpu_model) {
+	case sun4:
+		printk("SUN4\n");
+		BUG();
+		break;
+	case sun4c:
+		printk("SUN4C\n");
+		BUG();
+		break;
+	case sun4m:
+		smp4m_smp_done();
+		break;
+	case sun4d:
+		smp4d_smp_done();
+		break;
+	case sun4e:
+		printk("SUN4E\n");
+		BUG();
+		break;
+	case sun4u:
+		printk("SUN4U\n");
+		BUG();
+		break;
+	default:
+		printk("UNKNOWN!\n");
+		BUG();
+		break;
+	};
}

void cpu_panic(void)
···
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	extern void smp4m_boot_cpus(void);
+	extern void smp4d_boot_cpus(void);
	int i, cpuid, extra;

-	BUG_ON(sparc_cpu_model != sun4m);
	printk("Entering SMP Mode...\n");

	extra = 0;
···
	smp_store_cpu_info(boot_cpu_id);

-	smp4m_boot_cpus();
+	switch(sparc_cpu_model) {
+	case sun4:
+		printk("SUN4\n");
+		BUG();
+		break;
+	case sun4c:
+		printk("SUN4C\n");
+		BUG();
+		break;
+	case sun4m:
+		smp4m_boot_cpus();
+		break;
+	case sun4d:
+		smp4d_boot_cpus();
+		break;
+	case sun4e:
+		printk("SUN4E\n");
+		BUG();
+		break;
+	case sun4u:
+		printk("SUN4U\n");
+		BUG();
+		break;
+	default:
+		printk("UNKNOWN!\n");
+		BUG();
+		break;
+	};
}

/* Set this up early so that things like the scheduler can init
···
int __cpuinit __cpu_up(unsigned int cpu)
{
	extern int smp4m_boot_one_cpu(int);
-	int ret;
+	extern int smp4d_boot_one_cpu(int);
+	int ret=0;

-	ret = smp4m_boot_one_cpu(cpu);
+	switch(sparc_cpu_model) {
+	case sun4:
+		printk("SUN4\n");
+		BUG();
+		break;
+	case sun4c:
+		printk("SUN4C\n");
+		BUG();
+		break;
+	case sun4m:
+		ret = smp4m_boot_one_cpu(cpu);
+		break;
+	case sun4d:
+		ret = smp4d_boot_one_cpu(cpu);
+		break;
+	case sun4e:
+		printk("SUN4E\n");
+		BUG();
+		break;
+	case sun4u:
+		printk("SUN4U\n");
+		BUG();
+		break;
+	default:
+		printk("UNKNOWN!\n");
+		BUG();
+		break;
+	};

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
···
	return 0;
}

-/* Gets name in the form prom v2+ uses it (name@x,yyyyy or name (if no reg)) */
-int prom_getname (int node, char *buffer, int len)
-{
-	int i;
-	struct linux_prom_registers reg[PROMREG_MAX];
-
-	i = prom_getproperty (node, "name", buffer, len);
-	if (i <= 0) return -1;
-	buffer [i] = 0;
-	len -= i;
-	i = prom_getproperty (node, "reg", (char *)reg, sizeof (reg));
-	if (i <= 0) return 0;
-	if (len < 11) return -1;
-	buffer = strchr (buffer, 0);
-	sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
-	return 0;
-}
-
/* Interal version of nextprop that does not alter return values. */
char * __prom_nextprop(int node, char * oprop)
{
+4 -4
arch/sparc64/defconfig

···
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18-rc1
-# Wed Jul 12 14:00:58 2006
+# Linux kernel version: 2.6.18-rc2
+# Fri Jul 21 14:19:24 2006
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
···
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
···
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
-# CONFIG_USB_CY7C63 is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
···
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
-# CONFIG_CIFS_DEBUG2 is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
-3
arch/sparc64/kernel/devices.c

···
			 void *compare_arg,
			 struct device_node **dev_node, int *mid)
{
-	if (strcmp(dp->type, "cpu"))
-		return -ENODEV;
-
	if (!compare(dp, *cur_inst, compare_arg)) {
		if (dev_node)
			*dev_node = dp;
+32 -2
arch/sparc64/kernel/of_device.c

···
	/* Convert to num-cells. */
	num_reg /= 4;

-	/* Conver to num-entries. */
+	/* Convert to num-entries. */
	num_reg /= na + ns;
+
+	/* Prevent overruning the op->resources[] array. */
+	if (num_reg > PROMREG_MAX) {
+		printk(KERN_WARNING "%s: Too many regs (%d), "
+		       "limiting to %d.\n",
+		       op->node->full_name, num_reg, PROMREG_MAX);
+		num_reg = PROMREG_MAX;
+	}

	for (index = 0; index < num_reg; index++) {
		struct resource *r = &op->resource[index];
···
	next:
		imap += (na + 3);
	}
-	if (i == imlen)
+	if (i == imlen) {
+		/* Psycho and Sabre PCI controllers can have 'interrupt-map'
+		 * properties that do not include the on-board device
+		 * interrupts.  Instead, the device's 'interrupts' property
+		 * is already a fully specified INO value.
+		 *
+		 * Handle this by deciding that, if we didn't get a
+		 * match in the parent's 'interrupt-map', and the
+		 * parent is an IRQ translater, then use the parent as
+		 * our IRQ controller.
+		 */
+		if (pp->irq_trans)
+			return pp;
+
		return NULL;
+	}

	*irq_p = irq;
	cp = of_find_node_by_phandle(handle);
···
		op->num_irqs = len / 4;
	} else {
		op->num_irqs = 0;
+	}
+
+	/* Prevent overruning the op->irqs[] array. */
+	if (op->num_irqs > PROMINTR_MAX) {
+		printk(KERN_WARNING "%s: Too many irqs (%d), "
+		       "limiting to %d.\n",
+		       dp->full_name, op->num_irqs, PROMINTR_MAX);
+		op->num_irqs = PROMINTR_MAX;
	}

	build_device_resources(op, parent);
···

asmlinkage long sys_getdomainname(char __user *name, int len)
{
-	int nlen;
-	int err = -EFAULT;
+	int nlen, err;
+
+	if (len < 0 || len > __NEW_UTS_LEN)
+		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(system_utsname.domainname) + 1;
-
	if (nlen < len)
		len = nlen;
-	if (len > __NEW_UTS_LEN)
-		goto done;
-	if (copy_to_user(name, system_utsname.domainname, len))
-		goto done;
-	err = 0;
-done:
+
+	err = -EFAULT;
+	if (!copy_to_user(name, system_utsname.domainname, len))
+		err = 0;
+
	up_read(&uts_sem);
	return err;
}
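The point of the hunk above is that the old code could pass a negative `len` straight into copy_to_user(), whose count argument is unsigned, so -1 became an enormous copy length. A minimal user-space illustration of that conversion (plain C, nothing from the kernel assumed):

```c
#include <stdio.h>

int main(void)
{
	int len = -1;

	/* copy_to_user() takes an unsigned long count; a negative int
	 * converts to a huge value (2^64 - 1 on LP64), which is why the
	 * fixed code rejects len < 0 before doing anything else. */
	unsigned long count = len;

	printf("%lu\n", count);	/* prints 18446744073709551615 */
	return 0;
}
```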
-85
arch/sparc64/prom/tree.c

···
	return 0;
}

-/* Gets name in the {name@x,yyyyy|name (if no reg)} form */
-int 
-prom_getname (int node, char *buffer, int len)
-{
-	int i, sbus = 0;
-	int pci = 0, ebus = 0, ide = 0;
-	struct linux_prom_registers *reg;
-	struct linux_prom64_registers reg64[PROMREG_MAX];
-
-	for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) {
-		i = prom_getproperty (sbus, "name", buffer, len);
-		if (i > 0) {
-			buffer [i] = 0;
-			if (!strcmp (buffer, "sbus"))
-				goto getit;
-		}
-	}
-	if ((pci = prom_getparent (node))) {
-		i = prom_getproperty (pci, "name", buffer, len);
-		if (i > 0) {
-			buffer [i] = 0;
-			if (!strcmp (buffer, "pci"))
-				goto getit;
-		}
-		pci = 0;
-	}
-	if ((ebus = prom_getparent (node))) {
-		i = prom_getproperty (ebus, "name", buffer, len);
-		if (i > 0) {
-			buffer[i] = 0;
-			if (!strcmp (buffer, "ebus"))
-				goto getit;
-		}
-		ebus = 0;
-	}
-	if ((ide = prom_getparent (node))) {
-		i = prom_getproperty (ide, "name", buffer, len);
-		if (i > 0) {
-			buffer [i] = 0;
-			if (!strcmp (buffer, "ide"))
-				goto getit;
-		}
-		ide = 0;
-	}
-getit:
-	i = prom_getproperty (node, "name", buffer, len);
-	if (i <= 0) {
-		buffer [0] = 0;
-		return -1;
-	}
-	buffer [i] = 0;
-	len -= i;
-	i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64));
-	if (i <= 0) return 0;
-	if (len < 16) return -1;
-	buffer = strchr (buffer, 0);
-	if (sbus) {
-		reg = (struct linux_prom_registers *)reg64;
-		sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
-	} else if (pci) {
-		int dev, fn;
-		reg = (struct linux_prom_registers *)reg64;
-		fn = (reg[0].which_io >> 8) & 0x07;
-		dev = (reg[0].which_io >> 11) & 0x1f;
-		if (fn)
-			sprintf (buffer, "@%x,%x", dev, fn);
-		else
-			sprintf (buffer, "@%x", dev);
-	} else if (ebus) {
-		reg = (struct linux_prom_registers *)reg64;
-		sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
-	} else if (ide) {
-		reg = (struct linux_prom_registers *)reg64;
-		sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
-	} else if (i == 4) {	/* Happens on 8042's children on Ultra/PCI. */
-		reg = (struct linux_prom_registers *)reg64;
-		sprintf (buffer, "@%x", reg[0].which_io);
-	} else {
-		sprintf (buffer, "@%x,%x",
-			 (unsigned int)(reg64[0].phys_addr >> 36),
-			 (unsigned int)(reg64[0].phys_addr));
-	}
-	return 0;
-}
-
/* Return the first property type for node 'node'.
 * buffer should be at least 32B in length
 */
···
	spin_lock_irqsave(&info->lock, flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock, flags);
+	if (info->serial_signals & SerialSignal_DCD)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
	return 0;
}
···
		} else
			info->input_signal_events.dcd_down++;
#ifdef CONFIG_HDLC
+		if (info->netcount) {
+			if (status & SerialSignal_DCD)
+				netif_carrier_on(info->netdev);
+			else
+				netif_carrier_off(info->netdev);
+		}
#endif
	}
	if (status & MISCSTATUS_CTS_LATCHED)
+10 -6
drivers/cpufreq/cpufreq_ondemand.c

···
	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
			this_dbs_info->prev_cpu_wall);
	this_dbs_info->prev_cpu_wall = cur_jiffies;
+	if (!total_ticks)
+		return;
	/*
	 * Every sampling_rate, we check, if current idle time is less
	 * than 20% (default), then we try to increase frequency
···
	unsigned int cpu = smp_processor_id();
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);

+	if (!dbs_info->enable)
+		return;
+
	dbs_check_cpu(dbs_info);
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
···
	return;
}

-static inline void dbs_timer_exit(unsigned int cpu)
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-
-	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
+	dbs_info->enable = 0;
+	cancel_delayed_work(&dbs_info->work);
+	flush_workqueue(kondemand_wq);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
···

	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
-		dbs_timer_exit(policy->cpu);
-		this_dbs_info->enable = 0;
+		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		if (dbs_enable == 0)
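The dbs_timer_exit() rewrite above is the usual teardown sequence for self-rearming delayed work: clear the enable flag so the handler stops requeueing itself, cancel any pending timer, then flush the workqueue to wait out an instance that is already running. A minimal sketch of that pattern with 2.6.18-era workqueue calls (the `my_*` names and the work payload are illustrative, not from the patch; this is a fragment, not a loadable module):

```c
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical workqueue */
static struct work_struct my_work;
static int my_enabled;

static void my_work_fn(void *data)
{
	if (!my_enabled)
		return;			/* teardown has begun: don't re-arm */

	/* ... periodic work goes here ... */

	queue_delayed_work(my_wq, &my_work, HZ);	/* re-arm ourselves */
}

static void my_timer_exit(void)
{
	my_enabled = 0;			/* 1. stop the handler re-arming   */
	cancel_delayed_work(&my_work);	/* 2. kill a not-yet-running timer */
	flush_workqueue(my_wq);		/* 3. wait for a running instance  */
}
```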
+1 -1
drivers/dma/ioatdma.c

···
	/* if forced, worst case is that rmmod hangs */
	__unsafe(THIS_MODULE);

-	return pci_module_init(&ioat_pci_drv);
+	return pci_register_driver(&ioat_pci_drv);
}

module_init(ioat_init_module);
···
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* Prototype Routine for the HP HOST INFO command.
+/* Prototype Routine for the HOST INFO command.
 *
 * Outputs:	None.
 * Return:	0 if successful
···
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* Prototype Routine for the HP TARGET INFO command.
+/* Prototype Routine for the TARGET INFO command.
 *
 * Outputs:	None.
 * Return:	0 if successful
-5
drivers/message/fusion/mptctl.h

···

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *	HP Specific IOCTL Defines and Structures
- */

#define CPQFCTS_IOC_MAGIC 'Z'
#define HP_IOC_MAGIC 'Z'
···
#define HP_GETHOSTINFO1		_IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t)
#define HP_GETTARGETINFO	_IOR(HP_IOC_MAGIC, 21, hp_target_info_t)

-/* All HP IOCTLs must include this header
- */
typedef struct _hp_header {
	unsigned int iocnum;
	unsigned int host;
···
	for (i = 0; i < numdummies && !err; i++)
		err = dummy_init_one(i);
	if (err) {
+		i--;
		while (--i >= 0)
			dummy_free_one(i);
	}
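Both this fix and the identical one in ifb.c below address the same off-by-one: when `dummy_init_one(k)` fails, the `for` loop still executes its `i++`, so `i` ends up at `k + 1`, and the original `while (--i >= 0)` would also free the unit whose init just failed. A stand-alone C model of the corrected cleanup:

```c
#include <stdio.h>

int main(void)
{
	int n = 5, err = 0, i;

	for (i = 0; i < n && !err; i++)
		err = (i == 3);	/* pretend init of unit 3 fails */

	if (err) {
		/* i is 4 here: one past the failed unit.  Step back once
		 * so the loop below frees only units 2, 1, 0. */
		i--;
		while (--i >= 0)
			printf("free %d\n", i);
	}
	return 0;
}
```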
+3
drivers/net/e1000/e1000.h

···
#define E1000_MIN_RXD                       80
#define E1000_MAX_82544_RXD               4096

+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128   128    /* Used for packet split */
#define E1000_RXBUFFER_256   256    /* Used for packet split */
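For reference, 1522 is the standard 802.1Q maximum frame length; a quick derivation from the usual Ethernet constants (macro names here are illustrative, not part of the driver):

```c
#define ETH_DATA_MAX	1500	/* maximum payload */
#define ETH_HDR_LEN	  14	/* dst MAC + src MAC + ethertype */
#define VLAN_TAG_LEN	   4	/* 802.1Q tag inserted after src MAC */
#define ETH_FCS_LEN	   4	/* trailing CRC */

/* 1500 + 14 + 4 + 4 == 1522, the largest frame legal with one VLAN tag */
#define MAX_VLAN_FRAME	(ETH_DATA_MAX + ETH_HDR_LEN + VLAN_TAG_LEN + ETH_FCS_LEN)
```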
+26 -26
drivers/net/e1000/e1000_main.c

···
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.1.9-k2"DRIVERNAPI
+#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
···

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

-	adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
···
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
	if (!adapter->hw.tbi_compatibility_on &&
	    ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
···
		E1000_WRITE_REG(hw, IMC, ~0);
		E1000_WRITE_FLUSH(hw);
	}
-	if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
-		__netif_rx_schedule(&adapter->polling_netdev[0]);
+	if (likely(netif_rx_schedule_prep(netdev)))
+		__netif_rx_schedule(netdev);
	else
		e1000_irq_enable(adapter);
#else
···
{
	struct e1000_adapter *adapter;
	int work_to_do = min(*budget, poll_dev->quota);
-	int tx_cleaned = 0, i = 0, work_done = 0;
+	int tx_cleaned = 0, work_done = 0;

	/* Must NOT use netdev_priv macro here. */
	adapter = poll_dev->priv;

	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(adapter->netdev))
+	if (!netif_carrier_ok(poll_dev))
		goto quit_polling;

-	while (poll_dev != &adapter->polling_netdev[i]) {
-		i++;
-		BUG_ON(i == adapter->num_rx_queues);
+	/* e1000_clean is called per-cpu.  This lock protects
+	 * tx_ring[0] from being cleaned by multiple cpus
+	 * simultaneously.  A failure obtaining the lock means
+	 * tx_ring[0] is currently being cleaned anyway. */
+	if (spin_trylock(&adapter->tx_queue_lock)) {
+		tx_cleaned = e1000_clean_tx_irq(adapter,
+						&adapter->tx_ring[0]);
+		spin_unlock(&adapter->tx_queue_lock);
	}

-	if (likely(adapter->num_tx_queues == 1)) {
-		/* e1000_clean is called per-cpu.  This lock protects
-		 * tx_ring[0] from being cleaned by multiple cpus
-		 * simultaneously.  A failure obtaining the lock means
-		 * tx_ring[0] is currently being cleaned anyway. */
-		if (spin_trylock(&adapter->tx_queue_lock)) {
-			tx_cleaned = e1000_clean_tx_irq(adapter,
-							&adapter->tx_ring[0]);
-			spin_unlock(&adapter->tx_queue_lock);
-		}
-	} else
-		tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
-
-	adapter->clean_rx(adapter, &adapter->rx_ring[i],
+	adapter->clean_rx(adapter, &adapter->rx_ring[0],
			  &work_done, work_to_do);

	*budget -= work_done;
···

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((!tx_cleaned && (work_done == 0)) ||
-	    !netif_running(adapter->netdev)) {
+	    !netif_running(poll_dev)) {
quit_polling:
		netif_rx_complete(poll_dev);
		e1000_irq_enable(adapter);
···

		length = le16_to_cpu(rx_desc->length);

+		/* adjust length to remove Ethernet CRC */
+		length -= 4;
+
		if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
			/* All receives must fit into a single buffer */
			E1000_DBG("%s: Receive packet consumed multiple"
···
				pci_dma_sync_single_for_device(pdev,
					ps_page_dma->ps_page_dma[0],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				/* remove the CRC */
+				l1 -= 4;
				skb_put(skb, l1);
-				length += l1;
				goto copydone;
			} /* if */
		}
···
			skb->data_len += length;
			skb->truesize += length;
		}
+
+		/* strip the ethernet crc, problem is we're using pages now so
+		 * this whole operation can get a little cpu intensive */
+		pskb_trim(skb, skb->len - 4);

copydone:
		e1000_rx_checksum(adapter, staterr,
···
e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
+
	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev, NULL);
	e1000_clean_tx_irq(adapter, adapter->tx_ring);
+1
drivers/net/ifb.c

···
	for (i = 0; i < numifbs && !err; i++)
		err = ifb_init_one(i);
	if (err) {
+		i--;
		while (--i >= 0)
			ifb_free_one(i);
	}
+2 -5
drivers/net/sky2.c

···
#include "sky2.h"

#define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.4"
+#define DRV_VERSION		"1.5"
#define PFX			DRV_NAME " "

/*
···
	int work_done = 0;
	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);

-	if (!~status)
-		goto out;
-
	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);
···

	if (sky2_more_work(hw))
		return 1;
-out:
+
	netif_rx_complete(dev0);

	sky2_read32(hw, B0_Y2_SP_LISR);
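The dropped `if (!~status)` test is worth decoding: `~status` is zero only when `status` is `0xffffffff`, the all-ones value a PCI read returns once the device has vanished, so the old code was an early bail-out for unplugged hardware. A two-line demonstration:

```c
#include <stdio.h>

int main(void)
{
	unsigned int gone = 0xffffffffu;	/* read from unplugged PCI device */
	unsigned int live = 0x00030003u;	/* any normal status value */

	printf("%d %d\n", !~gone, !~live);	/* prints "1 0" */
	return 0;
}
```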
+258 -330
drivers/net/spider_net.c

···
 *
 * returns the content of the specified SMMIO register.
 */
-static u32
+static inline u32
spider_net_read_reg(struct spider_net_card *card, u32 reg)
{
	u32 value;
···
 * @reg: register to write to
 * @value: value to write into the specified SMMIO register
 */
-static void
+static inline void
spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
{
	value = cpu_to_le32(value);
···
 *
 * returns the status as in the dmac_cmd_status field of the descriptor
 */
-static enum spider_net_descr_status
+static inline int
spider_net_get_descr_status(struct spider_net_descr *descr)
{
-	u32 cmd_status;
-
-	cmd_status = descr->dmac_cmd_status;
-	cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
-	/* no need to mask out any bits, as cmd_status is 32 bits wide only
-	 * (and unsigned) */
-	return cmd_status;
-}
-
-/**
- * spider_net_set_descr_status -- sets the status of a descriptor
- * @descr: descriptor to change
- * @status: status to set in the descriptor
- *
- * changes the status to the specified value. Doesn't change other bits
- * in the status
- */
-static void
-spider_net_set_descr_status(struct spider_net_descr *descr,
-			    enum spider_net_descr_status status)
-{
-	u32 cmd_status;
-	/* read the status */
-	cmd_status = descr->dmac_cmd_status;
-	/* clean the upper 4 bits */
-	cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
-	/* add the status to it */
-	cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
-	/* and write it back */
-	descr->dmac_cmd_status = cmd_status;
+	return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}

/**
···
static int
spider_net_init_chain(struct spider_net_card *card,
		      struct spider_net_descr_chain *chain,
-		      struct spider_net_descr *start_descr, int no)
+		      struct spider_net_descr *start_descr,
+		      int direction, int no)
{
	int i;
	struct spider_net_descr *descr;
	dma_addr_t buf;
-
-	atomic_set(&card->rx_chain_refill,0);

	descr = start_descr;
	memset(descr, 0, sizeof(*descr) * no);

	/* set up the hardware pointers in each descriptor */
	for (i=0; i<no; i++, descr++) {
-		spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+		descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;

		buf = pci_map_single(card->pdev, descr,
				     SPIDER_NET_DESCR_SIZE,
-				     PCI_DMA_BIDIRECTIONAL);
+				     direction);

		if (buf == DMA_ERROR_CODE)
			goto iommu_error;
···
	start_descr->prev = descr-1;

	descr = start_descr;
-	for (i=0; i < no; i++, descr++) {
-		descr->next_descr_addr = descr->next->bus_addr;
-	}
+	if (direction == PCI_DMA_FROMDEVICE)
+		for (i=0; i < no; i++, descr++)
+			descr->next_descr_addr = descr->next->bus_addr;

+	spin_lock_init(&chain->lock);
	chain->head = start_descr;
	chain->tail = start_descr;
···
		if (descr->bus_addr)
			pci_unmap_single(card->pdev, descr->bus_addr,
					 SPIDER_NET_DESCR_SIZE,
-					 PCI_DMA_BIDIRECTIONAL);
+					 direction);
	return -ENOMEM;
}
···
			dev_kfree_skb(descr->skb);
			pci_unmap_single(card->pdev, descr->buf_addr,
					 SPIDER_NET_MAX_FRAME,
-					 PCI_DMA_BIDIRECTIONAL);
+					 PCI_DMA_FROMDEVICE);
		}
		descr = descr->next;
	}
···
	skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
	/* io-mmu-map the skb */
	buf = pci_map_single(card->pdev, descr->skb->data,
-			     SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+			     SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
	descr->buf_addr = buf;
	if (buf == DMA_ERROR_CODE) {
		dev_kfree_skb_any(descr->skb);
		if (netif_msg_rx_err(card) && net_ratelimit())
			pr_err("Could not iommu-map rx buffer\n");
-		spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+		descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	} else {
-		descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED;
+		descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
+					 SPIDER_NET_DMAC_NOINTR_COMPLETE;
	}

	return error;
···
 * chip by writing to the appropriate register. DMA is enabled in
 * spider_net_enable_rxdmac.
 */
-static void
+static inline void
spider_net_enable_rxchtails(struct spider_net_card *card)
{
	/* assume chain is aligned correctly */
···
 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
 * in the GDADMACCNTR register
 */
-static void
+static inline void
spider_net_enable_rxdmac(struct spider_net_card *card)
{
	wmb();
···
static void
spider_net_refill_rx_chain(struct spider_net_card *card)
{
-	struct spider_net_descr_chain *chain;
-
-	chain = &card->rx_chain;
+	struct spider_net_descr_chain *chain = &card->rx_chain;
+	unsigned long flags;

	/* one context doing the refill (and a second context seeing that
	 * and omitting it) is ok. If called by NAPI, we'll be called again
	 * as spider_net_decode_one_descr is called several times. If some
	 * interrupt calls us, the NAPI is about to clean up anyway. */
-	if (atomic_inc_return(&card->rx_chain_refill) == 1)
-		while (spider_net_get_descr_status(chain->head) ==
-		       SPIDER_NET_DESCR_NOT_IN_USE) {
-			if (spider_net_prepare_rx_descr(card, chain->head))
-				break;
-			chain->head = chain->head->next;
-		}
+	if (!spin_trylock_irqsave(&chain->lock, flags))
+		return;

-	atomic_dec(&card->rx_chain_refill);
+	while (spider_net_get_descr_status(chain->head) ==
+	       SPIDER_NET_DESCR_NOT_IN_USE) {
+		if (spider_net_prepare_rx_descr(card, chain->head))
+			break;
+		chain->head = chain->head->next;
+	}
+
+	spin_unlock_irqrestore(&chain->lock, flags);
}

/**
···
error:
	spider_net_free_rx_chain_contents(card);
	return result;
-}
-
-/**
- * spider_net_release_tx_descr - processes a used tx descriptor
- * @card: card structure
- * @descr: descriptor to release
- *
- * releases a used tx descriptor (unmapping, freeing of skb)
- */
-static void
-spider_net_release_tx_descr(struct spider_net_card *card,
-			    struct spider_net_descr *descr)
-{
-	struct sk_buff *skb;
-
-	/* unmap the skb */
-	skb = descr->skb;
-	pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
-			 PCI_DMA_BIDIRECTIONAL);
-
-	dev_kfree_skb_any(skb);
-
-	/* set status to not used */
-	spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply release tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not loose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
-	struct spider_net_descr_chain *tx_chain = &card->tx_chain;
-	enum spider_net_descr_status status;
-
-	if (atomic_inc_return(&card->tx_chain_release) != 1) {
-		atomic_dec(&card->tx_chain_release);
-		return 1;
-	}
-
-	for (;;) {
-		status = spider_net_get_descr_status(tx_chain->tail);
-		switch (status) {
-		case SPIDER_NET_DESCR_CARDOWNED:
-			if (!brutal)
-				goto out;
-			/* fallthrough, if we release the descriptors
-			 * brutally (then we don't care about
-			 * SPIDER_NET_DESCR_CARDOWNED) */
-		case SPIDER_NET_DESCR_RESPONSE_ERROR:
-		case SPIDER_NET_DESCR_PROTECTION_ERROR:
-		case SPIDER_NET_DESCR_FORCE_END:
-			if (netif_msg_tx_err(card))
-				pr_err("%s: forcing end of tx descriptor "
-				       "with status x%02x\n",
-				       card->netdev->name, status);
-			card->netdev_stats.tx_dropped++;
-			break;
-
-		case SPIDER_NET_DESCR_COMPLETE:
-			card->netdev_stats.tx_packets++;
-			card->netdev_stats.tx_bytes +=
-				tx_chain->tail->skb->len;
-			break;
-
-		default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
-			goto out;
-		}
-		spider_net_release_tx_descr(card, tx_chain->tail);
-		tx_chain->tail = tx_chain->tail->next;
-	}
-out:
-	atomic_dec(&card->tx_chain_release);
-
-	netif_wake_queue(card->netdev);
-
-	if (status == SPIDER_NET_DESCR_CARDOWNED)
-		return 1;
-	return 0;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @card: card structure
- *
- * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
- * interrupts to cleanup our TX ring) and returns sent packets to the stack
- * by freeing them
- */
-static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
-{
-	if ( (spider_net_release_tx_chain(card, 0)) &&
-	     (card->netdev->flags & IFF_UP) ) {
-		mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
-	}
}

/**
···
}

/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
-	struct spider_net_card *card = netdev_priv(netdev);
-
-	tasklet_kill(&card->rxram_full_tl);
-	netif_poll_disable(netdev);
-	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
-	del_timer_sync(&card->tx_timer);
-
-	/* disable/mask all interrupts */
-	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
-	spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
-	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
-
-	/* free_irq(netdev->irq, netdev);*/
-	free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
-
-	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
-			     SPIDER_NET_DMA_TX_FEND_VALUE);
-
-	/* turn off DMA, force end */
-	spider_net_disable_rxdmac(card);
-
-	/* release chains */
-	spider_net_release_tx_chain(card, 1);
-
-	spider_net_free_chain(card, &card->tx_chain);
-	spider_net_free_chain(card, &card->rx_chain);
-
-	return 0;
-}
-
-/**
- * spider_net_get_next_tx_descr - returns the next available tx descriptor
- * @card: device structure to get descriptor from
- *
- * returns the address of the next descriptor, or NULL if not available.
- */
-static struct spider_net_descr *
-spider_net_get_next_tx_descr(struct spider_net_card *card)
-{
-	/* check, if head points to not-in-use descr */
-	if ( spider_net_get_descr_status(card->tx_chain.head) ==
-	     SPIDER_NET_DESCR_NOT_IN_USE ) {
-		return card->tx_chain.head;
-	} else {
-		return NULL;
-	}
-}
-
-/**
- * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
- * @descr: descriptor structure to fill out
- * @skb: packet to consider
- *
- * fills out the command and status field of the descriptor structure,
- * depending on hardware checksum settings.
- */
-static void
-spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
-			       struct sk_buff *skb)
-{
-	/* make sure the other fields in the descriptor are written */
-	wmb();
-
-	if (skb->ip_summed != CHECKSUM_HW) {
-		descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
-		return;
-	}
-
-	/* is packet ip?
-	 * if yes: tcp? udp? */
-	if (skb->protocol == htons(ETH_P_IP)) {
-		if (skb->nh.iph->protocol == IPPROTO_TCP)
-			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
-		else if (skb->nh.iph->protocol == IPPROTO_UDP)
-			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
-		else /* the stack should checksum non-tcp and non-udp
-			packets on his own: NETIF_F_IP_CSUM */
-			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
-	}
-}
-
-/**
 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
 * @card: card structure
 * @descr: descriptor structure to fill out
···
 */
static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
-			    struct spider_net_descr *descr,
			    struct sk_buff *skb)
{
+	struct spider_net_descr *descr = card->tx_chain.head;
	dma_addr_t buf;

-	buf = pci_map_single(card->pdev, skb->data,
-			     skb->len, PCI_DMA_BIDIRECTIONAL);
+	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (buf == DMA_ERROR_CODE) {
		if (netif_msg_tx_err(card) && net_ratelimit())
			pr_err("could not iommu-map packet (%p, %i). "
···
	descr->buf_addr = buf;
	descr->buf_size = skb->len;
+	descr->next_descr_addr = 0;
	descr->skb = skb;
	descr->data_status = 0;

-	spider_net_set_txdescr_cmdstat(descr,skb);
+	descr->dmac_cmd_status =
+			SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
+	if (skb->protocol == htons(ETH_P_IP))
+		switch (skb->nh.iph->protocol) {
+		case IPPROTO_TCP:
+			descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
+			break;
+		case IPPROTO_UDP:
+			descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
+			break;
+		}
+
+	descr->prev->next_descr_addr = descr->bus_addr;
+
+	return 0;
+}
+
+/**
+ * spider_net_release_tx_descr - processes a used tx descriptor
+ * @card: card structure
+ * @descr: descriptor to release
+ *
+ * releases a used tx descriptor (unmapping, freeing of skb)
+ */
+static inline void
+spider_net_release_tx_descr(struct spider_net_card *card)
+{
+	struct spider_net_descr *descr = card->tx_chain.tail;
+	struct sk_buff *skb;
+
+	card->tx_chain.tail = card->tx_chain.tail->next;
+	descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+
+	/* unmap the skb */
+	skb = descr->skb;
+	pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
+			 PCI_DMA_TODEVICE);
+	dev_kfree_skb_any(skb);
+}
+
+/**
+ * spider_net_release_tx_chain - processes sent tx descriptors
+ * @card: adapter structure
+ * @brutal: if set, don't care about whether descriptor seems to be in use
+ *
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply release tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not loose initiative.
+ */
+static int
+spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
+{
+	struct spider_net_descr_chain *chain = &card->tx_chain;
+	int status;
+
+	spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
+
+	while (chain->tail != chain->head) {
+		status = spider_net_get_descr_status(chain->tail);
+		switch (status) {
+		case SPIDER_NET_DESCR_COMPLETE:
+			card->netdev_stats.tx_packets++;
+			card->netdev_stats.tx_bytes += chain->tail->skb->len;
+			break;
+
+		case SPIDER_NET_DESCR_CARDOWNED:
+			if (!brutal)
+				return 1;
+			/* fallthrough, if we release the descriptors
+			 * brutally (then we don't care about
+			 * SPIDER_NET_DESCR_CARDOWNED) */
+
+		case SPIDER_NET_DESCR_RESPONSE_ERROR:
+		case SPIDER_NET_DESCR_PROTECTION_ERROR:
+		case SPIDER_NET_DESCR_FORCE_END:
+			if (netif_msg_tx_err(card))
+				pr_err("%s: forcing end of tx descriptor "
+				       "with status x%02x\n",
+				       card->netdev->name, status);
+			card->netdev_stats.tx_errors++;
+			break;
+
+		default:
+			card->netdev_stats.tx_dropped++;
+			return 1;
+		}
+		spider_net_release_tx_descr(card);
+	}

	return 0;
}
···
 * spider_net_kick_tx_dma writes the current tx chain head as start address
 * of the tx descriptor chain and enables the transmission DMA engine
 */
-static void
-spider_net_kick_tx_dma(struct spider_net_card *card,
-		       struct spider_net_descr *descr)
+static inline void
+spider_net_kick_tx_dma(struct spider_net_card *card)
{
-	/* this is the only descriptor in the output chain.
-	 * Enable TX DMA */
+	struct spider_net_descr *descr;

-	spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
-			     descr->bus_addr);
+	if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
+	    SPIDER_NET_TX_DMA_EN)
+		goto out;

-	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
-			     SPIDER_NET_DMA_TX_VALUE);
+	descr = card->tx_chain.tail;
+	for (;;) {
+		if (spider_net_get_descr_status(descr) ==
+		    SPIDER_NET_DESCR_CARDOWNED) {
+			spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
+					     descr->bus_addr);
+			spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+					     SPIDER_NET_DMA_TX_VALUE);
+			break;
+		}
+		if (descr == card->tx_chain.head)
+			break;
+		descr = descr->next;
+	}
+
+out:
+	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}

/**
···
 * @skb: packet to send out
 * @netdev: interface device structure
 *
- * returns 0 on success, <0 on failure
+ * returns 0 on success, !0 on failure
 */
static int
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);
-	struct spider_net_descr *descr;
+	struct spider_net_descr_chain *chain = &card->tx_chain;
+	struct spider_net_descr *descr = chain->head;
+	unsigned long flags;
	int result;
+
+	spin_lock_irqsave(&chain->lock, flags);

	spider_net_release_tx_chain(card, 0);

-	descr = spider_net_get_next_tx_descr(card);
-
-	if (!descr)
-		goto error;
-
-	result = spider_net_prepare_tx_descr(card, descr, skb);
-	if (result)
-		goto error;
-
-	card->tx_chain.head = card->tx_chain.head->next;
-
-	if (spider_net_get_descr_status(descr->prev) !=
-	    SPIDER_NET_DESCR_CARDOWNED) {
-		/* make sure the current descriptor is in memory. Then
-		 * kicking it on again makes sense, if the previous is not
-		 * card-owned anymore. Check the previous descriptor twice
-		 * to omit an mb() in heavy traffic cases */
-		mb();
-		if (spider_net_get_descr_status(descr->prev) !=
-		    SPIDER_NET_DESCR_CARDOWNED)
-			spider_net_kick_tx_dma(card, descr);
+	if (chain->head->next == chain->tail->prev) {
+		card->netdev_stats.tx_dropped++;
+		result = NETDEV_TX_LOCKED;
+		goto out;
	}

-	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
+	if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
+		result = NETDEV_TX_LOCKED;
+		goto out;
+	}

-	return NETDEV_TX_OK;
+	if (spider_net_prepare_tx_descr(card, skb) != 0) {
+		card->netdev_stats.tx_dropped++;
+		result = NETDEV_TX_BUSY;
+		goto out;
+	}

-error:
-	card->netdev_stats.tx_dropped++;
-	return NETDEV_TX_BUSY;
+	result = NETDEV_TX_OK;
+
+	spider_net_kick_tx_dma(card);
+	card->tx_chain.head = card->tx_chain.head->next;
+
+out:
+	spin_unlock_irqrestore(&chain->lock, flags);
+	netif_wake_queue(netdev);
+	return result;
+}
+
+/**
+ * spider_net_cleanup_tx_ring - cleans up the TX ring
+ * @card: card structure
+ *
+ * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
+ * interrupts to cleanup our TX ring) and returns sent packets to the stack
+ * by freeing them
+ */
+static void
+spider_net_cleanup_tx_ring(struct spider_net_card *card)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->tx_chain.lock, flags);
+
+	if ((spider_net_release_tx_chain(card, 0) != 0) &&
+	    (card->netdev->flags & IFF_UP))
+		spider_net_kick_tx_dma(card);
+
+	spin_unlock_irqrestore(&card->tx_chain.lock, flags);
}

/**
···

	/* unmap descriptor */
	pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
-			 PCI_DMA_BIDIRECTIONAL);
+			 PCI_DMA_FROMDEVICE);

	/* the cases we'll throw away the packet immediately */
	if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
···
static int
spider_net_decode_one_descr(struct spider_net_card *card, int napi)
{
-	enum spider_net_descr_status status;
-	struct spider_net_descr *descr;
-	struct spider_net_descr_chain *chain;
+	struct spider_net_descr_chain *chain = &card->rx_chain;
+	struct spider_net_descr *descr = chain->tail;
+	int status;
	int result;
-
-	chain = &card->rx_chain;
-	descr = chain->tail;

	status = spider_net_get_descr_status(descr);
···
			       card->netdev->name, status);
		card->netdev_stats.rx_dropped++;
		pci_unmap_single(card->pdev, descr->buf_addr,
-				 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+				 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_irq(descr->skb);
		goto refill;
	}
···
	/* ok, we've got a packet in descr */
	result = spider_net_pass_skb_up(descr, card, napi);
refill:
-	spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
+	descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	/* change the descriptor state: */
	if (!napi)
		spider_net_refill_rx_chain(card);
···
		return -EADDRNOTAVAIL;

	return 0;
-}
-
-/**
- * spider_net_enable_txdmac - enables a TX DMA controller
- * @card: card structure
- *
- * spider_net_enable_txdmac enables the TX DMA controller by setting the
- * descriptor chain tail address
- */
-static void
-spider_net_enable_txdmac(struct spider_net_card *card)
-{
-	/* assume chain is aligned correctly */
-	spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
-			     card->tx_chain.tail->bus_addr);
}

/**
···
	{ SPIDER_NET_GMRWOLCTRL, 0 },
	{ SPIDER_NET_GTESTMD, 0x10000000 },
	{ SPIDER_NET_GTTQMSK, 0x00400040 },
-	{ SPIDER_NET_GTESTMD, 0 },

	{ SPIDER_NET_GMACINTEN, 0 },
···

	spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);

-	/* set chain tail adress for TX chain */
-	spider_net_enable_txdmac(card);
-
	spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
			     SPIDER_NET_LENLMT_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GMACMODE,
···
			     SPIDER_NET_INT1_MASK_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
			     SPIDER_NET_INT2_MASK_VALUE);
+
+	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+			     SPIDER_NET_GDTDCEIDIS);
}

/**
···

	result = -ENOMEM;
	if (spider_net_init_chain(card, &card->tx_chain,
-				  card->descr, tx_descriptors))
+				  card->descr,
+				  PCI_DMA_TODEVICE, tx_descriptors))
		goto alloc_tx_failed;
	if (spider_net_init_chain(card, &card->rx_chain,
-				  card->descr + tx_descriptors, rx_descriptors))
+				  card->descr + tx_descriptors,
+				  PCI_DMA_FROMDEVICE, rx_descriptors))
		goto alloc_rx_failed;

	/* allocate rx skbs */
···
	/* empty sequencer data */
	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
	     sequencer++) {
-		spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
+		spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
				     sequencer * 8, 0x0);
		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
···
	/* reset */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
+}
+
+/**
+ * spider_net_stop - called upon ifconfig down
+ * @netdev: interface device structure
+ *
+ * always returns 0
+ */
+int
+spider_net_stop(struct net_device *netdev)
+{
+	struct spider_net_card *card = netdev_priv(netdev);
+
+	tasklet_kill(&card->rxram_full_tl);
+	netif_poll_disable(netdev);
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+	del_timer_sync(&card->tx_timer);
+
+	/* disable/mask all interrupts */
+	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
+	spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
+	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
+
+	/* free_irq(netdev->irq, netdev);*/
+	free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
+
+	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+			     SPIDER_NET_DMA_TX_FEND_VALUE);
+
+	/* turn off DMA, force end */
+	spider_net_disable_rxdmac(card);
+
+	/* release chains */
+	if (spin_trylock(&card->tx_chain.lock)) {
+		spider_net_release_tx_chain(card, 1);
+		spin_unlock(&card->tx_chain.lock);
+	}
+
+	spider_net_free_chain(card, &card->tx_chain);
+	spider_net_free_chain(card, &card->rx_chain);
+
+	return 0;
}

/**
···
		goto out;

	spider_net_open(netdev);
-	spider_net_kick_tx_dma(card, card->tx_chain.head);
+	spider_net_kick_tx_dma(card);
	netif_device_attach(netdev);

out:
···

	pci_set_drvdata(card->pdev, netdev);

-	atomic_set(&card->tx_chain_release,0);
	card->rxram_full_tl.data = (unsigned long) card;
	card->rxram_full_tl.func =
		(void (*)(unsigned long)) spider_net_handle_rxram_full;
···

	spider_net_setup_netdev_ops(netdev);

-	netdev->features = NETIF_F_HW_CSUM;
+	netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
	/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
	 * NETIF_F_HW_VLAN_FILTER */
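The reworked xmit path pairs a driver-private `chain->lock` with `NETIF_F_LLTX` (set in the last hunk) and returns `NETDEV_TX_LOCKED` when the trylock fails, letting the core requeue the packet instead of spinning. A hypothetical skeleton of that lockless-TX shape (`struct my_priv` and the ring operation are illustrative, not from the driver):

```c
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_priv {
	spinlock_t tx_lock;	/* driver-private, replaces the core's xmit lock */
};

static int my_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);
	unsigned long flags;

	/* With NETIF_F_LLTX the core calls us without holding its own
	 * lock, so contention is possible; refuse rather than spin. */
	if (!spin_trylock_irqsave(&priv->tx_lock, flags))
		return NETDEV_TX_LOCKED;

	/* ... place skb on the hardware ring, kick DMA ... */

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}
```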
+20 -51
drivers/net/spider_net.h

···
#define SPIDER_NET_DMA_RX_VALUE		0x80000000
#define SPIDER_NET_DMA_RX_FEND_VALUE	0x00030003
/* to set TX_DMA_EN */
-#define SPIDER_NET_DMA_TX_VALUE		0x80000000
+#define SPIDER_NET_TX_DMA_EN		0x80000000
+#define SPIDER_NET_GDTDCEIDIS		0x00000002
+#define SPIDER_NET_DMA_TX_VALUE		SPIDER_NET_TX_DMA_EN | \
+					SPIDER_NET_GDTDCEIDIS
#define SPIDER_NET_DMA_TX_FEND_VALUE	0x00030003

/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
···
				(~SPIDER_NET_TXINT) & \
				(~SPIDER_NET_RXINT) )

#define SPIDER_NET_GPREXEC		0x80000000
#define SPIDER_NET_GPRDAT_MASK		0x0000ffff

-/* descriptor bits
- *
- * 1010			descriptor ready
- * 0			descr in middle of chain
- * 000			fixed to 0
- *
- * 0			no interrupt on completion
- * 000			fixed to 0
- * 1			no ipsec processing
- * 1			last descriptor for this frame
- * 00			no checksum
- * 10			tcp checksum
- * 11			udp checksum
- *
- * 00			fixed to 0
- * 0			fixed to 0
- * 0			no interrupt on response errors
- * 0			no interrupt on invalid descr
- * 0			no interrupt on dma process termination
- * 0			no interrupt on descr chain end
- * 0			no interrupt on descr complete
- *
- * 000			fixed to 0
- * 0			response error interrupt status
- * 0			invalid descr status
- * 0			dma termination status
- * 0			descr chain end status
- * 0			descr complete status */
-#define SPIDER_NET_DMAC_CMDSTAT_NOCS	0xa00c0000
-#define SPIDER_NET_DMAC_CMDSTAT_TCPCS	0xa00e0000
-#define SPIDER_NET_DMAC_CMDSTAT_UDPCS	0xa00f0000
-#define SPIDER_NET_DESCR_IND_PROC_SHIFT	28
-#define SPIDER_NET_DESCR_IND_PROC_MASKO	0x0fffffff
+#define SPIDER_NET_DMAC_NOINTR_COMPLETE	0x00800000
+#define SPIDER_NET_DMAC_NOCS		0x00040000
+#define SPIDER_NET_DMAC_TCP		0x00020000
+#define SPIDER_NET_DMAC_UDP		0x00030000
+#define SPIDER_NET_TXDCEST		0x08000000

-/* descr ready, descr is in middle of chain, get interrupt on completion */
-#define SPIDER_NET_DMAC_RX_CARDOWNED	0xa0800000
-
-enum spider_net_descr_status {
-	SPIDER_NET_DESCR_COMPLETE		= 0x00, /* used in rx and tx */
-	SPIDER_NET_DESCR_RESPONSE_ERROR		= 0x01, /* used in rx and tx */
-	SPIDER_NET_DESCR_PROTECTION_ERROR	= 0x02, /* used in rx and tx */
-	SPIDER_NET_DESCR_FRAME_END		= 0x04, /* used in rx */
-	SPIDER_NET_DESCR_FORCE_END		= 0x05, /* used in rx and tx */
-	SPIDER_NET_DESCR_CARDOWNED		= 0x0a, /* used in rx and tx */
-	SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
-};
+#define SPIDER_NET_DESCR_IND_PROC_MASK		0xF0000000
+#define SPIDER_NET_DESCR_COMPLETE		0x00000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_RESPONSE_ERROR		0x10000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_PROTECTION_ERROR	0x20000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_FRAME_END		0x40000000 /* used in rx */
+#define SPIDER_NET_DESCR_FORCE_END		0x50000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_CARDOWNED		0xA0000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_NOT_IN_USE		0xF0000000

struct spider_net_descr {
	/* as defined by the hardware */
···
} __attribute__((aligned(32)));

struct spider_net_descr_chain {
-	/* we walk from tail to head */
+	spinlock_t lock;
	struct spider_net_descr *head;
	struct spider_net_descr *tail;
};
···

	struct spider_net_descr_chain tx_chain;
	struct spider_net_descr_chain rx_chain;
-	atomic_t rx_chain_refill;
-	atomic_t tx_chain_release;

	struct net_device_stats netdev_stats;
···564MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");565MODULE_DESCRIPTION("RISCom/N2 serial port driver");566MODULE_LICENSE("GPL v2");567-module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */0
···564MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");565MODULE_DESCRIPTION("RISCom/N2 serial port driver");566MODULE_LICENSE("GPL v2");567+module_param(hw, charp, 0444);568+MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
+14-3
drivers/s390/block/xpram.c
···304{305 unsigned long mem_needed;306 unsigned long mem_auto;0307 int mem_auto_no;308 int i;309···322 mem_needed = 0;323 mem_auto_no = 0;324 for (i = 0; i < xpram_devs; i++) {325- if (sizes[i])326- xpram_sizes[i] =327- (memparse(sizes[i], &sizes[i]) + 3) & -4UL;0000000000328 if (xpram_sizes[i])329 mem_needed += xpram_sizes[i];330 else
···304{305 unsigned long mem_needed;306 unsigned long mem_auto;307+ unsigned long long size;308 int mem_auto_no;309 int i;310···321 mem_needed = 0;322 mem_auto_no = 0;323 for (i = 0; i < xpram_devs; i++) {324+ if (sizes[i]) {325+ size = simple_strtoull(sizes[i], &sizes[i], 0);326+ switch (sizes[i][0]) {327+ case 'g':328+ case 'G':329+ size <<= 20;330+ break;331+ case 'm':332+ case 'M':333+ size <<= 10;334+ }335+ xpram_sizes[i] = (size + 3) & -4UL;336+ }337 if (xpram_sizes[i])338 mem_needed += xpram_sizes[i];339 else
···114MODULE_LICENSE("GPL");115module_param(NCR_D700, charp, 0);116117-static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };119120#ifdef MODULE···173 char pad;174};175176-static int 177NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,178 int slot, u32 region, int differential)179{···243 * essentially connectecd to the MCA bus independently, it is easier244 * to set them up as two separate host adapters, rather than one245 * adapter with two channels */246-static int247NCR_D700_probe(struct device *dev)248{249 struct NCR_D700_private *p;···329 for (i = 0; i < 2; i++) {330 int err;331332- if ((err = NCR_D700_probe_one(p, i, slot, irq,333 offset_addr + (0x80 * i),334 differential)) != 0)335 printk("D700: SIOP%d: probe failed, error = %d\n",···349 return 0;350}351352-static void353NCR_D700_remove_one(struct Scsi_Host *host)354{355 scsi_remove_host(host);···359 release_region(host->base, 64);360}361362-static int363NCR_D700_remove(struct device *dev)364{365 struct NCR_D700_private *p = dev_get_drvdata(dev);···380 .name = "NCR_D700",381 .bus = &mca_bus_type,382 .probe = NCR_D700_probe,383- .remove = NCR_D700_remove,384 },385};386
···114MODULE_LICENSE("GPL");115module_param(NCR_D700, charp, 0);116117+static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };119120#ifdef MODULE···173 char pad;174};175176+static int __devinit177NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,178 int slot, u32 region, int differential)179{···243 * essentially connectecd to the MCA bus independently, it is easier244 * to set them up as two separate host adapters, rather than one245 * adapter with two channels */246+static int __devinit247NCR_D700_probe(struct device *dev)248{249 struct NCR_D700_private *p;···329 for (i = 0; i < 2; i++) {330 int err;331332+ if ((err = NCR_D700_probe_one(p, i, irq, slot,333 offset_addr + (0x80 * i),334 differential)) != 0)335 printk("D700: SIOP%d: probe failed, error = %d\n",···349 return 0;350}351352+static void __devexit353NCR_D700_remove_one(struct Scsi_Host *host)354{355 scsi_remove_host(host);···359 release_region(host->base, 64);360}361362+static int __devexit363NCR_D700_remove(struct device *dev)364{365 struct NCR_D700_private *p = dev_get_drvdata(dev);···380 .name = "NCR_D700",381 .bus = &mca_bus_type,382 .probe = NCR_D700_probe,383+ .remove = __devexit_p(NCR_D700_remove),384 },385};386
···243static uint32_t aic79xx_no_reset;244245/*246- * Certain PCI motherboards will scan PCI devices from highest to lowest,247- * others scan from lowest to highest, and they tend to do all kinds of248- * strange things when they come into contact with PCI bridge chips. The249- * net result of all this is that the PCI card that is actually used to boot250- * the machine is very hard to detect. Most motherboards go from lowest251- * PCI slot number to highest, and the first SCSI controller found is the252- * one you boot from. The only exceptions to this are when a controller253- * has its BIOS disabled. So, we by default sort all of our SCSI controllers254- * from lowest PCI slot number to highest PCI slot number. We also force255- * all controllers with their BIOS disabled to the end of the list. This256- * works on *almost* all computers. Where it doesn't work, we have this257- * option. Setting this option to non-0 will reverse the order of the sort258- * to highest first, then lowest, but will still leave cards with their BIOS259- * disabled at the very end. That should fix everyone up unless there are260- * really strange cirumstances.261- */262-static uint32_t aic79xx_reverse_scan;263-264-/*265 * Should we force EXTENDED translation on a controller.266 * 0 == Use whatever is in the SEEPROM or default to off267 * 1 == Use whatever is in the SEEPROM or default to on···331" periodically to prevent tag starvation.\n"332" This may be required by some older disk\n"333" or drives/RAID arrays.\n"334-" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"335" tag_info:<tag_str> Set per-target tag depth\n"336" global_tag_depth:<int> Global tag depth for all targets on all buses\n"337" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"···1011#ifdef AHD_DEBUG1012 { "debug", &ahd_debug },1013#endif1014- { "reverse_scan", &aic79xx_reverse_scan },1015 { "periodic_otag", &aic79xx_periodic_otag },1016 { "pci_parity", &aic79xx_pci_parity },1017 { "seltime", &aic79xx_seltime },
···243static uint32_t aic79xx_no_reset;244245/*0000000000000000000246 * Should we force EXTENDED translation on a controller.247 * 0 == Use whatever is in the SEEPROM or default to off248 * 1 == Use whatever is in the SEEPROM or default to on···350" periodically to prevent tag starvation.\n"351" This may be required by some older disk\n"352" or drives/RAID arrays.\n"0353" tag_info:<tag_str> Set per-target tag depth\n"354" global_tag_depth:<int> Global tag depth for all targets on all buses\n"355" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"···1031#ifdef AHD_DEBUG1032 { "debug", &ahd_debug },1033#endif01034 { "periodic_otag", &aic79xx_periodic_otag },1035 { "pci_parity", &aic79xx_pci_parity },1036 { "seltime", &aic79xx_seltime },
-1
drivers/scsi/aic7xxx/aic7xxx_osm.c
···353" periodically to prevent tag starvation.\n"354" This may be required by some older disk\n"355" drives or RAID arrays.\n"356-" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"357" tag_info:<tag_str> Set per-target tag depth\n"358" global_tag_depth:<int> Global tag depth for every target\n"359" on every bus\n"
···353" periodically to prevent tag starvation.\n"354" This may be required by some older disk\n"355" drives or RAID arrays.\n"0356" tag_info:<tag_str> Set per-target tag depth\n"357" global_tag_depth:<int> Global tag depth for every target\n"358" on every bus\n"
···81 int rc;8283 single_host_data = hostdata;84- rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0);85 if (rc < 0) {86 printk("viopath_open failed with rc %d in open_event_path\n",87 rc);
···81 int rc;8283 single_host_data = hostdata;84+ rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);85 if (rc < 0) {86 printk("viopath_open failed with rc %d in open_event_path\n",87 rc);
+1
drivers/scsi/ibmvscsi/rpa_vscsi.c
···238 if (rc == 2) {239 /* Adapter is good, but other end is not ready */240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");0241 } else if (rc != 0) {242 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);243 goto reg_crq_failed;
···238 if (rc == 2) {239 /* Adapter is good, but other end is not ready */240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");241+ retrc = 0;242 } else if (rc != 0) {243 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);244 goto reg_crq_failed;
···2122struct lpfc_sli2_slim;2324-#define LPFC_MAX_TARGET 256 /* max targets supported */25-#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */26-#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */270000028#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */29#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */30#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */···43 (( (u64)(high)<<16 ) << 16)|( (u64)(low))))44/* Provide maximum configuration definitions. */45#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */46-#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */47#define FC_MAX_ADPTMSG 644849#define MAX_HBAEVT 32
···2122struct lpfc_sli2_slim;230002425+#define LPFC_MAX_TARGET 256 /* max number of targets supported */26+#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els27+ requests */28+#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact29+ the NameServer before giving up. */30#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */31#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */32#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */···41 (( (u64)(high)<<16 ) << 16)|( (u64)(low))))42/* Provide maximum configuration definitions. */43#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */044#define FC_MAX_ADPTMSG 644546#define MAX_HBAEVT 32
+59-48
drivers/scsi/lpfc/lpfc_attr.c
···219 return -ENOMEM;220221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));222- lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);00223 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);00000000224225 if (mbxstatus == MBX_TIMEOUT)226 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;···243 return 0;244}24500000000000000000000000000000000000000000246static ssize_t247lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)248{249 struct Scsi_Host *host = class_to_shost(cdev);250 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;251 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);252-}253-254-static ssize_t255-lpfc_board_online_show(struct class_device *cdev, char *buf)256-{257- struct Scsi_Host *host = class_to_shost(cdev);258- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;259-260- if (phba->fc_flag & FC_OFFLINE_MODE)261- return snprintf(buf, PAGE_SIZE, "0\n");262- else263- return snprintf(buf, PAGE_SIZE, "1\n");264-}265-266-static ssize_t267-lpfc_board_online_store(struct class_device *cdev, const char *buf,268- size_t count)269-{270- struct Scsi_Host *host = class_to_shost(cdev);271- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;272- struct completion online_compl;273- int val=0, status=0;274-275- if (sscanf(buf, "%d", &val) != 1)276- return -EINVAL;277-278- init_completion(&online_compl);279-280- if (val)281- lpfc_workq_post_event(phba, &status, &online_compl,282- LPFC_EVT_ONLINE);283- else284- lpfc_workq_post_event(phba, &status, &online_compl,285- LPFC_EVT_OFFLINE);286- wait_for_completion(&online_compl);287- if (!status)288- return strlen(buf);289- else290- return -EIO;291}292293static ssize_t···544 NULL);545static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,546 NULL);547-static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,548- lpfc_board_online_show, lpfc_board_online_store);549static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,550 lpfc_board_mode_show, lpfc_board_mode_store);0551552static int lpfc_poll = 0;553module_param(lpfc_poll, int, 0);···706 "during discovery");707708/*709-# lpfc_max_luns: maximum number of LUNs per target driver will support710-# Value range is [1,32768]. Default value is 256.711-# NOTE: The SCSI layer will scan each target for this many luns712*/713-LPFC_ATTR_R(max_luns, 256, 1, 32768,714- "Maximum number of LUNs per target driver will support");715716/*717# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.···750 &class_device_attr_lpfc_max_luns,751 &class_device_attr_nport_evt_cnt,752 &class_device_attr_management_version,753- &class_device_attr_board_online,754 &class_device_attr_board_mode,0755 &class_device_attr_lpfc_poll,756 &class_device_attr_lpfc_poll_tmo,757 NULL,
···219 return -ENOMEM;220221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));222+ pmboxq->mb.mbxCommand = MBX_DOWN_LINK;223+ pmboxq->mb.mbxOwner = OWN_HOST;224+225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);226+227+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {228+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));229+ lpfc_init_link(phba, pmboxq, phba->cfg_topology,230+ phba->cfg_link_speed);231+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,232+ phba->fc_ratov * 2);233+ }234235 if (mbxstatus == MBX_TIMEOUT)236 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;···233 return 0;234}235236+static int237+lpfc_selective_reset(struct lpfc_hba *phba)238+{239+ struct completion online_compl;240+ int status = 0;241+242+ init_completion(&online_compl);243+ lpfc_workq_post_event(phba, &status, &online_compl,244+ LPFC_EVT_OFFLINE);245+ wait_for_completion(&online_compl);246+247+ if (status != 0)248+ return -EIO;249+250+ init_completion(&online_compl);251+ lpfc_workq_post_event(phba, &status, &online_compl,252+ LPFC_EVT_ONLINE);253+ wait_for_completion(&online_compl);254+255+ if (status != 0)256+ return -EIO;257+258+ return 0;259+}260+261+static ssize_t262+lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)263+{264+ struct Scsi_Host *host = class_to_shost(cdev);265+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;266+ int status = -EINVAL;267+268+ if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)269+ status = lpfc_selective_reset(phba);270+271+ if (status == 0)272+ return strlen(buf);273+ else274+ return status;275+}276+277static ssize_t278lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)279{280 struct Scsi_Host *host = class_to_shost(cdev);281 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;282 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);000000000000000000000000000000000000000283}284285static ssize_t···532 NULL);533static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,534 NULL);00535static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,536 lpfc_board_mode_show, lpfc_board_mode_store);537+static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);538539static int lpfc_poll = 0;540module_param(lpfc_poll, int, 0);···695 "during discovery");696697/*698+# lpfc_max_luns: maximum allowed LUN.699+# Value range is [0,65535]. Default value is 255.700+# NOTE: The SCSI layer might probe all allowed LUN on some old targets.701*/702+LPFC_ATTR_R(max_luns, 255, 0, 65535,703+ "Maximum allowed LUN");704705/*706# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.···739 &class_device_attr_lpfc_max_luns,740 &class_device_attr_nport_evt_cnt,741 &class_device_attr_management_version,0742 &class_device_attr_board_mode,743+ &class_device_attr_issue_reset,744 &class_device_attr_lpfc_poll,745 &class_device_attr_lpfc_poll_tmo,746 NULL,
···71 uint16_t offset = 0;72 static char licensed[56] =73 "key unlock for use with gnu public licensed code only\0";07475 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);76 if (!pmb) {···83 phba->hba_state = LPFC_INIT_MBX_CMDS;8485 if (lpfc_is_LC_HBA(phba->pcidev->device)) {86- uint32_t *ptext = (uint32_t *) licensed;08788- for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)89- *ptext = cpu_to_be32(*ptext);009091 lpfc_read_nv(phba, pmb);92 memset((char*)mb->un.varRDnvp.rsvd3, 0,···409 }410 /* MBOX buffer will be freed in mbox compl */411412- i = 0;0000000413 while ((phba->hba_state != LPFC_HBA_READY) ||414 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||415 ((phba->fc_map_cnt == 0) && (i<2)) ||416- (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) {417 /* Check every second for 30 retries. */418 i++;419 if (i > 30) {420- break;421 }422 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {423 /* The link is down. Set linkdown timeout */424- break;425 }426427 /* Delay for 1 second to give discovery time to complete. */···436437 }438439- /* Since num_disc_nodes keys off of PLOGI, delay a bit to let440- * any potential PRLIs to flush thru the SLI sub-system.441- */442- msleep(50);443-444- return (0);445}446447/************************************************************************/···1345 struct lpfc_sli_ring *pring;1346 struct lpfc_sli *psli;1347 unsigned long iflag;1348- int i = 0;013491350 if (!phba)1351 return 0;···1355 return 0;13561357 psli = &phba->sli;1358- pring = &psli->ring[psli->fcp_ring];13591360 lpfc_linkdown(phba);013611362- /* The linkdown event takes 30 seconds to timeout. */1363- while (pring->txcmplq_cnt) {1364- mdelay(10);1365- if (i++ > 3000)1366- break;0000000001367 }013681369 /* stop all timers associated with this hba */1370 lpfc_stop_timer(phba);···1655 error = -ENODEV;1656 goto out_free_irq;1657 }0016581659 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {1660 spin_lock_irq(phba->host->host_lock);
···71 uint16_t offset = 0;72 static char licensed[56] =73 "key unlock for use with gnu public licensed code only\0";74+ static int init_key = 1;7576 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);77 if (!pmb) {···82 phba->hba_state = LPFC_INIT_MBX_CMDS;8384 if (lpfc_is_LC_HBA(phba->pcidev->device)) {85+ if (init_key) {86+ uint32_t *ptext = (uint32_t *) licensed;8788+ for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)89+ *ptext = cpu_to_be32(*ptext);90+ init_key = 0;91+ }9293 lpfc_read_nv(phba, pmb);94 memset((char*)mb->un.varRDnvp.rsvd3, 0,···405 }406 /* MBOX buffer will be freed in mbox compl */407408+ return (0);409+}410+411+static int412+lpfc_discovery_wait(struct lpfc_hba *phba)413+{414+ int i = 0;415+416 while ((phba->hba_state != LPFC_HBA_READY) ||417 (phba->num_disc_nodes) || (phba->fc_prli_sent) ||418 ((phba->fc_map_cnt == 0) && (i<2)) ||419+ (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {420 /* Check every second for 30 retries. */421 i++;422 if (i > 30) {423+ return -ETIMEDOUT;424 }425 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {426 /* The link is down. Set linkdown timeout */427+ return -ETIMEDOUT;428 }429430 /* Delay for 1 second to give discovery time to complete. */···425426 }427428+ return 0;00000429}430431/************************************************************************/···1339 struct lpfc_sli_ring *pring;1340 struct lpfc_sli *psli;1341 unsigned long iflag;1342+ int i;1343+ int cnt = 0;13441345 if (!phba)1346 return 0;···1348 return 0;13491350 psli = &phba->sli;013511352 lpfc_linkdown(phba);1353+ lpfc_sli_flush_mbox_queue(phba);13541355+ for (i = 0; i < psli->num_rings; i++) {1356+ pring = &psli->ring[i];1357+ /* The linkdown event takes 30 seconds to timeout. */1358+ while (pring->txcmplq_cnt) {1359+ mdelay(10);1360+ if (cnt++ > 3000) {1361+ lpfc_printf_log(phba,1362+ KERN_WARNING, LOG_INIT,1363+ "%d:0466 Outstanding IO when "1364+ "bringing Adapter offline\n",1365+ phba->brd_no);1366+ break;1367+ }1368+ }1369 }1370+13711372 /* stop all timers associated with this hba */1373 lpfc_stop_timer(phba);···1638 error = -ENODEV;1639 goto out_free_irq;1640 }1641+1642+ lpfc_discovery_wait(phba);16431644 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {1645 spin_lock_irq(phba->host->host_lock);
···1110 phba->brd_no,1111 did, mb->mbxStatus, phba->hba_state);1112000000000001113 /* Put ndlp in npr list set plogi timer for 1 sec */1114 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);1115 spin_lock_irq(phba->host->host_lock);
···1110 phba->brd_no,1111 did, mb->mbxStatus, phba->hba_state);11121113+ /*1114+ * If RegLogin failed due to lack of HBA resources do not1115+ * retry discovery.1116+ */1117+ if (mb->mbxStatus == MBXERR_RPI_FULL) {1118+ ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;1119+ ndlp->nlp_state = NLP_STE_UNUSED_NODE;1120+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);1121+ return ndlp->nlp_state;1122+ }1123+1124 /* Put ndlp in npr list set plogi timer for 1 sec */1125 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);1126 spin_lock_irq(phba->host->host_lock);
+37-27
drivers/scsi/lpfc/lpfc_scsi.c
···153lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)154{155 unsigned long iflag = 0;156- /*157- * There are only two special cases to consider. (1) the scsi command158- * requested scatter-gather usage or (2) the scsi command allocated159- * a request buffer, but did not request use_sg. There is a third160- * case, but it does not require resource deallocation.161- */162- if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {163- dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,164- psb->seg_cnt, psb->pCmd->sc_data_direction);165- } else {166- if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {167- dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,168- psb->pCmd->request_bufflen,169- psb->pCmd->sc_data_direction);170- }171- }172173 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);174 psb->pCmd = NULL;···263 iocb_cmd->ulpLe = 1;264 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);265 return 0;000000000000000000000266}267268static void···459 cmd->scsi_done(cmd);460461 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {0462 lpfc_release_scsi_buf(phba, lpfc_cmd);463 return;464 }···517 }518 }5190520 lpfc_release_scsi_buf(phba, lpfc_cmd);521}522···616static int617lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,618 struct lpfc_scsi_buf *lpfc_cmd,0619 uint8_t task_mgmt_cmd)620{621 struct lpfc_sli *psli;···635 piocb = &piocbq->iocb;636637 fcp_cmnd = lpfc_cmd->fcp_cmnd;638- int_to_scsilun(lpfc_cmd->pCmd->device->lun,639- &lpfc_cmd->fcp_cmnd->fcp_lun);640 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;641642 piocb->ulpCommand = CMD_FCP_ICMND64_CR;···662663static int664lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,665- unsigned tgt_id, struct lpfc_rport_data *rdata)0666{667 struct lpfc_iocbq *iocbq;668 struct lpfc_iocbq *iocbqrsp;669 int ret;670671 lpfc_cmd->rdata = rdata;672- ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);0673 if (!ret)674 return FAILED;675···831 return 0;832833 out_host_busy_free_buf:0834 lpfc_release_scsi_buf(phba, lpfc_cmd);835 out_host_busy:836 return SCSI_MLQUEUE_HOST_BUSY;···979 if (lpfc_cmd == NULL)980 goto out;981982- lpfc_cmd->pCmd = cmnd;983 lpfc_cmd->timeout = 60;984 lpfc_cmd->scsi_hba = phba;985 lpfc_cmd->rdata = rdata;986987- ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);0988 if (!ret)989 goto out_free_scsi_buf;990···1011 cmd_status = iocbqrsp->iocb.ulpStatus;10121013 lpfc_sli_release_iocbq(phba, iocbqrsp);1014- lpfc_release_scsi_buf(phba, lpfc_cmd);10151016 /*1017 * All outstanding txcmplq I/Os should have been aborted by the device.···1049 }10501051out_free_scsi_buf:001052 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,1053 "%d:0713 SCSI layer issued LUN reset (%d, %d) "1054 "Data: x%x x%x x%x\n",···10811082 /* The lpfc_cmd storage is reused. Set all loop invariants. */1083 lpfc_cmd->timeout = 60;1084- lpfc_cmd->pCmd = cmnd;1085 lpfc_cmd->scsi_hba = phba;10861087 /*···1088 * targets known to the driver. Should any target reset1089 * fail, this routine returns failure to the midlayer.1090 */1091- for (i = 0; i < MAX_FCP_TARGET; i++) {1092 /* Search the mapped list for this target ID */1093 match = 0;1094 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {···1100 if (!match)1101 continue;11021103- ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,1104- i, ndlp->rport->dd_data);1105 if (ret != SUCCESS) {1106 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,1107 "%d:0713 Bus Reset on target %d failed\n",
···153lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)154{155 unsigned long iflag = 0;0000000000000000156157 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);158 psb->pCmd = NULL;···279 iocb_cmd->ulpLe = 1;280 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);281 return 0;282+}283+284+static void285+lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)286+{287+ /*288+ * There are only two special cases to consider. (1) the scsi command289+ * requested scatter-gather usage or (2) the scsi command allocated290+ * a request buffer, but did not request use_sg. There is a third291+ * case, but it does not require resource deallocation.292+ */293+ if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {294+ dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,295+ psb->seg_cnt, psb->pCmd->sc_data_direction);296+ } else {297+ if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {298+ dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,299+ psb->pCmd->request_bufflen,300+ psb->pCmd->sc_data_direction);301+ }302+ }303}304305static void···454 cmd->scsi_done(cmd);455456 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {457+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);458 lpfc_release_scsi_buf(phba, lpfc_cmd);459 return;460 }···511 }512 }513514+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);515 lpfc_release_scsi_buf(phba, lpfc_cmd);516}517···609static int610lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,611 struct lpfc_scsi_buf *lpfc_cmd,612+ unsigned int lun,613 uint8_t task_mgmt_cmd)614{615 struct lpfc_sli *psli;···627 piocb = &piocbq->iocb;628629 fcp_cmnd = lpfc_cmd->fcp_cmnd;630+ int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);0631 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;632633 piocb->ulpCommand = CMD_FCP_ICMND64_CR;···655656static int657lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,658+ unsigned tgt_id, unsigned int lun,659+ struct lpfc_rport_data *rdata)660{661 struct lpfc_iocbq *iocbq;662 struct lpfc_iocbq *iocbqrsp;663 int ret;664665 lpfc_cmd->rdata = rdata;666+ ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,667+ FCP_TARGET_RESET);668 if (!ret)669 return FAILED;670···822 return 0;823824 out_host_busy_free_buf:825+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);826 lpfc_release_scsi_buf(phba, lpfc_cmd);827 out_host_busy:828 return SCSI_MLQUEUE_HOST_BUSY;···969 if (lpfc_cmd == NULL)970 goto out;9710972 lpfc_cmd->timeout = 60;973 lpfc_cmd->scsi_hba = phba;974 lpfc_cmd->rdata = rdata;975976+ ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,977+ FCP_LUN_RESET);978 if (!ret)979 goto out_free_scsi_buf;980···1001 cmd_status = iocbqrsp->iocb.ulpStatus;10021003 lpfc_sli_release_iocbq(phba, iocbqrsp);010041005 /*1006 * All outstanding txcmplq I/Os should have been aborted by the device.···1040 }10411042out_free_scsi_buf:1043+ lpfc_release_scsi_buf(phba, lpfc_cmd);1044+1045 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,1046 "%d:0713 SCSI layer issued LUN reset (%d, %d) "1047 "Data: x%x x%x x%x\n",···10701071 /* The lpfc_cmd storage is reused. Set all loop invariants. */1072 lpfc_cmd->timeout = 60;01073 lpfc_cmd->scsi_hba = phba;10741075 /*···1078 * targets known to the driver. 
Should any target reset1079 * fail, this routine returns failure to the midlayer.1080 */1081+ for (i = 0; i < LPFC_MAX_TARGET; i++) {1082 /* Search the mapped list for this target ID */1083 match = 0;1084 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {···1090 if (!match)1091 continue;10921093+ ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,1094+ ndlp->rport->dd_data);1095 if (ret != SUCCESS) {1096 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,1097 "%d:0713 Bus Reset on target %d failed\n",
+24-31
drivers/scsi/lpfc/lpfc_sli.c
···191lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)193{194- uint16_t iotag;195-196 list_add_tail(&piocb->list, &pring->txcmplq);197 pring->txcmplq_cnt++;198 if (unlikely(pring->ringno == LPFC_ELS_RING))199 mod_timer(&phba->els_tmofunc,200 jiffies + HZ * (phba->fc_ratov << 1));201202- if (pring->fast_lookup) {203- /* Setup fast lookup based on iotag for completion */204- iotag = piocb->iocb.ulpIoTag;205- if (iotag && (iotag < pring->fast_iotag))206- *(pring->fast_lookup + iotag) = piocb;207- else {208-209- /* Cmd ring <ringno> put: iotag <iotag> greater then210- configured max <fast_iotag> wd0 <icmd> */211- lpfc_printf_log(phba,212- KERN_ERR,213- LOG_SLI,214- "%d:0316 Cmd ring %d put: iotag x%x "215- "greater then configured max x%x "216- "wd0 x%x\n",217- phba->brd_no,218- pring->ringno, iotag,219- pring->fast_iotag,220- *(((uint32_t *)(&piocb->iocb)) + 7));221- }222- }223 return (0);224}225···578 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus579 <status> */580 lpfc_printf_log(phba,581- KERN_ERR,582 LOG_MBOX | LOG_SLI,583 "%d:0304 Stray Mailbox Interrupt "584 "mbxCommand x%x mbxStatus x%x\n",···15471548void lpfc_reset_barrier(struct lpfc_hba * phba)1549{1550- uint32_t * resp_buf;1551- uint32_t * mbox_buf;1552 volatile uint32_t mbox;1553 uint32_t hc_copy;1554 int i;···1564 * Tell the other part of the chip to suspend temporarily all1565 * its DMA activity.1566 */1567- resp_buf = (uint32_t *)phba->MBslimaddr;15681569 /* Disable the error attention */1570 hc_copy = readl(phba->HCregaddr);···1582 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;15831584 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));1585- mbox_buf = (uint32_t *)phba->MBslimaddr;1586 writel(mbox, mbox_buf);15871588 for (i = 0;···1782 skip_post = 0;1783 word0 = 0; /* This is really setting up word1 */1784 }1785- to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);1786 writel(*(uint32_t *) mb, to_slim);1787 readl(to_slim); /* flush */1788···26362637 INIT_LIST_HEAD(&(pring->txq));26382639- kfree(pring->fast_lookup);2640- pring->fast_lookup = NULL;2641 }26422643 spin_unlock_irqrestore(phba->host->host_lock, flags);···3083 set_current_state(TASK_RUNNING);3084 remove_wait_queue(&done_q, &wq_entry);3085 return retval;0000000000000000003086}30873088irqreturn_t
···191lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)193{00194 list_add_tail(&piocb->list, &pring->txcmplq);195 pring->txcmplq_cnt++;196 if (unlikely(pring->ringno == LPFC_ELS_RING))197 mod_timer(&phba->els_tmofunc,198 jiffies + HZ * (phba->fc_ratov << 1));199000000000000000000000200 return (0);201}202···601 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus602 <status> */603 lpfc_printf_log(phba,604+ KERN_WARNING,605 LOG_MBOX | LOG_SLI,606 "%d:0304 Stray Mailbox Interrupt "607 "mbxCommand x%x mbxStatus x%x\n",···15701571void lpfc_reset_barrier(struct lpfc_hba * phba)1572{1573+ uint32_t __iomem *resp_buf;1574+ uint32_t __iomem *mbox_buf;1575 volatile uint32_t mbox;1576 uint32_t hc_copy;1577 int i;···1587 * Tell the other part of the chip to suspend temporarily all1588 * its DMA activity.1589 */1590+ resp_buf = phba->MBslimaddr;15911592 /* Disable the error attention */1593 hc_copy = readl(phba->HCregaddr);···1605 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;16061607 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));1608+ mbox_buf = phba->MBslimaddr;1609 writel(mbox, mbox_buf);16101611 for (i = 0;···1805 skip_post = 0;1806 word0 = 0; /* This is really setting up word1 */1807 }1808+ to_slim = phba->MBslimaddr + sizeof (uint32_t);1809 writel(*(uint32_t *) mb, to_slim);1810 readl(to_slim); /* flush */1811···26592660 INIT_LIST_HEAD(&(pring->txq));2661002662 }26632664 spin_unlock_irqrestore(phba->host->host_lock, flags);···3108 set_current_state(TASK_RUNNING);3109 remove_wait_queue(&done_q, &wq_entry);3110 return retval;3111+}3112+3113+int3114+lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)3115+{3116+ int i = 0;3117+3118+ while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {3119+ if (i++ > LPFC_MBOX_TMO * 1000)3120+ return 1;3121+3122+ if (lpfc_sli_handle_mb_event(phba) == 0)3123+ i = 0;3124+3125+ msleep(1);3126+ }3127+3128+ return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;3129}31303131irqreturn_t
-2
drivers/scsi/lpfc/lpfc_sli.h
···135 uint32_t fast_iotag; /* max fastlookup based iotag */136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */137 uint32_t iotag_max; /* max iotag value to use */138- struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by139- iotag */140 struct list_head txq;141 uint16_t txq_cnt; /* current length of queue */142 uint16_t txq_max; /* max length */
···135 uint32_t fast_iotag; /* max fastlookup based iotag */136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */137 uint32_t iotag_max; /* max iotag value to use */00138 struct list_head txq;139 uint16_t txq_cnt; /* current length of queue */140 uint16_t txq_max; /* max length */
+1-1
drivers/scsi/lpfc/lpfc_version.h
···18 * included with this package. *19 *******************************************************************/2021-#define LPFC_DRIVER_VERSION "8.1.6"2223#define LPFC_DRIVER_NAME "lpfc"24
···18 * included with this package. *19 *******************************************************************/2021+#define LPFC_DRIVER_VERSION "8.1.7"2223#define LPFC_DRIVER_NAME "lpfc"24
+1-1
drivers/scsi/mac53c94.c
···378 int nseg;379380 total = 0;381- scl = (struct scatterlist *) cmd->buffer;382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,383 cmd->sc_data_direction);384 for (i = 0; i < nseg; ++i) {
···378 int nseg;379380 total = 0;381+ scl = (struct scatterlist *) cmd->request_buffer;382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,383 cmd->sc_data_direction);384 for (i = 0; i < nseg; ++i) {
+1-1
drivers/scsi/mesh.c
···1268 if (cmd->use_sg > 0) {1269 int nseg;1270 total = 0;1271- scl = (struct scatterlist *) cmd->buffer;1272 off = ms->data_ptr;1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,1274 cmd->sc_data_direction);
···1268 if (cmd->use_sg > 0) {1269 int nseg;1270 total = 0;1271+ scl = (struct scatterlist *) cmd->request_buffer;1272 off = ms->data_ptr;1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,1274 cmd->sc_data_direction);
···346 if (level > 3) {347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"348 " done = 0x%p, queuecommand 0x%p\n",349- cmd->buffer, cmd->bufflen,350 cmd->done,351 sdev->host->hostt->queuecommand);352···661 */662int scsi_retry_command(struct scsi_cmnd *cmd)663{664- /*665- * Restore the SCSI command state.666- */667- scsi_setup_cmd_retry(cmd);668-669 /*670 * Zero the sense information from the last time we tried671 * this command.···706 "Notifying upper driver of completion "707 "(result %x)\n", cmd->result));708709- /*710- * We can get here with use_sg=0, causing a panic in the upper level711- */712- cmd->use_sg = cmd->old_use_sg;713 cmd->done(cmd);714}715EXPORT_SYMBOL(scsi_finish_command);
···346 if (level > 3) {347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"348 " done = 0x%p, queuecommand 0x%p\n",349+ cmd->request_buffer, cmd->request_bufflen,350 cmd->done,351 sdev->host->hostt->queuecommand);352···661 */662int scsi_retry_command(struct scsi_cmnd *cmd)663{00000664 /*665 * Zero the sense information from the last time we tried666 * this command.···711 "Notifying upper driver of completion "712 "(result %x)\n", cmd->result));7130000714 cmd->done(cmd);715}716EXPORT_SYMBOL(scsi_finish_command);
+54-18
drivers/scsi/scsi_debug.c
···286 int dev_id_num, const char * dev_id_str,287 int dev_id_str_len);288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);289-static void do_create_driverfs_files(void);290static void do_remove_driverfs_files(void);291292static int sdebug_add_adapter(void);···2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2488 sdebug_add_host_store);24892490-static void do_create_driverfs_files(void)2491{2492- driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);2493- driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);2494- driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);2495- driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);2496- driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);2497- driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);2498- driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);2499- driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);2500- driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);2501- driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);2502- driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);0002503}25042505static void do_remove_driverfs_files(void)···2525 unsigned int sz;2526 int host_to_add;2527 int k;025282529 if (scsi_debug_dev_size_mb < 1)2530 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */···2564 if (scsi_debug_num_parts > 0)2565 sdebug_build_parts(fake_storep);25662567- init_all_queued();0000000000000000000000025682569- device_register(&pseudo_primary);2570- bus_register(&pseudo_lld_bus);2571- driver_register(&sdebug_driverfs_driver);2572- do_create_driverfs_files();25732574 sdebug_driver_template.proc_name = (char *)sdebug_proc_name;2575···2609 scsi_debug_add_host);2610 }2611 return 0;0000000000002612}26132614static void __exit scsi_debug_exit(void)
···286 int dev_id_num, const char * dev_id_str,287 int dev_id_str_len);288static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);289+static int do_create_driverfs_files(void);290static void do_remove_driverfs_files(void);291292static int sdebug_add_adapter(void);···2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2488 sdebug_add_host_store);24892490+static int do_create_driverfs_files(void)2491{2492+ int ret;2493+2494+ ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);2495+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);2496+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);2497+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);2498+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);2499+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);2500+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);2501+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);2502+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);2503+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);2504+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);2505+ return ret;2506}25072508static void do_remove_driverfs_files(void)···2522 unsigned int sz;2523 int host_to_add;2524 int k;2525+ int ret;25262527 if (scsi_debug_dev_size_mb < 1)2528 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */···2560 if (scsi_debug_num_parts > 0)2561 sdebug_build_parts(fake_storep);25622563+ ret = device_register(&pseudo_primary);2564+ if (ret < 0) {2565+ printk(KERN_WARNING "scsi_debug: device_register error: %d\n",2566+ ret);2567+ goto free_vm;2568+ }2569+ ret = bus_register(&pseudo_lld_bus);2570+ if (ret < 0) {2571+ printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",2572+ ret);2573+ goto dev_unreg;2574+ }2575+ ret = driver_register(&sdebug_driverfs_driver);2576+ if (ret < 0) {2577+ printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",2578+ ret);2579+ goto bus_unreg;2580+ }2581+ ret = do_create_driverfs_files();2582+ if (ret < 0) {2583+ printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",2584+ ret);2585+ goto del_files;2586+ }25872588+ init_all_queued();00025892590 sdebug_driver_template.proc_name = (char *)sdebug_proc_name;2591···2585 scsi_debug_add_host);2586 }2587 return 0;2588+2589+del_files:2590+ do_remove_driverfs_files();2591+ driver_unregister(&sdebug_driverfs_driver);2592+bus_unreg:2593+ bus_unregister(&pseudo_lld_bus);2594+dev_unreg:2595+ device_unregister(&pseudo_primary);2596+free_vm:2597+ vfree(fake_storep);2598+2599+ return ret;2600}26012602static void __exit scsi_debug_exit(void)
+89-121
drivers/scsi/scsi_error.c
···460 * Return value:461 * SUCCESS or FAILED or NEEDS_RETRY462 **/463-static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)464{465 struct scsi_device *sdev = scmd->device;466 struct Scsi_Host *shost = sdev->host;0467 DECLARE_COMPLETION(done);468 unsigned long timeleft;469 unsigned long flags;000000470 int rtn;00000000000000000000000000000000000471472 if (sdev->scsi_level <= SCSI_2)473 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |474 (sdev->lun << 5 & 0xe0);000000475476 shost->eh_action = &done;477···570 rtn = FAILED;571 }57200000000000000000000000573 return rtn;574}575···608static int scsi_request_sense(struct scsi_cmnd *scmd)609{610 static unsigned char generic_sense[6] =611- {REQUEST_SENSE, 0, 0, 0, 252, 0};612- unsigned char *scsi_result;613- int saved_result;614- int rtn;615616 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));617-618- scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0));619-620-621- if (unlikely(!scsi_result)) {622- printk(KERN_ERR "%s: cannot allocate scsi_result.\n",623- __FUNCTION__);624- return FAILED;625- }626-627- /*628- * zero the sense buffer. some host adapters automatically always629- * request sense, so it is not a good idea that630- * scmd->request_buffer and scmd->sense_buffer point to the same631- * address (db). 0 is not a valid sense code. 632- */633- memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));634- memset(scsi_result, 0, 252);635-636- saved_result = scmd->result;637- scmd->request_buffer = scsi_result;638- scmd->request_bufflen = 252;639- scmd->use_sg = 0;640- scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);641- scmd->sc_data_direction = DMA_FROM_DEVICE;642- scmd->underflow = 0;643-644- rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);645-646- /* last chance to have valid sense data */647- if(!SCSI_SENSE_VALID(scmd)) {648- memcpy(scmd->sense_buffer, scmd->request_buffer,649- sizeof(scmd->sense_buffer));650- }651-652- kfree(scsi_result);653-654- /*655- * when we eventually call scsi_finish, we really wish to complete656- * the original request, so let's restore the original data. (db)657- */658- scsi_setup_cmd_retry(scmd);659- scmd->result = saved_result;660- return rtn;661}662663/**···630{631 scmd->device->host->host_failed--;632 scmd->eh_eflags = 0;633-634- /*635- * set this back so that the upper level can correctly free up636- * things.637- */638- scsi_setup_cmd_retry(scmd);639 list_move_tail(&scmd->eh_entry, done_q);640}641EXPORT_SYMBOL(scsi_eh_finish_cmd);···734{735 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};736 int retry_cnt = 1, rtn;737- int saved_result;738739retry_tur:740 memcpy(scmd->cmnd, tur_command, sizeof(tur_command));741742- /*743- * zero the sense buffer. the scsi spec mandates that any744- * untransferred sense data should be interpreted as being zero.745- */746- memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));747748- saved_result = scmd->result;749- scmd->request_buffer = NULL;750- scmd->request_bufflen = 0;751- scmd->use_sg = 0;752- scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);753- scmd->underflow = 0;754- scmd->sc_data_direction = DMA_NONE;755756- rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);757-758- /*759- * when we eventually call scsi_finish, we really wish to complete760- * the original request, so let's restore the original data. (db)761- */762- scsi_setup_cmd_retry(scmd);763- scmd->result = saved_result;764-765- /*766- * hey, we are done. 
let's look to see what happened.767- */768 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",769 __FUNCTION__, scmd, rtn));770- if (rtn == SUCCESS)771- return 0;772- else if (rtn == NEEDS_RETRY) {773 if (retry_cnt--)774 goto retry_tur;00775 return 0;00776 }777- return 1;778}779780/**···835static int scsi_eh_try_stu(struct scsi_cmnd *scmd)836{837 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};838- int rtn;839- int saved_result;840841- if (!scmd->device->allow_restart)842- return 1;843844- memcpy(scmd->cmnd, stu_command, sizeof(stu_command));0000845846- /*847- * zero the sense buffer. the scsi spec mandates that any848- * untransferred sense data should be interpreted as being zero.849- */850- memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));851-852- saved_result = scmd->result;853- scmd->request_buffer = NULL;854- scmd->request_bufflen = 0;855- scmd->use_sg = 0;856- scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);857- scmd->underflow = 0;858- scmd->sc_data_direction = DMA_NONE;859-860- rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT);861-862- /*863- * when we eventually call scsi_finish, we really wish to complete864- * the original request, so let's restore the original data. (db)865- */866- scsi_setup_cmd_retry(scmd);867- scmd->result = saved_result;868-869- /*870- * hey, we are done. let's look to see what happened.871- */872- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",873- __FUNCTION__, scmd, rtn));874- if (rtn == SUCCESS)875- return 0;876 return 1;877}878···16541655 scmd->scsi_done = scsi_reset_provider_done_command;1656 scmd->done = NULL;1657- scmd->buffer = NULL;1658- scmd->bufflen = 0;1659 scmd->request_buffer = NULL;1660 scmd->request_bufflen = 0;1661
···460 * Return value:461 * SUCCESS or FAILED or NEEDS_RETRY462 **/463+static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense)464{465 struct scsi_device *sdev = scmd->device;466 struct Scsi_Host *shost = sdev->host;467+ int old_result = scmd->result;468 DECLARE_COMPLETION(done);469 unsigned long timeleft;470 unsigned long flags;471+ unsigned char old_cmnd[MAX_COMMAND_SIZE];472+ enum dma_data_direction old_data_direction;473+ unsigned short old_use_sg;474+ unsigned char old_cmd_len;475+ unsigned old_bufflen;476+ void *old_buffer;477 int rtn;478+479+ /*480+ * We need saved copies of a number of fields - this is because481+ * error handling may need to overwrite these with different values482+ * to run different commands, and once error handling is complete,483+ * we will need to restore these values prior to running the actual484+ * command.485+ */486+ old_buffer = scmd->request_buffer;487+ old_bufflen = scmd->request_bufflen;488+ memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd));489+ old_data_direction = scmd->sc_data_direction;490+ old_cmd_len = scmd->cmd_len;491+ old_use_sg = scmd->use_sg;492+493+ if (copy_sense) {494+ int gfp_mask = GFP_ATOMIC;495+496+ if (shost->hostt->unchecked_isa_dma)497+ gfp_mask |= __GFP_DMA;498+499+ scmd->sc_data_direction = DMA_FROM_DEVICE;500+ scmd->request_bufflen = 252;501+ scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);502+ if (!scmd->request_buffer)503+ return FAILED;504+ } else {505+ scmd->request_buffer = NULL;506+ scmd->request_bufflen = 0;507+ scmd->sc_data_direction = DMA_NONE;508+ }509+510+ scmd->underflow = 0;511+ scmd->use_sg = 0;512+ scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);513514 if (sdev->scsi_level <= SCSI_2)515 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |516 (sdev->lun << 5 & 0xe0);517+518+ /*519+ * Zero the sense buffer. 
The scsi spec mandates that any520+ * untransferred sense data should be interpreted as being zero.521+ */522+ memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));523524 shost->eh_action = &done;525···522 rtn = FAILED;523 }524525+526+ /*527+ * Last chance to have valid sense data.528+ */529+ if (copy_sense) {530+ if (!SCSI_SENSE_VALID(scmd)) {531+ memcpy(scmd->sense_buffer, scmd->request_buffer,532+ sizeof(scmd->sense_buffer));533+ }534+ kfree(scmd->request_buffer);535+ }536+537+538+ /*539+ * Restore original data540+ */541+ scmd->request_buffer = old_buffer;542+ scmd->request_bufflen = old_bufflen;543+ memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd));544+ scmd->sc_data_direction = old_data_direction;545+ scmd->cmd_len = old_cmd_len;546+ scmd->use_sg = old_use_sg;547+ scmd->result = old_result;548 return rtn;549}550···537static int scsi_request_sense(struct scsi_cmnd *scmd)538{539 static unsigned char generic_sense[6] =540+ {REQUEST_SENSE, 0, 0, 0, 252, 0};000541542 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));543+ return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1);0000000000000000000000000000000000000000000544}545546/**···605{606 scmd->device->host->host_failed--;607 scmd->eh_eflags = 0;000000608 list_move_tail(&scmd->eh_entry, done_q);609}610EXPORT_SYMBOL(scsi_eh_finish_cmd);···715{716 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};717 int retry_cnt = 1, rtn;0718719retry_tur:720 memcpy(scmd->cmnd, tur_command, sizeof(tur_command));72100000722723+ rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0);000000724000000000000725 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",726 __FUNCTION__, scmd, rtn));727+728+ switch (rtn) {729+ case NEEDS_RETRY:730 if (retry_cnt--)731 goto retry_tur;732+ /*FALLTHRU*/733+ case SUCCESS:734 return 0;735+ default:736+ return 1;737 }0738}739740/**···837static int scsi_eh_try_stu(struct scsi_cmnd *scmd)838{839 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};00840841+ if (scmd->device->allow_restart) {842+ int rtn;843844+ memcpy(scmd->cmnd, stu_command, sizeof(stu_command));845+ rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0);846+ if (rtn == SUCCESS)847+ return 0;848+ }849000000000000000000000000000000850 return 1;851}852···16841685 scmd->scsi_done = scsi_reset_provider_done_command;1686 scmd->done = NULL;001687 scmd->request_buffer = NULL;1688 scmd->request_bufflen = 0;1689
+6-82
drivers/scsi/scsi_lib.c
···436 *437 * Arguments: cmd - command that is ready to be queued.438 *439- * Returns: Nothing440- *441 * Notes: This function has the job of initializing a number of442 * fields related to error handling. Typically this will443 * be called once for each command, as required.444 */445-static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)446{447 cmd->serial_number = 0;448-449 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);450-451 if (cmd->cmd_len == 0)452 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);453-454- /*455- * We need saved copies of a number of fields - this is because456- * error handling may need to overwrite these with different values457- * to run different commands, and once error handling is complete,458- * we will need to restore these values prior to running the actual459- * command.460- */461- cmd->old_use_sg = cmd->use_sg;462- cmd->old_cmd_len = cmd->cmd_len;463- cmd->sc_old_data_direction = cmd->sc_data_direction;464- cmd->old_underflow = cmd->underflow;465- memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));466- cmd->buffer = cmd->request_buffer;467- cmd->bufflen = cmd->request_bufflen;468-469- return 1;470-}471-472-/*473- * Function: scsi_setup_cmd_retry()474- *475- * Purpose: Restore the command state for a retry476- *477- * Arguments: cmd - command to be restored478- *479- * Returns: Nothing480- *481- * Notes: Immediately prior to retrying a command, we need482- * to restore certain fields that we saved above.483- */484-void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)485-{486- memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));487- cmd->request_buffer = cmd->buffer;488- cmd->request_bufflen = cmd->bufflen;489- cmd->use_sg = cmd->old_use_sg;490- cmd->cmd_len = cmd->old_cmd_len;491- cmd->sc_data_direction = cmd->sc_old_data_direction;492- cmd->underflow = cmd->old_underflow;493}494495void scsi_device_unbusy(struct scsi_device *sdev)···763 */764static void scsi_release_buffers(struct scsi_cmnd *cmd)765{766- struct request *req = cmd->request;767-768- /*769- * Free up any indirection buffers we allocated for DMA purposes. 770- */771 if (cmd->use_sg)772 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);773- else if (cmd->request_buffer != req->buffer)774- kfree(cmd->request_buffer);775776 /*777 * Zero these out. They now point to freed memory, and it is778 * dangerous to hang onto the pointers.779 */780- cmd->buffer = NULL;781- cmd->bufflen = 0;782 cmd->request_buffer = NULL;783 cmd->request_bufflen = 0;784}···805void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)806{807 int result = cmd->result;808- int this_count = cmd->bufflen;809 request_queue_t *q = cmd->device->request_queue;810 struct request *req = cmd->request;811 int clear_errors = 1;···813 int sense_valid = 0;814 int sense_deferred = 0;815816- /*817- * Free up any indirection buffers we allocated for DMA purposes. 
818- * For the case of a READ, we need to copy the data out of the819- * bounce buffer and into the real buffer.820- */821- if (cmd->use_sg)822- scsi_free_sgtable(cmd->buffer, cmd->sglist_len);823- else if (cmd->buffer != req->buffer) {824- if (rq_data_dir(req) == READ) {825- unsigned long flags;826- char *to = bio_kmap_irq(req->bio, &flags);827- memcpy(to, cmd->buffer, cmd->bufflen);828- bio_kunmap_irq(to, &flags);829- }830- kfree(cmd->buffer);831- }832833 if (result) {834 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);835 if (sense_valid)836 sense_deferred = scsi_sense_is_deferred(&sshdr);837 }0838 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */839 req->errors = result;840 if (result) {···839 } else840 req->data_len = cmd->resid;841 }842-843- /*844- * Zero these out. They now point to freed memory, and it is845- * dangerous to hang onto the pointers.846- */847- cmd->buffer = NULL;848- cmd->bufflen = 0;849- cmd->request_buffer = NULL;850- cmd->request_bufflen = 0;851852 /*853 * Next deal with any sectors which we were able to correctly···936 if (!(req->flags & REQ_QUIET)) {937 scmd_printk(KERN_INFO, cmd,938 "Volume overflow, CDB: ");939- __scsi_print_command(cmd->data_cmnd);940 scsi_print_sense("", cmd);941 }942 /* See SSC3rXX or current. */···1067 * successfully. Since this is a REQ_BLOCK_PC command the1068 * caller should check the request's errors value1069 */1070- scsi_io_completion(cmd, cmd->bufflen);1071}10721073static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
···436 *437 * Arguments: cmd - command that is ready to be queued.438 *00439 * Notes: This function has the job of initializing a number of440 * fields related to error handling. Typically this will441 * be called once for each command, as required.442 */443+static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)444{445 cmd->serial_number = 0;0446 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);0447 if (cmd->cmd_len == 0)448 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);0000000000000000000000000000000000000000449}450451void scsi_device_unbusy(struct scsi_device *sdev)···807 */808static void scsi_release_buffers(struct scsi_cmnd *cmd)809{00000810 if (cmd->use_sg)811 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);00812813 /*814 * Zero these out. They now point to freed memory, and it is815 * dangerous to hang onto the pointers.816 */00817 cmd->request_buffer = NULL;818 cmd->request_bufflen = 0;819}···858void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)859{860 int result = cmd->result;861+ int this_count = cmd->request_bufflen;862 request_queue_t *q = cmd->device->request_queue;863 struct request *req = cmd->request;864 int clear_errors = 1;···866 int sense_valid = 0;867 int sense_deferred = 0;868869+ scsi_release_buffers(cmd);000000000000000870871 if (result) {872 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);873 if (sense_valid)874 sense_deferred = scsi_sense_is_deferred(&sshdr);875 }876+877 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */878 req->errors = result;879 if (result) {···906 } else907 req->data_len = cmd->resid;908 }000000000909910 /*911 * Next deal with any sectors which we were able to correctly···1012 if (!(req->flags & REQ_QUIET)) {1013 scmd_printk(KERN_INFO, cmd,1014 "Volume overflow, CDB: ");1015+ __scsi_print_command(cmd->cmnd);1016 scsi_print_sense("", cmd);1017 }1018 /* See SSC3rXX or current. */···1143 * successfully. Since this is a REQ_BLOCK_PC command the1144 * caller should check the request's errors value1145 */1146+ scsi_io_completion(cmd, cmd->request_bufflen);1147}11481149static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
-1
drivers/scsi/scsi_priv.h
···5758/* scsi_lib.c */59extern int scsi_maybe_unblock_host(struct scsi_device *sdev);60-extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd);61extern void scsi_device_unbusy(struct scsi_device *sdev);62extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);63extern void scsi_next_command(struct scsi_cmnd *cmd);
···5758/* scsi_lib.c */59extern int scsi_maybe_unblock_host(struct scsi_device *sdev);060extern void scsi_device_unbusy(struct scsi_device *sdev);61extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);62extern void scsi_next_command(struct scsi_cmnd *cmd);
+58-6
drivers/scsi/scsi_transport_sas.c
···41 struct mutex lock;42 u32 next_target_id;43 u32 next_expander_id;044};45#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)46···147 mutex_init(&sas_host->lock);148 sas_host->next_target_id = 0;149 sas_host->next_expander_id = 0;0150 return 0;151}152···329sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",330 unsigned long long);331sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);332-//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8);333sas_phy_linkspeed_attr(negotiated_linkrate);334sas_phy_linkspeed_attr(minimum_linkrate_hw);335sas_phy_linkspeed_attr(minimum_linkrate);···592}593EXPORT_SYMBOL(sas_port_alloc);59400000000000000000000000000000000595/**596 * sas_port_add - add a SAS port to the device hierarchy597 *···691 list_del_init(&phy->port_siblings);692 }693 mutex_unlock(&port->phy_list_mutex);0000000694695 transport_remove_device(dev);696 device_del(dev);···773 mutex_unlock(&port->phy_list_mutex);774}775EXPORT_SYMBOL(sas_port_delete_phy);0000000000000776777/*778 * SAS remote PHY attributes.···11941195 if (identify->device_type == SAS_END_DEVICE &&1196 rphy->scsi_target_id != -1) {1197- scsi_scan_target(&rphy->dev, parent->port_identifier,1198 rphy->scsi_target_id, ~0, 0);1199 }1200···12961297 mutex_lock(&sas_host->lock);1298 list_for_each_entry(rphy, &sas_host->rphy_list, list) {1299- struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);1300-1301 if (rphy->identify.device_type != SAS_END_DEVICE ||1302 rphy->scsi_target_id == -1)1303 continue;13041305- if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) &&1306 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {1307- scsi_scan_target(&rphy->dev, parent->port_identifier,1308 rphy->scsi_target_id, lun, 1);1309 }1310 }
···41 struct mutex lock;42 u32 next_target_id;43 u32 next_expander_id;44+ int next_port_id;45};46#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)47···146 mutex_init(&sas_host->lock);147 sas_host->next_target_id = 0;148 sas_host->next_expander_id = 0;149+ sas_host->next_port_id = 0;150 return 0;151}152···327sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",328 unsigned long long);329sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);330+//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);331sas_phy_linkspeed_attr(negotiated_linkrate);332sas_phy_linkspeed_attr(minimum_linkrate_hw);333sas_phy_linkspeed_attr(minimum_linkrate);···590}591EXPORT_SYMBOL(sas_port_alloc);592593+/** sas_port_alloc_num - allocate and initialize a SAS port structure594+ *595+ * @parent: parent device596+ *597+ * Allocates a SAS port structure and a number to go with it. This598+ * interface is really for adapters where the port number has no599+ * meansing, so the sas class should manage them. It will be added to600+ * the device tree below the device specified by @parent which must be601+ * either a Scsi_Host or a sas_expander_device.602+ *603+ * Returns %NULL on error604+ */605+struct sas_port *sas_port_alloc_num(struct device *parent)606+{607+ int index;608+ struct Scsi_Host *shost = dev_to_shost(parent);609+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);610+611+ /* FIXME: use idr for this eventually */612+ mutex_lock(&sas_host->lock);613+ if (scsi_is_sas_expander_device(parent)) {614+ struct sas_rphy *rphy = dev_to_rphy(parent);615+ struct sas_expander_device *exp = rphy_to_expander_device(rphy);616+617+ index = exp->next_port_id++;618+ } else619+ index = sas_host->next_port_id++;620+ mutex_unlock(&sas_host->lock);621+ return sas_port_alloc(parent, index);622+}623+EXPORT_SYMBOL(sas_port_alloc_num);624+625/**626 * sas_port_add - add a SAS port to the device hierarchy627 *···657 list_del_init(&phy->port_siblings);658 }659 mutex_unlock(&port->phy_list_mutex);660+661+ if (port->is_backlink) {662+ struct device *parent = port->dev.parent;663+664+ sysfs_remove_link(&port->dev.kobj, parent->bus_id);665+ port->is_backlink = 0;666+ }667668 transport_remove_device(dev);669 device_del(dev);···732 mutex_unlock(&port->phy_list_mutex);733}734EXPORT_SYMBOL(sas_port_delete_phy);735+736+void sas_port_mark_backlink(struct sas_port *port)737+{738+ struct device *parent = port->dev.parent->parent->parent;739+740+ if (port->is_backlink)741+ return;742+ port->is_backlink = 1;743+ sysfs_create_link(&port->dev.kobj, &parent->kobj,744+ parent->bus_id);745+746+}747+EXPORT_SYMBOL(sas_port_mark_backlink);748749/*750 * SAS remote PHY attributes.···11401141 if (identify->device_type == SAS_END_DEVICE &&1142 rphy->scsi_target_id != -1) {1143+ scsi_scan_target(&rphy->dev, 0,1144 rphy->scsi_target_id, ~0, 0);1145 }1146···12421243 mutex_lock(&sas_host->lock);1244 list_for_each_entry(rphy, &sas_host->rphy_list, list) {001245 if (rphy->identify.device_type != SAS_END_DEVICE ||1246 rphy->scsi_target_id == -1)1247 continue;12481249+ if ((channel == SCAN_WILD_CARD || channel == 0) &&1250 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {1251+ scsi_scan_target(&rphy->dev, 0,1252 rphy->scsi_target_id, lun, 1);1253 }1254 }
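Read together with the scsi_transport_sas.h hunk further down, the new entry points compose as follows for a driver that wants the class to pick port numbers. A minimal sketch, not taken from the patch: the helper name and its shost/phy parameters are invented, and error handling is trimmed to the essentials.

/* Hypothetical LLDD helper: let the transport class number the port,
 * attach one PHY, and record the sysfs backlink where the port points
 * back toward its parent (the expander case).
 */
static int example_attach_port(struct Scsi_Host *shost, struct sas_phy *phy)
{
	struct sas_port *port;

	port = sas_port_alloc_num(&shost->shost_gendev);
	if (!port)
		return -ENOMEM;

	if (sas_port_add(port)) {
		sas_port_free(port);
		return -ENODEV;
	}

	sas_port_add_phy(port, phy);
	sas_port_mark_backlink(port);	/* only for ports that link back */
	return 0;
}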
+1-2
drivers/scsi/sd.c
···502 SCpnt->cmnd[4] = (unsigned char) this_count;503 SCpnt->cmnd[5] = 0;504 }505- SCpnt->request_bufflen = SCpnt->bufflen =506- this_count * sdp->sector_size;507508 /*509 * We shouldn't disconnect in the middle of a sector, so with a dumb
···502 SCpnt->cmnd[4] = (unsigned char) this_count;503 SCpnt->cmnd[5] = 0;504 }505+ SCpnt->request_bufflen = this_count * sdp->sector_size;0506507 /*508 * We shouldn't disconnect in the middle of a sector, so with a dumb
+1-1
drivers/scsi/seagate.c
···1002 }1003#endif10041005- buffer = (struct scatterlist *) SCint->buffer;1006 len = buffer->length;1007 data = page_address(buffer->page) + buffer->offset;1008 } else {
···1002 }1003#endif10041005+ buffer = (struct scatterlist *) SCint->request_buffer;1006 len = buffer->length;1007 data = page_address(buffer->page) + buffer->offset;1008 } else {
···244/* Does the passed node have the given "name"? YES=1 NO=0 */245extern int prom_nodematch(int thisnode, char *name);246247-/* Puts in buffer a prom name in the form name@x,y or name (x for which_io248- * and y for first regs phys address249- */250-extern int prom_getname(int node, char *buf, int buflen);251-252/* Search all siblings starting at the passed node for "name" matching253 * the given string. Returns the node on success, zero on failure.254 */
···244/* Does the passed node have the given "name"? YES=1 NO=0 */245extern int prom_nodematch(int thisnode, char *name);24600000247/* Search all siblings starting at the passed node for "name" matching248 * the given string. Returns the node on success, zero on failure.249 */
+7-2
include/asm-s390/system.h
···128129#define nop() __asm__ __volatile__ ("nop")130131-#define xchg(ptr,x) \132- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr))))00000133134static inline unsigned long __xchg(unsigned long x, void * ptr, int size)135{
+2-2
include/asm-s390/timex.h
···19{20 cycles_t cycles;2122- __asm__("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");23 return cycles >> 2;24}25···27{28 unsigned long long clk;2930- __asm__("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");31 return clk;32}33
···19{20 cycles_t cycles;2122+ __asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");23 return cycles >> 2;24}25···27{28 unsigned long long clk;2930+ __asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");31 return clk;32}33
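The only change in these two hunks is the added __volatile__ qualifier. Without it gcc may treat the stck asm as a pure computation and hoist, merge, or delete it when the result looks unused or loop-invariant. An illustrative fragment (not from the patch) of the kind of caller that relies on the qualifier:

/* With a non-volatile asm the compiler could evaluate get_clock()
 * once and hoist it out of the loop; the condition would then never
 * change.  __volatile__ forces a fresh STCK on every iteration.
 */
unsigned long long start = get_clock();

while (get_clock() - start < timeout)	/* 'timeout' is illustrative */
	cpu_relax();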
-5
include/asm-sparc/oplib.h
···267/* Does the passed node have the given "name"? YES=1 NO=0 */268extern int prom_nodematch(int thisnode, char *name);269270-/* Puts in buffer a prom name in the form name@x,y or name (x for which_io 271- * and y for first regs phys address272- */273-extern int prom_getname(int node, char *buf, int buflen);274-275/* Search all siblings starting at the passed node for "name" matching276 * the given string. Returns the node on success, zero on failure.277 */
···267/* Does the passed node have the given "name"? YES=1 NO=0 */268extern int prom_nodematch(int thisnode, char *name);26900000270/* Search all siblings starting at the passed node for "name" matching271 * the given string. Returns the node on success, zero on failure.272 */
+1-1
include/asm-sparc64/openprom.h
···175};176177/* More fun PROM structures for device probing. */178-#define PROMREG_MAX 16179#define PROMVADDR_MAX 16180#define PROMINTR_MAX 15181
···175};176177/* More fun PROM structures for device probing. */178+#define PROMREG_MAX 24179#define PROMVADDR_MAX 16180#define PROMINTR_MAX 15181
-5
include/asm-sparc64/oplib.h
···287/* Does the passed node have the given "name"? YES=1 NO=0 */288extern int prom_nodematch(int thisnode, const char *name);289290-/* Puts in buffer a prom name in the form name@x,y or name (x for which_io 291- * and y for first regs phys address292- */293-extern int prom_getname(int node, char *buf, int buflen);294-295/* Search all siblings starting at the passed node for "name" matching296 * the given string. Returns the node on success, zero on failure.297 */
···287/* Does the passed node have the given "name"? YES=1 NO=0 */288extern int prom_nodematch(int thisnode, const char *name);28900000290/* Search all siblings starting at the passed node for "name" matching291 * the given string. Returns the node on success, zero on failure.292 */
-6
include/linux/cpu.h
···48{49}50#endif51-extern int current_in_cpu_hotplug(void);5253int cpu_up(unsigned int cpu);54···60static inline void unregister_cpu_notifier(struct notifier_block *nb)61{62}63-static inline int current_in_cpu_hotplug(void)64-{65- return 0;66-}6768#endif /* CONFIG_SMP */69extern struct sysdev_class cpu_sysdev_class;···68/* Stop CPUs going up and down. */69extern void lock_cpu_hotplug(void);70extern void unlock_cpu_hotplug(void);71-extern int lock_cpu_hotplug_interruptible(void);72#define hotcpu_notifier(fn, pri) { \73 static struct notifier_block fn##_nb = \74 { .notifier_call = fn, .priority = pri }; \
+6-5
include/net/sctp/structs.h
···445 struct sctp_paramhdr param_hdr;446 union sctp_addr daddr;447 unsigned long sent_at;0448} __attribute__((packed)) sctp_sender_hb_info_t;449450/*···731const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);732733/* This is a structure for holding either an IPv6 or an IPv4 address. */734-/* sin_family -- AF_INET or AF_INET6735- * sin_port -- ordinary port number736- * sin_addr -- cast to either (struct in_addr) or (struct in6_addr)737- */738struct sctp_sockaddr_entry {739 struct list_head list;740 union sctp_addr a;0741};742743typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);···982 */983 char cacc_saw_newack;984 } cacc;000985};986987struct sctp_transport *sctp_transport_new(const union sctp_addr *,···1139 sctp_scope_t scope, gfp_t gfp,1140 int flags);1141int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,1142- gfp_t gfp);1143int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);1144int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,1145 struct sctp_sock *);
···445 struct sctp_paramhdr param_hdr;446 union sctp_addr daddr;447 unsigned long sent_at;448+ __u64 hb_nonce;449} __attribute__((packed)) sctp_sender_hb_info_t;450451/*···730const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);731732/* This is a structure for holding either an IPv6 or an IPv4 address. */0000733struct sctp_sockaddr_entry {734 struct list_head list;735 union sctp_addr a;736+ __u8 use_as_src;737};738739typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);···984 */985 char cacc_saw_newack;986 } cacc;987+988+ /* 64-bit random number sent with heartbeat. */989+ __u64 hb_nonce;990};991992struct sctp_transport *sctp_transport_new(const union sctp_addr *,···1138 sctp_scope_t scope, gfp_t gfp,1139 int flags);1140int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,1141+ __u8 use_as_src, gfp_t gfp);1142int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);1143int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,1144 struct sctp_sock *);
+9
include/net/sctp/user.h
···560} __attribute__((packed, aligned(4)));561562/* Peer addresses's state. */00000000563enum sctp_spinfo_state {564 SCTP_INACTIVE,565 SCTP_ACTIVE,0566 SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */567};568
···560} __attribute__((packed, aligned(4)));561562/* Peer addresses's state. */563+/* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x]564+ * calls.565+ * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters.566+ * Not yet confirmed by a heartbeat and not available for data567+ * transfers.568+ * ACTIVE : Peer address confirmed, active and available for data transfers.569+ * INACTIVE: Peer address inactive and not available for data transfers.570+ */571enum sctp_spinfo_state {572 SCTP_INACTIVE,573 SCTP_ACTIVE,574+ SCTP_UNCONFIRMED,575 SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */576};577
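The comment block documents the state machine that the new SCTP_UNCONFIRMED value slots into; userspace observes it through the existing peer-address-info socket option. A sketch of that query, assuming the usual sctp_paddrinfo layout and the SCTP_GET_PEER_ADDR_INFO option of this era; error handling is elided and peer_addr/peer_len are placeholders.

/* Userspace sketch: read one peer address's state */
struct sctp_paddrinfo info;
socklen_t len = sizeof(info);

memset(&info, 0, sizeof(info));
memcpy(&info.spinfo_address, &peer_addr, peer_len);	/* placeholders */
getsockopt(fd, SOL_SCTP, SCTP_GET_PEER_ADDR_INFO, &info, &len);
if (info.spinfo_state == SCTP_UNCONFIRMED)
	;	/* seen in INIT/INIT-ACK, not yet heartbeat-confirmed */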
-9
include/scsi/scsi_cmnd.h
···58 int timeout_per_command;5960 unsigned char cmd_len;61- unsigned char old_cmd_len;62 enum dma_data_direction sc_data_direction;63- enum dma_data_direction sc_old_data_direction;6465 /* These elements define the operation we are about to perform */66#define MAX_COMMAND_SIZE 16···69 void *request_buffer; /* Actual requested buffer */7071 /* These elements define the operation we ultimately want to perform */72- unsigned char data_cmnd[MAX_COMMAND_SIZE];73- unsigned short old_use_sg; /* We save use_sg here when requesting74- * sense info */75 unsigned short use_sg; /* Number of pieces of scatter-gather */76 unsigned short sglist_len; /* size of malloc'd scatter-gather list */77- unsigned bufflen; /* Size of data buffer */78- void *buffer; /* Data buffer */7980 unsigned underflow; /* Return error if less than81 this amount is transferred */82- unsigned old_underflow; /* save underflow here when reusing the83- * command for error handling */8485 unsigned transfersize; /* How much we are guaranteed to86 transfer with each SCSI transfer
···58 int timeout_per_command;5960 unsigned char cmd_len;061 enum dma_data_direction sc_data_direction;06263 /* These elements define the operation we are about to perform */64#define MAX_COMMAND_SIZE 16···71 void *request_buffer; /* Actual requested buffer */7273 /* These elements define the operation we ultimately want to perform */00074 unsigned short use_sg; /* Number of pieces of scatter-gather */75 unsigned short sglist_len; /* size of malloc'd scatter-gather list */007677 unsigned underflow; /* Return error if less than78 this amount is transferred */007980 unsigned transfersize; /* How much we are guaranteed to81 transfer with each SCSI transfer
+6-1
include/scsi/scsi_transport_sas.h
···106107struct sas_expander_device {108 int level;0109110 #define SAS_EXPANDER_VENDOR_ID_LEN 8111 char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1];···128struct sas_port {129 struct device dev;130131- u8 port_identifier;132 int num_phys;00133134 /* the other end of the link */135 struct sas_rphy *rphy;···171extern int scsi_is_sas_rphy(const struct device *);172173struct sas_port *sas_port_alloc(struct device *, int);0174int sas_port_add(struct sas_port *);175void sas_port_free(struct sas_port *);176void sas_port_delete(struct sas_port *);177void sas_port_add_phy(struct sas_port *, struct sas_phy *);178void sas_port_delete_phy(struct sas_port *, struct sas_phy *);0179int scsi_is_sas_port(const struct device *);180181extern struct scsi_transport_template *
···106107struct sas_expander_device {108 int level;109+ int next_port_id;110111 #define SAS_EXPANDER_VENDOR_ID_LEN 8112 char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1];···127struct sas_port {128 struct device dev;129130+ int port_identifier;131 int num_phys;132+ /* port flags */133+ unsigned int is_backlink:1;134135 /* the other end of the link */136 struct sas_rphy *rphy;···168extern int scsi_is_sas_rphy(const struct device *);169170struct sas_port *sas_port_alloc(struct device *, int);171+struct sas_port *sas_port_alloc_num(struct device *);172int sas_port_add(struct sas_port *);173void sas_port_free(struct sas_port *);174void sas_port_delete(struct sas_port *);175void sas_port_add_phy(struct sas_port *, struct sas_phy *);176void sas_port_delete_phy(struct sas_port *, struct sas_phy *);177+void sas_port_mark_backlink(struct sas_port *);178int scsi_is_sas_port(const struct device *);179180extern struct scsi_transport_template *
+34-41
kernel/cpu.c
···16#include <linux/mutex.h>1718/* This protects CPUs going up and down... */19-static DEFINE_MUTEX(cpucontrol);02021static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);2223#ifdef CONFIG_HOTPLUG_CPU24-static struct task_struct *lock_cpu_hotplug_owner;25-static int lock_cpu_hotplug_depth;2627-static int __lock_cpu_hotplug(int interruptible)28-{29- int ret = 0;30-31- if (lock_cpu_hotplug_owner != current) {32- if (interruptible)33- ret = mutex_lock_interruptible(&cpucontrol);34- else35- mutex_lock(&cpucontrol);36- }37-38- /*39- * Set only if we succeed in locking40- */41- if (!ret) {42- lock_cpu_hotplug_depth++;43- lock_cpu_hotplug_owner = current;44- }45-46- return ret;47-}4849void lock_cpu_hotplug(void)50{51- __lock_cpu_hotplug(0);000000000000052}53EXPORT_SYMBOL_GPL(lock_cpu_hotplug);5455void unlock_cpu_hotplug(void)56{57- if (--lock_cpu_hotplug_depth == 0) {58- lock_cpu_hotplug_owner = NULL;59- mutex_unlock(&cpucontrol);060 }0061}62EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);6364-int lock_cpu_hotplug_interruptible(void)65-{66- return __lock_cpu_hotplug(1);67-}68-EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);69#endif /* CONFIG_HOTPLUG_CPU */7071/* Need to know about CPUs going up/down? */···114 struct task_struct *p;115 cpumask_t old_allowed, tmp;116117- if ((err = lock_cpu_hotplug_interruptible()) != 0)118- return err;119-120 if (num_online_cpus() == 1) {121 err = -EBUSY;122 goto out;···140 cpu_clear(cpu, tmp);141 set_cpus_allowed(current, tmp);1420143 p = __stop_machine_run(take_cpu_down, NULL, cpu);00144 if (IS_ERR(p)) {145 /* CPU didn't die: tell everyone. Can't complain. */146 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,···180out_allowed:181 set_cpus_allowed(current, old_allowed);182out:183- unlock_cpu_hotplug();184 return err;185}186#endif /*CONFIG_HOTPLUG_CPU*/···190 int ret;191 void *hcpu = (void *)(long)cpu;192193- if ((ret = lock_cpu_hotplug_interruptible()) != 0)194- return ret;195-196 if (cpu_online(cpu) || !cpu_present(cpu)) {197 ret = -EINVAL;198 goto out;···205 }206207 /* Arch-specific enabling code. */0208 ret = __cpu_up(cpu);0209 if (ret != 0)210 goto out_notify;211 BUG_ON(!cpu_online(cpu));···220 blocking_notifier_call_chain(&cpu_chain,221 CPU_UP_CANCELED, hcpu);222out:223- unlock_cpu_hotplug();224 return ret;225}
···16#include <linux/mutex.h>1718/* This protects CPUs going up and down... */19+static DEFINE_MUTEX(cpu_add_remove_lock);20+static DEFINE_MUTEX(cpu_bitmask_lock);2122static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);2324#ifdef CONFIG_HOTPLUG_CPU002526+/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */27+static struct task_struct *recursive;28+static int recursive_depth;0000000000000000002930void lock_cpu_hotplug(void)31{32+ struct task_struct *tsk = current;33+34+ if (tsk == recursive) {35+ static int warnings = 10;36+ if (warnings) {37+ printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");38+ WARN_ON(1);39+ warnings--;40+ }41+ recursive_depth++;42+ return;43+ }44+ mutex_lock(&cpu_bitmask_lock);45+ recursive = tsk;46}47EXPORT_SYMBOL_GPL(lock_cpu_hotplug);4849void unlock_cpu_hotplug(void)50{51+ WARN_ON(recursive != current);52+ if (recursive_depth) {53+ recursive_depth--;54+ return;55 }56+ mutex_unlock(&cpu_bitmask_lock);57+ recursive = NULL;58}59EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);600000061#endif /* CONFIG_HOTPLUG_CPU */6263/* Need to know about CPUs going up/down? */···122 struct task_struct *p;123 cpumask_t old_allowed, tmp;124125+ mutex_lock(&cpu_add_remove_lock);00126 if (num_online_cpus() == 1) {127 err = -EBUSY;128 goto out;···150 cpu_clear(cpu, tmp);151 set_cpus_allowed(current, tmp);152153+ mutex_lock(&cpu_bitmask_lock);154 p = __stop_machine_run(take_cpu_down, NULL, cpu);155+ mutex_unlock(&cpu_bitmask_lock);156+157 if (IS_ERR(p)) {158 /* CPU didn't die: tell everyone. Can't complain. */159 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,···187out_allowed:188 set_cpus_allowed(current, old_allowed);189out:190+ mutex_unlock(&cpu_add_remove_lock);191 return err;192}193#endif /*CONFIG_HOTPLUG_CPU*/···197 int ret;198 void *hcpu = (void *)(long)cpu;199200+ mutex_lock(&cpu_add_remove_lock);00201 if (cpu_online(cpu) || !cpu_present(cpu)) {202 ret = -EINVAL;203 goto out;···214 }215216 /* Arch-specific enabling code. */217+ mutex_lock(&cpu_bitmask_lock);218 ret = __cpu_up(cpu);219+ mutex_unlock(&cpu_bitmask_lock);220 if (ret != 0)221 goto out_notify;222 BUG_ON(!cpu_online(cpu));···227 blocking_notifier_call_chain(&cpu_chain,228 CPU_UP_CANCELED, hcpu);229out:230+ mutex_unlock(&cpu_add_remove_lock);231 return ret;232}
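The rewrite splits the old single cpucontrol mutex in two: cpu_add_remove_lock serializes whole hotplug operations, while cpu_bitmask_lock is what lock_cpu_hotplug() now hands to readers of the online map, with a loud WARN_ON instead of a deadlock when a caller (cpufreq, per the comment) recurses on the same task. A sketch of the reader side this serves; the function is invented for illustration.

/* Hypothetical reader: pin cpu_online_map for the duration of a walk.
 * Re-entry on the same task now warns rather than self-deadlocking.
 */
static int count_online_cpus_locked(void)
{
	int cpu, n = 0;

	lock_cpu_hotplug();
	for_each_online_cpu(cpu)
		n++;
	unlock_cpu_hotplug();

	return n;
}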
+21-3
kernel/cpuset.c
···762 *763 * Call with manage_mutex held. May nest a call to the764 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.00765 */766767static void update_cpu_domains(struct cpuset *cur)···783 if (is_cpu_exclusive(c))784 cpus_andnot(pspan, pspan, c->cpus_allowed);785 }786- if (is_removed(cur) || !is_cpu_exclusive(cur)) {787 cpus_or(pspan, pspan, cur->cpus_allowed);788 if (cpus_equal(pspan, cur->cpus_allowed))789 return;···1919 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);1920}1921000000000001922static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)1923{1924 struct cpuset *cs = dentry->d_fsdata;···1949 mutex_unlock(&manage_mutex);1950 return -EBUSY;1951 }00000001952 parent = cs->parent;1953 mutex_lock(&callback_mutex);1954 set_bit(CS_REMOVED, &cs->flags);1955- if (is_cpu_exclusive(cs))1956- update_cpu_domains(cs);1957 list_del(&cs->sibling); /* delete my sibling from parent->children */1958 spin_lock(&cs->dentry->d_lock);1959 d = dget(cs->dentry);
···762 *763 * Call with manage_mutex held. May nest a call to the764 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.765+ * Must not be called holding callback_mutex, because we must766+ * not call lock_cpu_hotplug() while holding callback_mutex.767 */768769static void update_cpu_domains(struct cpuset *cur)···781 if (is_cpu_exclusive(c))782 cpus_andnot(pspan, pspan, c->cpus_allowed);783 }784+ if (!is_cpu_exclusive(cur)) {785 cpus_or(pspan, pspan, cur->cpus_allowed);786 if (cpus_equal(pspan, cur->cpus_allowed))787 return;···1917 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);1918}19191920+/*1921+ * Locking note on the strange update_flag() call below:1922+ *1923+ * If the cpuset being removed is marked cpu_exclusive, then simulate1924+ * turning cpu_exclusive off, which will call update_cpu_domains().1925+ * The lock_cpu_hotplug() call in update_cpu_domains() must not be1926+ * made while holding callback_mutex. Elsewhere the kernel nests1927+ * callback_mutex inside lock_cpu_hotplug() calls. So the reverse1928+ * nesting would risk an ABBA deadlock.1929+ */1930+1931static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)1932{1933 struct cpuset *cs = dentry->d_fsdata;···1936 mutex_unlock(&manage_mutex);1937 return -EBUSY;1938 }1939+ if (is_cpu_exclusive(cs)) {1940+ int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");1941+ if (retval < 0) {1942+ mutex_unlock(&manage_mutex);1943+ return retval;1944+ }1945+ }1946 parent = cs->parent;1947 mutex_lock(&callback_mutex);1948 set_bit(CS_REMOVED, &cs->flags);001949 list_del(&cs->sibling); /* delete my sibling from parent->children */1950 spin_lock(&cs->dentry->d_lock);1951 d = dget(cs->dentry);
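For readers skimming the locking note added above, the deadlock it rules out is the classic two-lock inversion, compressed below (illustration, not code from the patch):

/*   Task 1                          Task 2
 *   lock_cpu_hotplug();             mutex_lock(&callback_mutex);
 *   mutex_lock(&callback_mutex);    lock_cpu_hotplug();
 *        ^ waits on Task 2               ^ waits on Task 1
 *
 * Simulating cpu_exclusive-off via update_flag() before callback_mutex
 * is taken keeps cpuset_rmdir() on the first ordering only.
 */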
+1-2
net/8021q/vlan.c
···542 * so it cannot "appear" on us.543 */544 if (!grp) { /* need to add a new group */545- grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL);546 if (!grp)547 goto out_free_unregister;548549 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */550- memset(grp, 0, sizeof(struct vlan_group));551 grp->real_dev_ifindex = real_dev->ifindex;552553 hlist_add_head_rcu(&grp->hlist,
···542 * so it cannot "appear" on us.543 */544 if (!grp) { /* need to add a new group */545+ grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);546 if (!grp)547 goto out_free_unregister;548549 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */0550 grp->real_dev_ifindex = real_dev->ifindex;551552 hlist_add_head_rcu(&grp->hlist,
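This is the first of several identical conversions in the series (ircomm_tty.c, irda_device.c, and the ematch and tipc hunks below): kzalloc() subsumes the allocate-then-clear pair. A sketch of the equivalence, not the actual slab implementation:

/* What kzalloc(n, flags) guarantees, written as the code it replaces */
static inline void *kzalloc_equivalent(size_t n, gfp_t flags)
{
	void *p = kmalloc(n, flags);

	if (p)
		memset(p, 0, n);
	return p;
}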
···8182 /* Any userdata supplied? */83 if (userdata == NULL) {84- tx_skb = dev_alloc_skb(64);85 if (!tx_skb)86 return -ENOMEM;87···115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );116117 if (!userdata) {118- tx_skb = dev_alloc_skb(64);119 if (!tx_skb)120 return -ENOMEM;121
···8182 /* Any userdata supplied? */83 if (userdata == NULL) {84+ tx_skb = alloc_skb(64, GFP_ATOMIC);85 if (!tx_skb)86 return -ENOMEM;87···115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );116117 if (!userdata) {118+ tx_skb = alloc_skb(64, GFP_ATOMIC);119 if (!tx_skb)120 return -ENOMEM;121
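The dev_alloc_skb() to alloc_skb(..., GFP_ATOMIC) substitution repeated through the IrDA files is not a pure rename: in kernels of this vintage dev_alloc_skb(len) also padded the allocation and reserved 16 bytes of headroom meant for receive-side link headers, roughly as below. These are transmit buffers whose headroom is sized explicitly (see the max_header_size arithmetic in the ircomm_tty and iriap hunks), so the plain allocator suffices.

/* Approximate expansion of the helper being replaced (2.6.x era) */
static inline struct sk_buff *dev_alloc_skb_sketch(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + 16, GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, 16);	/* headroom for hard headers */
	return skb;
}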
+1-1
net/irda/ircomm/ircomm_param.c
···121122 skb = self->ctrl_skb; 123 if (!skb) {124- skb = dev_alloc_skb(256);125 if (!skb) {126 spin_unlock_irqrestore(&self->spinlock, flags);127 return -ENOMEM;
···121122 skb = self->ctrl_skb; 123 if (!skb) {124+ skb = alloc_skb(256, GFP_ATOMIC);125 if (!skb) {126 spin_unlock_irqrestore(&self->spinlock, flags);127 return -ENOMEM;
+4-4
net/irda/ircomm/ircomm_tty.c
···379 self = hashbin_lock_find(ircomm_tty, line, NULL);380 if (!self) {381 /* No, so make new instance */382- self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);383 if (self == NULL) {384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__);385 return -ENOMEM;386 }387- memset(self, 0, sizeof(struct ircomm_tty_cb));388389 self->magic = IRCOMM_TTY_MAGIC;390 self->flow = FLOW_STOP;···758 }759 } else {760 /* Prepare a full sized frame */761- skb = dev_alloc_skb(self->max_data_size+762- self->max_header_size);0763 if (!skb) {764 spin_unlock_irqrestore(&self->spinlock, flags);765 return -ENOBUFS;
···379 self = hashbin_lock_find(ircomm_tty, line, NULL);380 if (!self) {381 /* No, so make new instance */382+ self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);383 if (self == NULL) {384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__);385 return -ENOMEM;386 }0387388 self->magic = IRCOMM_TTY_MAGIC;389 self->flow = FLOW_STOP;···759 }760 } else {761 /* Prepare a full sized frame */762+ skb = alloc_skb(self->max_data_size+763+ self->max_header_size,764+ GFP_ATOMIC);765 if (!skb) {766 spin_unlock_irqrestore(&self->spinlock, flags);767 return -ENOBUFS;
+1-3
net/irda/irda_device.c
···401 }402403 /* Allocate dongle info for this instance */404- dongle = kmalloc(sizeof(dongle_t), GFP_KERNEL);405 if (!dongle)406 goto out;407-408- memset(dongle, 0, sizeof(dongle_t));409410 /* Bind the registration info to this particular instance */411 dongle->issue = reg;
···401 }402403 /* Allocate dongle info for this instance */404+ dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL);405 if (!dongle)406 goto out;00407408 /* Bind the registration info to this particular instance */409 dongle->issue = reg;
+5-4
net/irda/iriap.c
···345 IRDA_ASSERT(self != NULL, return;);346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);347348- tx_skb = dev_alloc_skb(64);349 if (tx_skb == NULL) {350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", 351 __FUNCTION__, 64);···396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */397398 skb_len = self->max_header_size+2+name_len+1+attr_len+4;399- tx_skb = dev_alloc_skb(skb_len);400 if (!tx_skb)401 return -ENOMEM;402···562 * value. We add 32 bytes because of the 6 bytes for the frame and563 * max 5 bytes for the value coding.564 */565- tx_skb = dev_alloc_skb(value->len + self->max_header_size + 32);0566 if (!tx_skb)567 return;568···701 IRDA_ASSERT(self != NULL, return;);702 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);703704- tx_skb = dev_alloc_skb(64);705 if (!tx_skb)706 return;707
···345 IRDA_ASSERT(self != NULL, return;);346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);347348+ tx_skb = alloc_skb(64, GFP_ATOMIC);349 if (tx_skb == NULL) {350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", 351 __FUNCTION__, 64);···396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */397398 skb_len = self->max_header_size+2+name_len+1+attr_len+4;399+ tx_skb = alloc_skb(skb_len, GFP_ATOMIC);400 if (!tx_skb)401 return -ENOMEM;402···562 * value. We add 32 bytes because of the 6 bytes for the frame and563 * max 5 bytes for the value coding.564 */565+ tx_skb = alloc_skb(value->len + self->max_header_size + 32,566+ GFP_ATOMIC);567 if (!tx_skb)568 return;569···700 IRDA_ASSERT(self != NULL, return;);701 IRDA_ASSERT(self->magic == IAS_MAGIC, return;);702703+ tx_skb = alloc_skb(64, GFP_ATOMIC);704 if (!tx_skb)705 return;706
+1-1
net/irda/iriap_event.c
···365366 switch (event) {367 case IAP_LM_CONNECT_INDICATION:368- tx_skb = dev_alloc_skb(64);369 if (tx_skb == NULL) {370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__);371 return;
···365366 switch (event) {367 case IAP_LM_CONNECT_INDICATION:368+ tx_skb = alloc_skb(64, GFP_ATOMIC);369 if (tx_skb == NULL) {370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__);371 return;
+1-2
net/sched/ematch.c
···321 list_len = RTA_PAYLOAD(rt_list);322 matches_len = tree_hdr->nmatches * sizeof(*em);323324- tree->matches = kmalloc(matches_len, GFP_KERNEL);325 if (tree->matches == NULL)326 goto errout;327- memset(tree->matches, 0, matches_len);328329 /* We do not use rtattr_parse_nested here because the maximum330 * number of attributes is unknown. This saves us the allocation
···321 list_len = RTA_PAYLOAD(rt_list);322 matches_len = tree_hdr->nmatches * sizeof(*em);323324+ tree->matches = kzalloc(matches_len, GFP_KERNEL);325 if (tree->matches == NULL)326 goto errout;0327328 /* We do not use rtattr_parse_nested here because the maximum329 * number of attributes is unknown. This saves us the allocation
+3-1
net/sched/sch_netem.c
···148static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)149{150 struct netem_sched_data *q = qdisc_priv(sch);151- struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;0152 struct sk_buff *skb2;153 int ret;154 int count = 1;···201 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);202 }2030204 if (q->gap == 0 /* not doing reordering */205 || q->counter < q->gap /* inside last reordering gap */206 || q->reorder < get_crandom(&q->reorder_cor)) {
···148static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)149{150 struct netem_sched_data *q = qdisc_priv(sch);151+ /* We don't fill cb now as skb_unshare() may invalidate it */152+ struct netem_skb_cb *cb;153 struct sk_buff *skb2;154 int ret;155 int count = 1;···200 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);201 }202203+ cb = (struct netem_skb_cb *)skb->cb;204 if (q->gap == 0 /* not doing reordering */205 || q->counter < q->gap /* inside last reordering gap */206 || q->reorder < get_crandom(&q->reorder_cor)) {
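The moved assignment matters because netem_enqueue() may pass the skb through skb_unshare() (the corruption path), which can free the original buffer and hand back a copy; a cb pointer computed up front would then reference freed memory. The bug pattern in miniature (time_to_send is the netem cb field of this era):

cb = (struct netem_skb_cb *)skb->cb;	/* taken too early ...       */
skb = skb_unshare(skb, GFP_ATOMIC);	/* ... may free the old skb  */
if (!skb)
	return NET_XMIT_DROP;
cb->time_to_send = now;			/* use-after-free through cb */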
+17-10
net/sctp/associola.c
···441 /* If the primary path is changing, assume that the442 * user wants to use this new path.443 */444- if (transport->state != SCTP_INACTIVE)0445 asoc->peer.active_path = transport;446447 /*···533 port = addr->v4.sin_port;534535 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",536- " port: %d state:%s\n",537 asoc,538 addr,539 addr->v4.sin_port,540- peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE");541542 /* Set the port if it has not been set yet. */543 if (0 == asoc->peer.port)···546 /* Check to see if this is a duplicate. */547 peer = sctp_assoc_lookup_paddr(asoc, addr);548 if (peer) {549- if (peer_state == SCTP_ACTIVE &&550- peer->state == SCTP_UNKNOWN)551- peer->state = SCTP_ACTIVE;000552 return peer;553 }554···743 list_for_each(pos, &asoc->peer.transport_addr_list) {744 t = list_entry(pos, struct sctp_transport, transports);745746- if (t->state == SCTP_INACTIVE)0747 continue;748 if (!first || t->last_time_heard > first->last_time_heard) {749 second = first;···764 * [If the primary is active but not most recent, bump the most765 * recently used transport.]766 */767- if (asoc->peer.primary_path->state != SCTP_INACTIVE &&0768 first != asoc->peer.primary_path) {769 second = first;770 first = asoc->peer.primary_path;···1060 transports);1061 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))1062 sctp_assoc_add_peer(asoc, &trans->ipaddr,1063- GFP_ATOMIC, SCTP_ACTIVE);1064 }10651066 asoc->ctsn_ack_point = asoc->next_tsn - 1;···11001101 /* Try to find an active transport. */11021103- if (t->state != SCTP_INACTIVE) {01104 break;1105 } else {1106 /* Keep track of the next transport in case
···441 /* If the primary path is changing, assume that the442 * user wants to use this new path.443 */444+ if ((transport->state == SCTP_ACTIVE) ||445+ (transport->state == SCTP_UNKNOWN))446 asoc->peer.active_path = transport;447448 /*···532 port = addr->v4.sin_port;533534 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",535+ " port: %d state:%d\n",536 asoc,537 addr,538 addr->v4.sin_port,539+ peer_state);540541 /* Set the port if it has not been set yet. */542 if (0 == asoc->peer.port)···545 /* Check to see if this is a duplicate. */546 peer = sctp_assoc_lookup_paddr(asoc, addr);547 if (peer) {548+ if (peer->state == SCTP_UNKNOWN) {549+ if (peer_state == SCTP_ACTIVE)550+ peer->state = SCTP_ACTIVE;551+ if (peer_state == SCTP_UNCONFIRMED)552+ peer->state = SCTP_UNCONFIRMED;553+ }554 return peer;555 }556···739 list_for_each(pos, &asoc->peer.transport_addr_list) {740 t = list_entry(pos, struct sctp_transport, transports);741742+ if ((t->state == SCTP_INACTIVE) ||743+ (t->state == SCTP_UNCONFIRMED))744 continue;745 if (!first || t->last_time_heard > first->last_time_heard) {746 second = first;···759 * [If the primary is active but not most recent, bump the most760 * recently used transport.]761 */762+ if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||763+ (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&764 first != asoc->peer.primary_path) {765 second = first;766 first = asoc->peer.primary_path;···1054 transports);1055 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))1056 sctp_assoc_add_peer(asoc, &trans->ipaddr,1057+ GFP_ATOMIC, trans->state);1058 }10591060 asoc->ctsn_ack_point = asoc->next_tsn - 1;···10941095 /* Try to find an active transport. */10961097+ if ((t->state == SCTP_ACTIVE) ||1098+ (t->state == SCTP_UNKNOWN)) {1099 break;1100 } else {1101 /* Keep track of the next transport in case
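The hunks in this file all repeat the same two-state tests; conceptually they implement a single usability predicate. A sketch only: the helper name is invented, the patch deliberately open-codes the comparisons.

/* Invented helper summarizing the open-coded tests above */
static inline int sctp_transport_usable(const struct sctp_transport *t)
{
	/* INACTIVE and UNCONFIRMED peers carry no data traffic */
	return t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN;
}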
+5-3
net/sctp/bind_addr.c
···146147/* Add an address to the bind address list in the SCTP_bind_addr structure. */148int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,149- gfp_t gfp)150{151 struct sctp_sockaddr_entry *addr;152···162 */163 if (!addr->a.v4.sin_port)164 addr->a.v4.sin_port = bp->port;00165166 INIT_LIST_HEAD(&addr->list);167 list_add_tail(&addr->list, &bp->address_list);···276 }277278 af->from_addr_param(&addr, rawaddr, port, 0);279- retval = sctp_add_bind_addr(bp, &addr, gfp);280 if (retval) {281 /* Can't finish building the list, clean up. */282 sctp_bind_addr_clean(bp);···369 (((AF_INET6 == addr->sa.sa_family) &&370 (flags & SCTP_ADDR6_ALLOWED) &&371 (flags & SCTP_ADDR6_PEERSUPP))))372- error = sctp_add_bind_addr(dest, addr, gfp);373 }374375 return error;
···146147/* Add an address to the bind address list in the SCTP_bind_addr structure. */148int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,149+ __u8 use_as_src, gfp_t gfp)150{151 struct sctp_sockaddr_entry *addr;152···162 */163 if (!addr->a.v4.sin_port)164 addr->a.v4.sin_port = bp->port;165+166+ addr->use_as_src = use_as_src;167168 INIT_LIST_HEAD(&addr->list);169 list_add_tail(&addr->list, &bp->address_list);···274 }275276 af->from_addr_param(&addr, rawaddr, port, 0);277+ retval = sctp_add_bind_addr(bp, &addr, 1, gfp);278 if (retval) {279 /* Can't finish building the list, clean up. */280 sctp_bind_addr_clean(bp);···367 (((AF_INET6 == addr->sa.sa_family) &&368 (flags & SCTP_ADDR6_ALLOWED) &&369 (flags & SCTP_ADDR6_PEERSUPP))))370+ error = sctp_add_bind_addr(dest, addr, 1, gfp);371 }372373 return error;
+6-5
net/sctp/endpointola.c
···158void sctp_endpoint_free(struct sctp_endpoint *ep)159{160 ep->base.dead = 1;000000161 sctp_endpoint_put(ep);162}163···171static void sctp_endpoint_destroy(struct sctp_endpoint *ep)172{173 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);174-175- ep->base.sk->sk_state = SCTP_SS_CLOSED;176-177- /* Unlink this endpoint, so we can't find it again! */178- sctp_unhash_endpoint(ep);179180 /* Free up the HMAC transform. */181 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
···158void sctp_endpoint_free(struct sctp_endpoint *ep)159{160 ep->base.dead = 1;161+162+ ep->base.sk->sk_state = SCTP_SS_CLOSED;163+164+ /* Unlink this endpoint, so we can't find it again! */165+ sctp_unhash_endpoint(ep);166+167 sctp_endpoint_put(ep);168}169···165static void sctp_endpoint_destroy(struct sctp_endpoint *ep)166{167 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);00000168169 /* Free up the HMAC transform. */170 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
+6-3
net/sctp/outqueue.c
···691692 if (!new_transport) {693 new_transport = asoc->peer.active_path;694- } else if (new_transport->state == SCTP_INACTIVE) {0695 /* If the chunk is Heartbeat or Heartbeat Ack,696 * send it to chunk->transport, even if it's697 * inactive.···849 */850 new_transport = chunk->transport;851 if (!new_transport ||852- new_transport->state == SCTP_INACTIVE)0853 new_transport = asoc->peer.active_path;854855 /* Change packets if necessary. */···1466 /* Mark the destination transport address as1467 * active if it is not so marked.1468 */1469- if (transport->state == SCTP_INACTIVE) {01470 sctp_assoc_control_transport(1471 transport->asoc,1472 transport,
···691692 if (!new_transport) {693 new_transport = asoc->peer.active_path;694+ } else if ((new_transport->state == SCTP_INACTIVE) ||695+ (new_transport->state == SCTP_UNCONFIRMED)) {696 /* If the chunk is Heartbeat or Heartbeat Ack,697 * send it to chunk->transport, even if it's698 * inactive.···848 */849 new_transport = chunk->transport;850 if (!new_transport ||851+ ((new_transport->state == SCTP_INACTIVE) ||852+ (new_transport->state == SCTP_UNCONFIRMED)))853 new_transport = asoc->peer.active_path;854855 /* Change packets if necessary. */···1464 /* Mark the destination transport address as1465 * active if it is not so marked.1466 */1467+ if ((transport->state == SCTP_INACTIVE) ||1468+ (transport->state == SCTP_UNCONFIRMED)) {1469 sctp_assoc_control_transport(1470 transport->asoc,1471 transport,
+10-4
net/sctp/sm_make_chunk.c
···14931494 /* Also, add the destination address. */1495 if (list_empty(&retval->base.bind_addr.address_list)) {1496- sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,1497 GFP_ATOMIC);1498 }1499···2017 af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);2018 scope = sctp_scope(peer_addr);2019 if (sctp_in_scope(&addr, scope))2020- if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE))2021 return 0;2022 break;2023···2418 * Due to Resource Shortage'.2419 */24202421- peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE);2422 if (!peer)2423 return SCTP_ERROR_RSRC_LOW;2424···2565 union sctp_addr_param *addr_param;2566 struct list_head *pos;2567 struct sctp_transport *transport;02568 int retval = 0;25692570 addr_param = (union sctp_addr_param *)···2579 case SCTP_PARAM_ADD_IP:2580 sctp_local_bh_disable();2581 sctp_write_lock(&asoc->base.addr_lock);2582- retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC);00002583 sctp_write_unlock(&asoc->base.addr_lock);2584 sctp_local_bh_enable();2585 break;···2596 list_for_each(pos, &asoc->peer.transport_addr_list) {2597 transport = list_entry(pos, struct sctp_transport,2598 transports);02599 sctp_transport_route(transport, NULL,2600 sctp_sk(asoc->base.sk));2601 }
···14931494 /* Also, add the destination address. */1495 if (list_empty(&retval->base.bind_addr.address_list)) {1496+ sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,1497 GFP_ATOMIC);1498 }1499···2017 af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);2018 scope = sctp_scope(peer_addr);2019 if (sctp_in_scope(&addr, scope))2020+ if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))2021 return 0;2022 break;2023···2418 * Due to Resource Shortage'.2419 */24202421+ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);2422 if (!peer)2423 return SCTP_ERROR_RSRC_LOW;2424···2565 union sctp_addr_param *addr_param;2566 struct list_head *pos;2567 struct sctp_transport *transport;2568+ struct sctp_sockaddr_entry *saddr;2569 int retval = 0;25702571 addr_param = (union sctp_addr_param *)···2578 case SCTP_PARAM_ADD_IP:2579 sctp_local_bh_disable();2580 sctp_write_lock(&asoc->base.addr_lock);2581+ list_for_each(pos, &bp->address_list) {2582+ saddr = list_entry(pos, struct sctp_sockaddr_entry, list);2583+ if (sctp_cmp_addr_exact(&saddr->a, &addr))2584+ saddr->use_as_src = 1;2585+ }2586 sctp_write_unlock(&asoc->base.addr_lock);2587 sctp_local_bh_enable();2588 break;···2591 list_for_each(pos, &asoc->peer.transport_addr_list) {2592 transport = list_entry(pos, struct sctp_transport,2593 transports);2594+ dst_release(transport->dst);2595 sctp_transport_route(transport, NULL,2596 sctp_sk(asoc->base.sk));2597 }
+10-2
net/sctp/sm_sideeffect.c
···430 /* The check for association's overall error counter exceeding the431 * threshold is done in the state function.432 */433- asoc->overall_error_count++;0000434435 if (transport->state != SCTP_INACTIVE &&436 (transport->error_count++ >= transport->pathmaxrxt)) {···614 /* Mark the destination transport address as active if it is not so615 * marked.616 */617- if (t->state == SCTP_INACTIVE)618 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,619 SCTP_HEARTBEAT_SUCCESS);620···624 */625 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;626 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));0000627}628629/* Helper function to do a transport reset at the expiry of the hearbeat
···430 /* The check for association's overall error counter exceeding the431 * threshold is done in the state function.432 */433+ /* When probing UNCONFIRMED addresses, the association overall434+ * error count is NOT incremented435+ */436+ if (transport->state != SCTP_UNCONFIRMED)437+ asoc->overall_error_count++;438439 if (transport->state != SCTP_INACTIVE &&440 (transport->error_count++ >= transport->pathmaxrxt)) {···610 /* Mark the destination transport address as active if it is not so611 * marked.612 */613+ if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))614 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,615 SCTP_HEARTBEAT_SUCCESS);616···620 */621 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;622 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));623+624+ /* Update the heartbeat timer. */625+ if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))626+ sctp_transport_hold(t);627}628629/* Helper function to do a transport reset at the expiry of the hearbeat
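The mod_timer()/sctp_transport_hold() pairing added at the end follows SCTP's timer-refcount idiom: a pending heartbeat timer owns one reference on the transport, released by the expiry handler. mod_timer() returns 0 exactly when the timer was not already pending, i.e. when a new reference must be taken. A sketch of the balancing release, assuming the usual 2.6 handler shape; the function name is invented.

/* Sketch: the expiry side that balances the hold above */
static void example_hb_timeout(unsigned long data)
{
	struct sctp_transport *t = (struct sctp_transport *)data;

	/* ... queue the next HEARTBEAT ... */

	sctp_transport_put(t);	/* drop the reference the timer owned */
}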
+7-1
net/sctp/sm_statefuns.c
···846 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));847 hbinfo.daddr = transport->ipaddr;848 hbinfo.sent_at = jiffies;0849850 /* Send a heartbeat to our peer. */851 paylen = sizeof(sctp_sender_hb_info_t);···1048 }1049 return SCTP_DISPOSITION_DISCARD;1050 }000010511052 max_interval = link->hbinterval + link->rto;1053···5283 datalen -= sizeof(sctp_data_chunk_t);52845285 deliver = SCTP_CMD_CHUNK_ULP;5286- chunk->data_accepted = 1;52875288 /* Think about partial delivery. */5289 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {···5360 */5361 if (SCTP_CMD_CHUNK_ULP == deliver)5362 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));0053635364 /* Note: Some chunks may get overcounted (if we drop) or overcounted5365 * if we renege and the chunk arrives again.
···846 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));847 hbinfo.daddr = transport->ipaddr;848 hbinfo.sent_at = jiffies;849+ hbinfo.hb_nonce = transport->hb_nonce;850851 /* Send a heartbeat to our peer. */852 paylen = sizeof(sctp_sender_hb_info_t);···1047 }1048 return SCTP_DISPOSITION_DISCARD;1049 }1050+1051+ /* Validate the 64-bit random nonce. */1052+ if (hbinfo->hb_nonce != link->hb_nonce)1053+ return SCTP_DISPOSITION_DISCARD;10541055 max_interval = link->hbinterval + link->rto;1056···5278 datalen -= sizeof(sctp_data_chunk_t);52795280 deliver = SCTP_CMD_CHUNK_ULP;052815282 /* Think about partial delivery. */5283 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {···5356 */5357 if (SCTP_CMD_CHUNK_ULP == deliver)5358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));5359+5360+ chunk->data_accepted = 1;53615362 /* Note: Some chunks may get overcounted (if we drop) or overcounted5363 * if we renege and the chunk arrives again.
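The two sm_statefuns.c hunks above are the send and verify halves of the new heartbeat nonce. The seeding site is not part of these hunks; presumably the transport initializes hb_nonce once with a random value when it is created, which the first line below assumes. After that the value rides in every HEARTBEAT payload and must match on the HEARTBEAT-ACK.

/* Life cycle sketch; the seeding line is an assumption */
get_random_bytes(&transport->hb_nonce, sizeof(transport->hb_nonce));

/* sender fills it into the payload: */
hbinfo.hb_nonce = transport->hb_nonce;

/* the ACK handler discards mismatches: */
if (hbinfo->hb_nonce != link->hb_nonce)
	return SCTP_DISPOSITION_DISCARD;	/* stale or forged ack */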
+60-16
net/sctp/socket.c
···369370 /* Use GFP_ATOMIC since BHs are disabled. */371 addr->v4.sin_port = ntohs(addr->v4.sin_port);372- ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC);373 addr->v4.sin_port = htons(addr->v4.sin_port);374 sctp_write_unlock(&ep->base.addr_lock);375 sctp_local_bh_enable();···491 struct sctp_chunk *chunk;492 struct sctp_sockaddr_entry *laddr;493 union sctp_addr *addr;0494 void *addr_buf;495 struct sctp_af *af;496 struct list_head *pos;···559 }560561 retval = sctp_send_asconf(asoc, chunk);00562563- /* FIXME: After sending the add address ASCONF chunk, we564- * cannot append the address to the association's binding565- * address list, because the new address may be used as the566- * source of a message sent to the peer before the ASCONF567- * chunk is received by the peer. So we should wait until568- * ASCONF_ACK is received.569 */00000000000000570 }571572out:···689 struct sctp_sock *sp;690 struct sctp_endpoint *ep;691 struct sctp_association *asoc;0692 struct sctp_bind_addr *bp;693 struct sctp_chunk *chunk;694 union sctp_addr *laddr;0695 void *addr_buf;696 struct sctp_af *af;697- struct list_head *pos;0698 int i;699 int retval = 0;700···764 goto out;765 }766767- retval = sctp_send_asconf(asoc, chunk);768-769- /* FIXME: After sending the delete address ASCONF chunk, we770- * cannot remove the addresses from the association's bind771- * address list, because there maybe some packet send to772- * the delete addresses, so we should wait until ASCONF_ACK773- * packet is received.774 */000000000000000000000000000000000775 }776out:777 return retval;···5021/* Caller must hold hashbucket lock for this tb with local BH disabled */5022static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)5023{5024- if (hlist_empty(&pp->owner)) {5025 if (pp->next)5026 pp->next->pprev = pp->pprev;5027 *(pp->pprev) = pp->next;
···369370 /* Use GFP_ATOMIC since BHs are disabled. */371 addr->v4.sin_port = ntohs(addr->v4.sin_port);372+ ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);373 addr->v4.sin_port = htons(addr->v4.sin_port);374 sctp_write_unlock(&ep->base.addr_lock);375 sctp_local_bh_enable();···491 struct sctp_chunk *chunk;492 struct sctp_sockaddr_entry *laddr;493 union sctp_addr *addr;494+ union sctp_addr saveaddr;495 void *addr_buf;496 struct sctp_af *af;497 struct list_head *pos;···558 }559560 retval = sctp_send_asconf(asoc, chunk);561+ if (retval)562+ goto out;563564+ /* Add the new addresses to the bind address list with565+ * use_as_src set to 0.0000566 */567+ sctp_local_bh_disable();568+ sctp_write_lock(&asoc->base.addr_lock);569+ addr_buf = addrs;570+ for (i = 0; i < addrcnt; i++) {571+ addr = (union sctp_addr *)addr_buf;572+ af = sctp_get_af_specific(addr->v4.sin_family);573+ memcpy(&saveaddr, addr, af->sockaddr_len);574+ saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);575+ retval = sctp_add_bind_addr(bp, &saveaddr, 0,576+ GFP_ATOMIC);577+ addr_buf += af->sockaddr_len;578+ }579+ sctp_write_unlock(&asoc->base.addr_lock);580+ sctp_local_bh_enable();581 }582583out:···676 struct sctp_sock *sp;677 struct sctp_endpoint *ep;678 struct sctp_association *asoc;679+ struct sctp_transport *transport;680 struct sctp_bind_addr *bp;681 struct sctp_chunk *chunk;682 union sctp_addr *laddr;683+ union sctp_addr saveaddr;684 void *addr_buf;685 struct sctp_af *af;686+ struct list_head *pos, *pos1;687+ struct sctp_sockaddr_entry *saddr;688 int i;689 int retval = 0;690···748 goto out;749 }750751+ /* Reset use_as_src flag for the addresses in the bind address752+ * list that are to be deleted.00000753 */754+ sctp_local_bh_disable();755+ sctp_write_lock(&asoc->base.addr_lock);756+ addr_buf = addrs;757+ for (i = 0; i < addrcnt; i++) {758+ laddr = (union sctp_addr *)addr_buf;759+ af = sctp_get_af_specific(laddr->v4.sin_family);760+ memcpy(&saveaddr, laddr, af->sockaddr_len);761+ saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);762+ list_for_each(pos1, &bp->address_list) {763+ saddr = list_entry(pos1,764+ struct sctp_sockaddr_entry,765+ list);766+ if (sctp_cmp_addr_exact(&saddr->a, &saveaddr))767+ saddr->use_as_src = 0;768+ }769+ addr_buf += af->sockaddr_len;770+ }771+ sctp_write_unlock(&asoc->base.addr_lock);772+ sctp_local_bh_enable();773+774+ /* Update the route and saddr entries for all the transports775+ * as some of the addresses in the bind address list are776+ * about to be deleted and cannot be used as source addresses.777+ */778+ list_for_each(pos1, &asoc->peer.transport_addr_list) {779+ transport = list_entry(pos1, struct sctp_transport,780+ transports);781+ dst_release(transport->dst);782+ sctp_transport_route(transport, NULL,783+ sctp_sk(asoc->base.sk));784+ }785+786+ retval = sctp_send_asconf(asoc, chunk);787 }788out:789 return retval;···4977/* Caller must hold hashbucket lock for this tb with local BH disabled */4978static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)4979{4980+ if (pp && hlist_empty(&pp->owner)) {4981 if (pp->next)4982 pp->next->pprev = pp->pprev;4983 *(pp->pprev) = pp->next;
+1-2
net/tipc/user_reg.c
···8283 spin_lock_bh(&reg_lock);84 if (!users) {85- users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);86 if (users) {87- memset(users, 0, USER_LIST_SIZE);88 for (i = 1; i <= MAX_USERID; i++) {89 users[i].next = i - 1;90 }
···8283 spin_lock_bh(&reg_lock);84 if (!users) {85+ users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);86 if (users) {087 for (i = 1; i <= MAX_USERID; i++) {88 users[i].next = i - 1;89 }