Merge branch 'master' into upstream-fixes

+2039 -1984
+1 -3
MAINTAINERS
··· 1676 1676 S: Maintained 1677 1677 1678 1678 LAPB module 1679 - P: Henner Eisen 1680 - M: eis@baty.hanse.de 1681 1679 L: linux-x25@vger.kernel.org 1682 - S: Maintained 1680 + S: Orphan 1683 1681 1684 1682 LASI 53c700 driver for PARISC 1685 1683 P: James E.J. Bottomley
+2 -2
arch/s390/kernel/head31.S
··· 273 273 .Lbss_end: .long _end 274 274 .Lparmaddr: .long PARMAREA 275 275 .Lsccbaddr: .long .Lsccb 276 - .align 4096 276 + .org 0x12000 277 277 .Lsccb: 278 278 .hword 0x1000 # length, one page 279 279 .byte 0x00,0x00,0x00 ··· 290 290 .Lscpincr2: 291 291 .quad 0x00 292 292 .fill 3984,1,0 293 - .align 4096 293 + .org 0x13000 294 294 295 295 #ifdef CONFIG_SHARED_KERNEL 296 296 .org 0x100000
+2 -2
arch/s390/kernel/head64.S
··· 268 268 .Lparmaddr: 269 269 .quad PARMAREA 270 270 271 - .align 4096 271 + .org 0x12000 272 272 .Lsccb: 273 273 .hword 0x1000 # length, one page 274 274 .byte 0x00,0x00,0x00 ··· 285 285 .Lscpincr2: 286 286 .quad 0x00 287 287 .fill 3984,1,0 288 - .align 4096 288 + .org 0x13000 289 289 290 290 #ifdef CONFIG_SHARED_KERNEL 291 291 .org 0x100000
+36 -10
arch/s390/kernel/setup.c
··· 877 877 878 878 static decl_subsys(ipl, NULL, NULL); 879 879 880 + static int ipl_register_fcp_files(void) 881 + { 882 + int rc; 883 + 884 + rc = sysfs_create_group(&ipl_subsys.kset.kobj, 885 + &ipl_fcp_attr_group); 886 + if (rc) 887 + goto out; 888 + rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj, 889 + &ipl_parameter_attr); 890 + if (rc) 891 + goto out_ipl_parm; 892 + rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj, 893 + &ipl_scp_data_attr); 894 + if (!rc) 895 + goto out; 896 + 897 + sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr); 898 + 899 + out_ipl_parm: 900 + sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group); 901 + out: 902 + return rc; 903 + } 904 + 880 905 static int __init 881 906 ipl_device_sysfs_register(void) { 882 907 int rc; 883 908 884 909 rc = firmware_register(&ipl_subsys); 885 910 if (rc) 886 - return rc; 911 + goto out; 887 912 888 913 switch (get_ipl_type()) { 889 914 case ipl_type_ccw: 890 - sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group); 915 + rc = sysfs_create_group(&ipl_subsys.kset.kobj, 916 + &ipl_ccw_attr_group); 891 917 break; 892 918 case ipl_type_fcp: 893 - sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group); 894 - sysfs_create_bin_file(&ipl_subsys.kset.kobj, 895 - &ipl_parameter_attr); 896 - sysfs_create_bin_file(&ipl_subsys.kset.kobj, 897 - &ipl_scp_data_attr); 919 + rc = ipl_register_fcp_files(); 898 920 break; 899 921 default: 900 - sysfs_create_group(&ipl_subsys.kset.kobj, 901 - &ipl_unknown_attr_group); 922 + rc = sysfs_create_group(&ipl_subsys.kset.kobj, 923 + &ipl_unknown_attr_group); 902 924 break; 903 925 } 904 - return 0; 926 + 927 + if (rc) 928 + firmware_unregister(&ipl_subsys); 929 + out: 930 + return rc; 905 931 } 906 932 907 933 __initcall(ipl_device_sysfs_register);
+7 -18
arch/sparc/kernel/devices.c
··· 15 15 16 16 #include <asm/page.h> 17 17 #include <asm/oplib.h> 18 + #include <asm/prom.h> 18 19 #include <asm/smp.h> 19 20 #include <asm/system.h> 20 21 #include <asm/cpudata.h> ··· 35 34 int (*compare)(int, int, void *), void *compare_arg, 36 35 int *prom_node, int *mid) 37 36 { 38 - char node_str[128]; 39 - 40 - prom_getstring(nd, "device_type", node_str, sizeof(node_str)); 41 - if (strcmp(node_str, "cpu")) 42 - return -ENODEV; 43 - 44 37 if (!compare(nd, *cur_inst, compare_arg)) { 45 38 if (prom_node) 46 39 *prom_node = nd; ··· 54 59 static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg, 55 60 int *prom_node, int *mid) 56 61 { 57 - int nd, cur_inst, err; 62 + struct device_node *dp; 63 + int cur_inst; 58 64 59 - nd = prom_root_node; 60 65 cur_inst = 0; 61 - 62 - err = check_cpu_node(nd, &cur_inst, compare, compare_arg, 63 - prom_node, mid); 64 - if (!err) 65 - return 0; 66 - 67 - nd = prom_getchild(nd); 68 - while ((nd = prom_getsibling(nd)) != 0) { 69 - err = check_cpu_node(nd, &cur_inst, compare, compare_arg, 70 - prom_node, mid); 66 + for_each_node_by_type(dp, "cpu") { 67 + int err = check_cpu_node(dp->node, &cur_inst, 68 + compare, compare_arg, 69 + prom_node, mid); 71 70 if (!err) 72 71 return 0; 73 72 }
+1 -1
arch/sparc/kernel/irq.c
··· 329 329 disable_pil_irq(irq); 330 330 #ifdef CONFIG_SMP 331 331 /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */ 332 - if(irq < 10) 332 + if((sparc_cpu_model==sun4m) && (irq < 10)) 333 333 smp4m_irq_rotate(cpu); 334 334 #endif 335 335 action = sparc_irq[irq].action;
+31 -3
arch/sparc/kernel/of_device.c
··· 596 596 static int pil_to_sbus[] = { 597 597 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, 598 598 }; 599 - struct device_node *busp = dp->parent; 599 + struct device_node *io_unit, *sbi = dp->parent; 600 600 struct linux_prom_registers *regs; 601 - int board = of_getintprop_default(busp, "board#", 0); 602 - int slot; 601 + int board, slot; 602 + 603 + while (sbi) { 604 + if (!strcmp(sbi->name, "sbi")) 605 + break; 606 + 607 + sbi = sbi->parent; 608 + } 609 + if (!sbi) 610 + goto build_resources; 603 611 604 612 regs = of_get_property(dp, "reg", NULL); 613 + if (!regs) 614 + goto build_resources; 615 + 605 616 slot = regs->which_io; 617 + 618 + /* If SBI's parent is not io-unit or the io-unit lacks 619 + * a "board#" property, something is very wrong. 620 + */ 621 + if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) { 622 + printk("%s: Error, parent is not io-unit.\n", 623 + sbi->full_name); 624 + goto build_resources; 625 + } 626 + io_unit = sbi->parent; 627 + board = of_getintprop_default(io_unit, "board#", -1); 628 + if (board == -1) { 629 + printk("%s: Error, lacks board# property.\n", 630 + io_unit->full_name); 631 + goto build_resources; 632 + } 606 633 607 634 for (i = 0; i < op->num_irqs; i++) { 608 635 int this_irq = op->irqs[i]; ··· 644 617 } 645 618 } 646 619 620 + build_resources: 647 621 build_device_resources(op, parent); 648 622 649 623 op->dev.parent = parent;
+6 -3
arch/sparc/kernel/prom.c
··· 444 444 static struct property *tmp = NULL; 445 445 struct property *p; 446 446 int len; 447 + const char *name; 447 448 448 449 if (tmp) { 449 450 p = tmp; ··· 457 456 458 457 p->name = (char *) (p + 1); 459 458 if (special_name) { 459 + strcpy(p->name, special_name); 460 460 p->length = special_len; 461 461 p->value = prom_early_alloc(special_len); 462 462 memcpy(p->value, special_val, special_len); 463 463 } else { 464 464 if (prev == NULL) { 465 - prom_firstprop(node, p->name); 465 + name = prom_firstprop(node, NULL); 466 466 } else { 467 - prom_nextprop(node, prev, p->name); 467 + name = prom_nextprop(node, prev, NULL); 468 468 } 469 - if (strlen(p->name) == 0) { 469 + if (strlen(name) == 0) { 470 470 tmp = p; 471 471 return NULL; 472 472 } 473 + strcpy(p->name, name); 473 474 p->length = prom_getproplen(node, p->name); 474 475 if (p->length <= 0) { 475 476 p->length = 0;
+88 -6
arch/sparc/kernel/smp.c
··· 87 87 void __init smp_cpus_done(unsigned int max_cpus) 88 88 { 89 89 extern void smp4m_smp_done(void); 90 + extern void smp4d_smp_done(void); 90 91 unsigned long bogosum = 0; 91 92 int cpu, num; 92 93 ··· 101 100 num, bogosum/(500000/HZ), 102 101 (bogosum/(5000/HZ))%100); 103 102 104 - BUG_ON(sparc_cpu_model != sun4m); 105 - smp4m_smp_done(); 103 + switch(sparc_cpu_model) { 104 + case sun4: 105 + printk("SUN4\n"); 106 + BUG(); 107 + break; 108 + case sun4c: 109 + printk("SUN4C\n"); 110 + BUG(); 111 + break; 112 + case sun4m: 113 + smp4m_smp_done(); 114 + break; 115 + case sun4d: 116 + smp4d_smp_done(); 117 + break; 118 + case sun4e: 119 + printk("SUN4E\n"); 120 + BUG(); 121 + break; 122 + case sun4u: 123 + printk("SUN4U\n"); 124 + BUG(); 125 + break; 126 + default: 127 + printk("UNKNOWN!\n"); 128 + BUG(); 129 + break; 130 + }; 106 131 } 107 132 108 133 void cpu_panic(void) ··· 294 267 void __init smp_prepare_cpus(unsigned int max_cpus) 295 268 { 296 269 extern void smp4m_boot_cpus(void); 270 + extern void smp4d_boot_cpus(void); 297 271 int i, cpuid, extra; 298 272 299 - BUG_ON(sparc_cpu_model != sun4m); 300 273 printk("Entering SMP Mode...\n"); 301 274 302 275 extra = 0; ··· 310 283 311 284 smp_store_cpu_info(boot_cpu_id); 312 285 313 - smp4m_boot_cpus(); 286 + switch(sparc_cpu_model) { 287 + case sun4: 288 + printk("SUN4\n"); 289 + BUG(); 290 + break; 291 + case sun4c: 292 + printk("SUN4C\n"); 293 + BUG(); 294 + break; 295 + case sun4m: 296 + smp4m_boot_cpus(); 297 + break; 298 + case sun4d: 299 + smp4d_boot_cpus(); 300 + break; 301 + case sun4e: 302 + printk("SUN4E\n"); 303 + BUG(); 304 + break; 305 + case sun4u: 306 + printk("SUN4U\n"); 307 + BUG(); 308 + break; 309 + default: 310 + printk("UNKNOWN!\n"); 311 + BUG(); 312 + break; 313 + }; 314 314 } 315 315 316 316 /* Set this up early so that things like the scheduler can init ··· 377 323 int __cpuinit __cpu_up(unsigned int cpu) 378 324 { 379 325 extern int smp4m_boot_one_cpu(int); 380 - int ret; 326 + extern int smp4d_boot_one_cpu(int); 327 + int ret=0; 381 328 382 - ret = smp4m_boot_one_cpu(cpu); 329 + switch(sparc_cpu_model) { 330 + case sun4: 331 + printk("SUN4\n"); 332 + BUG(); 333 + break; 334 + case sun4c: 335 + printk("SUN4C\n"); 336 + BUG(); 337 + break; 338 + case sun4m: 339 + ret = smp4m_boot_one_cpu(cpu); 340 + break; 341 + case sun4d: 342 + ret = smp4d_boot_one_cpu(cpu); 343 + break; 344 + case sun4e: 345 + printk("SUN4E\n"); 346 + BUG(); 347 + break; 348 + case sun4u: 349 + printk("SUN4U\n"); 350 + BUG(); 351 + break; 352 + default: 353 + printk("UNKNOWN!\n"); 354 + BUG(); 355 + break; 356 + }; 383 357 384 358 if (!ret) { 385 359 cpu_set(cpu, smp_commenced_mask);
-1
arch/sparc/kernel/sparc_ksyms.c
··· 237 237 EXPORT_SYMBOL(prom_setprop); 238 238 EXPORT_SYMBOL(saved_command_line); 239 239 EXPORT_SYMBOL(prom_apply_obio_ranges); 240 - EXPORT_SYMBOL(prom_getname); 241 240 EXPORT_SYMBOL(prom_feval); 242 241 EXPORT_SYMBOL(prom_getbool); 243 242 EXPORT_SYMBOL(prom_getstring);
+31 -70
arch/sparc/kernel/sun4d_smp.c
··· 43 43 extern void calibrate_delay(void); 44 44 45 45 extern volatile int smp_processors_ready; 46 - extern int smp_num_cpus; 47 46 static int smp_highest_cpu; 48 47 extern volatile unsigned long cpu_callin_map[NR_CPUS]; 49 48 extern cpuinfo_sparc cpu_data[NR_CPUS]; 50 49 extern unsigned char boot_cpu_id; 51 - extern int smp_activated; 52 - extern volatile int __cpu_number_map[NR_CPUS]; 53 - extern volatile int __cpu_logical_map[NR_CPUS]; 54 - extern volatile unsigned long ipi_count; 55 50 extern volatile int smp_process_available; 56 51 57 52 extern cpumask_t smp_commenced_mask; ··· 139 144 spin_lock_irqsave(&sun4d_imsk_lock, flags); 140 145 cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ 141 146 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 147 + cpu_set(cpuid, cpu_online_map); 148 + 142 149 } 143 150 144 151 extern void init_IRQ(void); ··· 157 160 158 161 void __init smp4d_boot_cpus(void) 159 162 { 160 - int cpucount = 0; 161 - int i, mid; 162 - 163 - printk("Entering SMP Mode...\n"); 164 - 165 163 if (boot_cpu_id) 166 164 current_set[0] = NULL; 167 - 168 - local_irq_enable(); 169 - cpus_clear(cpu_present_map); 170 - 171 - /* XXX This whole thing has to go. See sparc64. */ 172 - for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++) 173 - cpu_set(mid, cpu_present_map); 174 - SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0])); 175 - for(i=0; i < NR_CPUS; i++) 176 - __cpu_number_map[i] = -1; 177 - for(i=0; i < NR_CPUS; i++) 178 - __cpu_logical_map[i] = -1; 179 - __cpu_number_map[boot_cpu_id] = 0; 180 - __cpu_logical_map[0] = boot_cpu_id; 181 - current_thread_info()->cpu = boot_cpu_id; 182 - smp_store_cpu_info(boot_cpu_id); 183 165 smp_setup_percpu_timer(); 184 166 local_flush_cache_all(); 185 - if (cpu_find_by_instance(1, NULL, NULL)) 186 - return; /* Not an MP box. */ 187 - SMP_PRINTK(("Iterating over CPUs\n")); 188 - for(i = 0; i < NR_CPUS; i++) { 189 - if(i == boot_cpu_id) 190 - continue; 167 + } 191 168 192 - if (cpu_isset(i, cpu_present_map)) { 169 + int smp4d_boot_one_cpu(int i) 170 + { 193 171 extern unsigned long sun4d_cpu_startup; 194 172 unsigned long *entry = &sun4d_cpu_startup; 195 173 struct task_struct *p; 196 174 int timeout; 197 - int no; 175 + int cpu_node; 198 176 177 + cpu_find_by_instance(i, &cpu_node,NULL); 199 178 /* Cook up an idler for this guy. */ 200 179 p = fork_idle(i); 201 - cpucount++; 202 180 current_set[i] = task_thread_info(p); 203 - for (no = 0; !cpu_find_by_instance(no, NULL, &mid) 204 - && mid != i; no++) ; 205 181 206 182 /* 207 183 * Initialize the contexts table ··· 186 216 smp_penguin_ctable.reg_size = 0; 187 217 188 218 /* whirrr, whirrr, whirrrrrrrrr... */ 189 - SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node)); 219 + SMP_PRINTK(("Starting CPU %d at %p \n", i, entry)); 190 220 local_flush_cache_all(); 191 - prom_startcpu(cpu_data(no).prom_node, 221 + prom_startcpu(cpu_node, 192 222 &smp_penguin_ctable, 0, (char *)entry); 193 223 194 224 SMP_PRINTK(("prom_startcpu returned :)\n")); ··· 200 230 udelay(200); 201 231 } 202 232 203 - if(cpu_callin_map[i]) { 204 - /* Another "Red Snapper". */ 205 - __cpu_number_map[i] = cpucount; 206 - __cpu_logical_map[cpucount] = i; 207 - } else { 208 - cpucount--; 209 - printk("Processor %d is stuck.\n", i); 210 - } 211 - } 212 - if(!(cpu_callin_map[i])) { 213 - cpu_clear(i, cpu_present_map); 214 - __cpu_number_map[i] = -1; 215 - } 233 + if (!(cpu_callin_map[i])) { 234 + printk("Processor %d is stuck.\n", i); 235 + return -ENODEV; 236 + 216 237 } 217 238 local_flush_cache_all(); 218 - if(cpucount == 0) { 219 - printk("Error: only one Processor found.\n"); 220 - cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id()); 221 - } else { 222 - unsigned long bogosum = 0; 223 - 224 - for_each_present_cpu(i) { 225 - bogosum += cpu_data(i).udelay_val; 226 - smp_highest_cpu = i; 239 + return 0; 240 + } 241 + 242 + void __init smp4d_smp_done(void) 243 + { 244 + int i, first; 245 + int *prev; 246 + 247 + /* setup cpu list for irq rotation */ 248 + first = 0; 249 + prev = &first; 250 + for (i = 0; i < NR_CPUS; i++) 251 + if (cpu_online(i)) { 252 + *prev = i; 253 + prev = &cpu_data(i).next; 227 254 } 228 - SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); 229 - printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", 230 - cpucount + 1, 231 - bogosum/(500000/HZ), 232 - (bogosum/(5000/HZ))%100); 233 - smp_activated = 1; 234 - smp_num_cpus = cpucount + 1; 235 - } 255 + *prev = first; 256 + local_flush_cache_all(); 236 257 237 258 /* Free unneeded trap tables */ 238 259 ClearPageReserved(virt_to_page(trapbase_cpu1)); ··· 295 334 register int i; 296 335 297 336 mask = cpumask_of_cpu(hard_smp4d_processor_id()); 298 - cpus_andnot(mask, cpu_present_map, mask); 337 + cpus_andnot(mask, cpu_online_map, mask); 299 338 for(i = 0; i <= high; i++) { 300 339 if (cpu_isset(i, mask)) { 301 340 ccall_info.processors_in[i] = 0;
+9 -9
arch/sparc/kernel/sys_sparc.c
··· 465 465 466 466 asmlinkage int sys_getdomainname(char __user *name, int len) 467 467 { 468 - int nlen; 469 - int err = -EFAULT; 468 + int nlen, err; 470 469 470 + if (len < 0 || len > __NEW_UTS_LEN) 471 + return -EINVAL; 472 + 471 473 down_read(&uts_sem); 472 474 473 475 nlen = strlen(system_utsname.domainname) + 1; 474 - 475 476 if (nlen < len) 476 477 len = nlen; 477 - if (len > __NEW_UTS_LEN) 478 - goto done; 479 - if (copy_to_user(name, system_utsname.domainname, len)) 480 - goto done; 481 - err = 0; 482 - done: 478 + 479 + err = -EFAULT; 480 + if (!copy_to_user(name, system_utsname.domainname, len)) 481 + err = 0; 482 + 483 483 up_read(&uts_sem); 484 484 return err; 485 485 }
+1
arch/sparc/mm/io-unit.c
··· 64 64 65 65 sbus->iommu = (struct iommu_struct *)iounit; 66 66 iounit->page_table = xpt; 67 + spin_lock_init(&iounit->lock); 67 68 68 69 for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); 69 70 xpt < xptend;)
-18
arch/sparc/prom/tree.c
··· 205 205 return 0; 206 206 } 207 207 208 - /* Gets name in the form prom v2+ uses it (name@x,yyyyy or name (if no reg)) */ 209 - int prom_getname (int node, char *buffer, int len) 210 - { 211 - int i; 212 - struct linux_prom_registers reg[PROMREG_MAX]; 213 - 214 - i = prom_getproperty (node, "name", buffer, len); 215 - if (i <= 0) return -1; 216 - buffer [i] = 0; 217 - len -= i; 218 - i = prom_getproperty (node, "reg", (char *)reg, sizeof (reg)); 219 - if (i <= 0) return 0; 220 - if (len < 11) return -1; 221 - buffer = strchr (buffer, 0); 222 - sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr); 223 - return 0; 224 - } 225 - 226 208 /* Interal version of nextprop that does not alter return values. */ 227 209 char * __prom_nextprop(int node, char * oprop) 228 210 {
+4 -4
arch/sparc64/defconfig
··· 1 1 # 2 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.18-rc1 4 - # Wed Jul 12 14:00:58 2006 3 + # Linux kernel version: 2.6.18-rc2 4 + # Fri Jul 21 14:19:24 2006 5 5 # 6 6 CONFIG_SPARC=y 7 7 CONFIG_SPARC64=y ··· 36 36 CONFIG_SYSVIPC=y 37 37 CONFIG_POSIX_MQUEUE=y 38 38 # CONFIG_BSD_PROCESS_ACCT is not set 39 + # CONFIG_TASKSTATS is not set 39 40 CONFIG_SYSCTL=y 40 41 # CONFIG_AUDIT is not set 41 42 # CONFIG_IKCONFIG is not set ··· 1121 1120 # CONFIG_USB_LEGOTOWER is not set 1122 1121 # CONFIG_USB_LCD is not set 1123 1122 # CONFIG_USB_LED is not set 1124 - # CONFIG_USB_CY7C63 is not set 1123 + # CONFIG_USB_CYPRESS_CY7C63 is not set 1125 1124 # CONFIG_USB_CYTHERM is not set 1126 1125 # CONFIG_USB_PHIDGETKIT is not set 1127 1126 # CONFIG_USB_PHIDGETSERVO is not set ··· 1280 1279 # CONFIG_NFSD is not set 1281 1280 # CONFIG_SMB_FS is not set 1282 1281 # CONFIG_CIFS is not set 1283 - # CONFIG_CIFS_DEBUG2 is not set 1284 1282 # CONFIG_NCP_FS is not set 1285 1283 # CONFIG_CODA_FS is not set 1286 1284 # CONFIG_AFS_FS is not set
-3
arch/sparc64/kernel/devices.c
··· 66 66 void *compare_arg, 67 67 struct device_node **dev_node, int *mid) 68 68 { 69 - if (strcmp(dp->type, "cpu")) 70 - return -ENODEV; 71 - 72 69 if (!compare(dp, *cur_inst, compare_arg)) { 73 70 if (dev_node) 74 71 *dev_node = dp;
+32 -2
arch/sparc64/kernel/of_device.c
··· 542 542 /* Convert to num-cells. */ 543 543 num_reg /= 4; 544 544 545 - /* Conver to num-entries. */ 545 + /* Convert to num-entries. */ 546 546 num_reg /= na + ns; 547 + 548 + /* Prevent overruning the op->resources[] array. */ 549 + if (num_reg > PROMREG_MAX) { 550 + printk(KERN_WARNING "%s: Too many regs (%d), " 551 + "limiting to %d.\n", 552 + op->node->full_name, num_reg, PROMREG_MAX); 553 + num_reg = PROMREG_MAX; 554 + } 547 555 548 556 for (index = 0; index < num_reg; index++) { 549 557 struct resource *r = &op->resource[index]; ··· 658 650 next: 659 651 imap += (na + 3); 660 652 } 661 - if (i == imlen) 653 + if (i == imlen) { 654 + /* Psycho and Sabre PCI controllers can have 'interrupt-map' 655 + * properties that do not include the on-board device 656 + * interrupts. Instead, the device's 'interrupts' property 657 + * is already a fully specified INO value. 658 + * 659 + * Handle this by deciding that, if we didn't get a 660 + * match in the parent's 'interrupt-map', and the 661 + * parent is an IRQ translater, then use the parent as 662 + * our IRQ controller. 663 + */ 664 + if (pp->irq_trans) 665 + return pp; 666 + 662 667 return NULL; 668 + } 663 669 664 670 *irq_p = irq; 665 671 cp = of_find_node_by_phandle(handle); ··· 823 801 op->num_irqs = len / 4; 824 802 } else { 825 803 op->num_irqs = 0; 804 + } 805 + 806 + /* Prevent overruning the op->irqs[] array. */ 807 + if (op->num_irqs > PROMINTR_MAX) { 808 + printk(KERN_WARNING "%s: Too many irqs (%d), " 809 + "limiting to %d.\n", 810 + dp->full_name, op->num_irqs, PROMINTR_MAX); 811 + op->num_irqs = PROMINTR_MAX; 826 812 } 827 813 828 814 build_device_resources(op, parent);
+10 -2
arch/sparc64/kernel/prom.c
··· 344 344 /*0x2f*/ PSYCHO_IMAP_CE, 345 345 /*0x30*/ PSYCHO_IMAP_A_ERR, 346 346 /*0x31*/ PSYCHO_IMAP_B_ERR, 347 - /*0x32*/ PSYCHO_IMAP_PMGMT 347 + /*0x32*/ PSYCHO_IMAP_PMGMT, 348 + /*0x33*/ PSYCHO_IMAP_GFX, 349 + /*0x34*/ PSYCHO_IMAP_EUPA, 348 350 }; 349 351 #define PSYCHO_ONBOARD_IRQ_BASE 0x20 350 - #define PSYCHO_ONBOARD_IRQ_LAST 0x32 352 + #define PSYCHO_ONBOARD_IRQ_LAST 0x34 351 353 #define psycho_onboard_imap_offset(__ino) \ 352 354 __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE] 353 355 ··· 531 529 /*0x2e*/ SABRE_IMAP_UE, 532 530 /*0x2f*/ SABRE_IMAP_CE, 533 531 /*0x30*/ SABRE_IMAP_PCIERR, 532 + /*0x31*/ 0 /* reserved */, 533 + /*0x32*/ 0 /* reserved */, 534 + /*0x33*/ SABRE_IMAP_GFX, 535 + /*0x34*/ SABRE_IMAP_EUPA, 534 536 }; 535 537 #define SABRE_ONBOARD_IRQ_BASE 0x20 536 538 #define SABRE_ONBOARD_IRQ_LAST 0x30 ··· 901 895 SYSIO_IMAP_CE, 902 896 SYSIO_IMAP_SBERR, 903 897 SYSIO_IMAP_PMGMT, 898 + SYSIO_IMAP_GFX, 899 + SYSIO_IMAP_EUPA, 904 900 }; 905 901 906 902 #undef bogon
-1
arch/sparc64/kernel/sparc64_ksyms.c
··· 254 254 EXPORT_SYMBOL(prom_node_has_property); 255 255 EXPORT_SYMBOL(prom_setprop); 256 256 EXPORT_SYMBOL(saved_command_line); 257 - EXPORT_SYMBOL(prom_getname); 258 257 EXPORT_SYMBOL(prom_finddevice); 259 258 EXPORT_SYMBOL(prom_feval); 260 259 EXPORT_SYMBOL(prom_getbool);
+9 -9
arch/sparc64/kernel/sys_sparc.c
··· 701 701 702 702 asmlinkage long sys_getdomainname(char __user *name, int len) 703 703 { 704 - int nlen; 705 - int err = -EFAULT; 704 + int nlen, err; 705 + 706 + if (len < 0 || len > __NEW_UTS_LEN) 707 + return -EINVAL; 706 708 707 709 down_read(&uts_sem); 708 710 709 711 nlen = strlen(system_utsname.domainname) + 1; 710 - 711 712 if (nlen < len) 712 713 len = nlen; 713 - if (len > __NEW_UTS_LEN) 714 - goto done; 715 - if (copy_to_user(name, system_utsname.domainname, len)) 716 - goto done; 717 - err = 0; 718 - done: 714 + 715 + err = -EFAULT; 716 + if (!copy_to_user(name, system_utsname.domainname, len)) 717 + err = 0; 718 + 719 719 up_read(&uts_sem); 720 720 return err; 721 721 }
-85
arch/sparc64/prom/tree.c
··· 193 193 return 0; 194 194 } 195 195 196 - /* Gets name in the {name@x,yyyyy|name (if no reg)} form */ 197 - int 198 - prom_getname (int node, char *buffer, int len) 199 - { 200 - int i, sbus = 0; 201 - int pci = 0, ebus = 0, ide = 0; 202 - struct linux_prom_registers *reg; 203 - struct linux_prom64_registers reg64[PROMREG_MAX]; 204 - 205 - for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) { 206 - i = prom_getproperty (sbus, "name", buffer, len); 207 - if (i > 0) { 208 - buffer [i] = 0; 209 - if (!strcmp (buffer, "sbus")) 210 - goto getit; 211 - } 212 - } 213 - if ((pci = prom_getparent (node))) { 214 - i = prom_getproperty (pci, "name", buffer, len); 215 - if (i > 0) { 216 - buffer [i] = 0; 217 - if (!strcmp (buffer, "pci")) 218 - goto getit; 219 - } 220 - pci = 0; 221 - } 222 - if ((ebus = prom_getparent (node))) { 223 - i = prom_getproperty (ebus, "name", buffer, len); 224 - if (i > 0) { 225 - buffer[i] = 0; 226 - if (!strcmp (buffer, "ebus")) 227 - goto getit; 228 - } 229 - ebus = 0; 230 - } 231 - if ((ide = prom_getparent (node))) { 232 - i = prom_getproperty (ide, "name", buffer, len); 233 - if (i > 0) { 234 - buffer [i] = 0; 235 - if (!strcmp (buffer, "ide")) 236 - goto getit; 237 - } 238 - ide = 0; 239 - } 240 - getit: 241 - i = prom_getproperty (node, "name", buffer, len); 242 - if (i <= 0) { 243 - buffer [0] = 0; 244 - return -1; 245 - } 246 - buffer [i] = 0; 247 - len -= i; 248 - i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64)); 249 - if (i <= 0) return 0; 250 - if (len < 16) return -1; 251 - buffer = strchr (buffer, 0); 252 - if (sbus) { 253 - reg = (struct linux_prom_registers *)reg64; 254 - sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr); 255 - } else if (pci) { 256 - int dev, fn; 257 - reg = (struct linux_prom_registers *)reg64; 258 - fn = (reg[0].which_io >> 8) & 0x07; 259 - dev = (reg[0].which_io >> 11) & 0x1f; 260 - if (fn) 261 - sprintf (buffer, "@%x,%x", dev, fn); 262 - else 263 - sprintf (buffer, "@%x", dev); 264 - } else if (ebus) { 265 - reg = (struct linux_prom_registers *)reg64; 266 - sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr); 267 - } else if (ide) { 268 - reg = (struct linux_prom_registers *)reg64; 269 - sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr); 270 - } else if (i == 4) { /* Happens on 8042's children on Ultra/PCI. */ 271 - reg = (struct linux_prom_registers *)reg64; 272 - sprintf (buffer, "@%x", reg[0].which_io); 273 - } else { 274 - sprintf (buffer, "@%x,%x", 275 - (unsigned int)(reg64[0].phys_addr >> 36), 276 - (unsigned int)(reg64[0].phys_addr)); 277 - } 278 - return 0; 279 - } 280 - 281 196 /* Return the first property type for node 'node'. 282 197 * buffer should be at least 32B in length 283 198 */
+10 -4
drivers/char/synclink.c
··· 1344 1344 } else 1345 1345 info->input_signal_events.dcd_down++; 1346 1346 #ifdef CONFIG_HDLC 1347 - if (info->netcount) 1348 - hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev); 1347 + if (info->netcount) { 1348 + if (status & MISCSTATUS_DCD) 1349 + netif_carrier_on(info->netdev); 1350 + else 1351 + netif_carrier_off(info->netdev); 1352 + } 1349 1353 #endif 1350 1354 } 1351 1355 if (status & MISCSTATUS_CTS_LATCHED) ··· 7848 7844 spin_lock_irqsave(&info->irq_spinlock, flags); 7849 7845 usc_get_serial_signals(info); 7850 7846 spin_unlock_irqrestore(&info->irq_spinlock, flags); 7851 - hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); 7852 - 7847 + if (info->serial_signals & SerialSignal_DCD) 7848 + netif_carrier_on(dev); 7849 + else 7850 + netif_carrier_off(dev); 7853 7851 return 0; 7854 7852 } 7855 7853
+10 -4
drivers/char/synclinkmp.c
··· 1752 1752 spin_lock_irqsave(&info->lock, flags); 1753 1753 get_signals(info); 1754 1754 spin_unlock_irqrestore(&info->lock, flags); 1755 - hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); 1756 - 1755 + if (info->serial_signals & SerialSignal_DCD) 1756 + netif_carrier_on(dev); 1757 + else 1758 + netif_carrier_off(dev); 1757 1759 return 0; 1758 1760 } 1759 1761 ··· 2524 2522 } else 2525 2523 info->input_signal_events.dcd_down++; 2526 2524 #ifdef CONFIG_HDLC 2527 - if (info->netcount) 2528 - hdlc_set_carrier(status & SerialSignal_DCD, info->netdev); 2525 + if (info->netcount) { 2526 + if (status & SerialSignal_DCD) 2527 + netif_carrier_on(info->netdev); 2528 + else 2529 + netif_carrier_off(info->netdev); 2530 + } 2529 2531 #endif 2530 2532 } 2531 2533 if (status & MISCSTATUS_CTS_LATCHED)
+10 -6
drivers/cpufreq/cpufreq_ondemand.c
··· 239 239 total_ticks = (unsigned int) cputime64_sub(cur_jiffies, 240 240 this_dbs_info->prev_cpu_wall); 241 241 this_dbs_info->prev_cpu_wall = cur_jiffies; 242 + if (!total_ticks) 243 + return; 242 244 /* 243 245 * Every sampling_rate, we check, if current idle time is less 244 246 * than 20% (default), then we try to increase frequency ··· 306 304 unsigned int cpu = smp_processor_id(); 307 305 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 308 306 307 + if (!dbs_info->enable) 308 + return; 309 + 309 310 dbs_check_cpu(dbs_info); 310 311 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 311 312 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); ··· 324 319 return; 325 320 } 326 321 327 - static inline void dbs_timer_exit(unsigned int cpu) 322 + static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 328 323 { 329 - struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 330 - 331 - cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work); 324 + dbs_info->enable = 0; 325 + cancel_delayed_work(&dbs_info->work); 326 + flush_workqueue(kondemand_wq); 332 327 } 333 328 334 329 static int cpufreq_governor_dbs(struct cpufreq_policy *policy, ··· 401 396 402 397 case CPUFREQ_GOV_STOP: 403 398 mutex_lock(&dbs_mutex); 404 - dbs_timer_exit(policy->cpu); 405 - this_dbs_info->enable = 0; 399 + dbs_timer_exit(this_dbs_info); 406 400 sysfs_remove_group(&policy->kobj, &dbs_attr_group); 407 401 dbs_enable--; 408 402 if (dbs_enable == 0)
+1 -1
drivers/dma/ioatdma.c
··· 828 828 /* if forced, worst case is that rmmod hangs */ 829 829 __unsafe(THIS_MODULE); 830 830 831 - return pci_module_init(&ioat_pci_drv); 831 + return pci_register_driver(&ioat_pci_drv); 832 832 } 833 833 834 834 module_init(ioat_init_module);
+2 -2
drivers/fc4/fc.c
··· 429 429 430 430 if (fcmd->data) { 431 431 if (SCpnt->use_sg) 432 - dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->buffer, 432 + dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->request_buffer, 433 433 SCpnt->use_sg, 434 434 SCpnt->sc_data_direction); 435 435 else ··· 810 810 SCpnt->request_bufflen, 811 811 SCpnt->sc_data_direction); 812 812 } else { 813 - struct scatterlist *sg = (struct scatterlist *)SCpnt->buffer; 813 + struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer; 814 814 int nents; 815 815 816 816 FCD(("XXX: Use_sg %d %d\n", SCpnt->use_sg, sg->length))
-2
drivers/message/fusion/Kconfig
··· 48 48 List of supported controllers: 49 49 50 50 LSISAS1064 51 - LSISAS1066 52 51 LSISAS1068 53 52 LSISAS1064E 54 - LSISAS1066E 55 53 LSISAS1068E 56 54 57 55 config FUSION_MAX_SGE
-1
drivers/message/fusion/Makefile
··· 9 9 #EXTRA_CFLAGS += -DMPT_DEBUG_EXIT 10 10 #EXTRA_CFLAGS += -DMPT_DEBUG_FAIL 11 11 12 - 13 12 # 14 13 # driver/module specifics... 15 14 #
+64 -35
drivers/message/fusion/mptbase.c
··· 436 436 */ 437 437 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) { 438 438 freereq = 0; 439 - devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n", 440 - ioc->name, pEvReply)); 441 439 } else { 442 440 devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", 443 441 ioc->name, pEvReply)); ··· 676 678 mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx) 677 679 { 678 680 MPT_ADAPTER *ioc; 681 + const struct pci_device_id *id; 679 682 680 - if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) { 683 + if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) 681 684 return -EINVAL; 682 - } 683 685 684 686 MptDeviceDriverHandlers[cb_idx] = dd_cbfunc; 685 687 686 688 /* call per pci device probe entry point */ 687 689 list_for_each_entry(ioc, &ioc_list, list) { 688 - if(dd_cbfunc->probe) { 689 - dd_cbfunc->probe(ioc->pcidev, 690 - ioc->pcidev->driver->id_table); 691 - } 690 + id = ioc->pcidev->driver ? 691 + ioc->pcidev->driver->id_table : NULL; 692 + if (dd_cbfunc->probe) 693 + dd_cbfunc->probe(ioc->pcidev, id); 692 694 } 693 695 694 696 return 0; ··· 1054 1056 1055 1057 dinitprintk((MYIOC_s_INFO_FMT 1056 1058 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n", 1057 - ioc->name, 1058 - ioc->HostPageBuffer, 1059 - ioc->HostPageBuffer_dma, 1059 + ioc->name, ioc->HostPageBuffer, 1060 + (u32)ioc->HostPageBuffer_dma, 1060 1061 host_page_buffer_sz)); 1061 1062 ioc->alloc_total += host_page_buffer_sz; 1062 1063 ioc->HostPageBuffer_sz = host_page_buffer_sz; ··· 1377 1380 printk(KERN_WARNING MYNAM 1378 1381 ": WARNING - %s did not initialize properly! (%d)\n", 1379 1382 ioc->name, r); 1383 + 1380 1384 list_del(&ioc->list); 1381 1385 if (ioc->alt_ioc) 1382 1386 ioc->alt_ioc->alt_ioc = NULL; ··· 1760 1762 * chips (mpt_adapter_disable, 1761 1763 * mpt_diag_reset) 1762 1764 */ 1763 - ioc->cached_fw = NULL; 1764 1765 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n", 1765 1766 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw)); 1767 + ioc->alt_ioc->cached_fw = NULL; 1766 1768 } 1767 1769 } else { 1768 1770 printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); ··· 1883 1885 /* FIXME? Examine results here? */ 1884 1886 } 1885 1887 1886 - out: 1888 + out: 1887 1889 if ((ret != 0) && irq_allocated) { 1888 1890 free_irq(ioc->pci_irq, ioc); 1889 1891 if (mpt_msi_enable) ··· 2668 2670 dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", 2669 2671 ioc->name, count)); 2670 2672 2673 + ioc->aen_event_read_flag=0; 2671 2674 return r; 2672 2675 } ··· 2736 2737 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) { 2737 2738 ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */ 2738 2739 ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma; 2740 + ioc->alloc_total += size; 2741 + ioc->alt_ioc->alloc_total -= size; 2739 2742 } else { 2740 2743 if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) ) 2741 2744 ioc->alloc_total += size; ··· 3167 3166 static int 3168 3167 mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) 3169 3168 { 3169 + MPT_ADAPTER *iocp=NULL; 3170 3170 u32 diag0val; 3171 3171 u32 doorbell; 3172 3172 int hard_reset_done = 0; ··· 3303 3301 /* FIXME? Examine results here? */ 3304 3302 } 3305 3303 3306 - if (ioc->cached_fw) { 3304 + if (ioc->cached_fw) 3305 + iocp = ioc; 3306 + else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) 3307 + iocp = ioc->alt_ioc; 3308 + if (iocp) { 3307 3309 /* If the DownloadBoot operation fails, the 3308 3310 * IOC will be left unusable. This is a fatal error 3309 3311 * case. _diag_reset will return < 0 3310 3312 */ 3311 3313 for (count = 0; count < 30; count ++) { 3312 - diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3314 + diag0val = CHIPREG_READ32(&iocp->chip->Diagnostic); 3313 3315 if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { 3314 3316 break; 3315 3317 } 3316 3318 3319 + dprintk((MYIOC_s_INFO_FMT "cached_fw: diag0val=%x count=%d\n", 3320 + iocp->name, diag0val, count)); 3317 3321 /* wait 1 sec */ 3318 3322 if (sleepFlag == CAN_SLEEP) { 3319 3323 msleep (1000); ··· 3328 3320 } 3329 3321 } 3330 3322 if ((count = mpt_downloadboot(ioc, 3331 - (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) { 3323 + (MpiFwHeader_t *)iocp->cached_fw, sleepFlag)) < 0) { 3332 3324 printk(KERN_WARNING MYNAM 3333 3325 ": firmware downloadboot failure (%d)!\n", count); 3334 3326 } ··· 3915 3907 3916 3908 if (sleepFlag == CAN_SLEEP) { 3917 3909 while (--cntdn) { 3910 + msleep (1); 3918 3911 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3919 3912 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 3920 3913 break; 3921 - msleep (1); 3922 3914 count++; 3923 3915 } 3924 3916 } else { 3925 3917 while (--cntdn) { 3918 + mdelay (1); 3926 3919 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3927 3920 if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) 3928 3921 break; 3929 - mdelay (1); 3930 3922 count++; 3931 3923 } 3932 3924 } ··· 4891 4883 pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma); 4892 4884 if (!pIoc4) 4893 4885 return; 4886 + ioc->alloc_total += iocpage4sz; 4894 4887 } else { 4895 4888 ioc4_dma = ioc->spi_data.IocPg4_dma; 4896 4889 iocpage4sz = ioc->spi_data.IocPg4Sz; ··· 4908 4899 } else { 4909 4900 pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma); 4910 4901 ioc->spi_data.pIocPg4 = NULL; 4902 + ioc->alloc_total -= iocpage4sz; 4911 4903 } 4912 4904 } ··· 5040 5030 EventAck_t *pAck; 5041 5031 5042 5032 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5043 - printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK " 5044 - "request frame for Event=%x EventContext=%x EventData=%x!\n", 5045 - ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext), 5046 - le32_to_cpu(evnp->Data[0])); 5033 + dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 5034 + ioc->name,__FUNCTION__)); 5047 5035 return -1; 5048 5036 } 5049 - memset(pAck, 0, sizeof(*pAck)); 5050 5037 5051 - dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); 5038 + devtverboseprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); 5052 5039 5053 5040 pAck->Function = MPI_FUNCTION_EVENT_ACK; 5054 5041 pAck->ChainOffset = 0; 5042 + pAck->Reserved[0] = pAck->Reserved[1] = 0; 5055 5043 pAck->MsgFlags = 0; 5044 + pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0; 5056 5045 pAck->Event = evnp->Event; 5057 5046 pAck->EventContext = evnp->EventContext; ··· 5713 5704 break; 5714 5705 case MPI_EVENT_EVENT_CHANGE: 5715 5706 if (evData0) 5716 - ds = "Events(ON) Change"; 5707 + ds = "Events ON"; 5717 5708 else 5718 - ds = "Events(OFF) Change"; 5709 + ds = "Events OFF"; 5719 5710 break; 5720 5711 case MPI_EVENT_INTEGRATED_RAID: 5721 5712 { ··· 5786 5777 break; 5787 5778 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 5788 5779 snprintf(evStr, EVENT_DESCR_STR_SZ, 5789 - "SAS Device Status Change: No Persistancy " 5790 - "Added: id=%d", id); 5780 + "SAS Device Status Change: No Persistancy: id=%d", id); 5781 + break; 5782 + case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 5783 + snprintf(evStr, EVENT_DESCR_STR_SZ, 5784 + "SAS Device Status Change: Internal Device Reset : id=%d", id); 5785 + break; 5786 + case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: 5787 + snprintf(evStr, EVENT_DESCR_STR_SZ, 5788 + "SAS Device Status Change: Internal Task Abort : id=%d", id); 5789 + break; 5790 + case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: 5791 + snprintf(evStr, EVENT_DESCR_STR_SZ, 5792 + "SAS Device Status Change: Internal Abort Task Set : id=%d", id); 5793 + break; 5794 + case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: 5795 + snprintf(evStr, EVENT_DESCR_STR_SZ, 5796 + "SAS Device Status Change: Internal Clear Task Set : id=%d", id); 5797 + break; 5798 + case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: 5799 + snprintf(evStr, EVENT_DESCR_STR_SZ, 5800 + "SAS Device Status Change: Internal Query Task : id=%d", id); 5791 5801 break; 5792 5802 default: 5793 5803 snprintf(evStr, EVENT_DESCR_STR_SZ, ··· 6062 6034 * @ioc: Pointer to MPT_ADAPTER structure 6063 6035 * @log_info: U32 LogInfo reply word from the IOC 6064 6036 * 6065 - * Refer to lsi/fc_log.h. 6037 + * Refer to lsi/mpi_log_fc.h. 6066 6038 */ 6067 6039 static void 6068 6040 mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info) ··· 6159 6131 "Invalid SAS Address", /* 01h */ 6160 6132 NULL, /* 02h */ 6161 6133 "Invalid Page", /* 03h */ 6162 - NULL, /* 04h */ 6163 - "Task Terminated" /* 05h */ 6134 + "Diag Message Error", /* 04h */ 6135 + "Task Terminated", /* 05h */ 6136 + "Enclosure Management", /* 06h */ 6137 + "Target Mode" /* 07h */ 6164 6138 }; 6165 6139 static char *pl_code_str[] = { 6166 6140 NULL, /* 00h */ ··· 6188 6158 "IO Executed", /* 14h */ 6189 6159 "Persistant Reservation Out Not Affiliation Owner", /* 15h */ 6190 6160 "Open Transmit DMA Abort", /* 16h */ 6191 - NULL, /* 17h */ 6161 + "IO Device Missing Delay Retry", /* 17h */ 6192 6162 NULL, /* 18h */ 6193 6163 NULL, /* 19h */ 6194 6164 NULL, /* 1Ah */ ··· 6268 6238 mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf) 6269 6239 { 6270 6240 u32 status = ioc_status & MPI_IOCSTATUS_MASK; 6271 - char *desc = ""; 6241 + char *desc = NULL; 6272 6242 6273 6243 switch (status) { 6274 6244 case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */ ··· 6378 6348 desc = "Others"; 6379 6349 break; 6380 6350 } 6381 - if (desc != "") 6351 + if (desc != NULL) 6382 6352 printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc); 6383 6353 } ··· 6415 6385 EXPORT_SYMBOL(mpt_alloc_fw_memory); 6416 6386 EXPORT_SYMBOL(mpt_free_fw_memory); 6417 6387 EXPORT_SYMBOL(mptbase_sas_persist_operation); 6418 - 6419 6388 6420 6389 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6421 6390 /*
+6 -7
drivers/message/fusion/mptbase.h
··· 75 75 #define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 76 76 #endif 77 77 78 - #define MPT_LINUX_VERSION_COMMON "3.04.00" 79 - #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.00" 78 + #define MPT_LINUX_VERSION_COMMON "3.04.01" 79 + #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.01" 80 80 #define WHAT_MAGIC_STRING "@" "(" "#" ")" 81 81 82 82 #define show_mptmod_ver(s,ver) \ ··· 307 307 u32 HostIndex; /* 50 Host Index register */ 308 308 u32 Reserved4[15]; /* 54-8F */ 309 309 u32 Fubar; /* 90 For Fubar usage */ 310 - u32 Reserved5[1050];/* 94-10F8 */ 311 - u32 Reset_1078; /* 10FC Reset 1078 */ 310 + u32 Reserved5[1050];/* 94-10F8 */ 311 + u32 Reset_1078; /* 10FC Reset 1078 */ 312 312 } SYSIF_REGS; 313 313 314 314 /* ··· 363 363 #define MPT_TARGET_FLAGS_VALID_56 0x10 364 364 #define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20 365 365 #define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40 366 + #define MPT_TARGET_FLAGS_LED_ON 0x80 366 367 367 368 /* 368 369 * /proc/mpt interface ··· 635 634 u16 handle; 636 635 int sas_index; /* index refrencing */ 637 636 MPT_SAS_MGMT sas_mgmt; 638 - int num_ports; 639 637 struct work_struct sas_persist_task; 640 638 641 639 struct work_struct fc_setup_reset_work; ··· 644 644 struct work_struct fc_rescan_work; 645 645 char fc_rescan_work_q_name[KOBJ_NAME_LEN]; 646 646 struct workqueue_struct *fc_rescan_work_q; 647 - u8 port_serial_number; 648 647 } MPT_ADAPTER; 649 648 650 649 /* ··· 981 982 wait_queue_head_t scandv_waitq; 982 983 int scandv_wait_done; 983 984 long last_queue_full; 984 - u8 mpt_pq_filter; 985 + u16 tm_iocstatus; 985 986 } MPT_SCSI_HOST; 986 987 987 988 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+2 -2
drivers/message/fusion/mptctl.c
··· 2332 2332 } 2333 2333 2334 2334 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2335 - /* Prototype Routine for the HP HOST INFO command. 2335 + /* Prototype Routine for the HOST INFO command. 2336 2336 * 2337 2337 * Outputs: None. 2338 2338 * Return: 0 if successful ··· 2568 2568 } 2569 2569 2570 2570 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2571 - /* Prototype Routine for the HP TARGET INFO command. 2571 + /* Prototype Routine for the TARGET INFO command. 2572 2572 * 2573 2573 * Outputs: None. 2574 2574 * Return: 0 if successful
-5
drivers/message/fusion/mptctl.h
··· 354 354 355 355 356 356 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 357 - /* 358 - * HP Specific IOCTL Defines and Structures 359 - */ 360 357 361 358 #define CPQFCTS_IOC_MAGIC 'Z' 362 359 #define HP_IOC_MAGIC 'Z' ··· 361 364 #define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t) 362 365 #define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t) 363 366 364 - /* All HP IOCTLs must include this header 365 - */ 366 367 typedef struct _hp_header { 367 368 unsigned int iocnum; 368 369 unsigned int host;
+1 -13
drivers/message/fusion/mptfc.c
··· 77 77 MODULE_LICENSE("GPL"); 78 78 79 79 /* Command line args */ 80 - static int mpt_pq_filter = 0; 81 - module_param(mpt_pq_filter, int, 0); 82 - MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)"); 83 - 84 80 #define MPTFC_DEV_LOSS_TMO (60) 85 81 static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */ 86 82 module_param(mptfc_dev_loss_tmo, int, 0); ··· 509 513 510 514 if (vtarget->num_luns == 0) { 511 515 vtarget->ioc_id = hd->ioc->id; 512 - vtarget->tflags = MPT_TARGET_FLAGS_Q_YES | 513 - MPT_TARGET_FLAGS_VALID_INQUIRY; 516 + vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; 514 517 hd->Targets[sdev->id] = vtarget; 515 518 } 516 519 ··· 1123 1128 init_timer(&hd->timer); 1124 1129 hd->timer.data = (unsigned long) hd; 1125 1130 hd->timer.function = mptscsih_timer_expired; 1126 - 1127 - hd->mpt_pq_filter = mpt_pq_filter; 1128 - 1129 - ddvprintk((MYIOC_s_INFO_FMT 1130 - "mpt_pq_filter %x\n", 1131 - ioc->name, 1132 - mpt_pq_filter)); 1133 1131 1134 1132 init_waitqueue_head(&hd->scandv_waitq); 1135 1133 hd->scandv_wait_done = 0;
+38 -71
drivers/message/fusion/mptsas.c
··· 67 67 #define my_VERSION MPT_LINUX_VERSION_COMMON 68 68 #define MYNAM "mptsas" 69 69 70 + /* 71 + * Reserved channel for integrated raid 72 + */ 73 + #define MPTSAS_RAID_CHANNEL 1 74 + 70 75 MODULE_AUTHOR(MODULEAUTHOR); 71 76 MODULE_DESCRIPTION(my_NAME); 72 77 MODULE_LICENSE("GPL"); 73 78 74 - static int mpt_pq_filter; 75 - module_param(mpt_pq_filter, int, 0); 76 - MODULE_PARM_DESC(mpt_pq_filter, 77 - "Enable peripheral qualifier filter: enable=1 " 78 - "(default=0)"); 79 - 80 79 static int mpt_pt_clear; 81 80 module_param(mpt_pt_clear, int, 0); 82 81 MODULE_PARM_DESC(mpt_pt_clear, 83 - "Clear persistency table: enable=1 " 82 + " Clear persistency table: enable=1 " 84 83 "(default=MPTSCSIH_PT_CLEAR=0)"); 85 84 86 85 static int mptsasDoneCtx = -1; ··· 143 144 * Specific details on ports, wide/narrow 144 145 */ 145 146 struct mptsas_portinfo_details{ 146 - u8 port_id; /* port number provided to transport */ 147 147 u16 num_phys; /* number of phys belong to this port */ 148 148 u64 phy_bitmask; /* TODO, extend support for 255 phys */ 149 149 struct sas_rphy *rphy; /* transport layer rphy object */ ··· 348 350 port_info = port_details->port_info; 349 351 phy_info = port_info->phy_info; 350 352 351 - dsaswideprintk((KERN_DEBUG "%s: [%p]: port=%02d num_phys=%02d " 353 + dsaswideprintk((KERN_DEBUG "%s: [%p]: num_phys=%02d " 352 354 "bitmask=0x%016llX\n", 353 - __FUNCTION__, port_details, port_details->port_id, 354 - port_details->num_phys, port_details->phy_bitmask)); 355 + __FUNCTION__, port_details, port_details->num_phys, 356 + port_details->phy_bitmask)); 355 357 356 358 for (i = 0; i < port_info->num_phys; i++, phy_info++) { 357 359 if(phy_info->port_details != port_details) ··· 460 462 * phy be removed by firmware events. 461 463 */ 462 464 dsaswideprintk((KERN_DEBUG 463 - "%s: [%p]: port=%d deleting phy = %d\n", 464 - __FUNCTION__, port_details, 465 - port_details->port_id, i)); 465 + "%s: [%p]: deleting phy = %d\n", 466 + __FUNCTION__, port_details, i)); 466 467 port_details->num_phys--; 467 468 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id); 468 469 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); ··· 490 493 goto out; 491 494 port_details->num_phys = 1; 492 495 port_details->port_info = port_info; 493 - port_details->port_id = ioc->port_serial_number++; 494 496 if (phy_info->phy_id < 64 ) 495 497 port_details->phy_bitmask |= 496 498 (1 << phy_info->phy_id); ··· 521 525 mptsas_get_port(phy_info_cmp); 522 526 port_details->starget = 523 527 mptsas_get_starget(phy_info_cmp); 524 - port_details->port_id = 525 - phy_info_cmp->port_details->port_id; 526 528 port_details->num_phys = 527 529 phy_info_cmp->port_details->num_phys; 528 - // port_info->port_serial_number--; 529 - ioc->port_serial_number--; 530 530 if (!phy_info_cmp->port_details->num_phys) 531 531 kfree(phy_info_cmp->port_details); 532 532 } else ··· 546 554 if (!port_details) 547 555 continue; 548 556 dsaswideprintk((KERN_DEBUG 549 - "%s: [%p]: phy_id=%02d port_id=%02d num_phys=%02d " 557 + "%s: [%p]: phy_id=%02d num_phys=%02d " 550 558 "bitmask=0x%016llX\n", 551 559 __FUNCTION__, 552 - port_details, i, port_details->port_id, 553 - port_details->num_phys, port_details->phy_bitmask)); 560 + port_details, i, port_details->num_phys, 561 + port_details->phy_bitmask)); 554 562 dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n", 555 563 port_details->port, port_details->rphy)); 556 564 } ··· 643 651 static int 644 652 mptsas_slave_configure(struct scsi_device *sdev) 645 653 { 646 - struct Scsi_Host *host = sdev->host; 647 - MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata; 648 654 649 - /* 650 - * RAID volumes placed beyond the last expected port. 651 - * Ignore sending sas mode pages in that case.. 652 - */ 653 - if (sdev->channel < hd->ioc->num_ports) 654 - sas_read_port_mode_page(sdev); 655 + if (sdev->channel == MPTSAS_RAID_CHANNEL) 656 + goto out; 655 657 658 + sas_read_port_mode_page(sdev); 659 + 660 + out: 656 661 return mptscsih_slave_configure(sdev); 657 662 } ··· 678 689 679 690 hd->Targets[target_id] = vtarget; 680 691 681 - /* 682 - * RAID volumes placed beyond the last expected port. 683 - */ 684 - if (starget->channel == hd->ioc->num_ports) 692 + if (starget->channel == MPTSAS_RAID_CHANNEL) 685 693 goto out; 686 694 687 695 rphy = dev_to_rphy(starget->dev.parent); ··· 729 743 if (!starget->hostdata) 730 744 return; 731 745 732 - if (starget->channel == hd->ioc->num_ports) 746 + if (starget->channel == MPTSAS_RAID_CHANNEL) 733 747 goto out; 734 748 735 749 rphy = dev_to_rphy(starget->dev.parent); ··· 769 783 starget = scsi_target(sdev); 770 784 vdev->vtarget = starget->hostdata; 771 785 772 - /* 773 - * RAID volumes placed beyond the last expected port. 774 - */ 775 - if (sdev->channel == hd->ioc->num_ports) 786 + if (sdev->channel == MPTSAS_RAID_CHANNEL) 776 787 goto out; 777 788 778 789 rphy = dev_to_rphy(sdev->sdev_target->dev.parent); ··· 1591 1608 if (phy_info->sas_port_add_phy) { 1592 1609 1593 1610 if (!port) { 1594 - port = sas_port_alloc(dev, 1595 - phy_info->port_details->port_id); 1596 - dsaswideprintk((KERN_DEBUG 1597 - "sas_port_alloc: port=%p dev=%p port_id=%d\n", 1598 - port, dev, phy_info->port_details->port_id)); 1611 + port = sas_port_alloc_num(dev); 1599 1612 if (!port) { 1600 1613 error = -ENOMEM; 1601 1614 goto out; ··· 1604 1625 goto out; 1605 1626 } 1606 1627 mptsas_set_port(phy_info, port); 1628 + dsaswideprintk((KERN_DEBUG 1629 + "sas_port_alloc: port=%p dev=%p port_id=%d\n", 1630 + port, dev, port->port_identifier)); 1607 1631 } 1608 1632 dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n", 1609 1633 phy_info->phy_id)); ··· 1718 1736 hba = NULL; 1719 1737 } 1720 1738 mutex_unlock(&ioc->sas_topology_mutex); 1721 - ioc->num_ports = port_info->num_phys; 1722 1739 1723 1740 for (i = 0; i < port_info->num_phys; i++) { 1724 1741 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], ··· 1920 1939 expander_sas_address) 1921 1940 continue; 1922 1941 #ifdef MPT_DEBUG_SAS_WIDE 1923 - dev_printk(KERN_DEBUG, &port->dev, "delete\n"); 1942 + dev_printk(KERN_DEBUG, &port->dev, 1943 + "delete port (%d)\n", port->port_identifier); 1924 1944 #endif 1925 1945 sas_port_delete(port); 1926 1946 mptsas_port_delete(phy_info->port_details); ··· 1966 1984 if (!ioc->raid_data.pIocPg2->NumActiveVolumes) 1967 1985 goto out; 1968 1986 for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 1969 - scsi_add_device(ioc->sh, ioc->num_ports, 1987 + scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, 1970 1988 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); 1971 1989 } 1972 1990 out: ··· 2167 2185 ioc->name, ds, ev->channel, ev->id, phy_info->phy_id); 2168 2186 2169 2187 #ifdef MPT_DEBUG_SAS_WIDE 2170 - dev_printk(KERN_DEBUG, &port->dev, "delete\n"); 2188 + dev_printk(KERN_DEBUG, &port->dev, 2189 + "delete port (%d)\n", port->port_identifier); 2171 2190 #endif 2172 2191 sas_port_delete(port); 2173 2192 mptsas_port_delete(phy_info->port_details); ··· 2272 2289 mptsas_set_rphy(phy_info, rphy); 2273 2290 break; 2274 2291 case MPTSAS_ADD_RAID: 2275 - sdev = scsi_device_lookup( 2276 - ioc->sh, 2277 - ioc->num_ports, 2278 - ev->id, 2279 - 0); 2292 + sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, 2293 + ev->id, 0); 2280 2294 if (sdev) { 2281 2295 scsi_device_put(sdev); 2282 2296 break; 2283 2297 } 2284 2298 printk(MYIOC_s_INFO_FMT 2285 2299 "attaching raid volume, channel %d, id %d\n", 2286 - ioc->name, ioc->num_ports, ev->id); 2287 - scsi_add_device(ioc->sh, 2288 - ioc->num_ports, 2289 - ev->id, 2290 - 0); 2300 + ioc->name, MPTSAS_RAID_CHANNEL, ev->id); 2301 + scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0); 2291 2302 mpt_findImVolumes(ioc); 2292 2303 break; 2293 2304 case MPTSAS_DEL_RAID: 2294 - sdev = scsi_device_lookup( 2295 - ioc->sh, 2296 - ioc->num_ports, 2297 - ev->id, 2298 - 0); 2305 + sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, 2306 + ev->id, 0); 2299 2307 if (!sdev) 2300 2308 break; 2301 2309 printk(MYIOC_s_INFO_FMT 2302 2310 "removing raid volume, channel %d, id %d\n", 2303 2311 ioc->name, MPTSAS_RAID_CHANNEL, ev->id); 2304 2312 vdevice = sdev->hostdata; 2305 2313 vdevice->vtarget->deleted = 1; 2306 2314 mptsas_target_reset(ioc, vdevice->vtarget); ··· 2697 2723 hd->timer.data = (unsigned long) hd; 2698 2724 hd->timer.function = mptscsih_timer_expired; 2699 2725 2700 - hd->mpt_pq_filter = mpt_pq_filter; 2701 2726 ioc->sas_data.ptClear = mpt_pt_clear; 2702 2727 2703 2728 if (ioc->sas_data.ptClear==1) { 2704 2729 mptbase_sas_persist_operation( 2705 2730 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT); 2706 2731 } 2707 - 2708 - ddvprintk((MYIOC_s_INFO_FMT 2709 - "mpt_pq_filter %x mpt_pq_filter %x\n", 2710 - ioc->name, 2711 - mpt_pq_filter, 2712 - mpt_pq_filter)); 2713 2732 2714 2733 init_waitqueue_head(&hd->scandv_waitq); 2715 2734 hd->scandv_wait_done = 0;
+102 -16
drivers/message/fusion/mptscsih.c
··· 66 66 67 67 #include "mptbase.h" 68 68 #include "mptscsih.h" 69 + #include "lsi/mpi_log_sas.h" 69 70 70 71 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 71 72 #define my_NAME "Fusion MPT SCSI Host driver" ··· 128 127 static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); 129 128 static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd); 130 129 static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ); 131 - static u32 SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc); 130 + static int SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc); 132 131 133 132 static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout); 134 133 ··· 498 497 return SUCCESS; 499 498 } /* mptscsih_AddSGE() */ 500 499 500 + static void 501 + mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget, 502 + U32 SlotStatus) 503 + { 504 + MPT_FRAME_HDR *mf; 505 + SEPRequest_t *SEPMsg; 506 + 507 + if (ioc->bus_type == FC) 508 + return; 509 + 510 + if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 511 + dfailprintk((MYIOC_s_WARN_FMT "%s: no msg frames!!\n", 512 + ioc->name,__FUNCTION__)); 513 + return; 514 + } 515 + 516 + SEPMsg = (SEPRequest_t *)mf; 517 + SEPMsg->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; 518 + SEPMsg->Bus = vtarget->bus_id; 519 + SEPMsg->TargetID = vtarget->target_id; 520 + SEPMsg->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS; 521 + SEPMsg->SlotStatus = SlotStatus; 522 + devtverboseprintk((MYIOC_s_WARN_FMT 523 + "Sending SEP cmd=%x id=%d bus=%d\n", 524 + ioc->name, SlotStatus, SEPMsg->TargetID, SEPMsg->Bus)); 525 + mpt_put_msg_frame(ioc->DoneCtx, ioc, mf); 526 + } 527 + 501 528 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 502 529 /* 503 530 * mptscsih_io_done - Main SCSI IO callback routine registered to ··· 549 520 SCSIIORequest_t *pScsiReq; 550 521 SCSIIOReply_t *pScsiReply; 551 522 u16 req_idx, req_idx_MR; 523 + VirtDevice *vdev; 524 + VirtTarget *vtarget; 552 525 553 526 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; 554 527 ··· 569 538 } 570 539 571 540 sc = hd->ScsiLookup[req_idx]; 541 + hd->ScsiLookup[req_idx] = NULL; 572 542 if (sc == NULL) { 573 543 MPIHeader_t *hdr = (MPIHeader_t *)mf; 574 544 ··· 585 553 return 1; 586 554 } 587 555 556 + if ((unsigned char *)mf != sc->host_scribble) { 557 + mptscsih_freeChainBuffers(ioc, req_idx); 558 + return 1; 559 + } 560 + 561 + sc->host_scribble = NULL; 588 562 sc->result = DID_OK << 16; /* Set default reply as OK */ 589 563 pScsiReq = (SCSIIORequest_t *) mf; 590 564 pScsiReply = (SCSIIOReply_t *) mr; ··· 678 640 679 641 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) 680 642 hd->sel_timeout[pScsiReq->TargetID]++; 643 + 644 + vdev = sc->device->hostdata; 645 + if (!vdev) 646 + break; 647 + vtarget = vdev->vtarget; 648 + if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) { 649 + mptscsih_issue_sep_command(ioc, vtarget, 650 + MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED); 651 + vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON; 652 + } 681 653 break; 682 654 683 - case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ 684 655 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ 656 + if ( ioc->bus_type == SAS ) { 657 + u16 ioc_status = le16_to_cpu(pScsiReply->IOCStatus); 658 + if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { 659 + u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo); 660 + log_info &=SAS_LOGINFO_MASK; 661 + if (log_info == 
SAS_LOGINFO_NEXUS_LOSS) { 662 + sc->result = (DID_BUS_BUSY << 16); 663 + break; 664 + } 665 + } 666 + } 667 + 668 + /* 669 + * Allow non-SAS & non-NEXUS_LOSS to drop into below code 670 + */ 671 + 672 + case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ 685 673 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ 686 674 /* Linux handles an unsolicited DID_RESET better 687 675 * than an unsolicited DID_ABORT. ··· 722 658 sc->result=DID_SOFT_ERROR << 16; 723 659 else /* Sufficient data transfer occurred */ 724 660 sc->result = (DID_OK << 16) | scsi_status; 725 - dreplyprintk((KERN_NOTICE 661 + dreplyprintk((KERN_NOTICE 726 662 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id)); 727 663 break; 728 664 ··· 848 784 sc->request_bufflen, sc->sc_data_direction); 849 785 } 850 786 851 - hd->ScsiLookup[req_idx] = NULL; 852 - 853 787 sc->scsi_done(sc); /* Issue the command callback */ 854 788 855 789 /* Free Chain buffers */ ··· 889 827 dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n", 890 828 mf, SCpnt)); 891 829 830 + /* Free Chain buffers */ 831 + mptscsih_freeChainBuffers(ioc, ii); 832 + 833 + /* Free Message frames */ 834 + mpt_free_msg_frame(ioc, mf); 835 + 836 + if ((unsigned char *)mf != SCpnt->host_scribble) 837 + continue; 838 + 892 839 /* Set status, free OS resources (SG DMA buffers) 893 840 * Do OS callback 894 - * Free driver resources (chain, msg buffers) 895 841 */ 896 842 if (SCpnt->use_sg) { 897 843 pci_unmap_sg(ioc->pcidev, ··· 914 844 } 915 845 SCpnt->result = DID_RESET << 16; 916 846 SCpnt->host_scribble = NULL; 917 - 918 - /* Free Chain buffers */ 919 - mptscsih_freeChainBuffers(ioc, ii); 920 - 921 - /* Free Message frames */ 922 - mpt_free_msg_frame(ioc, mf); 923 847 924 848 SCpnt->scsi_done(SCpnt); /* Issue the command callback */ 925 849 } ··· 951 887 if ((sc = hd->ScsiLookup[ii]) != NULL) { 952 888 953 889 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii); 954 - 890 + if (mf == NULL) 891 + continue; 955 892 dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n", 956 893 hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1])); 957 - 958 894 if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun))) 959 895 continue; 960 896 ··· 963 899 hd->ScsiLookup[ii] = NULL; 964 900 mptscsih_freeChainBuffers(hd->ioc, ii); 965 901 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); 902 + if ((unsigned char *)mf != sc->host_scribble) 903 + continue; 966 904 if (sc->use_sg) { 967 905 pci_unmap_sg(hd->ioc->pcidev, 968 906 (struct scatterlist *) sc->request_buffer, ··· 1407 1341 goto fail; 1408 1342 } 1409 1343 1344 + SCpnt->host_scribble = (unsigned char *)mf; 1410 1345 hd->ScsiLookup[my_idx] = SCpnt; 1411 - SCpnt->host_scribble = NULL; 1412 1346 1413 1347 mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf); 1414 1348 dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n", ··· 1595 1529 rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP); 1596 1530 } 1597 1531 1532 + /* 1533 + * Check IOCStatus from TM reply message 1534 + */ 1535 + if (hd->tm_iocstatus != MPI_IOCSTATUS_SUCCESS) 1536 + rc = FAILED; 1537 + 1598 1538 dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc)); 1599 1539 1600 1540 return rc; ··· 1726 1654 int scpnt_idx; 1727 1655 int retval; 1728 1656 VirtDevice *vdev; 1657 + ulong sn = SCpnt->serial_number; 1729 1658 1730 1659 /* If we can't locate our host adapter structure, return FAILED status. 
1731 1660 */ ··· 1779 1706 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 1780 1707 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, 1781 1708 ctx2abort, mptscsih_get_tm_timeout(hd->ioc)); 1709 + 1710 + if (SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx && 1711 + SCpnt->serial_number == sn) { 1712 + retval = FAILED; 1713 + } 1782 1714 1783 1715 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", 1784 1716 hd->ioc->name, ··· 2101 2023 DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply); 2102 2024 2103 2025 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; 2026 + hd->tm_iocstatus = iocstatus; 2104 2027 dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n", 2105 2028 ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo))); 2106 2029 /* Error? (anything non-zero?) */ ··· 2480 2401 ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12]; 2481 2402 2482 2403 ioc->eventContext++; 2404 + if (hd->ioc->pcidev->vendor == 2405 + PCI_VENDOR_ID_IBM) { 2406 + mptscsih_issue_sep_command(hd->ioc, 2407 + vdev->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); 2408 + vdev->vtarget->tflags |= 2409 + MPT_TARGET_FLAGS_LED_ON; 2410 + } 2483 2411 } 2484 2412 } 2485 2413 } else { ··· 2495 2409 } 2496 2410 } 2497 2411 2498 - static u32 2412 + static int 2499 2413 SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc) 2500 2414 { 2501 2415 MPT_SCSI_HOST *hd;
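[review note] The eh_abort hunk above pairs the firmware's task-management verdict with a driver-side check: the command's lookup slot and serial number are sampled before the abort and compared afterwards. A minimal sketch of the idea; cmd_to_lookup_idx() and issue_abort_tm() are hypothetical stand-ins, not mpt functions:

static int my_eh_abort(struct scsi_cmnd *sc)
{
	int idx = cmd_to_lookup_idx(sc);	/* slot occupied pre-abort */
	unsigned long sn = sc->serial_number;	/* identity pre-abort */
	int retval;

	retval = issue_abort_tm(sc);		/* ask firmware to abort */

	/*
	 * If the command still occupies the same slot with the same
	 * serial number, its done() never ran, so the abort cannot have
	 * taken effect; override whatever the TM reply claimed.
	 */
	if (cmd_to_lookup_idx(sc) == idx && sc->serial_number == sn)
		retval = FAILED;

	return retval;
}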
+2 -8
drivers/message/fusion/mptspi.c
··· 83 83 module_param(mpt_saf_te, int, 0); 84 84 MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)"); 85 85 86 - static int mpt_pq_filter = 0; 87 - module_param(mpt_pq_filter, int, 0); 88 - MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)"); 89 - 90 86 static void mptspi_write_offset(struct scsi_target *, int); 91 87 static void mptspi_write_width(struct scsi_target *, int); 92 88 static int mptspi_write_spi_device_pg1(struct scsi_target *, ··· 1043 1047 hd->timer.function = mptscsih_timer_expired; 1044 1048 1045 1049 ioc->spi_data.Saf_Te = mpt_saf_te; 1046 - hd->mpt_pq_filter = mpt_pq_filter; 1047 1050 1048 1051 hd->negoNvram = MPT_SCSICFG_USE_NVRAM; 1049 1052 ddvprintk((MYIOC_s_INFO_FMT 1050 - "saf_te %x mpt_pq_filter %x\n", 1053 + "saf_te %x\n", 1051 1054 ioc->name, 1052 - mpt_saf_te, 1053 - mpt_pq_filter)); 1055 + mpt_saf_te)); 1054 1056 ioc->spi_data.noQas = 0; 1055 1057 1056 1058 init_waitqueue_head(&hd->scandv_waitq);
+1
drivers/net/dummy.c
··· 132 132 for (i = 0; i < numdummies && !err; i++) 133 133 err = dummy_init_one(i); 134 134 if (err) { 135 + i--; 135 136 while (--i >= 0) 136 137 dummy_free_one(i); 137 138 }
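[review note] The one-line `i--` above fixes an off-by-one in the error unwind: the for loop exits with i one past the device whose init failed, so stepping back once before `while (--i >= 0)` frees exactly devices 0..failed-1 and never touches the one that was never set up. Self-contained sketch of the corrected pattern; init_one()/free_one() are hypothetical:

static int init_all(int n)
{
	int i, err = 0;

	for (i = 0; i < n && !err; i++)
		err = init_one(i);	/* suppose this fails for i == k */
	if (err) {
		i--;			/* back from k+1 to k, the failed index */
		while (--i >= 0)	/* free k-1 down to 0 only */
			free_one(i);
	}
	return err;
}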
+3
drivers/net/e1000/e1000.h
··· 110 110 #define E1000_MIN_RXD 80 111 111 #define E1000_MAX_82544_RXD 4096 112 112 113 + /* this is the size past which hardware will drop packets when setting LPE=0 */ 114 + #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 115 + 113 116 /* Supported Rx Buffer Sizes */ 114 117 #define E1000_RXBUFFER_128 128 /* Used for packet split */ 115 118 #define E1000_RXBUFFER_256 256 /* Used for packet split */
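[review note] The 1522 constant is the standard 802.1Q maximum frame length, not a controller quirk:

	1500 (payload) + 6 + 6 + 2 (dst, src, ethertype) + 4 (VLAN tag) + 4 (FCS) = 1522

so with LPE=0 the MAC drops anything longer than a maximally tagged frame; the rx_buffer_len change in e1000_main.c below sizes the default receive buffer to match.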
+26 -26
drivers/net/e1000/e1000_main.c
··· 36 36 #else 37 37 #define DRIVERNAPI "-NAPI" 38 38 #endif 39 - #define DRV_VERSION "7.1.9-k2"DRIVERNAPI 39 + #define DRV_VERSION "7.1.9-k4"DRIVERNAPI 40 40 char e1000_driver_version[] = DRV_VERSION; 41 41 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 42 42 ··· 1068 1068 1069 1069 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 1070 1070 1071 - adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; 1071 + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1072 1072 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; 1073 1073 hw->max_frame_size = netdev->mtu + 1074 1074 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; ··· 3148 3148 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3149 3149 3150 3150 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3151 - #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 3152 3151 if (!adapter->hw.tbi_compatibility_on && 3153 3152 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || 3154 3153 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) ··· 3386 3387 E1000_WRITE_REG(hw, IMC, ~0); 3387 3388 E1000_WRITE_FLUSH(hw); 3388 3389 } 3389 - if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) 3390 - __netif_rx_schedule(&adapter->polling_netdev[0]); 3390 + if (likely(netif_rx_schedule_prep(netdev))) 3391 + __netif_rx_schedule(netdev); 3391 3392 else 3392 3393 e1000_irq_enable(adapter); 3393 3394 #else ··· 3430 3431 { 3431 3432 struct e1000_adapter *adapter; 3432 3433 int work_to_do = min(*budget, poll_dev->quota); 3433 - int tx_cleaned = 0, i = 0, work_done = 0; 3434 + int tx_cleaned = 0, work_done = 0; 3434 3435 3435 3436 /* Must NOT use netdev_priv macro here. */ 3436 3437 adapter = poll_dev->priv; 3437 3438 3438 3439 /* Keep link state information with original netdev */ 3439 - if (!netif_carrier_ok(adapter->netdev)) 3440 + if (!netif_carrier_ok(poll_dev)) 3440 3441 goto quit_polling; 3441 3442 3442 - while (poll_dev != &adapter->polling_netdev[i]) { 3443 - i++; 3444 - BUG_ON(i == adapter->num_rx_queues); 3443 + /* e1000_clean is called per-cpu. This lock protects 3444 + * tx_ring[0] from being cleaned by multiple cpus 3445 + * simultaneously. A failure obtaining the lock means 3446 + * tx_ring[0] is currently being cleaned anyway. */ 3447 + if (spin_trylock(&adapter->tx_queue_lock)) { 3448 + tx_cleaned = e1000_clean_tx_irq(adapter, 3449 + &adapter->tx_ring[0]); 3450 + spin_unlock(&adapter->tx_queue_lock); 3445 3451 } 3446 3452 3447 - if (likely(adapter->num_tx_queues == 1)) { 3448 - /* e1000_clean is called per-cpu. This lock protects 3449 - * tx_ring[0] from being cleaned by multiple cpus 3450 - * simultaneously. A failure obtaining the lock means 3451 - * tx_ring[0] is currently being cleaned anyway. 
*/ 3452 - if (spin_trylock(&adapter->tx_queue_lock)) { 3453 - tx_cleaned = e1000_clean_tx_irq(adapter, 3454 - &adapter->tx_ring[0]); 3455 - spin_unlock(&adapter->tx_queue_lock); 3456 - } 3457 - } else 3458 - tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); 3459 - 3460 - adapter->clean_rx(adapter, &adapter->rx_ring[i], 3453 + adapter->clean_rx(adapter, &adapter->rx_ring[0], 3461 3454 &work_done, work_to_do); 3462 3455 3463 3456 *budget -= work_done; ··· 3457 3466 3458 3467 /* If no Tx and not enough Rx work done, exit the polling mode */ 3459 3468 if ((!tx_cleaned && (work_done == 0)) || 3460 - !netif_running(adapter->netdev)) { 3469 + !netif_running(poll_dev)) { 3461 3470 quit_polling: 3462 3471 netif_rx_complete(poll_dev); 3463 3472 e1000_irq_enable(adapter); ··· 3672 3681 3673 3682 length = le16_to_cpu(rx_desc->length); 3674 3683 3684 + /* adjust length to remove Ethernet CRC */ 3685 + length -= 4; 3686 + 3675 3687 if (unlikely(!(status & E1000_RXD_STAT_EOP))) { 3676 3688 /* All receives must fit into a single buffer */ 3677 3689 E1000_DBG("%s: Receive packet consumed multiple" ··· 3879 3885 pci_dma_sync_single_for_device(pdev, 3880 3886 ps_page_dma->ps_page_dma[0], 3881 3887 PAGE_SIZE, PCI_DMA_FROMDEVICE); 3888 + /* remove the CRC */ 3889 + l1 -= 4; 3882 3890 skb_put(skb, l1); 3883 - length += l1; 3884 3891 goto copydone; 3885 3892 } /* if */ 3886 3893 } ··· 3899 3904 skb->data_len += length; 3900 3905 skb->truesize += length; 3901 3906 } 3907 + 3908 + /* strip the ethernet crc, problem is we're using pages now so 3909 + * this whole operation can get a little cpu intensive */ 3910 + pskb_trim(skb, skb->len - 4); 3902 3911 3903 3912 copydone: 3904 3913 e1000_rx_checksum(adapter, staterr, ··· 4751 4752 e1000_netpoll(struct net_device *netdev) 4752 4753 { 4753 4754 struct e1000_adapter *adapter = netdev_priv(netdev); 4755 + 4754 4756 disable_irq(adapter->pdev->irq); 4755 4757 e1000_intr(adapter->pdev->irq, netdev, NULL); 4756 4758 e1000_clean_tx_irq(adapter, adapter->tx_ring);
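[review note] After the hunk above, poll always serves ring 0, so the per-queue indexing is gone and the remaining concurrency concern is several CPUs polling at once. A sketch of the trylock idiom the code keeps; my_adapter and clean_tx() are placeholders, and the exit rule is simplified:

static int my_poll(struct net_device *dev, int *budget)
{
	struct my_adapter *ad = dev->priv;
	int tx_cleaned = 0, work_done = 0;

	/*
	 * Several CPUs may enter poll concurrently, but one TX ring only
	 * needs one cleaner; losing the trylock just means another CPU
	 * is already reaping tx_ring[0].
	 */
	if (spin_trylock(&ad->tx_lock)) {
		tx_cleaned = clean_tx(ad, &ad->tx_ring[0]);
		spin_unlock(&ad->tx_lock);
	}

	ad->clean_rx(ad, &ad->rx_ring[0], &work_done, min(*budget, dev->quota));
	*budget -= work_done;
	dev->quota -= work_done;

	return (tx_cleaned || work_done) ? 1 : 0;	/* simplified */
}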
+1
drivers/net/ifb.c
··· 271 271 for (i = 0; i < numifbs && !err; i++) 272 272 err = ifb_init_one(i); 273 273 if (err) { 274 + i--; 274 275 while (--i >= 0) 275 276 ifb_free_one(i); 276 277 }
+2 -5
drivers/net/sky2.c
··· 50 50 #include "sky2.h" 51 51 52 52 #define DRV_NAME "sky2" 53 - #define DRV_VERSION "1.4" 53 + #define DRV_VERSION "1.5" 54 54 #define PFX DRV_NAME " " 55 55 56 56 /* ··· 2204 2204 int work_done = 0; 2205 2205 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2206 2206 2207 - if (!~status) 2208 - goto out; 2209 - 2210 2207 if (status & Y2_IS_HW_ERR) 2211 2208 sky2_hw_intr(hw); 2212 2209 ··· 2240 2243 2241 2244 if (sky2_more_work(hw)) 2242 2245 return 1; 2243 - out: 2246 + 2244 2247 netif_rx_complete(dev0); 2245 2248 2246 2249 sky2_read32(hw, B0_Y2_SP_LISR);
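[review note] For readers of the deleted guard: `!~status` decodes as ~0xffffffff == 0 and !0 == 1, i.e. it fired exactly when the EISR read returned all ones, the usual signature of reading a surprise-removed or powered-down PCI device. With the early exit gone, that path now appears to fall through the handlers to netif_rx_complete() like any other status.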
+258 -330
drivers/net/spider_net.c
··· 84 84 * 85 85 * returns the content of the specified SMMIO register. 86 86 */ 87 - static u32 87 + static inline u32 88 88 spider_net_read_reg(struct spider_net_card *card, u32 reg) 89 89 { 90 90 u32 value; ··· 101 101 * @reg: register to write to 102 102 * @value: value to write into the specified SMMIO register 103 103 */ 104 - static void 104 + static inline void 105 105 spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) 106 106 { 107 107 value = cpu_to_le32(value); ··· 259 259 * 260 260 * returns the status as in the dmac_cmd_status field of the descriptor 261 261 */ 262 - static enum spider_net_descr_status 262 + static inline int 263 263 spider_net_get_descr_status(struct spider_net_descr *descr) 264 264 { 265 - u32 cmd_status; 266 - 267 - cmd_status = descr->dmac_cmd_status; 268 - cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; 269 - /* no need to mask out any bits, as cmd_status is 32 bits wide only 270 - * (and unsigned) */ 271 - return cmd_status; 272 - } 273 - 274 - /** 275 - * spider_net_set_descr_status -- sets the status of a descriptor 276 - * @descr: descriptor to change 277 - * @status: status to set in the descriptor 278 - * 279 - * changes the status to the specified value. Doesn't change other bits 280 - * in the status 281 - */ 282 - static void 283 - spider_net_set_descr_status(struct spider_net_descr *descr, 284 - enum spider_net_descr_status status) 285 - { 286 - u32 cmd_status; 287 - /* read the status */ 288 - cmd_status = descr->dmac_cmd_status; 289 - /* clean the upper 4 bits */ 290 - cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; 291 - /* add the status to it */ 292 - cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; 293 - /* and write it back */ 294 - descr->dmac_cmd_status = cmd_status; 265 + return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; 295 266 } 296 267 297 268 /** ··· 299 328 static int 300 329 spider_net_init_chain(struct spider_net_card *card, 301 330 struct spider_net_descr_chain *chain, 302 - struct spider_net_descr *start_descr, int no) 331 + struct spider_net_descr *start_descr, 332 + int direction, int no) 303 333 { 304 334 int i; 305 335 struct spider_net_descr *descr; 306 336 dma_addr_t buf; 307 - 308 - atomic_set(&card->rx_chain_refill,0); 309 337 310 338 descr = start_descr; 311 339 memset(descr, 0, sizeof(*descr) * no); 312 340 313 341 /* set up the hardware pointers in each descriptor */ 314 342 for (i=0; i<no; i++, descr++) { 315 - spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 343 + descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 316 344 317 345 buf = pci_map_single(card->pdev, descr, 318 346 SPIDER_NET_DESCR_SIZE, 319 - PCI_DMA_BIDIRECTIONAL); 347 + direction); 320 348 321 349 if (buf == DMA_ERROR_CODE) 322 350 goto iommu_error; ··· 330 360 start_descr->prev = descr-1; 331 361 332 362 descr = start_descr; 333 - for (i=0; i < no; i++, descr++) { 334 - descr->next_descr_addr = descr->next->bus_addr; 335 - } 363 + if (direction == PCI_DMA_FROMDEVICE) 364 + for (i=0; i < no; i++, descr++) 365 + descr->next_descr_addr = descr->next->bus_addr; 336 366 367 + spin_lock_init(&chain->lock); 337 368 chain->head = start_descr; 338 369 chain->tail = start_descr; 339 370 ··· 346 375 if (descr->bus_addr) 347 376 pci_unmap_single(card->pdev, descr->bus_addr, 348 377 SPIDER_NET_DESCR_SIZE, 349 - PCI_DMA_BIDIRECTIONAL); 378 + direction); 350 379 return -ENOMEM; 351 380 } 352 381 ··· 367 396 dev_kfree_skb(descr->skb); 368 397 pci_unmap_single(card->pdev, descr->buf_addr, 369 398 
SPIDER_NET_MAX_FRAME, 370 - PCI_DMA_BIDIRECTIONAL); 399 + PCI_DMA_FROMDEVICE); 371 400 } 372 401 descr = descr->next; 373 402 } ··· 417 446 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 418 447 /* io-mmu-map the skb */ 419 448 buf = pci_map_single(card->pdev, descr->skb->data, 420 - SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); 449 + SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 421 450 descr->buf_addr = buf; 422 451 if (buf == DMA_ERROR_CODE) { 423 452 dev_kfree_skb_any(descr->skb); 424 453 if (netif_msg_rx_err(card) && net_ratelimit()) 425 454 pr_err("Could not iommu-map rx buffer\n"); 426 - spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 455 + descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 427 456 } else { 428 - descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED; 457 + descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 458 + SPIDER_NET_DMAC_NOINTR_COMPLETE; 429 459 } 430 460 431 461 return error; ··· 440 468 * chip by writing to the appropriate register. DMA is enabled in 441 469 * spider_net_enable_rxdmac. 442 470 */ 443 - static void 471 + static inline void 444 472 spider_net_enable_rxchtails(struct spider_net_card *card) 445 473 { 446 474 /* assume chain is aligned correctly */ ··· 455 483 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 456 484 * in the GDADMACCNTR register 457 485 */ 458 - static void 486 + static inline void 459 487 spider_net_enable_rxdmac(struct spider_net_card *card) 460 488 { 461 489 wmb(); ··· 472 500 static void 473 501 spider_net_refill_rx_chain(struct spider_net_card *card) 474 502 { 475 - struct spider_net_descr_chain *chain; 476 - 477 - chain = &card->rx_chain; 503 + struct spider_net_descr_chain *chain = &card->rx_chain; 504 + unsigned long flags; 478 505 479 506 /* one context doing the refill (and a second context seeing that 480 507 * and omitting it) is ok. If called by NAPI, we'll be called again 481 508 * as spider_net_decode_one_descr is called several times. If some 482 509 * interrupt calls us, the NAPI is about to clean up anyway. 
*/ 483 - if (atomic_inc_return(&card->rx_chain_refill) == 1) 484 - while (spider_net_get_descr_status(chain->head) == 485 - SPIDER_NET_DESCR_NOT_IN_USE) { 486 - if (spider_net_prepare_rx_descr(card, chain->head)) 487 - break; 488 - chain->head = chain->head->next; 489 - } 510 + if (!spin_trylock_irqsave(&chain->lock, flags)) 511 + return; 490 512 491 - atomic_dec(&card->rx_chain_refill); 513 + while (spider_net_get_descr_status(chain->head) == 514 + SPIDER_NET_DESCR_NOT_IN_USE) { 515 + if (spider_net_prepare_rx_descr(card, chain->head)) 516 + break; 517 + chain->head = chain->head->next; 518 + } 519 + 520 + spin_unlock_irqrestore(&chain->lock, flags); 492 521 } 493 522 494 523 /** ··· 524 551 error: 525 552 spider_net_free_rx_chain_contents(card); 526 553 return result; 527 - } 528 - 529 - /** 530 - * spider_net_release_tx_descr - processes a used tx descriptor 531 - * @card: card structure 532 - * @descr: descriptor to release 533 - * 534 - * releases a used tx descriptor (unmapping, freeing of skb) 535 - */ 536 - static void 537 - spider_net_release_tx_descr(struct spider_net_card *card, 538 - struct spider_net_descr *descr) 539 - { 540 - struct sk_buff *skb; 541 - 542 - /* unmap the skb */ 543 - skb = descr->skb; 544 - pci_unmap_single(card->pdev, descr->buf_addr, skb->len, 545 - PCI_DMA_BIDIRECTIONAL); 546 - 547 - dev_kfree_skb_any(skb); 548 - 549 - /* set status to not used */ 550 - spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 551 - } 552 - 553 - /** 554 - * spider_net_release_tx_chain - processes sent tx descriptors 555 - * @card: adapter structure 556 - * @brutal: if set, don't care about whether descriptor seems to be in use 557 - * 558 - * returns 0 if the tx ring is empty, otherwise 1. 559 - * 560 - * spider_net_release_tx_chain releases the tx descriptors that spider has 561 - * finished with (if non-brutal) or simply release tx descriptors (if brutal). 562 - * If some other context is calling this function, we return 1 so that we're 563 - * scheduled again (if we were scheduled) and will not loose initiative. 
564 - */ 565 - static int 566 - spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 567 - { 568 - struct spider_net_descr_chain *tx_chain = &card->tx_chain; 569 - enum spider_net_descr_status status; 570 - 571 - if (atomic_inc_return(&card->tx_chain_release) != 1) { 572 - atomic_dec(&card->tx_chain_release); 573 - return 1; 574 - } 575 - 576 - for (;;) { 577 - status = spider_net_get_descr_status(tx_chain->tail); 578 - switch (status) { 579 - case SPIDER_NET_DESCR_CARDOWNED: 580 - if (!brutal) 581 - goto out; 582 - /* fallthrough, if we release the descriptors 583 - * brutally (then we don't care about 584 - * SPIDER_NET_DESCR_CARDOWNED) */ 585 - case SPIDER_NET_DESCR_RESPONSE_ERROR: 586 - case SPIDER_NET_DESCR_PROTECTION_ERROR: 587 - case SPIDER_NET_DESCR_FORCE_END: 588 - if (netif_msg_tx_err(card)) 589 - pr_err("%s: forcing end of tx descriptor " 590 - "with status x%02x\n", 591 - card->netdev->name, status); 592 - card->netdev_stats.tx_dropped++; 593 - break; 594 - 595 - case SPIDER_NET_DESCR_COMPLETE: 596 - card->netdev_stats.tx_packets++; 597 - card->netdev_stats.tx_bytes += 598 - tx_chain->tail->skb->len; 599 - break; 600 - 601 - default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */ 602 - goto out; 603 - } 604 - spider_net_release_tx_descr(card, tx_chain->tail); 605 - tx_chain->tail = tx_chain->tail->next; 606 - } 607 - out: 608 - atomic_dec(&card->tx_chain_release); 609 - 610 - netif_wake_queue(card->netdev); 611 - 612 - if (status == SPIDER_NET_DESCR_CARDOWNED) 613 - return 1; 614 - return 0; 615 - } 616 - 617 - /** 618 - * spider_net_cleanup_tx_ring - cleans up the TX ring 619 - * @card: card structure 620 - * 621 - * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use 622 - * interrupts to cleanup our TX ring) and returns sent packets to the stack 623 - * by freeing them 624 - */ 625 - static void 626 - spider_net_cleanup_tx_ring(struct spider_net_card *card) 627 - { 628 - if ( (spider_net_release_tx_chain(card, 0)) && 629 - (card->netdev->flags & IFF_UP) ) { 630 - mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); 631 - } 632 554 } 633 555 634 556 /** ··· 629 761 } 630 762 631 763 /** 632 - * spider_net_stop - called upon ifconfig down 633 - * @netdev: interface device structure 634 - * 635 - * always returns 0 636 - */ 637 - int 638 - spider_net_stop(struct net_device *netdev) 639 - { 640 - struct spider_net_card *card = netdev_priv(netdev); 641 - 642 - tasklet_kill(&card->rxram_full_tl); 643 - netif_poll_disable(netdev); 644 - netif_carrier_off(netdev); 645 - netif_stop_queue(netdev); 646 - del_timer_sync(&card->tx_timer); 647 - 648 - /* disable/mask all interrupts */ 649 - spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 650 - spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); 651 - spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); 652 - 653 - /* free_irq(netdev->irq, netdev);*/ 654 - free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); 655 - 656 - spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 657 - SPIDER_NET_DMA_TX_FEND_VALUE); 658 - 659 - /* turn off DMA, force end */ 660 - spider_net_disable_rxdmac(card); 661 - 662 - /* release chains */ 663 - spider_net_release_tx_chain(card, 1); 664 - 665 - spider_net_free_chain(card, &card->tx_chain); 666 - spider_net_free_chain(card, &card->rx_chain); 667 - 668 - return 0; 669 - } 670 - 671 - /** 672 - * spider_net_get_next_tx_descr - returns the next available tx descriptor 673 - * @card: device structure to get descriptor from 674 - * 675 - * returns the 
address of the next descriptor, or NULL if not available. 676 - */ 677 - static struct spider_net_descr * 678 - spider_net_get_next_tx_descr(struct spider_net_card *card) 679 - { 680 - /* check, if head points to not-in-use descr */ 681 - if ( spider_net_get_descr_status(card->tx_chain.head) == 682 - SPIDER_NET_DESCR_NOT_IN_USE ) { 683 - return card->tx_chain.head; 684 - } else { 685 - return NULL; 686 - } 687 - } 688 - 689 - /** 690 - * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field 691 - * @descr: descriptor structure to fill out 692 - * @skb: packet to consider 693 - * 694 - * fills out the command and status field of the descriptor structure, 695 - * depending on hardware checksum settings. 696 - */ 697 - static void 698 - spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, 699 - struct sk_buff *skb) 700 - { 701 - /* make sure the other fields in the descriptor are written */ 702 - wmb(); 703 - 704 - if (skb->ip_summed != CHECKSUM_HW) { 705 - descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 706 - return; 707 - } 708 - 709 - /* is packet ip? 710 - * if yes: tcp? udp? */ 711 - if (skb->protocol == htons(ETH_P_IP)) { 712 - if (skb->nh.iph->protocol == IPPROTO_TCP) 713 - descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; 714 - else if (skb->nh.iph->protocol == IPPROTO_UDP) 715 - descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; 716 - else /* the stack should checksum non-tcp and non-udp 717 - packets on his own: NETIF_F_IP_CSUM */ 718 - descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 719 - } 720 - } 721 - 722 - /** 723 764 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 724 765 * @card: card structure 725 766 * @descr: descriptor structure to fill out ··· 641 864 */ 642 865 static int 643 866 spider_net_prepare_tx_descr(struct spider_net_card *card, 644 - struct spider_net_descr *descr, 645 867 struct sk_buff *skb) 646 868 { 869 + struct spider_net_descr *descr = card->tx_chain.head; 647 870 dma_addr_t buf; 648 871 649 - buf = pci_map_single(card->pdev, skb->data, 650 - skb->len, PCI_DMA_BIDIRECTIONAL); 872 + buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 651 873 if (buf == DMA_ERROR_CODE) { 652 874 if (netif_msg_tx_err(card) && net_ratelimit()) 653 875 pr_err("could not iommu-map packet (%p, %i). 
" ··· 656 880 657 881 descr->buf_addr = buf; 658 882 descr->buf_size = skb->len; 883 + descr->next_descr_addr = 0; 659 884 descr->skb = skb; 660 885 descr->data_status = 0; 661 886 662 - spider_net_set_txdescr_cmdstat(descr,skb); 887 + descr->dmac_cmd_status = 888 + SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; 889 + if (skb->protocol == htons(ETH_P_IP)) 890 + switch (skb->nh.iph->protocol) { 891 + case IPPROTO_TCP: 892 + descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; 893 + break; 894 + case IPPROTO_UDP: 895 + descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; 896 + break; 897 + } 898 + 899 + descr->prev->next_descr_addr = descr->bus_addr; 900 + 901 + return 0; 902 + } 903 + 904 + /** 905 + * spider_net_release_tx_descr - processes a used tx descriptor 906 + * @card: card structure 907 + * @descr: descriptor to release 908 + * 909 + * releases a used tx descriptor (unmapping, freeing of skb) 910 + */ 911 + static inline void 912 + spider_net_release_tx_descr(struct spider_net_card *card) 913 + { 914 + struct spider_net_descr *descr = card->tx_chain.tail; 915 + struct sk_buff *skb; 916 + 917 + card->tx_chain.tail = card->tx_chain.tail->next; 918 + descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; 919 + 920 + /* unmap the skb */ 921 + skb = descr->skb; 922 + pci_unmap_single(card->pdev, descr->buf_addr, skb->len, 923 + PCI_DMA_TODEVICE); 924 + dev_kfree_skb_any(skb); 925 + } 926 + 927 + /** 928 + * spider_net_release_tx_chain - processes sent tx descriptors 929 + * @card: adapter structure 930 + * @brutal: if set, don't care about whether descriptor seems to be in use 931 + * 932 + * returns 0 if the tx ring is empty, otherwise 1. 933 + * 934 + * spider_net_release_tx_chain releases the tx descriptors that spider has 935 + * finished with (if non-brutal) or simply release tx descriptors (if brutal). 936 + * If some other context is calling this function, we return 1 so that we're 937 + * scheduled again (if we were scheduled) and will not loose initiative. 
938 + */ 939 + static int 940 + spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 941 + { 942 + struct spider_net_descr_chain *chain = &card->tx_chain; 943 + int status; 944 + 945 + spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR); 946 + 947 + while (chain->tail != chain->head) { 948 + status = spider_net_get_descr_status(chain->tail); 949 + switch (status) { 950 + case SPIDER_NET_DESCR_COMPLETE: 951 + card->netdev_stats.tx_packets++; 952 + card->netdev_stats.tx_bytes += chain->tail->skb->len; 953 + break; 954 + 955 + case SPIDER_NET_DESCR_CARDOWNED: 956 + if (!brutal) 957 + return 1; 958 + /* fallthrough, if we release the descriptors 959 + * brutally (then we don't care about 960 + * SPIDER_NET_DESCR_CARDOWNED) */ 961 + 962 + case SPIDER_NET_DESCR_RESPONSE_ERROR: 963 + case SPIDER_NET_DESCR_PROTECTION_ERROR: 964 + case SPIDER_NET_DESCR_FORCE_END: 965 + if (netif_msg_tx_err(card)) 966 + pr_err("%s: forcing end of tx descriptor " 967 + "with status x%02x\n", 968 + card->netdev->name, status); 969 + card->netdev_stats.tx_errors++; 970 + break; 971 + 972 + default: 973 + card->netdev_stats.tx_dropped++; 974 + return 1; 975 + } 976 + spider_net_release_tx_descr(card); 977 + } 663 978 664 979 return 0; 665 980 } ··· 763 896 * spider_net_kick_tx_dma writes the current tx chain head as start address 764 897 * of the tx descriptor chain and enables the transmission DMA engine 765 898 */ 766 - static void 767 - spider_net_kick_tx_dma(struct spider_net_card *card, 768 - struct spider_net_descr *descr) 899 + static inline void 900 + spider_net_kick_tx_dma(struct spider_net_card *card) 769 901 { 770 - /* this is the only descriptor in the output chain. 771 - * Enable TX DMA */ 902 + struct spider_net_descr *descr; 772 903 773 - spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 774 - descr->bus_addr); 904 + if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) & 905 + SPIDER_NET_TX_DMA_EN) 906 + goto out; 775 907 776 - spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 777 - SPIDER_NET_DMA_TX_VALUE); 908 + descr = card->tx_chain.tail; 909 + for (;;) { 910 + if (spider_net_get_descr_status(descr) == 911 + SPIDER_NET_DESCR_CARDOWNED) { 912 + spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 913 + descr->bus_addr); 914 + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 915 + SPIDER_NET_DMA_TX_VALUE); 916 + break; 917 + } 918 + if (descr == card->tx_chain.head) 919 + break; 920 + descr = descr->next; 921 + } 922 + 923 + out: 924 + mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); 778 925 } 779 926 780 927 /** ··· 796 915 * @skb: packet to send out 797 916 * @netdev: interface device structure 798 917 * 799 - * returns 0 on success, <0 on failure 918 + * returns 0 on success, !0 on failure 800 919 */ 801 920 static int 802 921 spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) 803 922 { 804 923 struct spider_net_card *card = netdev_priv(netdev); 805 - struct spider_net_descr *descr; 924 + struct spider_net_descr_chain *chain = &card->tx_chain; 925 + struct spider_net_descr *descr = chain->head; 926 + unsigned long flags; 806 927 int result; 928 + 929 + spin_lock_irqsave(&chain->lock, flags); 807 930 808 931 spider_net_release_tx_chain(card, 0); 809 932 810 - descr = spider_net_get_next_tx_descr(card); 811 - 812 - if (!descr) 813 - goto error; 814 - 815 - result = spider_net_prepare_tx_descr(card, descr, skb); 816 - if (result) 817 - goto error; 818 - 819 - card->tx_chain.head = card->tx_chain.head->next; 820 - 821 - if (spider_net_get_descr_status(descr->prev) != 822 
- SPIDER_NET_DESCR_CARDOWNED) { 823 - /* make sure the current descriptor is in memory. Then 824 - * kicking it on again makes sense, if the previous is not 825 - * card-owned anymore. Check the previous descriptor twice 826 - * to omit an mb() in heavy traffic cases */ 827 - mb(); 828 - if (spider_net_get_descr_status(descr->prev) != 829 - SPIDER_NET_DESCR_CARDOWNED) 830 - spider_net_kick_tx_dma(card, descr); 933 + if (chain->head->next == chain->tail->prev) { 934 + card->netdev_stats.tx_dropped++; 935 + result = NETDEV_TX_LOCKED; 936 + goto out; 831 937 } 832 938 833 - mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); 939 + if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) { 940 + result = NETDEV_TX_LOCKED; 941 + goto out; 942 + } 834 943 835 - return NETDEV_TX_OK; 944 + if (spider_net_prepare_tx_descr(card, skb) != 0) { 945 + card->netdev_stats.tx_dropped++; 946 + result = NETDEV_TX_BUSY; 947 + goto out; 948 + } 836 949 837 - error: 838 - card->netdev_stats.tx_dropped++; 839 - return NETDEV_TX_BUSY; 950 + result = NETDEV_TX_OK; 951 + 952 + spider_net_kick_tx_dma(card); 953 + card->tx_chain.head = card->tx_chain.head->next; 954 + 955 + out: 956 + spin_unlock_irqrestore(&chain->lock, flags); 957 + netif_wake_queue(netdev); 958 + return result; 959 + } 960 + 961 + /** 962 + * spider_net_cleanup_tx_ring - cleans up the TX ring 963 + * @card: card structure 964 + * 965 + * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use 966 + * interrupts to cleanup our TX ring) and returns sent packets to the stack 967 + * by freeing them 968 + */ 969 + static void 970 + spider_net_cleanup_tx_ring(struct spider_net_card *card) 971 + { 972 + unsigned long flags; 973 + 974 + spin_lock_irqsave(&card->tx_chain.lock, flags); 975 + 976 + if ((spider_net_release_tx_chain(card, 0) != 0) && 977 + (card->netdev->flags & IFF_UP)) 978 + spider_net_kick_tx_dma(card); 979 + 980 + spin_unlock_irqrestore(&card->tx_chain.lock, flags); 840 981 } 841 982 842 983 /** ··· 905 1002 906 1003 /* unmap descriptor */ 907 1004 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, 908 - PCI_DMA_BIDIRECTIONAL); 1005 + PCI_DMA_FROMDEVICE); 909 1006 910 1007 /* the cases we'll throw away the packet immediately */ 911 1008 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { ··· 970 1067 static int 971 1068 spider_net_decode_one_descr(struct spider_net_card *card, int napi) 972 1069 { 973 - enum spider_net_descr_status status; 974 - struct spider_net_descr *descr; 975 - struct spider_net_descr_chain *chain; 1070 + struct spider_net_descr_chain *chain = &card->rx_chain; 1071 + struct spider_net_descr *descr = chain->tail; 1072 + int status; 976 1073 int result; 977 - 978 - chain = &card->rx_chain; 979 - descr = chain->tail; 980 1074 981 1075 status = spider_net_get_descr_status(descr); 982 1076 ··· 1003 1103 card->netdev->name, status); 1004 1104 card->netdev_stats.rx_dropped++; 1005 1105 pci_unmap_single(card->pdev, descr->buf_addr, 1006 - SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); 1106 + SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 1007 1107 dev_kfree_skb_irq(descr->skb); 1008 1108 goto refill; 1009 1109 } ··· 1019 1119 /* ok, we've got a packet in descr */ 1020 1120 result = spider_net_pass_skb_up(descr, card, napi); 1021 1121 refill: 1022 - spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 1122 + descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1023 1123 /* change the descriptor state: */ 1024 1124 if (!napi) 1025 1125 spider_net_refill_rx_chain(card); 
··· 1188 1288 return -EADDRNOTAVAIL; 1189 1289 1190 1290 return 0; 1191 - } 1192 - 1193 - /** 1194 - * spider_net_enable_txdmac - enables a TX DMA controller 1195 - * @card: card structure 1196 - * 1197 - * spider_net_enable_txdmac enables the TX DMA controller by setting the 1198 - * descriptor chain tail address 1199 - */ 1200 - static void 1201 - spider_net_enable_txdmac(struct spider_net_card *card) 1202 - { 1203 - /* assume chain is aligned correctly */ 1204 - spider_net_write_reg(card, SPIDER_NET_GDTDCHA, 1205 - card->tx_chain.tail->bus_addr); 1206 1291 } 1207 1292 1208 1293 /** ··· 1538 1653 { SPIDER_NET_GMRWOLCTRL, 0 }, 1539 1654 { SPIDER_NET_GTESTMD, 0x10000000 }, 1540 1655 { SPIDER_NET_GTTQMSK, 0x00400040 }, 1541 - { SPIDER_NET_GTESTMD, 0 }, 1542 1656 1543 1657 { SPIDER_NET_GMACINTEN, 0 }, 1544 1658 ··· 1576 1692 1577 1693 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); 1578 1694 1579 - /* set chain tail adress for TX chain */ 1580 - spider_net_enable_txdmac(card); 1581 - 1582 1695 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, 1583 1696 SPIDER_NET_LENLMT_VALUE); 1584 1697 spider_net_write_reg(card, SPIDER_NET_GMACMODE, ··· 1590 1709 SPIDER_NET_INT1_MASK_VALUE); 1591 1710 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 1592 1711 SPIDER_NET_INT2_MASK_VALUE); 1712 + 1713 + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 1714 + SPIDER_NET_GDTDCEIDIS); 1593 1715 } 1594 1716 1595 1717 /** ··· 1612 1728 1613 1729 result = -ENOMEM; 1614 1730 if (spider_net_init_chain(card, &card->tx_chain, 1615 - card->descr, tx_descriptors)) 1731 + card->descr, 1732 + PCI_DMA_TODEVICE, tx_descriptors)) 1616 1733 goto alloc_tx_failed; 1617 1734 if (spider_net_init_chain(card, &card->rx_chain, 1618 - card->descr + tx_descriptors, rx_descriptors)) 1735 + card->descr + tx_descriptors, 1736 + PCI_DMA_FROMDEVICE, rx_descriptors)) 1619 1737 goto alloc_rx_failed; 1620 1738 1621 1739 /* allocate rx skbs */ ··· 1824 1938 /* empty sequencer data */ 1825 1939 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; 1826 1940 sequencer++) { 1827 - spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1941 + spider_net_write_reg(card, SPIDER_NET_GSnPRGADR + 1828 1942 sequencer * 8, 0x0); 1829 1943 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { 1830 1944 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + ··· 1838 1952 /* reset */ 1839 1953 spider_net_write_reg(card, SPIDER_NET_CKRCTRL, 1840 1954 SPIDER_NET_CKRCTRL_STOP_VALUE); 1955 + } 1956 + 1957 + /** 1958 + * spider_net_stop - called upon ifconfig down 1959 + * @netdev: interface device structure 1960 + * 1961 + * always returns 0 1962 + */ 1963 + int 1964 + spider_net_stop(struct net_device *netdev) 1965 + { 1966 + struct spider_net_card *card = netdev_priv(netdev); 1967 + 1968 + tasklet_kill(&card->rxram_full_tl); 1969 + netif_poll_disable(netdev); 1970 + netif_carrier_off(netdev); 1971 + netif_stop_queue(netdev); 1972 + del_timer_sync(&card->tx_timer); 1973 + 1974 + /* disable/mask all interrupts */ 1975 + spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 1976 + spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); 1977 + spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); 1978 + 1979 + /* free_irq(netdev->irq, netdev);*/ 1980 + free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); 1981 + 1982 + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 1983 + SPIDER_NET_DMA_TX_FEND_VALUE); 1984 + 1985 + /* turn off DMA, force end */ 1986 + spider_net_disable_rxdmac(card); 1987 + 1988 + /* release chains */ 1989 + if 
(spin_trylock(&card->tx_chain.lock)) { 1990 + spider_net_release_tx_chain(card, 1); 1991 + spin_unlock(&card->tx_chain.lock); 1992 + } 1993 + 1994 + spider_net_free_chain(card, &card->tx_chain); 1995 + spider_net_free_chain(card, &card->rx_chain); 1996 + 1997 + return 0; 1841 1998 } 1842 1999 1843 2000 /** ··· 1911 1982 goto out; 1912 1983 1913 1984 spider_net_open(netdev); 1914 - spider_net_kick_tx_dma(card, card->tx_chain.head); 1985 + spider_net_kick_tx_dma(card); 1915 1986 netif_device_attach(netdev); 1916 1987 1917 1988 out: ··· 1994 2065 1995 2066 pci_set_drvdata(card->pdev, netdev); 1996 2067 1997 - atomic_set(&card->tx_chain_release,0); 1998 2068 card->rxram_full_tl.data = (unsigned long) card; 1999 2069 card->rxram_full_tl.func = 2000 2070 (void (*)(unsigned long)) spider_net_handle_rxram_full; ··· 2007 2079 2008 2080 spider_net_setup_netdev_ops(netdev); 2009 2081 2010 - netdev->features = NETIF_F_HW_CSUM; 2082 + netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; 2011 2083 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2012 2084 * NETIF_F_HW_VLAN_FILTER */ 2013 2085
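[review note] The spider_net rework replaces the atomic reference-count dance (rx_chain_refill/tx_chain_release) with a per-chain spinlock; a context that cannot take the lock simply skips the work, since whoever holds it will finish the job. Minimal sketch of the refill side under that scheme; the names are placeholders for the spider_net equivalents:

static void refill_rx(struct my_card *card)
{
	struct my_chain *chain = &card->rx_chain;
	unsigned long flags;

	/* a racing context may skip the refill: the lock holder
	 * finishes the chain, so nothing is lost by returning */
	if (!spin_trylock_irqsave(&chain->lock, flags))
		return;

	while (descr_status(chain->head) == DESCR_NOT_IN_USE) {
		if (prepare_rx_descr(card, chain->head))
			break;		/* out of memory: retry later */
		chain->head = chain->head->next;
	}

	spin_unlock_irqrestore(&chain->lock, flags);
}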
+20 -51
drivers/net/spider_net.h
··· 208 208 #define SPIDER_NET_DMA_RX_VALUE 0x80000000 209 209 #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 210 210 /* to set TX_DMA_EN */ 211 - #define SPIDER_NET_DMA_TX_VALUE 0x80000000 211 + #define SPIDER_NET_TX_DMA_EN 0x80000000 212 + #define SPIDER_NET_GDTDCEIDIS 0x00000002 213 + #define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ 214 + SPIDER_NET_GDTDCEIDIS 212 215 #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 213 216 214 217 /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ ··· 332 329 (~SPIDER_NET_TXINT) & \ 333 330 (~SPIDER_NET_RXINT) ) 334 331 335 - #define SPIDER_NET_GPREXEC 0x80000000 336 - #define SPIDER_NET_GPRDAT_MASK 0x0000ffff 332 + #define SPIDER_NET_GPREXEC 0x80000000 333 + #define SPIDER_NET_GPRDAT_MASK 0x0000ffff 337 334 338 - /* descriptor bits 339 - * 340 - * 1010 descriptor ready 341 - * 0 descr in middle of chain 342 - * 000 fixed to 0 343 - * 344 - * 0 no interrupt on completion 345 - * 000 fixed to 0 346 - * 1 no ipsec processing 347 - * 1 last descriptor for this frame 348 - * 00 no checksum 349 - * 10 tcp checksum 350 - * 11 udp checksum 351 - * 352 - * 00 fixed to 0 353 - * 0 fixed to 0 354 - * 0 no interrupt on response errors 355 - * 0 no interrupt on invalid descr 356 - * 0 no interrupt on dma process termination 357 - * 0 no interrupt on descr chain end 358 - * 0 no interrupt on descr complete 359 - * 360 - * 000 fixed to 0 361 - * 0 response error interrupt status 362 - * 0 invalid descr status 363 - * 0 dma termination status 364 - * 0 descr chain end status 365 - * 0 descr complete status */ 366 - #define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000 367 - #define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000 368 - #define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000 369 - #define SPIDER_NET_DESCR_IND_PROC_SHIFT 28 370 - #define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff 335 + #define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000 336 + #define SPIDER_NET_DMAC_NOCS 0x00040000 337 + #define SPIDER_NET_DMAC_TCP 0x00020000 338 + #define SPIDER_NET_DMAC_UDP 0x00030000 339 + #define SPIDER_NET_TXDCEST 0x08000000 371 340 372 - /* descr ready, descr is in middle of chain, get interrupt on completion */ 373 - #define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 374 - 375 - enum spider_net_descr_status { 376 - SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ 377 - SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ 378 - SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */ 379 - SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */ 380 - SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */ 381 - SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */ 382 - SPIDER_NET_DESCR_NOT_IN_USE /* any other value */ 383 - }; 341 + #define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000 342 + #define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */ 343 + #define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */ 344 + #define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */ 345 + #define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */ 346 + #define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ 347 + #define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ 348 + #define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 384 349 385 350 struct spider_net_descr { 386 351 /* as defined by the hardware */ ··· 369 398 } __attribute__((aligned(32))); 370 399 371 400 struct spider_net_descr_chain { 372 - /* we walk from tail to head */ 401 + spinlock_t lock; 373 402 struct 
spider_net_descr *head; 374 403 struct spider_net_descr *tail; 375 404 }; ··· 424 453 425 454 struct spider_net_descr_chain tx_chain; 426 455 struct spider_net_descr_chain rx_chain; 427 - atomic_t rx_chain_refill; 428 - atomic_t tx_chain_release; 429 456 430 457 struct net_device_stats netdev_stats; 431 458
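[review note] The header change explains the driver-side simplification: the old enum values (COMPLETE 0x0 through CARDOWNED 0xA) moved, pre-shifted, into bits 31..28 of dmac_cmd_status, so for example 0x0A << 28 == 0xA0000000 for CARDOWNED. A status test is now one AND plus one compare:

	/* was: (dmac_cmd_status >> IND_PROC_SHIFT) == enum value */
	if ((descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK) ==
	    SPIDER_NET_DESCR_CARDOWNED)
		/* card still owns the descriptor */;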
+2 -7
drivers/net/sunhme.c
··· 3255 3255 } 3256 3256 3257 3257 static struct pci_device_id happymeal_pci_ids[] = { 3258 - { 3259 - .vendor = PCI_VENDOR_ID_SUN, 3260 - .device = PCI_DEVICE_ID_SUN_HAPPYMEAL, 3261 - .subvendor = PCI_ANY_ID, 3262 - .subdevice = PCI_ANY_ID, 3263 - }, 3258 + { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, 3264 3259 { } /* Terminating entry */ 3265 3260 }; 3266 3261 ··· 3270 3275 3271 3276 static int __init happy_meal_pci_init(void) 3272 3277 { 3273 - return pci_module_init(&hme_pci_driver); 3278 + return pci_register_driver(&hme_pci_driver); 3274 3279 } 3275 3280 3276 3281 static void happy_meal_pci_exit(void)
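[review note] The sunhme conversion is equivalence-preserving. PCI_DEVICE() fills in the same four fields the open-coded initializer listed:

	{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }
	/* expands to: */
	{ .vendor = PCI_VENDOR_ID_SUN, .device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }

and pci_register_driver() is the non-deprecated spelling of pci_module_init(), which by this point was already a thin alias for it.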
+2 -2
drivers/net/wan/c101.c
··· 197 197 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); 198 198 199 199 set_carrier(port); 200 - printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port)); 201 200 202 201 /* enable MSCI1 CDCD interrupt */ 203 202 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); ··· 448 449 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 449 450 MODULE_DESCRIPTION("Moxa C101 serial port driver"); 450 451 MODULE_LICENSE("GPL v2"); 451 - module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */ 452 + module_param(hw, charp, 0444); 453 + MODULE_PARM_DESC(hw, "irq,ram:irq,...");
+1
drivers/net/wan/hdlc_ppp.c
··· 107 107 dev->hard_header = NULL; 108 108 dev->type = ARPHRD_PPP; 109 109 dev->addr_len = 0; 110 + netif_dormant_off(dev); 110 111 return 0; 111 112 } 112 113
+1
drivers/net/wan/hdlc_raw.c
··· 82 82 dev->type = ARPHRD_RAWHDLC; 83 83 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 84 84 dev->addr_len = 0; 85 + netif_dormant_off(dev); 85 86 return 0; 86 87 } 87 88
+1
drivers/net/wan/hdlc_raw_eth.c
··· 100 100 dev->tx_queue_len = old_qlen; 101 101 memcpy(dev->dev_addr, "\x00\x01", 2); 102 102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); 103 + netif_dormant_off(dev); 103 104 return 0; 104 105 } 105 106
+1
drivers/net/wan/hdlc_x25.c
··· 212 212 dev->hard_header = NULL; 213 213 dev->type = ARPHRD_X25; 214 214 dev->addr_len = 0; 215 + netif_dormant_off(dev); 215 216 return 0; 216 217 } 217 218
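[review note] The four identical hdlc_* one-liners above matter for the then-new RFC 2863 operstate reporting: these protocol attach hooks manage carrier themselves, so each clears the dormant flag during setup, after which netif_carrier_on()/netif_carrier_off() alone decide whether operstate reads UP or DOWN. (This is a reading of the hunks, not taken from a changelog; protocols with their own liveness handshake would presumably keep the device dormant until the handshake completes.)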
+2 -1
drivers/net/wan/n2.c
··· 564 564 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 565 565 MODULE_DESCRIPTION("RISCom/N2 serial port driver"); 566 566 MODULE_LICENSE("GPL v2"); 567 - module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */ 567 + module_param(hw, charp, 0444); 568 + MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
+14 -3
drivers/s390/block/xpram.c
··· 304 304 { 305 305 unsigned long mem_needed; 306 306 unsigned long mem_auto; 307 + unsigned long long size; 307 308 int mem_auto_no; 308 309 int i; 309 310 ··· 322 321 mem_needed = 0; 323 322 mem_auto_no = 0; 324 323 for (i = 0; i < xpram_devs; i++) { 325 - if (sizes[i]) 326 - xpram_sizes[i] = 327 - (memparse(sizes[i], &sizes[i]) + 3) & -4UL; 324 + if (sizes[i]) { 325 + size = simple_strtoull(sizes[i], &sizes[i], 0); 326 + switch (sizes[i][0]) { 327 + case 'g': 328 + case 'G': 329 + size <<= 20; 330 + break; 331 + case 'm': 332 + case 'M': 333 + size <<= 10; 334 + } 335 + xpram_sizes[i] = (size + 3) & -4UL; 336 + } 328 337 if (xpram_sizes[i]) 329 338 mem_needed += xpram_sizes[i]; 330 339 else
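[review note] The unusual shift amounts in the new parser ('g' by 20, 'm' by 10, where byte units would need 30 and 20) only add up if xpram_sizes[] is accounted in kilobytes. Under that assumption the hunk also fixes the old memparse() variant, which returned bytes and so over-counted suffixed values by a factor of 1024. Worked example, assuming KB units:

	"2G"   -> 2ULL << 20 == 2097152 KB == 2 GiB
	"512M" -> 512 << 10  ==  524288 KB == 512 MiB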
+35 -17
drivers/s390/char/raw3270.c
··· 1106 1106 1107 1107 /* Remove from device chain. */ 1108 1108 mutex_lock(&raw3270_mutex); 1109 - if (rp->clttydev) 1109 + if (rp->clttydev && !IS_ERR(rp->clttydev)) 1110 1110 class_device_destroy(class3270, 1111 1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1112 - if (rp->cltubdev) 1112 + if (rp->cltubdev && !IS_ERR(rp->cltubdev)) 1113 1113 class_device_destroy(class3270, 1114 1114 MKDEV(IBM_FS3270_MAJOR, rp->minor)); 1115 1115 list_del_init(&rp->list); ··· 1173 1173 .attrs = raw3270_attrs, 1174 1174 }; 1175 1175 1176 - static void 1177 - raw3270_create_attributes(struct raw3270 *rp) 1176 + static int raw3270_create_attributes(struct raw3270 *rp) 1178 1177 { 1179 - //FIXME: check return code 1180 - sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1181 - rp->clttydev = 1182 - class_device_create(class3270, NULL, 1183 - MKDEV(IBM_TTY3270_MAJOR, rp->minor), 1184 - &rp->cdev->dev, "tty%s", 1185 - rp->cdev->dev.bus_id); 1186 - rp->cltubdev = 1187 - class_device_create(class3270, NULL, 1188 - MKDEV(IBM_FS3270_MAJOR, rp->minor), 1189 - &rp->cdev->dev, "tub%s", 1190 - rp->cdev->dev.bus_id); 1178 + int rc; 1179 + 1180 + rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1181 + if (rc) 1182 + goto out; 1183 + 1184 + rp->clttydev = class_device_create(class3270, NULL, 1185 + MKDEV(IBM_TTY3270_MAJOR, rp->minor), 1186 + &rp->cdev->dev, "tty%s", 1187 + rp->cdev->dev.bus_id); 1188 + if (IS_ERR(rp->clttydev)) { 1189 + rc = PTR_ERR(rp->clttydev); 1190 + goto out_ttydev; 1191 + } 1192 + 1193 + rp->cltubdev = class_device_create(class3270, NULL, 1194 + MKDEV(IBM_FS3270_MAJOR, rp->minor), 1195 + &rp->cdev->dev, "tub%s", 1196 + rp->cdev->dev.bus_id); 1197 + if (!IS_ERR(rp->cltubdev)) 1198 + goto out; 1199 + 1200 + rc = PTR_ERR(rp->cltubdev); 1201 + class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1202 + 1203 + out_ttydev: 1204 + sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group); 1205 + out: 1206 + return rc; 1191 1207 } 1192 1208 1193 1209 /* ··· 1271 1255 rc = raw3270_reset_device(rp); 1272 1256 if (rc) 1273 1257 goto failure; 1274 - raw3270_create_attributes(rp); 1258 + rc = raw3270_create_attributes(rp); 1259 + if (rc) 1260 + goto failure; 1275 1261 set_bit(RAW3270_FLAGS_READY, &rp->flags); 1276 1262 mutex_lock(&raw3270_mutex); 1277 1263 list_for_each_entry(np, &raw3270_notifier, list)
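[review note] The raw3270 conversion shows the two error conventions that have to be unwound together: sysfs_create_group() returns a plain errno, while class_device_create() returns an ERR_PTR()-encoded pointer and never NULL on failure, hence the IS_ERR()/PTR_ERR() checks here and the matching !IS_ERR() guards in the removal path. Condensed sketch of the ladder; my_dev and grp are placeholders:

static int create_attrs(struct my_dev *d)
{
	int rc;

	rc = sysfs_create_group(&d->kobj, &grp);	/* 0 or -errno */
	if (rc)
		goto out;

	d->cls = class_device_create(cls, NULL, d->devt, d->dev,
				     "x%d", d->id);
	if (IS_ERR(d->cls)) {		/* error pointer, never NULL */
		rc = PTR_ERR(d->cls);
		goto out_group;
	}
	return 0;

out_group:
	sysfs_remove_group(&d->kobj, &grp);
out:
	return rc;
}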
+9 -1
drivers/s390/char/tape_class.c
··· 76 76 device, 77 77 "%s", tcd->device_name 78 78 ); 79 - sysfs_create_link( 79 + rc = PTR_ERR(tcd->class_device); 80 + if (rc) 81 + goto fail_with_cdev; 82 + rc = sysfs_create_link( 80 83 &device->kobj, 81 84 &tcd->class_device->kobj, 82 85 tcd->mode_name 83 86 ); 87 + if (rc) 88 + goto fail_with_class_device; 84 89 85 90 return tcd; 91 + 92 + fail_with_class_device: 93 + class_device_destroy(tape_class, tcd->char_device->dev); 86 94 87 95 fail_with_cdev: 88 96 cdev_del(tcd->char_device);
+11 -7
drivers/s390/char/tape_core.c
··· 543 543 tape_generic_probe(struct ccw_device *cdev) 544 544 { 545 545 struct tape_device *device; 546 + int ret; 546 547 547 548 device = tape_alloc_device(); 548 549 if (IS_ERR(device)) 549 550 return -ENODEV; 550 - PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); 551 + ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 552 + ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); 553 + if (ret) { 554 + tape_put_device(device); 555 + PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id); 556 + return ret; 557 + } 551 558 cdev->dev.driver_data = device; 559 + cdev->handler = __tape_do_irq; 552 560 device->cdev = cdev; 553 561 device->cdev_id = busid_to_int(cdev->dev.bus_id); 554 - cdev->handler = __tape_do_irq; 555 - 556 - ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 557 - sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); 558 - 559 - return 0; 562 + PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); 563 + return ret; 560 564 } 561 565 562 566 static inline void
+1
drivers/s390/cio/cmf.c
··· 1068 1068 if (count) { 1069 1069 interval = cmb_data->last_update - 1070 1070 cdev->private->cmb_start_time; 1071 + interval = (interval * 1000) >> 12; 1071 1072 interval /= count; 1072 1073 } else 1073 1074 interval = -1;
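[review note] The added line is a unit conversion. The s390 TOD clock ticks in units of 2^-12 microseconds, so

	(interval * 1000) >> 12  ==  interval * 1000 / 4096   /* nanoseconds */

turns a TOD-unit delta into nanoseconds before it is divided by the sample count; multiplying before the shift preserves the low bits. Without it the average was reported in raw TOD units. (Inferred from the TOD clock definition, not from a commit message.)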
+17 -4
drivers/s390/net/ctcmain.c
··· 2686 2686 static int 2687 2687 ctc_add_attributes(struct device *dev) 2688 2688 { 2689 - device_create_file(dev, &dev_attr_loglevel); 2690 - device_create_file(dev, &dev_attr_stats); 2691 - return 0; 2689 + int rc; 2690 + 2691 + rc = device_create_file(dev, &dev_attr_loglevel); 2692 + if (rc) 2693 + goto out; 2694 + rc = device_create_file(dev, &dev_attr_stats); 2695 + if (!rc) 2696 + goto out; 2697 + device_remove_file(dev, &dev_attr_loglevel); 2698 + out: 2699 + return rc; 2692 2700 } 2693 2701 2694 2702 static void ··· 2909 2901 goto out; 2910 2902 } 2911 2903 2912 - ctc_add_attributes(&cgdev->dev); 2904 + if (ctc_add_attributes(&cgdev->dev)) { 2905 + ctc_netdev_unregister(dev); 2906 + dev->priv = NULL; 2907 + ctc_free_netdevice(dev, 1); 2908 + goto out; 2909 + } 2913 2910 2914 2911 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name)); 2915 2912
+4 -3
drivers/s390/net/qeth_main.c
··· 8451 8451 static int 8452 8452 qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) 8453 8453 { 8454 + int ret; 8454 8455 8455 - driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, 8456 - __qeth_reboot_event_card); 8457 - return NOTIFY_DONE; 8456 + ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, 8457 + __qeth_reboot_event_card); 8458 + return ret ? NOTIFY_BAD : NOTIFY_DONE; 8458 8459 } 8459 8460 8460 8461
+1 -1
drivers/sbus/sbus.c
··· 233 233 sbus->ofdev.node = dp; 234 234 sbus->ofdev.dev.parent = NULL; 235 235 sbus->ofdev.dev.bus = &sbus_bus_type; 236 - strcpy(sbus->ofdev.dev.bus_id, dp->path_component_name); 236 + sprintf(sbus->ofdev.dev.bus_id, "sbus%d", num_sbus); 237 237 238 238 if (of_device_register(&sbus->ofdev) != 0) 239 239 printk(KERN_DEBUG "sbus: device registration error for %s!\n",
+4 -4
drivers/scsi/53c7xx.c
··· 3451 3451 for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4, 3452 3452 cmd_dataout += 4, ++i) { 3453 3453 u32 vbuf = cmd->use_sg 3454 - ? (u32)page_address(((struct scatterlist *)cmd->buffer)[i].page)+ 3455 - ((struct scatterlist *)cmd->buffer)[i].offset 3454 + ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+ 3455 + ((struct scatterlist *)cmd->request_buffer)[i].offset 3456 3456 : (u32)(cmd->request_buffer); 3457 3457 u32 bbuf = virt_to_bus((void *)vbuf); 3458 3458 u32 count = cmd->use_sg ? 3459 - ((struct scatterlist *)cmd->buffer)[i].length : 3459 + ((struct scatterlist *)cmd->request_buffer)[i].length : 3460 3460 cmd->request_bufflen; 3461 3461 3462 3462 /* ··· 5417 5417 5418 5418 if ((buffers = cmd->use_sg)) { 5419 5419 for (offset = 0, 5420 - segment = (struct scatterlist *) cmd->buffer; 5420 + segment = (struct scatterlist *) cmd->request_buffer; 5421 5421 buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) && 5422 5422 (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length))))); 5423 5423 --buffers, offset += segment->length, ++segment)
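[review note] This and the NCR53C9x.c, atari_NCR5380.c and aha152x.c hunks below are the same mechanical substitution: the scsi_cmnd buffer/bufflen aliases were removed from the midlayer, and when use_sg is non-zero, request_buffer carries the scatterlist pointer that buffer used to duplicate:

	struct scatterlist *sg;

	if (cmd->use_sg)	/* request_buffer doubles as the sg table */
		sg = (struct scatterlist *)cmd->request_buffer;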
+1 -1
drivers/scsi/NCR53C9x.c
··· 911 911 sp->SCp.ptr = 912 912 (char *) virt_to_phys(sp->request_buffer); 913 913 } else { 914 - sp->SCp.buffer = (struct scatterlist *) sp->buffer; 914 + sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; 915 915 sp->SCp.buffers_residual = sp->use_sg - 1; 916 916 sp->SCp.this_residual = sp->SCp.buffer->length; 917 917 if (esp->dma_mmu_get_scsi_sgl)
+7 -7
drivers/scsi/NCR_D700.c
··· 114 114 MODULE_LICENSE("GPL"); 115 115 module_param(NCR_D700, charp, 0); 116 116 117 - static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = 117 + static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = 118 118 { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; 119 119 120 120 #ifdef MODULE ··· 173 173 char pad; 174 174 }; 175 175 176 - static int 176 + static int __devinit 177 177 NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, 178 178 int slot, u32 region, int differential) 179 179 { ··· 243 243 * essentially connectecd to the MCA bus independently, it is easier 244 244 * to set them up as two separate host adapters, rather than one 245 245 * adapter with two channels */ 246 - static int 246 + static int __devinit 247 247 NCR_D700_probe(struct device *dev) 248 248 { 249 249 struct NCR_D700_private *p; ··· 329 329 for (i = 0; i < 2; i++) { 330 330 int err; 331 331 332 - if ((err = NCR_D700_probe_one(p, i, slot, irq, 332 + if ((err = NCR_D700_probe_one(p, i, irq, slot, 333 333 offset_addr + (0x80 * i), 334 334 differential)) != 0) 335 335 printk("D700: SIOP%d: probe failed, error = %d\n", ··· 349 349 return 0; 350 350 } 351 351 352 - static void 352 + static void __devexit 353 353 NCR_D700_remove_one(struct Scsi_Host *host) 354 354 { 355 355 scsi_remove_host(host); ··· 359 359 release_region(host->base, 64); 360 360 } 361 361 362 - static int 362 + static int __devexit 363 363 NCR_D700_remove(struct device *dev) 364 364 { 365 365 struct NCR_D700_private *p = dev_get_drvdata(dev); ··· 380 380 .name = "NCR_D700", 381 381 .bus = &mca_bus_type, 382 382 .probe = NCR_D700_probe, 383 - .remove = NCR_D700_remove, 383 + .remove = __devexit_p(NCR_D700_remove), 384 384 }, 385 385 }; 386 386
+32 -11
drivers/scsi/aha152x.c
··· 551 551 struct aha152x_scdata { 552 552 Scsi_Cmnd *next; /* next sc in queue */ 553 553 struct semaphore *sem; /* semaphore to block on */ 554 + unsigned char cmd_len; 555 + unsigned char cmnd[MAX_COMMAND_SIZE]; 556 + unsigned short use_sg; 557 + unsigned request_bufflen; 558 + void *request_buffer; 554 559 }; 555 560 556 561 ··· 1011 1006 return FAILED; 1012 1007 } 1013 1008 } else { 1009 + struct aha152x_scdata *sc; 1010 + 1014 1011 SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); 1015 1012 if(SCpnt->host_scribble==0) { 1016 1013 printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); 1017 1014 return FAILED; 1018 1015 } 1016 + 1017 + sc = SCDATA(SCpnt); 1018 + memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd)); 1019 + sc->request_buffer = SCpnt->request_buffer; 1020 + sc->request_bufflen = SCpnt->request_bufflen; 1021 + sc->use_sg = SCpnt->use_sg; 1022 + sc->cmd_len = SCpnt->cmd_len; 1019 1023 } 1020 1024 1021 1025 SCNEXT(SCpnt) = NULL; ··· 1179 1165 DECLARE_MUTEX_LOCKED(sem); 1180 1166 struct timer_list timer; 1181 1167 int ret, issued, disconnected; 1168 + unsigned char old_cmd_len = SCpnt->cmd_len; 1169 + unsigned short old_use_sg = SCpnt->use_sg; 1170 + void *old_buffer = SCpnt->request_buffer; 1171 + unsigned old_bufflen = SCpnt->request_bufflen; 1182 1172 unsigned long flags; 1183 1173 1184 1174 #if defined(AHA152X_DEBUG) ··· 1216 1198 add_timer(&timer); 1217 1199 down(&sem); 1218 1200 del_timer(&timer); 1219 - 1220 - SCpnt->cmd_len = SCpnt->old_cmd_len; 1221 - SCpnt->use_sg = SCpnt->old_use_sg; 1222 - SCpnt->request_buffer = SCpnt->buffer; 1223 - SCpnt->request_bufflen = SCpnt->bufflen; 1201 + 1202 + SCpnt->cmd_len = old_cmd_len; 1203 + SCpnt->use_sg = old_use_sg; 1204 + SCpnt->request_buffer = old_buffer; 1205 + SCpnt->request_bufflen = old_bufflen; 1224 1206 1225 1207 DO_LOCK(flags); 1226 1208 ··· 1583 1565 #endif 1584 1566 1585 1567 if(DONE_SC->SCp.phase & check_condition) { 1568 + struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; 1569 + struct aha152x_scdata *sc = SCDATA(cmd); 1570 + 1586 1571 #if 0 1587 1572 if(HOSTDATA(shpnt)->debug & debug_eh) { 1588 1573 printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); ··· 1594 1573 #endif 1595 1574 1596 1575 /* restore old command */ 1597 - memcpy((void *) DONE_SC->cmnd, (void *) DONE_SC->data_cmnd, sizeof(DONE_SC->data_cmnd)); 1598 - DONE_SC->request_buffer = DONE_SC->buffer; 1599 - DONE_SC->request_bufflen = DONE_SC->bufflen; 1600 - DONE_SC->use_sg = DONE_SC->old_use_sg; 1601 - DONE_SC->cmd_len = DONE_SC->old_cmd_len; 1576 + memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd)); 1577 + cmd->request_buffer = sc->request_buffer; 1578 + cmd->request_bufflen = sc->request_bufflen; 1579 + cmd->use_sg = sc->use_sg; 1580 + cmd->cmd_len = sc->cmd_len; 1602 1581 1603 - DONE_SC->SCp.Status = 0x02; 1582 + cmd->SCp.Status = 0x02; 1604 1583 1605 1584 HOSTDATA(shpnt)->commands--; 1606 1585 if (!HOSTDATA(shpnt)->commands)
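[review note] aha152x is the one driver here that still needs the removed midlayer fields, so it snapshots them into its per-command scdata before rewriting the command for autosense and puts them back when the REQUEST SENSE completes. Shape of the save/restore pair, condensed from the hunks above:

	/* on queue: remember what the midlayer handed us */
	sc = SCDATA(SCpnt);
	memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd));
	sc->request_buffer  = SCpnt->request_buffer;
	sc->request_bufflen = SCpnt->request_bufflen;
	sc->use_sg          = SCpnt->use_sg;
	sc->cmd_len         = SCpnt->cmd_len;

	/* on check-condition completion: restore the original command */
	memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd));
	cmd->request_buffer  = sc->request_buffer;
	cmd->request_bufflen = sc->request_bufflen;
	cmd->use_sg          = sc->use_sg;
	cmd->cmd_len         = sc->cmd_len;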
+1 -1
drivers/scsi/aic7xxx/aic79xx_core.c
··· 7289 7289 ahd->flags &= ~AHD_UPDATE_PEND_CMDS; 7290 7290 } 7291 7291 7292 - void 7292 + static void 7293 7293 ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status) 7294 7294 { 7295 7295 cam_status ostat;
-21
drivers/scsi/aic7xxx/aic79xx_osm.c
··· 243 243 static uint32_t aic79xx_no_reset; 244 244 245 245 /* 246 - * Certain PCI motherboards will scan PCI devices from highest to lowest, 247 - * others scan from lowest to highest, and they tend to do all kinds of 248 - * strange things when they come into contact with PCI bridge chips. The 249 - * net result of all this is that the PCI card that is actually used to boot 250 - * the machine is very hard to detect. Most motherboards go from lowest 251 - * PCI slot number to highest, and the first SCSI controller found is the 252 - * one you boot from. The only exceptions to this are when a controller 253 - * has its BIOS disabled. So, we by default sort all of our SCSI controllers 254 - * from lowest PCI slot number to highest PCI slot number. We also force 255 - * all controllers with their BIOS disabled to the end of the list. This 256 - * works on *almost* all computers. Where it doesn't work, we have this 257 - * option. Setting this option to non-0 will reverse the order of the sort 258 - * to highest first, then lowest, but will still leave cards with their BIOS 259 - * disabled at the very end. That should fix everyone up unless there are 260 - * really strange cirumstances. 261 - */ 262 - static uint32_t aic79xx_reverse_scan; 263 - 264 - /* 265 246 * Should we force EXTENDED translation on a controller. 266 247 * 0 == Use whatever is in the SEEPROM or default to off 267 248 * 1 == Use whatever is in the SEEPROM or default to on ··· 331 350 " periodically to prevent tag starvation.\n" 332 351 " This may be required by some older disk\n" 333 352 " or drives/RAID arrays.\n" 334 - " reverse_scan Sort PCI devices highest Bus/Slot to lowest\n" 335 353 " tag_info:<tag_str> Set per-target tag depth\n" 336 354 " global_tag_depth:<int> Global tag depth for all targets on all buses\n" 337 355 " slewrate:<slewrate_list>Set the signal slew rate (0-15).\n" ··· 1011 1031 #ifdef AHD_DEBUG 1012 1032 { "debug", &ahd_debug }, 1013 1033 #endif 1014 - { "reverse_scan", &aic79xx_reverse_scan }, 1015 1034 { "periodic_otag", &aic79xx_periodic_otag }, 1016 1035 { "pci_parity", &aic79xx_pci_parity }, 1017 1036 { "seltime", &aic79xx_seltime },
-1
drivers/scsi/aic7xxx/aic7xxx_osm.c
··· 353 353 " periodically to prevent tag starvation.\n" 354 354 " This may be required by some older disk\n" 355 355 " drives or RAID arrays.\n" 356 - " reverse_scan Sort PCI devices highest Bus/Slot to lowest\n" 357 356 " tag_info:<tag_str> Set per-target tag depth\n" 358 357 " global_tag_depth:<int> Global tag depth for every target\n" 359 358 " on every bus\n"
+1 -1
drivers/scsi/atari_NCR5380.c
··· 507 507 */ 508 508 509 509 if (cmd->use_sg) { 510 - cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 510 + cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; 511 511 cmd->SCp.buffers_residual = cmd->use_sg - 1; 512 512 cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+ 513 513 cmd->SCp.buffer->offset;
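This one-line conversion is the first of several identical ones below (esp, jazz_esp, mac53c94, mesh, pluto, qlogicpti, scsi.c): with the buffer/bufflen mirror fields going away, request_buffer becomes the single source of truth for the data descriptor. A minimal sketch of the convention, not tied to any one driver:

	if (cmd->use_sg) {
		/* request_buffer holds cmd->use_sg struct scatterlist entries */
		struct scatterlist *sg =
			(struct scatterlist *) cmd->request_buffer;
		/* ... map and walk sg[0 .. cmd->use_sg - 1] ... */
	} else {
		/* request_buffer is a flat buffer of request_bufflen bytes */
		void *buf = cmd->request_buffer;
		unsigned int len = cmd->request_bufflen;
		/* ... single-segment mapping ... */
	}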
+88 -38
drivers/scsi/constants.c
··· 5 5 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) 6 6 * by D. Gilbert and aeb (20020609) 7 7 * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025 8 + * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702 8 9 */ 9 10 10 11 #include <linux/blkdev.h> ··· 37 36 /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", 38 37 /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, 39 38 "Reassign Blocks", 40 - /* 08-0d */ "Read (6)", NULL, "Write (6)", "Seek (6)", NULL, NULL, 39 + /* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL, 41 40 /* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", 42 - /* 13-16 */ "Verify (6)", "Recover Buffered Data", "Mode Select (6)", 43 - "Reserve (6)", 41 + /* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)", 42 + "Reserve(6)", 44 - /* 17-1a */ "Release (6)", "Copy", "Erase", "Mode Sense (6)", 43 + /* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)", 45 44 /* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", 46 45 /* 1e-1f */ "Prevent/Allow Medium Removal", NULL, 47 46 /* 20-22 */ NULL, NULL, NULL, 48 47 /* 23-28 */ "Read Format Capacities", "Set Window", 49 - "Read Capacity (10)", NULL, NULL, "Read (10)", 48 + "Read Capacity(10)", NULL, NULL, "Read(10)", 50 - /* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase (10)", 51 - "Read updated block", 49 + /* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)", 50 + "Read updated block", 52 - /* 2e-31 */ "Write Verify (10)", "Verify (10)", "Search High", "Search Equal", 51 + /* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal", 53 52 /* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", 54 - /* 35-37 */ "Synchronize Cache (10)", "Lock/Unlock Cache (10)", 53 + /* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)", 55 54 "Read Defect Data(10)", 56 55 /* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", 57 56 "Read Buffer", 58 - /* 3d-3f */ "Update Block", "Read Long (10)", "Write Long (10)", 57 + /* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)", 59 - /* 40-41 */ "Change Definition", "Write Same (10)", 58 + /* 40-41 */ "Change Definition", "Write Same(10)", 60 59 /* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", 61 - "Play audio (10)", "Get configuration", "Play audio msf", 60 + "Play audio(10)", "Get configuration", "Play audio msf", 62 61 "Play audio track/index", 63 - /* 49-4f */ "Play track relative (10)", "Get event status notification", 62 + /* 49-4f */ "Play track relative(10)", "Get event status notification", 64 63 "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", 65 64 NULL, 66 65 /* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info", 67 - "Reserve track", "Send OPC info", "Mode Select (10)", 66 + "Reserve track", "Send OPC info", "Mode Select(10)", 68 - /* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue", 67 + /* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue", 69 68 "Mode Sense (10)", "Close track/session", 70 69 /* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in", "Persistent reserve out", 72 71 /* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 73 72 /* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 74 73 /* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, 75 74 /* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length", 76 - /* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy", 75 + /* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy", 77 76 "Receive copy results", 78 - /* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out", "Read (16)", "Memory Export Out (16)", 77 + /* 85-89 */ "ATA command pass through(16)", "Access control in", 78 "Access control out", "Read(16)", "Memory Export Out(16)", 80 - /* 8a-8f */ "Write (16)", NULL, "Read attributes", "Write attributes", "Write and verify (16)", "Verify (16)", 79 + /* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes", 80 "Write and verify(16)", "Verify(16)", 82 - /* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)", "Lock/unlock cache (16)", "Write same (16)", NULL, 81 + /* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)", 82 "Lock/unlock cache(16)", "Write same(16)", NULL, 84 83 /* 95-99 */ NULL, NULL, NULL, NULL, NULL, 85 - /* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in (16)", "Service action out (16)", 84 + /* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)", 85 "Service action out(16)", 87 - /* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance in", "Maintenance out", "Move medium/play audio(12)", 86 + /* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank", 87 "Security protocol in", "Maintenance in", "Maintenance out", 88 "Move medium/play audio(12)", 89 89 /* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)", 90 90 "Play track relative(12)", 91 91 /* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance", ··· 94 92 /* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)", 95 93 /* b2-b4 */ "Search data low(12)", "Set limits(12)", 96 94 "Read element status attached", 97 - /* b5-b6 */ "Request volume element address", "Send volume tag, set streaming", 95 + /* b5-b6 */ "Security protocol out", "Send volume tag, set streaming", 98 96 /* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", 99 97 /* ba-bc */ "Redundancy group (in), Scan", 100 - "Redundancy group (out), Set cd-rom speed", "Spare in, Play cd", 98 + "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd", 101 - /* bd-bf */ "Spare out, Mechanism status", "Volume set in, Read cd", "Volume set out, Send DVD structure", 99 + /* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd", 100 "Volume set (out), Send DVD structure", 103 101 }; 104 102 105 103 struct value_name_pair { ··· 114 112 {0xc, "Report supported operation codes"}, 115 113 {0xd, "Report supported task management functions"}, 116 114 {0xe, "Report priority"}, 115 + {0xf, "Report timestamp"}, 117 116 }; 118 117 #define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) ··· 123 120 {0xa, "Set target port groups"}, 124 121 {0xb, "Change aliases"}, 125 122 {0xe, "Set priority"}, 123 + {0xf, "Set timestamp"}, 126 124 }; 127 125 #define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) ··· 431 427 {0x001A, "Rewind operation in progress"}, 432 428 {0x001B, "Set capacity operation in progress"}, 433 429 {0x001C, "Verify operation in progress"}, 430 + {0x001D, "ATA pass through information available"}, 434 431 435 432 {0x0100, "No index/sector signal"}, ··· 443 438 444 439 {0x0400, "Logical unit not ready, cause not reportable"}, 445 440 {0x0401, "Logical unit is in process of becoming ready"}, 446 - {0x0402, "Logical
unit not ready, initializing cmd. required"}, 441 + {0x0402, "Logical unit not ready, initializing command required"}, 447 442 {0x0403, "Logical unit not ready, manual intervention required"}, 448 443 {0x0404, "Logical unit not ready, format in progress"}, 449 444 {0x0405, "Logical unit not ready, rebuild in progress"}, ··· 483 478 {0x0B00, "Warning"}, 484 479 {0x0B01, "Warning - specified temperature exceeded"}, 485 480 {0x0B02, "Warning - enclosure degraded"}, 481 + {0x0B03, "Warning - background self-test failed"}, 482 + {0x0B04, "Warning - background pre-scan detected medium error"}, 483 + {0x0B05, "Warning - background medium scan detected medium error"}, 486 484 487 485 {0x0C00, "Write error"}, 488 486 {0x0C01, "Write error - recovered with auto reallocation"}, ··· 501 493 {0x0C0B, "Auxiliary memory write error"}, 502 494 {0x0C0C, "Write error - unexpected unsolicited data"}, 503 495 {0x0C0D, "Write error - not enough unsolicited data"}, 496 + {0x0C0F, "Defects in error window"}, 504 497 505 498 {0x0D00, "Error detected by third party temporary initiator"}, 506 499 {0x0D01, "Third party device failure"}, ··· 513 504 {0x0E00, "Invalid information unit"}, 514 505 {0x0E01, "Information unit too short"}, 515 506 {0x0E02, "Information unit too long"}, 507 + {0x0E03, "Invalid field in command information unit"}, 516 508 517 509 {0x1000, "Id CRC or ECC error"}, 518 - {0x1001, "Data block guard check failed"}, 519 - {0x1002, "Data block application tag check failed"}, 520 - {0x1003, "Data block reference tag check failed"}, 510 + {0x1001, "Logical block guard check failed"}, 511 + {0x1002, "Logical block application tag check failed"}, 512 + {0x1003, "Logical block reference tag check failed"}, 521 513 522 514 {0x1100, "Unrecovered read error"}, 523 515 {0x1101, "Read retries exhausted"}, ··· 540 530 {0x1111, "Read error - loss of streaming"}, 541 531 {0x1112, "Auxiliary memory read error"}, 542 532 {0x1113, "Read error - failed retransmission request"}, 533 + {0x1114, "Read error - lba marked bad by application client"}, 543 534 544 535 {0x1200, "Address mark not found for id field"}, 545 536 ··· 621 610 {0x2100, "Logical block address out of range"}, 622 611 {0x2101, "Invalid element address"}, 623 612 {0x2102, "Invalid address for write"}, 613 + {0x2103, "Invalid write crossing layer jump"}, 624 614 625 615 {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, 626 616 627 617 {0x2400, "Invalid field in cdb"}, 628 618 {0x2401, "CDB decryption error"}, 619 + {0x2402, "Obsolete"}, 620 + {0x2403, "Obsolete"}, 629 621 {0x2404, "Security audit value frozen"}, 630 622 {0x2405, "Security working key frozen"}, 631 623 {0x2406, "Nonce not unique"}, ··· 651 637 {0x260C, "Invalid operation for copy source or destination"}, 652 638 {0x260D, "Copy segment granularity violation"}, 653 639 {0x260E, "Invalid parameter while port is enabled"}, 654 - {0x260F, "Invalid data-out buffer integrity"}, 640 + {0x260F, "Invalid data-out buffer integrity check value"}, 641 + {0x2610, "Data decryption key fail limit reached"}, 642 + {0x2611, "Incomplete key-associated data set"}, 643 + {0x2612, "Vendor specific key reference not found"}, 655 644 656 645 {0x2700, "Write protected"}, 657 646 {0x2701, "Hardware write protected"}, ··· 666 649 667 650 {0x2800, "Not ready to ready change, medium may have changed"}, 668 651 {0x2801, "Import or export element accessed"}, 652 + {0x2802, "Format-layer may have changed"}, 669 653 670 654 {0x2900, "Power on, reset, or bus device reset occurred"}, 671 655 {0x2901, "Power on 
occurred"}, ··· 687 669 {0x2A07, "Implicit asymmetric access state transition failed"}, 688 670 {0x2A08, "Priority changed"}, 689 671 {0x2A09, "Capacity data has changed"}, 672 + {0x2A10, "Timestamp changed"}, 673 + {0x2A11, "Data encryption parameters changed by another i_t nexus"}, 674 + {0x2A12, "Data encryption parameters changed by vendor specific " 675 + "event"}, 676 + {0x2A13, "Data encryption key instance counter has changed"}, 690 677 691 678 {0x2B00, "Copy cannot execute since host cannot disconnect"}, 692 679 ··· 713 690 {0x2E00, "Insufficient time for operation"}, 714 691 715 692 {0x2F00, "Commands cleared by another initiator"}, 693 + {0x2F01, "Commands cleared by power loss notification"}, 716 694 717 695 {0x3000, "Incompatible medium installed"}, 718 696 {0x3001, "Cannot read medium - unknown format"}, ··· 726 702 {0x3008, "Cannot write - application code mismatch"}, 727 703 {0x3009, "Current session not fixated for append"}, 728 704 {0x300A, "Cleaning request rejected"}, 729 - {0x300C, "WORM medium, overwrite attempted"}, 705 + {0x300C, "WORM medium - overwrite attempted"}, 706 + {0x300D, "WORM medium - integrity check"}, 730 707 {0x3010, "Medium not formatted"}, 731 708 732 709 {0x3100, "Medium format corrupted"}, ··· 815 790 {0x3F0F, "Echo buffer overwritten"}, 816 791 {0x3F10, "Medium loadable"}, 817 792 {0x3F11, "Medium auxiliary memory accessible"}, 793 + {0x3F12, "iSCSI IP address added"}, 794 + {0x3F13, "iSCSI IP address removed"}, 795 + {0x3F14, "iSCSI IP address changed"}, 818 796 /* 819 797 * {0x40NN, "Ram failure"}, 820 798 * {0x40NN, "Diagnostic failure on component nn"}, ··· 827 799 {0x4300, "Message error"}, 828 800 829 801 {0x4400, "Internal target failure"}, 802 + {0x4471, "ATA device failed set features"}, 830 803 831 804 {0x4500, "Select or reselect failure"}, 832 805 ··· 836 807 {0x4700, "Scsi parity error"}, 837 808 {0x4701, "Data phase CRC error detected"}, 838 809 {0x4702, "Scsi parity error detected during st data phase"}, 839 - {0x4703, "Information unit CRC error detected"}, 810 + {0x4703, "Information unit iuCRC error detected"}, 840 811 {0x4704, "Asynchronous information protection error detected"}, 841 812 {0x4705, "Protocol service CRC error"}, 813 + {0x4706, "Phy test function in progress"}, 842 814 {0x477f, "Some commands cleared by iSCSI Protocol event"}, 843 815 844 816 {0x4800, "Initiator detected error message received"}, ··· 874 844 {0x5300, "Media load or eject failed"}, 875 845 {0x5301, "Unload tape failure"}, 876 846 {0x5302, "Medium removal prevented"}, 847 + {0x5303, "Medium removal prevented by data transfer element"}, 848 + {0x5304, "Medium thread or unthread failure"}, 877 849 878 850 {0x5400, "Scsi to host system interface failure"}, 879 851 ··· 887 855 {0x5505, "Insufficient access control resources"}, 888 856 {0x5506, "Auxiliary memory out of space"}, 889 857 {0x5507, "Quota error"}, 858 + {0x5508, "Maximum number of supplemental decryption keys exceeded"}, 890 859 891 860 {0x5700, "Unable to recover table-of-contents"}, 892 861 ··· 1037 1004 {0x6708, "Assign failure occurred"}, 1038 1005 {0x6709, "Multiply assigned logical unit"}, 1039 1006 {0x670A, "Set target port groups command failed"}, 1007 + {0x670B, "ATA device feature not enabled"}, 1040 1008 1041 1009 {0x6800, "Logical unit not configured"}, 1042 1010 ··· 1064 1030 {0x6F03, "Read of scrambled sector without authentication"}, 1065 1031 {0x6F04, "Media region code is mismatched to logical unit region"}, 1066 1032 {0x6F05, "Drive region must be permanent/region reset 
count error"}, 1033 + {0x6F06, "Insufficient block count for binding nonce recording"}, 1034 + {0x6F07, "Conflict in binding nonce recording"}, 1067 1035 /* 1068 1036 * {0x70NN, "Decompression exception short algorithm id of nn"}, 1069 1037 */ ··· 1077 1041 {0x7203, "Session fixation error - incomplete track in session"}, 1078 1042 {0x7204, "Empty or partially written reserved track"}, 1079 1043 {0x7205, "No more track reservations allowed"}, 1044 + {0x7206, "RMZ extension is not allowed"}, 1045 + {0x7207, "No more test zone extensions are allowed"}, 1080 1046 1081 1047 {0x7300, "Cd control error"}, 1082 1048 {0x7301, "Power calibration area almost full"}, ··· 1087 1049 {0x7304, "Program memory area update failure"}, 1088 1050 {0x7305, "Program memory area is full"}, 1089 1051 {0x7306, "RMA/PMA is almost full"}, 1052 + {0x7310, "Current power calibration area almost full"}, 1053 + {0x7311, "Current power calibration area is full"}, 1054 + {0x7317, "RDZ is full"}, 1055 + 1056 + {0x7400, "Security error"}, 1057 + {0x7401, "Unable to decrypt data"}, 1058 + {0x7402, "Unencrypted data encountered while decrypting"}, 1059 + {0x7403, "Incorrect data encryption key"}, 1060 + {0x7404, "Cryptographic integrity validation failed"}, 1061 + {0x7405, "Error decrypting data"}, 1062 + {0x7471, "Logical unit access not authorized"}, 1063 + 1090 1064 {0, NULL} 1091 1065 }; 1092 1066
+2 -2
drivers/scsi/esp.c
··· 1397 1397 sp->SCp.ptr = NULL; 1398 1398 } 1399 1399 } else { 1400 - sp->SCp.buffer = (struct scatterlist *) sp->buffer; 1400 + sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; 1401 1401 sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, 1402 1402 sp->SCp.buffer, 1403 1403 sp->use_sg, ··· 1410 1410 static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) 1411 1411 { 1412 1412 if (sp->use_sg) { 1413 - sbus_unmap_sg(esp->sdev, sp->buffer, sp->use_sg, 1413 + sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg, 1414 1414 sp->sc_data_direction); 1415 1415 } else if (sp->request_bufflen) { 1416 1416 sbus_unmap_single(esp->sdev,
+1 -1
drivers/scsi/ibmvscsi/iseries_vscsi.c
··· 81 81 int rc; 82 82 83 83 single_host_data = hostdata; 84 - rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0); 84 + rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests); 85 85 if (rc < 0) { 86 86 printk("viopath_open failed with rc %d in open_event_path\n", 87 87 rc);
+1
drivers/scsi/ibmvscsi/rpa_vscsi.c
··· 238 238 if (rc == 2) { 239 239 /* Adapter is good, but other end is not ready */ 240 240 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); 241 + retrc = 0; 241 242 } else if (rc != 0) { 242 243 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); 243 244 goto reg_crq_failed;
+1 -1
drivers/scsi/jazz_esp.c
··· 257 257 static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 258 258 { 259 259 int sz = sp->use_sg - 1; 260 - struct scatterlist *sg = (struct scatterlist *)sp->buffer; 260 + struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; 261 261 262 262 while(sz >= 0) { 263 263 vdma_free(sg[sz].dma_address);
+5 -4
drivers/scsi/lpfc/lpfc.h
··· 21 21 22 22 struct lpfc_sli2_slim; 23 23 24 - #define LPFC_MAX_TARGET 256 /* max targets supported */ 25 - #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */ 26 - #define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */ 27 24 25 + #define LPFC_MAX_TARGET 256 /* max number of targets supported */ 26 + #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els 27 + requests */ 28 + #define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact 29 + the NameServer before giving up. */ 28 30 #define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */ 29 31 #define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */ 30 32 #define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */ ··· 43 41 (( (u64)(high)<<16 ) << 16)|( (u64)(low)))) 44 42 /* Provide maximum configuration definitions. */ 45 43 #define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */ 46 - #define MAX_FCP_TARGET 256 /* max num of FCP targets supported */ 47 44 #define FC_MAX_ADPTMSG 64 48 45 49 46 #define MAX_HBAEVT 32
+59 -48
drivers/scsi/lpfc/lpfc_attr.c
··· 219 219 return -ENOMEM; 220 220 221 221 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 222 - lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); 222 + pmboxq->mb.mbxCommand = MBX_DOWN_LINK; 223 + pmboxq->mb.mbxOwner = OWN_HOST; 224 + 223 225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 226 + 227 + if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { 228 + memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 229 + lpfc_init_link(phba, pmboxq, phba->cfg_topology, 230 + phba->cfg_link_speed); 231 + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, 232 + phba->fc_ratov * 2); 233 + } 224 234 225 235 if (mbxstatus == MBX_TIMEOUT) 226 236 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; ··· 243 233 return 0; 244 234 } 245 235 236 + static int 237 + lpfc_selective_reset(struct lpfc_hba *phba) 238 + { 239 + struct completion online_compl; 240 + int status = 0; 241 + 242 + init_completion(&online_compl); 243 + lpfc_workq_post_event(phba, &status, &online_compl, 244 + LPFC_EVT_OFFLINE); 245 + wait_for_completion(&online_compl); 246 + 247 + if (status != 0) 248 + return -EIO; 249 + 250 + init_completion(&online_compl); 251 + lpfc_workq_post_event(phba, &status, &online_compl, 252 + LPFC_EVT_ONLINE); 253 + wait_for_completion(&online_compl); 254 + 255 + if (status != 0) 256 + return -EIO; 257 + 258 + return 0; 259 + } 260 + 261 + static ssize_t 262 + lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count) 263 + { 264 + struct Scsi_Host *host = class_to_shost(cdev); 265 + struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 266 + int status = -EINVAL; 267 + 268 + if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) 269 + status = lpfc_selective_reset(phba); 270 + 271 + if (status == 0) 272 + return strlen(buf); 273 + else 274 + return status; 275 + } 276 + 246 277 static ssize_t 247 278 lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) 248 279 { 249 280 struct Scsi_Host *host = class_to_shost(cdev); 250 281 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 251 282 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 252 - } 253 - 254 - static ssize_t 255 - lpfc_board_online_show(struct class_device *cdev, char *buf) 256 - { 257 - struct Scsi_Host *host = class_to_shost(cdev); 258 - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 259 - 260 - if (phba->fc_flag & FC_OFFLINE_MODE) 261 - return snprintf(buf, PAGE_SIZE, "0\n"); 262 - else 263 - return snprintf(buf, PAGE_SIZE, "1\n"); 264 - } 265 - 266 - static ssize_t 267 - lpfc_board_online_store(struct class_device *cdev, const char *buf, 268 - size_t count) 269 - { 270 - struct Scsi_Host *host = class_to_shost(cdev); 271 - struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 272 - struct completion online_compl; 273 - int val=0, status=0; 274 - 275 - if (sscanf(buf, "%d", &val) != 1) 276 - return -EINVAL; 277 - 278 - init_completion(&online_compl); 279 - 280 - if (val) 281 - lpfc_workq_post_event(phba, &status, &online_compl, 282 - LPFC_EVT_ONLINE); 283 - else 284 - lpfc_workq_post_event(phba, &status, &online_compl, 285 - LPFC_EVT_OFFLINE); 286 - wait_for_completion(&online_compl); 287 - if (!status) 288 - return strlen(buf); 289 - else 290 - return -EIO; 291 283 } 292 284 293 285 static ssize_t ··· 544 532 NULL); 545 533 static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show, 546 534 NULL); 547 - static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR, 548 - lpfc_board_online_show, 
lpfc_board_online_store); 549 535 static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 550 536 lpfc_board_mode_show, lpfc_board_mode_store); 537 + static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 551 538 552 539 static int lpfc_poll = 0; 553 540 module_param(lpfc_poll, int, 0); ··· 706 695 "during discovery"); 707 696 708 697 /* 709 - # lpfc_max_luns: maximum number of LUNs per target driver will support 710 - # Value range is [1,32768]. Default value is 256. 711 - # NOTE: The SCSI layer will scan each target for this many luns 698 + # lpfc_max_luns: maximum allowed LUN. 699 + # Value range is [0,65535]. Default value is 255. 700 + # NOTE: The SCSI layer might probe all allowed LUN on some old targets. 712 701 */ 713 - LPFC_ATTR_R(max_luns, 256, 1, 32768, 714 - "Maximum number of LUNs per target driver will support"); 702 + LPFC_ATTR_R(max_luns, 255, 0, 65535, 703 + "Maximum allowed LUN"); 715 704 716 705 /* 717 706 # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring. ··· 750 739 &class_device_attr_lpfc_max_luns, 751 740 &class_device_attr_nport_evt_cnt, 752 741 &class_device_attr_management_version, 753 - &class_device_attr_board_online, 754 742 &class_device_attr_board_mode, 743 + &class_device_attr_issue_reset, 755 744 &class_device_attr_lpfc_poll, 756 745 &class_device_attr_lpfc_poll_tmo, 757 746 NULL,
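Net effect of the attribute swap: the read/write board_online toggle is gone, and a reset is instead requested through the new write-only issue_reset attribute, which accepts only the string "selective" (anything else fails with -EINVAL) and runs lpfc_selective_reset(), i.e. a queued offline event followed by an online event. From userspace that is, with a hypothetical host number:

	echo selective > /sys/class/scsi_host/host0/issue_reset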
+1
drivers/scsi/lpfc/lpfc_crtn.h
··· 147 147 int lpfc_sli_hba_down(struct lpfc_hba *); 148 148 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 149 149 int lpfc_sli_handle_mb_event(struct lpfc_hba *); 150 + int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); 150 151 int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 151 152 struct lpfc_sli_ring *, uint32_t); 152 153 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+37 -28
drivers/scsi/lpfc/lpfc_els.c
··· 648 648 } 649 649 650 650 static struct lpfc_nodelist * 651 - lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 651 + lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp, 652 652 struct lpfc_nodelist *ndlp) 653 653 { 654 654 struct lpfc_nodelist *new_ndlp; 655 - struct lpfc_dmabuf *pcmd, *prsp; 656 655 uint32_t *lp; 657 656 struct serv_parm *sp; 658 657 uint8_t name[sizeof (struct lpfc_name)]; 659 658 uint32_t rc; 660 659 661 - pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 662 - prsp = (struct lpfc_dmabuf *) pcmd->list.next; 663 660 lp = (uint32_t *) prsp->virt; 664 661 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 662 + memset(name, 0, sizeof (struct lpfc_name)); 665 663 666 664 /* Now we to find out if the NPort we are logging into, matches the WWPN 667 665 * we have for that ndlp. If not, we have some work to do. 668 666 */ 669 667 new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName); 670 668 671 - memset(name, 0, sizeof (struct lpfc_name)); 672 - rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); 673 - if (!rc || (new_ndlp == ndlp)) { 669 + if (new_ndlp == ndlp) 674 670 return ndlp; 675 - } 676 671 677 672 if (!new_ndlp) { 673 + rc = 674 + memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); 675 + if (!rc) 676 + return ndlp; 678 677 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 679 678 if (!new_ndlp) 680 679 return ndlp; ··· 682 683 } 683 684 684 685 lpfc_unreg_rpi(phba, new_ndlp); 685 - new_ndlp->nlp_prev_state = ndlp->nlp_state; 686 686 new_ndlp->nlp_DID = ndlp->nlp_DID; 687 - new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; 688 - lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST); 687 + new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 688 + new_ndlp->nlp_state = ndlp->nlp_state; 689 + lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK); 689 690 690 691 /* Move this back to NPR list */ 691 - lpfc_unreg_rpi(phba, ndlp); 692 - ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 693 - ndlp->nlp_state = NLP_STE_NPR_NODE; 694 - lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 695 - 692 + if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 693 + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 694 + } 695 + else { 696 + lpfc_unreg_rpi(phba, ndlp); 697 + ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */ 698 + ndlp->nlp_state = NLP_STE_NPR_NODE; 699 + lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 700 + } 696 701 return new_ndlp; 697 702 } 698 703 ··· 706 703 { 707 704 IOCB_t *irsp; 708 705 struct lpfc_nodelist *ndlp; 706 + struct lpfc_dmabuf *prsp; 709 707 int disc, rc, did, type; 710 708 711 709 ··· 773 769 } 774 770 } else { 775 771 /* Good status, call state machine */ 776 - ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp); 772 + prsp = list_entry(((struct lpfc_dmabuf *) 773 + cmdiocb->context2)->list.next, 774 + struct lpfc_dmabuf, list); 775 + ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp); 777 776 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, 778 777 NLP_EVT_CMPL_PLOGI); 779 778 } ··· 3289 3282 } else 3290 3283 lpfc_sli_release_iocbq(phba, piocb); 3291 3284 } 3292 - if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) { 3293 - phba->els_tmofunc.expires = jiffies + HZ * timeout; 3294 - add_timer(&phba->els_tmofunc); 3295 - } 3285 + if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 3286 + mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); 3287 + 3296 3288 spin_unlock_irq(phba->host->host_lock); 3297 3289 } 3298 3290 ··· 3448 3442 if ((did & 
Fabric_DID_MASK) == Fabric_DID_MASK) { 3449 3443 ndlp->nlp_type |= NLP_FABRIC; 3450 3444 } 3445 + ndlp->nlp_state = NLP_STE_UNUSED_NODE; 3446 + lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); 3451 3447 } 3452 3448 3453 3449 phba->fc_stat.elsRcvFrame++; ··· 3471 3463 rjt_err = 1; 3472 3464 break; 3473 3465 } 3466 + ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp); 3474 3467 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); 3475 3468 break; 3476 3469 case ELS_CMD_FLOGI: 3477 3470 phba->fc_stat.elsRcvFLOGI++; 3478 3471 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode); 3479 3472 if (newnode) { 3480 - mempool_free( ndlp, phba->nlp_mem_pool); 3473 + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3481 3474 } 3482 3475 break; 3483 3476 case ELS_CMD_LOGO: ··· 3501 3492 phba->fc_stat.elsRcvRSCN++; 3502 3493 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode); 3503 3494 if (newnode) { 3504 - mempool_free( ndlp, phba->nlp_mem_pool); 3495 + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3505 3496 } 3506 3497 break; 3507 3498 case ELS_CMD_ADISC: ··· 3544 3535 phba->fc_stat.elsRcvLIRR++; 3545 3536 lpfc_els_rcv_lirr(phba, elsiocb, ndlp); 3546 3537 if (newnode) { 3547 - mempool_free( ndlp, phba->nlp_mem_pool); 3538 + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3548 3539 } 3549 3540 break; 3550 3541 case ELS_CMD_RPS: 3551 3542 phba->fc_stat.elsRcvRPS++; 3552 3543 lpfc_els_rcv_rps(phba, elsiocb, ndlp); 3553 3544 if (newnode) { 3554 - mempool_free( ndlp, phba->nlp_mem_pool); 3545 + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3555 3546 } 3556 3547 break; 3557 3548 case ELS_CMD_RPL: 3558 3549 phba->fc_stat.elsRcvRPL++; 3559 3550 lpfc_els_rcv_rpl(phba, elsiocb, ndlp); 3560 3551 if (newnode) { 3561 - mempool_free( ndlp, phba->nlp_mem_pool); 3552 + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3562 3553 } 3563 3554 break; 3564 3555 case ELS_CMD_RNID: 3565 3556 phba->fc_stat.elsRcvRNID++; 3566 3557 lpfc_els_rcv_rnid(phba, elsiocb, ndlp); 3567 3558 if (newnode) { 3568 - mempool_free( ndlp, phba->nlp_mem_pool); 3559 + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3569 3560 } 3570 3561 break; 3571 3562 default: ··· 3577 3568 "%d:0115 Unknown ELS command x%x received from " 3578 3569 "NPORT x%x\n", phba->brd_no, cmd, did); 3579 3570 if (newnode) { 3580 - mempool_free( ndlp, phba->nlp_mem_pool); 3571 + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 3581 3572 } 3582 3573 break; 3583 3574 }
+2 -2
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 1084 1084 fc_remote_port_rolechg(rport, rport_ids.roles); 1085 1085 1086 1086 if ((rport->scsi_target_id != -1) && 1087 - (rport->scsi_target_id < MAX_FCP_TARGET)) { 1087 + (rport->scsi_target_id < LPFC_MAX_TARGET)) { 1088 1088 ndlp->nlp_sid = rport->scsi_target_id; 1089 1089 } 1090 1090 ··· 1313 1313 if ((rport_add == mapped) && 1314 1314 ((!nlp->rport) || 1315 1315 (nlp->rport->scsi_target_id == -1) || 1316 - (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) { 1316 + (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) { 1317 1317 nlp->nlp_state = NLP_STE_UNMAPPED_NODE; 1318 1318 spin_lock_irq(phba->host->host_lock); 1319 1319 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
+39 -20
drivers/scsi/lpfc/lpfc_init.c
··· 71 71 uint16_t offset = 0; 72 72 static char licensed[56] = 73 73 "key unlock for use with gnu public licensed code only\0"; 74 + static int init_key = 1; 74 75 75 76 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 76 77 if (!pmb) { ··· 83 82 phba->hba_state = LPFC_INIT_MBX_CMDS; 84 83 85 84 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 86 - uint32_t *ptext = (uint32_t *) licensed; 85 + if (init_key) { 86 + uint32_t *ptext = (uint32_t *) licensed; 87 87 88 - for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 89 - *ptext = cpu_to_be32(*ptext); 88 + for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 89 + *ptext = cpu_to_be32(*ptext); 90 + init_key = 0; 91 + } 90 92 91 93 lpfc_read_nv(phba, pmb); 92 94 memset((char*)mb->un.varRDnvp.rsvd3, 0, ··· 409 405 } 410 406 /* MBOX buffer will be freed in mbox compl */ 411 407 412 - i = 0; 408 + return (0); 409 + } 410 + 411 + static int 412 + lpfc_discovery_wait(struct lpfc_hba *phba) 413 + { 414 + int i = 0; 415 + 413 416 while ((phba->hba_state != LPFC_HBA_READY) || 414 417 (phba->num_disc_nodes) || (phba->fc_prli_sent) || 415 418 ((phba->fc_map_cnt == 0) && (i<2)) || 416 - (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { 419 + (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) { 417 420 /* Check every second for 30 retries. */ 418 421 i++; 419 422 if (i > 30) { 420 - break; 423 + return -ETIMEDOUT; 421 424 } 422 425 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { 423 426 /* The link is down. Set linkdown timeout */ 424 - break; 427 + return -ETIMEDOUT; 425 428 } 426 429 427 430 /* Delay for 1 second to give discovery time to complete. */ ··· 436 425 437 426 } 438 427 439 - /* Since num_disc_nodes keys off of PLOGI, delay a bit to let 440 - * any potential PRLIs to flush thru the SLI sub-system. 441 - */ 442 - msleep(50); 443 - 444 - return (0); 428 + return 0; 445 429 } 446 430 447 431 /************************************************************************/ ··· 1345 1339 struct lpfc_sli_ring *pring; 1346 1340 struct lpfc_sli *psli; 1347 1341 unsigned long iflag; 1348 - int i = 0; 1342 + int i; 1343 + int cnt = 0; 1349 1344 1350 1345 if (!phba) 1351 1346 return 0; ··· 1355 1348 return 0; 1356 1349 1357 1350 psli = &phba->sli; 1358 - pring = &psli->ring[psli->fcp_ring]; 1359 1351 1360 1352 lpfc_linkdown(phba); 1353 + lpfc_sli_flush_mbox_queue(phba); 1361 1354 1362 - /* The linkdown event takes 30 seconds to timeout. */ 1363 - while (pring->txcmplq_cnt) { 1364 - mdelay(10); 1365 - if (i++ > 3000) 1366 - break; 1355 + for (i = 0; i < psli->num_rings; i++) { 1356 + pring = &psli->ring[i]; 1357 + /* The linkdown event takes 30 seconds to timeout. */ 1358 + while (pring->txcmplq_cnt) { 1359 + mdelay(10); 1360 + if (cnt++ > 3000) { 1361 + lpfc_printf_log(phba, 1362 + KERN_WARNING, LOG_INIT, 1363 + "%d:0466 Outstanding IO when " 1364 + "bringing Adapter offline\n", 1365 + phba->brd_no); 1366 + break; 1367 + } 1368 + } 1367 1369 } 1370 + 1368 1371 1369 1372 /* stop all timers associated with this hba */ 1370 1373 lpfc_stop_timer(phba); ··· 1655 1638 error = -ENODEV; 1656 1639 goto out_free_irq; 1657 1640 } 1641 + 1642 + lpfc_discovery_wait(phba); 1658 1643 1659 1644 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1660 1645 spin_lock_irq(phba->host->host_lock);
+5
drivers/scsi/lpfc/lpfc_mem.c
··· 133 133 134 134 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 135 135 pci_pool_destroy(phba->lpfc_mbuf_pool); 136 + 137 + /* Free the iocb lookup array */ 138 + kfree(psli->iocbq_lookup); 139 + psli->iocbq_lookup = NULL; 140 + 136 141 } 137 142 138 143 void *
+11
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 1110 1110 phba->brd_no, 1111 1111 did, mb->mbxStatus, phba->hba_state); 1112 1112 1113 + /* 1114 + * If RegLogin failed due to lack of HBA resources do not 1115 + * retry discovery. 1116 + */ 1117 + if (mb->mbxStatus == MBXERR_RPI_FULL) { 1118 + ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 1119 + ndlp->nlp_state = NLP_STE_UNUSED_NODE; 1120 + lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST); 1121 + return ndlp->nlp_state; 1122 + } 1123 + 1113 1124 /* Put ndlp in npr list set plogi timer for 1 sec */ 1114 1125 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1115 1126 spin_lock_irq(phba->host->host_lock);
+37 -27
drivers/scsi/lpfc/lpfc_scsi.c
··· 153 153 lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 154 154 { 155 155 unsigned long iflag = 0; 156 - /* 157 - * There are only two special cases to consider. (1) the scsi command 158 - * requested scatter-gather usage or (2) the scsi command allocated 159 - * a request buffer, but did not request use_sg. There is a third 160 - * case, but it does not require resource deallocation. 161 - */ 162 - if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { 163 - dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, 164 - psb->seg_cnt, psb->pCmd->sc_data_direction); 165 - } else { 166 - if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { 167 - dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, 168 - psb->pCmd->request_bufflen, 169 - psb->pCmd->sc_data_direction); 170 - } 171 - } 172 156 173 157 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 174 158 psb->pCmd = NULL; ··· 263 279 iocb_cmd->ulpLe = 1; 264 280 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen); 265 281 return 0; 282 + } 283 + 284 + static void 285 + lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 286 + { 287 + /* 288 + * There are only two special cases to consider. (1) the scsi command 289 + * requested scatter-gather usage or (2) the scsi command allocated 290 + * a request buffer, but did not request use_sg. There is a third 291 + * case, but it does not require resource deallocation. 292 + */ 293 + if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { 294 + dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, 295 + psb->seg_cnt, psb->pCmd->sc_data_direction); 296 + } else { 297 + if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { 298 + dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, 299 + psb->pCmd->request_bufflen, 300 + psb->pCmd->sc_data_direction); 301 + } 302 + } 266 303 } 267 304 268 305 static void ··· 459 454 cmd->scsi_done(cmd); 460 455 461 456 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 457 + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 462 458 lpfc_release_scsi_buf(phba, lpfc_cmd); 463 459 return; 464 460 } ··· 517 511 } 518 512 } 519 513 514 + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 520 515 lpfc_release_scsi_buf(phba, lpfc_cmd); 521 516 } 522 517 ··· 616 609 static int 617 610 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, 618 611 struct lpfc_scsi_buf *lpfc_cmd, 612 + unsigned int lun, 619 613 uint8_t task_mgmt_cmd) 620 614 { 621 615 struct lpfc_sli *psli; ··· 635 627 piocb = &piocbq->iocb; 636 628 637 629 fcp_cmnd = lpfc_cmd->fcp_cmnd; 638 - int_to_scsilun(lpfc_cmd->pCmd->device->lun, 639 - &lpfc_cmd->fcp_cmnd->fcp_lun); 630 + int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun); 640 631 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 641 632 642 633 piocb->ulpCommand = CMD_FCP_ICMND64_CR; ··· 662 655 663 656 static int 664 657 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, 665 - unsigned tgt_id, struct lpfc_rport_data *rdata) 658 + unsigned tgt_id, unsigned int lun, 659 + struct lpfc_rport_data *rdata) 666 660 { 667 661 struct lpfc_iocbq *iocbq; 668 662 struct lpfc_iocbq *iocbqrsp; 669 663 int ret; 670 664 671 665 lpfc_cmd->rdata = rdata; 672 - ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); 666 + ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, 667 + FCP_TARGET_RESET); 673 668 if (!ret) 674 669 return FAILED; 675 670 ··· 831 822 return 0; 832 823 833 824 out_host_busy_free_buf: 825 + lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 834 826 lpfc_release_scsi_buf(phba, 
lpfc_cmd); 835 827 out_host_busy: 836 828 return SCSI_MLQUEUE_HOST_BUSY; ··· 979 969 if (lpfc_cmd == NULL) 980 970 goto out; 981 971 982 - lpfc_cmd->pCmd = cmnd; 983 972 lpfc_cmd->timeout = 60; 984 973 lpfc_cmd->scsi_hba = phba; 985 974 lpfc_cmd->rdata = rdata; 986 975 987 - ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); 976 + ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun, 977 + FCP_LUN_RESET); 988 978 if (!ret) 989 979 goto out_free_scsi_buf; 990 980 ··· 1011 1001 cmd_status = iocbqrsp->iocb.ulpStatus; 1012 1002 1013 1003 lpfc_sli_release_iocbq(phba, iocbqrsp); 1014 - lpfc_release_scsi_buf(phba, lpfc_cmd); 1015 1004 1016 1005 /* 1017 1006 * All outstanding txcmplq I/Os should have been aborted by the device. ··· 1049 1040 } 1050 1041 1051 1042 out_free_scsi_buf: 1043 + lpfc_release_scsi_buf(phba, lpfc_cmd); 1044 + 1052 1045 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1053 1046 "%d:0713 SCSI layer issued LUN reset (%d, %d) " 1054 1047 "Data: x%x x%x x%x\n", ··· 1081 1070 1082 1071 /* The lpfc_cmd storage is reused. Set all loop invariants. */ 1083 1072 lpfc_cmd->timeout = 60; 1084 - lpfc_cmd->pCmd = cmnd; 1085 1073 lpfc_cmd->scsi_hba = phba; 1086 1074 1087 1075 /* ··· 1088 1078 * targets known to the driver. Should any target reset 1089 1079 * fail, this routine returns failure to the midlayer. 1090 1080 */ 1091 - for (i = 0; i < MAX_FCP_TARGET; i++) { 1081 + for (i = 0; i < LPFC_MAX_TARGET; i++) { 1092 1082 /* Search the mapped list for this target ID */ 1093 1083 match = 0; 1094 1084 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { ··· 1100 1090 if (!match) 1101 1091 continue; 1102 1092 1103 - ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, 1104 - i, ndlp->rport->dd_data); 1093 + ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun, 1094 + ndlp->rport->dd_data); 1105 1095 if (ret != SUCCESS) { 1106 1096 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1107 1097 "%d:0713 Bus Reset on target %d failed\n",
+24 -31
drivers/scsi/lpfc/lpfc_sli.c
··· 191 191 lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, 192 192 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) 193 193 { 194 - uint16_t iotag; 195 - 196 194 list_add_tail(&piocb->list, &pring->txcmplq); 197 195 pring->txcmplq_cnt++; 198 196 if (unlikely(pring->ringno == LPFC_ELS_RING)) 199 197 mod_timer(&phba->els_tmofunc, 200 198 jiffies + HZ * (phba->fc_ratov << 1)); 201 199 202 - if (pring->fast_lookup) { 203 - /* Setup fast lookup based on iotag for completion */ 204 - iotag = piocb->iocb.ulpIoTag; 205 - if (iotag && (iotag < pring->fast_iotag)) 206 - *(pring->fast_lookup + iotag) = piocb; 207 - else { 208 - 209 - /* Cmd ring <ringno> put: iotag <iotag> greater then 210 - configured max <fast_iotag> wd0 <icmd> */ 211 - lpfc_printf_log(phba, 212 - KERN_ERR, 213 - LOG_SLI, 214 - "%d:0316 Cmd ring %d put: iotag x%x " 215 - "greater then configured max x%x " 216 - "wd0 x%x\n", 217 - phba->brd_no, 218 - pring->ringno, iotag, 219 - pring->fast_iotag, 220 - *(((uint32_t *)(&piocb->iocb)) + 7)); 221 - } 222 - } 223 200 return (0); 224 201 } 225 202 ··· 578 601 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus 579 602 <status> */ 580 603 lpfc_printf_log(phba, 581 - KERN_ERR, 604 + KERN_WARNING, 582 605 LOG_MBOX | LOG_SLI, 583 606 "%d:0304 Stray Mailbox Interrupt " 584 607 "mbxCommand x%x mbxStatus x%x\n", ··· 1547 1570 1548 1571 void lpfc_reset_barrier(struct lpfc_hba * phba) 1549 1572 { 1550 - uint32_t * resp_buf; 1551 - uint32_t * mbox_buf; 1573 + uint32_t __iomem *resp_buf; 1574 + uint32_t __iomem *mbox_buf; 1552 1575 volatile uint32_t mbox; 1553 1576 uint32_t hc_copy; 1554 1577 int i; ··· 1564 1587 * Tell the other part of the chip to suspend temporarily all 1565 1588 * its DMA activity. 1566 1589 */ 1567 - resp_buf = (uint32_t *)phba->MBslimaddr; 1590 + resp_buf = phba->MBslimaddr; 1568 1591 1569 1592 /* Disable the error attention */ 1570 1593 hc_copy = readl(phba->HCregaddr); ··· 1582 1605 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 1583 1606 1584 1607 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 1585 - mbox_buf = (uint32_t *)phba->MBslimaddr; 1608 + mbox_buf = phba->MBslimaddr; 1586 1609 writel(mbox, mbox_buf); 1587 1610 1588 1611 for (i = 0; ··· 1782 1805 skip_post = 0; 1783 1806 word0 = 0; /* This is really setting up word1 */ 1784 1807 } 1785 - to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t); 1808 + to_slim = phba->MBslimaddr + sizeof (uint32_t); 1786 1809 writel(*(uint32_t *) mb, to_slim); 1787 1810 readl(to_slim); /* flush */ 1788 1811 ··· 2636 2659 2637 2660 INIT_LIST_HEAD(&(pring->txq)); 2638 2661 2639 - kfree(pring->fast_lookup); 2640 - pring->fast_lookup = NULL; 2641 2662 } 2642 2663 2643 2664 spin_unlock_irqrestore(phba->host->host_lock, flags); ··· 3083 3108 set_current_state(TASK_RUNNING); 3084 3109 remove_wait_queue(&done_q, &wq_entry); 3085 3110 return retval; 3111 + } 3112 + 3113 + int 3114 + lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 3115 + { 3116 + int i = 0; 3117 + 3118 + while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) { 3119 + if (i++ > LPFC_MBOX_TMO * 1000) 3120 + return 1; 3121 + 3122 + if (lpfc_sli_handle_mb_event(phba) == 0) 3123 + i = 0; 3124 + 3125 + msleep(1); 3126 + } 3127 + 3128 + return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 3086 3129 } 3087 3130 3088 3131 irqreturn_t
-2
drivers/scsi/lpfc/lpfc_sli.h
··· 135 135 uint32_t fast_iotag; /* max fastlookup based iotag */ 136 136 uint32_t iotag_ctr; /* keeps track of the next iotag to use */ 137 137 uint32_t iotag_max; /* max iotag value to use */ 138 - struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by 139 - iotag */ 140 138 struct list_head txq; 141 139 uint16_t txq_cnt; /* current length of queue */ 142 140 uint16_t txq_max; /* max length */
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 18 18 * included with this package. * 19 19 *******************************************************************/ 20 20 21 - #define LPFC_DRIVER_VERSION "8.1.6" 21 + #define LPFC_DRIVER_VERSION "8.1.7" 22 22 23 23 #define LPFC_DRIVER_NAME "lpfc" 24 24
+1 -1
drivers/scsi/mac53c94.c
··· 378 378 int nseg; 379 379 380 380 total = 0; 381 - scl = (struct scatterlist *) cmd->buffer; 381 + scl = (struct scatterlist *) cmd->request_buffer; 382 382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, 383 383 cmd->sc_data_direction); 384 384 for (i = 0; i < nseg; ++i) {
+1 -1
drivers/scsi/mesh.c
··· 1268 1268 if (cmd->use_sg > 0) { 1269 1269 int nseg; 1270 1270 total = 0; 1271 - scl = (struct scatterlist *) cmd->buffer; 1271 + scl = (struct scatterlist *) cmd->request_buffer; 1272 1272 off = ms->data_ptr; 1273 1273 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, 1274 1274 cmd->sc_data_direction);
-2
drivers/scsi/pluto.c
··· 169 169 SCpnt->request->rq_status = RQ_SCSI_BUSY; 170 170 171 171 SCpnt->done = pluto_detect_done; 172 - SCpnt->bufflen = 256; 173 - SCpnt->buffer = fcs[i].inquiry; 174 172 SCpnt->request_bufflen = 256; 175 173 SCpnt->request_buffer = fcs[i].inquiry; 176 174 PLD(("set up %d %08lx\n", i, (long)SCpnt))
+2 -2
drivers/scsi/qlogicpti.c
··· 874 874 if (Cmnd->use_sg) { 875 875 int sg_count; 876 876 877 - sg = (struct scatterlist *) Cmnd->buffer; 877 + sg = (struct scatterlist *) Cmnd->request_buffer; 878 878 sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction); 879 879 880 880 ds = cmd->dataseg; ··· 1278 1278 1279 1279 if (Cmnd->use_sg) { 1280 1280 sbus_unmap_sg(qpti->sdev, 1281 - (struct scatterlist *)Cmnd->buffer, 1281 + (struct scatterlist *)Cmnd->request_buffer, 1282 1282 Cmnd->use_sg, 1283 1283 Cmnd->sc_data_direction); 1284 1284 } else {
+1 -10
drivers/scsi/scsi.c
··· 346 346 if (level > 3) { 347 347 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 348 348 " done = 0x%p, queuecommand 0x%p\n", 349 - cmd->buffer, cmd->bufflen, 349 + cmd->request_buffer, cmd->request_bufflen, 350 350 cmd->done, 351 351 sdev->host->hostt->queuecommand); 352 352 ··· 661 661 */ 662 662 int scsi_retry_command(struct scsi_cmnd *cmd) 663 663 { 664 - /* 665 - * Restore the SCSI command state. 666 - */ 667 - scsi_setup_cmd_retry(cmd); 668 - 669 664 /* 670 665 * Zero the sense information from the last time we tried 671 666 * this command. ··· 706 711 "Notifying upper driver of completion " 707 712 "(result %x)\n", cmd->result)); 708 713 709 - /* 710 - * We can get here with use_sg=0, causing a panic in the upper level 711 - */ 712 - cmd->use_sg = cmd->old_use_sg; 713 714 cmd->done(cmd); 714 715 } 715 716 EXPORT_SYMBOL(scsi_finish_command);
+54 -18
drivers/scsi/scsi_debug.c
··· 286 286 int dev_id_num, const char * dev_id_str, 287 287 int dev_id_str_len); 288 288 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id); 289 - static void do_create_driverfs_files(void); 289 + static int do_create_driverfs_files(void); 290 290 static void do_remove_driverfs_files(void); 291 291 292 292 static int sdebug_add_adapter(void); ··· 2487 2487 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2488 2488 sdebug_add_host_store); 2489 2489 2490 - static void do_create_driverfs_files(void) 2490 + static int do_create_driverfs_files(void) 2491 2491 { 2492 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); 2493 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); 2494 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2495 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2496 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2497 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2498 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2499 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 2500 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2501 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 2502 - driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2492 + int ret; 2493 + 2494 + ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); 2495 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); 2496 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2497 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2498 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2499 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2500 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2501 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 2502 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2503 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 2504 + ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2505 + return ret; 2503 2506 } 2504 2507 2505 2508 static void do_remove_driverfs_files(void) ··· 2525 2522 unsigned int sz; 2526 2523 int host_to_add; 2527 2524 int k; 2525 + int ret; 2528 2526 2529 2527 if (scsi_debug_dev_size_mb < 1) 2530 2528 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ ··· 2564 2560 if (scsi_debug_num_parts > 0) 2565 2561 sdebug_build_parts(fake_storep); 2566 2562 2567 - init_all_queued(); 2563 + ret = device_register(&pseudo_primary); 2564 + if (ret < 0) { 2565 + printk(KERN_WARNING "scsi_debug: device_register error: %d\n", 2566 + ret); 2567 + goto free_vm; 2568 + } 2569 + ret = bus_register(&pseudo_lld_bus); 2570 + if (ret < 0) { 2571 + printk(KERN_WARNING "scsi_debug: bus_register error: %d\n", 2572 + ret); 2573 + goto dev_unreg; 2574 + } 2575 + ret = driver_register(&sdebug_driverfs_driver); 2576 + if (ret < 0) { 2577 + printk(KERN_WARNING "scsi_debug: driver_register error: %d\n", 2578 + ret); 2579 + goto bus_unreg; 2580 + } 2581 + ret = do_create_driverfs_files(); 2582 + if (ret < 0) { 2583 + printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n", 2584 + ret); 2585 + goto del_files; 2586 + } 2568 2587 2569 - device_register(&pseudo_primary); 2570 - 
bus_register(&pseudo_lld_bus); 2571 - driver_register(&sdebug_driverfs_driver); 2572 - do_create_driverfs_files(); 2588 + init_all_queued(); 2573 2589 2574 2590 sdebug_driver_template.proc_name = (char *)sdebug_proc_name; 2575 2591 ··· 2609 2585 scsi_debug_add_host); 2610 2586 } 2611 2587 return 0; 2588 + 2589 + del_files: 2590 + do_remove_driverfs_files(); 2591 + driver_unregister(&sdebug_driverfs_driver); 2592 + bus_unreg: 2593 + bus_unregister(&pseudo_lld_bus); 2594 + dev_unreg: 2595 + device_unregister(&pseudo_primary); 2596 + free_vm: 2597 + vfree(fake_storep); 2598 + 2599 + return ret; 2612 2600 } 2613 2601 2614 2602 static void __exit scsi_debug_exit(void)
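The init path above now checks every registration and, on failure, unwinds whatever already succeeded in reverse order before bailing out. The shape of that idiom, reduced to two hypothetical resources (register_foo()/register_bar() stand in for device_register(), bus_register() and friends):

	static int register_foo(void) { return 0; }	/* placeholder stubs */
	static void unregister_foo(void) { }
	static int register_bar(void) { return 0; }

	static int example_init(void)
	{
		int ret;

		ret = register_foo();
		if (ret < 0)
			goto out;		/* nothing to undo yet */
		ret = register_bar();
		if (ret < 0)
			goto unreg_foo;		/* undo only what succeeded */
		return 0;

	unreg_foo:
		unregister_foo();
	out:
		return ret;
	}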
+89 -121
drivers/scsi/scsi_error.c
··· 460 460 * Return value: 461 461 * SUCCESS or FAILED or NEEDS_RETRY 462 462 **/ 463 - static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) 463 + static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense) 464 464 { 465 465 struct scsi_device *sdev = scmd->device; 466 466 struct Scsi_Host *shost = sdev->host; 467 + int old_result = scmd->result; 467 468 DECLARE_COMPLETION(done); 468 469 unsigned long timeleft; 469 470 unsigned long flags; 471 + unsigned char old_cmnd[MAX_COMMAND_SIZE]; 472 + enum dma_data_direction old_data_direction; 473 + unsigned short old_use_sg; 474 + unsigned char old_cmd_len; 475 + unsigned old_bufflen; 476 + void *old_buffer; 470 477 int rtn; 478 + 479 + /* 480 + * We need saved copies of a number of fields - this is because 481 + * error handling may need to overwrite these with different values 482 + * to run different commands, and once error handling is complete, 483 + * we will need to restore these values prior to running the actual 484 + * command. 485 + */ 486 + old_buffer = scmd->request_buffer; 487 + old_bufflen = scmd->request_bufflen; 488 + memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd)); 489 + old_data_direction = scmd->sc_data_direction; 490 + old_cmd_len = scmd->cmd_len; 491 + old_use_sg = scmd->use_sg; 492 + 493 + if (copy_sense) { 494 + int gfp_mask = GFP_ATOMIC; 495 + 496 + if (shost->hostt->unchecked_isa_dma) 497 + gfp_mask |= __GFP_DMA; 498 + 499 + scmd->sc_data_direction = DMA_FROM_DEVICE; 500 + scmd->request_bufflen = 252; 501 + scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask); 502 + if (!scmd->request_buffer) 503 + return FAILED; 504 + } else { 505 + scmd->request_buffer = NULL; 506 + scmd->request_bufflen = 0; 507 + scmd->sc_data_direction = DMA_NONE; 508 + } 509 + 510 + scmd->underflow = 0; 511 + scmd->use_sg = 0; 512 + scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 471 513 472 514 if (sdev->scsi_level <= SCSI_2) 473 515 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | 474 516 (sdev->lun << 5 & 0xe0); 517 + 518 + /* 519 + * Zero the sense buffer. The scsi spec mandates that any 520 + * untransferred sense data should be interpreted as being zero. 521 + */ 522 + memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); 475 523 476 524 shost->eh_action = &done; 477 525 ··· 570 522 rtn = FAILED; 571 523 } 572 524 525 + 526 + /* 527 + * Last chance to have valid sense data. 528 + */ 529 + if (copy_sense) { 530 + if (!SCSI_SENSE_VALID(scmd)) { 531 + memcpy(scmd->sense_buffer, scmd->request_buffer, 532 + sizeof(scmd->sense_buffer)); 533 + } 534 + kfree(scmd->request_buffer); 535 + } 536 + 537 + 538 + /* 539 + * Restore original data 540 + */ 541 + scmd->request_buffer = old_buffer; 542 + scmd->request_bufflen = old_bufflen; 543 + memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd)); 544 + scmd->sc_data_direction = old_data_direction; 545 + scmd->cmd_len = old_cmd_len; 546 + scmd->use_sg = old_use_sg; 547 + scmd->result = old_result; 573 548 return rtn; 574 549 } 575 550 ··· 608 537 static int scsi_request_sense(struct scsi_cmnd *scmd) 609 538 { 610 539 static unsigned char generic_sense[6] = 611 - {REQUEST_SENSE, 0, 0, 0, 252, 0}; 612 - unsigned char *scsi_result; 613 - int saved_result; 614 - int rtn; 540 + {REQUEST_SENSE, 0, 0, 0, 252, 0}; 615 541 616 542 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); 617 - 618 - scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? 
__GFP_DMA : 0)); 619 - 620 - 621 - if (unlikely(!scsi_result)) { 622 - printk(KERN_ERR "%s: cannot allocate scsi_result.\n", 623 - __FUNCTION__); 624 - return FAILED; 625 - } 626 - 627 - /* 628 - * zero the sense buffer. some host adapters automatically always 629 - * request sense, so it is not a good idea that 630 - * scmd->request_buffer and scmd->sense_buffer point to the same 631 - * address (db). 0 is not a valid sense code. 632 - */ 633 - memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); 634 - memset(scsi_result, 0, 252); 635 - 636 - saved_result = scmd->result; 637 - scmd->request_buffer = scsi_result; 638 - scmd->request_bufflen = 252; 639 - scmd->use_sg = 0; 640 - scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 641 - scmd->sc_data_direction = DMA_FROM_DEVICE; 642 - scmd->underflow = 0; 643 - 644 - rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); 645 - 646 - /* last chance to have valid sense data */ 647 - if(!SCSI_SENSE_VALID(scmd)) { 648 - memcpy(scmd->sense_buffer, scmd->request_buffer, 649 - sizeof(scmd->sense_buffer)); 650 - } 651 - 652 - kfree(scsi_result); 653 - 654 - /* 655 - * when we eventually call scsi_finish, we really wish to complete 656 - * the original request, so let's restore the original data. (db) 657 - */ 658 - scsi_setup_cmd_retry(scmd); 659 - scmd->result = saved_result; 660 - return rtn; 543 + return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1); 661 544 } 662 545 663 546 /** ··· 630 605 { 631 606 scmd->device->host->host_failed--; 632 607 scmd->eh_eflags = 0; 633 - 634 - /* 635 - * set this back so that the upper level can correctly free up 636 - * things. 637 - */ 638 - scsi_setup_cmd_retry(scmd); 639 608 list_move_tail(&scmd->eh_entry, done_q); 640 609 } 641 610 EXPORT_SYMBOL(scsi_eh_finish_cmd); ··· 734 715 { 735 716 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; 736 717 int retry_cnt = 1, rtn; 737 - int saved_result; 738 718 739 719 retry_tur: 740 720 memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); 741 721 742 - /* 743 - * zero the sense buffer. the scsi spec mandates that any 744 - * untransferred sense data should be interpreted as being zero. 745 - */ 746 - memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); 747 722 748 - saved_result = scmd->result; 749 - scmd->request_buffer = NULL; 750 - scmd->request_bufflen = 0; 751 - scmd->use_sg = 0; 752 - scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 753 - scmd->underflow = 0; 754 - scmd->sc_data_direction = DMA_NONE; 723 + rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0); 755 724 756 - rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); 757 - 758 - /* 759 - * when we eventually call scsi_finish, we really wish to complete 760 - * the original request, so let's restore the original data. (db) 761 - */ 762 - scsi_setup_cmd_retry(scmd); 763 - scmd->result = saved_result; 764 - 765 - /* 766 - * hey, we are done. let's look to see what happened. 
767 - */ 768 725 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 769 726 __FUNCTION__, scmd, rtn)); 770 - if (rtn == SUCCESS) 771 - return 0; 772 - else if (rtn == NEEDS_RETRY) { 727 + 728 + switch (rtn) { 729 + case NEEDS_RETRY: 773 730 if (retry_cnt--) 774 731 goto retry_tur; 732 + /*FALLTHRU*/ 733 + case SUCCESS: 775 734 return 0; 735 + default: 736 + return 1; 776 737 } 777 - return 1; 778 738 } 779 739 780 740 /** ··· 835 837 static int scsi_eh_try_stu(struct scsi_cmnd *scmd) 836 838 { 837 839 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; 838 - int rtn; 839 - int saved_result; 840 840 841 - if (!scmd->device->allow_restart) 842 - return 1; 841 + if (scmd->device->allow_restart) { 842 + int rtn; 843 843 844 - memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); 844 + memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); 845 + rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0); 846 + if (rtn == SUCCESS) 847 + return 0; 848 + } 845 849 846 - /* 847 - * zero the sense buffer. the scsi spec mandates that any 848 - * untransferred sense data should be interpreted as being zero. 849 - */ 850 - memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); 851 - 852 - saved_result = scmd->result; 853 - scmd->request_buffer = NULL; 854 - scmd->request_bufflen = 0; 855 - scmd->use_sg = 0; 856 - scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 857 - scmd->underflow = 0; 858 - scmd->sc_data_direction = DMA_NONE; 859 - 860 - rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT); 861 - 862 - /* 863 - * when we eventually call scsi_finish, we really wish to complete 864 - * the original request, so let's restore the original data. (db) 865 - */ 866 - scsi_setup_cmd_retry(scmd); 867 - scmd->result = saved_result; 868 - 869 - /* 870 - * hey, we are done. let's look to see what happened. 871 - */ 872 - SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 873 - __FUNCTION__, scmd, rtn)); 874 - if (rtn == SUCCESS) 875 - return 0; 876 850 return 1; 877 851 } 878 852 ··· 1654 1684 1655 1685 scmd->scsi_done = scsi_reset_provider_done_command; 1656 1686 scmd->done = NULL; 1657 - scmd->buffer = NULL; 1658 - scmd->bufflen = 0; 1659 1687 scmd->request_buffer = NULL; 1660 1688 scmd->request_bufflen = 0; 1661 1689
+6 -82
drivers/scsi/scsi_lib.c
··· 436 436 * 437 437 * Arguments: cmd - command that is ready to be queued. 438 438 * 439 - * Returns: Nothing 440 - * 441 439 * Notes: This function has the job of initializing a number of 442 440 * fields related to error handling. Typically this will 443 441 * be called once for each command, as required. 444 442 */ 445 - static int scsi_init_cmd_errh(struct scsi_cmnd *cmd) 443 + static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) 446 444 { 447 445 cmd->serial_number = 0; 448 - 449 446 memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer); 450 - 451 447 if (cmd->cmd_len == 0) 452 448 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); 453 - 454 - /* 455 - * We need saved copies of a number of fields - this is because 456 - * error handling may need to overwrite these with different values 457 - * to run different commands, and once error handling is complete, 458 - * we will need to restore these values prior to running the actual 459 - * command. 460 - */ 461 - cmd->old_use_sg = cmd->use_sg; 462 - cmd->old_cmd_len = cmd->cmd_len; 463 - cmd->sc_old_data_direction = cmd->sc_data_direction; 464 - cmd->old_underflow = cmd->underflow; 465 - memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd)); 466 - cmd->buffer = cmd->request_buffer; 467 - cmd->bufflen = cmd->request_bufflen; 468 - 469 - return 1; 470 - } 471 - 472 - /* 473 - * Function: scsi_setup_cmd_retry() 474 - * 475 - * Purpose: Restore the command state for a retry 476 - * 477 - * Arguments: cmd - command to be restored 478 - * 479 - * Returns: Nothing 480 - * 481 - * Notes: Immediately prior to retrying a command, we need 482 - * to restore certain fields that we saved above. 483 - */ 484 - void scsi_setup_cmd_retry(struct scsi_cmnd *cmd) 485 - { 486 - memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd)); 487 - cmd->request_buffer = cmd->buffer; 488 - cmd->request_bufflen = cmd->bufflen; 489 - cmd->use_sg = cmd->old_use_sg; 490 - cmd->cmd_len = cmd->old_cmd_len; 491 - cmd->sc_data_direction = cmd->sc_old_data_direction; 492 - cmd->underflow = cmd->old_underflow; 493 449 } 494 450 495 451 void scsi_device_unbusy(struct scsi_device *sdev) ··· 763 807 */ 764 808 static void scsi_release_buffers(struct scsi_cmnd *cmd) 765 809 { 766 - struct request *req = cmd->request; 767 - 768 - /* 769 - * Free up any indirection buffers we allocated for DMA purposes. 770 - */ 771 810 if (cmd->use_sg) 772 811 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 773 - else if (cmd->request_buffer != req->buffer) 774 - kfree(cmd->request_buffer); 775 812 776 813 /* 777 814 * Zero these out. They now point to freed memory, and it is 778 815 * dangerous to hang onto the pointers. 779 816 */ 780 - cmd->buffer = NULL; 781 - cmd->bufflen = 0; 782 817 cmd->request_buffer = NULL; 783 818 cmd->request_bufflen = 0; 784 819 } ··· 805 858 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 806 859 { 807 860 int result = cmd->result; 808 - int this_count = cmd->bufflen; 861 + int this_count = cmd->request_bufflen; 809 862 request_queue_t *q = cmd->device->request_queue; 810 863 struct request *req = cmd->request; 811 864 int clear_errors = 1; ··· 813 866 int sense_valid = 0; 814 867 int sense_deferred = 0; 815 868 816 - /* 817 - * Free up any indirection buffers we allocated for DMA purposes. 818 - * For the case of a READ, we need to copy the data out of the 819 - * bounce buffer and into the real buffer. 
820 - */ 821 - if (cmd->use_sg) 822 - scsi_free_sgtable(cmd->buffer, cmd->sglist_len); 823 - else if (cmd->buffer != req->buffer) { 824 - if (rq_data_dir(req) == READ) { 825 - unsigned long flags; 826 - char *to = bio_kmap_irq(req->bio, &flags); 827 - memcpy(to, cmd->buffer, cmd->bufflen); 828 - bio_kunmap_irq(to, &flags); 829 - } 830 - kfree(cmd->buffer); 831 - } 869 + scsi_release_buffers(cmd); 832 870 833 871 if (result) { 834 872 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 835 873 if (sense_valid) 836 874 sense_deferred = scsi_sense_is_deferred(&sshdr); 837 875 } 876 + 838 877 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 839 878 req->errors = result; 840 879 if (result) { ··· 839 906 } else 840 907 req->data_len = cmd->resid; 841 908 } 842 - 843 - /* 844 - * Zero these out. They now point to freed memory, and it is 845 - * dangerous to hang onto the pointers. 846 - */ 847 - cmd->buffer = NULL; 848 - cmd->bufflen = 0; 849 - cmd->request_buffer = NULL; 850 - cmd->request_bufflen = 0; 851 909 852 910 /* 853 911 * Next deal with any sectors which we were able to correctly ··· 936 1012 if (!(req->flags & REQ_QUIET)) { 937 1013 scmd_printk(KERN_INFO, cmd, 938 1014 "Volume overflow, CDB: "); 939 - __scsi_print_command(cmd->data_cmnd); 1015 + __scsi_print_command(cmd->cmnd); 940 1016 scsi_print_sense("", cmd); 941 1017 } 942 1018 /* See SSC3rXX or current. */ ··· 1067 1143 * successfully. Since this is a REQ_BLOCK_PC command the 1068 1144 * caller should check the request's errors value 1069 1145 */ 1070 - scsi_io_completion(cmd, cmd->bufflen); 1146 + scsi_io_completion(cmd, cmd->request_bufflen); 1071 1147 } 1072 1148 1073 1149 static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
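With the old shadow fields gone, scsi_io_completion() can call scsi_release_buffers() directly, which frees the scatter list and then nulls the stale pointers. The null-after-free idiom in isolation, as a hedged sketch (struct owner and release_buffers are made-up names):

#include <stdlib.h>

struct owner {
	void *request_buffer;
	unsigned request_bufflen;
};

/* Free and immediately clear: a later accidental dereference faults on
 * NULL instead of silently reading freed memory. */
static void release_buffers(struct owner *o)
{
	free(o->request_buffer);
	o->request_buffer = NULL;
	o->request_bufflen = 0;
}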
-1
drivers/scsi/scsi_priv.h
··· 57 57 58 58 /* scsi_lib.c */ 59 59 extern int scsi_maybe_unblock_host(struct scsi_device *sdev); 60 - extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd); 61 60 extern void scsi_device_unbusy(struct scsi_device *sdev); 62 61 extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 63 62 extern void scsi_next_command(struct scsi_cmnd *cmd);
+58 -6
drivers/scsi/scsi_transport_sas.c
··· 41 41 struct mutex lock; 42 42 u32 next_target_id; 43 43 u32 next_expander_id; 44 + int next_port_id; 44 45 }; 45 46 #define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) 46 47 ··· 147 146 mutex_init(&sas_host->lock); 148 147 sas_host->next_target_id = 0; 149 148 sas_host->next_expander_id = 0; 149 + sas_host->next_port_id = 0; 150 150 return 0; 151 151 } 152 152 ··· 329 327 sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", 330 328 unsigned long long); 331 329 sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 332 - //sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8); 330 + //sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int); 333 331 sas_phy_linkspeed_attr(negotiated_linkrate); 334 332 sas_phy_linkspeed_attr(minimum_linkrate_hw); 335 333 sas_phy_linkspeed_attr(minimum_linkrate); ··· 592 590 } 593 591 EXPORT_SYMBOL(sas_port_alloc); 594 592 593 + /** sas_port_alloc_num - allocate and initialize a SAS port structure 594 + * 595 + * @parent: parent device 596 + * 597 + * Allocates a SAS port structure and a number to go with it. This 598 + * interface is really for adapters where the port number has no 599 + * meaning, so the sas class should manage them. It will be added to 600 + * the device tree below the device specified by @parent which must be 601 + * either a Scsi_Host or a sas_expander_device. 602 + * 603 + * Returns %NULL on error 604 + */ 605 + struct sas_port *sas_port_alloc_num(struct device *parent) 606 + { 607 + int index; 608 + struct Scsi_Host *shost = dev_to_shost(parent); 609 + struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); 610 + 611 + /* FIXME: use idr for this eventually */ 612 + mutex_lock(&sas_host->lock); 613 + if (scsi_is_sas_expander_device(parent)) { 614 + struct sas_rphy *rphy = dev_to_rphy(parent); 615 + struct sas_expander_device *exp = rphy_to_expander_device(rphy); 616 + 617 + index = exp->next_port_id++; 618 + } else 619 + index = sas_host->next_port_id++; 620 + mutex_unlock(&sas_host->lock); 621 + return sas_port_alloc(parent, index); 622 + } 623 + EXPORT_SYMBOL(sas_port_alloc_num); 624 + 595 625 /** 596 626 * sas_port_add - add a SAS port to the device hierarchy 597 627 * ··· 691 657 list_del_init(&phy->port_siblings); 692 658 } 693 659 mutex_unlock(&port->phy_list_mutex); 660 + 661 + if (port->is_backlink) { 662 + struct device *parent = port->dev.parent; 663 + 664 + sysfs_remove_link(&port->dev.kobj, parent->bus_id); 665 + port->is_backlink = 0; 666 + } 694 667 695 668 transport_remove_device(dev); 696 669 device_del(dev); ··· 773 732 mutex_unlock(&port->phy_list_mutex); 774 733 } 775 734 EXPORT_SYMBOL(sas_port_delete_phy); 735 + 736 + void sas_port_mark_backlink(struct sas_port *port) 737 + { 738 + struct device *parent = port->dev.parent->parent->parent; 739 + 740 + if (port->is_backlink) 741 + return; 742 + port->is_backlink = 1; 743 + sysfs_create_link(&port->dev.kobj, &parent->kobj, 744 + parent->bus_id); 745 + 746 + } 747 + EXPORT_SYMBOL(sas_port_mark_backlink); 776 748 777 749 /* 778 750 * SAS remote PHY attributes. 
··· 1194 1140 1195 1141 if (identify->device_type == SAS_END_DEVICE && 1196 1142 rphy->scsi_target_id != -1) { 1197 - scsi_scan_target(&rphy->dev, parent->port_identifier, 1143 + scsi_scan_target(&rphy->dev, 0, 1198 1144 rphy->scsi_target_id, ~0, 0); 1199 1145 } 1200 1146 ··· 1296 1242 1297 1243 mutex_lock(&sas_host->lock); 1298 1244 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 1299 - struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); 1300 - 1301 1245 if (rphy->identify.device_type != SAS_END_DEVICE || 1302 1246 rphy->scsi_target_id == -1) 1303 1247 continue; 1304 1248 1305 - if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && 1249 + if ((channel == SCAN_WILD_CARD || channel == 0) && 1306 1250 (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { 1307 - scsi_scan_target(&rphy->dev, parent->port_identifier, 1251 + scsi_scan_target(&rphy->dev, 0, 1308 1252 rphy->scsi_target_id, lun, 1); 1309 1253 } 1310 1254 }
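sas_port_alloc_num() hands out port numbers from a per-host or per-expander counter held under sas_host->lock; the in-code FIXME notes an idr is the eventual goal. A toy version of the counter-under-mutex allocator, with a pthread mutex standing in for the kernel mutex (port_id_pool is an invented name; the pool's lock must be initialized, e.g. with PTHREAD_MUTEX_INITIALIZER):

#include <pthread.h>

struct port_id_pool {
	pthread_mutex_t lock;
	int next_port_id;
};

/* Allocate the next port number. Serialized so two probes racing on
 * the same host cannot hand out the same id. */
static int port_id_alloc(struct port_id_pool *pool)
{
	int id;

	pthread_mutex_lock(&pool->lock);
	id = pool->next_port_id++;
	pthread_mutex_unlock(&pool->lock);
	return id;
}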
+1 -2
drivers/scsi/sd.c
··· 502 502 SCpnt->cmnd[4] = (unsigned char) this_count; 503 503 SCpnt->cmnd[5] = 0; 504 504 } 505 - SCpnt->request_bufflen = SCpnt->bufflen = 506 - this_count * sdp->sector_size; 505 + SCpnt->request_bufflen = this_count * sdp->sector_size; 507 506 508 507 /* 509 508 * We shouldn't disconnect in the middle of a sector, so with a dumb
+1 -1
drivers/scsi/seagate.c
··· 1002 1002 } 1003 1003 #endif 1004 1004 1005 - buffer = (struct scatterlist *) SCint->buffer; 1005 + buffer = (struct scatterlist *) SCint->request_buffer; 1006 1006 len = buffer->length; 1007 1007 data = page_address(buffer->page) + buffer->offset; 1008 1008 } else {
+2 -3
drivers/scsi/sr.c
··· 360 360 "mismatch count %d, bytes %d\n", 361 361 size, SCpnt->request_bufflen); 362 362 if (SCpnt->request_bufflen > size) 363 - SCpnt->request_bufflen = SCpnt->bufflen = size; 363 + SCpnt->request_bufflen = size; 364 364 } 365 365 } 366 366 ··· 387 387 388 388 if (this_count > 0xffff) { 389 389 this_count = 0xffff; 390 - SCpnt->request_bufflen = SCpnt->bufflen = 391 - this_count * s_size; 390 + SCpnt->request_bufflen = this_count * s_size; 392 391 } 393 392 394 393 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
+3 -4
drivers/scsi/st.c
··· 368 368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], 369 369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); 370 370 if (cmdstatp->have_sense) 371 - __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 371 + __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 372 372 } ) /* end DEB */ 373 373 if (!debugging) { /* Abnormal conditions for tape */ 374 374 if (!cmdstatp->have_sense) ··· 384 384 scode != VOLUME_OVERFLOW && 385 385 SRpnt->cmd[0] != MODE_SENSE && 386 386 SRpnt->cmd[0] != TEST_UNIT_READY) { 387 - printk(KERN_WARNING "%s: Error with sense data: ", name); 388 - __scsi_print_sense("st", SRpnt->sense, 389 - SCSI_SENSE_BUFFERSIZE); 387 + 388 + __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 390 389 } 391 390 } 392 391
+1 -1
drivers/scsi/sun3_NCR5380.c
··· 517 517 */ 518 518 519 519 if (cmd->use_sg) { 520 - cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 520 + cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; 521 521 cmd->SCp.buffers_residual = cmd->use_sg - 1; 522 522 cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); 523 523 cmd->SCp.this_residual = cmd->SCp.buffer->length;
+1 -1
drivers/scsi/sun3x_esp.c
··· 347 347 static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 348 348 { 349 349 int sz = sp->use_sg - 1; 350 - struct scatterlist *sg = (struct scatterlist *)sp->buffer; 350 + struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; 351 351 352 352 while(sz >= 0) { 353 353 dvma_unmap((char *)sg[sz].dma_address);
+1 -1
drivers/scsi/wd33c93.c
··· 373 373 */ 374 374 375 375 if (cmd->use_sg) { 376 - cmd->SCp.buffer = (struct scatterlist *) cmd->buffer; 376 + cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer; 377 377 cmd->SCp.buffers_residual = cmd->use_sg - 1; 378 378 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + 379 379 cmd->SCp.buffer->offset;
+4 -3
drivers/serial/sunsab.c
··· 1047 1047 up = &sunsab_ports[inst * 2]; 1048 1048 1049 1049 err = sunsab_init_one(&up[0], op, 1050 - sizeof(union sab82532_async_regs), 1050 + 0, 1051 1051 (inst * 2) + 0); 1052 1052 if (err) 1053 1053 return err; 1054 1054 1055 - err = sunsab_init_one(&up[1], op, 0, 1055 + err = sunsab_init_one(&up[1], op, 1056 + sizeof(union sab82532_async_regs), 1056 1057 (inst * 2) + 1); 1057 1058 if (err) { 1058 1059 of_iounmap(up[0].port.membase, ··· 1118 1117 int err; 1119 1118 1120 1119 num_channels = 0; 1121 - for_each_node_by_name(dp, "su") 1120 + for_each_node_by_name(dp, "se") 1122 1121 num_channels += 2; 1123 1122 for_each_node_by_name(dp, "serial") { 1124 1123 if (of_device_is_compatible(dp, "sab82532"))
+70 -55
drivers/serial/sunzilog.c
··· 68 68 #define NUM_SUNZILOG num_sunzilog 69 69 #define NUM_CHANNELS (NUM_SUNZILOG * 2) 70 70 71 - #define KEYBOARD_LINE 0x2 72 - #define MOUSE_LINE 0x3 73 - 74 71 #define ZS_CLOCK 4915200 /* Zilog input clock rate. */ 75 72 #define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */ 76 73 ··· 1222 1225 { 1223 1226 int baud, brg; 1224 1227 1225 - if (channel == KEYBOARD_LINE) { 1226 - up->flags |= SUNZILOG_FLAG_CONS_KEYB; 1228 + if (up->flags & SUNZILOG_FLAG_CONS_KEYB) { 1227 1229 up->cflag = B1200 | CS8 | CLOCAL | CREAD; 1228 1230 baud = 1200; 1229 1231 } else { 1230 - up->flags |= SUNZILOG_FLAG_CONS_MOUSE; 1231 1232 up->cflag = B4800 | CS8 | CLOCAL | CREAD; 1232 1233 baud = 4800; 1233 1234 } ··· 1238 1243 } 1239 1244 1240 1245 #ifdef CONFIG_SERIO 1241 - static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int channel) 1246 + static void __init sunzilog_register_serio(struct uart_sunzilog_port *up) 1242 1247 { 1243 1248 struct serio *serio = &up->serio; 1244 1249 1245 1250 serio->port_data = up; 1246 1251 1247 1252 serio->id.type = SERIO_RS232; 1248 - if (channel == KEYBOARD_LINE) { 1253 + if (up->flags & SUNZILOG_FLAG_CONS_KEYB) { 1249 1254 serio->id.proto = SERIO_SUNKBD; 1250 1255 strlcpy(serio->name, "zskbd", sizeof(serio->name)); 1251 1256 } else { ··· 1254 1259 strlcpy(serio->name, "zsms", sizeof(serio->name)); 1255 1260 } 1256 1261 strlcpy(serio->phys, 1257 - (channel == KEYBOARD_LINE ? "zs/serio0" : "zs/serio1"), 1262 + ((up->flags & SUNZILOG_FLAG_CONS_KEYB) ? 1263 + "zs/serio0" : "zs/serio1"), 1258 1264 sizeof(serio->phys)); 1259 1265 1260 1266 serio->write = sunzilog_serio_write; ··· 1282 1286 (void) read_zsreg(channel, R0); 1283 1287 } 1284 1288 1285 - if (up->port.line == KEYBOARD_LINE || 1286 - up->port.line == MOUSE_LINE) { 1289 + if (up->flags & (SUNZILOG_FLAG_CONS_KEYB | 1290 + SUNZILOG_FLAG_CONS_MOUSE)) { 1287 1291 sunzilog_init_kbdms(up, up->port.line); 1288 1292 up->curregs[R9] |= (NV | MIE); 1289 1293 write_zsreg(channel, R9, up->curregs[R9]); ··· 1309 1313 spin_unlock_irqrestore(&up->port.lock, flags); 1310 1314 1311 1315 #ifdef CONFIG_SERIO 1312 - if (up->port.line == KEYBOARD_LINE || up->port.line == MOUSE_LINE) 1313 - sunzilog_register_serio(up, up->port.line); 1316 + if (up->flags & (SUNZILOG_FLAG_CONS_KEYB | 1317 + SUNZILOG_FLAG_CONS_MOUSE)) 1318 + sunzilog_register_serio(up); 1314 1319 #endif 1315 - } 1316 - 1317 - static int __devinit zs_get_instance(struct device_node *dp) 1318 - { 1319 - int ret; 1320 - 1321 - ret = of_getintprop_default(dp, "slave", -1); 1322 - if (ret != -1) 1323 - return ret; 1324 - 1325 - if (of_find_property(dp, "keyboard", NULL)) 1326 - ret = 1; 1327 - else 1328 - ret = 0; 1329 - 1330 - return ret; 1331 1320 } 1332 1321 1333 1322 static int zilog_irq = -1; 1334 1323 1335 - static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *match) 1324 + static int __devinit zs_probe(struct of_device *op, const struct of_device_id *match) 1336 1325 { 1337 - struct of_device *op = to_of_device(&dev->dev); 1326 + static int inst; 1338 1327 struct uart_sunzilog_port *up; 1339 1328 struct zilog_layout __iomem *rp; 1340 - int inst = zs_get_instance(dev->node); 1329 + int keyboard_mouse; 1341 1330 int err; 1331 + 1332 + keyboard_mouse = 0; 1333 + if (of_find_property(op->node, "keyboard", NULL)) 1334 + keyboard_mouse = 1; 1342 1335 1343 1336 sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0, 1344 1337 sizeof(struct zilog_layout), ··· 1337 1352 1338 1353 rp = sunzilog_chip_regs[inst]; 1339 1354 1340 - 
if (zilog_irq == -1) { 1355 + if (zilog_irq == -1) 1341 1356 zilog_irq = op->irqs[0]; 1342 - err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED, 1343 - "zs", sunzilog_irq_chain); 1344 - if (err) { 1345 - of_iounmap(rp, sizeof(struct zilog_layout)); 1346 - 1347 - return err; 1348 - } 1349 - } 1350 1357 1351 1358 up = &sunzilog_port_table[inst * 2]; 1352 1359 ··· 1355 1378 up[0].port.line = (inst * 2) + 0; 1356 1379 up[0].port.dev = &op->dev; 1357 1380 up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A; 1358 - if (inst == 1) 1381 + if (keyboard_mouse) 1359 1382 up[0].flags |= SUNZILOG_FLAG_CONS_KEYB; 1360 1383 sunzilog_init_hw(&up[0]); 1361 1384 ··· 1372 1395 up[1].port.line = (inst * 2) + 1; 1373 1396 up[1].port.dev = &op->dev; 1374 1397 up[1].flags |= 0; 1375 - if (inst == 1) 1398 + if (keyboard_mouse) 1376 1399 up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE; 1377 1400 sunzilog_init_hw(&up[1]); 1378 1401 1379 - if (inst != 1) { 1402 + if (!keyboard_mouse) { 1380 1403 err = uart_add_one_port(&sunzilog_reg, &up[0].port); 1381 1404 if (err) { 1382 1405 of_iounmap(rp, sizeof(struct zilog_layout)); ··· 1388 1411 of_iounmap(rp, sizeof(struct zilog_layout)); 1389 1412 return err; 1390 1413 } 1414 + } else { 1415 + printk(KERN_INFO "%s: Keyboard at MMIO %lx (irq = %d) " 1416 + "is a zs\n", 1417 + op->dev.bus_id, up[0].port.mapbase, op->irqs[0]); 1418 + printk(KERN_INFO "%s: Mouse at MMIO %lx (irq = %d) " 1419 + "is a zs\n", 1420 + op->dev.bus_id, up[1].port.mapbase, op->irqs[0]); 1391 1421 } 1392 1422 1393 - dev_set_drvdata(&dev->dev, &up[0]); 1423 + dev_set_drvdata(&op->dev, &up[0]); 1424 + 1425 + inst++; 1394 1426 1395 1427 return 0; 1396 1428 } ··· 1448 1462 static int __init sunzilog_init(void) 1449 1463 { 1450 1464 struct device_node *dp; 1451 - int err; 1465 + int err, uart_count; 1466 + int num_keybms; 1452 1467 1453 1468 NUM_SUNZILOG = 0; 1454 - for_each_node_by_name(dp, "zs") 1469 + num_keybms = 0; 1470 + for_each_node_by_name(dp, "zs") { 1455 1471 NUM_SUNZILOG++; 1472 + if (of_find_property(dp, "keyboard", NULL)) 1473 + num_keybms++; 1474 + } 1456 1475 1476 + uart_count = 0; 1457 1477 if (NUM_SUNZILOG) { 1458 1478 int uart_count; 1459 1479 1460 1480 err = sunzilog_alloc_tables(); 1461 1481 if (err) 1462 - return err; 1482 + goto out; 1463 1483 1464 - /* Subtract 1 for keyboard, 1 for mouse. 
*/ 1465 - uart_count = (NUM_SUNZILOG * 2) - 2; 1484 + uart_count = (NUM_SUNZILOG * 2) - (2 * num_keybms); 1466 1485 1467 1486 sunzilog_reg.nr = uart_count; 1468 1487 sunzilog_reg.minor = sunserial_current_minor; 1469 1488 err = uart_register_driver(&sunzilog_reg); 1470 - if (err) { 1471 - sunzilog_free_tables(); 1472 - return err; 1473 - } 1489 + if (err) 1490 + goto out_free_tables; 1491 + 1474 1492 sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64; 1475 1493 sunzilog_reg.cons = SUNZILOG_CONSOLE(); 1476 1494 1477 1495 sunserial_current_minor += uart_count; 1478 1496 } 1479 1497 1480 - return of_register_driver(&zs_driver, &of_bus_type); 1498 + err = of_register_driver(&zs_driver, &of_bus_type); 1499 + if (err) 1500 + goto out_unregister_uart; 1501 + 1502 + if (zilog_irq != -1) { 1503 + err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED, 1504 + "zs", sunzilog_irq_chain); 1505 + if (err) 1506 + goto out_unregister_driver; 1507 + } 1508 + 1509 + out: 1510 + return err; 1511 + 1512 + out_unregister_driver: 1513 + of_unregister_driver(&zs_driver); 1514 + 1515 + out_unregister_uart: 1516 + if (NUM_SUNZILOG) { 1517 + uart_unregister_driver(&sunzilog_reg); 1518 + sunzilog_reg.cons = NULL; 1519 + } 1520 + 1521 + out_free_tables: 1522 + sunzilog_free_tables(); 1523 + goto out; 1481 1524 } 1482 1525 1483 1526 static void __exit sunzilog_exit(void)
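sunzilog_init() now unwinds failures with the usual goto chain: each label undoes exactly the steps that succeeded, in reverse order. The idiom in miniature, with made-up resources (init_two_resources is illustrative, not kernel code):

#include <stdlib.h>

static int init_two_resources(void **a, void **b)
{
	int err = -1;

	*a = malloc(32);		/* step 1 */
	if (!*a)
		goto out;
	*b = malloc(64);		/* step 2 */
	if (!*b)
		goto out_free_a;
	return 0;			/* success: caller owns both */

out_free_a:
	free(*a);			/* undo step 1 only */
	*a = NULL;
out:
	return err;
}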
-5
include/asm-m68k/oplib.h
··· 244 244 /* Does the passed node have the given "name"? YES=1 NO=0 */ 245 245 extern int prom_nodematch(int thisnode, char *name); 246 246 247 - /* Puts in buffer a prom name in the form name@x,y or name (x for which_io 248 - * and y for first regs phys address 249 - */ 250 - extern int prom_getname(int node, char *buf, int buflen); 251 - 252 247 /* Search all siblings starting at the passed node for "name" matching 253 248 * the given string. Returns the node on success, zero on failure. 254 249 */
+7 -2
include/asm-s390/system.h
··· 128 128 129 129 #define nop() __asm__ __volatile__ ("nop") 130 130 131 - #define xchg(ptr,x) \ 132 - ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr)))) 131 + #define xchg(ptr,x) \ 132 + ({ \ 133 + __typeof__(*(ptr)) __ret; \ 134 + __ret = (__typeof__(*(ptr))) \ 135 + __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \ 136 + __ret; \ 137 + }) 133 138 134 139 static inline unsigned long __xchg(unsigned long x, void * ptr, int size) 135 140 {
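The reworked xchg() wraps the call in a GNU statement expression so the macro can declare a temporary of the pointed-to type and evaluate to it, rather than casting the __xchg() result inline. A cut-down, deliberately non-atomic illustration of the ({ ... }) technique (gcc/clang extension; swap_in is an invented name):

#include <assert.h>

#define swap_in(ptr, x)					\
({							\
	__typeof__(*(ptr)) __old = *(ptr);		\
	*(ptr) = (x);					\
	__old;						\
})

int main(void)
{
	int v = 1;
	int old = swap_in(&v, 2);	/* old == 1, v == 2 */

	assert(old == 1 && v == 2);
	return 0;
}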
+2 -2
include/asm-s390/timex.h
··· 19 19 { 20 20 cycles_t cycles; 21 21 22 - __asm__("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc"); 22 + __asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc"); 23 23 return cycles >> 2; 24 24 } 25 25 ··· 27 27 { 28 28 unsigned long long clk; 29 29 30 - __asm__("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 30 + __asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 31 31 return clk; 32 32 } 33 33
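Adding __volatile__ tells the compiler the stck instruction has effects it cannot see, so two textually identical reads are never folded into one. The same marker on an x86 cycle-counter read, used here purely as a stand-in for the s390 stck (assumes gcc-style inline asm on x86):

#include <stdint.h>

static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;

	/* Without __volatile__ the compiler may assume two reads return
	 * the same value and merge or hoist them. */
	__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}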
-5
include/asm-sparc/oplib.h
··· 267 267 /* Does the passed node have the given "name"? YES=1 NO=0 */ 268 268 extern int prom_nodematch(int thisnode, char *name); 269 269 270 - /* Puts in buffer a prom name in the form name@x,y or name (x for which_io 271 - * and y for first regs phys address 272 - */ 273 - extern int prom_getname(int node, char *buf, int buflen); 274 - 275 270 /* Search all siblings starting at the passed node for "name" matching 276 271 * the given string. Returns the node on success, zero on failure. 277 272 */
+1 -1
include/asm-sparc64/openprom.h
··· 175 175 }; 176 176 177 177 /* More fun PROM structures for device probing. */ 178 - #define PROMREG_MAX 16 178 + #define PROMREG_MAX 24 179 179 #define PROMVADDR_MAX 16 180 180 #define PROMINTR_MAX 15 181 181
-5
include/asm-sparc64/oplib.h
··· 287 287 /* Does the passed node have the given "name"? YES=1 NO=0 */ 288 288 extern int prom_nodematch(int thisnode, const char *name); 289 289 290 - /* Puts in buffer a prom name in the form name@x,y or name (x for which_io 291 - * and y for first regs phys address 292 - */ 293 - extern int prom_getname(int node, char *buf, int buflen); 294 - 295 290 /* Search all siblings starting at the passed node for "name" matching 296 291 * the given string. Returns the node on success, zero on failure. 297 292 */
-6
include/linux/cpu.h
··· 48 48 { 49 49 } 50 50 #endif 51 - extern int current_in_cpu_hotplug(void); 52 51 53 52 int cpu_up(unsigned int cpu); 54 53 ··· 60 61 static inline void unregister_cpu_notifier(struct notifier_block *nb) 61 62 { 62 63 } 63 - static inline int current_in_cpu_hotplug(void) 64 - { 65 - return 0; 66 - } 67 64 68 65 #endif /* CONFIG_SMP */ 69 66 extern struct sysdev_class cpu_sysdev_class; ··· 68 73 /* Stop CPUs going up and down. */ 69 74 extern void lock_cpu_hotplug(void); 70 75 extern void unlock_cpu_hotplug(void); 71 - extern int lock_cpu_hotplug_interruptible(void); 72 76 #define hotcpu_notifier(fn, pri) { \ 73 77 static struct notifier_block fn##_nb = \ 74 78 { .notifier_call = fn, .priority = pri }; \
+3 -3
include/linux/netdevice.h
··· 924 924 925 925 static inline int netif_tx_trylock(struct net_device *dev) 926 926 { 927 - int err = spin_trylock(&dev->_xmit_lock); 928 - if (!err) 927 + int ok = spin_trylock(&dev->_xmit_lock); 928 + if (likely(ok)) 929 929 dev->xmit_lock_owner = smp_processor_id(); 930 - return err; 930 + return ok; 931 931 } 932 932 933 933 static inline void netif_tx_unlock(struct net_device *dev)
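The rename from err to ok is the substance of the netdevice.h hunk: spin_trylock() returns nonzero on success, so the old "if (!err)" recorded xmit_lock_owner exactly when the trylock had failed. The corrected trylock-then-record-owner pattern with pthreads, where pthread_mutex_trylock() returns 0 on success, hence the comparison (struct txq and txq_trylock are illustrative names):

#include <pthread.h>

struct txq {
	pthread_mutex_t lock;
	int owner;			/* -1 when unowned */
};

/* Nonzero means we now hold the lock and recorded ownership; zero means
 * someone else holds it - mirroring the fixed netif_tx_trylock(). */
static int txq_trylock(struct txq *q, int self)
{
	int ok = (pthread_mutex_trylock(&q->lock) == 0);

	if (ok)
		q->owner = self;
	return ok;
}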
+1 -1
include/net/netdma.h
··· 37 37 } 38 38 39 39 int dma_skb_copy_datagram_iovec(struct dma_chan* chan, 40 - const struct sk_buff *skb, int offset, struct iovec *to, 40 + struct sk_buff *skb, int offset, struct iovec *to, 41 41 size_t len, struct dma_pinned_list *pinned_list); 42 42 43 43 #endif /* CONFIG_NET_DMA */
+6 -5
include/net/sctp/structs.h
··· 445 445 struct sctp_paramhdr param_hdr; 446 446 union sctp_addr daddr; 447 447 unsigned long sent_at; 448 + __u64 hb_nonce; 448 449 } __attribute__((packed)) sctp_sender_hb_info_t; 449 450 450 451 /* ··· 731 730 const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); 732 731 733 732 /* This is a structure for holding either an IPv6 or an IPv4 address. */ 734 - /* sin_family -- AF_INET or AF_INET6 735 - * sin_port -- ordinary port number 736 - * sin_addr -- cast to either (struct in_addr) or (struct in6_addr) 737 - */ 738 733 struct sctp_sockaddr_entry { 739 734 struct list_head list; 740 735 union sctp_addr a; 736 + __u8 use_as_src; 741 737 }; 742 738 743 739 typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); ··· 982 984 */ 983 985 char cacc_saw_newack; 984 986 } cacc; 987 + 988 + /* 64-bit random number sent with heartbeat. */ 989 + __u64 hb_nonce; 985 990 }; 986 991 987 992 struct sctp_transport *sctp_transport_new(const union sctp_addr *, ··· 1139 1138 sctp_scope_t scope, gfp_t gfp, 1140 1139 int flags); 1141 1140 int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1142 - gfp_t gfp); 1141 + __u8 use_as_src, gfp_t gfp); 1143 1142 int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); 1144 1143 int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1145 1144 struct sctp_sock *);
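The paired hb_nonce fields carry a 64-bit random value: the transport records what it sent in the heartbeat, and the sender info echoed in the HEARTBEAT-ACK has to match before the peer address is treated as confirmed. A sketch of the comparison under that assumption (types simplified; hb_ack_valid is an illustrative name, not the kernel's check):

#include <stdint.h>

struct hb_info {
	uint64_t nonce;		/* echoed back by the peer */
};

struct transport {
	uint64_t hb_nonce;	/* random value sent with the heartbeat */
};

/* Accept a heartbeat ACK only if it echoes the nonce we sent; a forged
 * ACK from an off-path attacker will not know the value. */
static int hb_ack_valid(const struct transport *t, const struct hb_info *hb)
{
	return hb->nonce == t->hb_nonce;
}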
+9
include/net/sctp/user.h
··· 560 560 } __attribute__((packed, aligned(4))); 561 561 562 562 /* Peer addresses's state. */ 563 + /* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x] 564 + * calls. 565 + * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters. 566 + * Not yet confirmed by a heartbeat and not available for data 567 + * transfers. 568 + * ACTIVE : Peer address confirmed, active and available for data transfers. 569 + * INACTIVE: Peer address inactive and not available for data transfers. 570 + */ 563 571 enum sctp_spinfo_state { 564 572 SCTP_INACTIVE, 565 573 SCTP_ACTIVE, 574 + SCTP_UNCONFIRMED, 566 575 SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ 567 576 }; 568 577
-9
include/scsi/scsi_cmnd.h
··· 58 58 int timeout_per_command; 59 59 60 60 unsigned char cmd_len; 61 - unsigned char old_cmd_len; 62 61 enum dma_data_direction sc_data_direction; 63 - enum dma_data_direction sc_old_data_direction; 64 62 65 63 /* These elements define the operation we are about to perform */ 66 64 #define MAX_COMMAND_SIZE 16 ··· 69 71 void *request_buffer; /* Actual requested buffer */ 70 72 71 73 /* These elements define the operation we ultimately want to perform */ 72 - unsigned char data_cmnd[MAX_COMMAND_SIZE]; 73 - unsigned short old_use_sg; /* We save use_sg here when requesting 74 - * sense info */ 75 74 unsigned short use_sg; /* Number of pieces of scatter-gather */ 76 75 unsigned short sglist_len; /* size of malloc'd scatter-gather list */ 77 - unsigned bufflen; /* Size of data buffer */ 78 - void *buffer; /* Data buffer */ 79 76 80 77 unsigned underflow; /* Return error if less than 81 78 this amount is transferred */ 82 - unsigned old_underflow; /* save underflow here when reusing the 83 - * command for error handling */ 84 79 85 80 unsigned transfersize; /* How much we are guaranteed to 86 81 transfer with each SCSI transfer
+6 -1
include/scsi/scsi_transport_sas.h
··· 106 106 107 107 struct sas_expander_device { 108 108 int level; 109 + int next_port_id; 109 110 110 111 #define SAS_EXPANDER_VENDOR_ID_LEN 8 111 112 char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1]; ··· 128 127 struct sas_port { 129 128 struct device dev; 130 129 131 - u8 port_identifier; 130 + int port_identifier; 132 131 int num_phys; 132 + /* port flags */ 133 + unsigned int is_backlink:1; 133 134 134 135 /* the other end of the link */ 135 136 struct sas_rphy *rphy; ··· 171 168 extern int scsi_is_sas_rphy(const struct device *); 172 169 173 170 struct sas_port *sas_port_alloc(struct device *, int); 171 + struct sas_port *sas_port_alloc_num(struct device *); 174 172 int sas_port_add(struct sas_port *); 175 173 void sas_port_free(struct sas_port *); 176 174 void sas_port_delete(struct sas_port *); 177 175 void sas_port_add_phy(struct sas_port *, struct sas_phy *); 178 176 void sas_port_delete_phy(struct sas_port *, struct sas_phy *); 177 + void sas_port_mark_backlink(struct sas_port *); 179 178 int scsi_is_sas_port(const struct device *); 180 179 181 180 extern struct scsi_transport_template *
+34 -41
kernel/cpu.c
··· 16 16 #include <linux/mutex.h> 17 17 18 18 /* This protects CPUs going up and down... */ 19 - static DEFINE_MUTEX(cpucontrol); 19 + static DEFINE_MUTEX(cpu_add_remove_lock); 20 + static DEFINE_MUTEX(cpu_bitmask_lock); 20 21 21 22 static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain); 22 23 23 24 #ifdef CONFIG_HOTPLUG_CPU 24 - static struct task_struct *lock_cpu_hotplug_owner; 25 - static int lock_cpu_hotplug_depth; 26 25 27 - static int __lock_cpu_hotplug(int interruptible) 28 - { 29 - int ret = 0; 30 - 31 - if (lock_cpu_hotplug_owner != current) { 32 - if (interruptible) 33 - ret = mutex_lock_interruptible(&cpucontrol); 34 - else 35 - mutex_lock(&cpucontrol); 36 - } 37 - 38 - /* 39 - * Set only if we succeed in locking 40 - */ 41 - if (!ret) { 42 - lock_cpu_hotplug_depth++; 43 - lock_cpu_hotplug_owner = current; 44 - } 45 - 46 - return ret; 47 - } 26 + /* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */ 27 + static struct task_struct *recursive; 28 + static int recursive_depth; 48 29 49 30 void lock_cpu_hotplug(void) 50 31 { 51 - __lock_cpu_hotplug(0); 32 + struct task_struct *tsk = current; 33 + 34 + if (tsk == recursive) { 35 + static int warnings = 10; 36 + if (warnings) { 37 + printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n"); 38 + WARN_ON(1); 39 + warnings--; 40 + } 41 + recursive_depth++; 42 + return; 43 + } 44 + mutex_lock(&cpu_bitmask_lock); 45 + recursive = tsk; 52 46 } 53 47 EXPORT_SYMBOL_GPL(lock_cpu_hotplug); 54 48 55 49 void unlock_cpu_hotplug(void) 56 50 { 57 - if (--lock_cpu_hotplug_depth == 0) { 58 - lock_cpu_hotplug_owner = NULL; 59 - mutex_unlock(&cpucontrol); 51 + WARN_ON(recursive != current); 52 + if (recursive_depth) { 53 + recursive_depth--; 54 + return; 60 55 } 56 + mutex_unlock(&cpu_bitmask_lock); 57 + recursive = NULL; 61 58 } 62 59 EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); 63 60 64 - int lock_cpu_hotplug_interruptible(void) 65 - { 66 - return __lock_cpu_hotplug(1); 67 - } 68 - EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible); 69 61 #endif /* CONFIG_HOTPLUG_CPU */ 70 62 71 63 /* Need to know about CPUs going up/down? */ ··· 114 122 struct task_struct *p; 115 123 cpumask_t old_allowed, tmp; 116 124 117 - if ((err = lock_cpu_hotplug_interruptible()) != 0) 118 - return err; 119 - 125 + mutex_lock(&cpu_add_remove_lock); 120 126 if (num_online_cpus() == 1) { 121 127 err = -EBUSY; 122 128 goto out; ··· 140 150 cpu_clear(cpu, tmp); 141 151 set_cpus_allowed(current, tmp); 142 152 153 + mutex_lock(&cpu_bitmask_lock); 143 154 p = __stop_machine_run(take_cpu_down, NULL, cpu); 155 + mutex_unlock(&cpu_bitmask_lock); 156 + 144 157 if (IS_ERR(p)) { 145 158 /* CPU didn't die: tell everyone. Can't complain. */ 146 159 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, ··· 180 187 out_allowed: 181 188 set_cpus_allowed(current, old_allowed); 182 189 out: 183 - unlock_cpu_hotplug(); 190 + mutex_unlock(&cpu_add_remove_lock); 184 191 return err; 185 192 } 186 193 #endif /*CONFIG_HOTPLUG_CPU*/ ··· 190 197 int ret; 191 198 void *hcpu = (void *)(long)cpu; 192 199 193 - if ((ret = lock_cpu_hotplug_interruptible()) != 0) 194 - return ret; 195 - 200 + mutex_lock(&cpu_add_remove_lock); 196 201 if (cpu_online(cpu) || !cpu_present(cpu)) { 197 202 ret = -EINVAL; 198 203 goto out; ··· 205 214 } 206 215 207 216 /* Arch-specific enabling code. 
*/ 217 + mutex_lock(&cpu_bitmask_lock); 208 218 ret = __cpu_up(cpu); 219 + mutex_unlock(&cpu_bitmask_lock); 209 220 if (ret != 0) 210 221 goto out_notify; 211 222 BUG_ON(!cpu_online(cpu)); ··· 220 227 blocking_notifier_call_chain(&cpu_chain, 221 228 CPU_UP_CANCELED, hcpu); 222 229 out: 223 - unlock_cpu_hotplug(); 230 + mutex_unlock(&cpu_add_remove_lock); 224 231 return ret; 225 232 }
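lock_cpu_hotplug() now detects re-entry by the current task and just counts depth, warning loudly instead of deadlocking on itself. The same shape with pthreads; as in the kernel version, the unlocked owner check is only meaningful for detecting self-recursion (all names here are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bitmask_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;
static int have_owner;
static int depth;

void lock_hotplug(void)
{
	/* Safe only because a thread cannot race with itself: if the
	 * owner is us, these fields cannot change under our feet. */
	if (have_owner && pthread_equal(owner, pthread_self())) {
		fprintf(stderr, "recursive hotplug locking\n");
		depth++;
		return;
	}
	pthread_mutex_lock(&bitmask_lock);
	owner = pthread_self();
	have_owner = 1;
}

void unlock_hotplug(void)
{
	if (depth) {
		depth--;
		return;
	}
	have_owner = 0;
	pthread_mutex_unlock(&bitmask_lock);
}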
+21 -3
kernel/cpuset.c
··· 762 762 * 763 763 * Call with manage_mutex held. May nest a call to the 764 764 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 765 + * Must not be called holding callback_mutex, because we must 766 + * not call lock_cpu_hotplug() while holding callback_mutex. 765 767 */ 766 768 767 769 static void update_cpu_domains(struct cpuset *cur) ··· 783 781 if (is_cpu_exclusive(c)) 784 782 cpus_andnot(pspan, pspan, c->cpus_allowed); 785 783 } 786 - if (is_removed(cur) || !is_cpu_exclusive(cur)) { 784 + if (!is_cpu_exclusive(cur)) { 787 785 cpus_or(pspan, pspan, cur->cpus_allowed); 788 786 if (cpus_equal(pspan, cur->cpus_allowed)) 789 787 return; ··· 1919 1917 return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); 1920 1918 } 1921 1919 1920 + /* 1921 + * Locking note on the strange update_flag() call below: 1922 + * 1923 + * If the cpuset being removed is marked cpu_exclusive, then simulate 1924 + * turning cpu_exclusive off, which will call update_cpu_domains(). 1925 + * The lock_cpu_hotplug() call in update_cpu_domains() must not be 1926 + * made while holding callback_mutex. Elsewhere the kernel nests 1927 + * callback_mutex inside lock_cpu_hotplug() calls. So the reverse 1928 + * nesting would risk an ABBA deadlock. 1929 + */ 1930 + 1922 1931 static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) 1923 1932 { 1924 1933 struct cpuset *cs = dentry->d_fsdata; ··· 1949 1936 mutex_unlock(&manage_mutex); 1950 1937 return -EBUSY; 1951 1938 } 1939 + if (is_cpu_exclusive(cs)) { 1940 + int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0"); 1941 + if (retval < 0) { 1942 + mutex_unlock(&manage_mutex); 1943 + return retval; 1944 + } 1945 + } 1952 1946 parent = cs->parent; 1953 1947 mutex_lock(&callback_mutex); 1954 1948 set_bit(CS_REMOVED, &cs->flags); 1955 - if (is_cpu_exclusive(cs)) 1956 - update_cpu_domains(cs); 1957 1949 list_del(&cs->sibling); /* delete my sibling from parent->children */ 1958 1950 spin_lock(&cs->dentry->d_lock); 1959 1951 d = dget(cs->dentry);
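The comment block added to cpuset.c spells out the ordering rule; in miniature, with A standing for the hotplug lock and B for callback_mutex, every path has to take them in the same order or two threads can each hold one lock while waiting for the other (classic ABBA):

#include <pthread.h>

static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;	/* hotplug lock */
static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;	/* callback_mutex */

/* Safe: A then B, everywhere. The cpuset bug was one path taking B and
 * then, via update_cpu_domains(), A; the fix drops the cpu_exclusive
 * flag before B is ever acquired so that inversion cannot happen. */
static void with_both(void (*fn)(void))
{
	pthread_mutex_lock(&A);
	pthread_mutex_lock(&B);
	fn();
	pthread_mutex_unlock(&B);
	pthread_mutex_unlock(&A);
}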
+1 -2
net/8021q/vlan.c
··· 542 542 * so it cannot "appear" on us. 543 543 */ 544 544 if (!grp) { /* need to add a new group */ 545 - grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL); 545 + grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL); 546 546 if (!grp) 547 547 goto out_free_unregister; 548 548 549 549 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ 550 - memset(grp, 0, sizeof(struct vlan_group)); 551 550 grp->real_dev_ifindex = real_dev->ifindex; 552 551 553 552 hlist_add_head_rcu(&grp->hlist,
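This vlan.c hunk opens a long run of identical mechanical conversions through the rest of the networking tree: a kmalloc() followed by memset(..., 0, size) collapses into a single kzalloc() with the same GFP flags. The shape, lifted from the change itself:

/* before */
grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL);
if (!grp)
	goto out_free_unregister;
memset(grp, 0, sizeof(struct vlan_group));

/* after: one call, same GFP flags, returns zeroed memory */
grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
if (!grp)
	goto out_free_unregister;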
+2 -4
net/appletalk/ddp.c
··· 227 227 static struct atalk_iface *atif_add_device(struct net_device *dev, 228 228 struct atalk_addr *sa) 229 229 { 230 - struct atalk_iface *iface = kmalloc(sizeof(*iface), GFP_KERNEL); 230 + struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL); 231 231 232 232 if (!iface) 233 233 goto out; 234 234 235 - memset(iface, 0, sizeof(*iface)); 236 235 dev_hold(dev); 237 236 iface->dev = dev; 238 237 dev->atalk_ptr = iface; ··· 558 559 } 559 560 560 561 if (!rt) { 561 - rt = kmalloc(sizeof(*rt), GFP_ATOMIC); 562 + rt = kzalloc(sizeof(*rt), GFP_ATOMIC); 562 563 563 564 retval = -ENOBUFS; 564 565 if (!rt) 565 566 goto out_unlock; 566 - memset(rt, 0, sizeof(*rt)); 567 567 568 568 rt->next = atalk_routes; 569 569 atalk_routes = rt;
+1 -2
net/atm/br2684.c
··· 508 508 509 509 if (copy_from_user(&be, arg, sizeof be)) 510 510 return -EFAULT; 511 - brvcc = kmalloc(sizeof(struct br2684_vcc), GFP_KERNEL); 511 + brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL); 512 512 if (!brvcc) 513 513 return -ENOMEM; 514 - memset(brvcc, 0, sizeof(struct br2684_vcc)); 515 514 write_lock_irq(&devs_lock); 516 515 net_dev = br2684_find_dev(&be.ifspec); 517 516 if (net_dev == NULL) {
+1 -2
net/atm/clip.c
··· 929 929 struct seq_file *seq; 930 930 int rc = -EAGAIN; 931 931 932 - state = kmalloc(sizeof(*state), GFP_KERNEL); 932 + state = kzalloc(sizeof(*state), GFP_KERNEL); 933 933 if (!state) { 934 934 rc = -ENOMEM; 935 935 goto out_kfree; 936 936 } 937 - memset(state, 0, sizeof(*state)); 938 937 state->ns.neigh_sub_iter = clip_seq_sub_iter; 939 938 940 939 rc = seq_open(file, &arp_seq_ops);
+1 -2
net/atm/lec.c
··· 1811 1811 { 1812 1812 struct lec_arp_table *to_return; 1813 1813 1814 - to_return = kmalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1814 + to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1815 1815 if (!to_return) { 1816 1816 printk("LEC: Arp entry kmalloc failed\n"); 1817 1817 return NULL; 1818 1818 } 1819 - memset(to_return, 0, sizeof(struct lec_arp_table)); 1820 1819 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1821 1820 init_timer(&to_return->timer); 1822 1821 to_return->timer.function = lec_arp_expire_arp;
+1 -2
net/atm/mpc.c
··· 258 258 { 259 259 struct mpoa_client *mpc; 260 260 261 - mpc = kmalloc(sizeof (struct mpoa_client), GFP_KERNEL); 261 + mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL); 262 262 if (mpc == NULL) 263 263 return NULL; 264 - memset(mpc, 0, sizeof(struct mpoa_client)); 265 264 rwlock_init(&mpc->ingress_lock); 266 265 rwlock_init(&mpc->egress_lock); 267 266 mpc->next = mpcs;
+1 -2
net/atm/pppoatm.c
··· 287 287 if (be.encaps != PPPOATM_ENCAPS_AUTODETECT && 288 288 be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC) 289 289 return -EINVAL; 290 - pvcc = kmalloc(sizeof(*pvcc), GFP_KERNEL); 290 + pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL); 291 291 if (pvcc == NULL) 292 292 return -ENOMEM; 293 - memset(pvcc, 0, sizeof(*pvcc)); 294 293 pvcc->atmvcc = atmvcc; 295 294 pvcc->old_push = atmvcc->push; 296 295 pvcc->old_pop = atmvcc->pop;
+1 -2
net/atm/resources.c
··· 33 33 { 34 34 struct atm_dev *dev; 35 35 36 - dev = kmalloc(sizeof(*dev), GFP_KERNEL); 36 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 37 37 if (!dev) 38 38 return NULL; 39 - memset(dev, 0, sizeof(*dev)); 40 39 dev->type = type; 41 40 dev->signal = ATM_PHY_SIG_UNKNOWN; 42 41 dev->link_rate = ATM_OC3_PCR;
+1 -3
net/ax25/sysctl_net_ax25.c
··· 203 203 for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) 204 204 ax25_table_size += sizeof(ctl_table); 205 205 206 - if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { 206 + if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { 207 207 spin_unlock_bh(&ax25_dev_lock); 208 208 return; 209 209 } 210 - 211 - memset(ax25_table, 0x00, ax25_table_size); 212 210 213 211 for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { 214 212 ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC);
+2 -5
net/bridge/br_ioctl.c
··· 162 162 if (num > BR_MAX_PORTS) 163 163 num = BR_MAX_PORTS; 164 164 165 - indices = kmalloc(num*sizeof(int), GFP_KERNEL); 165 + indices = kcalloc(num, sizeof(int), GFP_KERNEL); 166 166 if (indices == NULL) 167 167 return -ENOMEM; 168 - 169 - memset(indices, 0, num*sizeof(int)); 170 168 171 169 get_port_ifindices(br, indices, num); 172 170 if (copy_to_user((void __user *)args[1], indices, num*sizeof(int))) ··· 325 327 326 328 if (args[2] >= 2048) 327 329 return -ENOMEM; 328 - indices = kmalloc(args[2]*sizeof(int), GFP_KERNEL); 330 + indices = kcalloc(args[2], sizeof(int), GFP_KERNEL); 329 331 if (indices == NULL) 330 332 return -ENOMEM; 331 333 332 - memset(indices, 0, args[2]*sizeof(int)); 333 334 args[2] = get_bridge_ifindices(indices, args[2]); 334 335 335 336 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
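Where the allocation is an array, the conversions use kcalloc(n, size, flags) rather than kzalloc(n * size, ...): besides returning zeroed memory, kcalloc fails cleanly if the multiplication would overflow. From the br_ioctl.c change above:

/* before: the multiplication can silently overflow */
indices = kmalloc(num * sizeof(int), GFP_KERNEL);
if (indices == NULL)
	return -ENOMEM;
memset(indices, 0, num * sizeof(int));

/* after: zeroed, and num * sizeof(int) is overflow-checked */
indices = kcalloc(num, sizeof(int), GFP_KERNEL);
if (indices == NULL)
	return -ENOMEM;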
+1 -1
net/core/ethtool.c
··· 437 437 { 438 438 struct ethtool_pauseparam pauseparam; 439 439 440 - if (!dev->ethtool_ops->get_pauseparam) 440 + if (!dev->ethtool_ops->set_pauseparam) 441 441 return -EOPNOTSUPP; 442 442 443 443 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
+1
net/core/user_dma.c
··· 29 29 #include <linux/socket.h> 30 30 #include <linux/rtnetlink.h> /* for BUG_TRAP */ 31 31 #include <net/tcp.h> 32 + #include <net/netdma.h> 32 33 33 34 #define NET_DMA_DEFAULT_COPYBREAK 4096 34 35
+2 -7
net/decnet/dn_dev.c
··· 413 413 { 414 414 struct dn_ifaddr *ifa; 415 415 416 - ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); 417 - 418 - if (ifa) { 419 - memset(ifa, 0, sizeof(*ifa)); 420 - } 416 + ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); 421 417 422 418 return ifa; 423 419 } ··· 1101 1105 return NULL; 1102 1106 1103 1107 *err = -ENOBUFS; 1104 - if ((dn_db = kmalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) 1108 + if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) 1105 1109 return NULL; 1106 1110 1107 - memset(dn_db, 0, sizeof(struct dn_dev)); 1108 1111 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1109 1112 smp_wmb(); 1110 1113 dev->dn_ptr = dn_db;
+1 -2
net/decnet/dn_fib.c
··· 283 283 goto err_inval; 284 284 } 285 285 286 - fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); 286 + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); 287 287 err = -ENOBUFS; 288 288 if (fi == NULL) 289 289 goto failure; 290 - memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct dn_fib_nh)); 291 290 292 291 fi->fib_protocol = r->rtm_protocol; 293 292 fi->fib_nhs = nhs;
+1 -2
net/decnet/dn_neigh.c
··· 580 580 { 581 581 struct seq_file *seq; 582 582 int rc = -ENOMEM; 583 - struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 583 + struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 584 584 585 585 if (!s) 586 586 goto out; 587 587 588 - memset(s, 0, sizeof(*s)); 589 588 rc = seq_open(file, &dn_neigh_seq_ops); 590 589 if (rc) 591 590 goto out_kfree;
+1 -2
net/decnet/dn_rules.c
··· 151 151 } 152 152 } 153 153 154 - new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); 154 + new_r = kzalloc(sizeof(*new_r), GFP_KERNEL); 155 155 if (!new_r) 156 156 return -ENOMEM; 157 - memset(new_r, 0, sizeof(*new_r)); 158 157 159 158 if (rta[RTA_SRC-1]) 160 159 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
+3 -8
net/decnet/dn_table.c
··· 158 158 break; 159 159 } 160 160 161 - ht = kmalloc(new_divisor*sizeof(struct dn_fib_node*), GFP_KERNEL); 162 - 161 + ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL); 163 162 if (ht == NULL) 164 163 return; 165 164 166 - memset(ht, 0, new_divisor*sizeof(struct dn_fib_node *)); 167 165 write_lock_bh(&dn_fib_tables_lock); 168 166 old_ht = dz->dz_hash; 169 167 dz->dz_hash = ht; ··· 182 184 static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) 183 185 { 184 186 int i; 185 - struct dn_zone *dz = kmalloc(sizeof(struct dn_zone), GFP_KERNEL); 187 + struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL); 186 188 if (!dz) 187 189 return NULL; 188 190 189 - memset(dz, 0, sizeof(struct dn_zone)); 190 191 if (z) { 191 192 dz->dz_divisor = 16; 192 193 dz->dz_hashmask = 0x0F; ··· 194 197 dz->dz_hashmask = 0; 195 198 } 196 199 197 - dz->dz_hash = kmalloc(dz->dz_divisor*sizeof(struct dn_fib_node *), GFP_KERNEL); 198 - 200 + dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL); 199 201 if (!dz->dz_hash) { 200 202 kfree(dz); 201 203 return NULL; 202 204 } 203 205 204 - memset(dz->dz_hash, 0, dz->dz_divisor*sizeof(struct dn_fib_node*)); 205 206 dz->dz_order = z; 206 207 dz->dz_mask = dnet_make_mask(z); 207 208
+1 -2
net/econet/af_econet.c
··· 673 673 edev = dev->ec_ptr; 674 674 if (edev == NULL) { 675 675 /* Magic up a new one. */ 676 - edev = kmalloc(sizeof(struct ec_device), GFP_KERNEL); 676 + edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL); 677 677 if (edev == NULL) { 678 678 err = -ENOMEM; 679 679 break; 680 680 } 681 - memset(edev, 0, sizeof(struct ec_device)); 682 681 dev->ec_ptr = edev; 683 682 } else 684 683 net2dev_map[edev->net] = NULL;
+1 -2
net/ieee80211/ieee80211_crypt.c
··· 110 110 unsigned long flags; 111 111 struct ieee80211_crypto_alg *alg; 112 112 113 - alg = kmalloc(sizeof(*alg), GFP_KERNEL); 113 + alg = kzalloc(sizeof(*alg), GFP_KERNEL); 114 114 if (alg == NULL) 115 115 return -ENOMEM; 116 116 117 - memset(alg, 0, sizeof(*alg)); 118 117 alg->ops = ops; 119 118 120 119 spin_lock_irqsave(&ieee80211_crypto_lock, flags);
+1 -2
net/ieee80211/ieee80211_crypt_ccmp.c
··· 76 76 { 77 77 struct ieee80211_ccmp_data *priv; 78 78 79 - priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 79 + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); 80 80 if (priv == NULL) 81 81 goto fail; 82 - memset(priv, 0, sizeof(*priv)); 83 82 priv->key_idx = key_idx; 84 83 85 84 priv->tfm = crypto_alloc_tfm("aes", 0);
+1 -2
net/ieee80211/ieee80211_crypt_wep.c
··· 39 39 { 40 40 struct prism2_wep_data *priv; 41 41 42 - priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 42 + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); 43 43 if (priv == NULL) 44 44 goto fail; 45 - memset(priv, 0, sizeof(*priv)); 46 45 priv->key_idx = keyidx; 47 46 48 47 priv->tfm = crypto_alloc_tfm("arc4", 0);
+2 -5
net/ieee80211/ieee80211_wx.c
··· 369 369 struct ieee80211_crypt_data *new_crypt; 370 370 371 371 /* take WEP into use */ 372 - new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data), 372 + new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), 373 373 GFP_KERNEL); 374 374 if (new_crypt == NULL) 375 375 return -ENOMEM; 376 - memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 377 376 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 378 377 if (!new_crypt->ops) { 379 378 request_module("ieee80211_crypt_wep"); ··· 615 616 616 617 ieee80211_crypt_delayed_deinit(ieee, crypt); 617 618 618 - new_crypt = (struct ieee80211_crypt_data *) 619 - kmalloc(sizeof(*new_crypt), GFP_KERNEL); 619 + new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL); 620 620 if (new_crypt == NULL) { 621 621 ret = -ENOMEM; 622 622 goto done; 623 623 } 624 - memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 625 624 new_crypt->ops = ops; 626 625 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 627 626 new_crypt->priv = new_crypt->ops->init(idx);
+1 -2
net/ieee80211/softmac/ieee80211softmac_io.c
··· 96 96 if(size > IEEE80211_DATA_LEN) 97 97 return NULL; 98 98 /* Allocate the frame */ 99 - data = kmalloc(size, GFP_ATOMIC); 100 - memset(data, 0, size); 99 + data = kzalloc(size, GFP_ATOMIC); 101 100 return data; 102 101 } 103 102
+1 -3
net/ipv4/ah4.c
··· 215 215 if (x->encap) 216 216 goto error; 217 217 218 - ahp = kmalloc(sizeof(*ahp), GFP_KERNEL); 218 + ahp = kzalloc(sizeof(*ahp), GFP_KERNEL); 219 219 if (ahp == NULL) 220 220 return -ENOMEM; 221 - 222 - memset(ahp, 0, sizeof(*ahp)); 223 221 224 222 ahp->key = x->aalg->alg_key; 225 223 ahp->key_len = (x->aalg->alg_key_len+7)/8;
+1 -2
net/ipv4/arp.c
··· 1372 1372 { 1373 1373 struct seq_file *seq; 1374 1374 int rc = -ENOMEM; 1375 - struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1375 + struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1376 1376 1377 1377 if (!s) 1378 1378 goto out; 1379 1379 1380 - memset(s, 0, sizeof(*s)); 1381 1380 rc = seq_open(file, &arp_seq_ops); 1382 1381 if (rc) 1383 1382 goto out_kfree;
+2 -4
net/ipv4/devinet.c
··· 93 93 94 94 static struct in_ifaddr *inet_alloc_ifa(void) 95 95 { 96 - struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); 96 + struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); 97 97 98 98 if (ifa) { 99 - memset(ifa, 0, sizeof(*ifa)); 100 99 INIT_RCU_HEAD(&ifa->rcu_head); 101 100 } 102 101 ··· 139 140 140 141 ASSERT_RTNL(); 141 142 142 - in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL); 143 + in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL); 143 144 if (!in_dev) 144 145 goto out; 145 - memset(in_dev, 0, sizeof(*in_dev)); 146 146 INIT_RCU_HEAD(&in_dev->rcu_head); 147 147 memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf)); 148 148 in_dev->cnf.sysctl = NULL;
+1 -3
net/ipv4/esp4.c
··· 316 316 if (x->ealg == NULL) 317 317 goto error; 318 318 319 - esp = kmalloc(sizeof(*esp), GFP_KERNEL); 319 + esp = kzalloc(sizeof(*esp), GFP_KERNEL); 320 320 if (esp == NULL) 321 321 return -ENOMEM; 322 - 323 - memset(esp, 0, sizeof(*esp)); 324 322 325 323 if (x->aalg) { 326 324 struct xfrm_algo_desc *aalg_desc;
+2 -4
net/ipv4/fib_hash.c
··· 204 204 fn_new_zone(struct fn_hash *table, int z) 205 205 { 206 206 int i; 207 - struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL); 207 + struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL); 208 208 if (!fz) 209 209 return NULL; 210 210 211 - memset(fz, 0, sizeof(struct fn_zone)); 212 211 if (z) { 213 212 fz->fz_divisor = 16; 214 213 } else { ··· 1045 1046 { 1046 1047 struct seq_file *seq; 1047 1048 int rc = -ENOMEM; 1048 - struct fib_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1049 + struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1049 1050 1050 1051 if (!s) 1051 1052 goto out; ··· 1056 1057 1057 1058 seq = file->private_data; 1058 1059 seq->private = s; 1059 - memset(s, 0, sizeof(*s)); 1060 1060 out: 1061 1061 return rc; 1062 1062 out_kfree:
+1 -2
net/ipv4/fib_rules.c
··· 196 196 } 197 197 } 198 198 199 - new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); 199 + new_r = kzalloc(sizeof(*new_r), GFP_KERNEL); 200 200 if (!new_r) 201 201 return -ENOMEM; 202 - memset(new_r, 0, sizeof(*new_r)); 203 202 204 203 if (rta[RTA_SRC-1]) 205 204 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
+9 -6
net/ipv4/fib_semantics.c
··· 709 709 goto failure; 710 710 } 711 711 712 - fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 712 + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 713 713 if (fi == NULL) 714 714 goto failure; 715 715 fib_info_cnt++; 716 - memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh)); 717 716 718 717 fi->fib_protocol = r->rtm_protocol; 719 718 ··· 961 962 rtm->rtm_protocol = fi->fib_protocol; 962 963 if (fi->fib_priority) 963 964 RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority); 964 - #ifdef CONFIG_NET_CLS_ROUTE 965 - if (fi->fib_nh[0].nh_tclassid) 966 - RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid); 967 - #endif 968 965 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 969 966 goto rtattr_failure; 970 967 if (fi->fib_prefsrc) ··· 970 975 RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw); 971 976 if (fi->fib_nh->nh_oif) 972 977 RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif); 978 + #ifdef CONFIG_NET_CLS_ROUTE 979 + if (fi->fib_nh[0].nh_tclassid) 980 + RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid); 981 + #endif 973 982 } 974 983 #ifdef CONFIG_IP_ROUTE_MULTIPATH 975 984 if (fi->fib_nhs > 1) { ··· 992 993 nhp->rtnh_ifindex = nh->nh_oif; 993 994 if (nh->nh_gw) 994 995 RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw); 996 + #ifdef CONFIG_NET_CLS_ROUTE 997 + if (nh->nh_tclassid) 998 + RTA_PUT(skb, RTA_FLOW, 4, &nh->nh_tclassid); 999 + #endif 995 1000 nhp->rtnh_len = skb->tail - (unsigned char*)nhp; 996 1001 } endfor_nexthops(fi); 997 1002 mp_head->rta_type = RTA_MULTIPATH;
+4 -8
net/ipv4/igmp.c
··· 1028 1028 * for deleted items allows change reports to use common code with 1029 1029 * non-deleted or query-response MCA's. 1030 1030 */ 1031 - pmc = kmalloc(sizeof(*pmc), GFP_KERNEL); 1031 + pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); 1032 1032 if (!pmc) 1033 1033 return; 1034 - memset(pmc, 0, sizeof(*pmc)); 1035 1034 spin_lock_bh(&im->lock); 1036 1035 pmc->interface = im->interface; 1037 1036 in_dev_hold(in_dev); ··· 1528 1529 psf_prev = psf; 1529 1530 } 1530 1531 if (!psf) { 1531 - psf = kmalloc(sizeof(*psf), GFP_ATOMIC); 1532 + psf = kzalloc(sizeof(*psf), GFP_ATOMIC); 1532 1533 if (!psf) 1533 1534 return -ENOBUFS; 1534 - memset(psf, 0, sizeof(*psf)); 1535 1535 psf->sf_inaddr = *psfsrc; 1536 1536 if (psf_prev) { 1537 1537 psf_prev->sf_next = psf; ··· 2378 2380 { 2379 2381 struct seq_file *seq; 2380 2382 int rc = -ENOMEM; 2381 - struct igmp_mc_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 2383 + struct igmp_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 2382 2384 2383 2385 if (!s) 2384 2386 goto out; ··· 2388 2390 2389 2391 seq = file->private_data; 2390 2392 seq->private = s; 2391 - memset(s, 0, sizeof(*s)); 2392 2393 out: 2393 2394 return rc; 2394 2395 out_kfree: ··· 2552 2555 { 2553 2556 struct seq_file *seq; 2554 2557 int rc = -ENOMEM; 2555 - struct igmp_mcf_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 2558 + struct igmp_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 2556 2559 2557 2560 if (!s) 2558 2561 goto out; ··· 2562 2565 2563 2566 seq = file->private_data; 2564 2567 seq->private = s; 2565 - memset(s, 0, sizeof(*s)); 2566 2568 out: 2567 2569 return rc; 2568 2570 out_kfree:
+1 -2
net/ipv4/inet_diag.c
··· 909 909 sizeof(struct inet_diag_handler *)); 910 910 int err = -ENOMEM; 911 911 912 - inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL); 912 + inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL); 913 913 if (!inet_diag_table) 914 914 goto out; 915 915 916 - memset(inet_diag_table, 0, inet_diag_table_size); 917 916 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv, 918 917 THIS_MODULE); 919 918 if (idiagnl == NULL)
-1
net/ipv4/ip_gre.c
··· 617 617 skb->mac.raw = skb->nh.raw; 618 618 skb->nh.raw = __pskb_pull(skb, offset); 619 619 skb_postpull_rcsum(skb, skb->h.raw, offset); 620 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 621 620 skb->pkt_type = PACKET_HOST; 622 621 #ifdef CONFIG_NET_IPGRE_BROADCAST 623 622 if (MULTICAST(iph->daddr)) {
-1
net/ipv4/ip_options.c
··· 256 256 257 257 if (!opt) { 258 258 opt = &(IPCB(skb)->opt); 259 - memset(opt, 0, sizeof(struct ip_options)); 260 259 iph = skb->nh.raw; 261 260 opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); 262 261 optptr = iph + sizeof(struct iphdr);
+1 -2
net/ipv4/ipcomp.c
··· 410 410 goto out; 411 411 412 412 err = -ENOMEM; 413 - ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL); 413 + ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL); 414 414 if (!ipcd) 415 415 goto out; 416 416 417 - memset(ipcd, 0, sizeof(*ipcd)); 418 417 x->props.header_len = 0; 419 418 if (x->props.mode) 420 419 x->props.header_len += sizeof(struct iphdr);
-1
net/ipv4/ipip.c
··· 487 487 488 488 skb->mac.raw = skb->nh.raw; 489 489 skb->nh.raw = skb->data; 490 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 491 490 skb->protocol = htons(ETH_P_IP); 492 491 skb->pkt_type = PACKET_HOST; 493 492
-2
net/ipv4/ipmr.c
··· 1461 1461 skb_pull(skb, (u8*)encap - skb->data); 1462 1462 skb->nh.iph = (struct iphdr *)skb->data; 1463 1463 skb->dev = reg_dev; 1464 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 1465 1464 skb->protocol = htons(ETH_P_IP); 1466 1465 skb->ip_summed = 0; 1467 1466 skb->pkt_type = PACKET_HOST; ··· 1516 1517 skb_pull(skb, (u8*)encap - skb->data); 1517 1518 skb->nh.iph = (struct iphdr *)skb->data; 1518 1519 skb->dev = reg_dev; 1519 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 1520 1520 skb->protocol = htons(ETH_P_IP); 1521 1521 skb->ip_summed = 0; 1522 1522 skb->pkt_type = PACKET_HOST;
+3 -7
net/ipv4/ipvs/ip_vs_ctl.c
··· 735 735 if (atype != RTN_LOCAL && atype != RTN_UNICAST) 736 736 return -EINVAL; 737 737 738 - dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 738 + dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 739 739 if (dest == NULL) { 740 740 IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n"); 741 741 return -ENOMEM; 742 742 } 743 - memset(dest, 0, sizeof(struct ip_vs_dest)); 744 743 745 744 dest->protocol = svc->protocol; 746 745 dest->vaddr = svc->addr; ··· 1049 1050 goto out_mod_dec; 1050 1051 } 1051 1052 1052 - svc = (struct ip_vs_service *) 1053 - kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); 1053 + svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); 1054 1054 if (svc == NULL) { 1055 1055 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); 1056 1056 ret = -ENOMEM; 1057 1057 goto out_err; 1058 1058 } 1059 - memset(svc, 0, sizeof(struct ip_vs_service)); 1060 1059 1061 1060 /* I'm the first user of the service */ 1062 1061 atomic_set(&svc->usecnt, 1); ··· 1794 1797 { 1795 1798 struct seq_file *seq; 1796 1799 int rc = -ENOMEM; 1797 - struct ip_vs_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); 1800 + struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL); 1798 1801 1799 1802 if (!s) 1800 1803 goto out; ··· 1805 1808 1806 1809 seq = file->private_data; 1807 1810 seq->private = s; 1808 - memset(s, 0, sizeof(*s)); 1809 1811 out: 1810 1812 return rc; 1811 1813 out_kfree:
+1 -2
net/ipv4/ipvs/ip_vs_est.c
··· 123 123 { 124 124 struct ip_vs_estimator *est; 125 125 126 - est = kmalloc(sizeof(*est), GFP_KERNEL); 126 + est = kzalloc(sizeof(*est), GFP_KERNEL); 127 127 if (est == NULL) 128 128 return -ENOMEM; 129 129 130 - memset(est, 0, sizeof(*est)); 131 130 est->stats = stats; 132 131 est->last_conns = stats->conns; 133 132 est->cps = stats->cps<<10;
+1 -2
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 172 172 struct clusterip_config *c; 173 173 char buffer[16]; 174 174 175 - c = kmalloc(sizeof(*c), GFP_ATOMIC); 175 + c = kzalloc(sizeof(*c), GFP_ATOMIC); 176 176 if (!c) 177 177 return NULL; 178 178 179 - memset(c, 0, sizeof(*c)); 180 179 c->dev = dev; 181 180 c->clusterip = ip; 182 181 memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
+1 -2
net/ipv4/tcp_ipv4.c
··· 1640 1640 if (unlikely(afinfo == NULL)) 1641 1641 return -EINVAL; 1642 1642 1643 - s = kmalloc(sizeof(*s), GFP_KERNEL); 1643 + s = kzalloc(sizeof(*s), GFP_KERNEL); 1644 1644 if (!s) 1645 1645 return -ENOMEM; 1646 - memset(s, 0, sizeof(*s)); 1647 1646 s->family = afinfo->family; 1648 1647 s->seq_ops.start = tcp_seq_start; 1649 1648 s->seq_ops.next = tcp_seq_next;
+1 -2
net/ipv4/udp.c
··· 1468 1468 struct udp_seq_afinfo *afinfo = PDE(inode)->data; 1469 1469 struct seq_file *seq; 1470 1470 int rc = -ENOMEM; 1471 - struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1471 + struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1472 1472 1473 1473 if (!s) 1474 1474 goto out; 1475 - memset(s, 0, sizeof(*s)); 1476 1475 s->family = afinfo->family; 1477 1476 s->seq_ops.start = udp_seq_start; 1478 1477 s->seq_ops.next = udp_seq_next;
-1
net/ipv4/xfrm4_mode_tunnel.c
··· 92 92 skb->mac.raw = memmove(skb->data - skb->mac_len, 93 93 skb->mac.raw, skb->mac_len); 94 94 skb->nh.raw = skb->data; 95 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 96 95 err = 0; 97 96 98 97 out:
+1 -2
net/ipv6/ip6_tunnel.c
··· 567 567 568 568 int opt_len = sizeof(*opt) + 8; 569 569 570 - if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) { 570 + if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) { 571 571 return NULL; 572 572 } 573 - memset(opt, 0, opt_len); 574 573 opt->tot_len = opt_len; 575 574 opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1); 576 575 opt->opt_nflen = 8;
-1
net/ipv6/sit.c
··· 380 380 secpath_reset(skb); 381 381 skb->mac.raw = skb->nh.raw; 382 382 skb->nh.raw = skb->data; 383 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 384 383 IPCB(skb)->flags = 0; 385 384 skb->protocol = htons(ETH_P_IPV6); 386 385 skb->pkt_type = PACKET_HOST;
+1 -1
net/irda/af_irda.c
··· 308 308 309 309 IRDA_ASSERT(self != NULL, return;); 310 310 311 - skb = dev_alloc_skb(64); 311 + skb = alloc_skb(64, GFP_ATOMIC); 312 312 if (skb == NULL) { 313 313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", 314 314 __FUNCTION__);
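Note: dev_alloc_skb(len) in kernels of this era expands to alloc_skb(len + 16, GFP_ATOMIC) followed by skb_reserve(skb, 16), where the 16 bytes are receive-path headroom. The IrDA buffers converted throughout the files below are locally built control and transmit frames, so calling alloc_skb() directly makes the GFP_ATOMIC explicit and drops the hidden reserve they never used:

    /* before: implicit GFP_ATOMIC plus 16 reserved bytes */
    skb = dev_alloc_skb(64);

    /* after: explicit flags, exact size */
    skb = alloc_skb(64, GFP_ATOMIC);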
+1 -3
net/irda/ircomm/ircomm_core.c
··· 115 115 116 116 IRDA_ASSERT(ircomm != NULL, return NULL;); 117 117 118 - self = kmalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); 118 + self = kzalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); 119 119 if (self == NULL) 120 120 return NULL; 121 - 122 - memset(self, 0, sizeof(struct ircomm_cb)); 123 121 124 122 self->notify = *notify; 125 123 self->magic = IRCOMM_MAGIC;
+2 -2
net/irda/ircomm/ircomm_lmp.c
··· 81 81 82 82 /* Any userdata supplied? */ 83 83 if (userdata == NULL) { 84 - tx_skb = dev_alloc_skb(64); 84 + tx_skb = alloc_skb(64, GFP_ATOMIC); 85 85 if (!tx_skb) 86 86 return -ENOMEM; 87 87 ··· 115 115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 116 116 117 117 if (!userdata) { 118 - tx_skb = dev_alloc_skb(64); 118 + tx_skb = alloc_skb(64, GFP_ATOMIC); 119 119 if (!tx_skb) 120 120 return -ENOMEM; 121 121
+1 -1
net/irda/ircomm/ircomm_param.c
··· 121 121 122 122 skb = self->ctrl_skb; 123 123 if (!skb) { 124 - skb = dev_alloc_skb(256); 124 + skb = alloc_skb(256, GFP_ATOMIC); 125 125 if (!skb) { 126 126 spin_unlock_irqrestore(&self->spinlock, flags); 127 127 return -ENOMEM;
+4 -4
net/irda/ircomm/ircomm_tty.c
··· 379 379 self = hashbin_lock_find(ircomm_tty, line, NULL); 380 380 if (!self) { 381 381 /* No, so make new instance */ 382 - self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); 382 + self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); 383 383 if (self == NULL) { 384 384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__); 385 385 return -ENOMEM; 386 386 } 387 - memset(self, 0, sizeof(struct ircomm_tty_cb)); 388 387 389 388 self->magic = IRCOMM_TTY_MAGIC; 390 389 self->flow = FLOW_STOP; ··· 758 759 } 759 760 } else { 760 761 /* Prepare a full sized frame */ 761 - skb = dev_alloc_skb(self->max_data_size+ 762 - self->max_header_size); 762 + skb = alloc_skb(self->max_data_size+ 763 + self->max_header_size, 764 + GFP_ATOMIC); 763 765 if (!skb) { 764 766 spin_unlock_irqrestore(&self->spinlock, flags); 765 767 return -ENOBUFS;
+1 -3
net/irda/irda_device.c
··· 401 401 } 402 402 403 403 /* Allocate dongle info for this instance */ 404 - dongle = kmalloc(sizeof(dongle_t), GFP_KERNEL); 404 + dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL); 405 405 if (!dongle) 406 406 goto out; 407 - 408 - memset(dongle, 0, sizeof(dongle_t)); 409 407 410 408 /* Bind the registration info to this particular instance */ 411 409 dongle->issue = reg;
+5 -4
net/irda/iriap.c
··· 345 345 IRDA_ASSERT(self != NULL, return;); 346 346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 347 347 348 - tx_skb = dev_alloc_skb(64); 348 + tx_skb = alloc_skb(64, GFP_ATOMIC); 349 349 if (tx_skb == NULL) { 350 350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", 351 351 __FUNCTION__, 64); ··· 396 396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */ 397 397 398 398 skb_len = self->max_header_size+2+name_len+1+attr_len+4; 399 - tx_skb = dev_alloc_skb(skb_len); 399 + tx_skb = alloc_skb(skb_len, GFP_ATOMIC); 400 400 if (!tx_skb) 401 401 return -ENOMEM; 402 402 ··· 562 562 * value. We add 32 bytes because of the 6 bytes for the frame and 563 563 * max 5 bytes for the value coding. 564 564 */ 565 - tx_skb = dev_alloc_skb(value->len + self->max_header_size + 32); 565 + tx_skb = alloc_skb(value->len + self->max_header_size + 32, 566 + GFP_ATOMIC); 566 567 if (!tx_skb) 567 568 return; 568 569 ··· 701 700 IRDA_ASSERT(self != NULL, return;); 702 701 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 703 702 704 - tx_skb = dev_alloc_skb(64); 703 + tx_skb = alloc_skb(64, GFP_ATOMIC); 705 704 if (!tx_skb) 706 705 return; 707 706
+1 -1
net/irda/iriap_event.c
··· 365 365 366 366 switch (event) { 367 367 case IAP_LM_CONNECT_INDICATION: 368 - tx_skb = dev_alloc_skb(64); 368 + tx_skb = alloc_skb(64, GFP_ATOMIC); 369 369 if (tx_skb == NULL) { 370 370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); 371 371 return;
+8 -16
net/irda/irias_object.c
··· 82 82 83 83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 84 84 85 - obj = kmalloc(sizeof(struct ias_object), GFP_ATOMIC); 85 + obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); 86 86 if (obj == NULL) { 87 87 IRDA_WARNING("%s(), Unable to allocate object!\n", 88 88 __FUNCTION__); 89 89 return NULL; 90 90 } 91 - memset(obj, 0, sizeof( struct ias_object)); 92 91 93 92 obj->magic = IAS_OBJECT_MAGIC; 94 93 obj->name = strndup(name, IAS_MAX_CLASSNAME); ··· 345 346 IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); 346 347 IRDA_ASSERT(name != NULL, return;); 347 348 348 - attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 349 + attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 349 350 if (attrib == NULL) { 350 351 IRDA_WARNING("%s: Unable to allocate attribute!\n", 351 352 __FUNCTION__); 352 353 return; 353 354 } 354 - memset(attrib, 0, sizeof( struct ias_attrib)); 355 355 356 356 attrib->magic = IAS_ATTRIB_MAGIC; 357 357 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); ··· 380 382 IRDA_ASSERT(name != NULL, return;); 381 383 IRDA_ASSERT(octets != NULL, return;); 382 384 383 - attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 385 + attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 384 386 if (attrib == NULL) { 385 387 IRDA_WARNING("%s: Unable to allocate attribute!\n", 386 388 __FUNCTION__); 387 389 return; 388 390 } 389 - memset(attrib, 0, sizeof( struct ias_attrib)); 390 391 391 392 attrib->magic = IAS_ATTRIB_MAGIC; 392 393 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); ··· 413 416 IRDA_ASSERT(name != NULL, return;); 414 417 IRDA_ASSERT(value != NULL, return;); 415 418 416 - attrib = kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC); 419 + attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); 417 420 if (attrib == NULL) { 418 421 IRDA_WARNING("%s: Unable to allocate attribute!\n", 419 422 __FUNCTION__); 420 423 return; 421 424 } 422 - memset(attrib, 0, sizeof( struct ias_attrib)); 423 425 424 426 attrib->magic = IAS_ATTRIB_MAGIC; 425 427 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); ··· 439 443 { 440 444 struct ias_value *value; 441 445 442 - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 446 + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 443 447 if (value == NULL) { 444 448 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 445 449 return NULL; 446 450 } 447 - memset(value, 0, sizeof(struct ias_value)); 448 451 449 452 value->type = IAS_INTEGER; 450 453 value->len = 4; ··· 464 469 { 465 470 struct ias_value *value; 466 471 467 - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 472 + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 468 473 if (value == NULL) { 469 474 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 470 475 return NULL; 471 476 } 472 - memset( value, 0, sizeof( struct ias_value)); 473 477 474 478 value->type = IAS_STRING; 475 479 value->charset = CS_ASCII; ··· 489 495 { 490 496 struct ias_value *value; 491 497 492 - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 498 + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 493 499 if (value == NULL) { 494 500 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 495 501 return NULL; 496 502 } 497 - memset(value, 0, sizeof(struct ias_value)); 498 503 499 504 value->type = IAS_OCT_SEQ; 500 505 /* Check length */ ··· 515 522 { 516 523 struct ias_value *value; 517 524 518 - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 525 + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 519 526 if (value == NULL) { 520 527 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 521 528 return NULL; 522 529 } 523 - memset(value, 0, sizeof(struct ias_value)); 524 530 525 531 value->type = IAS_MISSING; 526 532 value->len = 0;
+8 -8
net/irda/irlan/irlan_common.c
··· 636 636 IRDA_ASSERT(self != NULL, return;); 637 637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 638 638 639 - skb = dev_alloc_skb(64); 639 + skb = alloc_skb(64, GFP_ATOMIC); 640 640 if (!skb) 641 641 return; 642 642 ··· 668 668 IRDA_ASSERT(self != NULL, return;); 669 669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 670 670 671 - skb = dev_alloc_skb(64); 671 + skb = alloc_skb(64, GFP_ATOMIC); 672 672 if (!skb) 673 673 return; 674 674 ··· 704 704 if (self->client.tsap_ctrl == NULL) 705 705 return; 706 706 707 - skb = dev_alloc_skb(64); 707 + skb = alloc_skb(64, GFP_ATOMIC); 708 708 if (!skb) 709 709 return; 710 710 ··· 739 739 IRDA_ASSERT(self != NULL, return;); 740 740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 741 741 742 - skb = dev_alloc_skb(128); 742 + skb = alloc_skb(128, GFP_ATOMIC); 743 743 if (!skb) 744 744 return; 745 745 ··· 777 777 IRDA_ASSERT(self != NULL, return;); 778 778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 779 779 780 - skb = dev_alloc_skb(128); 780 + skb = alloc_skb(128, GFP_ATOMIC); 781 781 if (!skb) 782 782 return; 783 783 ··· 816 816 IRDA_ASSERT(self != NULL, return;); 817 817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 818 818 819 - skb = dev_alloc_skb(128); 819 + skb = alloc_skb(128, GFP_ATOMIC); 820 820 if (!skb) 821 821 return; 822 822 ··· 856 856 IRDA_ASSERT(self != NULL, return;); 857 857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 858 858 859 - skb = dev_alloc_skb(128); 859 + skb = alloc_skb(128, GFP_ATOMIC); 860 860 if (!skb) 861 861 return; 862 862 ··· 891 891 IRDA_ASSERT(self != NULL, return;); 892 892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 893 893 894 - skb = dev_alloc_skb(64); 894 + skb = alloc_skb(64, GFP_ATOMIC); 895 895 if (!skb) 896 896 return; 897 897
+1 -1
net/irda/irlan/irlan_provider.c
··· 296 296 IRDA_ASSERT(self != NULL, return;); 297 297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 298 298 299 - skb = dev_alloc_skb(128); 299 + skb = alloc_skb(128, GFP_ATOMIC); 300 300 if (!skb) 301 301 return; 302 302
+3 -5
net/irda/irlap.c
··· 116 116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 117 117 118 118 /* Initialize the irlap structure. */ 119 - self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL); 119 + self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); 120 120 if (self == NULL) 121 121 return NULL; 122 122 123 - memset(self, 0, sizeof(struct irlap_cb)); 124 123 self->magic = LAP_MAGIC; 125 124 126 125 /* Make a binding between the layers */ ··· 881 882 /* Change speed now, or just piggyback speed on frames */ 882 883 if (now) { 883 884 /* Send down empty frame to trigger speed change */ 884 - skb = dev_alloc_skb(0); 885 + skb = alloc_skb(0, GFP_ATOMIC); 885 886 if (skb) 886 887 irlap_queue_xmit(self, skb); 887 888 } ··· 1221 1222 { 1222 1223 struct seq_file *seq; 1223 1224 int rc = -ENOMEM; 1224 - struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1225 + struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1225 1226 1226 1227 if (!s) 1227 1228 goto out; ··· 1237 1238 1238 1239 seq = file->private_data; 1239 1240 seq->private = s; 1240 - memset(s, 0, sizeof(*s)); 1241 1241 out: 1242 1242 return rc; 1243 1243 out_kfree:
+9 -10
net/irda/irlap_frame.c
··· 117 117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 118 118 119 119 /* Allocate frame */ 120 - tx_skb = dev_alloc_skb(64); 120 + tx_skb = alloc_skb(64, GFP_ATOMIC); 121 121 if (!tx_skb) 122 122 return; 123 123 ··· 210 210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 211 211 212 212 /* Allocate frame */ 213 - tx_skb = dev_alloc_skb(64); 213 + tx_skb = alloc_skb(64, GFP_ATOMIC); 214 214 if (!tx_skb) 215 215 return; 216 216 ··· 250 250 IRDA_ASSERT(self != NULL, return;); 251 251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 252 252 253 - tx_skb = dev_alloc_skb(32); 253 + tx_skb = alloc_skb(32, GFP_ATOMIC); 254 254 if (!tx_skb) 255 255 return; 256 256 ··· 282 282 IRDA_ASSERT(self != NULL, return;); 283 283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 284 284 285 - tx_skb = dev_alloc_skb(16); 285 + tx_skb = alloc_skb(16, GFP_ATOMIC); 286 286 if (!tx_skb) 287 287 return; 288 288 ··· 315 315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 316 316 IRDA_ASSERT(discovery != NULL, return;); 317 317 318 - tx_skb = dev_alloc_skb(64); 318 + tx_skb = alloc_skb(64, GFP_ATOMIC); 319 319 if (!tx_skb) 320 320 return; 321 321 ··· 422 422 return; 423 423 } 424 424 425 - if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { 425 + if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { 426 426 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__); 427 427 return; 428 428 } 429 - memset(discovery, 0, sizeof(discovery_t)); 430 429 431 430 discovery->data.daddr = info->daddr; 432 431 discovery->data.saddr = self->saddr; ··· 575 576 struct sk_buff *tx_skb; 576 577 __u8 *frame; 577 578 578 - tx_skb = dev_alloc_skb(16); 579 + tx_skb = alloc_skb(16, GFP_ATOMIC); 579 580 if (!tx_skb) 580 581 return; 581 582 ··· 600 601 struct sk_buff *tx_skb; 601 602 __u8 *frame; 602 603 603 - tx_skb = dev_alloc_skb(16); 604 + tx_skb = alloc_skb(16, GFP_ATOMIC); 604 605 if (!tx_skb) 605 606 return; 606 607 ··· 1214 1215 struct test_frame *frame; 1215 1216 __u8 *info; 1216 1217 1217 - tx_skb = dev_alloc_skb(cmd->len+sizeof(struct test_frame)); 1218 + tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC); 1218 1219 if (!tx_skb) 1219 1220 return; 1220 1221
+4 -7
net/irda/irlmp.c
··· 78 78 { 79 79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 80 80 /* Initialize the irlmp structure. */ 81 - irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL); 81 + irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); 82 82 if (irlmp == NULL) 83 83 return -ENOMEM; 84 - memset(irlmp, 0, sizeof(struct irlmp_cb)); 85 84 86 85 irlmp->magic = LMP_MAGIC; 87 86 ··· 159 160 return NULL; 160 161 161 162 /* Allocate new instance of a LSAP connection */ 162 - self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC); 163 + self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); 163 164 if (self == NULL) { 164 165 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__); 165 166 return NULL; 166 167 } 167 - memset(self, 0, sizeof(struct lsap_cb)); 168 168 169 169 self->magic = LMP_LSAP_MAGIC; 170 170 self->slsap_sel = slsap_sel; ··· 286 288 /* 287 289 * Allocate new instance of a LSAP connection 288 290 */ 289 - lap = kmalloc(sizeof(struct lap_cb), GFP_KERNEL); 291 + lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL); 290 292 if (lap == NULL) { 291 293 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__); 292 294 return; 293 295 } 294 - memset(lap, 0, sizeof(struct lap_cb)); 295 296 296 297 lap->irlap = irlap; 297 298 lap->magic = LMP_LAP_MAGIC; ··· 392 395 393 396 /* Any userdata? */ 394 397 if (tx_skb == NULL) { 395 - tx_skb = dev_alloc_skb(64); 398 + tx_skb = alloc_skb(64, GFP_ATOMIC); 396 399 if (!tx_skb) 397 400 return -ENOMEM; 398 401
+1 -2
net/irda/irnet/irnet_ppp.c
··· 476 476 #endif /* SECURE_DEVIRNET */ 477 477 478 478 /* Allocate a private structure for this IrNET instance */ 479 - ap = kmalloc(sizeof(*ap), GFP_KERNEL); 479 + ap = kzalloc(sizeof(*ap), GFP_KERNEL); 480 480 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); 481 481 482 482 /* initialize the irnet structure */ 483 - memset(ap, 0, sizeof(*ap)); 484 483 ap->file = file; 485 484 486 485 /* PPP channel setup */
+9 -11
net/irda/irttp.c
··· 85 85 */ 86 86 int __init irttp_init(void) 87 87 { 88 - irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); 88 + irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL); 89 89 if (irttp == NULL) 90 90 return -ENOMEM; 91 - memset(irttp, 0, sizeof(struct irttp_cb)); 92 91 93 92 irttp->magic = TTP_MAGIC; 94 93 ··· 305 306 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); 306 307 307 308 /* Make new segment */ 308 - frag = dev_alloc_skb(self->max_seg_size+self->max_header_size); 309 + frag = alloc_skb(self->max_seg_size+self->max_header_size, 310 + GFP_ATOMIC); 309 311 if (!frag) 310 312 return; 311 313 ··· 389 389 return NULL; 390 390 } 391 391 392 - self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 392 + self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 393 393 if (self == NULL) { 394 394 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); 395 395 return NULL; 396 396 } 397 - memset(self, 0, sizeof(struct tsap_cb)); 398 397 spin_lock_init(&self->lock); 399 398 400 399 /* Initialise todo timer */ ··· 804 805 self->send_credit, self->avail_credit, self->remote_credit); 805 806 806 807 /* Give credit to peer */ 807 - tx_skb = dev_alloc_skb(64); 808 + tx_skb = alloc_skb(64, GFP_ATOMIC); 808 809 if (!tx_skb) 809 810 return; 810 811 ··· 1093 1094 1094 1095 /* Any userdata supplied? */ 1095 1096 if (userdata == NULL) { 1096 - tx_skb = dev_alloc_skb(64); 1097 + tx_skb = alloc_skb(64, GFP_ATOMIC); 1097 1098 if (!tx_skb) 1098 1099 return -ENOMEM; 1099 1100 ··· 1341 1342 1342 1343 /* Any userdata supplied? */ 1343 1344 if (userdata == NULL) { 1344 - tx_skb = dev_alloc_skb(64); 1345 + tx_skb = alloc_skb(64, GFP_ATOMIC); 1345 1346 if (!tx_skb) 1346 1347 return -ENOMEM; 1347 1348 ··· 1540 1541 1541 1542 if (!userdata) { 1542 1543 struct sk_buff *tx_skb; 1543 - tx_skb = dev_alloc_skb(64); 1544 + tx_skb = alloc_skb(64, GFP_ATOMIC); 1544 1545 if (!tx_skb) 1545 1546 return -ENOMEM; 1546 1547 ··· 1875 1876 int rc = -ENOMEM; 1876 1877 struct irttp_iter_state *s; 1877 1878 1878 - s = kmalloc(sizeof(*s), GFP_KERNEL); 1879 + s = kzalloc(sizeof(*s), GFP_KERNEL); 1879 1880 if (!s) 1880 1881 goto out; 1881 1882 ··· 1885 1886 1886 1887 seq = file->private_data; 1887 1888 seq->private = s; 1888 - memset(s, 0, sizeof(*s)); 1889 1889 out: 1890 1890 return rc; 1891 1891 out_kfree:
+1 -3
net/lapb/lapb_iface.c
··· 115 115 */ 116 116 static struct lapb_cb *lapb_create_cb(void) 117 117 { 118 - struct lapb_cb *lapb = kmalloc(sizeof(*lapb), GFP_ATOMIC); 118 + struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC); 119 119 120 120 121 121 if (!lapb) 122 122 goto out; 123 - 124 - memset(lapb, 0x00, sizeof(*lapb)); 125 123 126 124 skb_queue_head_init(&lapb->write_queue); 127 125 skb_queue_head_init(&lapb->ack_queue);
+1 -2
net/llc/llc_core.c
··· 33 33 */ 34 34 static struct llc_sap *llc_sap_alloc(void) 35 35 { 36 - struct llc_sap *sap = kmalloc(sizeof(*sap), GFP_ATOMIC); 36 + struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC); 37 37 38 38 if (sap) { 39 - memset(sap, 0, sizeof(*sap)); 40 39 sap->state = LLC_SAP_STATE_ACTIVE; 41 40 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN); 42 41 rwlock_init(&sap->sk_list.lock);
+4 -9
net/netlink/af_netlink.c
··· 562 562 if (err) 563 563 return err; 564 564 565 - nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL); 565 + nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL); 566 566 if (nlk->groups == NULL) 567 567 return -ENOMEM; 568 - memset(nlk->groups, 0, NLGRPSZ(groups)); 569 568 nlk->ngroups = groups; 570 569 return 0; 571 570 } ··· 1392 1393 struct sock *sk; 1393 1394 struct netlink_sock *nlk; 1394 1395 1395 - cb = kmalloc(sizeof(*cb), GFP_KERNEL); 1396 + cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1396 1397 if (cb == NULL) 1397 1398 return -ENOBUFS; 1398 1399 1399 - memset(cb, 0, sizeof(*cb)); 1400 1400 cb->dump = dump; 1401 1401 cb->done = done; 1402 1402 cb->nlh = nlh; ··· 1666 1668 struct nl_seq_iter *iter; 1667 1669 int err; 1668 1670 1669 - iter = kmalloc(sizeof(*iter), GFP_KERNEL); 1671 + iter = kzalloc(sizeof(*iter), GFP_KERNEL); 1670 1672 if (!iter) 1671 1673 return -ENOMEM; 1672 1674 ··· 1676 1678 return err; 1677 1679 } 1678 1680 1679 - memset(iter, 0, sizeof(*iter)); 1680 1681 seq = file->private_data; 1681 1682 seq->private = iter; 1682 1683 return 0; ··· 1744 1747 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) 1745 1748 netlink_skb_parms_too_large(); 1746 1749 1747 - nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL); 1750 + nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); 1748 1751 if (!nl_table) { 1749 1752 enomem: 1750 1753 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n"); 1751 1754 return -ENOMEM; 1752 1755 } 1753 - 1754 - memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS); 1755 1756 1756 1757 if (num_physpages >= (128 * 1024)) 1757 1758 max = num_physpages >> (21 - PAGE_SHIFT);
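Note: where the allocation is an array, the series reaches for kcalloc(n, size, flags) rather than kzalloc(n * size): both zero the memory, but kcalloc also fails cleanly if n * size would overflow. The nl_table conversion above is the template:

    /* before */
    nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
    ...
    memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

    /* after: zeroed, with the multiplication overflow-checked */
    nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);

The same swap shows up below in cls_tcindex.c (cp.perfect and cp.h) and in sunrpc/stats.c for the per-op iostats array.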
+2 -4
net/rxrpc/connection.c
··· 58 58 _enter("%p",peer); 59 59 60 60 /* allocate and initialise a connection record */ 61 - conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); 61 + conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); 62 62 if (!conn) { 63 63 _leave(" = -ENOMEM"); 64 64 return -ENOMEM; 65 65 } 66 66 67 - memset(conn, 0, sizeof(struct rxrpc_connection)); 68 67 atomic_set(&conn->usage, 1); 69 68 70 69 INIT_LIST_HEAD(&conn->link); ··· 534 535 return -EINVAL; 535 536 } 536 537 537 - msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags); 538 + msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags); 538 539 if (!msg) { 539 540 _leave(" = -ENOMEM"); 540 541 return -ENOMEM; 541 542 } 542 543 543 - memset(msg, 0, sizeof(*msg)); 544 544 atomic_set(&msg->usage, 1); 545 545 546 546 INIT_LIST_HEAD(&msg->link);
+1 -2
net/rxrpc/peer.c
··· 58 58 _enter("%p,%08x", trans, ntohl(addr)); 59 59 60 60 /* allocate and initialise a peer record */ 61 - peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); 61 + peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); 62 62 if (!peer) { 63 63 _leave(" = -ENOMEM"); 64 64 return -ENOMEM; 65 65 } 66 66 67 - memset(peer, 0, sizeof(struct rxrpc_peer)); 68 67 atomic_set(&peer->usage, 1); 69 68 70 69 INIT_LIST_HEAD(&peer->link);
+2 -4
net/rxrpc/transport.c
··· 68 68 69 69 _enter("%hu", port); 70 70 71 - trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); 71 + trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); 72 72 if (!trans) 73 73 return -ENOMEM; 74 74 75 - memset(trans, 0, sizeof(struct rxrpc_transport)); 76 75 atomic_set(&trans->usage, 1); 77 76 INIT_LIST_HEAD(&trans->services); 78 77 INIT_LIST_HEAD(&trans->link); ··· 311 312 312 313 _enter(""); 313 314 314 - msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL); 315 + msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL); 315 316 if (!msg) { 316 317 _leave(" = -ENOMEM"); 317 318 return -ENOMEM; 318 319 } 319 320 320 - memset(msg, 0, sizeof(*msg)); 321 321 atomic_set(&msg->usage, 1); 322 322 list_add_tail(&msg->link,msgq); 323 323
+3 -6
net/sched/act_api.c
··· 312 312 } 313 313 314 314 *err = -ENOMEM; 315 - a = kmalloc(sizeof(*a), GFP_KERNEL); 315 + a = kzalloc(sizeof(*a), GFP_KERNEL); 316 316 if (a == NULL) 317 317 goto err_mod; 318 - memset(a, 0, sizeof(*a)); 319 318 320 319 /* backward compatibility for policer */ 321 320 if (name == NULL) ··· 491 492 index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]); 492 493 493 494 *err = -ENOMEM; 494 - a = kmalloc(sizeof(struct tc_action), GFP_KERNEL); 495 + a = kzalloc(sizeof(struct tc_action), GFP_KERNEL); 495 496 if (a == NULL) 496 497 return NULL; 497 - memset(a, 0, sizeof(struct tc_action)); 498 498 499 499 *err = -EINVAL; 500 500 a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]); ··· 529 531 { 530 532 struct tc_action *act; 531 533 532 - act = kmalloc(sizeof(*act), GFP_KERNEL); 534 + act = kzalloc(sizeof(*act), GFP_KERNEL); 533 535 if (act == NULL) { 534 536 printk("create_a: failed to alloc!\n"); 535 537 return NULL; 536 538 } 537 - memset(act, 0, sizeof(*act)); 538 539 act->order = i; 539 540 return act; 540 541 }
+1 -2
net/sched/act_pedit.c
··· 209 209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key); 210 210 211 211 /* netlink spinlocks held above us - must use ATOMIC */ 212 - opt = kmalloc(s, GFP_ATOMIC); 212 + opt = kzalloc(s, GFP_ATOMIC); 213 213 if (opt == NULL) 214 214 return -ENOBUFS; 215 - memset(opt, 0, s); 216 215 217 216 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key)); 218 217 opt->index = p->index;
+2 -4
net/sched/act_police.c
··· 196 196 return ret; 197 197 } 198 198 199 - p = kmalloc(sizeof(*p), GFP_KERNEL); 199 + p = kzalloc(sizeof(*p), GFP_KERNEL); 200 200 if (p == NULL) 201 201 return -ENOMEM; 202 - memset(p, 0, sizeof(*p)); 203 202 204 203 ret = ACT_P_CREATED; 205 204 p->refcnt = 1; ··· 428 429 return p; 429 430 } 430 431 431 - p = kmalloc(sizeof(*p), GFP_KERNEL); 432 + p = kzalloc(sizeof(*p), GFP_KERNEL); 432 433 if (p == NULL) 433 434 return NULL; 434 435 435 - memset(p, 0, sizeof(*p)); 436 436 p->refcnt = 1; 437 437 spin_lock_init(&p->lock); 438 438 p->stats_lock = &p->lock;
+2 -4
net/sched/cls_basic.c
··· 178 178 179 179 err = -ENOBUFS; 180 180 if (head == NULL) { 181 - head = kmalloc(sizeof(*head), GFP_KERNEL); 181 + head = kzalloc(sizeof(*head), GFP_KERNEL); 182 182 if (head == NULL) 183 183 goto errout; 184 184 185 - memset(head, 0, sizeof(*head)); 186 185 INIT_LIST_HEAD(&head->flist); 187 186 tp->root = head; 188 187 } 189 188 190 - f = kmalloc(sizeof(*f), GFP_KERNEL); 189 + f = kzalloc(sizeof(*f), GFP_KERNEL); 191 190 if (f == NULL) 192 191 goto errout; 193 - memset(f, 0, sizeof(*f)); 194 192 195 193 err = -EINVAL; 196 194 if (handle)
+2 -4
net/sched/cls_fw.c
··· 267 267 return -EINVAL; 268 268 269 269 if (head == NULL) { 270 - head = kmalloc(sizeof(struct fw_head), GFP_KERNEL); 270 + head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); 271 271 if (head == NULL) 272 272 return -ENOBUFS; 273 - memset(head, 0, sizeof(*head)); 274 273 275 274 tcf_tree_lock(tp); 276 275 tp->root = head; 277 276 tcf_tree_unlock(tp); 278 277 } 279 278 280 - f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL); 279 + f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); 281 280 if (f == NULL) 282 281 return -ENOBUFS; 283 - memset(f, 0, sizeof(*f)); 284 282 285 283 f->id = handle; 286 284
+3 -6
net/sched/cls_route.c
··· 396 396 h1 = to_hash(nhandle); 397 397 if ((b = head->table[h1]) == NULL) { 398 398 err = -ENOBUFS; 399 - b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL); 399 + b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); 400 400 if (b == NULL) 401 401 goto errout; 402 - memset(b, 0, sizeof(*b)); 403 402 404 403 tcf_tree_lock(tp); 405 404 head->table[h1] = b; ··· 474 475 475 476 err = -ENOBUFS; 476 477 if (head == NULL) { 477 - head = kmalloc(sizeof(struct route4_head), GFP_KERNEL); 478 + head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); 478 479 if (head == NULL) 479 480 goto errout; 480 - memset(head, 0, sizeof(struct route4_head)); 481 481 482 482 tcf_tree_lock(tp); 483 483 tp->root = head; 484 484 tcf_tree_unlock(tp); 485 485 } 486 486 487 - f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL); 487 + f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL); 488 488 if (f == NULL) 489 489 goto errout; 490 - memset(f, 0, sizeof(*f)); 491 490 492 491 err = route4_set_parms(tp, base, f, handle, head, tb, 493 492 tca[TCA_RATE-1], 1);
+3 -6
net/sched/cls_rsvp.h
··· 240 240 { 241 241 struct rsvp_head *data; 242 242 243 - data = kmalloc(sizeof(struct rsvp_head), GFP_KERNEL); 243 + data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL); 244 244 if (data) { 245 - memset(data, 0, sizeof(struct rsvp_head)); 246 245 tp->root = data; 247 246 return 0; 248 247 } ··· 445 446 goto errout2; 446 447 447 448 err = -ENOBUFS; 448 - f = kmalloc(sizeof(struct rsvp_filter), GFP_KERNEL); 449 + f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL); 449 450 if (f == NULL) 450 451 goto errout2; 451 452 452 - memset(f, 0, sizeof(*f)); 453 453 h2 = 16; 454 454 if (tb[TCA_RSVP_SRC-1]) { 455 455 err = -EINVAL; ··· 530 532 /* No session found. Create new one. */ 531 533 532 534 err = -ENOBUFS; 533 - s = kmalloc(sizeof(struct rsvp_session), GFP_KERNEL); 535 + s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL); 534 536 if (s == NULL) 535 537 goto errout; 536 - memset(s, 0, sizeof(*s)); 537 538 memcpy(s->dst, dst, sizeof(s->dst)); 538 539 539 540 if (pinfo) {
+4 -8
net/sched/cls_tcindex.c
··· 148 148 struct tcindex_data *p; 149 149 150 150 DPRINTK("tcindex_init(tp %p)\n",tp); 151 - p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL); 151 + p = kzalloc(sizeof(struct tcindex_data),GFP_KERNEL); 152 152 if (!p) 153 153 return -ENOMEM; 154 154 155 - memset(p, 0, sizeof(*p)); 156 155 p->mask = 0xffff; 157 156 p->hash = DEFAULT_HASH_SIZE; 158 157 p->fall_through = 1; ··· 295 296 err = -ENOMEM; 296 297 if (!cp.perfect && !cp.h) { 297 298 if (valid_perfect_hash(&cp)) { 298 - cp.perfect = kmalloc(cp.hash * sizeof(*r), GFP_KERNEL); 299 + cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); 299 300 if (!cp.perfect) 300 301 goto errout; 301 - memset(cp.perfect, 0, cp.hash * sizeof(*r)); 302 302 balloc = 1; 303 303 } else { 304 - cp.h = kmalloc(cp.hash * sizeof(f), GFP_KERNEL); 304 + cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); 305 305 if (!cp.h) 306 306 goto errout; 307 - memset(cp.h, 0, cp.hash * sizeof(f)); 308 307 balloc = 2; 309 308 } 310 309 } ··· 313 316 r = tcindex_lookup(&cp, handle) ? : &new_filter_result; 314 317 315 318 if (r == &new_filter_result) { 316 - f = kmalloc(sizeof(*f), GFP_KERNEL); 319 + f = kzalloc(sizeof(*f), GFP_KERNEL); 317 320 if (!f) 318 321 goto errout_alloc; 319 - memset(f, 0, sizeof(*f)); 320 322 } 321 323 322 324 if (tb[TCA_TCINDEX_CLASSID-1]) {
+5 -10
net/sched/cls_u32.c
··· 307 307 if (tp_c->q == tp->q) 308 308 break; 309 309 310 - root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL); 310 + root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL); 311 311 if (root_ht == NULL) 312 312 return -ENOBUFS; 313 313 314 - memset(root_ht, 0, sizeof(*root_ht)); 315 314 root_ht->divisor = 0; 316 315 root_ht->refcnt++; 317 316 root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000; 318 317 root_ht->prio = tp->prio; 319 318 320 319 if (tp_c == NULL) { 321 - tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL); 320 + tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL); 322 321 if (tp_c == NULL) { 323 322 kfree(root_ht); 324 323 return -ENOBUFS; 325 324 } 326 - memset(tp_c, 0, sizeof(*tp_c)); 327 325 tp_c->q = tp->q; 328 326 tp_c->next = u32_list; 329 327 u32_list = tp_c; ··· 569 571 if (handle == 0) 570 572 return -ENOMEM; 571 573 } 572 - ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); 574 + ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); 573 575 if (ht == NULL) 574 576 return -ENOBUFS; 575 - memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*)); 576 577 ht->tp_c = tp_c; 577 578 ht->refcnt = 0; 578 579 ht->divisor = divisor; ··· 614 617 615 618 s = RTA_DATA(tb[TCA_U32_SEL-1]); 616 619 617 - n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); 620 + n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); 618 621 if (n == NULL) 619 622 return -ENOBUFS; 620 623 621 - memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key)); 622 624 #ifdef CONFIG_CLS_U32_PERF 623 - n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); 625 + n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); 624 626 if (n->pf == NULL) { 625 627 kfree(n); 626 628 return -ENOBUFS; 627 629 } 628 - memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64)); 629 630 #endif 630 631 631 632 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+1 -2
net/sched/em_meta.c
··· 773 773 TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX) 774 774 goto errout; 775 775 776 - meta = kmalloc(sizeof(*meta), GFP_KERNEL); 776 + meta = kzalloc(sizeof(*meta), GFP_KERNEL); 777 777 if (meta == NULL) 778 778 goto errout; 779 - memset(meta, 0, sizeof(*meta)); 780 779 781 780 memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left)); 782 781 memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
+1 -2
net/sched/ematch.c
··· 321 321 list_len = RTA_PAYLOAD(rt_list); 322 322 matches_len = tree_hdr->nmatches * sizeof(*em); 323 323 324 - tree->matches = kmalloc(matches_len, GFP_KERNEL); 324 + tree->matches = kzalloc(matches_len, GFP_KERNEL); 325 325 if (tree->matches == NULL) 326 326 goto errout; 327 - memset(tree->matches, 0, matches_len); 328 327 329 328 /* We do not use rtattr_parse_nested here because the maximum 330 329 * number of attributes is unknown. This saves us the allocation
+1 -2
net/sched/estimator.c
··· 139 139 if (parm->interval < -2 || parm->interval > 3) 140 140 return -EINVAL; 141 141 142 - est = kmalloc(sizeof(*est), GFP_KERNEL); 142 + est = kzalloc(sizeof(*est), GFP_KERNEL); 143 143 if (est == NULL) 144 144 return -ENOBUFS; 145 145 146 - memset(est, 0, sizeof(*est)); 147 146 est->interval = parm->interval + 2; 148 147 est->stats = stats; 149 148 est->stats_lock = stats_lock;
+1 -2
net/sched/sch_cbq.c
··· 1926 1926 } 1927 1927 1928 1928 err = -ENOBUFS; 1929 - cl = kmalloc(sizeof(*cl), GFP_KERNEL); 1929 + cl = kzalloc(sizeof(*cl), GFP_KERNEL); 1930 1930 if (cl == NULL) 1931 1931 goto failure; 1932 - memset(cl, 0, sizeof(*cl)); 1933 1932 cl->R_tab = rtab; 1934 1933 rtab = NULL; 1935 1934 cl->refcnt = 1;
+1 -2
net/sched/sch_generic.c
··· 432 432 size = QDISC_ALIGN(sizeof(*sch)); 433 433 size += ops->priv_size + (QDISC_ALIGNTO - 1); 434 434 435 - p = kmalloc(size, GFP_KERNEL); 435 + p = kzalloc(size, GFP_KERNEL); 436 436 if (!p) 437 437 goto errout; 438 - memset(p, 0, size); 439 438 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 440 439 sch->padded = (char *) sch - (char *) p; 441 440
+1 -2
net/sched/sch_gred.c
··· 406 406 struct gred_sched_data *q; 407 407 408 408 if (table->tab[dp] == NULL) { 409 - table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL); 409 + table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL); 410 410 if (table->tab[dp] == NULL) 411 411 return -ENOMEM; 412 - memset(table->tab[dp], 0, sizeof(*q)); 413 412 } 414 413 415 414 q = table->tab[dp];
+1 -2
net/sched/sch_hfsc.c
··· 1123 1123 if (rsc == NULL && fsc == NULL) 1124 1124 return -EINVAL; 1125 1125 1126 - cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL); 1126 + cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL); 1127 1127 if (cl == NULL) 1128 1128 return -ENOBUFS; 1129 - memset(cl, 0, sizeof(struct hfsc_class)); 1130 1129 1131 1130 if (rsc != NULL) 1132 1131 hfsc_change_rsc(cl, rsc, 0);
+1 -2
net/sched/sch_htb.c
··· 1559 1559 goto failure; 1560 1560 } 1561 1561 err = -ENOBUFS; 1562 - if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1562 + if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1563 1563 goto failure; 1564 1564 1565 - memset(cl, 0, sizeof(*cl)); 1566 1565 cl->refcnt = 1; 1567 1566 INIT_LIST_HEAD(&cl->sibling); 1568 1567 INIT_LIST_HEAD(&cl->hlist);
+3 -1
net/sched/sch_netem.c
··· 148 148 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) 149 149 { 150 150 struct netem_sched_data *q = qdisc_priv(sch); 151 - struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; 151 + /* We don't fill cb now as skb_unshare() may invalidate it */ 152 + struct netem_skb_cb *cb; 152 153 struct sk_buff *skb2; 153 154 int ret; 154 155 int count = 1; ··· 201 200 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 202 201 } 203 202 203 + cb = (struct netem_skb_cb *)skb->cb; 204 204 if (q->gap == 0 /* not doing reordering */ 205 205 || q->counter < q->gap /* inside last reordering gap */ 206 206 || q->reorder < get_crandom(&q->reorder_cor)) {
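Note: the netem change is an ordering fix, not an allocation cleanup. skb->cb is storage embedded in the sk_buff itself, so a pointer derived from it is valid only for that particular skb; if the corruption-emulation path later calls skb_unshare() and hands back a copied skb, a cb pointer taken at the top of netem_enqueue() would still reference the old buffer. Hence the shape of the fix:

    struct netem_skb_cb *cb;                /* declared, but not yet derived */

    /* ... skb may be replaced here, e.g. by skb_unshare(skb, GFP_ATOMIC) ... */

    cb = (struct netem_skb_cb *)skb->cb;    /* derived only from the final skb */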
+17 -10
net/sctp/associola.c
··· 441 441 /* If the primary path is changing, assume that the 442 442 * user wants to use this new path. 443 443 */ 444 - if (transport->state != SCTP_INACTIVE) 444 + if ((transport->state == SCTP_ACTIVE) || 445 + (transport->state == SCTP_UNKNOWN)) 445 446 asoc->peer.active_path = transport; 446 447 447 448 /* ··· 533 532 port = addr->v4.sin_port; 534 533 535 534 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", 536 - " port: %d state:%s\n", 535 + " port: %d state:%d\n", 537 536 asoc, 538 537 addr, 539 538 addr->v4.sin_port, 540 - peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE"); 539 + peer_state); 541 540 542 541 /* Set the port if it has not been set yet. */ 543 542 if (0 == asoc->peer.port) ··· 546 545 /* Check to see if this is a duplicate. */ 547 546 peer = sctp_assoc_lookup_paddr(asoc, addr); 548 547 if (peer) { 549 - if (peer_state == SCTP_ACTIVE && 550 - peer->state == SCTP_UNKNOWN) 551 - peer->state = SCTP_ACTIVE; 548 + if (peer->state == SCTP_UNKNOWN) { 549 + if (peer_state == SCTP_ACTIVE) 550 + peer->state = SCTP_ACTIVE; 551 + if (peer_state == SCTP_UNCONFIRMED) 552 + peer->state = SCTP_UNCONFIRMED; 553 + } 552 554 return peer; 553 555 } 554 556 ··· 743 739 list_for_each(pos, &asoc->peer.transport_addr_list) { 744 740 t = list_entry(pos, struct sctp_transport, transports); 745 741 746 - if (t->state == SCTP_INACTIVE) 742 + if ((t->state == SCTP_INACTIVE) || 743 + (t->state == SCTP_UNCONFIRMED)) 747 744 continue; 748 745 if (!first || t->last_time_heard > first->last_time_heard) { 749 746 second = first; ··· 764 759 * [If the primary is active but not most recent, bump the most 765 760 * recently used transport.] 766 761 */ 767 - if (asoc->peer.primary_path->state != SCTP_INACTIVE && 762 + if (((asoc->peer.primary_path->state == SCTP_ACTIVE) || 763 + (asoc->peer.primary_path->state == SCTP_UNKNOWN)) && 768 764 first != asoc->peer.primary_path) { 769 765 second = first; 770 766 first = asoc->peer.primary_path; ··· 1060 1054 transports); 1061 1055 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) 1062 1056 sctp_assoc_add_peer(asoc, &trans->ipaddr, 1063 - GFP_ATOMIC, SCTP_ACTIVE); 1057 + GFP_ATOMIC, trans->state); 1064 1058 } 1065 1059 1066 1060 asoc->ctsn_ack_point = asoc->next_tsn - 1; ··· 1100 1094 1101 1095 /* Try to find an active transport. */ 1102 1096 1103 - if (t->state != SCTP_INACTIVE) { 1097 + if ((t->state == SCTP_ACTIVE) || 1098 + (t->state == SCTP_UNKNOWN)) { 1104 1099 break; 1105 1100 } else { 1106 1101 /* Keep track of the next transport in case
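Note: the SCTP hunks from here on account for the new SCTP_UNCONFIRMED transport state, an address learned from the peer but not yet verified by a heartbeat. With the extra state, 'state != SCTP_INACTIVE' is no longer a usability test, so the checks become explicit whitelists (ACTIVE or UNKNOWN) and blacklists (INACTIVE or UNCONFIRMED). Were the repeated test worth factoring, a helper might look like this; it is illustrative only, not part of the patch:

    /* hypothetical: may this transport carry ordinary traffic? */
    static inline int sctp_transport_usable(const struct sctp_transport *t)
    {
            return (t->state == SCTP_ACTIVE) || (t->state == SCTP_UNKNOWN);
    }

Note also that peer transports copied across an association restart now inherit trans->state instead of being forced to SCTP_ACTIVE.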
+5 -3
net/sctp/bind_addr.c
··· 146 146 147 147 /* Add an address to the bind address list in the SCTP_bind_addr structure. */ 148 148 int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, 149 - gfp_t gfp) 149 + __u8 use_as_src, gfp_t gfp) 150 150 { 151 151 struct sctp_sockaddr_entry *addr; 152 152 ··· 162 162 */ 163 163 if (!addr->a.v4.sin_port) 164 164 addr->a.v4.sin_port = bp->port; 165 + 166 + addr->use_as_src = use_as_src; 165 167 166 168 INIT_LIST_HEAD(&addr->list); 167 169 list_add_tail(&addr->list, &bp->address_list); ··· 276 274 } 277 275 278 276 af->from_addr_param(&addr, rawaddr, port, 0); 279 - retval = sctp_add_bind_addr(bp, &addr, gfp); 277 + retval = sctp_add_bind_addr(bp, &addr, 1, gfp); 280 278 if (retval) { 281 279 /* Can't finish building the list, clean up. */ 282 280 sctp_bind_addr_clean(bp); ··· 369 367 (((AF_INET6 == addr->sa.sa_family) && 370 368 (flags & SCTP_ADDR6_ALLOWED) && 371 369 (flags & SCTP_ADDR6_PEERSUPP)))) 372 - error = sctp_add_bind_addr(dest, addr, gfp); 370 + error = sctp_add_bind_addr(dest, addr, 1, gfp); 373 371 } 374 372 375 373 return error;
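Note: sctp_add_bind_addr() gains a use_as_src argument, letting an address sit on the bind list without yet being eligible as a packet source. The updated signature and the common call shape:

    int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
                           __u8 use_as_src, gfp_t gfp);

    /* a normally bound or peer-supported local address is usable at once */
    retval = sctp_add_bind_addr(bp, &addr, 1, gfp);

Every caller in this merge passes 1 except the ASCONF ADD path in socket.c, which passes 0 until the peer acknowledges the address; the ack handling in sm_make_chunk.c then flips use_as_src to 1.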
+6 -5
net/sctp/endpointola.c
··· 158 158 void sctp_endpoint_free(struct sctp_endpoint *ep) 159 159 { 160 160 ep->base.dead = 1; 161 + 162 + ep->base.sk->sk_state = SCTP_SS_CLOSED; 163 + 164 + /* Unlink this endpoint, so we can't find it again! */ 165 + sctp_unhash_endpoint(ep); 166 + 161 167 sctp_endpoint_put(ep); 162 168 } 163 169 ··· 171 165 static void sctp_endpoint_destroy(struct sctp_endpoint *ep) 172 166 { 173 167 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 174 - 175 - ep->base.sk->sk_state = SCTP_SS_CLOSED; 176 - 177 - /* Unlink this endpoint, so we can't find it again! */ 178 - sctp_unhash_endpoint(ep); 179 168 180 169 /* Free up the HMAC transform. */ 181 170 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
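Note: marking the socket closed and unhashing the endpoint move from sctp_endpoint_destroy() up into sctp_endpoint_free(). Destroy only runs once the last reference is dropped, so under the old ordering a lookup could still find an endpoint whose owner had already released it. The new sequence makes the endpoint unreachable before the reference goes away:

    ep->base.dead = 1;
    ep->base.sk->sk_state = SCTP_SS_CLOSED;
    sctp_unhash_endpoint(ep);       /* no further lookups can return this ep */
    sctp_endpoint_put(ep);          /* destroy fires when the refcount hits zero */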
+2 -1
net/sctp/ipv6.c
··· 290 290 sctp_read_lock(addr_lock); 291 291 list_for_each(pos, &bp->address_list) { 292 292 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 293 - if ((laddr->a.sa.sa_family == AF_INET6) && 293 + if ((laddr->use_as_src) && 294 + (laddr->a.sa.sa_family == AF_INET6) && 294 295 (scope <= sctp_scope(&laddr->a))) { 295 296 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 296 297 if (!baddr || (matchlen < bmatchlen)) {
+6 -3
net/sctp/outqueue.c
··· 691 691 692 692 if (!new_transport) { 693 693 new_transport = asoc->peer.active_path; 694 - } else if (new_transport->state == SCTP_INACTIVE) { 694 + } else if ((new_transport->state == SCTP_INACTIVE) || 695 + (new_transport->state == SCTP_UNCONFIRMED)) { 695 696 /* If the chunk is Heartbeat or Heartbeat Ack, 696 697 * send it to chunk->transport, even if it's 697 698 * inactive. ··· 849 848 */ 850 849 new_transport = chunk->transport; 851 850 if (!new_transport || 852 - new_transport->state == SCTP_INACTIVE) 851 + ((new_transport->state == SCTP_INACTIVE) || 852 + (new_transport->state == SCTP_UNCONFIRMED))) 853 853 new_transport = asoc->peer.active_path; 854 854 855 855 /* Change packets if necessary. */ ··· 1466 1464 /* Mark the destination transport address as 1467 1465 * active if it is not so marked. 1468 1466 */ 1469 - if (transport->state == SCTP_INACTIVE) { 1467 + if ((transport->state == SCTP_INACTIVE) || 1468 + (transport->state == SCTP_UNCONFIRMED)) { 1470 1469 sctp_assoc_control_transport( 1471 1470 transport->asoc, 1472 1471 transport,
+5 -2
net/sctp/protocol.c
··· 240 240 (((AF_INET6 == addr->a.sa.sa_family) && 241 241 (copy_flags & SCTP_ADDR6_ALLOWED) && 242 242 (copy_flags & SCTP_ADDR6_PEERSUPP)))) { 243 - error = sctp_add_bind_addr(bp, &addr->a, 243 + error = sctp_add_bind_addr(bp, &addr->a, 1, 244 244 GFP_ATOMIC); 245 245 if (error) 246 246 goto end_copy; ··· 486 486 list_for_each(pos, &bp->address_list) { 487 487 laddr = list_entry(pos, struct sctp_sockaddr_entry, 488 488 list); 489 + if (!laddr->use_as_src) 490 + continue; 489 491 sctp_v4_dst_saddr(&dst_saddr, dst, bp->port); 490 492 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 491 493 goto out_unlock; ··· 508 506 list_for_each(pos, &bp->address_list) { 509 507 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 510 508 511 - if (AF_INET == laddr->a.sa.sa_family) { 509 + if ((laddr->use_as_src) && 510 + (AF_INET == laddr->a.sa.sa_family)) { 512 511 fl.fl4_src = laddr->a.v4.sin_addr.s_addr; 513 512 if (!ip_route_output_key(&rt, &fl)) { 514 513 dst = &rt->u.dst;
+10 -4
net/sctp/sm_make_chunk.c
··· 1493 1493 1494 1494 /* Also, add the destination address. */ 1495 1495 if (list_empty(&retval->base.bind_addr.address_list)) { 1496 - sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1496 + sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1, 1497 1497 GFP_ATOMIC); 1498 1498 } 1499 1499 ··· 2017 2017 af->from_addr_param(&addr, param.addr, asoc->peer.port, 0); 2018 2018 scope = sctp_scope(peer_addr); 2019 2019 if (sctp_in_scope(&addr, scope)) 2020 - if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE)) 2020 + if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) 2021 2021 return 0; 2022 2022 break; 2023 2023 ··· 2418 2418 * Due to Resource Shortage'. 2419 2419 */ 2420 2420 2421 - peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE); 2421 + peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); 2422 2422 if (!peer) 2423 2423 return SCTP_ERROR_RSRC_LOW; 2424 2424 ··· 2565 2565 union sctp_addr_param *addr_param; 2566 2566 struct list_head *pos; 2567 2567 struct sctp_transport *transport; 2568 + struct sctp_sockaddr_entry *saddr; 2568 2569 int retval = 0; 2569 2570 2570 2571 addr_param = (union sctp_addr_param *) ··· 2579 2578 case SCTP_PARAM_ADD_IP: 2580 2579 sctp_local_bh_disable(); 2581 2580 sctp_write_lock(&asoc->base.addr_lock); 2582 - retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC); 2581 + list_for_each(pos, &bp->address_list) { 2582 + saddr = list_entry(pos, struct sctp_sockaddr_entry, list); 2583 + if (sctp_cmp_addr_exact(&saddr->a, &addr)) 2584 + saddr->use_as_src = 1; 2585 + } 2583 2586 sctp_write_unlock(&asoc->base.addr_lock); 2584 2587 sctp_local_bh_enable(); 2585 2588 break; ··· 2596 2591 list_for_each(pos, &asoc->peer.transport_addr_list) { 2597 2592 transport = list_entry(pos, struct sctp_transport, 2598 2593 transports); 2594 + dst_release(transport->dst); 2599 2595 sctp_transport_route(transport, NULL, 2600 2596 sctp_sk(asoc->base.sk)); 2601 2597 }
+10 -2
net/sctp/sm_sideeffect.c
··· 430 430 /* The check for association's overall error counter exceeding the 431 431 * threshold is done in the state function. 432 432 */ 433 - asoc->overall_error_count++; 433 + /* When probing UNCONFIRMED addresses, the association overall 434 + * error count is NOT incremented 435 + */ 436 + if (transport->state != SCTP_UNCONFIRMED) 437 + asoc->overall_error_count++; 434 438 435 439 if (transport->state != SCTP_INACTIVE && 436 440 (transport->error_count++ >= transport->pathmaxrxt)) { ··· 614 610 /* Mark the destination transport address as active if it is not so 615 611 * marked. 616 612 */ 617 - if (t->state == SCTP_INACTIVE) 613 + if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) 618 614 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, 619 615 SCTP_HEARTBEAT_SUCCESS); 620 616 ··· 624 620 */ 625 621 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; 626 622 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); 623 + 624 + /* Update the heartbeat timer. */ 625 + if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 626 + sctp_transport_hold(t); 627 627 } 628 628 629 629 /* Helper function to do a transport reset at the expiry of the hearbeat
+7 -1
net/sctp/sm_statefuns.c
··· 846 846 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); 847 847 hbinfo.daddr = transport->ipaddr; 848 848 hbinfo.sent_at = jiffies; 849 + hbinfo.hb_nonce = transport->hb_nonce; 849 850 850 851 /* Send a heartbeat to our peer. */ 851 852 paylen = sizeof(sctp_sender_hb_info_t); ··· 1048 1047 } 1049 1048 return SCTP_DISPOSITION_DISCARD; 1050 1049 } 1050 + 1051 + /* Validate the 64-bit random nonce. */ 1052 + if (hbinfo->hb_nonce != link->hb_nonce) 1053 + return SCTP_DISPOSITION_DISCARD; 1051 1054 1052 1055 max_interval = link->hbinterval + link->rto; 1053 1056 ··· 5283 5278 datalen -= sizeof(sctp_data_chunk_t); 5284 5279 5285 5280 deliver = SCTP_CMD_CHUNK_ULP; 5286 - chunk->data_accepted = 1; 5287 5281 5288 5282 /* Think about partial delivery. */ 5289 5283 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { ··· 5360 5356 */ 5361 5357 if (SCTP_CMD_CHUNK_ULP == deliver) 5362 5358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); 5359 + 5360 + chunk->data_accepted = 1; 5363 5361 5364 5362 /* Note: Some chunks may get overcounted (if we drop) or overcounted 5365 5363 * if we renege and the chunk arrives again.
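Note: two distinct fixes land in sm_statefuns.c. First, outgoing heartbeats now carry hbinfo.hb_nonce and an incoming HEARTBEAT-ACK is discarded unless its nonce matches the transport's copy (the nonce itself is generated in transport.c below). Second, chunk->data_accepted is set only after the delivery decision is final, not as soon as a DATA chunk merely looks deliverable:

    deliver = SCTP_CMD_CHUNK_ULP;
    /* ... partial-delivery and renege handling may still change 'deliver' ... */
    if (SCTP_CMD_CHUNK_ULP == deliver)
            sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
    chunk->data_accepted = 1;       /* flagged once acceptance is committed */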
+60 -16
net/sctp/socket.c
··· 369 369 370 370 /* Use GFP_ATOMIC since BHs are disabled. */ 371 371 addr->v4.sin_port = ntohs(addr->v4.sin_port); 372 - ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC); 372 + ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC); 373 373 addr->v4.sin_port = htons(addr->v4.sin_port); 374 374 sctp_write_unlock(&ep->base.addr_lock); 375 375 sctp_local_bh_enable(); ··· 491 491 struct sctp_chunk *chunk; 492 492 struct sctp_sockaddr_entry *laddr; 493 493 union sctp_addr *addr; 494 + union sctp_addr saveaddr; 494 495 void *addr_buf; 495 496 struct sctp_af *af; 496 497 struct list_head *pos; ··· 559 558 } 560 559 561 560 retval = sctp_send_asconf(asoc, chunk); 561 + if (retval) 562 + goto out; 562 563 563 - /* FIXME: After sending the add address ASCONF chunk, we 564 - * cannot append the address to the association's binding 565 - * address list, because the new address may be used as the 566 - * source of a message sent to the peer before the ASCONF 567 - * chunk is received by the peer. So we should wait until 568 - * ASCONF_ACK is received. 564 + /* Add the new addresses to the bind address list with 565 + * use_as_src set to 0. 569 566 */ 567 + sctp_local_bh_disable(); 568 + sctp_write_lock(&asoc->base.addr_lock); 569 + addr_buf = addrs; 570 + for (i = 0; i < addrcnt; i++) { 571 + addr = (union sctp_addr *)addr_buf; 572 + af = sctp_get_af_specific(addr->v4.sin_family); 573 + memcpy(&saveaddr, addr, af->sockaddr_len); 574 + saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); 575 + retval = sctp_add_bind_addr(bp, &saveaddr, 0, 576 + GFP_ATOMIC); 577 + addr_buf += af->sockaddr_len; 578 + } 579 + sctp_write_unlock(&asoc->base.addr_lock); 580 + sctp_local_bh_enable(); 570 581 } 571 582 572 583 out: ··· 689 676 struct sctp_sock *sp; 690 677 struct sctp_endpoint *ep; 691 678 struct sctp_association *asoc; 679 + struct sctp_transport *transport; 692 680 struct sctp_bind_addr *bp; 693 681 struct sctp_chunk *chunk; 694 682 union sctp_addr *laddr; 683 + union sctp_addr saveaddr; 695 684 void *addr_buf; 696 685 struct sctp_af *af; 697 - struct list_head *pos; 686 + struct list_head *pos, *pos1; 687 + struct sctp_sockaddr_entry *saddr; 698 688 int i; 699 689 int retval = 0; 700 690 ··· 764 748 goto out; 765 749 } 766 750 767 - retval = sctp_send_asconf(asoc, chunk); 768 - 769 - /* FIXME: After sending the delete address ASCONF chunk, we 770 - * cannot remove the addresses from the association's bind 771 - * address list, because there maybe some packet send to 772 - * the delete addresses, so we should wait until ASCONF_ACK 773 - * packet is received. 751 + /* Reset use_as_src flag for the addresses in the bind address 752 + * list that are to be deleted. 774 753 */ 754 + sctp_local_bh_disable(); 755 + sctp_write_lock(&asoc->base.addr_lock); 756 + addr_buf = addrs; 757 + for (i = 0; i < addrcnt; i++) { 758 + laddr = (union sctp_addr *)addr_buf; 759 + af = sctp_get_af_specific(laddr->v4.sin_family); 760 + memcpy(&saveaddr, laddr, af->sockaddr_len); 761 + saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); 762 + list_for_each(pos1, &bp->address_list) { 763 + saddr = list_entry(pos1, 764 + struct sctp_sockaddr_entry, 765 + list); 766 + if (sctp_cmp_addr_exact(&saddr->a, &saveaddr)) 767 + saddr->use_as_src = 0; 768 + } 769 + addr_buf += af->sockaddr_len; 770 + } 771 + sctp_write_unlock(&asoc->base.addr_lock); 772 + sctp_local_bh_enable(); 773 + 774 + /* Update the route and saddr entries for all the transports 775 + * as some of the addresses in the bind address list are 776 + * about to be deleted and cannot be used as source addresses. 777 + */ 778 + list_for_each(pos1, &asoc->peer.transport_addr_list) { 779 + transport = list_entry(pos1, struct sctp_transport, 780 + transports); 781 + dst_release(transport->dst); 782 + sctp_transport_route(transport, NULL, 783 + sctp_sk(asoc->base.sk)); 784 + } 785 + 786 + retval = sctp_send_asconf(asoc, chunk); 775 787 } 776 788 out: 777 789 return retval; ··· 5021 4977 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 5022 4978 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 5023 4979 { 5024 - if (hlist_empty(&pp->owner)) { 4980 + if (pp && hlist_empty(&pp->owner)) { 5025 4981 if (pp->next) 5026 4982 pp->next->pprev = pp->pprev; 5027 4983 *(pp->pprev) = pp->next;
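Note: the two long-standing FIXMEs in the ASCONF paths are resolved symmetrically through use_as_src. ADD: new addresses are appended to the bind list immediately but with use_as_src = 0, so they cannot be picked as a source before the peer confirms them. DEL: use_as_src is cleared first, and every transport's cached route is invalidated so in-flight traffic stops using a source that is about to disappear:

    list_for_each(pos1, &asoc->peer.transport_addr_list) {
            transport = list_entry(pos1, struct sctp_transport, transports);
            dst_release(transport->dst);    /* forget the cached route/saddr */
            sctp_transport_route(transport, NULL, sctp_sk(asoc->base.sk));
    }

The sctp_bucket_destroy() hunk at the bottom is unrelated hardening: a NULL check on pp before the hash bucket is touched.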
+7 -2
net/sctp/transport.c
··· 49 49 */ 50 50 51 51 #include <linux/types.h> 52 + #include <linux/random.h> 52 53 #include <net/sctp/sctp.h> 53 54 #include <net/sctp/sm.h> 54 55 ··· 86 85 87 86 peer->init_sent_count = 0; 88 87 89 - peer->state = SCTP_ACTIVE; 90 88 peer->param_flags = SPP_HB_DISABLE | 91 89 SPP_PMTUD_ENABLE | 92 90 SPP_SACKDELAY_ENABLE; ··· 108 108 init_timer(&peer->hb_timer); 109 109 peer->hb_timer.function = sctp_generate_heartbeat_event; 110 110 peer->hb_timer.data = (unsigned long)peer; 111 + 112 + /* Initialize the 64-bit random nonce sent with heartbeat. */ 113 + get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); 111 114 112 115 atomic_set(&peer->refcnt, 1); 113 116 peer->dead = 0; ··· 520 517 unsigned long sctp_transport_timeout(struct sctp_transport *t) 521 518 { 522 519 unsigned long timeout; 523 - timeout = t->hbinterval + t->rto + sctp_jitter(t->rto); 520 + timeout = t->rto + sctp_jitter(t->rto); 521 + if (t->state != SCTP_UNCONFIRMED) 522 + timeout += t->hbinterval; 524 523 timeout += jiffies; 525 524 return timeout; 526 525 }
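Note: each transport now draws a 64-bit hb_nonce from get_random_bytes() at creation time; the nonce rides in every heartbeat and must be echoed back in the ack (checked in sm_statefuns.c above), which makes blind off-path forgery of HEARTBEAT-ACKs impractical. New transports also stop defaulting to SCTP_ACTIVE, leaving the state to the caller in support of SCTP_UNCONFIRMED. Finally, the timeout computation probes unconfirmed addresses on the bare RTO:

    timeout = t->rto + sctp_jitter(t->rto);
    if (t->state != SCTP_UNCONFIRMED)
            timeout += t->hbinterval;   /* confirmed peers keep the normal cadence */
    timeout += jiffies;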
+3 -6
net/sunrpc/auth_gss/auth_gss.c
··· 225 225 { 226 226 struct gss_cl_ctx *ctx; 227 227 228 - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 228 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 229 229 if (ctx != NULL) { 230 - memset(ctx, 0, sizeof(*ctx)); 231 230 ctx->gc_proc = RPC_GSS_PROC_DATA; 232 231 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */ 233 232 spin_lock_init(&ctx->gc_seq_lock); ··· 390 391 { 391 392 struct gss_upcall_msg *gss_msg; 392 393 393 - gss_msg = kmalloc(sizeof(*gss_msg), GFP_KERNEL); 394 + gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL); 394 395 if (gss_msg != NULL) { 395 - memset(gss_msg, 0, sizeof(*gss_msg)); 396 396 INIT_LIST_HEAD(&gss_msg->list); 397 397 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); 398 398 init_waitqueue_head(&gss_msg->waitqueue); ··· 774 776 dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", 775 777 acred->uid, auth->au_flavor); 776 778 777 - if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) 779 + if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) 778 780 goto out_err; 779 781 780 - memset(cred, 0, sizeof(*cred)); 781 782 atomic_set(&cred->gc_count, 1); 782 783 cred->gc_uid = acred->uid; 783 784 /*
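
This file starts a long run of one-for-one conversions that continues through the sunrpc, tipc, wanrouter, af_unix and xfrm hunks below: a kmalloc() immediately followed by memset(..., 0, ...) becomes a single kzalloc(), which returns zeroed memory with the same gfp semantics. The shape of the transformation, on a hypothetical structure:

        struct foo *p;  /* "foo" is illustrative */

        /* Before: allocate, then zero in a separate, forgettable step. */
        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        memset(p, 0, sizeof(*p));

        /* After: one call, identical result. */
        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

Besides being shorter, the combined form removes the chance of the memset being forgotten or sized against the wrong type.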
+1 -2
net/sunrpc/auth_gss/gss_krb5_mech.c
··· 129 129 const void *end = (const void *)((const char *)p + len); 130 130 struct krb5_ctx *ctx; 131 131 132 - if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) 132 + if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL))) 133 133 goto out_err; 134 - memset(ctx, 0, sizeof(*ctx)); 135 134 136 135 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); 137 136 if (IS_ERR(p))
+1 -2
net/sunrpc/auth_gss/gss_mech_switch.c
··· 237 237 struct gss_api_mech *mech, 238 238 struct gss_ctx **ctx_id) 239 239 { 240 - if (!(*ctx_id = kmalloc(sizeof(**ctx_id), GFP_KERNEL))) 240 + if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) 241 241 return GSS_S_FAILURE; 242 - memset(*ctx_id, 0, sizeof(**ctx_id)); 243 242 (*ctx_id)->mech_type = gss_mech_get(mech); 244 243 245 244 return mech->gm_ops
+1 -2
net/sunrpc/auth_gss/gss_spkm3_mech.c
··· 152 152 const void *end = (const void *)((const char *)p + len); 153 153 struct spkm3_ctx *ctx; 154 154 155 - if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) 155 + if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL))) 156 156 goto out_err; 157 - memset(ctx, 0, sizeof(*ctx)); 158 157 159 158 p = simple_get_netobj(p, end, &ctx->ctx_id); 160 159 if (IS_ERR(p))
+1 -2
net/sunrpc/auth_gss/gss_spkm3_token.c
··· 90 90 int 91 91 decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen) 92 92 { 93 - if (!(out->data = kmalloc(explen,GFP_KERNEL))) 93 + if (!(out->data = kzalloc(explen,GFP_KERNEL))) 94 94 return 0; 95 95 out->len = explen; 96 - memset(out->data, 0, explen); 97 96 memcpy(out->data, in, enclen); 98 97 return 1; 99 98 }
+1 -2
net/sunrpc/clnt.c
··· 125 125 goto out_err; 126 126 127 127 err = -ENOMEM; 128 - clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); 128 + clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); 129 129 if (!clnt) 130 130 goto out_err; 131 - memset(clnt, 0, sizeof(*clnt)); 132 131 atomic_set(&clnt->cl_users, 0); 133 132 atomic_set(&clnt->cl_count, 1); 134 133 clnt->cl_parent = clnt;
+1 -6
net/sunrpc/stats.c
··· 114 114 */ 115 115 struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) 116 116 { 117 - unsigned int ops = clnt->cl_maxproc; 118 - size_t size = ops * sizeof(struct rpc_iostats); 119 117 struct rpc_iostats *new; 120 - 121 - new = kmalloc(size, GFP_KERNEL); 122 - if (new) 123 - memset(new, 0 , size); 118 + new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL); 124 119 return new; 125 120 } 126 121 EXPORT_SYMBOL(rpc_alloc_iostats);
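
rpc_alloc_iostats() moves to kcalloc(), the array form of kzalloc(): it zeroes the buffer and refuses allocations whose count-times-size multiplication would overflow, a check the open-coded ops * sizeof(...) version above lacked. The general pattern, with a hypothetical element type:

        /* Before: unchecked multiplication plus manual zeroing. */
        buf = kmalloc(n * sizeof(struct elem), GFP_KERNEL);
        if (buf)
                memset(buf, 0, n * sizeof(struct elem));

        /* After: zeroed array allocation with overflow protection. */
        buf = kcalloc(n, sizeof(struct elem), GFP_KERNEL);

The tipc_bearers/media_list, sub_seq, node-table and zone-table allocations further down get the same treatment.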
+2 -4
net/sunrpc/svc.c
··· 32 32 int vers; 33 33 unsigned int xdrsize; 34 34 35 - if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL))) 35 + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) 36 36 return NULL; 37 - memset(serv, 0, sizeof(*serv)); 38 37 serv->sv_name = prog->pg_name; 39 38 serv->sv_program = prog; 40 39 serv->sv_nrthreads = 1; ··· 158 159 struct svc_rqst *rqstp; 159 160 int error = -ENOMEM; 160 161 161 - rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL); 162 + rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); 162 163 if (!rqstp) 163 164 goto out; 164 165 165 - memset(rqstp, 0, sizeof(*rqstp)); 166 166 init_waitqueue_head(&rqstp->rq_wait); 167 167 168 168 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
+1 -2
net/sunrpc/svcsock.c
··· 1322 1322 struct sock *inet; 1323 1323 1324 1324 dprintk("svc: svc_setup_socket %p\n", sock); 1325 - if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) { 1325 + if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { 1326 1326 *errp = -ENOMEM; 1327 1327 return NULL; 1328 1328 } 1329 - memset(svsk, 0, sizeof(*svsk)); 1330 1329 1331 1330 inet = sock->sk; 1332 1331
+1 -2
net/sunrpc/xprt.c
··· 908 908 struct rpc_xprt *xprt; 909 909 struct rpc_rqst *req; 910 910 911 - if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) 911 + if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) 912 912 return ERR_PTR(-ENOMEM); 913 - memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */ 914 913 915 914 xprt->addr = *ap; 916 915
+2 -4
net/sunrpc/xprtsock.c
··· 1276 1276 1277 1277 xprt->max_reqs = xprt_udp_slot_table_entries; 1278 1278 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1279 - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1279 + xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); 1280 1280 if (xprt->slot == NULL) 1281 1281 return -ENOMEM; 1282 - memset(xprt->slot, 0, slot_table_size); 1283 1282 1284 1283 xprt->prot = IPPROTO_UDP; 1285 1284 xprt->port = xs_get_random_port(); ··· 1317 1318 1318 1319 xprt->max_reqs = xprt_tcp_slot_table_entries; 1319 1320 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1320 - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1321 + xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); 1321 1322 if (xprt->slot == NULL) 1322 1323 return -ENOMEM; 1323 - memset(xprt->slot, 0, slot_table_size); 1324 1324 1325 1325 xprt->prot = IPPROTO_TCP; 1326 1326 xprt->port = xs_get_random_port();
+2 -4
net/tipc/bearer.c
··· 665 665 int res; 666 666 667 667 write_lock_bh(&tipc_net_lock); 668 - tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC); 669 - media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC); 668 + tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC); 669 + media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC); 670 670 if (tipc_bearers && media_list) { 671 - memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer)); 672 - memset(media_list, 0, MAX_MEDIA * sizeof(struct media)); 673 671 res = TIPC_OK; 674 672 } else { 675 673 kfree(tipc_bearers);
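
Note the allocation context in the tipc bearer hunk above: both kcalloc() calls run inside write_lock_bh(&tipc_net_lock), so GFP_ATOMIC is mandatory, since GFP_KERNEL may sleep and sleeping with bottom halves disabled under a held lock is a bug. A hedged sketch of the constraint (lock and structure names illustrative):

        write_lock_bh(&some_lock);              /* BHs off: must not sleep */
        p = kzalloc(sizeof(*p), GFP_ATOMIC);    /* ok: never sleeps */
        /* p = kzalloc(sizeof(*p), GFP_KERNEL);    could sleep: a bug here */
        write_unlock_bh(&some_lock);

The trade-off is that GFP_ATOMIC draws on reserves and fails more readily, which is why both results are checked before either is used.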
+2 -6
net/tipc/cluster.c
··· 57 57 struct _zone *z_ptr; 58 58 struct cluster *c_ptr; 59 59 int max_nodes; 60 - int alloc; 61 60 62 - c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC); 61 + c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC); 63 62 if (c_ptr == NULL) { 64 63 warn("Cluster creation failure, no memory\n"); 65 64 return NULL; 66 65 } 67 - memset(c_ptr, 0, sizeof(*c_ptr)); 68 66 69 67 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 70 68 if (in_own_cluster(addr)) 71 69 max_nodes = LOWEST_SLAVE + tipc_max_slaves; 72 70 else 73 71 max_nodes = tipc_max_nodes + 1; 74 - alloc = sizeof(void *) * (max_nodes + 1); 75 72 76 - c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC); 73 + c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC); 77 74 if (c_ptr->nodes == NULL) { 78 75 warn("Cluster creation failure, no memory for node area\n"); 79 76 kfree(c_ptr); 80 77 return NULL; 81 78 } 82 - memset(c_ptr->nodes, 0, alloc); 83 79 84 80 if (in_own_cluster(addr)) 85 81 tipc_local_nodes = c_ptr->nodes;
+1 -1
net/tipc/discover.c
··· 295 295 { 296 296 struct link_req *req; 297 297 298 - req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC); 298 + req = kmalloc(sizeof(*req), GFP_ATOMIC); 299 299 if (!req) 300 300 return NULL; 301 301
+1 -2
net/tipc/link.c
··· 417 417 struct tipc_msg *msg; 418 418 char *if_name; 419 419 420 - l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC); 420 + l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC); 421 421 if (!l_ptr) { 422 422 warn("Link creation failed, no memory\n"); 423 423 return NULL; 424 424 } 425 - memset(l_ptr, 0, sizeof(*l_ptr)); 426 425 427 426 l_ptr->addr = peer; 428 427 if_name = strchr(b_ptr->publ.name, ':') + 1;
+4 -12
net/tipc/name_table.c
··· 117 117 u32 scope, u32 node, u32 port_ref, 118 118 u32 key) 119 119 { 120 - struct publication *publ = 121 - (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC); 120 + struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC); 122 121 if (publ == NULL) { 123 122 warn("Publication creation failure, no memory\n"); 124 123 return NULL; 125 124 } 126 125 127 - memset(publ, 0, sizeof(*publ)); 128 126 publ->type = type; 129 127 publ->lower = lower; 130 128 publ->upper = upper; ··· 142 144 143 145 static struct sub_seq *tipc_subseq_alloc(u32 cnt) 144 146 { 145 - u32 sz = cnt * sizeof(struct sub_seq); 146 - struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC); 147 - 148 - if (sseq) 149 - memset(sseq, 0, sz); 147 + struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); 150 148 return sseq; 151 149 } 152 150 ··· 154 160 155 161 static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) 156 162 { 157 - struct name_seq *nseq = 158 - (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC); 163 + struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); 159 164 struct sub_seq *sseq = tipc_subseq_alloc(1); 160 165 161 166 if (!nseq || !sseq) { ··· 164 171 return NULL; 165 172 } 166 173 167 - memset(nseq, 0, sizeof(*nseq)); 168 174 spin_lock_init(&nseq->lock); 169 175 nseq->type = type; 170 176 nseq->sseqs = sseq; ··· 1052 1060 { 1053 1061 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1054 1062 1055 - table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC); 1063 + table.types = kmalloc(array_size, GFP_ATOMIC); 1056 1064 if (!table.types) 1057 1065 return -ENOMEM; 1058 1066
+1 -4
net/tipc/net.c
··· 160 160 161 161 static int net_init(void) 162 162 { 163 - u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1); 164 - 165 163 memset(&tipc_net, 0, sizeof(tipc_net)); 166 - tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC); 164 + tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC); 167 165 if (!tipc_net.zones) { 168 166 return -ENOMEM; 169 167 } 170 - memset(tipc_net.zones, 0, sz); 171 168 return TIPC_OK; 172 169 } 173 170
+2 -3
net/tipc/port.c
··· 226 226 struct tipc_msg *msg; 227 227 u32 ref; 228 228 229 - p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC); 229 + p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC); 230 230 if (!p_ptr) { 231 231 warn("Port creation failed, no memory\n"); 232 232 return 0; 233 233 } 234 - memset(p_ptr, 0, sizeof(*p_ptr)); 235 234 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); 236 235 if (!ref) { 237 236 warn("Port creation failed, reference table exhausted\n"); ··· 1057 1058 struct port *p_ptr; 1058 1059 u32 ref; 1059 1060 1060 - up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1061 + up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1061 1062 if (!up_ptr) { 1062 1063 warn("Port creation failed, no memory\n"); 1063 1064 return -ENOMEM;
+1 -1
net/tipc/ref.c
··· 79 79 while (sz < requested_size) { 80 80 sz <<= 1; 81 81 } 82 - table = (struct reference *)vmalloc(sz * sizeof(struct reference)); 82 + table = vmalloc(sz * sizeof(*table)); 83 83 if (table == NULL) 84 84 return -ENOMEM; 85 85
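
The net/tipc/ref.c hunk drops the redundant cast of vmalloc()'s void * result and sizes the buffer as sz * sizeof(*table) instead of naming the element type, so the allocation stays correct even if table's type is ever changed. The idiom in isolation (names illustrative):

        struct widget *arr;

        /* Element size is tied to the pointer, not to a type name. */
        arr = vmalloc(n * sizeof(*arr));
        if (arr == NULL)
                return -ENOMEM;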
+1 -2
net/tipc/subscr.c
··· 393 393 394 394 /* Create subscriber object */ 395 395 396 - subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC); 396 + subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC); 397 397 if (subscriber == NULL) { 398 398 warn("Subscriber rejected, no memory\n"); 399 399 return; 400 400 } 401 - memset(subscriber, 0, sizeof(struct subscriber)); 402 401 INIT_LIST_HEAD(&subscriber->subscription_list); 403 402 INIT_LIST_HEAD(&subscriber->subscriber_list); 404 403 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
+1 -2
net/tipc/user_reg.c
··· 82 82 83 83 spin_lock_bh(&reg_lock); 84 84 if (!users) { 85 - users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC); 85 + users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC); 86 86 if (users) { 87 - memset(users, 0, USER_LIST_SIZE); 88 87 for (i = 1; i <= MAX_USERID; i++) { 89 88 users[i].next = i - 1; 90 89 }
+1 -2
net/tipc/zone.c
··· 52 52 return NULL; 53 53 } 54 54 55 - z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); 55 + z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC); 56 56 if (!z_ptr) { 57 57 warn("Zone creation failed, insufficient memory\n"); 58 58 return NULL; 59 59 } 60 60 61 - memset(z_ptr, 0, sizeof(*z_ptr)); 62 61 z_num = tipc_zone(addr); 63 62 z_ptr->addr = tipc_addr(z_num, 0, 0); 64 63 tipc_net.zones[z_num] = z_ptr;
+1 -2
net/unix/af_unix.c
··· 663 663 goto out; 664 664 665 665 err = -ENOMEM; 666 - addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); 666 + addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); 667 667 if (!addr) 668 668 goto out; 669 669 670 - memset(addr, 0, sizeof(*addr) + sizeof(short) + 16); 671 670 addr->name->sun_family = AF_UNIX; 672 671 atomic_set(&addr->refcnt, 1); 673 672
+3 -6
net/wanrouter/af_wanpipe.c
··· 370 370 * used by the ioctl call to read call information 371 371 * and to execute commands. 372 372 */ 373 - if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) { 373 + if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) { 374 374 wanpipe_kill_sock_irq (newsk); 375 375 release_device(dev); 376 376 return -ENOMEM; 377 377 } 378 - memset(mbox_ptr, 0, sizeof(mbox_cmd_t)); 379 378 memcpy(mbox_ptr,skb->data,skb->len); 380 379 381 380 /* Register the lcn on which incoming call came ··· 506 507 if ((sk = sk_alloc(PF_WANPIPE, GFP_ATOMIC, &wanpipe_proto, 1)) == NULL) 507 508 return NULL; 508 509 509 - if ((wan_opt = kmalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) { 510 + if ((wan_opt = kzalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) { 510 511 sk_free(sk); 511 512 return NULL; 512 513 } 513 - memset(wan_opt, 0x00, sizeof(struct wanpipe_opt)); 514 514 515 515 wp_sk(sk) = wan_opt; 516 516 ··· 2009 2011 2010 2012 dev_put(dev); 2011 2013 2012 - if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) 2014 + if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) 2013 2015 return -ENOMEM; 2014 2016 2015 - memset(mbox_ptr, 0, sizeof(mbox_cmd_t)); 2016 2017 wp_sk(sk)->mbox = mbox_ptr; 2017 2018 2018 2019 wanpipe_link_driver(dev,sk);
+3 -6
net/wanrouter/wanmain.c
··· 642 642 643 643 if (cnf->config_id == WANCONFIG_MPPP) { 644 644 #ifdef CONFIG_WANPIPE_MULTPPP 645 - pppdev = kmalloc(sizeof(struct ppp_device), GFP_KERNEL); 645 + pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL); 646 646 err = -ENOBUFS; 647 647 if (pppdev == NULL) 648 648 goto out; 649 - memset(pppdev, 0, sizeof(struct ppp_device)); 650 - pppdev->dev = kmalloc(sizeof(struct net_device), GFP_KERNEL); 649 + pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); 651 650 if (pppdev->dev == NULL) { 652 651 kfree(pppdev); 653 652 err = -ENOBUFS; 654 653 goto out; 655 654 } 656 - memset(pppdev->dev, 0, sizeof(struct net_device)); 657 655 err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf); 658 656 dev = pppdev->dev; 659 657 #else ··· 661 663 goto out; 662 664 #endif 663 665 } else { 664 - dev = kmalloc(sizeof(struct net_device), GFP_KERNEL); 666 + dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); 665 667 err = -ENOBUFS; 666 668 if (dev == NULL) 667 669 goto out; 668 - memset(dev, 0, sizeof(struct net_device)); 669 670 err = wandev->new_if(wandev, dev, cnf); 670 671 } 671 672
+1 -2
net/xfrm/xfrm_policy.c
··· 307 307 { 308 308 struct xfrm_policy *policy; 309 309 310 - policy = kmalloc(sizeof(struct xfrm_policy), gfp); 310 + policy = kzalloc(sizeof(struct xfrm_policy), gfp); 311 311 312 312 if (policy) { 313 - memset(policy, 0, sizeof(struct xfrm_policy)); 314 313 atomic_set(&policy->refcnt, 1); 315 314 rwlock_init(&policy->lock); 316 315 init_timer(&policy->timer);
+1 -2
net/xfrm/xfrm_state.c
··· 194 194 { 195 195 struct xfrm_state *x; 196 196 197 - x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC); 197 + x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC); 198 198 199 199 if (x) { 200 - memset(x, 0, sizeof(struct xfrm_state)); 201 200 atomic_set(&x->refcnt, 1); 202 201 atomic_set(&x->tunnel_users, 0); 203 202 INIT_LIST_HEAD(&x->bydst);