Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master'

+2153 -1536
+5 -4
Documentation/serial/driver
··· 214 214 The interaction of the iflag bits is as follows (parity error 215 215 given as an example): 216 216 Parity error INPCK IGNPAR 217 - None n/a n/a character received 218 - Yes n/a 0 character discarded 219 - Yes 0 1 character received, marked as 217 + n/a 0 n/a character received, marked as 220 218 TTY_NORMAL 221 - Yes 1 1 character received, marked as 219 + None 1 n/a character received, marked as 220 + TTY_NORMAL 221 + Yes 1 0 character received, marked as 222 222 TTY_PARITY 223 + Yes 1 1 character discarded 223 224 224 225 Other flags may be used (eg, xon/xoff characters) if your 225 226 hardware supports hardware "soft" flow control.
+25 -2
MAINTAINERS
··· 565 565 P: Arnd Bergmann 566 566 M: arnd@arndb.de 567 567 L: linuxppc-dev@ozlabs.org 568 - W: http://linuxppc64.org 568 + W: http://www.penguinppc.org/ppc64/ 569 + S: Supported 570 + 571 + BROADCOM BNX2 GIGABIT ETHERNET DRIVER 572 + P: Michael Chan 573 + M: mchan@broadcom.com 574 + L: netdev@vger.kernel.org 575 + S: Supported 576 + 577 + BROADCOM TG3 GIGABIT ETHERNET DRIVER 578 + P: Michael Chan 579 + M: mchan@broadcom.com 580 + L: netdev@vger.kernel.org 569 581 S: Supported 570 582 571 583 BTTV VIDEO4LINUX DRIVER ··· 1746 1734 P: Anton Blanchard 1747 1735 M: anton@samba.org 1748 1736 M: anton@au.ibm.com 1749 - W: http://linuxppc64.org 1737 + W: http://www.penguinppc.org/ppc64/ 1750 1738 L: linuxppc-dev@ozlabs.org 1751 1739 S: Supported 1752 1740 ··· 1907 1895 W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html 1908 1896 S: Maintained 1909 1897 1898 + MULTIMEDIA CARD SUBSYSTEM 1899 + P: Russell King 1900 + M: rmk+mmc@arm.linux.org.uk 1901 + S: Maintained 1902 + 1910 1903 MULTISOUND SOUND DRIVER 1911 1904 P: Andrew Veliath 1912 1905 M: andrewtv@usa.net ··· 1932 1915 P: James E.J. Bottomley 1933 1916 M: James.Bottomley@HansenPartnership.com 1934 1917 L: linux-scsi@vger.kernel.org 1918 + S: Maintained 1919 + 1920 + NETEM NETWORK EMULATOR 1921 + P: Stephen Hemminger 1922 + M: shemminger@osdl.org 1923 + L: netem@osdl.org 1935 1924 S: Maintained 1936 1925 1937 1926 NETFILTER/IPTABLES/IPCHAINS
+2 -2
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 17 4 - EXTRAVERSION =-rc5 5 - NAME=Lordi Rules 4 + EXTRAVERSION =-rc6 5 + NAME=Crazed Snow-Weasel 6 6 7 7 # *DOCUMENTATION* 8 8 # To see a list of typical targets execute "make help"
-1
arch/alpha/kernel/alpha_ksyms.c
··· 182 182 EXPORT_SYMBOL(smp_call_function); 183 183 EXPORT_SYMBOL(smp_call_function_on_cpu); 184 184 EXPORT_SYMBOL(_atomic_dec_and_lock); 185 - EXPORT_SYMBOL(cpu_present_mask); 186 185 #endif /* CONFIG_SMP */ 187 186 188 187 /*
+3 -3
arch/alpha/kernel/process.c
··· 94 94 if (cpuid != boot_cpuid) { 95 95 flags |= 0x00040000UL; /* "remain halted" */ 96 96 *pflags = flags; 97 - clear_bit(cpuid, &cpu_present_mask); 97 + cpu_clear(cpuid, cpu_present_map); 98 98 halt(); 99 99 } 100 100 #endif ··· 120 120 121 121 #ifdef CONFIG_SMP 122 122 /* Wait for the secondaries to halt. */ 123 - cpu_clear(boot_cpuid, cpu_possible_map); 124 - while (cpus_weight(cpu_possible_map)) 123 + cpu_clear(boot_cpuid, cpu_present_map); 124 + while (cpus_weight(cpu_present_map)) 125 125 barrier(); 126 126 #endif 127 127
+4 -10
arch/alpha/kernel/smp.c
··· 68 68 static int smp_secondary_alive __initdata = 0; 69 69 70 70 /* Which cpus ids came online. */ 71 - cpumask_t cpu_present_mask; 72 71 cpumask_t cpu_online_map; 73 72 74 73 EXPORT_SYMBOL(cpu_online_map); ··· 438 439 if ((cpu->flags & 0x1cc) == 0x1cc) { 439 440 smp_num_probed++; 440 441 /* Assume here that "whami" == index */ 441 - cpu_set(i, cpu_present_mask); 442 + cpu_set(i, cpu_present_map); 442 443 cpu->pal_revision = boot_cpu_palrev; 443 444 } 444 445 ··· 449 450 } 450 451 } else { 451 452 smp_num_probed = 1; 452 - cpu_set(boot_cpuid, cpu_present_mask); 453 453 } 454 454 455 - printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n", 456 - smp_num_probed, cpu_possible_map.bits[0]); 455 + printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n", 456 + smp_num_probed, cpu_present_map.bits[0]); 457 457 } 458 458 459 459 /* ··· 471 473 472 474 /* Nothing to do on a UP box, or when told not to. */ 473 475 if (smp_num_probed == 1 || max_cpus == 0) { 474 - cpu_present_mask = cpumask_of_cpu(boot_cpuid); 476 + cpu_present_map = cpumask_of_cpu(boot_cpuid); 475 477 printk(KERN_INFO "SMP mode deactivated.\n"); 476 478 return; 477 479 } ··· 484 486 void __devinit 485 487 smp_prepare_boot_cpu(void) 486 488 { 487 - /* 488 - * Mark the boot cpu (current cpu) as online 489 - */ 490 - cpu_set(smp_processor_id(), cpu_online_map); 491 489 } 492 490 493 491 int __devinit
+1 -1
arch/alpha/kernel/sys_titan.c
··· 66 66 register int bcpu = boot_cpuid; 67 67 68 68 #ifdef CONFIG_SMP 69 - cpumask_t cpm = cpu_present_mask; 69 + cpumask_t cpm = cpu_present_map; 70 70 volatile unsigned long *dim0, *dim1, *dim2, *dim3; 71 71 unsigned long mask0, mask1, mask2, mask3, dummy; 72 72
+1 -1
arch/arm/Kconfig.debug
··· 101 101 help 102 102 Choice for UART for kernel low-level using S3C2410 UARTS, 103 103 should be between zero and two. The port must have been 104 - initalised by the boot-loader before use. 104 + initialised by the boot-loader before use. 105 105 106 106 The uncompressor code port configuration is now handled 107 107 by CONFIG_S3C2410_LOWLEVEL_UART_PORT.
+15 -3
arch/arm/mach-ixp23xx/core.c
··· 178 178 179 179 static void ixp23xx_irq_mask(unsigned int irq) 180 180 { 181 - volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 181 + volatile unsigned long *intr_reg; 182 182 183 + if (irq >= 56) 184 + irq += 8; 185 + 186 + intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 183 187 *intr_reg &= ~(1 << (irq % 32)); 184 188 } 185 189 ··· 203 199 */ 204 200 static void ixp23xx_irq_level_unmask(unsigned int irq) 205 201 { 206 - volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 202 + volatile unsigned long *intr_reg; 207 203 208 204 ixp23xx_irq_ack(irq); 209 205 206 + if (irq >= 56) 207 + irq += 8; 208 + 209 + intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 210 210 *intr_reg |= (1 << (irq % 32)); 211 211 } 212 212 213 213 static void ixp23xx_irq_edge_unmask(unsigned int irq) 214 214 { 215 - volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 215 + volatile unsigned long *intr_reg; 216 216 217 + if (irq >= 56) 218 + irq += 8; 219 + 220 + intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 217 221 *intr_reg |= (1 << (irq % 32)); 218 222 } 219 223
+1 -1
arch/arm/mach-ixp4xx/Kconfig
··· 141 141 2) If > 64MB of memory space is required, the IXP4xx can be 142 142 configured to use indirect registers to access PCI This allows 143 143 for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus. 144 - The disadvantadge of this is that every PCI access requires 144 + The disadvantage of this is that every PCI access requires 145 145 three local register accesses plus a spinlock, but in some 146 146 cases the performance hit is acceptable. In addition, you cannot 147 147 mmap() PCI devices in this case due to the indirect nature
+1
arch/arm/mach-pxa/mainstone.c
··· 493 493 MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)") 494 494 /* Maintainer: MontaVista Software Inc. */ 495 495 .phys_io = 0x40000000, 496 + .boot_params = 0xa0000100, /* BLOB boot parameter setting */ 496 497 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 497 498 .map_io = mainstone_map_io, 498 499 .init_irq = mainstone_init_irq,
+1 -1
arch/arm/mach-s3c2410/Kconfig
··· 170 170 depends on ARCH_S3C2410 && PM 171 171 help 172 172 Say Y here if you want verbose debugging from the PM Suspend and 173 - Resume code. See `Documentation/arm/Samsing-S3C24XX/Suspend.txt` 173 + Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt> 174 174 for more information. 175 175 176 176 config S3C2410_PM_CHECK
+2 -2
arch/arm/mm/mm-armv.c
··· 376 376 ecc_mask = 0; 377 377 } 378 378 379 - if (cpu_arch <= CPU_ARCH_ARMv5TEJ) { 379 + if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) { 380 380 for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 381 381 if (mem_types[i].prot_l1) 382 382 mem_types[i].prot_l1 |= PMD_BIT4; ··· 631 631 pgd = init_mm.pgd; 632 632 633 633 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; 634 - if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ) 634 + if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) 635 635 base_pmdval |= PMD_BIT4; 636 636 637 637 for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
+2 -1
arch/arm/mm/proc-xsc3.S
··· 427 427 #endif 428 428 mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg 429 429 mrc p15, 0, r0, c1, c0, 0 @ get control register 430 - bic r0, r0, #0x0200 @ .... ..R. .... .... 431 430 bic r0, r0, #0x0002 @ .... .... .... ..A. 432 431 orr r0, r0, #0x0005 @ .... .... .... .C.M 433 432 #if BTB_ENABLE 433 + bic r0, r0, #0x0200 @ .... ..R. .... .... 434 434 orr r0, r0, #0x3900 @ ..VI Z..S .... .... 435 435 #else 436 + bic r0, r0, #0x0a00 @ .... Z.R. .... .... 436 437 orr r0, r0, #0x3100 @ ..VI ...S .... .... 437 438 #endif 438 439 #if L2_CACHE_ENABLE
-8
arch/i386/kernel/acpi/boot.c
··· 1066 1066 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), 1067 1067 }, 1068 1068 }, 1069 - { 1070 - .callback = disable_acpi_pci, 1071 - .ident = "HP xw9300", 1072 - .matches = { 1073 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1074 - DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"), 1075 - }, 1076 - }, 1077 1069 {} 1078 1070 }; 1079 1071
+1
arch/i386/kernel/syscall_table.S
··· 315 315 .long sys_splice 316 316 .long sys_sync_file_range 317 317 .long sys_tee /* 315 */ 318 + .long sys_vmsplice
+10 -6
arch/i386/mach-generic/probe.c
··· 93 93 int i; 94 94 for (i = 0; apic_probe[i]; ++i) { 95 95 if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) { 96 - genapic = apic_probe[i]; 97 - printk(KERN_INFO "Switched to APIC driver `%s'.\n", 98 - genapic->name); 96 + if (!cmdline_apic) { 97 + genapic = apic_probe[i]; 98 + printk(KERN_INFO "Switched to APIC driver `%s'.\n", 99 + genapic->name); 100 + } 99 101 return 1; 100 102 } 101 103 } ··· 109 107 int i; 110 108 for (i = 0; apic_probe[i]; ++i) { 111 109 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { 112 - genapic = apic_probe[i]; 113 - printk(KERN_INFO "Switched to APIC driver `%s'.\n", 114 - genapic->name); 110 + if (!cmdline_apic) { 111 + genapic = apic_probe[i]; 112 + printk(KERN_INFO "Switched to APIC driver `%s'.\n", 113 + genapic->name); 114 + } 115 115 return 1; 116 116 } 117 117 }
+47 -49
arch/mips/Kconfig
··· 13 13 default SGI_IP22 14 14 15 15 config MIPS_MTX1 16 - bool "Support for 4G Systems MTX-1 board" 16 + bool "4G Systems MTX-1 board" 17 17 select DMA_NONCOHERENT 18 18 select HW_HAS_PCI 19 19 select SOC_AU1500 ··· 120 120 select SYS_SUPPORTS_LITTLE_ENDIAN 121 121 122 122 config MIPS_COBALT 123 - bool "Support for Cobalt Server" 123 + bool "Cobalt Server" 124 124 select DMA_NONCOHERENT 125 125 select HW_HAS_PCI 126 126 select I8259 ··· 132 132 select SYS_SUPPORTS_LITTLE_ENDIAN 133 133 134 134 config MACH_DECSTATION 135 - bool "Support for DECstations" 135 + bool "DECstations" 136 136 select BOOT_ELF32 137 137 select DMA_NONCOHERENT 138 138 select EARLY_PRINTK ··· 158 158 otherwise choose R3000. 159 159 160 160 config MIPS_EV64120 161 - bool "Support for Galileo EV64120 Evaluation board (EXPERIMENTAL)" 161 + bool "Galileo EV64120 Evaluation board (EXPERIMENTAL)" 162 162 depends on EXPERIMENTAL 163 163 select DMA_NONCOHERENT 164 164 select HW_HAS_PCI ··· 175 175 kernel for this platform. 176 176 177 177 config MIPS_EV96100 178 - bool "Support for Galileo EV96100 Evaluation board (EXPERIMENTAL)" 178 + bool "Galileo EV96100 Evaluation board (EXPERIMENTAL)" 179 179 depends on EXPERIMENTAL 180 180 select DMA_NONCOHERENT 181 181 select HW_HAS_PCI ··· 195 195 here if you wish to build a kernel for this platform. 196 196 197 197 config MIPS_IVR 198 - bool "Support for Globespan IVR board" 198 + bool "Globespan IVR board" 199 199 select DMA_NONCOHERENT 200 200 select HW_HAS_PCI 201 201 select ITE_BOARD_GEN ··· 211 211 build a kernel for this platform. 212 212 213 213 config MIPS_ITE8172 214 - bool "Support for ITE 8172G board" 214 + bool "ITE 8172G board" 215 215 select DMA_NONCOHERENT 216 216 select HW_HAS_PCI 217 217 select ITE_BOARD_GEN ··· 228 228 a kernel for this platform. 
229 229 230 230 config MACH_JAZZ 231 - bool "Support for the Jazz family of machines" 231 + bool "Jazz family of machines" 232 232 select ARC 233 233 select ARC32 234 234 select ARCH_MAY_HAVE_PC_FDC ··· 246 246 Olivetti M700-10 workstations. 247 247 248 248 config LASAT 249 - bool "Support for LASAT Networks platforms" 249 + bool "LASAT Networks platforms" 250 250 select DMA_NONCOHERENT 251 251 select HW_HAS_PCI 252 252 select MIPS_GT64120 ··· 258 258 select SYS_SUPPORTS_LITTLE_ENDIAN 259 259 260 260 config MIPS_ATLAS 261 - bool "Support for MIPS Atlas board" 261 + bool "MIPS Atlas board" 262 262 select BOOT_ELF32 263 263 select DMA_NONCOHERENT 264 264 select IRQ_CPU ··· 283 283 board. 284 284 285 285 config MIPS_MALTA 286 - bool "Support for MIPS Malta board" 286 + bool "MIPS Malta board" 287 287 select ARCH_MAY_HAVE_PC_FDC 288 288 select BOOT_ELF32 289 289 select HAVE_STD_PC_SERIAL_PORT ··· 311 311 board. 312 312 313 313 config MIPS_SEAD 314 - bool "Support for MIPS SEAD board (EXPERIMENTAL)" 314 + bool "MIPS SEAD board (EXPERIMENTAL)" 315 315 depends on EXPERIMENTAL 316 316 select IRQ_CPU 317 317 select DMA_NONCOHERENT ··· 328 328 board. 329 329 330 330 config MIPS_SIM 331 - bool 'Support for MIPS simulator (MIPSsim)' 331 + bool 'MIPS simulator (MIPSsim)' 332 332 select DMA_NONCOHERENT 333 333 select IRQ_CPU 334 334 select SYS_HAS_CPU_MIPS32_R1 ··· 341 341 emulator. 342 342 343 343 config MOMENCO_JAGUAR_ATX 344 - bool "Support for Momentum Jaguar board" 344 + bool "Momentum Jaguar board" 345 345 select BOOT_ELF32 346 346 select DMA_NONCOHERENT 347 347 select HW_HAS_PCI ··· 361 361 Momentum Computer <http://www.momenco.com/>. 362 362 363 363 config MOMENCO_OCELOT 364 - bool "Support for Momentum Ocelot board" 364 + bool "Momentum Ocelot board" 365 365 select DMA_NONCOHERENT 366 366 select HW_HAS_PCI 367 367 select IRQ_CPU ··· 378 378 Momentum Computer <http://www.momenco.com/>. 
379 379 380 380 config MOMENCO_OCELOT_3 381 - bool "Support for Momentum Ocelot-3 board" 381 + bool "Momentum Ocelot-3 board" 382 382 select BOOT_ELF32 383 383 select DMA_NONCOHERENT 384 384 select HW_HAS_PCI ··· 397 397 PMC-Sierra Rm79000 core. 398 398 399 399 config MOMENCO_OCELOT_C 400 - bool "Support for Momentum Ocelot-C board" 400 + bool "Momentum Ocelot-C board" 401 401 select DMA_NONCOHERENT 402 402 select HW_HAS_PCI 403 403 select IRQ_CPU ··· 414 414 Momentum Computer <http://www.momenco.com/>. 415 415 416 416 config MOMENCO_OCELOT_G 417 - bool "Support for Momentum Ocelot-G board" 417 + bool "Momentum Ocelot-G board" 418 418 select DMA_NONCOHERENT 419 419 select HW_HAS_PCI 420 420 select IRQ_CPU ··· 431 431 Momentum Computer <http://www.momenco.com/>. 432 432 433 433 config MIPS_XXS1500 434 - bool "Support for MyCable XXS1500 board" 434 + bool "MyCable XXS1500 board" 435 435 select DMA_NONCOHERENT 436 436 select SOC_AU1500 437 437 select SYS_SUPPORTS_LITTLE_ENDIAN 438 438 439 439 config PNX8550_V2PCI 440 - bool "Support for Philips PNX8550 based Viper2-PCI board" 440 + bool "Philips PNX8550 based Viper2-PCI board" 441 441 select PNX8550 442 442 select SYS_SUPPORTS_LITTLE_ENDIAN 443 443 444 444 config PNX8550_JBS 445 - bool "Support for Philips PNX8550 based JBS board" 445 + bool "Philips PNX8550 based JBS board" 446 446 select PNX8550 447 447 select SYS_SUPPORTS_LITTLE_ENDIAN 448 448 449 449 config DDB5074 450 - bool "Support for NEC DDB Vrc-5074 (EXPERIMENTAL)" 450 + bool "NEC DDB Vrc-5074 (EXPERIMENTAL)" 451 451 depends on EXPERIMENTAL 452 452 select DDB5XXX_COMMON 453 453 select DMA_NONCOHERENT ··· 465 465 evaluation board. 466 466 467 467 config DDB5476 468 - bool "Support for NEC DDB Vrc-5476" 468 + bool "NEC DDB Vrc-5476" 469 469 select DDB5XXX_COMMON 470 470 select DMA_NONCOHERENT 471 471 select HAVE_STD_PC_SERIAL_PORT ··· 486 486 IDE controller, PS2 keyboard, PS2 mouse, etc. 
487 487 488 488 config DDB5477 489 - bool "Support for NEC DDB Vrc-5477" 489 + bool "NEC DDB Vrc-5477" 490 490 select DDB5XXX_COMMON 491 491 select DMA_NONCOHERENT 492 492 select HW_HAS_PCI ··· 504 504 ether port USB, AC97, PCI, etc. 505 505 506 506 config MACH_VR41XX 507 - bool "Support for NEC VR4100 series based machines" 507 + bool "NEC VR41XX-based machines" 508 508 select SYS_HAS_CPU_VR41XX 509 509 select SYS_SUPPORTS_32BIT_KERNEL 510 510 select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL 511 511 512 512 config PMC_YOSEMITE 513 - bool "Support for PMC-Sierra Yosemite eval board" 513 + bool "PMC-Sierra Yosemite eval board" 514 514 select DMA_COHERENT 515 515 select HW_HAS_PCI 516 516 select IRQ_CPU ··· 527 527 manufactured by PMC-Sierra. 528 528 529 529 config QEMU 530 - bool "Support for Qemu" 530 + bool "Qemu" 531 531 select DMA_COHERENT 532 532 select GENERIC_ISA_DMA 533 533 select HAVE_STD_PC_SERIAL_PORT ··· 547 547 can be found at http://www.linux-mips.org/wiki/Qemu. 548 548 549 549 config SGI_IP22 550 - bool "Support for SGI IP22 (Indy/Indigo2)" 550 + bool "SGI IP22 (Indy/Indigo2)" 551 551 select ARC 552 552 select ARC32 553 553 select BOOT_ELF32 ··· 567 567 that runs on these, say Y here. 568 568 569 569 config SGI_IP27 570 - bool "Support for SGI IP27 (Origin200/2000)" 570 + bool "SGI IP27 (Origin200/2000)" 571 571 select ARC 572 572 select ARC64 573 573 select BOOT_ELF64 ··· 583 583 here. 584 584 585 585 config SGI_IP32 586 - bool "Support for SGI IP32 (O2) (EXPERIMENTAL)" 586 + bool "SGI IP32 (O2) (EXPERIMENTAL)" 587 587 depends on EXPERIMENTAL 588 588 select ARC 589 589 select ARC32 ··· 604 604 If you want this kernel to run on SGI O2 workstation, say Y here. 
605 605 606 606 config SIBYTE_BIGSUR 607 - bool "Support for Sibyte BCM91480B-BigSur" 607 + bool "Sibyte BCM91480B-BigSur" 608 608 select BOOT_ELF32 609 609 select DMA_COHERENT 610 610 select PCI_DOMAINS ··· 615 615 select SYS_SUPPORTS_LITTLE_ENDIAN 616 616 617 617 config SIBYTE_SWARM 618 - bool "Support for Sibyte BCM91250A-SWARM" 618 + bool "Sibyte BCM91250A-SWARM" 619 619 select BOOT_ELF32 620 620 select DMA_COHERENT 621 621 select SIBYTE_SB1250 ··· 626 626 select SYS_SUPPORTS_LITTLE_ENDIAN 627 627 628 628 config SIBYTE_SENTOSA 629 - bool "Support for Sibyte BCM91250E-Sentosa" 629 + bool "Sibyte BCM91250E-Sentosa" 630 630 depends on EXPERIMENTAL 631 631 select BOOT_ELF32 632 632 select DMA_COHERENT ··· 637 637 select SYS_SUPPORTS_LITTLE_ENDIAN 638 638 639 639 config SIBYTE_RHONE 640 - bool "Support for Sibyte BCM91125E-Rhone" 640 + bool "Sibyte BCM91125E-Rhone" 641 641 depends on EXPERIMENTAL 642 642 select BOOT_ELF32 643 643 select DMA_COHERENT ··· 648 648 select SYS_SUPPORTS_LITTLE_ENDIAN 649 649 650 650 config SIBYTE_CARMEL 651 - bool "Support for Sibyte BCM91120x-Carmel" 651 + bool "Sibyte BCM91120x-Carmel" 652 652 depends on EXPERIMENTAL 653 653 select BOOT_ELF32 654 654 select DMA_COHERENT ··· 659 659 select SYS_SUPPORTS_LITTLE_ENDIAN 660 660 661 661 config SIBYTE_PTSWARM 662 - bool "Support for Sibyte BCM91250PT-PTSWARM" 662 + bool "Sibyte BCM91250PT-PTSWARM" 663 663 depends on EXPERIMENTAL 664 664 select BOOT_ELF32 665 665 select DMA_COHERENT ··· 671 671 select SYS_SUPPORTS_LITTLE_ENDIAN 672 672 673 673 config SIBYTE_LITTLESUR 674 - bool "Support for Sibyte BCM91250C2-LittleSur" 674 + bool "Sibyte BCM91250C2-LittleSur" 675 675 depends on EXPERIMENTAL 676 676 select BOOT_ELF32 677 677 select DMA_COHERENT ··· 683 683 select SYS_SUPPORTS_LITTLE_ENDIAN 684 684 685 685 config SIBYTE_CRHINE 686 - bool "Support for Sibyte BCM91120C-CRhine" 686 + bool "Sibyte BCM91120C-CRhine" 687 687 depends on EXPERIMENTAL 688 688 select BOOT_ELF32 689 689 select DMA_COHERENT 
··· 694 694 select SYS_SUPPORTS_LITTLE_ENDIAN 695 695 696 696 config SIBYTE_CRHONE 697 - bool "Support for Sibyte BCM91125C-CRhone" 697 + bool "Sibyte BCM91125C-CRhone" 698 698 depends on EXPERIMENTAL 699 699 select BOOT_ELF32 700 700 select DMA_COHERENT ··· 706 706 select SYS_SUPPORTS_LITTLE_ENDIAN 707 707 708 708 config SNI_RM200_PCI 709 - bool "Support for SNI RM200 PCI" 709 + bool "SNI RM200 PCI" 710 710 select ARC 711 711 select ARC32 712 712 select ARCH_MAY_HAVE_PC_FDC ··· 732 732 support this machine type. 733 733 734 734 config TOSHIBA_JMR3927 735 - bool "Support for Toshiba JMR-TX3927 board" 735 + bool "Toshiba JMR-TX3927 board" 736 736 select DMA_NONCOHERENT 737 737 select HW_HAS_PCI 738 738 select MIPS_TX3927 ··· 743 743 select TOSHIBA_BOARDS 744 744 745 745 config TOSHIBA_RBTX4927 746 - bool "Support for Toshiba TBTX49[23]7 board" 746 + bool "Toshiba TBTX49[23]7 board" 747 747 select DMA_NONCOHERENT 748 748 select HAS_TXX9_SERIAL 749 749 select HW_HAS_PCI ··· 760 760 support this machine type 761 761 762 762 config TOSHIBA_RBTX4938 763 - bool "Support for Toshiba RBTX4938 board" 763 + bool "Toshiba RBTX4938 board" 764 764 select HAVE_STD_PC_SERIAL_PORT 765 765 select DMA_NONCOHERENT 766 766 select GENERIC_ISA_DMA ··· 1411 1411 1412 1412 config PAGE_SIZE_16KB 1413 1413 bool "16kB" 1414 - depends on EXPERIMENTAL && !CPU_R3000 && !CPU_TX39XX 1414 + depends on !CPU_R3000 && !CPU_TX39XX 1415 1415 help 1416 1416 Using 16kB page size will result in higher performance kernel at 1417 1417 the price of higher memory consumption. This option is available on 1418 - all non-R3000 family processor. Not that at the time of this 1419 - writing this option is still high experimental; there are also 1420 - issues with compatibility of user applications. 1418 + all non-R3000 family processors. Note that you will need a suitable 1419 + Linux distribution to support this. 
1421 1420 1422 1421 config PAGE_SIZE_64KB 1423 1422 bool "64kB" ··· 1425 1426 Using 64kB page size will result in higher performance kernel at 1426 1427 the price of higher memory consumption. This option is available on 1427 1428 all non-R3000 family processor. Not that at the time of this 1428 - writing this option is still high experimental; there are also 1429 - issues with compatibility of user applications. 1429 + writing this option is still highly experimental. 1430 1430 1431 1431 endchoice 1432 1432
+1
arch/mips/au1000/common/irq.c
··· 68 68 69 69 extern void set_debug_traps(void); 70 70 extern irq_cpustat_t irq_stat [NR_CPUS]; 71 + extern void mips_timer_interrupt(struct pt_regs *regs); 71 72 72 73 static void setup_local_irq(unsigned int irq, int type, int int_req); 73 74 static unsigned int startup_irq(unsigned int irq);
+9 -15
arch/mips/au1000/common/prom.c
··· 1 1 /* 2 2 * 3 3 * BRIEF MODULE DESCRIPTION 4 - * PROM library initialisation code, assuming a version of 5 - * pmon is the boot code. 4 + * PROM library initialisation code, assuming YAMON is the boot loader. 6 5 * 7 - * Copyright 2000,2001 MontaVista Software Inc. 6 + * Copyright 2000, 2001, 2006 MontaVista Software Inc. 8 7 * Author: MontaVista Software, Inc. 9 8 * ppopov@mvista.com or source@mvista.com 10 9 * ··· 48 49 49 50 typedef struct 50 51 { 51 - char *name; 52 - /* char *val; */ 53 - }t_env_var; 52 + char *name; 53 + char *val; 54 + } t_env_var; 54 55 55 56 56 57 char * prom_getcmdline(void) ··· 84 85 { 85 86 /* 86 87 * Return a pointer to the given environment variable. 87 - * Environment variables are stored in the form of "memsize=64". 88 88 */ 89 89 90 90 t_env_var *env = (t_env_var *)prom_envp; 91 - int i; 92 91 93 - i = strlen(envname); 94 - 95 - while(env->name) { 96 - if(strncmp(envname, env->name, i) == 0) { 97 - return(env->name + strlen(envname) + 1); 98 - } 92 + while (env->name) { 93 + if (strcmp(envname, env->name) == 0) 94 + return env->val; 99 95 env++; 100 96 } 101 - return(NULL); 97 + return NULL; 102 98 } 103 99 104 100 inline unsigned char str2hexnum(unsigned char c)
+5
arch/mips/au1000/common/sleeper.S
··· 112 112 mtc0 k0, CP0_PAGEMASK 113 113 lw k0, 0x14(sp) 114 114 mtc0 k0, CP0_CONFIG 115 + 116 + /* We need to catch the early Alchemy SOCs with 117 + * the write-only Config[OD] bit and set it back to one... 118 + */ 119 + jal au1x00_fixup_config_od 115 120 lw $1, PT_R1(sp) 116 121 lw $2, PT_R2(sp) 117 122 lw $3, PT_R3(sp)
+1
arch/mips/au1000/common/time.c
··· 116 116 117 117 null: 118 118 ack_r4ktimer(0); 119 + irq_exit(); 119 120 } 120 121 121 122 #ifdef CONFIG_PM
+1 -1
arch/mips/ddb5xxx/ddb5476/dbg_io.c
··· 86 86 /* disable interrupts */ 87 87 UART16550_WRITE(OFS_INTR_ENABLE, 0); 88 88 89 - /* set up buad rate */ 89 + /* set up baud rate */ 90 90 { 91 91 uint32 divisor; 92 92
+1 -1
arch/mips/ddb5xxx/ddb5477/kgdb_io.c
··· 86 86 /* disable interrupts */ 87 87 UART16550_WRITE(OFS_INTR_ENABLE, 0); 88 88 89 - /* set up buad rate */ 89 + /* set up baud rate */ 90 90 { 91 91 uint32 divisor; 92 92
+1 -1
arch/mips/gt64120/ev64120/serialGT.c
··· 149 149 #else 150 150 /* 151 151 * Note: Set baud rate, hardcoded here for rate of 115200 152 - * since became unsure of above "buad rate" algorithm (??). 152 + * since became unsure of above "baud rate" algorithm (??). 153 153 */ 154 154 outreg(channel, LCR, 0x83); 155 155 outreg(channel, DLM, 0x00); // See note above
+1 -1
arch/mips/gt64120/momenco_ocelot/dbg_io.c
··· 73 73 /* disable interrupts */ 74 74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 75 75 76 - /* set up buad rate */ 76 + /* set up baud rate */ 77 77 { 78 78 uint32 divisor; 79 79
+1 -1
arch/mips/ite-boards/generic/dbg_io.c
··· 72 72 /* disable interrupts */ 73 73 UART16550_WRITE(OFS_INTR_ENABLE, 0); 74 74 75 - /* set up buad rate */ 75 + /* set up baud rate */ 76 76 { 77 77 uint32 divisor; 78 78
+2 -2
arch/mips/kernel/asm-offsets.c
··· 272 272 text("/* Linux sigcontext offsets. */"); 273 273 offset("#define SC_REGS ", struct sigcontext, sc_regs); 274 274 offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs); 275 - offset("#define SC_MDHI ", struct sigcontext, sc_hi); 276 - offset("#define SC_MDLO ", struct sigcontext, sc_lo); 275 + offset("#define SC_MDHI ", struct sigcontext, sc_mdhi); 276 + offset("#define SC_MDLO ", struct sigcontext, sc_mdlo); 277 277 offset("#define SC_PC ", struct sigcontext, sc_pc); 278 278 offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr); 279 279 linefeed;
+4 -4
arch/mips/kernel/cpu-bugs64.c
··· 206 206 "daddi %0, %1, %3\n\t" 207 207 ".set pop" 208 208 : "=r" (v), "=&r" (tmp) 209 - : "I" (0xffffffffffffdb9a), "I" (0x1234)); 209 + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); 210 210 set_except_vector(12, handler); 211 211 local_irq_restore(flags); 212 212 ··· 224 224 "dsrl %1, %1, 1\n\t" 225 225 "daddi %0, %1, %3" 226 226 : "=r" (v), "=&r" (tmp) 227 - : "I" (0xffffffffffffdb9a), "I" (0x1234)); 227 + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); 228 228 set_except_vector(12, handler); 229 229 local_irq_restore(flags); 230 230 ··· 280 280 "daddu %1, %2\n\t" 281 281 ".set pop" 282 282 : "=&r" (v), "=&r" (w), "=&r" (tmp) 283 - : "I" (0xffffffffffffdb9a), "I" (0x1234)); 283 + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); 284 284 285 285 if (v == w) { 286 286 printk("no.\n"); ··· 296 296 "addiu %1, $0, %4\n\t" 297 297 "daddu %1, %2" 298 298 : "=&r" (v), "=&r" (w), "=&r" (tmp) 299 - : "I" (0xffffffffffffdb9a), "I" (0x1234)); 299 + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); 300 300 301 301 if (v == w) { 302 302 printk("yes.\n");
+14 -1
arch/mips/kernel/cpu-probe.c
··· 121 121 case CPU_24K: 122 122 case CPU_25KF: 123 123 case CPU_34K: 124 + case CPU_74K: 124 125 case CPU_PR4450: 125 126 cpu_wait = r4k_wait; 126 127 printk(" available.\n"); ··· 433 432 MIPS_CPU_LLSC; 434 433 c->tlbsize = 64; 435 434 break; 435 + case PRID_IMP_R14000: 436 + c->cputype = CPU_R14000; 437 + c->isa_level = MIPS_CPU_ISA_IV; 438 + c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | 439 + MIPS_CPU_FPU | MIPS_CPU_32FPR | 440 + MIPS_CPU_COUNTER | MIPS_CPU_WATCH | 441 + MIPS_CPU_LLSC; 442 + c->tlbsize = 64; 443 + break; 436 444 } 437 445 } 438 446 ··· 603 593 case PRID_IMP_34K: 604 594 c->cputype = CPU_34K; 605 595 break; 596 + case PRID_IMP_74K: 597 + c->cputype = CPU_74K; 598 + break; 606 599 } 607 600 } 608 601 ··· 655 642 case PRID_IMP_SB1: 656 643 c->cputype = CPU_SB1; 657 644 /* FPU in pass1 is known to have issues. */ 658 - if ((c->processor_id & 0xff) < 0x20) 645 + if ((c->processor_id & 0xff) < 0x02) 659 646 c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); 660 647 break; 661 648 case PRID_IMP_SB1A:
+1 -1
arch/mips/kernel/entry.S
··· 101 101 EMT 102 102 1: 103 103 mfc0 v1, CP0_TCSTATUS 104 - /* We set IXMT above, XOR should cler it here */ 104 + /* We set IXMT above, XOR should clear it here */ 105 105 xori v1, v1, TCSTATUS_IXMT 106 106 or v1, v0, v1 107 107 mtc0 v1, CP0_TCSTATUS
+5 -3
arch/mips/kernel/gdb-low.S
··· 54 54 */ 55 55 mfc0 k0, CP0_CAUSE 56 56 andi k0, k0, 0x7c 57 - add k1, k1, k0 58 - PTR_L k0, saved_vectors(k1) 59 - jr k0 57 + #ifdef CONFIG_64BIT 58 + dsll k0, k0, 1 59 + #endif 60 + PTR_L k1, saved_vectors(k0) 61 + jr k1 60 62 nop 61 63 1: 62 64 move k0, sp
+6
arch/mips/kernel/module.c
··· 288 288 sym = (Elf_Sym *)sechdrs[symindex].sh_addr 289 289 + ELF_MIPS_R_SYM(rel[i]); 290 290 if (!sym->st_value) { 291 + /* Ignore unresolved weak symbol */ 292 + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) 293 + continue; 291 294 printk(KERN_WARNING "%s: Unknown symbol %s\n", 292 295 me->name, strtab + sym->st_name); 293 296 return -ENOENT; ··· 328 325 sym = (Elf_Sym *)sechdrs[symindex].sh_addr 329 326 + ELF_MIPS_R_SYM(rel[i]); 330 327 if (!sym->st_value) { 328 + /* Ignore unresolved weak symbol */ 329 + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) 330 + continue; 331 331 printk(KERN_WARNING "%s: Unknown symbol %s\n", 332 332 me->name, strtab + sym->st_name); 333 333 return -ENOENT;
+2
arch/mips/kernel/proc.c
··· 42 42 [CPU_R8000] = "R8000", 43 43 [CPU_R10000] = "R10000", 44 44 [CPU_R12000] = "R12000", 45 + [CPU_R14000] = "R14000", 45 46 [CPU_R4300] = "R4300", 46 47 [CPU_R4650] = "R4650", 47 48 [CPU_R4700] = "R4700", ··· 75 74 [CPU_24K] = "MIPS 24K", 76 75 [CPU_25KF] = "MIPS 25Kf", 77 76 [CPU_34K] = "MIPS 34K", 77 + [CPU_74K] = "MIPS 74K", 78 78 [CPU_VR4111] = "NEC VR4111", 79 79 [CPU_VR4121] = "NEC VR4121", 80 80 [CPU_VR4122] = "NEC VR4122",
+1 -1
arch/mips/kernel/scall64-o32.S
··· 209 209 PTR sys_fork 210 210 PTR sys_read 211 211 PTR sys_write 212 - PTR sys_open /* 4005 */ 212 + PTR compat_sys_open /* 4005 */ 213 213 PTR sys_close 214 214 PTR sys_waitpid 215 215 PTR sys_creat
+10 -8
arch/mips/kernel/setup.c
··· 246 246 #ifdef CONFIG_64BIT 247 247 /* HACK: Guess if the sign extension was forgotten */ 248 248 if (start > 0x0000000080000000 && start < 0x00000000ffffffff) 249 - start |= 0xffffffff00000000; 249 + start |= 0xffffffff00000000UL; 250 250 #endif 251 251 252 252 end = start + size; ··· 355 355 } 356 356 #endif 357 357 358 - memory_present(0, first_usable_pfn, max_low_pfn); 359 - 360 358 /* Initialize the boot-time allocator with low memory only. */ 361 359 bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn); 362 360 ··· 408 410 409 411 /* Register lowmem ranges */ 410 412 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size)); 413 + memory_present(0, curr_pfn, curr_pfn + size - 1); 411 414 } 412 415 413 416 /* Reserve the bootmap memory. */ ··· 418 419 #ifdef CONFIG_BLK_DEV_INITRD 419 420 initrd_below_start_ok = 1; 420 421 if (initrd_start) { 421 - unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start); 422 + unsigned long initrd_size = ((unsigned char *)initrd_end) - 423 + ((unsigned char *)initrd_start); 424 + const int width = sizeof(long) * 2; 425 + 422 426 printk("Initial ramdisk at: 0x%p (%lu bytes)\n", 423 427 (void *)initrd_start, initrd_size); 424 428 425 429 if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) { 426 430 printk("initrd extends beyond end of memory " 427 431 "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n", 428 - sizeof(long) * 2, 429 - (unsigned long long)CPHYSADDR(initrd_end), 430 - sizeof(long) * 2, 431 - (unsigned long long)PFN_PHYS(max_low_pfn)); 432 + width, 433 + (unsigned long long) CPHYSADDR(initrd_end), 434 + width, 435 + (unsigned long long) PFN_PHYS(max_low_pfn)); 432 436 initrd_start = initrd_end = 0; 433 437 initrd_reserve_bootmem = 0; 434 438 }
-30
arch/mips/kernel/signal-common.h
··· 31 31 save_gp_reg(31); 32 32 #undef save_gp_reg 33 33 34 - #ifdef CONFIG_32BIT 35 34 err |= __put_user(regs->hi, &sc->sc_mdhi); 36 35 err |= __put_user(regs->lo, &sc->sc_mdlo); 37 36 if (cpu_has_dsp) { ··· 42 43 err |= __put_user(mflo3(), &sc->sc_lo3); 43 44 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 44 45 } 45 - #endif 46 - #ifdef CONFIG_64BIT 47 - err |= __put_user(regs->hi, &sc->sc_hi[0]); 48 - err |= __put_user(regs->lo, &sc->sc_lo[0]); 49 - if (cpu_has_dsp) { 50 - err |= __put_user(mfhi1(), &sc->sc_hi[1]); 51 - err |= __put_user(mflo1(), &sc->sc_lo[1]); 52 - err |= __put_user(mfhi2(), &sc->sc_hi[2]); 53 - err |= __put_user(mflo2(), &sc->sc_lo[2]); 54 - err |= __put_user(mfhi3(), &sc->sc_hi[3]); 55 - err |= __put_user(mflo3(), &sc->sc_lo[3]); 56 - err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 57 - } 58 - #endif 59 46 60 47 err |= __put_user(!!used_math(), &sc->sc_used_math); 61 48 ··· 77 92 current_thread_info()->restart_block.fn = do_no_restart_syscall; 78 93 79 94 err |= __get_user(regs->cp0_epc, &sc->sc_pc); 80 - #ifdef CONFIG_32BIT 81 95 err |= __get_user(regs->hi, &sc->sc_mdhi); 82 96 err |= __get_user(regs->lo, &sc->sc_mdlo); 83 97 if (cpu_has_dsp) { ··· 88 104 err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); 89 105 err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); 90 106 } 91 - #endif 92 - #ifdef CONFIG_64BIT 93 - err |= __get_user(regs->hi, &sc->sc_hi[0]); 94 - err |= __get_user(regs->lo, &sc->sc_lo[0]); 95 - if (cpu_has_dsp) { 96 - err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg); 97 - err |= __get_user(treg, &sc->sc_lo[1]); mthi1(treg); 98 - err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg); 99 - err |= __get_user(treg, &sc->sc_lo[2]); mthi2(treg); 100 - err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg); 101 - err |= __get_user(treg, &sc->sc_lo[3]); mthi3(treg); 102 - err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); 103 - } 104 - #endif 105 107 106 108 #define restore_gp_reg(i) do { \ 107 109 err |= 
__get_user(regs->regs[i], &sc->sc_regs[i]); \
+4 -1
arch/mips/kernel/smp.c
··· 247 247 current_thread_info()->cpu = 0; 248 248 smp_tune_scheduling(); 249 249 plat_prepare_cpus(max_cpus); 250 + #ifndef CONFIG_HOTPLUG_CPU 251 + cpu_present_map = cpu_possible_map; 252 + #endif 250 253 } 251 254 252 255 /* preload SMP state for boot cpu */ ··· 445 442 int cpu; 446 443 int ret; 447 444 448 - for_each_cpu(cpu) { 445 + for_each_present_cpu(cpu) { 449 446 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); 450 447 if (ret) 451 448 printk(KERN_WARNING "topology_init: register_cpu %d "
+1 -26
arch/mips/kernel/syscall.c
··· 276 276 277 277 asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3) 278 278 { 279 - int tmp, len; 280 - char __user *name; 279 + int tmp; 281 280 282 281 switch(cmd) { 283 - case SETNAME: { 284 - char nodename[__NEW_UTS_LEN + 1]; 285 - 286 - if (!capable(CAP_SYS_ADMIN)) 287 - return -EPERM; 288 - 289 - name = (char __user *) arg1; 290 - 291 - len = strncpy_from_user(nodename, name, __NEW_UTS_LEN); 292 - if (len < 0) 293 - return -EFAULT; 294 - 295 - down_write(&uts_sem); 296 - strncpy(system_utsname.nodename, nodename, len); 297 - nodename[__NEW_UTS_LEN] = '\0'; 298 - strlcpy(system_utsname.nodename, nodename, 299 - sizeof(system_utsname.nodename)); 300 - up_write(&uts_sem); 301 - return 0; 302 - } 303 - 304 282 case MIPS_ATOMIC_SET: 305 283 printk(KERN_CRIT "How did I get here?\n"); 306 284 return -EINVAL; ··· 291 313 case FLUSH_CACHE: 292 314 __flush_cache_all(); 293 315 return 0; 294 - 295 - case MIPS_RDNVRAM: 296 - return -EIO; 297 316 } 298 317 299 318 return -EINVAL;
+18 -2
arch/mips/kernel/traps.c
··· 819 819 820 820 asmlinkage void do_mcheck(struct pt_regs *regs) 821 821 { 822 + const int field = 2 * sizeof(unsigned long); 823 + int multi_match = regs->cp0_status & ST0_TS; 824 + 822 825 show_regs(regs); 823 - dump_tlb_all(); 826 + 827 + if (multi_match) { 828 + printk("Index : %0x\n", read_c0_index()); 829 + printk("Pagemask: %0x\n", read_c0_pagemask()); 830 + printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); 831 + printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); 832 + printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); 833 + printk("\n"); 834 + dump_tlb_all(); 835 + } 836 + 837 + show_code((unsigned int *) regs->cp0_epc); 838 + 824 839 /* 825 840 * Some chips may have other causes of machine check (e.g. SB1 826 841 * graduation timer) 827 842 */ 828 843 panic("Caught Machine Check exception - %scaused by multiple " 829 844 "matching entries in the TLB.", 830 - (regs->cp0_status & ST0_TS) ? "" : "not "); 845 + (multi_match) ? "" : "not "); 831 846 } 832 847 833 848 asmlinkage void do_mt(struct pt_regs *regs) ··· 917 902 { 918 903 switch (current_cpu_data.cputype) { 919 904 case CPU_24K: 905 + case CPU_34K: 920 906 case CPU_5KC: 921 907 write_c0_ecc(0x80000000); 922 908 back_to_back_c0_hazard();
+5 -15
arch/mips/kernel/vmlinux.lds.S
··· 151 151 152 152 /* This is the MIPS specific mdebug section. */ 153 153 .mdebug : { *(.mdebug) } 154 - /* These are needed for ELF backends which have not yet been 155 - converted to the new style linker. */ 156 - .stab 0 : { *(.stab) } 157 - .stabstr 0 : { *(.stabstr) } 158 - /* DWARF debug sections. 159 - Symbols in the .debug DWARF section are relative to the beginning of the 160 - section so we begin .debug at 0. It's not clear yet what needs to happen 161 - for the others. */ 162 - .debug 0 : { *(.debug) } 163 - .debug_srcinfo 0 : { *(.debug_srcinfo) } 164 - .debug_aranges 0 : { *(.debug_aranges) } 165 - .debug_pubnames 0 : { *(.debug_pubnames) } 166 - .debug_sfnames 0 : { *(.debug_sfnames) } 167 - .line 0 : { *(.line) } 154 + 155 + STABS_DEBUG 156 + 157 + DWARF_DEBUG 158 + 168 159 /* These must appear regardless of . */ 169 160 .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) } 170 161 .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) } 171 - .comment : { *(.comment) } 172 162 .note : { *(.note) } 173 163 }
+3 -1
arch/mips/math-emu/dp_fint.c
··· 29 29 30 30 ieee754dp ieee754dp_fint(int x) 31 31 { 32 - COMPXDP; 32 + u64 xm; 33 + int xe; 34 + int xs; 33 35 34 36 CLEARCX; 35 37
+3 -1
arch/mips/math-emu/dp_flong.c
··· 29 29 30 30 ieee754dp ieee754dp_flong(s64 x) 31 31 { 32 - COMPXDP; 32 + u64 xm; 33 + int xe; 34 + int xs; 33 35 34 36 CLEARCX; 35 37
+3 -1
arch/mips/math-emu/sp_fint.c
··· 29 29 30 30 ieee754sp ieee754sp_fint(int x) 31 31 { 32 - COMPXSP; 32 + unsigned xm; 33 + int xe; 34 + int xs; 33 35 34 36 CLEARCX; 35 37
+3 -1
arch/mips/math-emu/sp_flong.c
··· 29 29 30 30 ieee754sp ieee754sp_flong(s64 x) 31 31 { 32 - COMPXDP; /* <--- need 64-bit mantissa temp */ 32 + u64 xm; /* <--- need 64-bit mantissa temp */ 33 + int xe; 34 + int xs; 33 35 34 36 CLEARCX; 35 37
+69 -9
arch/mips/mm/c-r4k.c
··· 29 29 #include <asm/war.h> 30 30 #include <asm/cacheflush.h> /* for run_uncached() */ 31 31 32 + 33 + /* 34 + * Special Variant of smp_call_function for use by cache functions: 35 + * 36 + * o No return value 37 + * o collapses to normal function call on UP kernels 38 + * o collapses to normal function call on systems with a single shared 39 + * primary cache. 40 + */ 41 + static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, 42 + int retry, int wait) 43 + { 44 + preempt_disable(); 45 + 46 + #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 47 + smp_call_function(func, info, retry, wait); 48 + #endif 49 + func(info); 50 + preempt_enable(); 51 + } 52 + 32 53 /* 33 54 * Must die. 34 55 */ ··· 320 299 if (!cpu_has_dc_aliases) 321 300 return; 322 301 323 - on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1); 302 + r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1); 324 303 } 325 304 326 305 static inline void local_r4k___flush_cache_all(void * args) ··· 335 314 case CPU_R4400MC: 336 315 case CPU_R10000: 337 316 case CPU_R12000: 317 + case CPU_R14000: 338 318 r4k_blast_scache(); 339 319 } 340 320 } 341 321 342 322 static void r4k___flush_cache_all(void) 343 323 { 344 - on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); 324 + r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); 345 325 } 346 326 347 327 static inline void local_r4k_flush_cache_range(void * args) ··· 363 341 static void r4k_flush_cache_range(struct vm_area_struct *vma, 364 342 unsigned long start, unsigned long end) 365 343 { 366 - on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); 344 + r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); 367 345 } 368 346 369 347 static inline void local_r4k_flush_cache_mm(void * args) ··· 392 370 if (!cpu_has_dc_aliases) 393 371 return; 394 372 395 - on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); 373 + r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); 396 374 } 397 375 398 376 struct flush_cache_page_args { 
··· 483 461 args.addr = addr; 484 462 args.pfn = pfn; 485 463 486 - on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); 464 + r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); 487 465 } 488 466 489 467 static inline void local_r4k_flush_data_cache_page(void * addr) ··· 493 471 494 472 static void r4k_flush_data_cache_page(unsigned long addr) 495 473 { 496 - on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1); 474 + r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1); 497 475 } 498 476 499 477 struct flush_icache_range_args { ··· 536 514 args.start = start; 537 515 args.end = end; 538 516 539 - on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); 517 + r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); 540 518 instruction_hazard(); 541 519 } 542 520 ··· 612 590 args.vma = vma; 613 591 args.page = page; 614 592 615 - on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1); 593 + r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1); 616 594 } 617 595 618 596 ··· 711 689 712 690 static void r4k_flush_cache_sigtramp(unsigned long addr) 713 691 { 714 - on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); 692 + r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); 715 693 } 716 694 717 695 static void r4k_flush_icache_all(void) ··· 834 812 835 813 case CPU_R10000: 836 814 case CPU_R12000: 815 + case CPU_R14000: 837 816 icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29)); 838 817 c->icache.linesz = 64; 839 818 c->icache.ways = 2; ··· 988 965 c->dcache.flags |= MIPS_CACHE_PINDEX; 989 966 case CPU_R10000: 990 967 case CPU_R12000: 968 + case CPU_R14000: 991 969 case CPU_SB1: 992 970 break; 993 971 case CPU_24K: 972 + case CPU_34K: 994 973 if (!(read_c0_config7() & (1 << 16))) 995 974 default: 996 975 if (c->dcache.waysize > PAGE_SIZE) ··· 1116 1091 1117 1092 case CPU_R10000: 1118 1093 case CPU_R12000: 1094 + case CPU_R14000: 1119 1095 scache_size = 0x80000 << ((config & R10K_CONF_SS) 
>> 16); 1120 1096 c->scache.linesz = 64 << ((config >> 13) & 1); 1121 1097 c->scache.ways = 2; ··· 1161 1135 c->options |= MIPS_CPU_SUBSET_CACHES; 1162 1136 } 1163 1137 1138 + void au1x00_fixup_config_od(void) 1139 + { 1140 + /* 1141 + * c0_config.od (bit 19) was write only (and read as 0) 1142 + * on the early revisions of Alchemy SOCs. It disables the bus 1143 + * transaction overlapping and needs to be set to fix various errata. 1144 + */ 1145 + switch (read_c0_prid()) { 1146 + case 0x00030100: /* Au1000 DA */ 1147 + case 0x00030201: /* Au1000 HA */ 1148 + case 0x00030202: /* Au1000 HB */ 1149 + case 0x01030200: /* Au1500 AB */ 1150 + /* 1151 + * Au1100 errata actually keeps silence about this bit, so we set it 1152 + * just in case for those revisions that require it to be set according 1153 + * to arch/mips/au1000/common/cputable.c 1154 + */ 1155 + case 0x02030200: /* Au1100 AB */ 1156 + case 0x02030201: /* Au1100 BA */ 1157 + case 0x02030202: /* Au1100 BC */ 1158 + set_c0_config(1 << 19); 1159 + break; 1160 + } 1161 + } 1162 + 1164 1163 static inline void coherency_setup(void) 1165 1164 { 1166 1165 change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT); ··· 1205 1154 case CPU_R4400SC: 1206 1155 case CPU_R4400MC: 1207 1156 clear_c0_config(CONF_CU); 1157 + break; 1158 + /* 1159 + * We need to catch the ealry Alchemy SOCs with 1160 + * the write-only co_config.od bit and set it back to one... 1161 + */ 1162 + case CPU_AU1000: /* rev. DA, HA, HB */ 1163 + case CPU_AU1100: /* rev. AB, BA, BC ?? */ 1164 + case CPU_AU1500: /* rev. AB */ 1165 + au1x00_fixup_config_od(); 1208 1166 break; 1209 1167 } 1210 1168 }
+1 -1
arch/mips/mm/init.c
··· 227 227 for (tmp = 0; tmp < max_low_pfn; tmp++) 228 228 if (page_is_ram(tmp)) { 229 229 ram++; 230 - if (PageReserved(mem_map+tmp)) 230 + if (PageReserved(pfn_to_page(tmp))) 231 231 reservedpages++; 232 232 } 233 233
+1
arch/mips/mm/pg-r4k.c
··· 357 357 358 358 case CPU_R10000: 359 359 case CPU_R12000: 360 + case CPU_R14000: 360 361 pref_src_mode = Pref_LoadStreamed; 361 362 pref_dst_mode = Pref_StoreStreamed; 362 363 break;
+2
arch/mips/mm/tlbex.c
··· 875 875 876 876 case CPU_R10000: 877 877 case CPU_R12000: 878 + case CPU_R14000: 878 879 case CPU_4KC: 879 880 case CPU_SB1: 880 881 case CPU_SB1A: ··· 907 906 case CPU_4KEC: 908 907 case CPU_24K: 909 908 case CPU_34K: 909 + case CPU_74K: 910 910 i_ehb(p); 911 911 tlbw(p); 912 912 break;
+1 -1
arch/mips/momentum/jaguar_atx/dbg_io.c
··· 73 73 /* disable interrupts */ 74 74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 75 75 76 - /* set up buad rate */ 76 + /* set up baud rate */ 77 77 { 78 78 uint32 divisor; 79 79
+1 -1
arch/mips/momentum/ocelot_c/dbg_io.c
··· 73 73 /* disable interrupts */ 74 74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 75 75 76 - /* set up buad rate */ 76 + /* set up baud rate */ 77 77 { 78 78 uint32 divisor; 79 79
+1 -1
arch/mips/momentum/ocelot_g/dbg_io.c
··· 73 73 /* disable interrupts */ 74 74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 75 75 76 - /* set up buad rate */ 76 + /* set up baud rate */ 77 77 { 78 78 uint32 divisor; 79 79
+5 -4
arch/mips/oprofile/common.c
··· 14 14 15 15 #include "op_impl.h" 16 16 17 - extern struct op_mips_model op_model_mipsxx __attribute__((weak)); 18 - extern struct op_mips_model op_model_rm9000 __attribute__((weak)); 17 + extern struct op_mips_model op_model_mipsxx_ops __attribute__((weak)); 18 + extern struct op_mips_model op_model_rm9000_ops __attribute__((weak)); 19 19 20 20 static struct op_mips_model *model; 21 21 ··· 80 80 case CPU_24K: 81 81 case CPU_25KF: 82 82 case CPU_34K: 83 + case CPU_74K: 83 84 case CPU_SB1: 84 85 case CPU_SB1A: 85 - lmodel = &op_model_mipsxx; 86 + lmodel = &op_model_mipsxx_ops; 86 87 break; 87 88 88 89 case CPU_RM9000: 89 - lmodel = &op_model_rm9000; 90 + lmodel = &op_model_rm9000_ops; 90 91 break; 91 92 }; 92 93
+19 -15
arch/mips/oprofile/op_model_mipsxx.c
··· 23 23 24 24 #define M_COUNTER_OVERFLOW (1UL << 31) 25 25 26 - struct op_mips_model op_model_mipsxx; 26 + struct op_mips_model op_model_mipsxx_ops; 27 27 28 28 static struct mipsxx_register_config { 29 29 unsigned int control[4]; ··· 34 34 35 35 static void mipsxx_reg_setup(struct op_counter_config *ctr) 36 36 { 37 - unsigned int counters = op_model_mipsxx.num_counters; 37 + unsigned int counters = op_model_mipsxx_ops.num_counters; 38 38 int i; 39 39 40 40 /* Compute the performance counter control word. */ ··· 62 62 63 63 static void mipsxx_cpu_setup (void *args) 64 64 { 65 - unsigned int counters = op_model_mipsxx.num_counters; 65 + unsigned int counters = op_model_mipsxx_ops.num_counters; 66 66 67 67 switch (counters) { 68 68 case 4: ··· 83 83 /* Start all counters on current CPU */ 84 84 static void mipsxx_cpu_start(void *args) 85 85 { 86 - unsigned int counters = op_model_mipsxx.num_counters; 86 + unsigned int counters = op_model_mipsxx_ops.num_counters; 87 87 88 88 switch (counters) { 89 89 case 4: ··· 100 100 /* Stop all counters on current CPU */ 101 101 static void mipsxx_cpu_stop(void *args) 102 102 { 103 - unsigned int counters = op_model_mipsxx.num_counters; 103 + unsigned int counters = op_model_mipsxx_ops.num_counters; 104 104 105 105 switch (counters) { 106 106 case 4: ··· 116 116 117 117 static int mipsxx_perfcount_handler(struct pt_regs *regs) 118 118 { 119 - unsigned int counters = op_model_mipsxx.num_counters; 119 + unsigned int counters = op_model_mipsxx_ops.num_counters; 120 120 unsigned int control; 121 121 unsigned int counter; 122 122 int handled = 0; ··· 187 187 188 188 reset_counters(counters); 189 189 190 - op_model_mipsxx.num_counters = counters; 190 + op_model_mipsxx_ops.num_counters = counters; 191 191 switch (current_cpu_data.cputype) { 192 192 case CPU_20KC: 193 - op_model_mipsxx.cpu_type = "mips/20K"; 193 + op_model_mipsxx_ops.cpu_type = "mips/20K"; 194 194 break; 195 195 196 196 case CPU_24K: 197 - op_model_mipsxx.cpu_type = 
"mips/24K"; 197 + op_model_mipsxx_ops.cpu_type = "mips/24K"; 198 198 break; 199 199 200 200 case CPU_25KF: 201 - op_model_mipsxx.cpu_type = "mips/25K"; 201 + op_model_mipsxx_ops.cpu_type = "mips/25K"; 202 202 break; 203 203 204 204 #ifndef CONFIG_SMP 205 205 case CPU_34K: 206 - op_model_mipsxx.cpu_type = "mips/34K"; 206 + op_model_mipsxx_ops.cpu_type = "mips/34K"; 207 + break; 208 + 209 + case CPU_74K: 210 + op_model_mipsxx_ops.cpu_type = "mips/74K"; 207 211 break; 208 212 #endif 209 213 210 214 case CPU_5KC: 211 - op_model_mipsxx.cpu_type = "mips/5K"; 215 + op_model_mipsxx_ops.cpu_type = "mips/5K"; 212 216 break; 213 217 214 218 case CPU_SB1: 215 219 case CPU_SB1A: 216 - op_model_mipsxx.cpu_type = "mips/sb1"; 220 + op_model_mipsxx_ops.cpu_type = "mips/sb1"; 217 221 break; 218 222 219 223 default: ··· 233 229 234 230 static void mipsxx_exit(void) 235 231 { 236 - reset_counters(op_model_mipsxx.num_counters); 232 + reset_counters(op_model_mipsxx_ops.num_counters); 237 233 238 234 perf_irq = null_perf_irq; 239 235 } 240 236 241 - struct op_mips_model op_model_mipsxx = { 237 + struct op_mips_model op_model_mipsxx_ops = { 242 238 .reg_setup = mipsxx_reg_setup, 243 239 .cpu_setup = mipsxx_cpu_setup, 244 240 .init = mipsxx_init,
+1 -1
arch/mips/oprofile/op_model_rm9000.c
··· 126 126 free_irq(rm9000_perfcount_irq, NULL); 127 127 } 128 128 129 - struct op_mips_model op_model_rm9000 = { 129 + struct op_mips_model op_model_rm9000_ops = { 130 130 .reg_setup = rm9000_reg_setup, 131 131 .cpu_setup = rm9000_cpu_setup, 132 132 .init = rm9000_init,
+2 -2
arch/mips/sgi-ip32/ip32-irq.c
··· 31 31 /* issue a PIO read to make sure no PIO writes are pending */ 32 32 static void inline flush_crime_bus(void) 33 33 { 34 - volatile unsigned long junk = crime->control; 34 + crime->control; 35 35 } 36 36 37 37 static void inline flush_mace_bus(void) 38 38 { 39 - volatile unsigned long junk = mace->perif.ctrl.misc; 39 + mace->perif.ctrl.misc; 40 40 } 41 41 42 42 #undef DEBUG_IRQ
+45 -3
arch/powerpc/kernel/prom_init.c
··· 2057 2057 2058 2058 } 2059 2059 2060 - 2061 - static void __init fixup_device_tree(void) 2060 + #ifdef CONFIG_PPC_MAPLE 2061 + /* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property. 2062 + * The values are bad, and it doesn't even have the right number of cells. */ 2063 + static void __init fixup_device_tree_maple(void) 2062 2064 { 2065 + phandle isa; 2066 + u32 isa_ranges[6]; 2067 + 2068 + isa = call_prom("finddevice", 1, 1, ADDR("/ht@0/isa@4")); 2069 + if (!PHANDLE_VALID(isa)) 2070 + return; 2071 + 2072 + if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges)) 2073 + == PROM_ERROR) 2074 + return; 2075 + 2076 + if (isa_ranges[0] != 0x1 || 2077 + isa_ranges[1] != 0xf4000000 || 2078 + isa_ranges[2] != 0x00010000) 2079 + return; 2080 + 2081 + prom_printf("fixing up bogus ISA range on Maple...\n"); 2082 + 2083 + isa_ranges[0] = 0x1; 2084 + isa_ranges[1] = 0x0; 2085 + isa_ranges[2] = 0x01002000; /* IO space; PCI device = 4 */ 2086 + isa_ranges[3] = 0x0; 2087 + isa_ranges[4] = 0x0; 2088 + isa_ranges[5] = 0x00010000; 2089 + prom_setprop(isa, "/ht@0/isa@4", "ranges", 2090 + isa_ranges, sizeof(isa_ranges)); 2091 + } 2092 + #else 2093 + #define fixup_device_tree_maple() 2094 + #endif 2095 + 2063 2096 #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) 2097 + static void __init fixup_device_tree_pmac(void) 2098 + { 2064 2099 phandle u3, i2c, mpic; 2065 2100 u32 u3_rev; 2066 2101 u32 interrupts[2]; ··· 2132 2097 parent = (u32)mpic; 2133 2098 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", 2134 2099 &parent, sizeof(parent)); 2135 - #endif 2136 2100 } 2101 + #else 2102 + #define fixup_device_tree_pmac() 2103 + #endif 2137 2104 2105 + static void __init fixup_device_tree(void) 2106 + { 2107 + fixup_device_tree_maple(); 2108 + fixup_device_tree_pmac(); 2109 + } 2138 2110 2139 2111 static void __init prom_find_boot_cpu(void) 2140 2112 {
+12
arch/powerpc/platforms/powermac/low_i2c.c
··· 1157 1157 /* some quirks for platform function decoding */ 1158 1158 enum { 1159 1159 pmac_i2c_quirk_invmask = 0x00000001u, 1160 + pmac_i2c_quirk_skip = 0x00000002u, 1160 1161 }; 1161 1162 1162 1163 static void pmac_i2c_devscan(void (*callback)(struct device_node *dev, ··· 1173 1172 /* XXX Study device-tree's & apple drivers are get the quirks 1174 1173 * right ! 1175 1174 */ 1175 + /* Workaround: It seems that running the clockspreading 1176 + * properties on the eMac will cause lockups during boot. 1177 + * The machine seems to work fine without that. So for now, 1178 + * let's make sure i2c-hwclock doesn't match about "imic" 1179 + * clocks and we'll figure out if we really need to do 1180 + * something special about those later. 1181 + */ 1182 + { "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip }, 1183 + { "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip }, 1176 1184 { "i2c-hwclock", NULL, pmac_i2c_quirk_invmask }, 1177 1185 { "i2c-cpu-voltage", NULL, 0}, 1178 1186 { "temp-monitor", NULL, 0 }, ··· 1208 1198 if (p->compatible && 1209 1199 !device_is_compatible(np, p->compatible)) 1210 1200 continue; 1201 + if (p->quirks & pmac_i2c_quirk_skip) 1202 + break; 1211 1203 callback(np, p->quirks); 1212 1204 break; 1213 1205 }
+11 -7
arch/powerpc/platforms/powermac/pfunc_core.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/spinlock.h> 13 13 #include <linux/module.h> 14 + #include <linux/mutex.h> 14 15 15 16 #include <asm/semaphore.h> 16 17 #include <asm/prom.h> ··· 547 546 548 547 static LIST_HEAD(pmf_devices); 549 548 static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED; 549 + static DEFINE_MUTEX(pmf_irq_mutex); 550 550 551 551 static void pmf_release_device(struct kref *kref) 552 552 { ··· 866 864 867 865 spin_lock_irqsave(&pmf_lock, flags); 868 866 func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN); 869 - if (func == NULL) { 870 - spin_unlock_irqrestore(&pmf_lock, flags); 867 + if (func) 868 + func = pmf_get_function(func); 869 + spin_unlock_irqrestore(&pmf_lock, flags); 870 + if (func == NULL) 871 871 return -ENODEV; 872 - } 872 + mutex_lock(&pmf_irq_mutex); 873 873 if (list_empty(&func->irq_clients)) 874 874 func->dev->handlers->irq_enable(func); 875 875 list_add(&client->link, &func->irq_clients); 876 876 client->func = func; 877 - spin_unlock_irqrestore(&pmf_lock, flags); 877 + mutex_unlock(&pmf_irq_mutex); 878 878 879 879 return 0; 880 880 } ··· 885 881 void pmf_unregister_irq_client(struct pmf_irq_client *client) 886 882 { 887 883 struct pmf_function *func = client->func; 888 - unsigned long flags; 889 884 890 885 BUG_ON(func == NULL); 891 886 892 - spin_lock_irqsave(&pmf_lock, flags); 887 + mutex_lock(&pmf_irq_mutex); 893 888 client->func = NULL; 894 889 list_del(&client->link); 895 890 if (list_empty(&func->irq_clients)) 896 891 func->dev->handlers->irq_disable(func); 897 - spin_unlock_irqrestore(&pmf_lock, flags); 892 + mutex_unlock(&pmf_irq_mutex); 893 + pmf_put_function(func); 898 894 } 899 895 EXPORT_SYMBOL_GPL(pmf_unregister_irq_client); 900 896
+12
arch/powerpc/platforms/powermac/setup.c
··· 463 463 return 0; 464 464 } 465 465 466 + static int pmac_pm_valid(suspend_state_t state) 467 + { 468 + switch (state) { 469 + case PM_SUSPEND_DISK: 470 + return 1; 471 + /* can't do any other states via generic mechanism yet */ 472 + default: 473 + return 0; 474 + } 475 + } 476 + 466 477 static struct pm_ops pmac_pm_ops = { 467 478 .pm_disk_mode = PM_DISK_SHUTDOWN, 468 479 .prepare = pmac_pm_prepare, 469 480 .enter = pmac_pm_enter, 470 481 .finish = pmac_pm_finish, 482 + .valid = pmac_pm_valid, 471 483 }; 472 484 473 485 #endif /* CONFIG_SOFTWARE_SUSPEND */
+1 -1
arch/ppc/kernel/asm-offsets.c
··· 134 134 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 135 135 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); 136 136 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 137 - DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, flags)); 137 + DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); 138 138 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 139 139 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 140 140
+5 -5
arch/ppc/platforms/mpc8272ads_setup.c
··· 279 279 static const struct platform_notify_dev_map dev_map[] = { 280 280 { 281 281 .bus_id = "fsl-cpm-fcc", 282 - .rtn = mpc8272ads_fixup_enet_pdata 282 + .rtn = mpc8272ads_fixup_enet_pdata, 283 283 }, 284 284 { 285 285 .bus_id = "fsl-cpm-scc:uart", 286 - .rtn = mpc 286 + .rtn = mpc8272ads_fixup_uart_pdata, 287 287 }, 288 288 { 289 289 .bus_id = NULL ··· 335 335 struct platform_device* pdev = NULL; 336 336 if(index) { /*assume SCC4 here*/ 337 337 pdev = &ppc_sys_platform_devices[MPC82xx_CPM_SCC4]; 338 - pinfo = &mpc8272<F12>_uart_pdata[1]; 338 + pinfo = &mpc8272_uart_pdata[fsid_scc4_uart]; 339 339 } else { /*over SCC1*/ 340 340 pdev = &ppc_sys_platform_devices[MPC82xx_CPM_SCC1]; 341 - pinfo = &mpc8272_uart_pdata[0]; 341 + pinfo = &mpc8272_uart_pdata[fsid_scc1_uart]; 342 342 } 343 343 344 344 pinfo->uart_clk = bd->bi_intfreq; 345 345 pdev->dev.platform_data = pinfo; 346 - ppc_sys_fixup_mem_resource(pdev, IMAP_ADDR); 346 + ppc_sys_fixup_mem_resource(pdev, CPM_MAP_ADDR); 347 347 return NULL; 348 348 } 349 349
+8 -8
arch/ppc/syslib/pq2_devices.c
··· 121 121 .num_resources = 3, 122 122 .resource = (struct resource[]) { 123 123 { 124 - .name = "scc_mem", 124 + .name = "regs", 125 125 .start = 0x11A00, 126 126 .end = 0x11A1F, 127 127 .flags = IORESOURCE_MEM, 128 128 }, 129 129 { 130 - .name = "scc_pram", 130 + .name = "pram", 131 131 .start = 0x8000, 132 132 .end = 0x80ff, 133 133 .flags = IORESOURCE_MEM, ··· 145 145 .num_resources = 3, 146 146 .resource = (struct resource[]) { 147 147 { 148 - .name = "scc_mem", 148 + .name = "regs", 149 149 .start = 0x11A20, 150 150 .end = 0x11A3F, 151 151 .flags = IORESOURCE_MEM, 152 152 }, 153 153 { 154 - .name = "scc_pram", 154 + .name = "pram", 155 155 .start = 0x8100, 156 156 .end = 0x81ff, 157 157 .flags = IORESOURCE_MEM, ··· 169 169 .num_resources = 3, 170 170 .resource = (struct resource[]) { 171 171 { 172 - .name = "scc_mem", 172 + .name = "regs", 173 173 .start = 0x11A40, 174 174 .end = 0x11A5F, 175 175 .flags = IORESOURCE_MEM, 176 176 }, 177 177 { 178 - .name = "scc_pram", 178 + .name = "pram", 179 179 .start = 0x8200, 180 180 .end = 0x82ff, 181 181 .flags = IORESOURCE_MEM, ··· 193 193 .num_resources = 3, 194 194 .resource = (struct resource[]) { 195 195 { 196 - .name = "scc_mem", 196 + .name = "regs", 197 197 .start = 0x11A60, 198 198 .end = 0x11A7F, 199 199 .flags = IORESOURCE_MEM, 200 200 }, 201 201 { 202 - .name = "scc_pram", 202 + .name = "pram", 203 203 .start = 0x8300, 204 204 .end = 0x83ff, 205 205 .flags = IORESOURCE_MEM,
+4 -4
arch/ppc/syslib/pq2_sys.c
··· 139 139 .ppc_sys_name = "8272", 140 140 .mask = 0x0000ff00, 141 141 .value = 0x00000c00, 142 - .num_devices = 11, 142 + .num_devices = 12, 143 143 .device_list = (enum ppc_sys_devices[]) 144 144 { 145 145 MPC82xx_CPM_FCC1, MPC82xx_CPM_FCC2, MPC82xx_CPM_SCC1, 146 - MPC82xx_CPM_SCC2, MPC82xx_CPM_SCC3, MPC82xx_CPM_SMC1, 147 - MPC82xx_CPM_SMC2, MPC82xx_CPM_SPI, MPC82xx_CPM_I2C, 148 - MPC82xx_CPM_USB, MPC82xx_SEC1, 146 + MPC82xx_CPM_SCC2, MPC82xx_CPM_SCC3, MPC82xx_CPM_SCC4, 147 + MPC82xx_CPM_SMC1, MPC82xx_CPM_SMC2, MPC82xx_CPM_SPI, 148 + MPC82xx_CPM_I2C, MPC82xx_CPM_USB, MPC82xx_SEC1, 149 149 }, 150 150 }, 151 151 /* below is a list of the 8280 family of processors */
+1 -1
arch/s390/kernel/time.c
··· 272 272 next = next_timer_interrupt(); 273 273 do { 274 274 seq = read_seqbegin_irqsave(&xtime_lock, flags); 275 - timer = (__u64 next) - (__u64 jiffies) + jiffies_64; 275 + timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64; 276 276 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 277 277 todval = -1ULL; 278 278 /* Be careful about overflows. */
+30
arch/sparc64/kernel/head.S
··· 10 10 #include <linux/config.h> 11 11 #include <linux/version.h> 12 12 #include <linux/errno.h> 13 + #include <linux/threads.h> 13 14 #include <asm/thread_info.h> 14 15 #include <asm/asi.h> 15 16 #include <asm/pstate.h> ··· 493 492 mov %l6, %o1 ! OpenPROM stack 494 493 call prom_init 495 494 mov %l7, %o0 ! OpenPROM cif handler 495 + 496 + /* Initialize current_thread_info()->cpu as early as possible. 497 + * In order to do that accurately we have to patch up the get_cpuid() 498 + * assembler sequences. And that, in turn, requires that we know 499 + * if we are on a Starfire box or not. While we're here, patch up 500 + * the sun4v sequences as well. 501 + */ 502 + call check_if_starfire 503 + nop 504 + call per_cpu_patch 505 + nop 506 + call sun4v_patch 507 + nop 508 + 509 + #ifdef CONFIG_SMP 510 + call hard_smp_processor_id 511 + nop 512 + cmp %o0, NR_CPUS 513 + blu,pt %xcc, 1f 514 + nop 515 + call boot_cpu_id_too_large 516 + nop 517 + /* Not reached... */ 518 + 519 + 1: 520 + #else 521 + mov 0, %o0 522 + #endif 523 + stb %o0, [%g6 + TI_CPU] 496 524 497 525 /* Off we go.... */ 498 526 call start_kernel
+11 -12
arch/sparc64/kernel/setup.c
··· 220 220 221 221 static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; 222 222 223 - static void __init per_cpu_patch(void) 223 + void __init per_cpu_patch(void) 224 224 { 225 225 struct cpuid_patch_entry *p; 226 226 unsigned long ver; ··· 280 280 } 281 281 } 282 282 283 - static void __init sun4v_patch(void) 283 + void __init sun4v_patch(void) 284 284 { 285 285 struct sun4v_1insn_patch_entry *p1; 286 286 struct sun4v_2insn_patch_entry *p2; ··· 315 315 } 316 316 } 317 317 318 + #ifdef CONFIG_SMP 319 + void __init boot_cpu_id_too_large(int cpu) 320 + { 321 + prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", 322 + cpu, NR_CPUS); 323 + prom_halt(); 324 + } 325 + #endif 326 + 318 327 void __init setup_arch(char **cmdline_p) 319 328 { 320 329 /* Initialize PROM console and command line. */ ··· 340 331 #elif defined(CONFIG_PROM_CONSOLE) 341 332 conswitchp = &prom_con; 342 333 #endif 343 - 344 - /* Work out if we are starfire early on */ 345 - check_if_starfire(); 346 - 347 - /* Now we know enough to patch the get_cpuid sequences 348 - * used by trap code. 349 - */ 350 - per_cpu_patch(); 351 - 352 - sun4v_patch(); 353 334 354 335 boot_flags_init(*cmdline_p); 355 336
+3 -13
arch/sparc64/kernel/smp.c
··· 1264 1264 boot_cpu_id = hard_smp_processor_id(); 1265 1265 current_tick_offset = timer_tick_offset; 1266 1266 1267 - cpu_set(boot_cpu_id, cpu_online_map); 1268 1267 prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1; 1269 1268 } 1270 1269 ··· 1344 1345 1345 1346 void __devinit smp_prepare_boot_cpu(void) 1346 1347 { 1347 - int cpu = hard_smp_processor_id(); 1348 - 1349 - if (cpu >= NR_CPUS) { 1350 - prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); 1351 - prom_halt(); 1352 - } 1353 - 1354 - current_thread_info()->cpu = cpu; 1355 - __local_per_cpu_offset = __per_cpu_offset(cpu); 1356 - 1357 - cpu_set(smp_processor_id(), cpu_online_map); 1358 - cpu_set(smp_processor_id(), phys_cpu_present_map); 1359 1348 } 1360 1349 1361 1350 int __devinit __cpu_up(unsigned int cpu) ··· 1420 1433 1421 1434 for (i = 0; i < NR_CPUS; i++, ptr += size) 1422 1435 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 1436 + 1437 + /* Setup %g5 for the boot cpu. */ 1438 + __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); 1423 1439 }
+3 -2
arch/sparc64/lib/checksum.S
··· 165 165 sll %g1, 8, %g1 166 166 or %o5, %g1, %o4 167 167 168 - 1: add %o2, %o4, %o2 168 + 1: addcc %o2, %o4, %o2 169 + addc %g0, %o2, %o2 169 170 170 171 csum_partial_finish: 171 172 retl 172 - mov %o2, %o0 173 + srl %o2, 0, %o0
+3 -2
arch/sparc64/lib/csum_copy.S
··· 221 221 sll %g1, 8, %g1 222 222 or %o5, %g1, %o4 223 223 224 - 1: add %o3, %o4, %o3 224 + 1: addcc %o3, %o4, %o3 225 + addc %g0, %o3, %o3 225 226 226 227 70: 227 228 retl 228 - mov %o3, %o0 229 + srl %o3, 0, %o0 229 230 230 231 95: mov 0, GLOBAL_SPARE 231 232 brlez,pn %o2, 4f
+4
arch/um/Makefile-i386
··· 33 33 # prevent gcc from keeping the stack 16 byte aligned. Taken from i386. 34 34 cflags-y += $(call cc-option,-mpreferred-stack-boundary=2) 35 35 36 + # Prevent sprintf in nfsd from being converted to strcpy and resulting in 37 + # an unresolved reference. 38 + cflags-y += -ffreestanding 39 + 36 40 CFLAGS += $(cflags-y) 37 41 USER_CFLAGS += $(cflags-y)
+2 -11
arch/um/include/kern_util.h
··· 120 120 extern void free_irq(unsigned int, void *); 121 121 extern int cpu(void); 122 122 123 + extern void time_init_kern(void); 124 + 123 125 /* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */ 124 126 extern int __cant_sleep(void); 125 127 extern void segv_handler(int sig, union uml_pt_regs *regs); 126 128 extern void sigio_handler(int sig, union uml_pt_regs *regs); 127 129 128 130 #endif 129 - 130 - /* 131 - * Overrides for Emacs so that we follow Linus's tabbing style. 132 - * Emacs will notice this stuff at the end of the file and automatically 133 - * adjust the settings for this buffer only. This must remain at the end 134 - * of the file. 135 - * --------------------------------------------------------------------------- 136 - * Local variables: 137 - * c-file-style: "linux" 138 - * End: 139 - */
+10
arch/um/kernel/time_kern.c
··· 84 84 } 85 85 } 86 86 87 + 88 + void time_init_kern(void) 89 + { 90 + unsigned long long nsecs; 91 + 92 + nsecs = os_nsecs(); 93 + set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION, 94 + -nsecs % BILLION); 95 + } 96 + 87 97 void do_boot_timer_handler(struct sigcontext * sc) 88 98 { 89 99 struct pt_regs regs;
+1 -1
arch/um/os-Linux/main.c
··· 59 59 initcall_t *call; 60 60 61 61 call = &__uml_initcall_start; 62 - while (call < &__uml_initcall_end){; 62 + while (call < &__uml_initcall_end){ 63 63 (*call)(); 64 64 call++; 65 65 }
+1 -9
arch/um/os-Linux/time.c
··· 81 81 set_interval(ITIMER_REAL); 82 82 } 83 83 84 - extern void ktime_get_ts(struct timespec *ts); 85 - #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) 86 - 87 84 void time_init(void) 88 85 { 89 - struct timespec now; 90 - 91 86 if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR) 92 87 panic("Couldn't set SIGVTALRM handler"); 93 88 set_interval(ITIMER_VIRTUAL); 94 - 95 - do_posix_clock_monotonic_gettime(&now); 96 - wall_to_monotonic.tv_sec = -now.tv_sec; 97 - wall_to_monotonic.tv_nsec = -now.tv_nsec; 89 + time_init_kern(); 98 90 } 99 91 100 92 unsigned long long os_nsecs(void)
+5 -4
arch/um/sys-i386/syscalls.c
··· 99 99 100 100 switch (call) { 101 101 case SEMOP: 102 - return sys_semtimedop(first, (struct sembuf *) ptr, second, 103 - NULL); 102 + return sys_semtimedop(first, (struct sembuf __user *) ptr, 103 + second, NULL); 104 104 case SEMTIMEDOP: 105 - return sys_semtimedop(first, (struct sembuf *) ptr, second, 106 - (const struct timespec *) fifth); 105 + return sys_semtimedop(first, (struct sembuf __user *) ptr, 106 + second, 107 + (const struct timespec __user *) fifth); 107 108 case SEMGET: 108 109 return sys_semget (first, second, third); 109 110 case SEMCTL: {
+14 -10
arch/um/sys-x86_64/signal.c
··· 21 21 #include "skas.h" 22 22 23 23 static int copy_sc_from_user_skas(struct pt_regs *regs, 24 - struct sigcontext *from) 24 + struct sigcontext __user *from) 25 25 { 26 26 int err = 0; 27 27 ··· 54 54 return(err); 55 55 } 56 56 57 - int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, 57 + int copy_sc_to_user_skas(struct sigcontext __user *to, 58 + struct _fpstate __user *to_fp, 58 59 struct pt_regs *regs, unsigned long mask, 59 60 unsigned long sp) 60 61 { ··· 107 106 #endif 108 107 109 108 #ifdef CONFIG_MODE_TT 110 - int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from, 109 + int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from, 111 110 int fpsize) 112 111 { 113 - struct _fpstate *to_fp, *from_fp; 112 + struct _fpstate *to_fp; 113 + struct _fpstate __user *from_fp; 114 114 unsigned long sigs; 115 115 int err; 116 116 ··· 126 124 return(err); 127 125 } 128 126 129 - int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, 127 + int copy_sc_to_user_tt(struct sigcontext __user *to, struct _fpstate __user *fp, 130 128 struct sigcontext *from, int fpsize, unsigned long sp) 131 129 { 132 - struct _fpstate *to_fp, *from_fp; 130 + struct _fpstate __user *to_fp; 131 + struct _fpstate *from_fp; 133 132 int err; 134 133 135 - to_fp = (fp ? fp : (struct _fpstate *) (to + 1)); 134 + to_fp = (fp ? 
fp : (struct _fpstate __user *) (to + 1)); 136 135 from_fp = from->fpstate; 137 136 err = copy_to_user(to, from, sizeof(*to)); 138 137 /* The SP in the sigcontext is the updated one for the signal ··· 161 158 return(ret); 162 159 } 163 160 164 - static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, 161 + static int copy_sc_to_user(struct sigcontext __user *to, 162 + struct _fpstate __user *fp, 165 163 struct pt_regs *from, unsigned long mask, 166 164 unsigned long sp) 167 165 { ··· 173 169 174 170 struct rt_sigframe 175 171 { 176 - char *pretcode; 172 + char __user *pretcode; 177 173 struct ucontext uc; 178 174 struct siginfo info; 179 175 }; ··· 192 188 193 189 frame = (struct rt_sigframe __user *) 194 190 round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8; 195 - frame = (struct rt_sigframe *) ((unsigned long) frame - 128); 191 + frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128); 196 192 197 193 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) 198 194 goto out;
+1 -1
arch/um/sys-x86_64/syscalls.c
··· 45 45 case ARCH_GET_GS: 46 46 ret = arch_prctl(code, (unsigned long) &tmp); 47 47 if(!ret) 48 - ret = put_user(tmp, &addr); 48 + ret = put_user(tmp, (long __user *)addr); 49 49 break; 50 50 default: 51 51 ret = -EINVAL;
+2 -2
arch/x86_64/ia32/ia32_binfmt.c
··· 339 339 struct mm_struct *mm = current->mm; 340 340 int i, ret; 341 341 342 - stack_base = IA32_STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE; 342 + stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE; 343 343 mm->arg_start = bprm->p + stack_base; 344 344 345 345 bprm->p += stack_base; ··· 357 357 { 358 358 mpnt->vm_mm = mm; 359 359 mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p; 360 - mpnt->vm_end = IA32_STACK_TOP; 360 + mpnt->vm_end = stack_top; 361 361 if (executable_stack == EXSTACK_ENABLE_X) 362 362 mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC; 363 363 else if (executable_stack == EXSTACK_DISABLE_X)
+1 -1
arch/x86_64/kernel/e820.c
··· 149 149 addr = start; 150 150 if (addr > ei->addr + ei->size) 151 151 continue; 152 - while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size) 152 + while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size) 153 153 ; 154 154 last = addr + size; 155 155 if (last > ei->addr + ei->size)
+1 -6
arch/x86_64/kernel/entry.S
··· 281 281 ja 1f 282 282 movq %r10,%rcx /* fixup for C */ 283 283 call *sys_call_table(,%rax,8) 284 - movq %rax,RAX-ARGOFFSET(%rsp) 285 - 1: SAVE_REST 286 - movq %rsp,%rdi 287 - call syscall_trace_leave 288 - RESTORE_TOP_OF_STACK %rbx 289 - RESTORE_REST 284 + 1: movq %rax,RAX-ARGOFFSET(%rsp) 290 285 /* Use IRET because user could have changed frame */ 291 286 jmp int_ret_from_sys_call 292 287 CFI_ENDPROC
+4
arch/x86_64/kernel/pci-dma.c
··· 54 54 else 55 55 #endif 56 56 node = numa_node_id(); 57 + 58 + if (node < first_node(node_online_map)) 59 + node = first_node(node_online_map); 60 + 57 61 page = alloc_pages_node(node, gfp, order); 58 62 return page ? page_address(page) : NULL; 59 63 }
+2 -4
arch/x86_64/kernel/pci-gart.c
··· 631 631 printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n"); 632 632 if (end_pfn > MAX_DMA32_PFN) { 633 633 printk(KERN_ERR "WARNING more than 4GB of memory " 634 - "but IOMMU not compiled in.\n" 635 - KERN_ERR "WARNING 32bit PCI may malfunction.\n" 636 - KERN_ERR "You might want to enable " 637 - "CONFIG_GART_IOMMU\n"); 634 + "but IOMMU not available.\n" 635 + KERN_ERR "WARNING 32bit PCI may malfunction.\n"); 638 636 } 639 637 return -1; 640 638 }
+1 -1
arch/x86_64/kernel/pmtimer.c
··· 68 68 offset_delay = delta % (USEC_PER_SEC / HZ); 69 69 70 70 rdtscll(tsc); 71 - vxtime.last_tsc = tsc - offset_delay * cpu_khz; 71 + vxtime.last_tsc = tsc - offset_delay * (u64)cpu_khz / 1000; 72 72 73 73 /* don't calculate delay for first run, 74 74 or if we've got less then a tick */
+1 -1
arch/x86_64/kernel/setup.c
··· 1051 1051 for now. */ 1052 1052 node = apicid_to_node[hard_smp_processor_id()]; 1053 1053 if (node == NUMA_NO_NODE) 1054 - node = 0; 1054 + node = first_node(node_online_map); 1055 1055 numa_set_node(cpu, node); 1056 1056 1057 1057 if (acpi_numa > 0)
+3 -1
arch/x86_64/mm/srat.c
··· 399 399 /* First clean up the node list */ 400 400 for (i = 0; i < MAX_NUMNODES; i++) { 401 401 cutoff_node(i, start, end); 402 - if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) 402 + if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) { 403 403 unparse_node(i); 404 + node_set_offline(i); 405 + } 404 406 } 405 407 406 408 if (acpi_numa <= 0)
+54 -23
block/cfq-iosched.c
··· 33 33 34 34 #define CFQ_KEY_ASYNC (0) 35 35 36 - static DEFINE_RWLOCK(cfq_exit_lock); 36 + static DEFINE_SPINLOCK(cfq_exit_lock); 37 37 38 38 /* 39 39 * for the hash of cfqq inside the cfqd ··· 133 133 mempool_t *crq_pool; 134 134 135 135 int rq_in_driver; 136 + int hw_tag; 136 137 137 138 /* 138 139 * schedule slice state info ··· 501 500 502 501 /* 503 502 * if queue was preempted, just add to front to be fair. busy_rr 504 - * isn't sorted. 503 + * isn't sorted, but insert at the back for fairness. 505 504 */ 506 505 if (preempted || list == &cfqd->busy_rr) { 507 - list_add(&cfqq->cfq_list, list); 506 + if (preempted) 507 + list = list->prev; 508 + 509 + list_add_tail(&cfqq->cfq_list, list); 508 510 return; 509 511 } 510 512 ··· 668 664 struct cfq_data *cfqd = q->elevator->elevator_data; 669 665 670 666 cfqd->rq_in_driver++; 667 + 668 + /* 669 + * If the depth is larger 1, it really could be queueing. But lets 670 + * make the mark a little higher - idling could still be good for 671 + * low queueing, and a low queueing number could also just indicate 672 + * a SCSI mid layer like behaviour where limit+1 is often seen. 673 + */ 674 + if (!cfqd->hw_tag && cfqd->rq_in_driver > 4) 675 + cfqd->hw_tag = 1; 671 676 } 672 677 673 678 static void cfq_deactivate_request(request_queue_t *q, struct request *rq) ··· 890 877 */ 891 878 if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) 892 879 cfqq = list_entry_cfqq(cfqd->cur_rr.next); 880 + 881 + /* 882 + * If no new queues are available, check if the busy list has some 883 + * before falling back to idle io. 
884 + */ 885 + if (!cfqq && !list_empty(&cfqd->busy_rr)) 886 + cfqq = list_entry_cfqq(cfqd->busy_rr.next); 893 887 894 888 /* 895 889 * if we have idle queues and no rt or be queues had pending ··· 1304 1284 /* 1305 1285 * put the reference this task is holding to the various queues 1306 1286 */ 1307 - read_lock_irqsave(&cfq_exit_lock, flags); 1287 + spin_lock_irqsave(&cfq_exit_lock, flags); 1308 1288 1309 1289 n = rb_first(&ioc->cic_root); 1310 1290 while (n != NULL) { ··· 1314 1294 n = rb_next(n); 1315 1295 } 1316 1296 1317 - read_unlock_irqrestore(&cfq_exit_lock, flags); 1297 + spin_unlock_irqrestore(&cfq_exit_lock, flags); 1318 1298 } 1319 1299 1320 1300 static struct cfq_io_context * ··· 1420 1400 struct cfq_io_context *cic; 1421 1401 struct rb_node *n; 1422 1402 1423 - write_lock(&cfq_exit_lock); 1403 + spin_lock(&cfq_exit_lock); 1424 1404 1425 1405 n = rb_first(&ioc->cic_root); 1426 1406 while (n != NULL) { 1427 1407 cic = rb_entry(n, struct cfq_io_context, rb_node); 1428 - 1408 + 1429 1409 changed_ioprio(cic); 1430 1410 n = rb_next(n); 1431 1411 } 1432 1412 1433 - write_unlock(&cfq_exit_lock); 1413 + spin_unlock(&cfq_exit_lock); 1434 1414 1435 1415 return 0; 1436 1416 } ··· 1478 1458 * set ->slice_left to allow preemption for a new process 1479 1459 */ 1480 1460 cfqq->slice_left = 2 * cfqd->cfq_slice_idle; 1481 - cfq_mark_cfqq_idle_window(cfqq); 1461 + if (!cfqd->hw_tag) 1462 + cfq_mark_cfqq_idle_window(cfqq); 1482 1463 cfq_mark_cfqq_prio_changed(cfqq); 1483 1464 cfq_init_prio_data(cfqq); 1484 1465 } ··· 1496 1475 static void 1497 1476 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic) 1498 1477 { 1499 - read_lock(&cfq_exit_lock); 1478 + spin_lock(&cfq_exit_lock); 1500 1479 rb_erase(&cic->rb_node, &ioc->cic_root); 1501 - read_unlock(&cfq_exit_lock); 1480 + list_del_init(&cic->queue_list); 1481 + spin_unlock(&cfq_exit_lock); 1502 1482 kmem_cache_free(cfq_ioc_pool, cic); 1503 1483 atomic_dec(&ioc_count); 1504 1484 } ··· 1567 1545 BUG(); 
1568 1546 } 1569 1547 1570 - read_lock(&cfq_exit_lock); 1548 + spin_lock(&cfq_exit_lock); 1571 1549 rb_link_node(&cic->rb_node, parent, p); 1572 1550 rb_insert_color(&cic->rb_node, &ioc->cic_root); 1573 1551 list_add(&cic->queue_list, &cfqd->cic_list); 1574 - read_unlock(&cfq_exit_lock); 1552 + spin_unlock(&cfq_exit_lock); 1575 1553 } 1576 1554 1577 1555 /* ··· 1670 1648 { 1671 1649 int enable_idle = cfq_cfqq_idle_window(cfqq); 1672 1650 1673 - if (!cic->ioc->task || !cfqd->cfq_slice_idle) 1651 + if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag) 1674 1652 enable_idle = 0; 1675 1653 else if (sample_valid(cic->ttime_samples)) { 1676 1654 if (cic->ttime_mean > cfqd->cfq_slice_idle) ··· 1761 1739 1762 1740 cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); 1763 1741 1742 + cic = crq->io_context; 1743 + 1764 1744 /* 1765 1745 * we never wait for an async request and we don't allow preemption 1766 1746 * of an async request. so just return early 1767 1747 */ 1768 - if (!cfq_crq_is_sync(crq)) 1748 + if (!cfq_crq_is_sync(crq)) { 1749 + /* 1750 + * sync process issued an async request, if it's waiting 1751 + * then expire it and kick rq handling. 
1752 + */ 1753 + if (cic == cfqd->active_cic && 1754 + del_timer(&cfqd->idle_slice_timer)) { 1755 + cfq_slice_expired(cfqd, 0); 1756 + cfq_start_queueing(cfqd, cfqq); 1757 + } 1769 1758 return; 1770 - 1771 - cic = crq->io_context; 1759 + } 1772 1760 1773 1761 cfq_update_io_thinktime(cfqd, cic); 1774 1762 cfq_update_io_seektime(cfqd, cic, crq); ··· 2196 2164 * race with a non-idle queue, reset timer 2197 2165 */ 2198 2166 end = cfqd->last_end_request + CFQ_IDLE_GRACE; 2199 - if (!time_after_eq(jiffies, end)) { 2200 - cfqd->idle_class_timer.expires = end; 2201 - add_timer(&cfqd->idle_class_timer); 2202 - } else 2167 + if (!time_after_eq(jiffies, end)) 2168 + mod_timer(&cfqd->idle_class_timer, end); 2169 + else 2203 2170 cfq_schedule_dispatch(cfqd); 2204 2171 2205 2172 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); ··· 2218 2187 2219 2188 cfq_shutdown_timer_wq(cfqd); 2220 2189 2221 - write_lock(&cfq_exit_lock); 2190 + spin_lock(&cfq_exit_lock); 2222 2191 spin_lock_irq(q->queue_lock); 2223 2192 2224 2193 if (cfqd->active_queue) ··· 2241 2210 } 2242 2211 2243 2212 spin_unlock_irq(q->queue_lock); 2244 - write_unlock(&cfq_exit_lock); 2213 + spin_unlock(&cfq_exit_lock); 2245 2214 2246 2215 cfq_shutdown_timer_wq(cfqd); 2247 2216
+1 -4
drivers/base/power/suspend.c
··· 8 8 * 9 9 */ 10 10 11 - #include <linux/vt_kern.h> 12 11 #include <linux/device.h> 13 12 #include <linux/kallsyms.h> 14 13 #include <linux/pm.h> ··· 65 66 return error; 66 67 } 67 68 69 + 68 70 /** 69 71 * device_suspend - Save state and stop all devices in system. 70 72 * @state: Power state to put each device in. ··· 84 84 int device_suspend(pm_message_t state) 85 85 { 86 86 int error = 0; 87 - 88 - if (!is_console_suspend_safe()) 89 - return -EINVAL; 90 87 91 88 down(&dpm_sem); 92 89 down(&dpm_list_sem);
+1 -1
drivers/char/agp/Kconfig
··· 86 86 87 87 config AGP_SIS 88 88 tristate "SiS chipset support" 89 - depends on AGP && X86_32 89 + depends on AGP 90 90 help 91 91 This option gives you AGP support for the GLX component of 92 92 X on Silicon Integrated Systems [SiS] chipsets.
+3
drivers/char/agp/amd64-agp.c
··· 617 617 pci_set_power_state(pdev, PCI_D0); 618 618 pci_restore_state(pdev); 619 619 620 + if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) 621 + nforce3_agp_init(pdev); 622 + 620 623 return amd_8151_configure(); 621 624 } 622 625
+7
drivers/char/agp/via-agp.c
··· 345 345 .chipset_name = "PT880", 346 346 }, 347 347 348 + /* PT880 Ultra */ 349 + { 350 + .device_id = PCI_DEVICE_ID_VIA_PT880ULTRA, 351 + .chipset_name = "PT880 Ultra", 352 + }, 353 + 348 354 /* PT890 */ 349 355 { 350 356 .device_id = PCI_DEVICE_ID_VIA_8783_0, ··· 517 511 ID(PCI_DEVICE_ID_VIA_8763_0), 518 512 ID(PCI_DEVICE_ID_VIA_8378_0), 519 513 ID(PCI_DEVICE_ID_VIA_PT880), 514 + ID(PCI_DEVICE_ID_VIA_PT880ULTRA), 520 515 ID(PCI_DEVICE_ID_VIA_8783_0), 521 516 ID(PCI_DEVICE_ID_VIA_PX8X0_0), 522 517 ID(PCI_DEVICE_ID_VIA_3269_0),
+22 -16
drivers/char/ipmi/ipmi_si_intf.c
··· 1184 1184 static void port_cleanup(struct smi_info *info) 1185 1185 { 1186 1186 unsigned int addr = info->io.addr_data; 1187 - int mapsize; 1187 + int idx; 1188 1188 1189 1189 if (addr) { 1190 - mapsize = ((info->io_size * info->io.regspacing) 1191 - - (info->io.regspacing - info->io.regsize)); 1192 - 1193 - release_region (addr, mapsize); 1190 + for (idx = 0; idx < info->io_size; idx++) { 1191 + release_region(addr + idx * info->io.regspacing, 1192 + info->io.regsize); 1193 + } 1194 1194 } 1195 1195 } 1196 1196 1197 1197 static int port_setup(struct smi_info *info) 1198 1198 { 1199 1199 unsigned int addr = info->io.addr_data; 1200 - int mapsize; 1200 + int idx; 1201 1201 1202 1202 if (!addr) 1203 1203 return -ENODEV; ··· 1225 1225 return -EINVAL; 1226 1226 } 1227 1227 1228 - /* Calculate the total amount of memory to claim. This is an 1229 - * unusual looking calculation, but it avoids claiming any 1230 - * more memory than it has to. It will claim everything 1231 - * between the first address to the end of the last full 1232 - * register. */ 1233 - mapsize = ((info->io_size * info->io.regspacing) 1234 - - (info->io.regspacing - info->io.regsize)); 1235 - 1236 - if (request_region(addr, mapsize, DEVICE_NAME) == NULL) 1237 - return -EIO; 1228 + /* Some BIOSes reserve disjoint I/O regions in their ACPI 1229 + * tables. This causes problems when trying to register the 1230 + * entire I/O region. Therefore we must register each I/O 1231 + * port separately. 1232 + */ 1233 + for (idx = 0; idx < info->io_size; idx++) { 1234 + if (request_region(addr + idx * info->io.regspacing, 1235 + info->io.regsize, DEVICE_NAME) == NULL) { 1236 + /* Undo allocations */ 1237 + while (idx--) { 1238 + release_region(addr + idx * info->io.regspacing, 1239 + info->io.regsize); 1240 + } 1241 + return -EIO; 1242 + } 1243 + } 1238 1244 return 0; 1239 1245 } 1240 1246
+1 -1
drivers/char/pcmcia/cm4000_cs.c
··· 149 149 #define ZERO_DEV(dev) \ 150 150 memset(&dev->atr_csum,0, \ 151 151 sizeof(struct cm4000_dev) - \ 152 - /*link*/ sizeof(struct pcmcia_device) - \ 152 + /*link*/ sizeof(struct pcmcia_device *) - \ 153 153 /*node*/ sizeof(dev_node_t) - \ 154 154 /*atr*/ MAX_ATR*sizeof(char) - \ 155 155 /*rbuf*/ 512*sizeof(char) - \
+29 -60
drivers/char/tpm/tpm_bios.c
··· 105 105 "Non-Host Info" 106 106 }; 107 107 108 + struct tcpa_pc_event { 109 + u32 event_id; 110 + u32 event_size; 111 + u8 event_data[0]; 112 + }; 113 + 108 114 enum tcpa_pc_event_ids { 109 115 SMBIOS = 1, 110 116 BIS_CERT, ··· 120 114 NVRAM, 121 115 OPTION_ROM_EXEC, 122 116 OPTION_ROM_CONFIG, 123 - OPTION_ROM_MICROCODE, 117 + OPTION_ROM_MICROCODE = 10, 124 118 S_CRTM_VERSION, 125 119 S_CRTM_CONTENTS, 126 120 POST_CONTENTS, 121 + HOST_TABLE_OF_DEVICES, 127 122 }; 128 123 129 124 static const char* tcpa_pc_event_id_strings[] = { 130 - "" 125 + "", 131 126 "SMBIOS", 132 127 "BIS Certificate", 133 128 "POST BIOS ", ··· 137 130 "NVRAM", 138 131 "Option ROM", 139 132 "Option ROM config", 140 - "Option ROM microcode", 133 + "", 134 + "Option ROM microcode ", 141 135 "S-CRTM Version", 142 - "S-CRTM Contents", 143 - "S-CRTM POST Contents", 144 - "POST Contents", 136 + "S-CRTM Contents ", 137 + "POST Contents ", 138 + "Table of Devices", 145 139 }; 146 140 147 141 /* returns pointer to start of pos. 
entry of tcg log */ ··· 214 206 const char *name = ""; 215 207 char data[40] = ""; 216 208 int i, n_len = 0, d_len = 0; 217 - u32 event_id; 209 + struct tcpa_pc_event *pc_event; 218 210 219 211 switch(event->event_type) { 220 212 case PREBOOT: ··· 243 235 } 244 236 break; 245 237 case EVENT_TAG: 246 - event_id = be32_to_cpu(*((u32 *)event_entry)); 238 + pc_event = (struct tcpa_pc_event *)event_entry; 247 239 248 240 /* ToDo Row data -> Base64 */ 249 241 250 - switch (event_id) { 242 + switch (pc_event->event_id) { 251 243 case SMBIOS: 252 244 case BIS_CERT: 253 245 case CMOS: 254 246 case NVRAM: 255 247 case OPTION_ROM_EXEC: 256 248 case OPTION_ROM_CONFIG: 257 - case OPTION_ROM_MICROCODE: 258 249 case S_CRTM_VERSION: 259 - case S_CRTM_CONTENTS: 260 - case POST_CONTENTS: 261 - name = tcpa_pc_event_id_strings[event_id]; 250 + name = tcpa_pc_event_id_strings[pc_event->event_id]; 262 251 n_len = strlen(name); 263 252 break; 253 + /* hash data */ 264 254 case POST_BIOS_ROM: 265 255 case ESCD: 266 - name = tcpa_pc_event_id_strings[event_id]; 256 + case OPTION_ROM_MICROCODE: 257 + case S_CRTM_CONTENTS: 258 + case POST_CONTENTS: 259 + name = tcpa_pc_event_id_strings[pc_event->event_id]; 267 260 n_len = strlen(name); 268 261 for (i = 0; i < 20; i++) 269 - d_len += sprintf(data, "%02x", 270 - event_entry[8 + i]); 262 + d_len += sprintf(&data[2*i], "%02x", 263 + pc_event->event_data[i]); 271 264 break; 272 265 default: 273 266 break; ··· 284 275 285 276 static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v) 286 277 { 278 + struct tcpa_event *event = v; 279 + char *data = v; 280 + int i; 287 281 288 - char *eventname; 289 - char data[4]; 290 - u32 help; 291 - int i, len; 292 - struct tcpa_event *event = (struct tcpa_event *) v; 293 - unsigned char *event_entry = 294 - (unsigned char *) (v + sizeof(struct tcpa_event)); 295 - 296 - eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); 297 - if (!eventname) { 298 - printk(KERN_ERR "%s: ERROR - No Memory for event 
name\n ", 299 - __func__); 300 - return -ENOMEM; 301 - } 302 - 303 - /* 1st: PCR used is in little-endian format (4 bytes) */ 304 - help = le32_to_cpu(event->pcr_index); 305 - memcpy(data, &help, 4); 306 - for (i = 0; i < 4; i++) 282 + for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) 307 283 seq_putc(m, data[i]); 308 284 309 - /* 2nd: SHA1 (20 bytes) */ 310 - for (i = 0; i < 20; i++) 311 - seq_putc(m, event->pcr_value[i]); 312 - 313 - /* 3rd: event type identifier (4 bytes) */ 314 - help = le32_to_cpu(event->event_type); 315 - memcpy(data, &help, 4); 316 - for (i = 0; i < 4; i++) 317 - seq_putc(m, data[i]); 318 - 319 - len = 0; 320 - 321 - len += get_event_name(eventname, event, event_entry); 322 - 323 - /* 4th: filename <= 255 + \'0' delimiter */ 324 - if (len > TCG_EVENT_NAME_LEN_MAX) 325 - len = TCG_EVENT_NAME_LEN_MAX; 326 - 327 - for (i = 0; i < len; i++) 328 - seq_putc(m, eventname[i]); 329 - 330 - /* 5th: delimiter */ 331 - seq_putc(m, '\0'); 332 - 333 - kfree(eventname); 334 285 return 0; 335 286 } 336 287
-4
drivers/char/tpm/tpm_tis.c
··· 457 457 } 458 458 459 459 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); 460 - if ((vendor & 0xFFFF) == 0xFFFF) { 461 - rc = -ENODEV; 462 - goto out_err; 463 - } 464 460 465 461 /* Default timeouts */ 466 462 chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
-8
drivers/char/vt.c
··· 3238 3238 } 3239 3239 } 3240 3240 3241 - int is_console_suspend_safe(void) 3242 - { 3243 - /* It is unsafe to suspend devices while X has control of the 3244 - * hardware. Make sure we are running on a kernel-controlled console. 3245 - */ 3246 - return vc_cons[fg_console].d->vc_mode == KD_TEXT; 3247 - } 3248 - 3249 3241 /* 3250 3242 * Visible symbols for modules 3251 3243 */
+1 -1
drivers/i2c/busses/scx200_acb.c
··· 491 491 492 492 #define MSR_LBAR_SMB 0x5140000B 493 493 494 - static int scx200_add_cs553x(void) 494 + static __init int scx200_add_cs553x(void) 495 495 { 496 496 u32 low, hi; 497 497 u32 smb_base;
+11 -5
drivers/ide/pci/sgiioc4.c
··· 345 345 static u8 346 346 sgiioc4_INB(unsigned long port) 347 347 { 348 - u8 reg = (u8) inb(port); 348 + u8 reg = (u8) readb((void __iomem *) port); 349 349 350 350 if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */ 351 351 if (reg & 0x51) { /* Not busy...check for interrupt */ 352 352 unsigned long other_ir = port - 0x110; 353 - unsigned int intr_reg = (u32) inl(other_ir); 353 + unsigned int intr_reg = (u32) readl((void __iomem *) other_ir); 354 354 355 355 /* Clear the Interrupt, Error bits on the IOC4 */ 356 356 if (intr_reg & 0x03) { 357 - outl(0x03, other_ir); 358 - intr_reg = (u32) inl(other_ir); 357 + writel(0x03, (void __iomem *) other_ir); 358 + intr_reg = (u32) readl((void __iomem *) other_ir); 359 359 } 360 360 } 361 361 } ··· 606 606 hwif->ide_dma_host_off = &sgiioc4_ide_dma_host_off; 607 607 hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq; 608 608 hwif->ide_dma_timeout = &__ide_dma_timeout; 609 + 610 + /* 611 + * The IOC4 uses MMIO rather than Port IO. 612 + * It also needs special workarounds for INB. 613 + */ 614 + default_hwif_mmiops(hwif); 609 615 hwif->INB = &sgiioc4_INB; 610 616 } 611 617 ··· 749 743 module_init(ioc4_ide_init); 750 744 module_exit(ioc4_ide_exit); 751 745 752 - MODULE_AUTHOR("Aniket Malatpure - Silicon Graphics Inc. (SGI)"); 746 + MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon"); 753 747 MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card"); 754 748 MODULE_LICENSE("GPL");
+1 -1
drivers/ieee1394/sbp2.c
··· 845 845 &sbp2_highlevel, ud->ne->host, &sbp2_ops, 846 846 sizeof(struct sbp2_status_block), sizeof(quadlet_t), 847 847 0x010000000000ULL, CSR1212_ALL_SPACE_END); 848 - if (!scsi_id->status_fifo_addr) { 848 + if (scsi_id->status_fifo_addr == ~0ULL) { 849 849 SBP2_ERR("failed to allocate status FIFO address range"); 850 850 goto failed_alloc; 851 851 }
+21 -20
drivers/infiniband/hw/mthca/mthca_srq.c
··· 490 490 491 491 first_ind = srq->first_free; 492 492 493 - for (nreq = 0; wr; ++nreq, wr = wr->next) { 494 - if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { 495 - nreq = 0; 496 - 497 - doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); 498 - doorbell[1] = cpu_to_be32(srq->srqn << 8); 499 - 500 - /* 501 - * Make sure that descriptors are written 502 - * before doorbell is rung. 503 - */ 504 - wmb(); 505 - 506 - mthca_write64(doorbell, 507 - dev->kar + MTHCA_RECEIVE_DOORBELL, 508 - MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 509 - 510 - first_ind = srq->first_free; 511 - } 512 - 493 + for (nreq = 0; wr; wr = wr->next) { 513 494 ind = srq->first_free; 514 495 515 496 if (ind < 0) { ··· 550 569 551 570 srq->wrid[ind] = wr->wr_id; 552 571 srq->first_free = next_ind; 572 + 573 + ++nreq; 574 + if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { 575 + nreq = 0; 576 + 577 + doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); 578 + doorbell[1] = cpu_to_be32(srq->srqn << 8); 579 + 580 + /* 581 + * Make sure that descriptors are written 582 + * before doorbell is rung. 583 + */ 584 + wmb(); 585 + 586 + mthca_write64(doorbell, 587 + dev->kar + MTHCA_RECEIVE_DOORBELL, 588 + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 589 + 590 + first_ind = srq->first_free; 591 + } 553 592 } 554 593 555 594 if (likely(nreq)) {
+1
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 275 275 spin_lock_irqsave(&priv->tx_lock, flags); 276 276 ++priv->tx_tail; 277 277 if (netif_queue_stopped(dev) && 278 + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) && 278 279 priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1) 279 280 netif_wake_queue(dev); 280 281 spin_unlock_irqrestore(&priv->tx_lock, flags);
+6 -5
drivers/input/joystick/sidewinder.c
··· 589 589 struct sw *sw; 590 590 struct input_dev *input_dev; 591 591 int i, j, k, l; 592 - int err; 592 + int err = 0; 593 593 unsigned char *buf = NULL; /* [SW_LENGTH] */ 594 594 unsigned char *idbuf = NULL; /* [SW_LENGTH] */ 595 595 unsigned char m = 1; ··· 776 776 goto fail4; 777 777 } 778 778 779 - return 0; 779 + out: kfree(buf); 780 + kfree(idbuf); 781 + 782 + return err; 780 783 781 784 fail4: input_free_device(sw->dev[i]); 782 785 fail3: while (--i >= 0) ··· 787 784 fail2: gameport_close(gameport); 788 785 fail1: gameport_set_drvdata(gameport, NULL); 789 786 kfree(sw); 790 - kfree(buf); 791 - kfree(idbuf); 792 - return err; 787 + goto out; 793 788 } 794 789 795 790 static void sw_disconnect(struct gameport *gameport)
+6 -6
drivers/input/keyboard/corgikbd.c
··· 245 245 if (hinge_count >= HINGE_STABLE_COUNT) { 246 246 spin_lock_irqsave(&corgikbd_data->lock, flags); 247 247 248 - input_report_switch(corgikbd_data->input, SW_0, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0)); 249 - input_report_switch(corgikbd_data->input, SW_1, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0)); 250 - input_report_switch(corgikbd_data->input, SW_2, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0)); 248 + input_report_switch(corgikbd_data->input, SW_LID, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0)); 249 + input_report_switch(corgikbd_data->input, SW_TABLET_MODE, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0)); 250 + input_report_switch(corgikbd_data->input, SW_HEADPHONE_INSERT, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0)); 251 251 input_sync(corgikbd_data->input); 252 252 253 253 spin_unlock_irqrestore(&corgikbd_data->lock, flags); ··· 340 340 for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++) 341 341 set_bit(corgikbd->keycode[i], input_dev->keybit); 342 342 clear_bit(0, input_dev->keybit); 343 - set_bit(SW_0, input_dev->swbit); 344 - set_bit(SW_1, input_dev->swbit); 345 - set_bit(SW_2, input_dev->swbit); 343 + set_bit(SW_LID, input_dev->swbit); 344 + set_bit(SW_TABLET_MODE, input_dev->swbit); 345 + set_bit(SW_HEADPHONE_INSERT, input_dev->swbit); 346 346 347 347 input_register_device(corgikbd->input); 348 348
+6 -6
drivers/input/keyboard/spitzkbd.c
··· 299 299 if (hinge_count >= HINGE_STABLE_COUNT) { 300 300 spin_lock_irqsave(&spitzkbd_data->lock, flags); 301 301 302 - input_report_switch(spitzkbd_data->input, SW_0, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0)); 303 - input_report_switch(spitzkbd_data->input, SW_1, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0)); 304 - input_report_switch(spitzkbd_data->input, SW_2, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0)); 302 + input_report_switch(spitzkbd_data->input, SW_LID, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0)); 303 + input_report_switch(spitzkbd_data->input, SW_TABLET_MODE, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0)); 304 + input_report_switch(spitzkbd_data->input, SW_HEADPHONE_INSERT, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0)); 305 305 input_sync(spitzkbd_data->input); 306 306 307 307 spin_unlock_irqrestore(&spitzkbd_data->lock, flags); ··· 398 398 for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++) 399 399 set_bit(spitzkbd->keycode[i], input_dev->keybit); 400 400 clear_bit(0, input_dev->keybit); 401 - set_bit(SW_0, input_dev->swbit); 402 - set_bit(SW_1, input_dev->swbit); 403 - set_bit(SW_2, input_dev->swbit); 401 + set_bit(SW_LID, input_dev->swbit); 402 + set_bit(SW_TABLET_MODE, input_dev->swbit); 403 + set_bit(SW_HEADPHONE_INSERT, input_dev->swbit); 404 404 405 405 input_register_device(input_dev); 406 406
+19
drivers/input/misc/wistron_btns.c
··· 318 318 { KE_END, 0 } 319 319 }; 320 320 321 + static struct key_entry keymap_aopen_1559as[] = { 322 + { KE_KEY, 0x01, KEY_HELP }, 323 + { KE_KEY, 0x06, KEY_PROG3 }, 324 + { KE_KEY, 0x11, KEY_PROG1 }, 325 + { KE_KEY, 0x12, KEY_PROG2 }, 326 + { KE_WIFI, 0x30, 0 }, 327 + { KE_KEY, 0x31, KEY_MAIL }, 328 + { KE_KEY, 0x36, KEY_WWW }, 329 + }; 330 + 321 331 /* 322 332 * If your machine is not here (which is currently rather likely), please send 323 333 * a list of buttons and their key codes (reported when loading this module ··· 378 368 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 240"), 379 369 }, 380 370 .driver_data = keymap_acer_travelmate_240 371 + }, 372 + { 373 + .callback = dmi_matched, 374 + .ident = "AOpen 1559AS", 375 + .matches = { 376 + DMI_MATCH(DMI_PRODUCT_NAME, "E2U"), 377 + DMI_MATCH(DMI_BOARD_NAME, "E2U"), 378 + }, 379 + .driver_data = keymap_aopen_1559as 381 380 }, 382 381 { NULL, } 383 382 };
+2 -2
drivers/input/mouse/alps.c
··· 100 100 } 101 101 102 102 if (priv->i->flags & ALPS_OLDPROTO) { 103 - left = packet[2] & 0x08; 104 - right = packet[2] & 0x10; 103 + left = packet[2] & 0x10; 104 + right = packet[2] & 0x08; 105 105 middle = 0; 106 106 x = packet[1] | ((packet[0] & 0x07) << 7); 107 107 y = packet[4] | ((packet[3] & 0x07) << 7);
+24
drivers/input/mouse/lifebook.c
··· 22 22 23 23 static struct dmi_system_id lifebook_dmi_table[] = { 24 24 { 25 + .ident = "LifeBook B", 26 + .matches = { 27 + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"), 28 + }, 29 + }, 30 + { 25 31 .ident = "Lifebook B", 26 32 .matches = { 27 33 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK B Series"), 34 + }, 35 + }, 36 + { 37 + .ident = "Lifebook B213x/B2150", 38 + .matches = { 39 + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B2131/B2133/B2150"), 40 + }, 41 + }, 42 + { 43 + .ident = "Zephyr", 44 + .matches = { 45 + DMI_MATCH(DMI_PRODUCT_NAME, "ZEPHYR"), 46 + }, 47 + }, 48 + { 49 + .ident = "CF-18", 50 + .matches = { 51 + DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), 28 52 }, 29 53 }, 30 54 {
+6
drivers/input/mouse/logips2pp.c
··· 19 19 #define PS2PP_KIND_WHEEL 1 20 20 #define PS2PP_KIND_MX 2 21 21 #define PS2PP_KIND_TP3 3 22 + #define PS2PP_KIND_TRACKMAN 4 22 23 23 24 /* Logitech mouse features */ 24 25 #define PS2PP_WHEEL 0x01 ··· 224 223 { 73, 0, PS2PP_SIDE_BTN }, 225 224 { 75, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 226 225 { 76, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 226 + { 79, PS2PP_KIND_TRACKMAN, PS2PP_WHEEL }, /* TrackMan with wheel */ 227 227 { 80, PS2PP_KIND_WHEEL, PS2PP_SIDE_BTN | PS2PP_WHEEL }, 228 228 { 81, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 229 229 { 83, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, ··· 298 296 299 297 case PS2PP_KIND_TP3: 300 298 psmouse->name = "TouchPad 3"; 299 + break; 300 + 301 + case PS2PP_KIND_TRACKMAN: 302 + psmouse->name = "TrackMan"; 301 303 break; 302 304 303 305 default:
+29 -26
drivers/input/touchscreen/ads7846.c
··· 36 36 37 37 38 38 /* 39 - * This code has been tested on an ads7846 / N770 device. 39 + * This code has been heavily tested on a Nokia 770, and lightly 40 + * tested on other ads7846 devices (OSK/Mistral, Lubbock). 40 41 * Support for ads7843 and ads7845 has only been stubbed in. 41 - * 42 - * Not yet done: How accurate are the temperature and voltage 43 - * readings? (System-specific calibration should support 44 - * accuracy of 0.3 degrees C; otherwise it's 2.0 degrees.) 45 42 * 46 43 * IRQ handling needs a workaround because of a shortcoming in handling 47 44 * edge triggered IRQs on some platforms like the OMAP1/2. These ··· 245 248 246 249 if (req->msg.status) 247 250 status = req->msg.status; 248 - sample = be16_to_cpu(req->sample); 249 - sample = sample >> 4; 250 - kfree(req); 251 251 252 + /* on-wire is a must-ignore bit, a BE12 value, then padding */ 253 + sample = be16_to_cpu(req->sample); 254 + sample = sample >> 3; 255 + sample &= 0x0fff; 256 + 257 + kfree(req); 252 258 return status ? status : sample; 253 259 } 254 260 ··· 336 336 u16 x, y, z1, z2; 337 337 unsigned long flags; 338 338 339 - /* adjust: 12 bit samples (left aligned), built from 340 - * two 8 bit values writen msb-first. 339 + /* adjust: on-wire is a must-ignore bit, a BE12 value, then padding; 340 + * built from two 8 bit values written msb-first. 
341 341 */ 342 - x = be16_to_cpu(ts->tc.x) >> 4; 343 - y = be16_to_cpu(ts->tc.y) >> 4; 344 - z1 = be16_to_cpu(ts->tc.z1) >> 4; 345 - z2 = be16_to_cpu(ts->tc.z2) >> 4; 342 + x = (be16_to_cpu(ts->tc.x) >> 3) & 0x0fff; 343 + y = (be16_to_cpu(ts->tc.y) >> 3) & 0x0fff; 344 + z1 = (be16_to_cpu(ts->tc.z1) >> 3) & 0x0fff; 345 + z2 = (be16_to_cpu(ts->tc.z2) >> 3) & 0x0fff; 346 346 347 347 /* range filtering */ 348 348 if (x == MAX_12BIT) ··· 420 420 421 421 m = &ts->msg[ts->msg_idx]; 422 422 t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list); 423 - val = (*(u16 *)t->rx_buf) >> 3; 423 + val = (be16_to_cpu(*(__be16 *)t->rx_buf) >> 3) & 0x0fff; 424 424 if (!ts->read_cnt || (abs(ts->last_read - val) > ts->debounce_tol)) { 425 425 /* Repeat it, if this was the first read or the read 426 426 * wasn't consistent enough. */ ··· 469 469 spin_lock_irq(&ts->lock); 470 470 471 471 if (unlikely(ts->msg_idx && !ts->pendown)) { 472 - /* measurment cycle ended */ 472 + /* measurement cycle ended */ 473 473 if (!device_suspended(&ts->spi->dev)) { 474 474 ts->irq_disabled = 0; 475 475 enable_irq(ts->spi->irq); ··· 495 495 spin_lock_irqsave(&ts->lock, flags); 496 496 if (likely(ts->get_pendown_state())) { 497 497 if (!ts->irq_disabled) { 498 - /* REVISIT irq logic for many ARM chips has cloned a 499 - * bug wherein disabling an irq in its handler won't 500 - * work;(it's disabled lazily, and too late to work. 501 - * until all their irq logic is fixed, we must shadow 502 - * that state here. 498 + /* The ARM do_simple_IRQ() dispatcher doesn't act 499 + * like the other dispatchers: it will report IRQs 500 + * even after they've been disabled. We work around 501 + * that here. (The "generic irq" framework may help...) 
503 502 */ 504 503 ts->irq_disabled = 1; 505 504 disable_irq(ts->spi->irq); ··· 608 609 return -EINVAL; 609 610 } 610 611 612 + /* REVISIT when the irq can be triggered active-low, or if for some 613 + * reason the touchscreen isn't hooked up, we don't need to access 614 + * the pendown state. 615 + */ 611 616 if (pdata->get_pendown_state == NULL) { 612 617 dev_dbg(&spi->dev, "no get_pendown_state function?\n"); 613 618 return -EINVAL; 614 619 } 615 620 616 - /* We'd set the wordsize to 12 bits ... except that some controllers 617 - * will then treat the 8 bit command words as 12 bits (and drop the 618 - * four MSBs of the 12 bit result). Result: inputs must be shifted 619 - * to discard the four garbage LSBs. 621 + /* We'd set TX wordsize 8 bits and RX wordsize to 13 bits ... except 622 + * that even if the hardware can do that, the SPI controller driver 623 + * may not. So we stick to very-portable 8 bit words, both RX and TX. 620 624 */ 625 + spi->bits_per_word = 8; 621 626 622 627 ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL); 623 628 input_dev = input_allocate_device(); ··· 775 772 776 773 if (request_irq(spi->irq, ads7846_irq, 777 774 SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING, 778 - spi->dev.bus_id, ts)) { 775 + spi->dev.driver->name, ts)) { 779 776 dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); 780 777 err = -EBUSY; 781 778 goto err_free_mem;
+13 -2
drivers/md/md.c
··· 167 167 } 168 168 EXPORT_SYMBOL_GPL(md_new_event); 169 169 170 + /* Alternate version that can be called from interrupts 171 + * when calling sysfs_notify isn't needed. 172 + */ 173 + void md_new_event_inintr(mddev_t *mddev) 174 + { 175 + atomic_inc(&md_event_count); 176 + wake_up(&md_event_waiters); 177 + } 178 + 170 179 /* 171 180 * Enables to iterate over all existing md arrays 172 181 * all_mddevs_lock protects this list. ··· 4158 4149 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4159 4150 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4160 4151 md_wakeup_thread(mddev->thread); 4161 - md_new_event(mddev); 4152 + md_new_event_inintr(mddev); 4162 4153 } 4163 4154 4164 4155 /* seq_file implementation /proc/mdstat */ ··· 5037 5028 printk(KERN_INFO "md: stopping all md devices.\n"); 5038 5029 5039 5030 ITERATE_MDDEV(mddev,tmp) 5040 - if (mddev_trylock(mddev)) 5031 + if (mddev_trylock(mddev)) { 5041 5032 do_md_stop (mddev, 1); 5033 + mddev_unlock(mddev); 5034 + } 5042 5035 /* 5043 5036 * certain more exotic SCSI devices are known to be 5044 5037 * volatile wrt too early system reboots. While the
+21 -6
drivers/message/fusion/mptbase.c
··· 1605 1605 } 1606 1606 #endif 1607 1607 1608 + static int 1609 + mpt_signal_reset(int index, MPT_ADAPTER *ioc, int reset_phase) 1610 + { 1611 + if ((MptDriverClass[index] == MPTSPI_DRIVER && 1612 + ioc->bus_type != SPI) || 1613 + (MptDriverClass[index] == MPTFC_DRIVER && 1614 + ioc->bus_type != FC) || 1615 + (MptDriverClass[index] == MPTSAS_DRIVER && 1616 + ioc->bus_type != SAS)) 1617 + /* make sure we only call the relevant reset handler 1618 + * for the bus */ 1619 + return 0; 1620 + return (MptResetHandlers[index])(ioc, reset_phase); 1621 + } 1622 + 1608 1623 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1609 1624 /* 1610 1625 * mpt_do_ioc_recovery - Initialize or recover MPT adapter. ··· 1900 1885 if ((ret == 0) && MptResetHandlers[ii]) { 1901 1886 dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n", 1902 1887 ioc->name, ii)); 1903 - rc += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_POST_RESET); 1888 + rc += mpt_signal_reset(ii, ioc, MPT_IOC_POST_RESET); 1904 1889 handlers++; 1905 1890 } 1906 1891 1907 1892 if (alt_ioc_ready && MptResetHandlers[ii]) { 1908 1893 drsprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n", 1909 1894 ioc->name, ioc->alt_ioc->name, ii)); 1910 - rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET); 1895 + rc += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_POST_RESET); 1911 1896 handlers++; 1912 1897 } 1913 1898 } ··· 3282 3267 if (MptResetHandlers[ii]) { 3283 3268 dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n", 3284 3269 ioc->name, ii)); 3285 - r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_PRE_RESET); 3270 + r += mpt_signal_reset(ii, ioc, MPT_IOC_PRE_RESET); 3286 3271 if (ioc->alt_ioc) { 3287 3272 dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n", 3288 3273 ioc->name, ioc->alt_ioc->name, ii)); 3289 - r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_PRE_RESET); 3274 + r += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_PRE_RESET); 3290 
3275 } 3291 3276 } 3292 3277 } ··· 5721 5706 if (MptResetHandlers[ii]) { 5722 5707 dtmprintk((MYIOC_s_INFO_FMT "Calling IOC reset_setup handler #%d\n", 5723 5708 ioc->name, ii)); 5724 - r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_SETUP_RESET); 5709 + r += mpt_signal_reset(ii, ioc, MPT_IOC_SETUP_RESET); 5725 5710 if (ioc->alt_ioc) { 5726 5711 dtmprintk((MYIOC_s_INFO_FMT "Calling alt-%s setup reset handler #%d\n", 5727 5712 ioc->name, ioc->alt_ioc->name, ii)); 5728 - r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_SETUP_RESET); 5713 + r += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_SETUP_RESET); 5729 5714 } 5730 5715 } 5731 5716 }
+1 -1
drivers/mmc/Kconfig
··· 84 84 85 85 config MMC_AU1X 86 86 tristate "Alchemy AU1XX0 MMC Card Interface support" 87 - depends on SOC_AU1X00 && MMC 87 + depends on MMC && SOC_AU1200 88 88 help 89 89 This selects the AMD Alchemy(R) Multimedia card interface. 90 90 If you have a Alchemy platform with a MMC slot, say Y or M here.
+9 -1
drivers/net/e1000/e1000_main.c
··· 220 220 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); 221 221 static int e1000_resume(struct pci_dev *pdev); 222 222 #endif 223 + static void e1000_shutdown(struct pci_dev *pdev); 223 224 224 225 #ifdef CONFIG_NET_POLL_CONTROLLER 225 226 /* for netdump / net console */ ··· 236 235 /* Power Managment Hooks */ 237 236 #ifdef CONFIG_PM 238 237 .suspend = e1000_suspend, 239 - .resume = e1000_resume 238 + .resume = e1000_resume, 240 239 #endif 240 + .shutdown = e1000_shutdown 241 241 }; 242 242 243 243 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); ··· 4613 4611 return 0; 4614 4612 } 4615 4613 #endif 4614 + 4615 + static void e1000_shutdown(struct pci_dev *pdev) 4616 + { 4617 + e1000_suspend(pdev, PMSG_SUSPEND); 4618 + } 4619 + 4616 4620 #ifdef CONFIG_NET_POLL_CONTROLLER 4617 4621 /* 4618 4622 * Polling 'interrupt' - used by things like netconsole to send skbs
+16
drivers/net/forcedeth.c
··· 2615 2615 return ret; 2616 2616 } 2617 2617 2618 + #ifdef NETIF_F_TSO 2619 + static int nv_set_tso(struct net_device *dev, u32 value) 2620 + { 2621 + struct fe_priv *np = netdev_priv(dev); 2622 + 2623 + if ((np->driver_data & DEV_HAS_CHECKSUM)) 2624 + return ethtool_op_set_tso(dev, value); 2625 + else 2626 + return value ? -EOPNOTSUPP : 0; 2627 + } 2628 + #endif 2629 + 2618 2630 static struct ethtool_ops ops = { 2619 2631 .get_drvinfo = nv_get_drvinfo, 2620 2632 .get_link = ethtool_op_get_link, ··· 2638 2626 .get_regs = nv_get_regs, 2639 2627 .nway_reset = nv_nway_reset, 2640 2628 .get_perm_addr = ethtool_op_get_perm_addr, 2629 + #ifdef NETIF_F_TSO 2630 + .get_tso = ethtool_op_get_tso, 2631 + .set_tso = nv_set_tso 2632 + #endif 2641 2633 }; 2642 2634 2643 2635 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+10 -10
drivers/net/irda/Kconfig
··· 33 33 34 34 config ESI_DONGLE 35 35 tristate "ESI JetEye PC dongle" 36 - depends on DONGLE && IRDA 36 + depends on IRTTY_SIR && DONGLE && IRDA 37 37 help 38 38 Say Y here if you want to build support for the Extended Systems 39 39 JetEye PC dongle. To compile it as a module, choose M here. The ESI ··· 44 44 45 45 config ACTISYS_DONGLE 46 46 tristate "ACTiSYS IR-220L and IR220L+ dongle" 47 - depends on DONGLE && IRDA 47 + depends on IRTTY_SIR && DONGLE && IRDA 48 48 help 49 49 Say Y here if you want to build support for the ACTiSYS IR-220L and 50 50 IR220L+ dongles. To compile it as a module, choose M here. The ··· 55 55 56 56 config TEKRAM_DONGLE 57 57 tristate "Tekram IrMate 210B dongle" 58 - depends on DONGLE && IRDA 58 + depends on IRTTY_SIR && DONGLE && IRDA 59 59 help 60 60 Say Y here if you want to build support for the Tekram IrMate 210B 61 61 dongle. To compile it as a module, choose M here. The Tekram dongle ··· 66 66 67 67 config TOIM3232_DONGLE 68 68 tristate "TOIM3232 IrDa dongle" 69 - depends on DONGLE && IRDA 69 + depends on IRTTY_SIR && DONGLE && IRDA 70 70 help 71 71 Say Y here if you want to build support for the Vishay/Temic 72 72 TOIM3232 and TOIM4232 based dongles. ··· 74 74 75 75 config LITELINK_DONGLE 76 76 tristate "Parallax LiteLink dongle" 77 - depends on DONGLE && IRDA 77 + depends on IRTTY_SIR && DONGLE && IRDA 78 78 help 79 79 Say Y here if you want to build support for the Parallax Litelink 80 80 dongle. To compile it as a module, choose M here. The Parallax ··· 85 85 86 86 config MA600_DONGLE 87 87 tristate "Mobile Action MA600 dongle" 88 - depends on DONGLE && IRDA && EXPERIMENTAL 88 + depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL 89 89 help 90 90 Say Y here if you want to build support for the Mobile Action MA600 91 91 dongle. To compile it as a module, choose M here. 
The MA600 dongle ··· 98 98 99 99 config GIRBIL_DONGLE 100 100 tristate "Greenwich GIrBIL dongle" 101 - depends on DONGLE && IRDA && EXPERIMENTAL 101 + depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL 102 102 help 103 103 Say Y here if you want to build support for the Greenwich GIrBIL 104 104 dongle. If you want to compile it as a module, choose M here. ··· 109 109 110 110 config MCP2120_DONGLE 111 111 tristate "Microchip MCP2120" 112 - depends on DONGLE && IRDA && EXPERIMENTAL 112 + depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL 113 113 help 114 114 Say Y here if you want to build support for the Microchip MCP2120 115 115 dongle. If you want to compile it as a module, choose M here. ··· 123 123 124 124 config OLD_BELKIN_DONGLE 125 125 tristate "Old Belkin dongle" 126 - depends on DONGLE && IRDA && EXPERIMENTAL 126 + depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL 127 127 help 128 128 Say Y here if you want to build support for the Adaptec Airport 1000 129 129 and 2000 dongles. If you want to compile it as a module, choose ··· 132 132 133 133 config ACT200L_DONGLE 134 134 tristate "ACTiSYS IR-200L dongle" 135 - depends on DONGLE && IRDA && EXPERIMENTAL 135 + depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL 136 136 help 137 137 Say Y here if you want to build support for the ACTiSYS IR-200L 138 138 dongle. If you want to compile it as a module, choose M here.
+1 -1
drivers/net/netconsole.c
··· 107 107 108 108 if(!configured) { 109 109 printk("netconsole: not configured, aborting\n"); 110 - return -EINVAL; 110 + return 0; 111 111 } 112 112 113 113 if(netpoll_setup(&np))
+1 -1
drivers/net/pcmcia/nmclan_cs.c
··· 1204 1204 1205 1205 dev->last_rx = jiffies; 1206 1206 lp->linux_stats.rx_packets++; 1207 - lp->linux_stats.rx_bytes += skb->len; 1207 + lp->linux_stats.rx_bytes += pkt_len; 1208 1208 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ 1209 1209 continue; 1210 1210 } else {
-2
drivers/net/pcnet32.c
··· 1774 1774 lp->rx_dma_addr[i] = 0; 1775 1775 } 1776 1776 1777 - pcnet32_free_ring(dev); 1778 - 1779 1777 /* 1780 1778 * Switch back to 16bit mode to avoid problems with dumb 1781 1779 * DOS packet driver after a warm reboot
+3
drivers/net/pppoe.c
··· 861 861 * give dev_queue_xmit something it can free. 862 862 */ 863 863 skb2 = skb_clone(skb, GFP_ATOMIC); 864 + 865 + if (skb2 == NULL) 866 + goto abort; 864 867 } 865 868 866 869 ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr));
+2 -2
drivers/net/wireless/arlan-main.c
··· 1838 1838 } 1839 1839 1840 1840 #ifdef MODULE 1841 - int init_module(void) 1841 + int __init init_module(void) 1842 1842 { 1843 1843 int i = 0; 1844 1844 ··· 1860 1860 } 1861 1861 1862 1862 1863 - void cleanup_module(void) 1863 + void __exit cleanup_module(void) 1864 1864 { 1865 1865 int i = 0; 1866 1866 struct net_device *dev;
+1 -1
drivers/net/wireless/wavelan.c
··· 4306 4306 * Insertion of the module 4307 4307 * I'm now quite proud of the multi-device support. 4308 4308 */ 4309 - int init_module(void) 4309 + int __init init_module(void) 4310 4310 { 4311 4311 int ret = -EIO; /* Return error if no cards found */ 4312 4312 int i;
+6
drivers/pcmcia/ds.c
··· 1143 1143 { 1144 1144 struct pcmcia_socket *s = pcmcia_get_socket(skt); 1145 1145 1146 + if (!s) { 1147 + printk(KERN_ERR "PCMCIA obtaining reference to socket %p " \ 1148 + "failed, event 0x%x lost!\n", skt, event); 1149 + return -ENODEV; 1150 + } 1151 + 1146 1152 ds_dbg(1, "ds_event(0x%06x, %d, 0x%p)\n", 1147 1153 event, priority, skt); 1148 1154
+36 -36
drivers/rtc/rtc-m48t86.c
··· 48 48 struct platform_device *pdev = to_platform_device(dev); 49 49 struct m48t86_ops *ops = pdev->dev.platform_data; 50 50 51 - reg = ops->readb(M48T86_REG_B); 51 + reg = ops->readbyte(M48T86_REG_B); 52 52 53 53 if (reg & M48T86_REG_B_DM) { 54 54 /* data (binary) mode */ 55 - tm->tm_sec = ops->readb(M48T86_REG_SEC); 56 - tm->tm_min = ops->readb(M48T86_REG_MIN); 57 - tm->tm_hour = ops->readb(M48T86_REG_HOUR) & 0x3F; 58 - tm->tm_mday = ops->readb(M48T86_REG_DOM); 55 + tm->tm_sec = ops->readbyte(M48T86_REG_SEC); 56 + tm->tm_min = ops->readbyte(M48T86_REG_MIN); 57 + tm->tm_hour = ops->readbyte(M48T86_REG_HOUR) & 0x3F; 58 + tm->tm_mday = ops->readbyte(M48T86_REG_DOM); 59 59 /* tm_mon is 0-11 */ 60 - tm->tm_mon = ops->readb(M48T86_REG_MONTH) - 1; 61 - tm->tm_year = ops->readb(M48T86_REG_YEAR) + 100; 62 - tm->tm_wday = ops->readb(M48T86_REG_DOW); 60 + tm->tm_mon = ops->readbyte(M48T86_REG_MONTH) - 1; 61 + tm->tm_year = ops->readbyte(M48T86_REG_YEAR) + 100; 62 + tm->tm_wday = ops->readbyte(M48T86_REG_DOW); 63 63 } else { 64 64 /* bcd mode */ 65 - tm->tm_sec = BCD2BIN(ops->readb(M48T86_REG_SEC)); 66 - tm->tm_min = BCD2BIN(ops->readb(M48T86_REG_MIN)); 67 - tm->tm_hour = BCD2BIN(ops->readb(M48T86_REG_HOUR) & 0x3F); 68 - tm->tm_mday = BCD2BIN(ops->readb(M48T86_REG_DOM)); 65 + tm->tm_sec = BCD2BIN(ops->readbyte(M48T86_REG_SEC)); 66 + tm->tm_min = BCD2BIN(ops->readbyte(M48T86_REG_MIN)); 67 + tm->tm_hour = BCD2BIN(ops->readbyte(M48T86_REG_HOUR) & 0x3F); 68 + tm->tm_mday = BCD2BIN(ops->readbyte(M48T86_REG_DOM)); 69 69 /* tm_mon is 0-11 */ 70 - tm->tm_mon = BCD2BIN(ops->readb(M48T86_REG_MONTH)) - 1; 71 - tm->tm_year = BCD2BIN(ops->readb(M48T86_REG_YEAR)) + 100; 72 - tm->tm_wday = BCD2BIN(ops->readb(M48T86_REG_DOW)); 70 + tm->tm_mon = BCD2BIN(ops->readbyte(M48T86_REG_MONTH)) - 1; 71 + tm->tm_year = BCD2BIN(ops->readbyte(M48T86_REG_YEAR)) + 100; 72 + tm->tm_wday = BCD2BIN(ops->readbyte(M48T86_REG_DOW)); 73 73 } 74 74 75 75 /* correct the hour if the clock is in 12h mode */ 76 76 
if (!(reg & M48T86_REG_B_H24)) 77 - if (ops->readb(M48T86_REG_HOUR) & 0x80) 77 + if (ops->readbyte(M48T86_REG_HOUR) & 0x80) 78 78 tm->tm_hour += 12; 79 79 80 80 return 0; ··· 86 86 struct platform_device *pdev = to_platform_device(dev); 87 87 struct m48t86_ops *ops = pdev->dev.platform_data; 88 88 89 - reg = ops->readb(M48T86_REG_B); 89 + reg = ops->readbyte(M48T86_REG_B); 90 90 91 91 /* update flag and 24h mode */ 92 92 reg |= M48T86_REG_B_SET | M48T86_REG_B_H24; 93 - ops->writeb(reg, M48T86_REG_B); 93 + ops->writebyte(reg, M48T86_REG_B); 94 94 95 95 if (reg & M48T86_REG_B_DM) { 96 96 /* data (binary) mode */ 97 - ops->writeb(tm->tm_sec, M48T86_REG_SEC); 98 - ops->writeb(tm->tm_min, M48T86_REG_MIN); 99 - ops->writeb(tm->tm_hour, M48T86_REG_HOUR); 100 - ops->writeb(tm->tm_mday, M48T86_REG_DOM); 101 - ops->writeb(tm->tm_mon + 1, M48T86_REG_MONTH); 102 - ops->writeb(tm->tm_year % 100, M48T86_REG_YEAR); 103 - ops->writeb(tm->tm_wday, M48T86_REG_DOW); 97 + ops->writebyte(tm->tm_sec, M48T86_REG_SEC); 98 + ops->writebyte(tm->tm_min, M48T86_REG_MIN); 99 + ops->writebyte(tm->tm_hour, M48T86_REG_HOUR); 100 + ops->writebyte(tm->tm_mday, M48T86_REG_DOM); 101 + ops->writebyte(tm->tm_mon + 1, M48T86_REG_MONTH); 102 + ops->writebyte(tm->tm_year % 100, M48T86_REG_YEAR); 103 + ops->writebyte(tm->tm_wday, M48T86_REG_DOW); 104 104 } else { 105 105 /* bcd mode */ 106 - ops->writeb(BIN2BCD(tm->tm_sec), M48T86_REG_SEC); 107 - ops->writeb(BIN2BCD(tm->tm_min), M48T86_REG_MIN); 108 - ops->writeb(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR); 109 - ops->writeb(BIN2BCD(tm->tm_mday), M48T86_REG_DOM); 110 - ops->writeb(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH); 111 - ops->writeb(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR); 112 - ops->writeb(BIN2BCD(tm->tm_wday), M48T86_REG_DOW); 106 + ops->writebyte(BIN2BCD(tm->tm_sec), M48T86_REG_SEC); 107 + ops->writebyte(BIN2BCD(tm->tm_min), M48T86_REG_MIN); 108 + ops->writebyte(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR); 109 + ops->writebyte(BIN2BCD(tm->tm_mday), 
M48T86_REG_DOM); 110 + ops->writebyte(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH); 111 + ops->writebyte(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR); 112 + ops->writebyte(BIN2BCD(tm->tm_wday), M48T86_REG_DOW); 113 113 } 114 114 115 115 /* update ended */ 116 116 reg &= ~M48T86_REG_B_SET; 117 - ops->writeb(reg, M48T86_REG_B); 117 + ops->writebyte(reg, M48T86_REG_B); 118 118 119 119 return 0; 120 120 } ··· 125 125 struct platform_device *pdev = to_platform_device(dev); 126 126 struct m48t86_ops *ops = pdev->dev.platform_data; 127 127 128 - reg = ops->readb(M48T86_REG_B); 128 + reg = ops->readbyte(M48T86_REG_B); 129 129 130 130 seq_printf(seq, "mode\t\t: %s\n", 131 131 (reg & M48T86_REG_B_DM) ? "binary" : "bcd"); 132 132 133 - reg = ops->readb(M48T86_REG_D); 133 + reg = ops->readbyte(M48T86_REG_D); 134 134 135 135 seq_printf(seq, "battery\t\t: %s\n", 136 136 (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted"); ··· 157 157 platform_set_drvdata(dev, rtc); 158 158 159 159 /* read battery status */ 160 - reg = ops->readb(M48T86_REG_D); 160 + reg = ops->readbyte(M48T86_REG_D); 161 161 dev_info(&dev->dev, "battery %s\n", 162 162 (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted"); 163 163
+2 -2
drivers/s390/cio/css.h
··· 45 45 union { 46 46 __u8 fc; /* SPID function code */ 47 47 struct path_state ps; /* SNID path state */ 48 - } inf; 48 + } __attribute__ ((packed)) inf; 49 49 union { 50 50 __u32 cpu_addr : 16; /* CPU address */ 51 51 struct extended_cssid ext_cssid; 52 - } pgid_high; 52 + } __attribute__ ((packed)) pgid_high; 53 53 __u32 cpu_id : 24; /* CPU identification */ 54 54 __u32 cpu_model : 16; /* CPU model */ 55 55 __u32 tod_high; /* high word TOD clock */
+1 -1
drivers/s390/cio/device_fsm.c
··· 749 749 /* Unit check but no sense data. Need basic sense. */ 750 750 if (ccw_device_do_sense(cdev, irb) != 0) 751 751 goto call_handler_unsol; 752 - memcpy(irb, &cdev->private->irb, sizeof(struct irb)); 752 + memcpy(&cdev->private->irb, irb, sizeof(struct irb)); 753 753 cdev->private->state = DEV_STATE_W4SENSE; 754 754 cdev->private->intparm = 0; 755 755 return;
+13 -13
drivers/s390/net/ctcmain.c
··· 1486 1486 } 1487 1487 } 1488 1488 1489 - static void 1489 + static void 1490 1490 ch_action_reinit(fsm_instance *fi, int event, void *arg) 1491 1491 { 1492 1492 struct channel *ch = (struct channel *)arg; 1493 1493 struct net_device *dev = ch->netdev; 1494 1494 struct ctc_priv *privptr = dev->priv; 1495 - 1495 + 1496 1496 DBF_TEXT(trace, 4, __FUNCTION__); 1497 1497 ch_action_iofatal(fi, event, arg); 1498 1498 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev); ··· 1624 1624 } 1625 1625 dev1 = simple_strtoul(id1, &id1, 16); 1626 1626 dev2 = simple_strtoul(id2, &id2, 16); 1627 - 1627 + 1628 1628 return (dev1 < dev2); 1629 1629 } 1630 1630 ··· 1895 1895 irb->scsw.dstat); 1896 1896 return; 1897 1897 } 1898 - 1898 + 1899 1899 priv = ((struct ccwgroup_device *)cdev->dev.driver_data) 1900 1900 ->dev.driver_data; 1901 1901 ··· 1909 1909 "device %s\n", cdev->dev.bus_id); 1910 1910 return; 1911 1911 } 1912 - 1912 + 1913 1913 dev = (struct net_device *) (ch->netdev); 1914 1914 if (dev == NULL) { 1915 1915 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n", ··· 2008 2008 fsm_event(ch->fsm, CH_EVENT_STOP, ch); 2009 2009 } 2010 2010 } 2011 - static void 2011 + static void 2012 2012 dev_action_restart(fsm_instance *fi, int event, void *arg) 2013 2013 { 2014 2014 struct net_device *dev = (struct net_device *)arg; 2015 2015 struct ctc_priv *privptr = dev->priv; 2016 - 2016 + 2017 2017 DBF_TEXT(trace, 3, __FUNCTION__); 2018 2018 ctc_pr_debug("%s: Restarting\n", dev->name); 2019 2019 dev_action_stop(fi, event, arg); ··· 2193 2193 2194 2194 DBF_TEXT(trace, 5, __FUNCTION__); 2195 2195 /* we need to acquire the lock for testing the state 2196 - * otherwise we can have an IRQ changing the state to 2196 + * otherwise we can have an IRQ changing the state to 2197 2197 * TXIDLE after the test but before acquiring the lock. 
2198 2198 */ 2199 2199 spin_lock_irqsave(&ch->collect_lock, saveflags); ··· 2393 2393 2394 2394 /** 2395 2395 * If channels are not running, try to restart them 2396 - * and throw away packet. 2396 + * and throw away packet. 2397 2397 */ 2398 2398 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { 2399 2399 fsm_event(privptr->fsm, DEV_EVENT_START, dev); ··· 2738 2738 /** 2739 2739 * Add ctc specific attributes. 2740 2740 * Add ctc private data. 2741 - * 2741 + * 2742 2742 * @param cgdev pointer to ccwgroup_device just added 2743 2743 * 2744 2744 * @returns 0 on success, !0 on failure. ··· 2869 2869 DBF_TEXT(setup, 3, buffer); 2870 2870 2871 2871 type = get_channel_type(&cgdev->cdev[0]->id); 2872 - 2872 + 2873 2873 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id); 2874 2874 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id); 2875 2875 ··· 2907 2907 channel_get(type, direction == READ ? read_id : write_id, 2908 2908 direction); 2909 2909 if (privptr->channel[direction] == NULL) { 2910 - if (direction == WRITE) 2910 + if (direction == WRITE) 2911 2911 channel_free(privptr->channel[READ]); 2912 2912 2913 2913 ctc_free_netdevice(dev, 1); ··· 2955 2955 { 2956 2956 struct ctc_priv *priv; 2957 2957 struct net_device *ndev; 2958 - 2958 + 2959 2959 DBF_TEXT(setup, 3, __FUNCTION__); 2960 2960 pr_debug("%s() called\n", __FUNCTION__); 2961 2961
+5 -5
drivers/s390/net/ctctty.c
··· 130 130 if ((tty = info->tty)) { 131 131 if (info->mcr & UART_MCR_RTS) { 132 132 struct sk_buff *skb; 133 - 133 + 134 134 if ((skb = skb_dequeue(&info->rx_queue))) { 135 135 int len = skb->len; 136 136 tty_insert_flip_string(tty, skb->data, len); ··· 328 328 { 329 329 int skb_res; 330 330 struct sk_buff *skb; 331 - 331 + 332 332 DBF_TEXT(trace, 4, __FUNCTION__); 333 333 if (ctc_tty_shuttingdown) 334 334 return; ··· 497 497 c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE; 498 498 if (c <= 0) 499 499 break; 500 - 500 + 501 501 skb_res = info->netdev->hard_header_len + sizeof(info->mcr) + 502 502 + sizeof(__u32); 503 503 skb = dev_alloc_skb(skb_res + c); ··· 828 828 if (tty_hung_up_p(filp) || 829 829 (info->flags & CTC_ASYNC_CLOSING)) { 830 830 if (info->flags & CTC_ASYNC_CLOSING) 831 - wait_event(info->close_wait, 831 + wait_event(info->close_wait, 832 832 !(info->flags & CTC_ASYNC_CLOSING)); 833 833 #ifdef MODEM_DO_RESTART 834 834 if (info->flags & CTC_ASYNC_HUP_NOTIFY) ··· 1247 1247 void 1248 1248 ctc_tty_cleanup(void) { 1249 1249 unsigned long saveflags; 1250 - 1250 + 1251 1251 DBF_TEXT(trace, 2, __FUNCTION__); 1252 1252 spin_lock_irqsave(&ctc_tty_lock, saveflags); 1253 1253 ctc_tty_shuttingdown = 1;
+5 -5
drivers/s390/net/cu3088.c
··· 20 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 21 * 22 22 */ 23 - 23 + 24 24 #include <linux/init.h> 25 25 #include <linux/module.h> 26 26 #include <linux/err.h> ··· 77 77 int len; 78 78 79 79 if (!(end = strchr(start, delim[i]))) 80 - return count; 80 + return -EINVAL; 81 81 len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1); 82 82 strlcpy (bus_ids[i], start, len); 83 83 argv[i] = bus_ids[i]; ··· 94 94 95 95 /* Register-unregister for ctc&lcs */ 96 96 int 97 - register_cu3088_discipline(struct ccwgroup_driver *dcp) 97 + register_cu3088_discipline(struct ccwgroup_driver *dcp) 98 98 { 99 99 int rc; 100 100 ··· 109 109 rc = driver_create_file(&dcp->driver, &driver_attr_group); 110 110 if (rc) 111 111 ccwgroup_driver_unregister(dcp); 112 - 112 + 113 113 return rc; 114 114 115 115 } ··· 137 137 cu3088_init (void) 138 138 { 139 139 int rc; 140 - 140 + 141 141 cu3088_root_dev = s390_root_dev_register("cu3088"); 142 142 if (IS_ERR(cu3088_root_dev)) 143 143 return PTR_ERR(cu3088_root_dev);
+18 -18
drivers/s390/net/iucv.c
··· 1 - /* 1 + /* 2 2 * IUCV network driver 3 3 * 4 4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation ··· 28 28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 29 29 * 30 30 */ 31 - 31 + 32 32 /* #define DEBUG */ 33 33 34 34 #include <linux/module.h> ··· 81 81 struct bus_type iucv_bus = { 82 82 .name = "iucv", 83 83 .match = iucv_bus_match, 84 - }; 84 + }; 85 85 86 86 struct device *iucv_root; 87 87 ··· 297 297 /* 298 298 * Debugging stuff 299 299 *******************************************************************************/ 300 - 300 + 301 301 302 302 #ifdef DEBUG 303 303 static int debuglevel = 0; ··· 344 344 /* 345 345 * Internal functions 346 346 *******************************************************************************/ 347 - 347 + 348 348 /** 349 349 * print start banner 350 350 */ ··· 810 810 sizeof (new_handler->id.userid)); 811 811 EBC_TOUPPER (new_handler->id.userid, 812 812 sizeof (new_handler->id.userid)); 813 - 813 + 814 814 if (pgmmask) { 815 815 memcpy (new_handler->id.mask, pgmmask, 816 816 sizeof (new_handler->id.mask)); ··· 1229 1229 /* parm->ipaudit has only 3 bytes */ 1230 1230 *audit >>= 8; 1231 1231 } 1232 - 1232 + 1233 1233 release_param(parm); 1234 1234 1235 1235 iucv_debug(1, "b2f0_result = %ld", b2f0_result); ··· 2330 2330 temp_buff1[j] &= (h->id.mask)[j]; 2331 2331 temp_buff2[j] &= (h->id.mask)[j]; 2332 2332 } 2333 - 2333 + 2334 2334 iucv_dumpit("temp_buff1:", 2335 2335 temp_buff1, sizeof(temp_buff1)); 2336 2336 iucv_dumpit("temp_buff2", 2337 2337 temp_buff2, sizeof(temp_buff2)); 2338 - 2338 + 2339 2339 if (!memcmp (temp_buff1, temp_buff2, 24)) { 2340 - 2340 + 2341 2341 iucv_debug(2, 2342 2342 "found a matching handler"); 2343 2343 break; ··· 2368 2368 } else 2369 2369 iucv_sever(int_buf->ippathid, no_listener); 2370 2370 break; 2371 - 2371 + 2372 2372 case 0x02: /*connection complete */ 2373 2373 if (messagesDisabled) { 2374 2374 iucv_setmask(~0); ··· 2387 2387 } else 2388 2388 
iucv_sever(int_buf->ippathid, no_listener); 2389 2389 break; 2390 - 2390 + 2391 2391 case 0x03: /* connection severed */ 2392 2392 if (messagesDisabled) { 2393 2393 iucv_setmask(~0); ··· 2398 2398 interrupt->ConnectionSevered( 2399 2399 (iucv_ConnectionSevered *)int_buf, 2400 2400 h->pgm_data); 2401 - 2401 + 2402 2402 else 2403 2403 iucv_sever (int_buf->ippathid, no_listener); 2404 2404 } else 2405 2405 iucv_sever(int_buf->ippathid, no_listener); 2406 2406 break; 2407 - 2407 + 2408 2408 case 0x04: /* connection quiesced */ 2409 2409 if (messagesDisabled) { 2410 2410 iucv_setmask(~0); ··· 2420 2420 "ConnectionQuiesced not called"); 2421 2421 } 2422 2422 break; 2423 - 2423 + 2424 2424 case 0x05: /* connection resumed */ 2425 2425 if (messagesDisabled) { 2426 2426 iucv_setmask(~0); ··· 2436 2436 "ConnectionResumed not called"); 2437 2437 } 2438 2438 break; 2439 - 2439 + 2440 2440 case 0x06: /* priority message complete */ 2441 2441 case 0x07: /* nonpriority message complete */ 2442 2442 if (h) { ··· 2449 2449 "MessageComplete not called"); 2450 2450 } 2451 2451 break; 2452 - 2452 + 2453 2453 case 0x08: /* priority message pending */ 2454 2454 case 0x09: /* nonpriority message pending */ 2455 2455 if (h) { ··· 2467 2467 __FUNCTION__); 2468 2468 break; 2469 2469 } /* end switch */ 2470 - 2470 + 2471 2471 iucv_debug(2, "exiting pathid %d, type %02X", 2472 2472 int_buf->ippathid, int_buf->iptype); 2473 2473
+311 -311
drivers/s390/net/iucv.h
··· 4 4 * 5 5 * S390 version 6 6 * Copyright (C) 2000 IBM Corporation 7 - * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com) 7 + * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com) 8 8 * Xenia Tkatschow (xenia@us.ibm.com) 9 9 * 10 10 * ··· 16 16 * CP Programming Services book, also available on the web 17 17 * thru www.ibm.com/s390/vm/pubs, manual # SC24-5760 18 18 * 19 - * Definition of Return Codes 20 - * -All positive return codes including zero are reflected back 21 - * from CP except for iucv_register_program. The definition of each 22 - * return code can be found in CP Programming Services book. 23 - * Also available on the web thru www.ibm.com/s390/vm/pubs, manual # SC24-5760 24 - * - Return Code of: 25 - * (-EINVAL) Invalid value 26 - * (-ENOMEM) storage allocation failed 19 + * Definition of Return Codes 20 + * -All positive return codes including zero are reflected back 21 + * from CP except for iucv_register_program. The definition of each 22 + * return code can be found in CP Programming Services book. 23 + * Also available on the web thru www.ibm.com/s390/vm/pubs, manual # SC24-5760 24 + * - Return Code of: 25 + * (-EINVAL) Invalid value 26 + * (-ENOMEM) storage allocation failed 27 27 * pgmask defined in iucv_register_program will be set depending on input 28 - * paramters. 29 - * 28 + * paramters. 29 + * 30 30 */ 31 31 32 32 #include <linux/types.h> ··· 124 124 #define iucv_handle_t void * 125 125 126 126 /* flags1: 127 - * All flags are defined in the field IPFLAGS1 of each function 128 - * and can be found in CP Programming Services. 129 - * IPLOCAL - Indicates the connect can only be satisfied on the 130 - * local system 131 - * IPPRTY - Indicates a priority message 132 - * IPQUSCE - Indicates you do not want to receive messages on a 133 - * path until an iucv_resume is issued 127 + * All flags are defined in the field IPFLAGS1 of each function 128 + * and can be found in CP Programming Services. 
129 + * IPLOCAL - Indicates the connect can only be satisfied on the 130 + * local system 131 + * IPPRTY - Indicates a priority message 132 + * IPQUSCE - Indicates you do not want to receive messages on a 133 + * path until an iucv_resume is issued 134 134 * IPRMDATA - Indicates that the message is in the parameter list 135 135 */ 136 136 #define IPLOCAL 0x01 ··· 154 154 #define AllInterrupts 0xf8 155 155 /* 156 156 * Mapping of external interrupt buffers should be used with the corresponding 157 - * interrupt types. 158 - * Names: iucv_ConnectionPending -> connection pending 157 + * interrupt types. 158 + * Names: iucv_ConnectionPending -> connection pending 159 159 * iucv_ConnectionComplete -> connection complete 160 - * iucv_ConnectionSevered -> connection severed 161 - * iucv_ConnectionQuiesced -> connection quiesced 162 - * iucv_ConnectionResumed -> connection resumed 163 - * iucv_MessagePending -> message pending 164 - * iucv_MessageComplete -> message complete 160 + * iucv_ConnectionSevered -> connection severed 161 + * iucv_ConnectionQuiesced -> connection quiesced 162 + * iucv_ConnectionResumed -> connection resumed 163 + * iucv_MessagePending -> message pending 164 + * iucv_MessageComplete -> message complete 165 165 */ 166 166 typedef struct { 167 167 u16 ippathid; ··· 260 260 uchar res2[3]; 261 261 } iucv_MessageComplete; 262 262 263 - /* 264 - * iucv_interrupt_ops_t: Is a vector of functions that handle 265 - * IUCV interrupts. 266 - * Parameter list: 267 - * eib - is a pointer to a 40-byte area described 268 - * with one of the structures above. 269 - * pgm_data - this data is strictly for the 270 - * interrupt handler that is passed by 271 - * the application. This may be an address 272 - * or token. 263 + /* 264 + * iucv_interrupt_ops_t: Is a vector of functions that handle 265 + * IUCV interrupts. 266 + * Parameter list: 267 + * eib - is a pointer to a 40-byte area described 268 + * with one of the structures above. 
269 + * pgm_data - this data is strictly for the 270 + * interrupt handler that is passed by 271 + * the application. This may be an address 272 + * or token. 273 273 */ 274 274 typedef struct { 275 275 void (*ConnectionPending) (iucv_ConnectionPending * eib, ··· 287 287 } iucv_interrupt_ops_t; 288 288 289 289 /* 290 - *iucv_array_t : Defines buffer array. 291 - * Inside the array may be 31- bit addresses and 31-bit lengths. 290 + *iucv_array_t : Defines buffer array. 291 + * Inside the array may be 31- bit addresses and 31-bit lengths. 292 292 */ 293 293 typedef struct { 294 294 u32 address; ··· 299 299 extern struct device *iucv_root; 300 300 301 301 /* -prototypes- */ 302 - /* 303 - * Name: iucv_register_program 304 - * Purpose: Registers an application with IUCV 305 - * Input: prmname - user identification 302 + /* 303 + * Name: iucv_register_program 304 + * Purpose: Registers an application with IUCV 305 + * Input: prmname - user identification 306 306 * userid - machine identification 307 307 * pgmmask - indicates which bits in the prmname and userid combined will be 308 308 * used to determine who is given control 309 - * ops - address of vector of interrupt handlers 310 - * pgm_data- application data passed to interrupt handlers 311 - * Output: NA 312 - * Return: address of handler 309 + * ops - address of vector of interrupt handlers 310 + * pgm_data- application data passed to interrupt handlers 311 + * Output: NA 312 + * Return: address of handler 313 313 * (0) - Error occurred, registration not completed. 314 - * NOTE: Exact cause of failure will be recorded in syslog. 314 + * NOTE: Exact cause of failure will be recorded in syslog. 
315 315 */ 316 316 iucv_handle_t iucv_register_program (uchar pgmname[16], 317 317 uchar userid[8], ··· 319 319 iucv_interrupt_ops_t * ops, 320 320 void *pgm_data); 321 321 322 - /* 323 - * Name: iucv_unregister_program 324 - * Purpose: Unregister application with IUCV 325 - * Input: address of handler 326 - * Output: NA 327 - * Return: (0) - Normal return 328 - * (-EINVAL) - Internal error, wild pointer 322 + /* 323 + * Name: iucv_unregister_program 324 + * Purpose: Unregister application with IUCV 325 + * Input: address of handler 326 + * Output: NA 327 + * Return: (0) - Normal return 328 + * (-EINVAL) - Internal error, wild pointer 329 329 */ 330 330 int iucv_unregister_program (iucv_handle_t handle); 331 331 ··· 333 333 * Name: iucv_accept 334 334 * Purpose: This function is issued after the user receives a Connection Pending external 335 335 * interrupt and now wishes to complete the IUCV communication path. 336 - * Input: pathid - u16 , Path identification number 336 + * Input: pathid - u16 , Path identification number 337 337 * msglim_reqstd - u16, The number of outstanding messages requested. 338 338 * user_data - uchar[16], Data specified by the iucv_connect function. 339 339 * flags1 - int, Contains options for this path. ··· 358 358 void *pgm_data, int *flags1_out, u16 * msglim); 359 359 360 360 /* 361 - * Name: iucv_connect 361 + * Name: iucv_connect 362 362 * Purpose: This function establishes an IUCV path. Although the connect may complete 363 - * successfully, you are not able to use the path until you receive an IUCV 364 - * Connection Complete external interrupt. 365 - * Input: pathid - u16 *, Path identification number 366 - * msglim_reqstd - u16, Number of outstanding messages requested 367 - * user_data - uchar[16], 16-byte user data 363 + * successfully, you are not able to use the path until you receive an IUCV 364 + * Connection Complete external interrupt. 
365 + * Input: pathid - u16 *, Path identification number 366 + * msglim_reqstd - u16, Number of outstanding messages requested 367 + * user_data - uchar[16], 16-byte user data 368 368 * userid - uchar[8], User identification 369 - * system_name - uchar[8], 8-byte identifying the system name 369 + * system_name - uchar[8], 8-byte identifying the system name 370 370 * flags1 - int, Contains options for this path. 371 371 * -IPPRTY - 0x20, Specifies if you want to send priority message. 372 372 * -IPRMDATA - 0x80, Specifies whether your program can handle a message 373 373 * in the parameter list. 374 - * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being 374 + * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being 375 375 * established. 376 - * -IPLOCAL - 0X01, Allows an application to force the partner to be on 376 + * -IPLOCAL - 0X01, Allows an application to force the partner to be on 377 377 * the local system. If local is specified then target class cannot be 378 - * specified. 378 + * specified. 379 379 * flags1_out - int * Contains information about the path 380 380 * - IPPRTY - 0x20, Indicates you may send priority messages. 
381 381 * msglim - * u16, Number of outstanding messages 382 - * handle - iucv_handle_t, Address of handler 383 - * pgm_data - void *, Application data passed to interrupt handlers 382 + * handle - iucv_handle_t, Address of handler 383 + * pgm_data - void *, Application data passed to interrupt handlers 384 384 * Output: return code from CP IUCV call 385 385 * rc - return code from iucv_declare_buffer 386 - * -EINVAL - Invalid handle passed by application 387 - * -EINVAL - Pathid address is NULL 388 - * add_pathid_result - Return code from internal function add_pathid 386 + * -EINVAL - Invalid handle passed by application 387 + * -EINVAL - Pathid address is NULL 388 + * add_pathid_result - Return code from internal function add_pathid 389 389 */ 390 390 int 391 391 iucv_connect (u16 * pathid, ··· 397 397 int *flags1_out, 398 398 u16 * msglim, iucv_handle_t handle, void *pgm_data); 399 399 400 - /* 401 - * Name: iucv_purge 402 - * Purpose: This function cancels a message that you have sent. 403 - * Input: pathid - Path identification number. 400 + /* 401 + * Name: iucv_purge 402 + * Purpose: This function cancels a message that you have sent. 403 + * Input: pathid - Path identification number. 404 404 * msgid - Specifies the message ID of the message to be purged. 405 - * srccls - Specifies the source message class. 406 - * Output: audit - Contains information about asynchronous error 407 - * that may have affected the normal completion 408 - * of this message. 409 - * Return: Return code from CP IUCV call. 405 + * srccls - Specifies the source message class. 406 + * Output: audit - Contains information about asynchronous error 407 + * that may have affected the normal completion 408 + * of this message. 409 + * Return: Return code from CP IUCV call. 
410 410 */ 411 411 int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit); 412 412 /* ··· 426 426 */ 427 427 ulong iucv_query_bufsize (void); 428 428 429 - /* 430 - * Name: iucv_quiesce 431 - * Purpose: This function temporarily suspends incoming messages on an 432 - * IUCV path. You can later reactivate the path by invoking 433 - * the iucv_resume function. 434 - * Input: pathid - Path identification number 435 - * user_data - 16-bytes of user data 436 - * Output: NA 437 - * Return: Return code from CP IUCV call. 429 + /* 430 + * Name: iucv_quiesce 431 + * Purpose: This function temporarily suspends incoming messages on an 432 + * IUCV path. You can later reactivate the path by invoking 433 + * the iucv_resume function. 434 + * Input: pathid - Path identification number 435 + * user_data - 16-bytes of user data 436 + * Output: NA 437 + * Return: Return code from CP IUCV call. 438 438 */ 439 439 int iucv_quiesce (u16 pathid, uchar user_data[16]); 440 440 441 - /* 442 - * Name: iucv_receive 443 - * Purpose: This function receives messages that are being sent to you 441 + /* 442 + * Name: iucv_receive 443 + * Purpose: This function receives messages that are being sent to you 444 444 * over established paths. Data will be returned in buffer for length of 445 445 * buflen. 446 - * Input: 447 - * pathid - Path identification number. 448 - * buffer - Address of buffer to receive. 449 - * buflen - Length of buffer to receive. 450 - * msgid - Specifies the message ID. 451 - * trgcls - Specifies target class. 452 - * Output: 446 + * Input: 447 + * pathid - Path identification number. 448 + * buffer - Address of buffer to receive. 449 + * buflen - Length of buffer to receive. 450 + * msgid - Specifies the message ID. 451 + * trgcls - Specifies target class. 452 + * Output: 453 453 * flags1_out: int *, Contains information about this path. 454 454 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is 455 - * expected. 
456 - * IPPRTY - 0x20 Specifies if you want to send priority message. 455 + * expected. 456 + * IPPRTY - 0x20 Specifies if you want to send priority message. 457 457 * IPRMDATA - 0x80 specifies the data is contained in the parameter list 458 458 * residual_buffer - address of buffer updated by the number 459 459 * of bytes you have received. 460 - * residual_length - 460 + * residual_length - 461 461 * Contains one of the following values, if the receive buffer is: 462 462 * The same length as the message, this field is zero. 463 463 * Longer than the message, this field contains the number of ··· 466 466 * count (that is, the number of bytes remaining in the 467 467 * message that does not fit into the buffer. In this 468 468 * case b2f0_result = 5. 469 - * Return: Return code from CP IUCV call. 470 - * (-EINVAL) - buffer address is pointing to NULL 469 + * Return: Return code from CP IUCV call. 470 + * (-EINVAL) - buffer address is pointing to NULL 471 471 */ 472 472 int iucv_receive (u16 pathid, 473 473 u32 msgid, ··· 477 477 int *flags1_out, 478 478 ulong * residual_buffer, ulong * residual_length); 479 479 480 - /* 481 - * Name: iucv_receive_array 482 - * Purpose: This function receives messages that are being sent to you 480 + /* 481 + * Name: iucv_receive_array 482 + * Purpose: This function receives messages that are being sent to you 483 483 * over established paths. Data will be returned in first buffer for 484 484 * length of first buffer. 485 - * Input: pathid - Path identification number. 485 + * Input: pathid - Path identification number. 486 486 * msgid - specifies the message ID. 487 487 * trgcls - Specifies target class. 488 - * buffer - Address of array of buffers. 489 - * buflen - Total length of buffers. 488 + * buffer - Address of array of buffers. 489 + * buflen - Total length of buffers. 490 490 * Output: 491 491 * flags1_out: int *, Contains information about this path. 
492 492 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is ··· 504 504 * count (that is, the number of bytes remaining in the 505 505 * message that does not fit into the buffer. In this 506 506 * case b2f0_result = 5. 507 - * Return: Return code from CP IUCV call. 508 - * (-EINVAL) - Buffer address is NULL. 507 + * Return: Return code from CP IUCV call. 508 + * (-EINVAL) - Buffer address is NULL. 509 509 */ 510 510 int iucv_receive_array (u16 pathid, 511 511 u32 msgid, ··· 515 515 int *flags1_out, 516 516 ulong * residual_buffer, ulong * residual_length); 517 517 518 - /* 519 - * Name: iucv_reject 520 - * Purpose: The reject function refuses a specified message. Between the 521 - * time you are notified of a message and the time that you 522 - * complete the message, the message may be rejected. 523 - * Input: pathid - Path identification number. 524 - * msgid - Specifies the message ID. 525 - * trgcls - Specifies target class. 526 - * Output: NA 527 - * Return: Return code from CP IUCV call. 518 + /* 519 + * Name: iucv_reject 520 + * Purpose: The reject function refuses a specified message. Between the 521 + * time you are notified of a message and the time that you 522 + * complete the message, the message may be rejected. 523 + * Input: pathid - Path identification number. 524 + * msgid - Specifies the message ID. 525 + * trgcls - Specifies target class. 526 + * Output: NA 527 + * Return: Return code from CP IUCV call. 528 528 */ 529 529 int iucv_reject (u16 pathid, u32 msgid, u32 trgcls); 530 530 531 - /* 532 - * Name: iucv_reply 533 - * Purpose: This function responds to the two-way messages that you 534 - * receive. You must identify completely the message to 535 - * which you wish to reply. ie, pathid, msgid, and trgcls. 536 - * Input: pathid - Path identification number. 537 - * msgid - Specifies the message ID. 538 - * trgcls - Specifies target class. 
531 + /* 532 + * Name: iucv_reply 533 + * Purpose: This function responds to the two-way messages that you 534 + * receive. You must identify completely the message to 535 + * which you wish to reply. ie, pathid, msgid, and trgcls. 536 + * Input: pathid - Path identification number. 537 + * msgid - Specifies the message ID. 538 + * trgcls - Specifies target class. 539 539 * flags1 - Option for path. 540 - * IPPRTY- 0x20, Specifies if you want to send priority message. 541 - * buffer - Address of reply buffer. 542 - * buflen - Length of reply buffer. 543 - * Output: residual_buffer - Address of buffer updated by the number 544 - * of bytes you have moved. 540 + * IPPRTY- 0x20, Specifies if you want to send priority message. 541 + * buffer - Address of reply buffer. 542 + * buflen - Length of reply buffer. 543 + * Output: residual_buffer - Address of buffer updated by the number 544 + * of bytes you have moved. 545 545 * residual_length - Contains one of the following values: 546 546 * If the answer buffer is the same length as the reply, this field 547 547 * contains zero. 548 548 * If the answer buffer is longer than the reply, this field contains 549 - * the number of bytes remaining in the buffer. 549 + * the number of bytes remaining in the buffer. 550 550 * If the answer buffer is shorter than the reply, this field contains 551 551 * a residual count (that is, the number of bytes remianing in the 552 552 * reply that does not fit into the buffer. In this 553 553 * case b2f0_result = 5. 554 - * Return: Return code from CP IUCV call. 555 - * (-EINVAL) - Buffer address is NULL. 554 + * Return: Return code from CP IUCV call. 555 + * (-EINVAL) - Buffer address is NULL. 556 556 */ 557 557 int iucv_reply (u16 pathid, 558 558 u32 msgid, ··· 561 561 void *buffer, ulong buflen, ulong * residual_buffer, 562 562 ulong * residual_length); 563 563 564 - /* 565 - * Name: iucv_reply_array 566 - * Purpose: This function responds to the two-way messages that you 567 - * receive. 
You must identify completely the message to 568 - * which you wish to reply. ie, pathid, msgid, and trgcls. 569 - * The array identifies a list of addresses and lengths of 570 - * discontiguous buffers that contains the reply data. 571 - * Input: pathid - Path identification number 572 - * msgid - Specifies the message ID. 573 - * trgcls - Specifies target class. 564 + /* 565 + * Name: iucv_reply_array 566 + * Purpose: This function responds to the two-way messages that you 567 + * receive. You must identify completely the message to 568 + * which you wish to reply. ie, pathid, msgid, and trgcls. 569 + * The array identifies a list of addresses and lengths of 570 + * discontiguous buffers that contains the reply data. 571 + * Input: pathid - Path identification number 572 + * msgid - Specifies the message ID. 573 + * trgcls - Specifies target class. 574 574 * flags1 - Option for path. 575 575 * IPPRTY- 0x20, Specifies if you want to send priority message. 576 - * buffer - Address of array of reply buffers. 577 - * buflen - Total length of reply buffers. 576 + * buffer - Address of array of reply buffers. 577 + * buflen - Total length of reply buffers. 578 578 * Output: residual_buffer - Address of buffer which IUCV is currently working on. 579 579 * residual_length - Contains one of the following values: 580 580 * If the answer buffer is the same length as the reply, this field ··· 585 585 * a residual count (that is, the number of bytes remianing in the 586 586 * reply that does not fit into the buffer. In this 587 587 * case b2f0_result = 5. 588 - * Return: Return code from CP IUCV call. 589 - * (-EINVAL) - Buffer address is NULL. 588 + * Return: Return code from CP IUCV call. 589 + * (-EINVAL) - Buffer address is NULL. 
590 590 */ 591 591 int iucv_reply_array (u16 pathid, 592 592 u32 msgid, ··· 596 596 ulong buflen, ulong * residual_address, 597 597 ulong * residual_length); 598 598 599 - /* 600 - * Name: iucv_reply_prmmsg 601 - * Purpose: This function responds to the two-way messages that you 602 - * receive. You must identify completely the message to 603 - * which you wish to reply. ie, pathid, msgid, and trgcls. 604 - * Prmmsg signifies the data is moved into the 605 - * parameter list. 606 - * Input: pathid - Path identification number. 607 - * msgid - Specifies the message ID. 608 - * trgcls - Specifies target class. 599 + /* 600 + * Name: iucv_reply_prmmsg 601 + * Purpose: This function responds to the two-way messages that you 602 + * receive. You must identify completely the message to 603 + * which you wish to reply. ie, pathid, msgid, and trgcls. 604 + * Prmmsg signifies the data is moved into the 605 + * parameter list. 606 + * Input: pathid - Path identification number. 607 + * msgid - Specifies the message ID. 608 + * trgcls - Specifies target class. 609 609 * flags1 - Option for path. 610 610 * IPPRTY- 0x20 Specifies if you want to send priority message. 611 - * prmmsg - 8-bytes of data to be placed into the parameter. 612 - * list. 613 - * Output: NA 614 - * Return: Return code from CP IUCV call. 611 + * prmmsg - 8-bytes of data to be placed into the parameter. 612 + * list. 613 + * Output: NA 614 + * Return: Return code from CP IUCV call. 615 615 */ 616 616 int iucv_reply_prmmsg (u16 pathid, 617 617 u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]); 618 618 619 - /* 620 - * Name: iucv_resume 621 - * Purpose: This function restores communications over a quiesced path 622 - * Input: pathid - Path identification number. 623 - * user_data - 16-bytes of user data. 624 - * Output: NA 625 - * Return: Return code from CP IUCV call. 
619 + /* 620 + * Name: iucv_resume 621 + * Purpose: This function restores communications over a quiesced path 622 + * Input: pathid - Path identification number. 623 + * user_data - 16-bytes of user data. 624 + * Output: NA 625 + * Return: Return code from CP IUCV call. 626 626 */ 627 627 int iucv_resume (u16 pathid, uchar user_data[16]); 628 628 629 - /* 630 - * Name: iucv_send 631 - * Purpose: This function transmits data to another application. 632 - * Data to be transmitted is in a buffer and this is a 633 - * one-way message and the receiver will not reply to the 634 - * message. 635 - * Input: pathid - Path identification number. 636 - * trgcls - Specifies target class. 637 - * srccls - Specifies the source message class. 638 - * msgtag - Specifies a tag to be associated with the message. 629 + /* 630 + * Name: iucv_send 631 + * Purpose: This function transmits data to another application. 632 + * Data to be transmitted is in a buffer and this is a 633 + * one-way message and the receiver will not reply to the 634 + * message. 635 + * Input: pathid - Path identification number. 636 + * trgcls - Specifies target class. 637 + * srccls - Specifies the source message class. 638 + * msgtag - Specifies a tag to be associated with the message. 639 639 * flags1 - Option for path. 640 640 * IPPRTY- 0x20 Specifies if you want to send priority message. 641 - * buffer - Address of send buffer. 642 - * buflen - Length of send buffer. 643 - * Output: msgid - Specifies the message ID. 644 - * Return: Return code from CP IUCV call. 645 - * (-EINVAL) - Buffer address is NULL. 641 + * buffer - Address of send buffer. 642 + * buflen - Length of send buffer. 643 + * Output: msgid - Specifies the message ID. 644 + * Return: Return code from CP IUCV call. 645 + * (-EINVAL) - Buffer address is NULL. 
646 646 */ 647 647 int iucv_send (u16 pathid, 648 648 u32 * msgid, 649 649 u32 trgcls, 650 650 u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen); 651 651 652 - /* 653 - * Name: iucv_send_array 654 - * Purpose: This function transmits data to another application. 655 - * The contents of buffer is the address of the array of 656 - * addresses and lengths of discontiguous buffers that hold 657 - * the message text. This is a one-way message and the 658 - * receiver will not reply to the message. 659 - * Input: pathid - Path identification number. 660 - * trgcls - Specifies target class. 661 - * srccls - Specifies the source message class. 652 + /* 653 + * Name: iucv_send_array 654 + * Purpose: This function transmits data to another application. 655 + * The contents of buffer is the address of the array of 656 + * addresses and lengths of discontiguous buffers that hold 657 + * the message text. This is a one-way message and the 658 + * receiver will not reply to the message. 659 + * Input: pathid - Path identification number. 660 + * trgcls - Specifies target class. 661 + * srccls - Specifies the source message class. 662 662 * msgtag - Specifies a tag to be associated witht the message. 663 663 * flags1 - Option for path. 664 - * IPPRTY- specifies if you want to send priority message. 665 - * buffer - Address of array of send buffers. 666 - * buflen - Total length of send buffers. 667 - * Output: msgid - Specifies the message ID. 668 - * Return: Return code from CP IUCV call. 669 - * (-EINVAL) - Buffer address is NULL. 664 + * IPPRTY- specifies if you want to send priority message. 665 + * buffer - Address of array of send buffers. 666 + * buflen - Total length of send buffers. 667 + * Output: msgid - Specifies the message ID. 668 + * Return: Return code from CP IUCV call. 669 + * (-EINVAL) - Buffer address is NULL. 
670 670 */ 671 671 int iucv_send_array (u16 pathid, 672 672 u32 * msgid, ··· 675 675 u32 msgtag, 676 676 int flags1, iucv_array_t * buffer, ulong buflen); 677 677 678 - /* 679 - * Name: iucv_send_prmmsg 680 - * Purpose: This function transmits data to another application. 681 - * Prmmsg specifies that the 8-bytes of data are to be moved 682 - * into the parameter list. This is a one-way message and the 683 - * receiver will not reply to the message. 684 - * Input: pathid - Path identification number. 685 - * trgcls - Specifies target class. 686 - * srccls - Specifies the source message class. 687 - * msgtag - Specifies a tag to be associated with the message. 678 + /* 679 + * Name: iucv_send_prmmsg 680 + * Purpose: This function transmits data to another application. 681 + * Prmmsg specifies that the 8-bytes of data are to be moved 682 + * into the parameter list. This is a one-way message and the 683 + * receiver will not reply to the message. 684 + * Input: pathid - Path identification number. 685 + * trgcls - Specifies target class. 686 + * srccls - Specifies the source message class. 687 + * msgtag - Specifies a tag to be associated with the message. 688 688 * flags1 - Option for path. 689 689 * IPPRTY- 0x20 specifies if you want to send priority message. 690 - * prmmsg - 8-bytes of data to be placed into parameter list. 691 - * Output: msgid - Specifies the message ID. 692 - * Return: Return code from CP IUCV call. 690 + * prmmsg - 8-bytes of data to be placed into parameter list. 691 + * Output: msgid - Specifies the message ID. 692 + * Return: Return code from CP IUCV call. 693 693 */ 694 694 int iucv_send_prmmsg (u16 pathid, 695 695 u32 * msgid, 696 696 u32 trgcls, 697 697 u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]); 698 698 699 - /* 700 - * Name: iucv_send2way 701 - * Purpose: This function transmits data to another application. 702 - * Data to be transmitted is in a buffer. 
The receiver 703 - * of the send is expected to reply to the message and 704 - * a buffer is provided into which IUCV moves the reply 705 - * to this message. 706 - * Input: pathid - Path identification number. 707 - * trgcls - Specifies target class. 708 - * srccls - Specifies the source message class. 709 - * msgtag - Specifies a tag associated with the message. 699 + /* 700 + * Name: iucv_send2way 701 + * Purpose: This function transmits data to another application. 702 + * Data to be transmitted is in a buffer. The receiver 703 + * of the send is expected to reply to the message and 704 + * a buffer is provided into which IUCV moves the reply 705 + * to this message. 706 + * Input: pathid - Path identification number. 707 + * trgcls - Specifies target class. 708 + * srccls - Specifies the source message class. 709 + * msgtag - Specifies a tag associated with the message. 710 710 * flags1 - Option for path. 711 711 * IPPRTY- 0x20 Specifies if you want to send priority message. 712 - * buffer - Address of send buffer. 713 - * buflen - Length of send buffer. 714 - * ansbuf - Address of buffer into which IUCV moves the reply of 715 - * this message. 716 - * anslen - Address of length of buffer. 717 - * Output: msgid - Specifies the message ID. 718 - * Return: Return code from CP IUCV call. 719 - * (-EINVAL) - Buffer or ansbuf address is NULL. 712 + * buffer - Address of send buffer. 713 + * buflen - Length of send buffer. 714 + * ansbuf - Address of buffer into which IUCV moves the reply of 715 + * this message. 716 + * anslen - Address of length of buffer. 717 + * Output: msgid - Specifies the message ID. 718 + * Return: Return code from CP IUCV call. 719 + * (-EINVAL) - Buffer or ansbuf address is NULL. 
720 720 */ 721 721 int iucv_send2way (u16 pathid, 722 722 u32 * msgid, ··· 726 726 int flags1, 727 727 void *buffer, ulong buflen, void *ansbuf, ulong anslen); 728 728 729 - /* 730 - * Name: iucv_send2way_array 731 - * Purpose: This function transmits data to another application. 732 - * The contents of buffer is the address of the array of 733 - * addresses and lengths of discontiguous buffers that hold 734 - * the message text. The receiver of the send is expected to 735 - * reply to the message and a buffer is provided into which 736 - * IUCV moves the reply to this message. 737 - * Input: pathid - Path identification number. 738 - * trgcls - Specifies target class. 739 - * srccls - Specifies the source message class. 740 - * msgtag - Specifies a tag to be associated with the message. 729 + /* 730 + * Name: iucv_send2way_array 731 + * Purpose: This function transmits data to another application. 732 + * The contents of buffer is the address of the array of 733 + * addresses and lengths of discontiguous buffers that hold 734 + * the message text. The receiver of the send is expected to 735 + * reply to the message and a buffer is provided into which 736 + * IUCV moves the reply to this message. 737 + * Input: pathid - Path identification number. 738 + * trgcls - Specifies target class. 739 + * srccls - Specifies the source message class. 740 + * msgtag - Specifies a tag to be associated with the message. 741 741 * flags1 - Option for path. 742 742 * IPPRTY- 0x20 Specifies if you want to send priority message. 743 - * buffer - Sddress of array of send buffers. 744 - * buflen - Total length of send buffers. 745 - * ansbuf - Address of array of buffer into which IUCV moves the reply 746 - * of this message. 747 - * anslen - Address of length reply buffers. 748 - * Output: msgid - Specifies the message ID. 749 - * Return: Return code from CP IUCV call. 750 - * (-EINVAL) - Buffer address is NULL. 743 + * buffer - Sddress of array of send buffers. 
744 + * buflen - Total length of send buffers. 745 + * ansbuf - Address of array of buffer into which IUCV moves the reply 746 + * of this message. 747 + * anslen - Address of length reply buffers. 748 + * Output: msgid - Specifies the message ID. 749 + * Return: Return code from CP IUCV call. 750 + * (-EINVAL) - Buffer address is NULL. 751 751 */ 752 752 int iucv_send2way_array (u16 pathid, 753 753 u32 * msgid, ··· 758 758 iucv_array_t * buffer, 759 759 ulong buflen, iucv_array_t * ansbuf, ulong anslen); 760 760 761 - /* 762 - * Name: iucv_send2way_prmmsg 763 - * Purpose: This function transmits data to another application. 764 - * Prmmsg specifies that the 8-bytes of data are to be moved 765 - * into the parameter list. This is a two-way message and the 766 - * receiver of the message is expected to reply. A buffer 767 - * is provided into which IUCV moves the reply to this 768 - * message. 769 - * Input: pathid - Rath identification number. 770 - * trgcls - Specifies target class. 771 - * srccls - Specifies the source message class. 772 - * msgtag - Specifies a tag to be associated with the message. 761 + /* 762 + * Name: iucv_send2way_prmmsg 763 + * Purpose: This function transmits data to another application. 764 + * Prmmsg specifies that the 8-bytes of data are to be moved 765 + * into the parameter list. This is a two-way message and the 766 + * receiver of the message is expected to reply. A buffer 767 + * is provided into which IUCV moves the reply to this 768 + * message. 769 + * Input: pathid - Rath identification number. 770 + * trgcls - Specifies target class. 771 + * srccls - Specifies the source message class. 772 + * msgtag - Specifies a tag to be associated with the message. 773 773 * flags1 - Option for path. 774 774 * IPPRTY- 0x20 Specifies if you want to send priority message. 775 - * prmmsg - 8-bytes of data to be placed in parameter list. 
776 - * ansbuf - Address of buffer into which IUCV moves the reply of 775 + * prmmsg - 8-bytes of data to be placed in parameter list. 776 + * ansbuf - Address of buffer into which IUCV moves the reply of 777 777 * this message. 778 - * anslen - Address of length of buffer. 779 - * Output: msgid - Specifies the message ID. 780 - * Return: Return code from CP IUCV call. 781 - * (-EINVAL) - Buffer address is NULL. 778 + * anslen - Address of length of buffer. 779 + * Output: msgid - Specifies the message ID. 780 + * Return: Return code from CP IUCV call. 781 + * (-EINVAL) - Buffer address is NULL. 782 782 */ 783 783 int iucv_send2way_prmmsg (u16 pathid, 784 784 u32 * msgid, ··· 788 788 ulong flags1, 789 789 uchar prmmsg[8], void *ansbuf, ulong anslen); 790 790 791 - /* 792 - * Name: iucv_send2way_prmmsg_array 793 - * Purpose: This function transmits data to another application. 794 - * Prmmsg specifies that the 8-bytes of data are to be moved 795 - * into the parameter list. This is a two-way message and the 796 - * receiver of the message is expected to reply. A buffer 797 - * is provided into which IUCV moves the reply to this 798 - * message. The contents of ansbuf is the address of the 799 - * array of addresses and lengths of discontiguous buffers 800 - * that contain the reply. 801 - * Input: pathid - Path identification number. 802 - * trgcls - Specifies target class. 803 - * srccls - Specifies the source message class. 804 - * msgtag - Specifies a tag to be associated with the message. 791 + /* 792 + * Name: iucv_send2way_prmmsg_array 793 + * Purpose: This function transmits data to another application. 794 + * Prmmsg specifies that the 8-bytes of data are to be moved 795 + * into the parameter list. This is a two-way message and the 796 + * receiver of the message is expected to reply. A buffer 797 + * is provided into which IUCV moves the reply to this 798 + * message. 
The contents of ansbuf is the address of the 799 + * array of addresses and lengths of discontiguous buffers 800 + * that contain the reply. 801 + * Input: pathid - Path identification number. 802 + * trgcls - Specifies target class. 803 + * srccls - Specifies the source message class. 804 + * msgtag - Specifies a tag to be associated with the message. 805 805 * flags1 - Option for path. 806 806 * IPPRTY- 0x20 specifies if you want to send priority message. 807 - * prmmsg - 8-bytes of data to be placed into the parameter list. 807 + * prmmsg - 8-bytes of data to be placed into the parameter list. 808 808 * ansbuf - Address of array of buffer into which IUCV moves the reply 809 - * of this message. 810 - * anslen - Address of length of reply buffers. 811 - * Output: msgid - Specifies the message ID. 812 - * Return: Return code from CP IUCV call. 813 - * (-EINVAL) - Ansbuf address is NULL. 809 + * of this message. 810 + * anslen - Address of length of reply buffers. 811 + * Output: msgid - Specifies the message ID. 812 + * Return: Return code from CP IUCV call. 813 + * (-EINVAL) - Ansbuf address is NULL. 814 814 */ 815 815 int iucv_send2way_prmmsg_array (u16 pathid, 816 816 u32 * msgid, ··· 821 821 uchar prmmsg[8], 822 822 iucv_array_t * ansbuf, ulong anslen); 823 823 824 - /* 825 - * Name: iucv_setmask 826 - * Purpose: This function enables or disables the following IUCV 827 - * external interruptions: Nonpriority and priority message 828 - * interrupts, nonpriority and priority reply interrupts. 824 + /* 825 + * Name: iucv_setmask 826 + * Purpose: This function enables or disables the following IUCV 827 + * external interruptions: Nonpriority and priority message 828 + * interrupts, nonpriority and priority reply interrupts. 
829 829 * Input: SetMaskFlag - options for interrupts 830 - * 0x80 - Nonpriority_MessagePendingInterruptsFlag 831 - * 0x40 - Priority_MessagePendingInterruptsFlag 832 - * 0x20 - Nonpriority_MessageCompletionInterruptsFlag 833 - * 0x10 - Priority_MessageCompletionInterruptsFlag 830 + * 0x80 - Nonpriority_MessagePendingInterruptsFlag 831 + * 0x40 - Priority_MessagePendingInterruptsFlag 832 + * 0x20 - Nonpriority_MessageCompletionInterruptsFlag 833 + * 0x10 - Priority_MessageCompletionInterruptsFlag 834 834 * 0x08 - IUCVControlInterruptsFlag 835 - * Output: NA 836 - * Return: Return code from CP IUCV call. 835 + * Output: NA 836 + * Return: Return code from CP IUCV call. 837 837 */ 838 838 int iucv_setmask (int SetMaskFlag); 839 839 840 - /* 841 - * Name: iucv_sever 842 - * Purpose: This function terminates an IUCV path. 843 - * Input: pathid - Path identification number. 844 - * user_data - 16-bytes of user data. 845 - * Output: NA 846 - * Return: Return code from CP IUCV call. 847 - * (-EINVAL) - Interal error, wild pointer. 840 + /* 841 + * Name: iucv_sever 842 + * Purpose: This function terminates an IUCV path. 843 + * Input: pathid - Path identification number. 844 + * user_data - 16-bytes of user data. 845 + * Output: NA 846 + * Return: Return code from CP IUCV call. 847 + * (-EINVAL) - Interal error, wild pointer. 848 848 */ 849 849 int iucv_sever (u16 pathid, uchar user_data[16]);
+173 -172
drivers/s390/net/lcs.c
··· 68 68 static void lcs_start_kernel_thread(struct lcs_card *card); 69 69 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); 70 70 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); 71 + static int lcs_recovery(void *ptr); 71 72 72 73 /** 73 74 * Debug Facility Stuff ··· 430 429 card->tx_buffer = NULL; 431 430 card->tx_emitted = 0; 432 431 433 - /* Initialize kernel thread task used for LGW commands. */ 434 - INIT_WORK(&card->kernel_thread_starter, 435 - (void *)lcs_start_kernel_thread,card); 436 - card->thread_start_mask = 0; 437 - card->thread_allowed_mask = 0; 438 - card->thread_running_mask = 0; 439 432 init_waitqueue_head(&card->wait_q); 440 433 spin_lock_init(&card->lock); 441 434 spin_lock_init(&card->ipm_lock); ··· 670 675 int index, rc; 671 676 672 677 LCS_DBF_TEXT(5, trace, "rdybuff"); 673 - BUG_ON(buffer->state != BUF_STATE_LOCKED && 674 - buffer->state != BUF_STATE_PROCESSED); 678 + if (buffer->state != BUF_STATE_LOCKED && 679 + buffer->state != BUF_STATE_PROCESSED) 680 + BUG(); 675 681 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 676 682 buffer->state = BUF_STATE_READY; 677 683 index = buffer - channel->iob; ··· 696 700 int index, prev, next; 697 701 698 702 LCS_DBF_TEXT(5, trace, "prcsbuff"); 699 - BUG_ON(buffer->state != BUF_STATE_READY); 703 + if (buffer->state != BUF_STATE_READY) 704 + BUG(); 700 705 buffer->state = BUF_STATE_PROCESSED; 701 706 index = buffer - channel->iob; 702 707 prev = (index - 1) & (LCS_NUM_BUFFS - 1); ··· 729 732 unsigned long flags; 730 733 731 734 LCS_DBF_TEXT(5, trace, "relbuff"); 732 - BUG_ON(buffer->state != BUF_STATE_LOCKED && 733 - buffer->state != BUF_STATE_PROCESSED); 735 + if (buffer->state != BUF_STATE_LOCKED && 736 + buffer->state != BUF_STATE_PROCESSED) 737 + BUG(); 734 738 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 735 739 buffer->state = BUF_STATE_EMPTY; 736 740 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); ··· 1145 
1147 list_add_tail(&ipm->list, &card->ipm_list); 1146 1148 } 1147 1149 spin_unlock_irqrestore(&card->ipm_lock, flags); 1148 - if (card->state == DEV_STATE_UP) 1149 - netif_wake_queue(card->dev); 1150 1150 } 1151 1151 1152 1152 /** ··· 1227 1231 if (ipm != NULL) 1228 1232 continue; /* Address already in list. */ 1229 1233 ipm = (struct lcs_ipm_list *) 1230 - kmalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); 1234 + kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); 1231 1235 if (ipm == NULL) { 1232 1236 PRINT_INFO("Not enough memory to add " 1233 1237 "new multicast entry!\n"); 1234 1238 break; 1235 1239 } 1236 - memset(ipm, 0, sizeof(struct lcs_ipm_list)); 1237 1240 memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); 1238 1241 ipm->ipm.ip_addr = im4->multiaddr; 1239 1242 ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED; 1240 1243 spin_lock_irqsave(&card->ipm_lock, flags); 1244 + LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4); 1241 1245 list_add(&ipm->list, &card->ipm_list); 1242 1246 spin_unlock_irqrestore(&card->ipm_lock, flags); 1243 1247 } ··· 1265 1269 read_unlock(&in4_dev->mc_list_lock); 1266 1270 in_dev_put(in4_dev); 1267 1271 1272 + netif_carrier_off(card->dev); 1273 + netif_tx_disable(card->dev); 1274 + wait_event(card->write.wait_q, 1275 + (card->write.state != CH_STATE_RUNNING)); 1268 1276 lcs_fix_multicast_list(card); 1277 + if (card->state == DEV_STATE_UP) { 1278 + netif_carrier_on(card->dev); 1279 + netif_wake_queue(card->dev); 1280 + } 1269 1281 out: 1270 1282 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); 1271 1283 return 0; ··· 1290 1286 LCS_DBF_TEXT(4, trace, "setmulti"); 1291 1287 card = (struct lcs_card *) dev->priv; 1292 1288 1293 - if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) 1289 + if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) 1294 1290 schedule_work(&card->kernel_thread_starter); 1295 1291 } 1296 1292 ··· 1322 1318 return PTR_ERR(irb); 1323 1319 } 1324 1320 1321 + static int 1322 + lcs_get_problem(struct ccw_device *cdev, struct 
irb *irb) 1323 + { 1324 + int dstat, cstat; 1325 + char *sense; 1326 + 1327 + sense = (char *) irb->ecw; 1328 + cstat = irb->scsw.cstat; 1329 + dstat = irb->scsw.dstat; 1330 + 1331 + if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 1332 + SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 1333 + SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { 1334 + LCS_DBF_TEXT(2, trace, "CGENCHK"); 1335 + return 1; 1336 + } 1337 + if (dstat & DEV_STAT_UNIT_CHECK) { 1338 + if (sense[LCS_SENSE_BYTE_1] & 1339 + LCS_SENSE_RESETTING_EVENT) { 1340 + LCS_DBF_TEXT(2, trace, "REVIND"); 1341 + return 1; 1342 + } 1343 + if (sense[LCS_SENSE_BYTE_0] & 1344 + LCS_SENSE_CMD_REJECT) { 1345 + LCS_DBF_TEXT(2, trace, "CMDREJ"); 1346 + return 0; 1347 + } 1348 + if ((!sense[LCS_SENSE_BYTE_0]) && 1349 + (!sense[LCS_SENSE_BYTE_1]) && 1350 + (!sense[LCS_SENSE_BYTE_2]) && 1351 + (!sense[LCS_SENSE_BYTE_3])) { 1352 + LCS_DBF_TEXT(2, trace, "ZEROSEN"); 1353 + return 0; 1354 + } 1355 + LCS_DBF_TEXT(2, trace, "DGENCHK"); 1356 + return 1; 1357 + } 1358 + return 0; 1359 + } 1360 + 1361 + void 1362 + lcs_schedule_recovery(struct lcs_card *card) 1363 + { 1364 + LCS_DBF_TEXT(2, trace, "startrec"); 1365 + if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD)) 1366 + schedule_work(&card->kernel_thread_starter); 1367 + } 1325 1368 1326 1369 /** 1327 1370 * IRQ Handler for LCS channels ··· 1378 1327 { 1379 1328 struct lcs_card *card; 1380 1329 struct lcs_channel *channel; 1381 - int index; 1330 + int rc, index; 1331 + int cstat, dstat; 1382 1332 1383 1333 if (lcs_check_irb_error(cdev, irb)) 1384 1334 return; ··· 1390 1338 else 1391 1339 channel = &card->write; 1392 1340 1341 + cstat = irb->scsw.cstat; 1342 + dstat = irb->scsw.dstat; 1393 1343 LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); 1394 1344 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat); 1395 1345 LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl); 1396 1346 1347 + /* Check for channel and device errors 
presented */ 1348 + rc = lcs_get_problem(cdev, irb); 1349 + if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { 1350 + PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n", 1351 + cdev->dev.bus_id, dstat, cstat); 1352 + if (rc) { 1353 + lcs_schedule_recovery(card); 1354 + wake_up(&card->wait_q); 1355 + return; 1356 + } 1357 + } 1397 1358 /* How far in the ccw chain have we processed? */ 1398 1359 if ((channel->state != CH_STATE_INIT) && 1399 1360 (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { 1400 - index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) 1361 + index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) 1401 1362 - channel->ccws; 1402 1363 if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || 1403 1364 (irb->scsw.cstat & SCHN_STAT_PCI)) ··· 1432 1367 else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) 1433 1368 /* CCW execution stopped on a suspend bit. */ 1434 1369 channel->state = CH_STATE_SUSPENDED; 1435 - 1436 1370 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { 1437 1371 if (irb->scsw.cc != 0) { 1438 1372 ccw_device_halt(channel->ccwdev, (addr_t) channel); ··· 1440 1376 /* The channel has been stopped by halt_IO. 
*/ 1441 1377 channel->state = CH_STATE_HALTED; 1442 1378 } 1443 - 1444 1379 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1445 1380 channel->state = CH_STATE_CLEARED; 1446 1381 } ··· 1515 1452 lcs_release_buffer(channel, buffer); 1516 1453 card = (struct lcs_card *) 1517 1454 ((char *) channel - offsetof(struct lcs_card, write)); 1518 - if (netif_queue_stopped(card->dev)) 1455 + if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev)) 1519 1456 netif_wake_queue(card->dev); 1520 1457 spin_lock(&card->lock); 1521 1458 card->tx_emitted--; ··· 1549 1486 card->stats.tx_dropped++; 1550 1487 card->stats.tx_errors++; 1551 1488 card->stats.tx_carrier_errors++; 1489 + return 0; 1490 + } 1491 + if (skb->protocol == htons(ETH_P_IPV6)) { 1492 + dev_kfree_skb(skb); 1552 1493 return 0; 1553 1494 } 1554 1495 netif_stop_queue(card->dev); ··· 1700 1633 } 1701 1634 1702 1635 /** 1703 - * reset card 1704 - */ 1705 - static int 1706 - lcs_resetcard(struct lcs_card *card) 1707 - { 1708 - int retries; 1709 - 1710 - LCS_DBF_TEXT(2, trace, "rescard"); 1711 - for (retries = 0; retries < 10; retries++) { 1712 - if (lcs_detect(card) == 0) { 1713 - netif_wake_queue(card->dev); 1714 - card->state = DEV_STATE_UP; 1715 - PRINT_INFO("LCS device %s successfully restarted!\n", 1716 - card->dev->name); 1717 - return 0; 1718 - } 1719 - msleep(3000); 1720 - } 1721 - PRINT_ERR("Error in Reseting LCS card!\n"); 1722 - return -EIO; 1723 - } 1724 - 1725 - 1726 - /** 1727 1636 * LCS Stop card 1728 1637 */ 1729 1638 static int ··· 1723 1680 } 1724 1681 1725 1682 /** 1726 - * LGW initiated commands 1727 - */ 1728 - static int 1729 - lcs_lgw_startlan_thread(void *data) 1730 - { 1731 - struct lcs_card *card; 1732 - 1733 - card = (struct lcs_card *) data; 1734 - daemonize("lgwstpln"); 1735 - 1736 - if (!lcs_do_run_thread(card, LCS_STARTLAN_THREAD)) 1737 - return 0; 1738 - LCS_DBF_TEXT(4, trace, "lgwstpln"); 1739 - if (card->dev) 1740 - netif_stop_queue(card->dev); 1741 - if (lcs_startlan(card) == 0) { 
1742 - netif_wake_queue(card->dev); 1743 - card->state = DEV_STATE_UP; 1744 - PRINT_INFO("LCS Startlan for device %s succeeded!\n", 1745 - card->dev->name); 1746 - 1747 - } else 1748 - PRINT_ERR("LCS Startlan for device %s failed!\n", 1749 - card->dev->name); 1750 - lcs_clear_thread_running_bit(card, LCS_STARTLAN_THREAD); 1751 - return 0; 1752 - } 1753 - 1754 - /** 1755 - * Send startup command initiated by Lan Gateway 1756 - */ 1757 - static int 1758 - lcs_lgw_startup_thread(void *data) 1759 - { 1760 - int rc; 1761 - 1762 - struct lcs_card *card; 1763 - 1764 - card = (struct lcs_card *) data; 1765 - daemonize("lgwstaln"); 1766 - 1767 - if (!lcs_do_run_thread(card, LCS_STARTUP_THREAD)) 1768 - return 0; 1769 - LCS_DBF_TEXT(4, trace, "lgwstaln"); 1770 - if (card->dev) 1771 - netif_stop_queue(card->dev); 1772 - rc = lcs_send_startup(card, LCS_INITIATOR_LGW); 1773 - if (rc != 0) { 1774 - PRINT_ERR("Startup for LCS device %s initiated " \ 1775 - "by LGW failed!\nReseting card ...\n", 1776 - card->dev->name); 1777 - /* do a card reset */ 1778 - rc = lcs_resetcard(card); 1779 - if (rc == 0) 1780 - goto Done; 1781 - } 1782 - rc = lcs_startlan(card); 1783 - if (rc == 0) { 1784 - netif_wake_queue(card->dev); 1785 - card->state = DEV_STATE_UP; 1786 - } 1787 - Done: 1788 - if (rc == 0) 1789 - PRINT_INFO("LCS Startup for device %s succeeded!\n", 1790 - card->dev->name); 1791 - else 1792 - PRINT_ERR("LCS Startup for device %s failed!\n", 1793 - card->dev->name); 1794 - lcs_clear_thread_running_bit(card, LCS_STARTUP_THREAD); 1795 - return 0; 1796 - } 1797 - 1798 - 1799 - /** 1800 - * send stoplan command initiated by Lan Gateway 1801 - */ 1802 - static int 1803 - lcs_lgw_stoplan_thread(void *data) 1804 - { 1805 - struct lcs_card *card; 1806 - int rc; 1807 - 1808 - card = (struct lcs_card *) data; 1809 - daemonize("lgwstop"); 1810 - 1811 - if (!lcs_do_run_thread(card, LCS_STOPLAN_THREAD)) 1812 - return 0; 1813 - LCS_DBF_TEXT(4, trace, "lgwstop"); 1814 - if (card->dev) 1815 - 
netif_stop_queue(card->dev); 1816 - if (lcs_send_stoplan(card, LCS_INITIATOR_LGW) == 0) 1817 - PRINT_INFO("Stoplan for %s initiated by LGW succeeded!\n", 1818 - card->dev->name); 1819 - else 1820 - PRINT_ERR("Stoplan %s initiated by LGW failed!\n", 1821 - card->dev->name); 1822 - /*Try to reset the card, stop it on failure */ 1823 - rc = lcs_resetcard(card); 1824 - if (rc != 0) 1825 - rc = lcs_stopcard(card); 1826 - lcs_clear_thread_running_bit(card, LCS_STOPLAN_THREAD); 1827 - return rc; 1828 - } 1829 - 1830 - /** 1831 1683 * Kernel Thread helper functions for LGW initiated commands 1832 1684 */ 1833 1685 static void 1834 1686 lcs_start_kernel_thread(struct lcs_card *card) 1835 1687 { 1836 1688 LCS_DBF_TEXT(5, trace, "krnthrd"); 1837 - if (lcs_do_start_thread(card, LCS_STARTUP_THREAD)) 1838 - kernel_thread(lcs_lgw_startup_thread, (void *) card, SIGCHLD); 1839 - if (lcs_do_start_thread(card, LCS_STARTLAN_THREAD)) 1840 - kernel_thread(lcs_lgw_startlan_thread, (void *) card, SIGCHLD); 1841 - if (lcs_do_start_thread(card, LCS_STOPLAN_THREAD)) 1842 - kernel_thread(lcs_lgw_stoplan_thread, (void *) card, SIGCHLD); 1689 + if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) 1690 + kernel_thread(lcs_recovery, (void *) card, SIGCHLD); 1843 1691 #ifdef CONFIG_IP_MULTICAST 1844 1692 if (lcs_do_start_thread(card, LCS_SET_MC_THREAD)) 1845 - kernel_thread(lcs_register_mc_addresses, (void *) card, SIGCHLD); 1693 + kernel_thread(lcs_register_mc_addresses, 1694 + (void *) card, SIGCHLD); 1846 1695 #endif 1847 1696 } 1848 1697 ··· 1748 1813 if (cmd->initiator == LCS_INITIATOR_LGW) { 1749 1814 switch(cmd->cmd_code) { 1750 1815 case LCS_CMD_STARTUP: 1751 - if (!lcs_set_thread_start_bit(card, 1752 - LCS_STARTUP_THREAD)) 1753 - schedule_work(&card->kernel_thread_starter); 1754 - break; 1755 1816 case LCS_CMD_STARTLAN: 1756 - if (!lcs_set_thread_start_bit(card, 1757 - LCS_STARTLAN_THREAD)) 1758 - schedule_work(&card->kernel_thread_starter); 1817 + lcs_schedule_recovery(card); 1759 1818 
break; 1760 1819 case LCS_CMD_STOPLAN: 1761 - if (!lcs_set_thread_start_bit(card, 1762 - LCS_STOPLAN_THREAD)) 1763 - schedule_work(&card->kernel_thread_starter); 1820 + PRINT_WARN("Stoplan for %s initiated by LGW.\n", 1821 + card->dev->name); 1822 + if (card->dev) 1823 + netif_carrier_off(card->dev); 1764 1824 break; 1765 1825 default: 1766 1826 PRINT_INFO("UNRECOGNIZED LGW COMMAND\n"); ··· 1871 1941 1872 1942 LCS_DBF_TEXT(2, trace, "stopdev"); 1873 1943 card = (struct lcs_card *) dev->priv; 1874 - netif_stop_queue(dev); 1944 + netif_carrier_off(dev); 1945 + netif_tx_disable(dev); 1875 1946 dev->flags &= ~IFF_UP; 1947 + wait_event(card->write.wait_q, 1948 + (card->write.state != CH_STATE_RUNNING)); 1876 1949 rc = lcs_stopcard(card); 1877 1950 if (rc) 1878 1951 PRINT_ERR("Try it again!\n "); ··· 1901 1968 1902 1969 } else { 1903 1970 dev->flags |= IFF_UP; 1971 + netif_carrier_on(dev); 1904 1972 netif_wake_queue(dev); 1905 1973 card->state = DEV_STATE_UP; 1906 1974 } ··· 1993 2059 1994 2060 DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); 1995 2061 2062 + static ssize_t 2063 + lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, 2064 + const char *buf, size_t count) 2065 + { 2066 + struct lcs_card *card = dev->driver_data; 2067 + char *tmp; 2068 + int i; 2069 + 2070 + if (!card) 2071 + return -EINVAL; 2072 + if (card->state != DEV_STATE_UP) 2073 + return -EPERM; 2074 + i = simple_strtoul(buf, &tmp, 16); 2075 + if (i == 1) 2076 + lcs_schedule_recovery(card); 2077 + return count; 2078 + } 2079 + 2080 + static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store); 2081 + 1996 2082 static struct attribute * lcs_attrs[] = { 1997 2083 &dev_attr_portno.attr, 1998 2084 &dev_attr_type.attr, 1999 2085 &dev_attr_lancmd_timeout.attr, 2086 + &dev_attr_recover.attr, 2000 2087 NULL, 2001 2088 }; 2002 2089 ··· 2054 2099 ccwgdev->dev.driver_data = card; 2055 2100 ccwgdev->cdev[0]->handler = lcs_irq; 2056 2101 ccwgdev->cdev[1]->handler = 
lcs_irq; 2102 + card->gdev = ccwgdev; 2103 + INIT_WORK(&card->kernel_thread_starter, 2104 + (void *) lcs_start_kernel_thread, card); 2105 + card->thread_start_mask = 0; 2106 + card->thread_allowed_mask = 0; 2107 + card->thread_running_mask = 0; 2057 2108 return 0; 2058 2109 } 2059 2110 ··· 2161 2200 if (recover_state == DEV_STATE_RECOVER) { 2162 2201 lcs_set_multicast_list(card->dev); 2163 2202 card->dev->flags |= IFF_UP; 2203 + netif_carrier_on(card->dev); 2164 2204 netif_wake_queue(card->dev); 2165 2205 card->state = DEV_STATE_UP; 2166 2206 } else { ··· 2191 2229 * lcs_shutdown_device, called when setting the group device offline. 2192 2230 */ 2193 2231 static int 2194 - lcs_shutdown_device(struct ccwgroup_device *ccwgdev) 2232 + __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode) 2195 2233 { 2196 2234 struct lcs_card *card; 2197 2235 enum lcs_dev_states recover_state; ··· 2201 2239 card = (struct lcs_card *)ccwgdev->dev.driver_data; 2202 2240 if (!card) 2203 2241 return -ENODEV; 2204 - lcs_set_allowed_threads(card, 0); 2205 - if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD)) 2206 - return -ERESTARTSYS; 2242 + if (recovery_mode == 0) { 2243 + lcs_set_allowed_threads(card, 0); 2244 + if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD)) 2245 + return -ERESTARTSYS; 2246 + } 2207 2247 LCS_DBF_HEX(3, setup, &card, sizeof(void*)); 2208 2248 recover_state = card->state; 2209 2249 ··· 2217 2253 } 2218 2254 if (ret) 2219 2255 return ret; 2256 + return 0; 2257 + } 2258 + 2259 + static int 2260 + lcs_shutdown_device(struct ccwgroup_device *ccwgdev) 2261 + { 2262 + return __lcs_shutdown_device(ccwgdev, 0); 2263 + } 2264 + 2265 + /** 2266 + * drive lcs recovery after startup and startlan initiated by Lan Gateway 2267 + */ 2268 + static int 2269 + lcs_recovery(void *ptr) 2270 + { 2271 + struct lcs_card *card; 2272 + struct ccwgroup_device *gdev; 2273 + int rc; 2274 + 2275 + card = (struct lcs_card *) ptr; 2276 + daemonize("lcs_recover"); 2277 + 2278 + 
LCS_DBF_TEXT(4, trace, "recover1"); 2279 + if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD)) 2280 + return 0; 2281 + LCS_DBF_TEXT(4, trace, "recover2"); 2282 + gdev = card->gdev; 2283 + PRINT_WARN("Recovery of device %s started...\n", gdev->dev.bus_id); 2284 + rc = __lcs_shutdown_device(gdev, 1); 2285 + rc = lcs_new_device(gdev); 2286 + if (!rc) 2287 + PRINT_INFO("Device %s successfully recovered!\n", 2288 + card->dev->name); 2289 + else 2290 + PRINT_INFO("Device %s could not be recovered!\n", 2291 + card->dev->name); 2292 + lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); 2220 2293 return 0; 2221 2294 } 2222 2295
+9 -5
drivers/s390/net/lcs.h
··· 73 73 /** 74 74 * LCS sense byte definitions 75 75 */ 76 + #define LCS_SENSE_BYTE_0 0 77 + #define LCS_SENSE_BYTE_1 1 78 + #define LCS_SENSE_BYTE_2 2 79 + #define LCS_SENSE_BYTE_3 3 76 80 #define LCS_SENSE_INTERFACE_DISCONNECT 0x01 77 81 #define LCS_SENSE_EQUIPMENT_CHECK 0x10 78 82 #define LCS_SENSE_BUS_OUT_CHECK 0x20 79 83 #define LCS_SENSE_INTERVENTION_REQUIRED 0x40 80 84 #define LCS_SENSE_CMD_REJECT 0x80 81 - #define LCS_SENSE_RESETTING_EVENT 0x0080 82 - #define LCS_SENSE_DEVICE_ONLINE 0x0020 85 + #define LCS_SENSE_RESETTING_EVENT 0x80 86 + #define LCS_SENSE_DEVICE_ONLINE 0x20 83 87 84 88 /** 85 89 * LCS packet type definitions ··· 156 152 157 153 enum lcs_threads { 158 154 LCS_SET_MC_THREAD = 1, 159 - LCS_STARTLAN_THREAD = 2, 160 - LCS_STOPLAN_THREAD = 4, 161 - LCS_STARTUP_THREAD = 8, 155 + LCS_RECOVERY_THREAD = 2, 162 156 }; 157 + 163 158 /** 164 159 * LCS struct declarations 165 160 */ ··· 289 286 struct net_device_stats stats; 290 287 unsigned short (*lan_type_trans)(struct sk_buff *skb, 291 288 struct net_device *dev); 289 + struct ccwgroup_device *gdev; 292 290 struct lcs_channel read; 293 291 struct lcs_channel write; 294 292 struct lcs_buffer *tx_buffer;
+18 -18
drivers/s390/net/netiucv.c
··· 30 30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 31 31 * 32 32 */ 33 - 33 + 34 34 #undef DEBUG 35 35 36 36 #include <linux/module.h> ··· 65 65 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); 66 66 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); 67 67 68 - 68 + 69 69 #define PRINTK_HEADER " iucv: " /* for debugging */ 70 70 71 71 static struct device_driver netiucv_driver = { ··· 202 202 *p = '\0'; 203 203 return tmp; 204 204 } 205 - 205 + 206 206 /** 207 207 * States of the interface statemachine. 208 208 */ ··· 244 244 "Connection up", 245 245 "Connection down", 246 246 }; 247 - 247 + 248 248 /** 249 249 * Events of the connection statemachine 250 250 */ ··· 364 364 "Connect error", 365 365 }; 366 366 367 - 367 + 368 368 /** 369 369 * Debug Facility Stuff 370 370 */ ··· 516 516 fsm_action_nop(fsm_instance *fi, int event, void *arg) 517 517 { 518 518 } 519 - 519 + 520 520 /** 521 521 * Actions of the connection statemachine 522 522 *****************************************************************************/ ··· 993 993 994 994 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); 995 995 996 - 996 + 997 997 /** 998 998 * Actions for interface - statemachine. 999 999 *****************************************************************************/ ··· 1182 1182 1183 1183 fsm_newstate(conn->fsm, CONN_STATE_TX); 1184 1184 conn->prof.send_stamp = xtime; 1185 - 1185 + 1186 1186 rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */, 1187 1187 0, nskb->data, nskb->len); 1188 1188 /* Shut up, gcc! nskb is always below 2G. */ ··· 1220 1220 1221 1221 return rc; 1222 1222 } 1223 - 1223 + 1224 1224 /** 1225 1225 * Interface API for upper network layers 1226 1226 *****************************************************************************/ ··· 1291 1291 1292 1292 /** 1293 1293 * If connection is not running, try to restart it 1294 - * and throw away packet. 1294 + * and throw away packet. 
1295 1295 */ 1296 1296 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { 1297 1297 fsm_event(privptr->fsm, DEV_EVENT_START, dev); ··· 1538 1538 maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1539 1539 { 1540 1540 struct netiucv_priv *priv = dev->driver_data; 1541 - 1541 + 1542 1542 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1543 1543 priv->conn->prof.maxcqueue = 0; 1544 1544 return count; ··· 1559 1559 sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1560 1560 { 1561 1561 struct netiucv_priv *priv = dev->driver_data; 1562 - 1562 + 1563 1563 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1564 1564 priv->conn->prof.doios_single = 0; 1565 1565 return count; ··· 1580 1580 mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1581 1581 { 1582 1582 struct netiucv_priv *priv = dev->driver_data; 1583 - 1583 + 1584 1584 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1585 1585 priv->conn->prof.doios_multi = 0; 1586 1586 return count; ··· 1601 1601 txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1602 1602 { 1603 1603 struct netiucv_priv *priv = dev->driver_data; 1604 - 1604 + 1605 1605 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1606 1606 priv->conn->prof.txlen = 0; 1607 1607 return count; ··· 1622 1622 txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1623 1623 { 1624 1624 struct netiucv_priv *priv = dev->driver_data; 1625 - 1625 + 1626 1626 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1627 1627 priv->conn->prof.tx_time = 0; 1628 1628 return count; ··· 2000 2000 } 2001 2001 2002 2002 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); 2003 - 2003 + 2004 2004 return count; 2005 2005 2006 2006 out_free_ndev: ··· 2099 2099 netiucv_init(void) 2100 2100 { 2101 2101 int ret; 2102 - 2102 + 2103 2103 ret = iucv_register_dbf_views(); 2104 2104 if (ret) { 2105 2105 
PRINT_WARN("netiucv_init failed, " ··· 2128 2128 } 2129 2129 return ret; 2130 2130 } 2131 - 2131 + 2132 2132 module_init(netiucv_init); 2133 2133 module_exit(netiucv_exit); 2134 2134 MODULE_LICENSE("GPL");
+9 -9
drivers/s390/net/qeth.h
··· 376 376 __u8 reserved3[18]; 377 377 __u32 ccid; 378 378 } __attribute__ ((packed)); 379 - 379 + 380 380 struct qeth_hdr { 381 381 union { 382 382 struct qeth_hdr_layer2 l2; ··· 825 825 int use_hard_stop; 826 826 int (*orig_hard_header)(struct sk_buff *,struct net_device *, 827 827 unsigned short,void *,void *,unsigned); 828 - struct qeth_osn_info osn_info; 828 + struct qeth_osn_info osn_info; 829 829 }; 830 830 831 831 struct qeth_card_list_struct { ··· 944 944 return 0; 945 945 switch (card->info.type) { 946 946 case QETH_CARD_TYPE_IQD: 947 - case QETH_CARD_TYPE_OSN: 947 + case QETH_CARD_TYPE_OSN: 948 948 return IFF_NOARP; 949 949 #ifdef CONFIG_QETH_IPV6 950 950 default: ··· 981 981 qeth_get_max_mtu_for_card(int cardtype) 982 982 { 983 983 switch (cardtype) { 984 - 984 + 985 985 case QETH_CARD_TYPE_UNKNOWN: 986 986 case QETH_CARD_TYPE_OSAE: 987 987 case QETH_CARD_TYPE_OSN: ··· 1097 1097 int count = 0, rc = 0; 1098 1098 int in[4]; 1099 1099 1100 - rc = sscanf(buf, "%d.%d.%d.%d%n", 1100 + rc = sscanf(buf, "%d.%d.%d.%d%n", 1101 1101 &in[0], &in[1], &in[2], &in[3], &count); 1102 - if (rc != 4 || count) 1102 + if (rc != 4 || count<=0) 1103 1103 return -EINVAL; 1104 1104 for (count = 0; count < 4; count++) { 1105 1105 if (in[count] > 255) ··· 1131 1131 1132 1132 cnt = out = found = save_cnt = num2 = 0; 1133 1133 end = start = (char *) buf; 1134 - in = (__u16 *) addr; 1134 + in = (__u16 *) addr; 1135 1135 memset(in, 0, 16); 1136 1136 while (end) { 1137 1137 end = strchr(end,':'); ··· 1139 1139 end = (char *)buf + (strlen(buf)); 1140 1140 out = 1; 1141 1141 } 1142 - if ((end - start)) { 1142 + if ((end - start)) { 1143 1143 memset(num, 0, 5); 1144 1144 memcpy(num, start, end - start); 1145 1145 if (!qeth_isxdigit(num)) ··· 1241 1241 1242 1242 extern void 1243 1243 qeth_osn_deregister(struct net_device *); 1244 - 1244 + 1245 1245 #endif /* __QETH_H__ */
+9 -9
drivers/s390/net/qeth_eddp.c
··· 81 81 qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) 82 82 { 83 83 struct qeth_eddp_context_reference *ref; 84 - 84 + 85 85 QETH_DBF_TEXT(trace, 6, "eddprctx"); 86 86 while (!list_empty(&buf->ctx_list)){ 87 87 ref = list_entry(buf->ctx_list.next, ··· 135 135 "buffer!\n"); 136 136 goto out; 137 137 } 138 - } 138 + } 139 139 /* check if the whole next skb fits into current buffer */ 140 140 if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) - 141 141 buf->next_element_to_fill) ··· 148 148 * and increment ctx's refcnt */ 149 149 must_refcnt = 1; 150 150 continue; 151 - } 151 + } 152 152 if (must_refcnt){ 153 153 must_refcnt = 0; 154 154 if (qeth_eddp_buf_ref_context(buf, ctx)){ ··· 266 266 int left_in_frag; 267 267 int copy_len; 268 268 u8 *src; 269 - 269 + 270 270 QETH_DBF_TEXT(trace, 5, "eddpcdtc"); 271 271 if (skb_shinfo(eddp->skb)->nr_frags == 0) { 272 272 memcpy(dst, eddp->skb->data + eddp->skb_offset, len); ··· 408 408 struct tcphdr *tcph; 409 409 int data_len; 410 410 u32 hcsum; 411 - 411 + 412 412 QETH_DBF_TEXT(trace, 5, "eddpftcp"); 413 413 eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; 414 414 if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { ··· 465 465 eddp->th.tcp.h.seq += data_len; 466 466 } 467 467 } 468 - 468 + 469 469 static inline int 470 470 qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 471 471 struct sk_buff *skb, struct qeth_hdr *qhdr) 472 472 { 473 473 struct qeth_eddp_data *eddp = NULL; 474 - 474 + 475 475 QETH_DBF_TEXT(trace, 5, "eddpficx"); 476 476 /* create our segmentation headers and copy original headers */ 477 477 if (skb->protocol == ETH_P_IP) ··· 512 512 int hdr_len) 513 513 { 514 514 int skbs_per_page; 515 - 515 + 516 516 QETH_DBF_TEXT(trace, 5, "eddpcanp"); 517 517 /* can we put multiple skbs in one page? 
*/ 518 518 skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len); ··· 588 588 struct qeth_hdr *qhdr) 589 589 { 590 590 struct qeth_eddp_context *ctx = NULL; 591 - 591 + 592 592 QETH_DBF_TEXT(trace, 5, "creddpct"); 593 593 if (skb->protocol == ETH_P_IP) 594 594 ctx = qeth_eddp_create_context_generic(card, skb,
+1 -1
drivers/s390/net/qeth_fs.h
··· 42 42 43 43 extern void 44 44 qeth_remove_device_attributes_osn(struct device *dev); 45 - 45 + 46 46 extern int 47 47 qeth_create_driver_attributes(void); 48 48
+55 -52
drivers/s390/net/qeth_main.c
··· 513 513 514 514 QETH_DBF_TEXT(setup, 3, "setoffl"); 515 515 QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); 516 - 516 + 517 517 if (card->dev && netif_carrier_ok(card->dev)) 518 518 netif_carrier_off(card->dev); 519 519 recover_flag = card->state; ··· 604 604 list_for_each_entry(addr, &card->ip_list, entry) { 605 605 if (card->options.layer2) { 606 606 if ((addr->type == todo->type) && 607 - (memcmp(&addr->mac, &todo->mac, 607 + (memcmp(&addr->mac, &todo->mac, 608 608 OSA_ADDR_LEN) == 0)) { 609 609 found = 1; 610 610 break; 611 611 } 612 612 continue; 613 - } 613 + } 614 614 if ((addr->proto == QETH_PROT_IPV4) && 615 615 (todo->proto == QETH_PROT_IPV4) && 616 616 (addr->type == todo->type) && ··· 694 694 if (card->options.layer2) { 695 695 if ((tmp->type == addr->type) && 696 696 (tmp->is_multicast == addr->is_multicast) && 697 - (memcmp(&tmp->mac, &addr->mac, 697 + (memcmp(&tmp->mac, &addr->mac, 698 698 OSA_ADDR_LEN) == 0)) { 699 699 found = 1; 700 700 break; 701 701 } 702 702 continue; 703 - } 703 + } 704 704 if ((tmp->proto == QETH_PROT_IPV4) && 705 705 (addr->proto == QETH_PROT_IPV4) && 706 706 (tmp->type == addr->type) && ··· 1173 1173 "due to hardware limitations!\n"); 1174 1174 card->qdio.no_out_queues = 1; 1175 1175 card->qdio.default_out_queue = 0; 1176 - } 1176 + } 1177 1177 return 0; 1178 1178 } 1179 1179 i++; ··· 1198 1198 return -ENODEV; 1199 1199 1200 1200 QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id); 1201 - 1201 + 1202 1202 card = qeth_alloc_card(); 1203 1203 if (!card) { 1204 1204 put_device(dev); ··· 1220 1220 put_device(dev); 1221 1221 qeth_free_card(card); 1222 1222 return rc; 1223 - } 1223 + } 1224 1224 if ((rc = qeth_setup_card(card))){ 1225 1225 QETH_DBF_TEXT_(setup, 2, "2err%d", rc); 1226 1226 put_device(dev); ··· 1843 1843 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); 1844 1844 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); 1845 1845 } 1846 - 1846 + 1847 1847 static int 1848 1848 qeth_send_control_data(struct qeth_card 
*card, int len, 1849 1849 struct qeth_cmd_buffer *iob, ··· 1937 1937 wake_up(&card->wait_q); 1938 1938 } 1939 1939 return rc; 1940 - } 1940 + } 1941 1941 1942 1942 static inline void 1943 1943 qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, ··· 1966 1966 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); 1967 1967 return qeth_osn_send_control_data(card, s1, iob); 1968 1968 } 1969 - 1969 + 1970 1970 static int 1971 1971 qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 1972 1972 int (*reply_cb) ··· 2579 2579 skb->dev = card->dev; 2580 2580 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) 2581 2581 vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); 2582 - else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) 2582 + else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) 2583 2583 qeth_rebuild_skb(card, skb, hdr); 2584 2584 else { /*in case of OSN*/ 2585 2585 skb_push(skb, sizeof(struct qeth_hdr)); ··· 2763 2763 index = i % QDIO_MAX_BUFFERS_PER_Q; 2764 2764 buffer = &card->qdio.in_q->bufs[index]; 2765 2765 if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) && 2766 - qeth_check_qdio_errors(buffer->buffer, 2766 + qeth_check_qdio_errors(buffer->buffer, 2767 2767 qdio_err, siga_err,"qinerr"))) 2768 2768 qeth_process_inbound_buffer(card, buffer, index); 2769 2769 /* clear buffer and give back to hardware */ ··· 3187 3187 if (card->qdio.state == QETH_QDIO_ALLOCATED) 3188 3188 return 0; 3189 3189 3190 - card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), 3190 + card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), 3191 3191 GFP_KERNEL|GFP_DMA); 3192 3192 if (!card->qdio.in_q) 3193 3193 return - ENOMEM; ··· 3476 3476 rc3 = qeth_halt_channel(&card->data); 3477 3477 if (rc1) 3478 3478 return rc1; 3479 - if (rc2) 3479 + if (rc2) 3480 3480 return rc2; 3481 3481 return rc3; 3482 3482 } ··· 3491 3491 rc3 = qeth_clear_channel(&card->data); 3492 3492 if (rc1) 3493 3493 return rc1; 3494 - if (rc2) 3494 + if (rc2) 3495 3495 return rc2; 3496 3496 return 
rc3; 3497 3497 } ··· 3798 3798 QETH_DBF_TEXT(trace,4,"nomacadr"); 3799 3799 return -EPERM; 3800 3800 } 3801 - card->dev->flags |= IFF_UP; 3802 - netif_start_queue(dev); 3803 3801 card->data.state = CH_STATE_UP; 3804 3802 card->state = CARD_STATE_UP; 3803 + card->dev->flags |= IFF_UP; 3804 + netif_start_queue(dev); 3805 3805 3806 3806 if (!card->lan_online && netif_carrier_ok(dev)) 3807 3807 netif_carrier_off(dev); ··· 3817 3817 3818 3818 card = (struct qeth_card *) dev->priv; 3819 3819 3820 - netif_stop_queue(dev); 3820 + netif_tx_disable(dev); 3821 3821 card->dev->flags &= ~IFF_UP; 3822 3822 if (card->state == CARD_STATE_UP) 3823 3823 card->state = CARD_STATE_SOFTSETUP; ··· 3958 3958 #endif 3959 3959 *hdr = (struct qeth_hdr *) 3960 3960 qeth_push_skb(card, skb, sizeof(struct qeth_hdr)); 3961 - if (hdr == NULL) 3961 + if (*hdr == NULL) 3962 3962 return -EINVAL; 3963 3963 return 0; 3964 3964 } ··· 4098 4098 } 4099 4099 } else { /* passthrough */ 4100 4100 if((skb->dev->type == ARPHRD_IEEE802_TR) && 4101 - !memcmp(skb->data + sizeof(struct qeth_hdr) + 4101 + !memcmp(skb->data + sizeof(struct qeth_hdr) + 4102 4102 sizeof(__u16), skb->dev->broadcast, 6)) { 4103 4103 hdr->hdr.l3.flags = QETH_CAST_BROADCAST | 4104 4104 QETH_HDR_PASSTHRU; ··· 4385 4385 } 4386 4386 4387 4387 static inline int 4388 - qeth_get_elements_no(struct qeth_card *card, void *hdr, 4388 + qeth_get_elements_no(struct qeth_card *card, void *hdr, 4389 4389 struct sk_buff *skb, int elems) 4390 4390 { 4391 4391 int elements_needed = 0; ··· 4416 4416 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 4417 4417 struct qeth_eddp_context *ctx = NULL; 4418 4418 int tx_bytes = skb->len; 4419 + unsigned short nr_frags = skb_shinfo(skb)->nr_frags; 4420 + unsigned short tso_size = skb_shinfo(skb)->tso_size; 4419 4421 int rc; 4420 4422 4421 4423 QETH_DBF_TEXT(trace, 6, "sendpkt"); ··· 4443 4441 return 0; 4444 4442 } 4445 4443 cast_type = qeth_get_cast_type(card, skb); 4446 - if ((cast_type == 
RTN_BROADCAST) && 4444 + if ((cast_type == RTN_BROADCAST) && 4447 4445 (card->info.broadcast_capable == 0)){ 4448 4446 card->stats.tx_dropped++; 4449 4447 card->stats.tx_errors++; ··· 4465 4463 card->stats.tx_errors++; 4466 4464 dev_kfree_skb_any(skb); 4467 4465 return NETDEV_TX_OK; 4468 - } 4466 + } 4469 4467 elements_needed++; 4470 4468 } else { 4471 4469 if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) { ··· 4500 4498 card->stats.tx_packets++; 4501 4499 card->stats.tx_bytes += tx_bytes; 4502 4500 #ifdef CONFIG_QETH_PERF_STATS 4503 - if (skb_shinfo(skb)->tso_size && 4501 + if (tso_size && 4504 4502 !(large_send == QETH_LARGE_SEND_NO)) { 4505 - card->perf_stats.large_send_bytes += skb->len; 4503 + card->perf_stats.large_send_bytes += tx_bytes; 4506 4504 card->perf_stats.large_send_cnt++; 4507 4505 } 4508 - if (skb_shinfo(skb)->nr_frags > 0){ 4506 + if (nr_frags > 0){ 4509 4507 card->perf_stats.sg_skbs_sent++; 4510 4508 /* nr_frags + skb->data */ 4511 4509 card->perf_stats.sg_frags_sent += 4512 - skb_shinfo(skb)->nr_frags + 1; 4510 + nr_frags + 1; 4513 4511 } 4514 4512 #endif /* CONFIG_QETH_PERF_STATS */ 4515 4513 } ··· 5375 5373 cmd = (struct qeth_ipa_cmd *) data; 5376 5374 if (cmd->hdr.return_code) { 5377 5375 PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. 
" 5378 - "Continuing\n",cmd->data.setdelvlan.vlan_id, 5376 + "Continuing\n",cmd->data.setdelvlan.vlan_id, 5379 5377 QETH_CARD_IFNAME(card), cmd->hdr.return_code); 5380 5378 QETH_DBF_TEXT_(trace, 2, "L2VL%4x", cmd->hdr.command); 5381 5379 QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card)); ··· 5395 5393 iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); 5396 5394 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); 5397 5395 cmd->data.setdelvlan.vlan_id = i; 5398 - return qeth_send_ipa_cmd(card, iob, 5396 + return qeth_send_ipa_cmd(card, iob, 5399 5397 qeth_layer2_send_setdelvlan_cb, NULL); 5400 5398 } 5401 5399 ··· 5459 5457 * Examine hardware response to SET_PROMISC_MODE 5460 5458 */ 5461 5459 static int 5462 - qeth_setadp_promisc_mode_cb(struct qeth_card *card, 5460 + qeth_setadp_promisc_mode_cb(struct qeth_card *card, 5463 5461 struct qeth_reply *reply, 5464 5462 unsigned long data) 5465 5463 { ··· 5470 5468 5471 5469 cmd = (struct qeth_ipa_cmd *) data; 5472 5470 setparms = &(cmd->data.setadapterparms); 5473 - 5471 + 5474 5472 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); 5475 - if (cmd->hdr.return_code) { 5476 - QETH_DBF_TEXT_(trace,4,"prmrc%2.2x",cmd->hdr.return_code); 5473 + if (cmd->hdr.return_code) { 5474 + QETH_DBF_TEXT_(trace,4,"prmrc%2.2x",cmd->hdr.return_code); 5477 5475 setparms->data.mode = SET_PROMISC_MODE_OFF; 5478 5476 } 5479 5477 card->info.promisc_mode = setparms->data.mode; ··· 5519 5517 5520 5518 if (card->info.type == QETH_CARD_TYPE_OSN) 5521 5519 return ; 5522 - 5520 + 5523 5521 QETH_DBF_TEXT(trace, 3, "setmulti"); 5524 5522 qeth_delete_mc_addresses(card); 5525 5523 if (card->options.layer2) { ··· 5577 5575 struct qeth_cmd_buffer *iob; 5578 5576 struct qeth_card *card; 5579 5577 int rc; 5580 - 5578 + 5581 5579 QETH_DBF_TEXT(trace, 2, "osnsdmc"); 5582 5580 if (!dev) 5583 5581 return -ENODEV; ··· 5656 5654 card->osn_info.data_cb = NULL; 5657 5655 return; 5658 5656 } 5659 - 5657 + 5660 5658 static void 5661 
5659 qeth_delete_mc_addresses(struct qeth_card *card) 5662 5660 { ··· 5820 5818 struct inet6_dev *in6_dev; 5821 5819 5822 5820 QETH_DBF_TEXT(trace,4,"chkmcv6"); 5823 - if (!qeth_is_supported(card, IPA_IPV6)) 5821 + if (!qeth_is_supported(card, IPA_IPV6)) 5824 5822 return ; 5825 5823 in6_dev = in6_dev_get(card->dev); 5826 5824 if (in6_dev == NULL) ··· 6361 6359 dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid; 6362 6360 dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid; 6363 6361 #endif 6364 - dev->hard_header = card->orig_hard_header; 6365 6362 if (qeth_get_netdev_flags(card) & IFF_NOARP) { 6366 6363 dev->rebuild_header = NULL; 6367 6364 dev->hard_header = NULL; 6368 - if (card->options.fake_ll) 6369 - dev->hard_header = qeth_fake_header; 6370 6365 dev->header_cache_update = NULL; 6371 6366 dev->hard_header_cache = NULL; 6372 6367 } ··· 6372 6373 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 6373 6374 card->dev->dev_id = card->info.unique_id & 0xffff; 6374 6375 #endif 6376 + if (card->options.fake_ll && 6377 + (qeth_get_netdev_flags(card) & IFF_NOARP)) 6378 + dev->hard_header = qeth_fake_header; 6375 6379 dev->hard_header_parse = NULL; 6376 6380 dev->set_mac_address = qeth_layer2_set_mac_address; 6377 6381 dev->flags |= qeth_get_netdev_flags(card); ··· 6479 6477 /*network device will be recovered*/ 6480 6478 if (card->dev) { 6481 6479 card->dev->hard_header = card->orig_hard_header; 6480 + if (card->options.fake_ll && 6481 + (qeth_get_netdev_flags(card) & IFF_NOARP)) 6482 + card->dev->hard_header = qeth_fake_header; 6482 6483 return 0; 6483 6484 } 6484 6485 /* at first set_online allocate netdev */ ··· 6589 6584 6590 6585 cmd = (struct qeth_ipa_cmd *) data; 6591 6586 if (!card->options.layer2 || card->info.guestlan || 6592 - !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { 6587 + !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { 6593 6588 memcpy(card->dev->dev_addr, 6594 6589 &cmd->data.setadapterparms.data.change_addr.addr, 6595 6590 OSA_ADDR_LEN); ··· 7036 7031 7037 
7032 QETH_DBF_TEXT(trace,3,"softipv6"); 7038 7033 7039 - netif_stop_queue(card->dev); 7040 7034 rc = qeth_send_startlan(card, QETH_PROT_IPV6); 7041 7035 if (rc) { 7042 7036 PRINT_ERR("IPv6 startlan failed on %s\n", 7043 7037 QETH_CARD_IFNAME(card)); 7044 7038 return rc; 7045 7039 } 7046 - netif_wake_queue(card->dev); 7047 7040 rc = qeth_query_ipassists(card,QETH_PROT_IPV6); 7048 7041 if (rc) { 7049 7042 PRINT_ERR("IPv6 query ipassist failed on %s\n", ··· 7355 7352 card->options.large_send = type; 7356 7353 return 0; 7357 7354 } 7358 - netif_stop_queue(card->dev); 7355 + if (card->state == CARD_STATE_UP) 7356 + netif_tx_disable(card->dev); 7359 7357 card->options.large_send = type; 7360 7358 switch (card->options.large_send) { 7361 7359 case QETH_LARGE_SEND_EDDP: ··· 7378 7374 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); 7379 7375 break; 7380 7376 } 7381 - netif_wake_queue(card->dev); 7377 + if (card->state == CARD_STATE_UP) 7378 + netif_wake_queue(card->dev); 7382 7379 return rc; 7383 7380 } 7384 7381 ··· 7432 7427 if ((rc = qeth_setrouting_v6(card))) 7433 7428 QETH_DBF_TEXT_(setup, 2, "5err%d", rc); 7434 7429 out: 7435 - netif_stop_queue(card->dev); 7430 + netif_tx_disable(card->dev); 7436 7431 return 0; 7437 7432 } 7438 7433 ··· 7572 7567 if (card->read.state == CH_STATE_UP && 7573 7568 card->write.state == CH_STATE_UP && 7574 7569 (card->state == CARD_STATE_UP)) { 7575 - if (recovery_mode && 7570 + if (recovery_mode && 7576 7571 card->info.type != QETH_CARD_TYPE_OSN) { 7577 7572 qeth_stop(card->dev); 7578 7573 } else { ··· 7741 7736 qeth_register_netdev(struct qeth_card *card) 7742 7737 { 7743 7738 QETH_DBF_TEXT(setup, 3, "regnetd"); 7744 - if (card->dev->reg_state != NETREG_UNINITIALIZED) { 7745 - qeth_netdev_init(card->dev); 7739 + if (card->dev->reg_state != NETREG_UNINITIALIZED) 7746 7740 return 0; 7747 - } 7748 7741 /* sysfs magic */ 7749 7742 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 7750 7743 return register_netdev(card->dev); ··· 7753 7750 { 
7754 7751 QETH_DBF_TEXT(setup ,2, "startag"); 7755 7752 7756 - if (recovery_mode && 7753 + if (recovery_mode && 7757 7754 card->info.type != QETH_CARD_TYPE_OSN) { 7758 7755 qeth_open(card->dev); 7759 7756 } else {
+2 -2
drivers/s390/net/qeth_mpc.h
··· 445 445 /* Helper functions */ 446 446 #define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \ 447 447 (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY)) 448 - 448 + 449 449 /*****************************************************************************/ 450 450 /* END OF IP Assist related definitions */ 451 451 /*****************************************************************************/ ··· 490 490 /* Layer 2 defintions */ 491 491 #define QETH_PROT_LAYER2 0x08 492 492 #define QETH_PROT_TCPIP 0x03 493 - #define QETH_PROT_OSN2 0x0a 493 + #define QETH_PROT_OSN2 0x0a 494 494 #define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50) 495 495 #define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19) 496 496
+4 -4
drivers/s390/net/qeth_proc.c
··· 36 36 { 37 37 struct device *dev = NULL; 38 38 loff_t nr = 0; 39 - 39 + 40 40 down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); 41 41 if (*offset == 0) 42 42 return SEQ_START_TOKEN; ··· 60 60 qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset) 61 61 { 62 62 struct device *prev, *next; 63 - 64 - if (it == SEQ_START_TOKEN) 63 + 64 + if (it == SEQ_START_TOKEN) 65 65 prev = NULL; 66 66 else 67 67 prev = (struct device *) it; ··· 180 180 struct device *device; 181 181 struct qeth_card *card; 182 182 183 - 183 + 184 184 if (it == SEQ_START_TOKEN) 185 185 return 0; 186 186
+3 -3
drivers/s390/net/qeth_sys.c
··· 785 785 } 786 786 if (card->options.large_send == type) 787 787 return count; 788 - if ((rc = qeth_set_large_send(card, type))) 788 + if ((rc = qeth_set_large_send(card, type))) 789 789 return rc; 790 790 return count; 791 791 } ··· 1682 1682 if (card->info.type == QETH_CARD_TYPE_OSN) 1683 1683 return sysfs_create_group(&dev->kobj, 1684 1684 &qeth_osn_device_attr_group); 1685 - 1685 + 1686 1686 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group))) 1687 1687 return ret; 1688 1688 if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){ ··· 1713 1713 if (card->info.type == QETH_CARD_TYPE_OSN) 1714 1714 return sysfs_remove_group(&dev->kobj, 1715 1715 &qeth_osn_device_attr_group); 1716 - 1716 + 1717 1717 sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); 1718 1718 sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); 1719 1719 sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
+2 -2
drivers/s390/net/qeth_tso.h
··· 117 117 int fragno; 118 118 unsigned long addr; 119 119 int element, cnt, dlen; 120 - 120 + 121 121 fragno = skb_shinfo(skb)->nr_frags; 122 122 element = *next_element_to_fill; 123 123 dlen = 0; 124 - 124 + 125 125 if (is_tso) 126 126 buffer->element[element].flags = 127 127 SBAL_FLAGS_MIDDLE_FRAG;
+1
drivers/scsi/libata-core.c
··· 4297 4297 int ata_device_resume(struct ata_port *ap, struct ata_device *dev) 4298 4298 { 4299 4299 if (ap->flags & ATA_FLAG_SUSPENDED) { 4300 + ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000); 4300 4301 ap->flags &= ~ATA_FLAG_SUSPENDED; 4301 4302 ata_set_mode(ap); 4302 4303 }
+7
drivers/scsi/ppa.c
··· 982 982 return -ENODEV; 983 983 } 984 984 985 + static int ppa_adjust_queue(struct scsi_device *device) 986 + { 987 + blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); 988 + return 0; 989 + } 990 + 985 991 static struct scsi_host_template ppa_template = { 986 992 .module = THIS_MODULE, 987 993 .proc_name = "ppa", ··· 1003 997 .cmd_per_lun = 1, 1004 998 .use_clustering = ENABLE_CLUSTERING, 1005 999 .can_queue = 1, 1000 + .slave_alloc = ppa_adjust_queue, 1006 1001 }; 1007 1002 1008 1003 /***************************************************************************
+3 -3
drivers/scsi/sata_sil24.c
··· 454 454 */ 455 455 msleep(10); 456 456 457 - prb->ctrl = PRB_CTRL_SRST; 457 + prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); 458 458 prb->fis[1] = 0; /* no PM yet */ 459 459 460 460 writel((u32)paddr, port + PORT_CMD_ACTIVATE); ··· 551 551 552 552 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 553 553 if (qc->tf.flags & ATA_TFLAG_WRITE) 554 - prb->ctrl = PRB_CTRL_PACKET_WRITE; 554 + prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE); 555 555 else 556 - prb->ctrl = PRB_CTRL_PACKET_READ; 556 + prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ); 557 557 } else 558 558 prb->ctrl = 0; 559 559
+1
drivers/scsi/scsi_devinfo.c
··· 165 165 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 166 166 {"HP", "C1557A", NULL, BLIST_FORCELUN}, 167 167 {"HP", "C3323-300", "4269", BLIST_NOTQ}, 168 + {"HP", "C5713A", NULL, BLIST_NOREPORTLUN}, 168 169 {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, 169 170 {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 170 171 {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
+1 -1
drivers/scsi/scsi_lib.c
··· 367 367 int nsegs, unsigned bufflen, gfp_t gfp) 368 368 { 369 369 struct request_queue *q = rq->q; 370 - int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT; 370 + int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 371 371 unsigned int data_len = 0, len, bytes, off; 372 372 struct page *page; 373 373 struct bio *bio = NULL;
+2 -2
drivers/scsi/scsi_transport_sas.c
··· 955 955 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 956 956 struct sas_phy *parent = dev_to_phy(rphy->dev.parent); 957 957 958 - if (rphy->scsi_target_id == -1) 958 + if (rphy->identify.device_type != SAS_END_DEVICE || 959 + rphy->scsi_target_id == -1) 959 960 continue; 960 961 961 962 if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && ··· 978 977 #define SETUP_TEMPLATE(attrb, field, perm, test) \ 979 978 i->private_##attrb[count] = class_device_attr_##field; \ 980 979 i->private_##attrb[count].attr.mode = perm; \ 981 - i->private_##attrb[count].store = NULL; \ 982 980 i->attrb[count] = &i->private_##attrb[count]; \ 983 981 if (test) \ 984 982 count++
+5 -3
drivers/serial/cpm_uart/cpm_uart_core.c
··· 1164 1164 struct fs_uart_platform_info *pdata; 1165 1165 struct platform_device* pdev = early_uart_get_pdev(co->index); 1166 1166 1167 - port = 1168 - (struct uart_port *)&cpm_uart_ports[cpm_uart_port_map[co->index]]; 1169 - pinfo = (struct uart_cpm_port *)port; 1170 1167 if (!pdev) { 1171 1168 pr_info("cpm_uart: console: compat mode\n"); 1172 1169 /* compatibility - will be cleaned up */ 1173 1170 cpm_uart_init_portdesc(); 1171 + } 1174 1172 1173 + port = 1174 + (struct uart_port *)&cpm_uart_ports[cpm_uart_port_map[co->index]]; 1175 + pinfo = (struct uart_cpm_port *)port; 1176 + if (!pdev) { 1175 1177 if (pinfo->set_lineif) 1176 1178 pinfo->set_lineif(pinfo); 1177 1179 } else {
+1 -1
drivers/serial/cpm_uart/cpm_uart_cpm2.c
··· 213 213 L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); 214 214 if (is_con) { 215 215 mem_addr = alloc_bootmem(memsz); 216 - dma_addr = mem_addr; 216 + dma_addr = virt_to_bus(mem_addr); 217 217 } 218 218 else 219 219 mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr,
+2 -2
drivers/spi/spi_s3c24xx.c
··· 405 405 406 406 static int s3c24xx_spi_suspend(struct platform_device *pdev, pm_message_t msg) 407 407 { 408 - struct s3c24xx_spi *hw = platform_get_drvdata(dev); 408 + struct s3c24xx_spi *hw = platform_get_drvdata(pdev); 409 409 410 410 clk_disable(hw->clk); 411 411 return 0; ··· 413 413 414 414 static int s3c24xx_spi_resume(struct platform_device *pdev) 415 415 { 416 - struct s3c24xx_spi *hw = platform_get_drvdata(dev); 416 + struct s3c24xx_spi *hw = platform_get_drvdata(pdev); 417 417 418 418 clk_enable(hw->clk); 419 419 return 0;
+14 -7
drivers/video/au1100fb.c
··· 214 214 */ 215 215 int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fbi) 216 216 { 217 - struct au1100fb_device *fbdev = to_au1100fb_device(fbi); 218 - u32 *palette = fbdev->regs->lcd_pallettebase; 217 + struct au1100fb_device *fbdev; 218 + u32 *palette; 219 219 u32 value; 220 + 221 + fbdev = to_au1100fb_device(fbi); 222 + palette = fbdev->regs->lcd_pallettebase; 220 223 221 224 if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1)) 222 225 return -EINVAL; ··· 319 316 */ 320 317 int au1100fb_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) 321 318 { 322 - struct au1100fb_device *fbdev = to_au1100fb_device(fbi); 319 + struct au1100fb_device *fbdev; 323 320 int dy; 321 + 322 + fbdev = to_au1100fb_device(fbi); 324 323 325 324 print_dbg("fb_pan_display %p %p", var, fbi); 326 325 ··· 387 382 */ 388 383 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) 389 384 { 390 - struct au1100fb_device *fbdev = to_au1100fb_device(fbi); 385 + struct au1100fb_device *fbdev; 391 386 unsigned int len; 392 387 unsigned long start=0, off; 388 + 389 + fbdev = to_au1100fb_device(fbi); 393 390 394 391 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { 395 392 return -EINVAL; ··· 474 467 475 468 if (!request_mem_region(au1100fb_fix.mmio_start, au1100fb_fix.mmio_len, 476 469 DRIVER_NAME)) { 477 - print_err("fail to lock memory region at 0x%08x", 470 + print_err("fail to lock memory region at 0x%08lx", 478 471 au1100fb_fix.mmio_start); 479 472 return -EBUSY; 480 473 } ··· 602 595 return 0; 603 596 } 604 597 605 - int au1100fb_drv_suspend(struct device *dev, u32 state, u32 level) 598 + int au1100fb_drv_suspend(struct device *dev, pm_message_t state) 606 599 { 607 600 /* TODO */ 608 601 return 0; 609 602 } 610 603 611 - int au1100fb_drv_resume(struct device *dev, u32 level) 604 + int au1100fb_drv_resume(struct device *dev) 612 605 { 613 606 /* TODO */ 614 607 return 0;
+1 -1
drivers/video/console/fbcon.c
··· 2631 2631 scr_memcpyw((u16 *) q, (u16 *) p, 2632 2632 vc->vc_size_row); 2633 2633 } 2634 - softback_in = p; 2634 + softback_in = softback_curr = p; 2635 2635 update_region(vc, vc->vc_origin, 2636 2636 logo_lines * vc->vc_cols); 2637 2637 }
+1 -3
drivers/video/maxinefb.c
··· 55 55 }; 56 56 57 57 static struct fb_fix_screeninfo maxinefb_fix = { 58 - .id = "Maxine onboard graphics 1024x768x8", 58 + .id = "Maxine", 59 59 .smem_len = (1024*768), 60 60 .type = FB_TYPE_PACKED_PIXELS, 61 61 .visual = FB_VISUAL_PSEUDOCOLOR, ··· 107 107 108 108 static struct fb_ops maxinefb_ops = { 109 109 .owner = THIS_MODULE, 110 - .fb_get_fix = gen_get_fix, 111 - .fb_get_var = gen_get_var, 112 110 .fb_setcolreg = maxinefb_setcolreg, 113 111 .fb_fillrect = cfb_fillrect, 114 112 .fb_copyarea = cfb_copyarea,
+1 -2
fs/affs/namei.c
··· 416 416 return retval; 417 417 } 418 418 419 - retval = -EIO; 420 419 bh = affs_bread(sb, old_dentry->d_inode->i_ino); 421 420 if (!bh) 422 - goto done; 421 + return -EIO; 423 422 424 423 /* Remove header from its parent directory. */ 425 424 affs_lock_dir(old_dir);
+7
fs/cifs/CHANGES
··· 1 + Version 1.43 2 + ------------ 3 + POSIX locking to servers which support CIFS POSIX Extensions 4 + (disabled by default controlled by proc/fs/cifs/Experimental). 5 + Handle conversion of long share names (especially Asian languages) 6 + to Unicode during mount. 7 + 1 8 Version 1.42 2 9 ------------ 3 10 Fix slow oplock break when mounted to different servers at the same time and
+1 -1
fs/cifs/cifsfs.h
··· 99 99 extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 100 100 extern int cifs_ioctl (struct inode * inode, struct file * filep, 101 101 unsigned int command, unsigned long arg); 102 - #define CIFS_VERSION "1.42" 102 + #define CIFS_VERSION "1.43" 103 103 #endif /* _CIFSFS_H */
+1 -1
fs/cifs/cifsproto.h
··· 267 267 const int waitFlag); 268 268 extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, 269 269 const __u16 smb_file_id, const int get_flag, 270 - const __u64 len, const __u64 offset, 270 + const __u64 len, struct file_lock *, 271 271 const __u16 lock_type, const int waitFlag); 272 272 extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon); 273 273 extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
+35 -5
fs/cifs/cifssmb.c
··· 1355 1355 int 1356 1356 CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, 1357 1357 const __u16 smb_file_id, const int get_flag, const __u64 len, 1358 - const __u64 lkoffset, const __u16 lock_type, const int waitFlag) 1358 + struct file_lock *pLockData, const __u16 lock_type, 1359 + const int waitFlag) 1359 1360 { 1360 1361 struct smb_com_transaction2_sfi_req *pSMB = NULL; 1361 1362 struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; ··· 1367 1366 __u16 params, param_offset, offset, byte_count, count; 1368 1367 1369 1368 cFYI(1, ("Posix Lock")); 1369 + 1370 + if(pLockData == NULL) 1371 + return EINVAL; 1372 + 1370 1373 rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); 1371 1374 1372 1375 if (rc) ··· 1409 1404 1410 1405 parm_data->lock_type = cpu_to_le16(lock_type); 1411 1406 if(waitFlag) 1412 - parm_data->lock_flags = 1; 1407 + parm_data->lock_flags = cpu_to_le16(1); 1413 1408 parm_data->pid = cpu_to_le32(current->tgid); 1414 - parm_data->start = lkoffset; 1415 - parm_data->length = len; /* normalize negative numbers */ 1409 + parm_data->start = cpu_to_le64(pLockData->fl_start); 1410 + parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ 1416 1411 1417 1412 pSMB->DataOffset = cpu_to_le16(offset); 1418 1413 pSMB->Fid = smb_file_id; ··· 1424 1419 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 1425 1420 if (rc) { 1426 1421 cFYI(1, ("Send error in Posix Lock = %d", rc)); 1427 - } 1422 + } else if (get_flag) { 1423 + /* lock structure can be returned on get */ 1424 + __u16 data_offset; 1425 + __u16 data_count; 1426 + rc = validate_t2((struct smb_t2_rsp *)pSMBr); 1428 1427 1428 + if (rc || (pSMBr->ByteCount < sizeof(struct cifs_posix_lock))) { 1429 + rc = -EIO; /* bad smb */ 1430 + goto plk_err_exit; 1431 + } 1432 + if(pLockData == NULL) { 1433 + rc = -EINVAL; 1434 + goto plk_err_exit; 1435 + } 1436 + data_offset = le16_to_cpu(pSMBr->t2.DataOffset); 1437 + data_count = le16_to_cpu(pSMBr->t2.DataCount); 1438 + 
if(data_count < sizeof(struct cifs_posix_lock)) { 1439 + rc = -EIO; 1440 + goto plk_err_exit; 1441 + } 1442 + parm_data = (struct cifs_posix_lock *) 1443 + ((char *)&pSMBr->hdr.Protocol + data_offset); 1444 + if(parm_data->lock_type == cpu_to_le16(CIFS_UNLCK)) 1445 + pLockData->fl_type = F_UNLCK; 1446 + } 1447 + 1448 + plk_err_exit: 1429 1449 if (pSMB) 1430 1450 cifs_small_buf_release(pSMB); 1431 1451
+88 -9
fs/cifs/connect.c
··· 2148 2148 /* We look for obvious messed up bcc or strings in response so we do not go off 2149 2149 the end since (at least) WIN2K and Windows XP have a major bug in not null 2150 2150 terminating last Unicode string in response */ 2151 + if(ses->serverOS) 2152 + kfree(ses->serverOS); 2151 2153 ses->serverOS = kzalloc(2 * (len + 1), GFP_KERNEL); 2152 2154 if(ses->serverOS == NULL) 2153 2155 goto sesssetup_nomem; ··· 2162 2160 if (remaining_words > 0) { 2163 2161 len = UniStrnlen((wchar_t *)bcc_ptr, 2164 2162 remaining_words-1); 2163 + if(ses->serverNOS) 2164 + kfree(ses->serverNOS); 2165 2165 ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL); 2166 2166 if(ses->serverNOS == NULL) 2167 2167 goto sesssetup_nomem; ··· 2181 2177 if (remaining_words > 0) { 2182 2178 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2183 2179 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ 2180 + if(ses->serverDomain) 2181 + kfree(ses->serverDomain); 2184 2182 ses->serverDomain = 2185 2183 kzalloc(2*(len+1),GFP_KERNEL); 2186 2184 if(ses->serverDomain == NULL) ··· 2193 2187 ses->serverDomain[2*len] = 0; 2194 2188 ses->serverDomain[1+(2*len)] = 0; 2195 2189 } /* else no more room so create dummy domain string */ 2196 - else 2190 + else { 2191 + if(ses->serverDomain) 2192 + kfree(ses->serverDomain); 2197 2193 ses->serverDomain = 2198 2194 kzalloc(2, GFP_KERNEL); 2195 + } 2199 2196 } else { /* no room so create dummy domain and NOS string */ 2200 2197 /* if these kcallocs fail not much we 2201 2198 can do, but better to not fail the 2202 2199 sesssetup itself */ 2200 + if(ses->serverDomain) 2201 + kfree(ses->serverDomain); 2203 2202 ses->serverDomain = 2204 2203 kzalloc(2, GFP_KERNEL); 2204 + if(ses->serverNOS) 2205 + kfree(ses->serverNOS); 2205 2206 ses->serverNOS = 2206 2207 kzalloc(2, GFP_KERNEL); 2207 2208 } ··· 2217 2204 if (((long) bcc_ptr + len) - (long) 2218 2205 pByteArea(smb_buffer_response) 2219 2206 <= BCC(smb_buffer_response)) { 2207 + 
if(ses->serverOS) 2208 + kfree(ses->serverOS); 2220 2209 ses->serverOS = kzalloc(len + 1,GFP_KERNEL); 2221 2210 if(ses->serverOS == NULL) 2222 2211 goto sesssetup_nomem; ··· 2229 2214 bcc_ptr++; 2230 2215 2231 2216 len = strnlen(bcc_ptr, 1024); 2217 + if(ses->serverNOS) 2218 + kfree(ses->serverNOS); 2232 2219 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL); 2233 2220 if(ses->serverNOS == NULL) 2234 2221 goto sesssetup_nomem; ··· 2240 2223 bcc_ptr++; 2241 2224 2242 2225 len = strnlen(bcc_ptr, 1024); 2226 + if(ses->serverDomain) 2227 + kfree(ses->serverDomain); 2243 2228 ses->serverDomain = kzalloc(len + 1,GFP_KERNEL); 2244 2229 if(ses->serverDomain == NULL) 2245 2230 goto sesssetup_nomem; ··· 2446 2427 /* We look for obvious messed up bcc or strings in response so we do not go off 2447 2428 the end since (at least) WIN2K and Windows XP have a major bug in not null 2448 2429 terminating last Unicode string in response */ 2430 + if(ses->serverOS) 2431 + kfree(ses->serverOS); 2449 2432 ses->serverOS = 2450 2433 kzalloc(2 * (len + 1), GFP_KERNEL); 2451 2434 cifs_strfromUCS_le(ses->serverOS, ··· 2462 2441 len = UniStrnlen((wchar_t *)bcc_ptr, 2463 2442 remaining_words 2464 2443 - 1); 2444 + if(ses->serverNOS) 2445 + kfree(ses->serverNOS); 2465 2446 ses->serverNOS = 2466 2447 kzalloc(2 * (len + 1), 2467 2448 GFP_KERNEL); ··· 2477 2454 remaining_words -= len + 1; 2478 2455 if (remaining_words > 0) { 2479 2456 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2480 - /* last string is not always null terminated (for e.g. 
for Windows XP & 2000) */ 2457 + /* last string not null terminated (e.g.Windows XP/2000) */ 2458 + if(ses->serverDomain) 2459 + kfree(ses->serverDomain); 2481 2460 ses->serverDomain = kzalloc(2*(len+1),GFP_KERNEL); 2482 2461 cifs_strfromUCS_le(ses->serverDomain, 2483 2462 (__le16 *)bcc_ptr, ··· 2488 2463 ses->serverDomain[2*len] = 0; 2489 2464 ses->serverDomain[1+(2*len)] = 0; 2490 2465 } /* else no more room so create dummy domain string */ 2491 - else 2466 + else { 2467 + if(ses->serverDomain) 2468 + kfree(ses->serverDomain); 2492 2469 ses->serverDomain = 2493 2470 kzalloc(2,GFP_KERNEL); 2494 - } else { /* no room so create dummy domain and NOS string */ 2471 + } 2472 + } else {/* no room use dummy domain&NOS */ 2473 + if(ses->serverDomain) 2474 + kfree(ses->serverDomain); 2495 2475 ses->serverDomain = kzalloc(2, GFP_KERNEL); 2476 + if(ses->serverNOS) 2477 + kfree(ses->serverNOS); 2496 2478 ses->serverNOS = kzalloc(2, GFP_KERNEL); 2497 2479 } 2498 2480 } else { /* ASCII */ ··· 2508 2476 if (((long) bcc_ptr + len) - (long) 2509 2477 pByteArea(smb_buffer_response) 2510 2478 <= BCC(smb_buffer_response)) { 2479 + if(ses->serverOS) 2480 + kfree(ses->serverOS); 2511 2481 ses->serverOS = kzalloc(len + 1, GFP_KERNEL); 2512 2482 strncpy(ses->serverOS, bcc_ptr, len); 2513 2483 ··· 2518 2484 bcc_ptr++; 2519 2485 2520 2486 len = strnlen(bcc_ptr, 1024); 2487 + if(ses->serverNOS) 2488 + kfree(ses->serverNOS); 2521 2489 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL); 2522 2490 strncpy(ses->serverNOS, bcc_ptr, len); 2523 2491 bcc_ptr += len; ··· 2527 2491 bcc_ptr++; 2528 2492 2529 2493 len = strnlen(bcc_ptr, 1024); 2494 + if(ses->serverDomain) 2495 + kfree(ses->serverDomain); 2530 2496 ses->serverDomain = kzalloc(len + 1, GFP_KERNEL); 2531 2497 strncpy(ses->serverDomain, bcc_ptr, len); 2532 2498 bcc_ptr += len; ··· 2766 2728 /* We look for obvious messed up bcc or strings in response so we do not go off 2767 2729 the end since (at least) WIN2K and Windows XP have a major bug in 
not null 2768 2730 terminating last Unicode string in response */ 2731 + if(ses->serverOS) 2732 + kfree(ses->serverOS); 2769 2733 ses->serverOS = 2770 2734 kzalloc(2 * (len + 1), GFP_KERNEL); 2771 2735 cifs_strfromUCS_le(ses->serverOS, ··· 2783 2743 bcc_ptr, 2784 2744 remaining_words 2785 2745 - 1); 2746 + if(ses->serverNOS) 2747 + kfree(ses->serverNOS); 2786 2748 ses->serverNOS = 2787 2749 kzalloc(2 * (len + 1), 2788 2750 GFP_KERNEL); ··· 2802 2760 if (remaining_words > 0) { 2803 2761 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2804 2762 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ 2763 + if(ses->serverDomain) 2764 + kfree(ses->serverDomain); 2805 2765 ses->serverDomain = 2806 2766 kzalloc(2 * 2807 2767 (len + ··· 2821 2777 [1 + (2 * len)] 2822 2778 = 0; 2823 2779 } /* else no more room so create dummy domain string */ 2824 - else 2780 + else { 2781 + if(ses->serverDomain) 2782 + kfree(ses->serverDomain); 2825 2783 ses->serverDomain = 2826 2784 kzalloc(2, 2827 2785 GFP_KERNEL); 2786 + } 2828 2787 } else { /* no room so create dummy domain and NOS string */ 2788 + if(ses->serverDomain); 2789 + kfree(ses->serverDomain); 2829 2790 ses->serverDomain = 2830 2791 kzalloc(2, GFP_KERNEL); 2792 + if(ses->serverNOS) 2793 + kfree(ses->serverNOS); 2831 2794 ses->serverNOS = 2832 2795 kzalloc(2, GFP_KERNEL); 2833 2796 } ··· 2843 2792 if (((long) bcc_ptr + len) - (long) 2844 2793 pByteArea(smb_buffer_response) 2845 2794 <= BCC(smb_buffer_response)) { 2795 + if(ses->serverOS) 2796 + kfree(ses->serverOS); 2846 2797 ses->serverOS = 2847 2798 kzalloc(len + 1, 2848 2799 GFP_KERNEL); ··· 2856 2803 bcc_ptr++; 2857 2804 2858 2805 len = strnlen(bcc_ptr, 1024); 2806 + if(ses->serverNOS) 2807 + kfree(ses->serverNOS); 2859 2808 ses->serverNOS = 2860 2809 kzalloc(len + 1, 2861 2810 GFP_KERNEL); ··· 2867 2812 bcc_ptr++; 2868 2813 2869 2814 len = strnlen(bcc_ptr, 1024); 2815 + if(ses->serverDomain) 2816 + kfree(ses->serverDomain); 2870 2817 
ses->serverDomain = 2871 2818 kzalloc(len + 1, 2872 2819 GFP_KERNEL); ··· 3173 3116 /* We look for obvious messed up bcc or strings in response so we do not go off 3174 3117 the end since (at least) WIN2K and Windows XP have a major bug in not null 3175 3118 terminating last Unicode string in response */ 3119 + if(ses->serverOS) 3120 + kfree(ses->serverOS); 3176 3121 ses->serverOS = 3177 3122 kzalloc(2 * (len + 1), GFP_KERNEL); 3178 3123 cifs_strfromUCS_le(ses->serverOS, ··· 3190 3131 bcc_ptr, 3191 3132 remaining_words 3192 3133 - 1); 3134 + if(ses->serverNOS) 3135 + kfree(ses->serverNOS); 3193 3136 ses->serverNOS = 3194 3137 kzalloc(2 * (len + 1), 3195 3138 GFP_KERNEL); ··· 3208 3147 if (remaining_words > 0) { 3209 3148 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 3210 3149 /* last string not always null terminated (e.g. for Windows XP & 2000) */ 3150 + if(ses->serverDomain) 3151 + kfree(ses->serverDomain); 3211 3152 ses->serverDomain = 3212 3153 kzalloc(2 * 3213 3154 (len + ··· 3235 3172 len)] 3236 3173 = 0; 3237 3174 } /* else no more room so create dummy domain string */ 3238 - else 3175 + else { 3176 + if(ses->serverDomain) 3177 + kfree(ses->serverDomain); 3239 3178 ses->serverDomain = kzalloc(2,GFP_KERNEL); 3179 + } 3240 3180 } else { /* no room so create dummy domain and NOS string */ 3181 + if(ses->serverDomain) 3182 + kfree(ses->serverDomain); 3241 3183 ses->serverDomain = kzalloc(2, GFP_KERNEL); 3184 + if(ses->serverNOS) 3185 + kfree(ses->serverNOS); 3242 3186 ses->serverNOS = kzalloc(2, GFP_KERNEL); 3243 3187 } 3244 3188 } else { /* ASCII */ ··· 3253 3183 if (((long) bcc_ptr + len) - 3254 3184 (long) pByteArea(smb_buffer_response) 3255 3185 <= BCC(smb_buffer_response)) { 3186 + if(ses->serverOS) 3187 + kfree(ses->serverOS); 3256 3188 ses->serverOS = kzalloc(len + 1,GFP_KERNEL); 3257 3189 strncpy(ses->serverOS,bcc_ptr, len); 3258 3190 ··· 3263 3191 bcc_ptr++; 3264 3192 3265 3193 len = strnlen(bcc_ptr, 1024); 3194 + if(ses->serverNOS) 3195 + 
kfree(ses->serverNOS); 3266 3196 ses->serverNOS = kzalloc(len+1,GFP_KERNEL); 3267 3197 strncpy(ses->serverNOS, bcc_ptr, len); 3268 3198 bcc_ptr += len; ··· 3272 3198 bcc_ptr++; 3273 3199 3274 3200 len = strnlen(bcc_ptr, 1024); 3201 + if(ses->serverDomain) 3202 + kfree(ses->serverDomain); 3275 3203 ses->serverDomain = kzalloc(len+1,GFP_KERNEL); 3276 3204 strncpy(ses->serverDomain, bcc_ptr, len); 3277 3205 bcc_ptr += len; ··· 3358 3282 bcc_ptr++; /* align */ 3359 3283 } 3360 3284 3361 - if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 3285 + if(ses->server->secMode & 3286 + (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 3362 3287 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 3363 3288 3364 3289 if (ses->capabilities & CAP_STATUS32) { ··· 3371 3294 if (ses->capabilities & CAP_UNICODE) { 3372 3295 smb_buffer->Flags2 |= SMBFLG2_UNICODE; 3373 3296 length = 3374 - cifs_strtoUCS((__le16 *) bcc_ptr, tree, 100, nls_codepage); 3375 - bcc_ptr += 2 * length; /* convert num of 16 bit words to bytes */ 3297 + cifs_strtoUCS((__le16 *) bcc_ptr, tree, 3298 + 6 /* max utf8 char length in bytes */ * 3299 + (/* server len*/ + 256 /* share len */), nls_codepage); 3300 + bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ 3376 3301 bcc_ptr += 2; /* skip trailing null */ 3377 3302 } else { /* ASCII */ 3378 3303 strcpy(bcc_ptr, tree);
+8 -4
fs/cifs/file.c
··· 84 84 return FILE_OVERWRITE_IF; 85 85 else if ((flags & O_CREAT) == O_CREAT) 86 86 return FILE_OPEN_IF; 87 + else if ((flags & O_TRUNC) == O_TRUNC) 88 + return FILE_OVERWRITE; 87 89 else 88 90 return FILE_OPEN; 89 91 } ··· 658 656 else 659 657 posix_lock_type = CIFS_WRLCK; 660 658 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */, 661 - length, pfLock->fl_start, 659 + length, pfLock, 662 660 posix_lock_type, wait_flag); 663 661 FreeXid(xid); 664 662 return rc; ··· 706 704 return -EOPNOTSUPP; 707 705 } 708 706 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */, 709 - length, pfLock->fl_start, 707 + length, pfLock, 710 708 posix_lock_type, wait_flag); 711 709 } else 712 710 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, ··· 906 904 if (rc != 0) 907 905 break; 908 906 } 909 - if(experimEnabled || (pTcon->ses->server->secMode & 910 - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0) { 907 + if(experimEnabled || (pTcon->ses->server && 908 + ((pTcon->ses->server->secMode & 909 + (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 910 + == 0))) { 911 911 struct kvec iov[2]; 912 912 unsigned int len; 913 913
-1
fs/ext3/resize.c
··· 767 767 if (input->group != sbi->s_groups_count) { 768 768 ext3_warning(sb, __FUNCTION__, 769 769 "multiple resizers run on filesystem!"); 770 - unlock_super(sb); 771 770 err = -EBUSY; 772 771 goto exit_journal; 773 772 }
+10 -9
fs/namei.c
··· 1080 1080 nd->flags = flags; 1081 1081 nd->depth = 0; 1082 1082 1083 - read_lock(&current->fs->lock); 1084 1083 if (*name=='/') { 1084 + read_lock(&current->fs->lock); 1085 1085 if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) { 1086 1086 nd->mnt = mntget(current->fs->altrootmnt); 1087 1087 nd->dentry = dget(current->fs->altroot); ··· 1092 1092 } 1093 1093 nd->mnt = mntget(current->fs->rootmnt); 1094 1094 nd->dentry = dget(current->fs->root); 1095 + read_unlock(&current->fs->lock); 1095 1096 } else if (dfd == AT_FDCWD) { 1097 + read_lock(&current->fs->lock); 1096 1098 nd->mnt = mntget(current->fs->pwdmnt); 1097 1099 nd->dentry = dget(current->fs->pwd); 1100 + read_unlock(&current->fs->lock); 1098 1101 } else { 1099 1102 struct dentry *dentry; 1100 1103 1101 1104 file = fget_light(dfd, &fput_needed); 1102 1105 retval = -EBADF; 1103 1106 if (!file) 1104 - goto unlock_fail; 1107 + goto out_fail; 1105 1108 1106 1109 dentry = file->f_dentry; 1107 1110 1108 1111 retval = -ENOTDIR; 1109 1112 if (!S_ISDIR(dentry->d_inode->i_mode)) 1110 - goto fput_unlock_fail; 1113 + goto fput_fail; 1111 1114 1112 1115 retval = file_permission(file, MAY_EXEC); 1113 1116 if (retval) 1114 - goto fput_unlock_fail; 1117 + goto fput_fail; 1115 1118 1116 1119 nd->mnt = mntget(file->f_vfsmnt); 1117 1120 nd->dentry = dget(dentry); 1118 1121 1119 1122 fput_light(file, fput_needed); 1120 1123 } 1121 - read_unlock(&current->fs->lock); 1122 1124 current->total_link_count = 0; 1123 1125 retval = link_path_walk(name, nd); 1124 1126 out: ··· 1129 1127 nd->dentry->d_inode)) 1130 1128 audit_inode(name, nd->dentry->d_inode, flags); 1131 1129 } 1130 + out_fail: 1132 1131 return retval; 1133 1132 1134 - fput_unlock_fail: 1133 + fput_fail: 1135 1134 fput_light(file, fput_needed); 1136 - unlock_fail: 1137 - read_unlock(&current->fs->lock); 1138 - return retval; 1135 + goto out_fail; 1139 1136 } 1140 1137 1141 1138 int fastcall path_lookup(const char *name, unsigned int flags,
+1 -3
include/asm-alpha/smp.h
··· 45 45 #define hard_smp_processor_id() __hard_smp_processor_id() 46 46 #define raw_smp_processor_id() (current_thread_info()->cpu) 47 47 48 - extern cpumask_t cpu_present_mask; 49 - extern cpumask_t cpu_online_map; 50 48 extern int smp_num_cpus; 51 - #define cpu_possible_map cpu_present_mask 49 + #define cpu_possible_map cpu_present_map 52 50 53 51 int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu); 54 52
+1
include/asm-alpha/termbits.h
··· 148 148 #define HUPCL 00040000 149 149 150 150 #define CLOCAL 00100000 151 + #define CMSPAR 010000000000 /* mark or space (stick) parity */ 151 152 #define CRTSCTS 020000000000 /* flow control */ 152 153 153 154 /* c_lflag bits */
+1 -1
include/asm-arm/arch-ixp23xx/memory.h
··· 49 49 { 50 50 extern unsigned int processor_id; 51 51 52 - if (((processor_id & 15) >= 2) || machine_is_roadrunner()) 52 + if (((processor_id & 15) >= 4) || machine_is_roadrunner()) 53 53 return 1; 54 54 55 55 return 0;
+1 -1
include/asm-arm/arch-l7200/serial_l7200.h
··· 28 28 #define UARTDR 0x00 /* Tx/Rx data */ 29 29 #define RXSTAT 0x04 /* Rx status */ 30 30 #define H_UBRLCR 0x08 /* mode register high */ 31 - #define M_UBRLCR 0x0C /* mode reg mid (MSB of buad)*/ 31 + #define M_UBRLCR 0x0C /* mode reg mid (MSB of baud)*/ 32 32 #define L_UBRLCR 0x10 /* mode reg low (LSB of baud)*/ 33 33 #define UARTCON 0x14 /* control register */ 34 34 #define UARTFLG 0x18 /* flag register */
+1 -1
include/asm-arm/arch-l7200/uncompress.h
··· 6 6 * Changelog: 7 7 * 05-01-2000 SJH Created 8 8 * 05-13-2000 SJH Filled in function bodies 9 - * 07-26-2000 SJH Removed hard coded buad rate 9 + * 07-26-2000 SJH Removed hard coded baud rate 10 10 */ 11 11 12 12 #include <asm/hardware.h>
+6
include/asm-arm/system.h
··· 127 127 } 128 128 #endif 129 129 130 + #if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) 131 + #define cpu_is_xscale() 0 132 + #else 133 + #define cpu_is_xscale() 1 134 + #endif 135 + 130 136 #define set_cr(x) \ 131 137 __asm__ __volatile__( \ 132 138 "mcr p15, 0, %0, c1, c0, 0 @ set CR" \
+1 -10
include/asm-generic/pgtable.h
··· 159 159 #define lazy_mmu_prot_update(pte) do { } while (0) 160 160 #endif 161 161 162 - #ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE 162 + #ifndef __HAVE_ARCH_MOVE_PTE 163 163 #define move_pte(pte, prot, old_addr, new_addr) (pte) 164 - #else 165 - #define move_pte(pte, prot, old_addr, new_addr) \ 166 - ({ \ 167 - pte_t newpte = (pte); \ 168 - if (pte_present(pte) && pfn_valid(pte_pfn(pte)) && \ 169 - pte_page(pte) == ZERO_PAGE(old_addr)) \ 170 - newpte = mk_pte(ZERO_PAGE(new_addr), (prot)); \ 171 - newpte; \ 172 - }) 173 164 #endif 174 165 175 166 /*
+1
include/asm-mips/addrspace.h
··· 129 129 #if defined (CONFIG_CPU_R4300) \ 130 130 || defined (CONFIG_CPU_R4X00) \ 131 131 || defined (CONFIG_CPU_R5000) \ 132 + || defined (CONFIG_CPU_RM7000) \ 132 133 || defined (CONFIG_CPU_NEVADA) \ 133 134 || defined (CONFIG_CPU_TX49XX) \ 134 135 || defined (CONFIG_CPU_MIPS64)
+5 -1
include/asm-mips/cpu.h
··· 51 51 #define PRID_IMP_R4300 0x0b00 52 52 #define PRID_IMP_VR41XX 0x0c00 53 53 #define PRID_IMP_R12000 0x0e00 54 + #define PRID_IMP_R14000 0x0f00 54 55 #define PRID_IMP_R8000 0x1000 55 56 #define PRID_IMP_PR4450 0x1200 56 57 #define PRID_IMP_R4600 0x2000 ··· 88 87 #define PRID_IMP_24K 0x9300 89 88 #define PRID_IMP_34K 0x9500 90 89 #define PRID_IMP_24KE 0x9600 90 + #define PRID_IMP_74K 0x9700 91 91 92 92 /* 93 93 * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE ··· 198 196 #define CPU_34K 60 199 197 #define CPU_PR4450 61 200 198 #define CPU_SB1A 62 201 - #define CPU_LAST 62 199 + #define CPU_74K 63 200 + #define CPU_R14000 64 201 + #define CPU_LAST 64 202 202 203 203 /* 204 204 * ISA Level encodings
+12 -10
include/asm-mips/delay.h
··· 19 19 { 20 20 if (sizeof(long) == 4) 21 21 __asm__ __volatile__ ( 22 - ".set\tnoreorder\n" 23 - "1:\tbnez\t%0,1b\n\t" 24 - "subu\t%0,1\n\t" 25 - ".set\treorder" 22 + " .set noreorder \n" 23 + " .align 3 \n" 24 + "1: bnez %0, 1b \n" 25 + " subu %0, 1 \n" 26 + " .set reorder \n" 26 27 : "=r" (loops) 27 28 : "0" (loops)); 28 29 else if (sizeof(long) == 8) 29 30 __asm__ __volatile__ ( 30 - ".set\tnoreorder\n" 31 - "1:\tbnez\t%0,1b\n\t" 32 - "dsubu\t%0,1\n\t" 33 - ".set\treorder" 34 - :"=r" (loops) 35 - :"0" (loops)); 31 + " .set noreorder \n" 32 + " .align 3 \n" 33 + "1: bnez %0, 1b \n" 34 + " dsubu %0, 1 \n" 35 + " .set reorder \n" 36 + : "=r" (loops) 37 + : "0" (loops)); 36 38 } 37 39 38 40
+116 -25
include/asm-mips/futex.h
··· 7 7 #include <linux/futex.h> 8 8 #include <asm/errno.h> 9 9 #include <asm/uaccess.h> 10 + #include <asm/war.h> 10 11 11 12 #ifdef CONFIG_SMP 12 13 #define __FUTEX_SMP_SYNC " sync \n" ··· 17 16 18 17 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 19 18 { \ 20 - __asm__ __volatile__( \ 21 - " .set push \n" \ 22 - " .set noat \n" \ 23 - " .set mips3 \n" \ 24 - "1: ll %1, (%3) # __futex_atomic_op1 \n" \ 25 - " .set mips0 \n" \ 26 - " " insn " \n" \ 27 - " .set mips3 \n" \ 28 - "2: sc $1, (%3) \n" \ 29 - " beqzl $1, 1b \n" \ 30 - __FUTEX_SMP_SYNC \ 31 - "3: \n" \ 32 - " .set pop \n" \ 33 - " .set mips0 \n" \ 34 - " .section .fixup,\"ax\" \n" \ 35 - "4: li %0, %5 \n" \ 36 - " j 2b \n" \ 37 - " .previous \n" \ 38 - " .section __ex_table,\"a\" \n" \ 39 - " "__UA_ADDR "\t1b, 4b \n" \ 40 - " "__UA_ADDR "\t2b, 4b \n" \ 41 - " .previous \n" \ 42 - : "=r" (ret), "=r" (oldval) \ 43 - : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \ 19 + if (cpu_has_llsc && R10000_LLSC_WAR) { \ 20 + __asm__ __volatile__( \ 21 + " .set push \n" \ 22 + " .set noat \n" \ 23 + " .set mips3 \n" \ 24 + "1: ll %1, (%3) # __futex_atomic_op \n" \ 25 + " .set mips0 \n" \ 26 + " " insn " \n" \ 27 + " .set mips3 \n" \ 28 + "2: sc $1, (%3) \n" \ 29 + " beqzl $1, 1b \n" \ 30 + __FUTEX_SMP_SYNC \ 31 + "3: \n" \ 32 + " .set pop \n" \ 33 + " .set mips0 \n" \ 34 + " .section .fixup,\"ax\" \n" \ 35 + "4: li %0, %5 \n" \ 36 + " j 2b \n" \ 37 + " .previous \n" \ 38 + " .section __ex_table,\"a\" \n" \ 39 + " "__UA_ADDR "\t1b, 4b \n" \ 40 + " "__UA_ADDR "\t2b, 4b \n" \ 41 + " .previous \n" \ 42 + : "=r" (ret), "=r" (oldval) \ 43 + : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \ 44 + } else if (cpu_has_llsc) { \ 45 + __asm__ __volatile__( \ 46 + " .set push \n" \ 47 + " .set noat \n" \ 48 + " .set mips3 \n" \ 49 + "1: ll %1, (%3) # __futex_atomic_op \n" \ 50 + " .set mips0 \n" \ 51 + " " insn " \n" \ 52 + " .set mips3 \n" \ 53 + "2: sc $1, (%3) \n" \ 54 + " beqz $1, 1b \n" \ 55 + 
__FUTEX_SMP_SYNC \ 56 + "3: \n" \ 57 + " .set pop \n" \ 58 + " .set mips0 \n" \ 59 + " .section .fixup,\"ax\" \n" \ 60 + "4: li %0, %5 \n" \ 61 + " j 2b \n" \ 62 + " .previous \n" \ 63 + " .section __ex_table,\"a\" \n" \ 64 + " "__UA_ADDR "\t1b, 4b \n" \ 65 + " "__UA_ADDR "\t2b, 4b \n" \ 66 + " .previous \n" \ 67 + : "=r" (ret), "=r" (oldval) \ 68 + : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \ 69 + } else \ 70 + ret = -ENOSYS; \ 44 71 } 45 72 46 73 static inline int ··· 131 102 static inline int 132 103 futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) 133 104 { 134 - return -ENOSYS; 105 + int retval; 106 + 107 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 108 + return -EFAULT; 109 + 110 + if (cpu_has_llsc && R10000_LLSC_WAR) { 111 + __asm__ __volatile__( 112 + "# futex_atomic_cmpxchg_inatomic \n" 113 + " .set push \n" 114 + " .set noat \n" 115 + " .set mips3 \n" 116 + "1: ll %0, %2 \n" 117 + " bne %0, %z3, 3f \n" 118 + " .set mips0 \n" 119 + " move $1, %z4 \n" 120 + " .set mips3 \n" 121 + "2: sc $1, %1 \n" 122 + " beqzl $1, 1b \n" 123 + __FUTEX_SMP_SYNC 124 + "3: \n" 125 + " .set pop \n" 126 + " .section .fixup,\"ax\" \n" 127 + "4: li %0, %5 \n" 128 + " j 3b \n" 129 + " .previous \n" 130 + " .section __ex_table,\"a\" \n" 131 + " "__UA_ADDR "\t1b, 4b \n" 132 + " "__UA_ADDR "\t2b, 4b \n" 133 + " .previous \n" 134 + : "=&r" (retval), "=R" (*uaddr) 135 + : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 136 + : "memory"); 137 + } else if (cpu_has_llsc) { 138 + __asm__ __volatile__( 139 + "# futex_atomic_cmpxchg_inatomic \n" 140 + " .set push \n" 141 + " .set noat \n" 142 + " .set mips3 \n" 143 + "1: ll %0, %2 \n" 144 + " bne %0, %z3, 3f \n" 145 + " .set mips0 \n" 146 + " move $1, %z4 \n" 147 + " .set mips3 \n" 148 + "2: sc $1, %1 \n" 149 + " beqz $1, 1b \n" 150 + __FUTEX_SMP_SYNC 151 + "3: \n" 152 + " .set pop \n" 153 + " .section .fixup,\"ax\" \n" 154 + "4: li %0, %5 \n" 155 + " j 3b \n" 156 + " .previous \n" 157 + " 
.section __ex_table,\"a\" \n" 158 + " "__UA_ADDR "\t1b, 4b \n" 159 + " "__UA_ADDR "\t2b, 4b \n" 160 + " .previous \n" 161 + : "=&r" (retval), "=R" (*uaddr) 162 + : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 163 + : "memory"); 164 + } else 165 + return -ENOSYS; 166 + 167 + return retval; 135 168 } 136 169 137 170 #endif
+28 -5
include/asm-mips/inst.h
··· 6 6 * for more details. 7 7 * 8 8 * Copyright (C) 1996, 2000 by Ralf Baechle 9 + * Copyright (C) 2006 by Thiemo Seufer 9 10 */ 10 11 #ifndef _ASM_INST_H 11 12 #define _ASM_INST_H ··· 22 21 cop0_op, cop1_op, cop2_op, cop1x_op, 23 22 beql_op, bnel_op, blezl_op, bgtzl_op, 24 23 daddi_op, daddiu_op, ldl_op, ldr_op, 25 - major_1c_op, jalx_op, major_1e_op, major_1f_op, 24 + spec2_op, jalx_op, mdmx_op, spec3_op, 26 25 lb_op, lh_op, lwl_op, lw_op, 27 26 lbu_op, lhu_op, lwr_op, lwu_op, 28 27 sb_op, sh_op, swl_op, sw_op, 29 28 sdl_op, sdr_op, swr_op, cache_op, 30 29 ll_op, lwc1_op, lwc2_op, pref_op, 31 30 lld_op, ldc1_op, ldc2_op, ld_op, 32 - sc_op, swc1_op, swc2_op, rdhwr_op, 31 + sc_op, swc1_op, swc2_op, major_3b_op, 33 32 scd_op, sdc1_op, sdc2_op, sd_op 34 33 }; 35 34 ··· 38 37 */ 39 38 enum spec_op { 40 39 sll_op, movc_op, srl_op, sra_op, 41 - sllv_op, srlv_op, srav_op, spec1_unused_op, /* Opcode 0x07 is unused */ 40 + sllv_op, pmon_op, srlv_op, srav_op, 42 41 jr_op, jalr_op, movz_op, movn_op, 43 42 syscall_op, break_op, spim_op, sync_op, 44 43 mfhi_op, mthi_op, mflo_op, mtlo_op, ··· 53 52 teq_op, spec5_unused_op, tne_op, spec6_unused_op, 54 53 dsll_op, spec7_unused_op, dsrl_op, dsra_op, 55 54 dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op 55 + }; 56 + 57 + /* 58 + * func field of spec2 opcode. 59 + */ 60 + enum spec2_op { 61 + madd_op, maddu_op, mul_op, spec2_3_unused_op, 62 + msub_op, msubu_op, /* more unused ops */ 63 + clz_op = 0x20, clo_op, 64 + dclz_op = 0x24, dclo_op, 65 + sdbpp_op = 0x3f 66 + }; 67 + 68 + /* 69 + * func field of spec3 opcode. 70 + */ 71 + enum spec3_op { 72 + ext_op, dextm_op, dextu_op, dext_op, 73 + ins_op, dinsm_op, dinsu_op, dins_op, 74 + bshfl_op = 0x20, 75 + dbshfl_op = 0x24, 76 + rdhwr_op = 0x3f 56 77 }; 57 78 58 79 /* ··· 174 151 * func field for mad opcodes (MIPS IV). 
175 152 */ 176 153 enum mad_func { 177 - madd_op = 0x08, msub_op = 0x0a, 178 - nmadd_op = 0x0c, nmsub_op = 0x0e 154 + madd_fp_op = 0x08, msub_fp_op = 0x0a, 155 + nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e 179 156 }; 180 157 181 158 /*
+1 -1
include/asm-mips/mipsregs.h
··· 291 291 #define ST0_DL (_ULCAST_(1) << 24) 292 292 293 293 /* 294 - * Enable the MIPS DSP ASE 294 + * Enable the MIPS MDMX and DSP ASEs 295 295 */ 296 296 #define ST0_MX 0x01000000 297 297
+2
include/asm-mips/page.h
··· 139 139 140 140 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 141 141 142 + #ifndef CONFIG_SPARSEMEM 142 143 #ifndef CONFIG_NEED_MULTIPLE_NODES 143 144 #define pfn_valid(pfn) ((pfn) < max_mapnr) 145 + #endif 144 146 #endif 145 147 146 148 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+41 -22
include/asm-mips/pgtable-32.h
··· 177 177 ((swp_entry_t) { ((type) << 10) | ((offset) << 15) }) 178 178 179 179 /* 180 - * Bits 0, 1, 2, 9 and 10 are taken, split up the 27 bits of offset 181 - * into this range: 180 + * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range: 182 181 */ 183 - #define PTE_FILE_MAX_BITS 27 182 + #define PTE_FILE_MAX_BITS 28 184 183 185 - #define pte_to_pgoff(_pte) \ 186 - ((((_pte).pte >> 3) & 0x3f ) + (((_pte).pte >> 11) << 8 )) 184 + #define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \ 185 + (((_pte).pte >> 2 ) & 0x38) | \ 186 + (((_pte).pte >> 10) << 6 )) 187 187 188 - #define pgoff_to_pte(off) \ 189 - ((pte_t) { (((off) & 0x3f) << 3) + (((off) >> 8) << 11) + _PAGE_FILE }) 188 + #define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \ 189 + (((off) & 0x38) << 2 ) | \ 190 + (((off) >> 6 ) << 10) | \ 191 + _PAGE_FILE }) 190 192 191 193 #else 192 194 193 195 /* Swap entries must have VALID and GLOBAL bits cleared. */ 194 - #define __swp_type(x) (((x).val >> 8) & 0x1f) 195 - #define __swp_offset(x) ((x).val >> 13) 196 + #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 197 + #define __swp_type(x) (((x).val >> 2) & 0x1f) 198 + #define __swp_offset(x) ((x).val >> 7) 196 199 #define __swp_entry(type,offset) \ 197 - ((swp_entry_t) { ((type) << 8) | ((offset) << 13) }) 200 + ((swp_entry_t) { ((type) << 2) | ((offset) << 7) }) 201 + #else 202 + #define __swp_type(x) (((x).val >> 8) & 0x1f) 203 + #define __swp_offset(x) ((x).val >> 13) 204 + #define __swp_entry(type,offset) \ 205 + ((swp_entry_t) { ((type) << 8) | ((offset) << 13) }) 206 + #endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */ 198 207 208 + #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 199 209 /* 200 - * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset 201 - * into this range: 210 + * Bits 0 and 1 of pte_high are taken, use the rest for the page offset... 
202 211 */ 203 - #define PTE_FILE_MAX_BITS 27 212 + #define PTE_FILE_MAX_BITS 30 204 213 205 - #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) 206 - /* fixme */ 207 - #define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f)) 208 - #define pgoff_to_pte(off) \ 209 - ((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)}) 214 + #define pte_to_pgoff(_pte) ((_pte).pte_high >> 2) 215 + #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 }) 210 216 211 217 #else 212 - #define pte_to_pgoff(_pte) \ 213 - ((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 )) 218 + /* 219 + * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range: 220 + */ 221 + #define PTE_FILE_MAX_BITS 28 214 222 215 - #define pgoff_to_pte(off) \ 216 - ((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE }) 223 + #define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \ 224 + (((_pte).pte >> 2) & 0x8) | \ 225 + (((_pte).pte >> 8) << 4)) 226 + 227 + #define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \ 228 + (((off) & 0x8) << 2) | \ 229 + (((off) >> 4) << 8) | \ 230 + _PAGE_FILE }) 217 231 #endif 218 232 219 233 #endif 220 234 235 + #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 236 + #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) 237 + #define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) 238 + #else 221 239 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 222 240 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 241 + #endif 223 242 224 243 #endif /* _ASM_PGTABLE_32_H */
+5 -8
include/asm-mips/pgtable-64.h
··· 224 224 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 225 225 226 226 /* 227 - * Bits 0, 1, 2, 7 and 8 are taken, split up the 32 bits of offset 228 - * into this range: 227 + * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to 228 + * make things easier, and only use the upper 56 bits for the page offset... 229 229 */ 230 - #define PTE_FILE_MAX_BITS 32 230 + #define PTE_FILE_MAX_BITS 56 231 231 232 - #define pte_to_pgoff(_pte) \ 233 - ((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 )) 234 - 235 - #define pgoff_to_pte(off) \ 236 - ((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE }) 232 + #define pte_to_pgoff(_pte) ((_pte).pte >> 8) 233 + #define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE }) 237 234 238 235 #endif /* _ASM_PGTABLE_64_H */
+61 -42
include/asm-mips/pgtable.h
··· 70 70 #define ZERO_PAGE(vaddr) \ 71 71 (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) 72 72 73 - #define __HAVE_ARCH_MULTIPLE_ZERO_PAGE 73 + #define __HAVE_ARCH_MOVE_PTE 74 + #define move_pte(pte, prot, old_addr, new_addr) \ 75 + ({ \ 76 + pte_t newpte = (pte); \ 77 + if (pte_present(pte) && pfn_valid(pte_pfn(pte)) && \ 78 + pte_page(pte) == ZERO_PAGE(old_addr)) \ 79 + newpte = mk_pte(ZERO_PAGE(new_addr), (prot)); \ 80 + newpte; \ 81 + }) 74 82 75 83 extern void paging_init(void); 76 84 ··· 90 82 #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) 91 83 #define pmd_page_kernel(pmd) pmd_val(pmd) 92 84 93 - #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) 94 - #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 95 - 96 85 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) 86 + 87 + #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) 88 + #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) 89 + 97 90 static inline void set_pte(pte_t *ptep, pte_t pte) 98 91 { 99 92 ptep->pte_high = pte.pte_high; ··· 102 93 ptep->pte_low = pte.pte_low; 103 94 //printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low); 104 95 105 - if (pte_val(pte) & _PAGE_GLOBAL) { 96 + if (pte.pte_low & _PAGE_GLOBAL) { 106 97 pte_t *buddy = ptep_buddy(ptep); 107 98 /* 108 99 * Make sure the buddy is global too (if it's !none, 109 100 * it better already be global) 110 101 */ 111 - if (pte_none(*buddy)) 112 - buddy->pte_low |= _PAGE_GLOBAL; 102 + if (pte_none(*buddy)) { 103 + buddy->pte_low |= _PAGE_GLOBAL; 104 + buddy->pte_high |= _PAGE_GLOBAL; 105 + } 113 106 } 114 107 } 115 108 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 116 109 117 110 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 118 111 { 112 + pte_t null = __pte(0); 113 + 119 114 /* Preserve global status for the pair */ 120 - if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) 121 - 
set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); 122 - else 123 - set_pte_at(mm, addr, ptep, __pte(0)); 115 + if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) 116 + null.pte_low = null.pte_high = _PAGE_GLOBAL; 117 + 118 + set_pte_at(mm, addr, ptep, null); 124 119 } 125 120 #else 121 + 122 + #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) 123 + #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 124 + 126 125 /* 127 126 * Certain architectures need to do special things when pte's 128 127 * within a page table are directly modified. Thus, the following ··· 191 174 */ 192 175 static inline int pte_user(pte_t pte) { BUG(); return 0; } 193 176 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) 194 - static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_READ; } 195 - static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_WRITE; } 196 - static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_MODIFIED; } 197 - static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } 198 - static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } 177 + static inline int pte_read(pte_t pte) { return pte.pte_low & _PAGE_READ; } 178 + static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; } 179 + static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; } 180 + static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } 181 + static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; } 182 + 199 183 static inline pte_t pte_wrprotect(pte_t pte) 200 184 { 201 - (pte).pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); 202 - (pte).pte_high &= ~_PAGE_SILENT_WRITE; 185 + pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); 186 + pte.pte_high &= ~_PAGE_SILENT_WRITE; 203 187 return pte; 204 188 } 205 189 206 190 static inline pte_t pte_rdprotect(pte_t pte) 207 191 { 208 - (pte).pte_low &= ~(_PAGE_READ | 
_PAGE_SILENT_READ); 209 - (pte).pte_high &= ~_PAGE_SILENT_READ; 192 + pte.pte_low &= ~(_PAGE_READ | _PAGE_SILENT_READ); 193 + pte.pte_high &= ~_PAGE_SILENT_READ; 210 194 return pte; 211 195 } 212 196 213 197 static inline pte_t pte_mkclean(pte_t pte) 214 198 { 215 - (pte).pte_low &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); 216 - (pte).pte_high &= ~_PAGE_SILENT_WRITE; 199 + pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); 200 + pte.pte_high &= ~_PAGE_SILENT_WRITE; 217 201 return pte; 218 202 } 219 203 220 204 static inline pte_t pte_mkold(pte_t pte) 221 205 { 222 - (pte).pte_low &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); 223 - (pte).pte_high &= ~_PAGE_SILENT_READ; 206 + pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); 207 + pte.pte_high &= ~_PAGE_SILENT_READ; 224 208 return pte; 225 209 } 226 210 227 211 static inline pte_t pte_mkwrite(pte_t pte) 228 212 { 229 - (pte).pte_low |= _PAGE_WRITE; 230 - if ((pte).pte_low & _PAGE_MODIFIED) { 231 - (pte).pte_low |= _PAGE_SILENT_WRITE; 232 - (pte).pte_high |= _PAGE_SILENT_WRITE; 213 + pte.pte_low |= _PAGE_WRITE; 214 + if (pte.pte_low & _PAGE_MODIFIED) { 215 + pte.pte_low |= _PAGE_SILENT_WRITE; 216 + pte.pte_high |= _PAGE_SILENT_WRITE; 233 217 } 234 218 return pte; 235 219 } 236 220 237 221 static inline pte_t pte_mkread(pte_t pte) 238 222 { 239 - (pte).pte_low |= _PAGE_READ; 240 - if ((pte).pte_low & _PAGE_ACCESSED) { 241 - (pte).pte_low |= _PAGE_SILENT_READ; 242 - (pte).pte_high |= _PAGE_SILENT_READ; 223 + pte.pte_low |= _PAGE_READ; 224 + if (pte.pte_low & _PAGE_ACCESSED) { 225 + pte.pte_low |= _PAGE_SILENT_READ; 226 + pte.pte_high |= _PAGE_SILENT_READ; 243 227 } 244 228 return pte; 245 229 } 246 230 247 231 static inline pte_t pte_mkdirty(pte_t pte) 248 232 { 249 - (pte).pte_low |= _PAGE_MODIFIED; 250 - if ((pte).pte_low & _PAGE_WRITE) { 251 - (pte).pte_low |= _PAGE_SILENT_WRITE; 252 - (pte).pte_high |= _PAGE_SILENT_WRITE; 233 + pte.pte_low |= _PAGE_MODIFIED; 234 + if (pte.pte_low & _PAGE_WRITE) { 235 + pte.pte_low |= 
_PAGE_SILENT_WRITE; 236 + pte.pte_high |= _PAGE_SILENT_WRITE; 253 237 } 254 238 return pte; 255 239 } 256 240 257 241 static inline pte_t pte_mkyoung(pte_t pte) 258 242 { 259 - (pte).pte_low |= _PAGE_ACCESSED; 260 - if ((pte).pte_low & _PAGE_READ) 261 - (pte).pte_low |= _PAGE_SILENT_READ; 262 - (pte).pte_high |= _PAGE_SILENT_READ; 243 + pte.pte_low |= _PAGE_ACCESSED; 244 + if (pte.pte_low & _PAGE_READ) 245 + pte.pte_low |= _PAGE_SILENT_READ; 246 + pte.pte_high |= _PAGE_SILENT_READ; 263 247 return pte; 264 248 } 265 249 #else ··· 353 335 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) 354 336 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 355 337 { 356 - pte.pte_low &= _PAGE_CHG_MASK; 357 - pte.pte_low |= pgprot_val(newprot); 338 + pte.pte_low &= _PAGE_CHG_MASK; 339 + pte.pte_high &= ~0x3f; 340 + pte.pte_low |= pgprot_val(newprot); 358 341 pte.pte_high |= pgprot_val(newprot) & 0x3f; 359 342 return pte; 360 343 }
+8 -2
include/asm-mips/sigcontext.h
··· 55 55 struct sigcontext { 56 56 unsigned long sc_regs[32]; 57 57 unsigned long sc_fpregs[32]; 58 - unsigned long sc_hi[4]; 59 - unsigned long sc_lo[4]; 58 + unsigned long sc_mdhi; 59 + unsigned long sc_hi1; 60 + unsigned long sc_hi2; 61 + unsigned long sc_hi3; 62 + unsigned long sc_mdlo; 63 + unsigned long sc_lo1; 64 + unsigned long sc_lo2; 65 + unsigned long sc_lo3; 60 66 unsigned long sc_pc; 61 67 unsigned int sc_fpc_csr; 62 68 unsigned int sc_used_math;
+2 -3
include/asm-mips/smp.h
··· 48 48 #define SMP_CALL_FUNCTION 0x2 49 49 50 50 extern cpumask_t phys_cpu_present_map; 51 - extern cpumask_t cpu_online_map; 52 51 #define cpu_possible_map phys_cpu_present_map 53 52 54 53 extern cpumask_t cpu_callout_map; ··· 85 86 extern void plat_smp_setup(void); 86 87 87 88 /* 88 - * Called after init_IRQ but before __cpu_up. 89 + * Called in smp_prepare_cpus. 89 90 */ 90 - extern void prom_prepare_cpus(unsigned int max_cpus); 91 + extern void plat_prepare_cpus(unsigned int max_cpus); 91 92 92 93 /* 93 94 * Last chance for the board code to finish SMP initialization before
+14
include/asm-mips/sparsemem.h
··· 1 + #ifndef _MIPS_SPARSEMEM_H 2 + #define _MIPS_SPARSEMEM_H 3 + #ifdef CONFIG_SPARSEMEM 4 + 5 + /* 6 + * SECTION_SIZE_BITS 2^N: how big each section will be 7 + * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space 8 + */ 9 + #define SECTION_SIZE_BITS 28 10 + #define MAX_PHYSMEM_BITS 35 11 + 12 + #endif /* CONFIG_SPARSEMEM */ 13 + #endif /* _MIPS_SPARSEMEM_H */ 14 +
+1
include/asm-powerpc/termbits.h
··· 153 153 #define HUPCL 00040000 154 154 155 155 #define CLOCAL 00100000 156 + #define CMSPAR 010000000000 /* mark or space (stick) parity */ 156 157 #define CRTSCTS 020000000000 /* flow control */ 157 158 158 159 /* c_lflag bits */
+2 -2
include/asm-s390/lowcore.h
··· 98 98 #define __LC_KERNEL_ASCE 0xD58 99 99 #define __LC_USER_ASCE 0xD60 100 100 #define __LC_PANIC_STACK 0xD68 101 - #define __LC_CPUID 0xD90 102 - #define __LC_CPUADDR 0xD98 101 + #define __LC_CPUID 0xD80 102 + #define __LC_CPUADDR 0xD88 103 103 #define __LC_IPLDEV 0xDB8 104 104 #define __LC_JIFFY_TIMER 0xDC0 105 105 #define __LC_CURRENT 0xDD8
+17
include/asm-sparc64/pgtable.h
··· 689 689 #define pte_clear(mm,addr,ptep) \ 690 690 set_pte_at((mm), (addr), (ptep), __pte(0UL)) 691 691 692 + #ifdef DCACHE_ALIASING_POSSIBLE 693 + #define __HAVE_ARCH_MOVE_PTE 694 + #define move_pte(pte, prot, old_addr, new_addr) \ 695 + ({ \ 696 + pte_t newpte = (pte); \ 697 + if (tlb_type != hypervisor && pte_present(pte)) { \ 698 + unsigned long this_pfn = pte_pfn(pte); \ 699 + \ 700 + if (pfn_valid(this_pfn) && \ 701 + (((old_addr) ^ (new_addr)) & (1 << 13))) \ 702 + flush_dcache_page_all(current->mm, \ 703 + pfn_to_page(this_pfn)); \ 704 + } \ 705 + newpte; \ 706 + }) 707 + #endif 708 + 692 709 extern pgd_t swapper_pg_dir[2048]; 693 710 extern pmd_t swapper_low_pmd_dir[2048]; 694 711
+6
include/asm-um/irqflags.h
··· 1 + #ifndef __UM_IRQFLAGS_H 2 + #define __UM_IRQFLAGS_H 3 + 4 + /* Empty for now */ 5 + 6 + #endif
+3 -3
include/asm-um/uaccess.h
··· 41 41 42 42 #define __get_user(x, ptr) \ 43 43 ({ \ 44 - const __typeof__(ptr) __private_ptr = ptr; \ 44 + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \ 45 45 __typeof__(x) __private_val; \ 46 46 int __private_ret = -EFAULT; \ 47 47 (x) = (__typeof__(*(__private_ptr)))0; \ 48 - if (__copy_from_user((void *) &__private_val, (__private_ptr), \ 48 + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\ 49 49 sizeof(*(__private_ptr))) == 0) { \ 50 50 (x) = (__typeof__(*(__private_ptr))) __private_val; \ 51 51 __private_ret = 0; \ ··· 62 62 63 63 #define __put_user(x, ptr) \ 64 64 ({ \ 65 - __typeof__(ptr) __private_ptr = ptr; \ 65 + __typeof__(*(ptr)) __user *__private_ptr = ptr; \ 66 66 __typeof__(*(__private_ptr)) __private_val; \ 67 67 int __private_ret = -EFAULT; \ 68 68 __private_val = (__typeof__(*(__private_ptr))) (x); \
+1 -1
include/asm-x86_64/elf.h
··· 159 159 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 160 160 161 161 /* 1GB for 64bit, 8MB for 32bit */ 162 - #define STACK_RND_MASK (is_compat_task() ? 0x7ff : 0x3fffff) 162 + #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) 163 163 164 164 #endif 165 165
+5 -8
include/linux/input.h
··· 345 345 #define KEY_SAVE 234 346 346 #define KEY_DOCUMENTS 235 347 347 348 + #define KEY_BATTERY 236 349 + 348 350 #define KEY_UNKNOWN 240 349 351 350 352 #define BTN_MISC 0x100 ··· 579 577 * Switch events 580 578 */ 581 579 582 - #define SW_0 0x00 583 - #define SW_1 0x01 584 - #define SW_2 0x02 585 - #define SW_3 0x03 586 - #define SW_4 0x04 587 - #define SW_5 0x05 588 - #define SW_6 0x06 589 - #define SW_7 0x07 580 + #define SW_LID 0x00 /* set = lid shut */ 581 + #define SW_TABLET_MODE 0x01 /* set = tablet mode */ 582 + #define SW_HEADPHONE_INSERT 0x02 /* set = inserted */ 590 583 #define SW_MAX 0x0f 591 584 592 585 /*
+2 -2
include/linux/m48t86.h
··· 11 11 12 12 struct m48t86_ops 13 13 { 14 - void (*writeb)(unsigned char value, unsigned long addr); 15 - unsigned char (*readb)(unsigned long addr); 14 + void (*writebyte)(unsigned char value, unsigned long addr); 15 + unsigned char (*readbyte)(unsigned long addr); 16 16 };
+1
include/linux/mmzone.h
··· 15 15 #include <linux/seqlock.h> 16 16 #include <linux/nodemask.h> 17 17 #include <asm/atomic.h> 18 + #include <asm/page.h> 18 19 19 20 /* Free memory management - zoned buddy allocator. */ 20 21 #ifndef CONFIG_FORCE_MAX_ZONEORDER
+1
include/linux/pci_ids.h
··· 1231 1231 #define PCI_DEVICE_ID_VIA_8380_0 0x0204 1232 1232 #define PCI_DEVICE_ID_VIA_3238_0 0x0238 1233 1233 #define PCI_DEVICE_ID_VIA_PT880 0x0258 1234 + #define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308 1234 1235 #define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259 1235 1236 #define PCI_DEVICE_ID_VIA_3269_0 0x0269 1236 1237 #define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
-5
include/linux/vt_kern.h
··· 73 73 int vt_waitactive(int vt); 74 74 void change_console(struct vc_data *new_vc); 75 75 void reset_vc(struct vc_data *vc); 76 - #ifdef CONFIG_VT 77 - int is_console_suspend_safe(void); 78 - #else 79 - static inline int is_console_suspend_safe(void) { return 1; } 80 - #endif 81 76 82 77 /* 83 78 * vc_screen.c shares this temporary buffer with the console write code so that
+2 -1
include/net/compat.h
··· 3 3 4 4 #include <linux/config.h> 5 5 6 + struct sock; 7 + 6 8 #if defined(CONFIG_COMPAT) 7 9 8 10 #include <linux/compat.h> ··· 25 23 compat_int_t cmsg_type; 26 24 }; 27 25 28 - struct sock; 29 26 extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *); 30 27 31 28 #else /* defined(CONFIG_COMPAT) */
+6
kernel/hrtimer.c
··· 456 456 457 457 return ret; 458 458 } 459 + EXPORT_SYMBOL_GPL(hrtimer_start); 459 460 460 461 /** 461 462 * hrtimer_try_to_cancel - try to deactivate a timer ··· 485 484 return ret; 486 485 487 486 } 487 + EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel); 488 488 489 489 /** 490 490 * hrtimer_cancel - cancel a timer and wait for the handler to finish. ··· 506 504 cpu_relax(); 507 505 } 508 506 } 507 + EXPORT_SYMBOL_GPL(hrtimer_cancel); 509 508 510 509 /** 511 510 * hrtimer_get_remaining - get remaining time for the timer ··· 525 522 526 523 return rem; 527 524 } 525 + EXPORT_SYMBOL_GPL(hrtimer_get_remaining); 528 526 529 527 #ifdef CONFIG_NO_IDLE_HZ 530 528 /** ··· 584 580 timer->base = &bases[clock_id]; 585 581 timer->node.rb_parent = HRTIMER_INACTIVE; 586 582 } 583 + EXPORT_SYMBOL_GPL(hrtimer_init); 587 584 588 585 /** 589 586 * hrtimer_get_res - get the timer resolution for a clock ··· 604 599 605 600 return 0; 606 601 } 602 + EXPORT_SYMBOL_GPL(hrtimer_get_res); 607 603 608 604 /* 609 605 * Expire the per base hrtimer-queue:
+4 -4
mm/memory_hotplug.c
··· 91 91 if (start_pfn < zone->zone_start_pfn) 92 92 zone->zone_start_pfn = start_pfn; 93 93 94 - if (end_pfn > old_zone_end_pfn) 95 - zone->spanned_pages = end_pfn - zone->zone_start_pfn; 94 + zone->spanned_pages = max(old_zone_end_pfn, end_pfn) - 95 + zone->zone_start_pfn; 96 96 97 97 zone_span_writeunlock(zone); 98 98 } ··· 106 106 if (start_pfn < pgdat->node_start_pfn) 107 107 pgdat->node_start_pfn = start_pfn; 108 108 109 - if (end_pfn > old_pgdat_end_pfn) 110 - pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn; 109 + pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) - 110 + pgdat->node_start_pfn; 111 111 } 112 112 113 113 int online_pages(unsigned long pfn, unsigned long nr_pages)
+13 -14
mm/slab.c
··· 207 207 #define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) 208 208 #define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) 209 209 210 - /* Max number of objs-per-slab for caches which use off-slab slabs. 211 - * Needed to avoid a possible looping condition in cache_grow(). 212 - */ 213 - static unsigned long offslab_limit; 214 - 215 210 /* 216 211 * struct slab 217 212 * ··· 1351 1356 NULL, NULL); 1352 1357 } 1353 1358 1354 - /* Inc off-slab bufctl limit until the ceiling is hit. */ 1355 - if (!(OFF_SLAB(sizes->cs_cachep))) { 1356 - offslab_limit = sizes->cs_size - sizeof(struct slab); 1357 - offslab_limit /= sizeof(kmem_bufctl_t); 1358 - } 1359 - 1360 1359 sizes->cs_dmacachep = kmem_cache_create(names->name_dma, 1361 1360 sizes->cs_size, 1362 1361 ARCH_KMALLOC_MINALIGN, ··· 1769 1780 static size_t calculate_slab_order(struct kmem_cache *cachep, 1770 1781 size_t size, size_t align, unsigned long flags) 1771 1782 { 1783 + unsigned long offslab_limit; 1772 1784 size_t left_over = 0; 1773 1785 int gfporder; 1774 1786 ··· 1781 1791 if (!num) 1782 1792 continue; 1783 1793 1784 - /* More than offslab_limit objects will cause problems */ 1785 - if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit) 1786 - break; 1794 + if (flags & CFLGS_OFF_SLAB) { 1795 + /* 1796 + * Max number of objs-per-slab for caches which 1797 + * use off-slab slabs. Needed to avoid a possible 1798 + * looping condition in cache_grow(). 1799 + */ 1800 + offslab_limit = size - sizeof(struct slab); 1801 + offslab_limit /= sizeof(kmem_bufctl_t); 1802 + 1803 + if (num > offslab_limit) 1804 + break; 1805 + } 1787 1806 1788 1807 /* Found something acceptable - save it away */ 1789 1808 cachep->num = num;
+7 -12
net/bridge/br_if.c
··· 300 300 rtnl_lock(); 301 301 if (strchr(dev->name, '%')) { 302 302 ret = dev_alloc_name(dev, dev->name); 303 - if (ret < 0) 304 - goto err1; 303 + if (ret < 0) { 304 + free_netdev(dev); 305 + goto out; 306 + } 305 307 } 306 308 307 309 ret = register_netdevice(dev); 308 310 if (ret) 309 - goto err2; 311 + goto out; 310 312 311 313 ret = br_sysfs_addbr(dev); 312 314 if (ret) 313 - goto err3; 314 - rtnl_unlock(); 315 - return 0; 316 - 317 - err3: 318 - unregister_netdev(dev); 319 - err2: 320 - free_netdev(dev); 321 - err1: 315 + unregister_netdevice(dev); 316 + out: 322 317 rtnl_unlock(); 323 318 return ret; 324 319 }
+11 -9
net/core/dev.c
··· 127 127 * sure which should go first, but I bet it won't make much 128 128 * difference if we are running VLANs. The good news is that 129 129 * this protocol won't be in the list unless compiled in, so 130 - * the average user (w/out VLANs) will not be adversly affected. 130 + * the average user (w/out VLANs) will not be adversely affected. 131 131 * --BLG 132 132 * 133 133 * 0800 IP ··· 149 149 static struct list_head ptype_all; /* Taps */ 150 150 151 151 /* 152 - * The @dev_base list is protected by @dev_base_lock and the rtln 152 + * The @dev_base list is protected by @dev_base_lock and the rtnl 153 153 * semaphore. 154 154 * 155 155 * Pure readers hold dev_base_lock for reading. ··· 641 641 * @name: name format string 642 642 * 643 643 * Passed a format string - eg "lt%d" it will try and find a suitable 644 - * id. Not efficient for many devices, not called a lot. The caller 645 - * must hold the dev_base or rtnl lock while allocating the name and 646 - * adding the device in order to avoid duplicates. Returns the number 647 - * of the unit assigned or a negative errno code. 644 + * id. It scans list of devices to build up a free map, then chooses 645 + * the first empty slot. The caller must hold the dev_base or rtnl lock 646 + * while allocating the name and adding the device in order to avoid 647 + * duplicates. 648 + * Limited to bits_per_byte * page size devices (ie 32K on most platforms). 649 + * Returns the number of the unit assigned or a negative errno code. 648 650 */ 649 651 650 652 int dev_alloc_name(struct net_device *dev, const char *name) ··· 746 744 } 747 745 748 746 /** 749 - * netdev_features_change - device changes fatures 747 + * netdev_features_change - device changes features 750 748 * @dev: device to cause notification 751 749 * 752 750 * Called to indicate a device has changed features. ··· 2198 2196 * @dev: device 2199 2197 * @inc: modifier 2200 2198 * 2201 - * Add or remove promsicuity from a device. 
While the count in the device 2199 + * Add or remove promiscuity from a device. While the count in the device 2202 2200 * remains above zero the interface remains promiscuous. Once it hits zero 2203 2201 * the device reverts back to normal filtering operation. A negative inc 2204 2202 * value is used to drop promiscuity on the device. ··· 3124 3122 void free_netdev(struct net_device *dev) 3125 3123 { 3126 3124 #ifdef CONFIG_SYSFS 3127 - /* Compatiablity with error handling in drivers */ 3125 + /* Compatibility with error handling in drivers */ 3128 3126 if (dev->reg_state == NETREG_UNINITIALIZED) { 3129 3127 kfree((char *)dev - dev->padded); 3130 3128 return;
-1
net/ethernet/Makefile
··· 3 3 # 4 4 5 5 obj-y += eth.o 6 - obj-$(CONFIG_SYSCTL) += sysctl_net_ether.o 7 6 obj-$(subst m,y,$(CONFIG_IPX)) += pe2.o 8 7 obj-$(subst m,y,$(CONFIG_ATALK)) += pe2.o
-14
net/ethernet/sysctl_net_ether.c
··· 1 - /* -*- linux-c -*- 2 - * sysctl_net_ether.c: sysctl interface to net Ethernet subsystem. 3 - * 4 - * Begun April 1, 1996, Mike Shaver. 5 - * Added /proc/sys/net/ether directory entry (empty =) ). [MS] 6 - */ 7 - 8 - #include <linux/mm.h> 9 - #include <linux/sysctl.h> 10 - #include <linux/if_ether.h> 11 - 12 - ctl_table ether_table[] = { 13 - {0} 14 - };
+2 -2
net/ipv4/netfilter/Kconfig
··· 170 170 Documentation/modules.txt. If unsure, say `N'. 171 171 172 172 config IP_NF_H323 173 - tristate 'H.323 protocol support' 174 - depends on IP_NF_CONNTRACK 173 + tristate 'H.323 protocol support (EXPERIMENTAL)' 174 + depends on IP_NF_CONNTRACK && EXPERIMENTAL 175 175 help 176 176 H.323 is a VoIP signalling protocol from ITU-T. As one of the most 177 177 important VoIP protocols, it is widely used by voice hardware and
+1
net/ipv4/netfilter/ip_conntrack_core.c
··· 1318 1318 .tuple.dst.u.tcp.port; 1319 1319 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] 1320 1320 .tuple.dst.ip; 1321 + memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); 1321 1322 1322 1323 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", 1323 1324 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+2 -2
net/ipv4/netfilter/ip_conntrack_helper_pptp.c
··· 469 469 DEBUGP("%s but no session\n", pptp_msg_name[msg]); 470 470 break; 471 471 } 472 - if (info->sstate != PPTP_CALL_IN_REP 473 - && info->sstate != PPTP_CALL_IN_CONF) { 472 + if (info->cstate != PPTP_CALL_IN_REP 473 + && info->cstate != PPTP_CALL_IN_CONF) { 474 474 DEBUGP("%s but never sent IN_CALL_REPLY\n", 475 475 pptp_msg_name[msg]); 476 476 break;
+1
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
··· 348 348 .tuple.dst.u.tcp.port; 349 349 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] 350 350 .tuple.dst.u3.ip; 351 + memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); 351 352 352 353 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", 353 354 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+2 -1
net/ipv4/tcp_highspeed.c
··· 135 135 136 136 /* Do additive increase */ 137 137 if (tp->snd_cwnd < tp->snd_cwnd_clamp) { 138 - tp->snd_cwnd_cnt += ca->ai; 138 + /* cwnd = cwnd + a(w) / cwnd */ 139 + tp->snd_cwnd_cnt += ca->ai + 1; 139 140 if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 140 141 tp->snd_cwnd_cnt -= tp->snd_cwnd; 141 142 tp->snd_cwnd++;
+5 -7
net/ipv4/tcp_output.c
··· 642 642 * eventually). The difference is that pulled data not copied, but 643 643 * immediately discarded. 644 644 */ 645 - static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len) 645 + static void __pskb_trim_head(struct sk_buff *skb, int len) 646 646 { 647 647 int i, k, eat; 648 648 ··· 667 667 skb->tail = skb->data; 668 668 skb->data_len -= len; 669 669 skb->len = skb->data_len; 670 - return skb->tail; 671 670 } 672 671 673 672 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) ··· 675 676 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 676 677 return -ENOMEM; 677 678 678 - if (len <= skb_headlen(skb)) { 679 + /* If len == headlen, we avoid __skb_pull to preserve alignment. */ 680 + if (unlikely(len < skb_headlen(skb))) 679 681 __skb_pull(skb, len); 680 - } else { 681 - if (__pskb_trim_head(skb, len-skb_headlen(skb)) == NULL) 682 - return -ENOMEM; 683 - } 682 + else 683 + __pskb_trim_head(skb, len - skb_headlen(skb)); 684 684 685 685 TCP_SKB_CB(skb)->seq += len; 686 686 skb->ip_summed = CHECKSUM_HW;
+11 -5
net/ipv6/route.c
··· 280 280 { 281 281 struct neighbour *neigh = rt->rt6i_nexthop; 282 282 int m = 0; 283 - if (neigh) { 283 + if (rt->rt6i_flags & RTF_NONEXTHOP || 284 + !(rt->rt6i_flags & RTF_GATEWAY)) 285 + m = 1; 286 + else if (neigh) { 284 287 read_lock_bh(&neigh->lock); 285 288 if (neigh->nud_state & NUD_VALID) 286 - m = 1; 289 + m = 2; 287 290 read_unlock_bh(&neigh->lock); 288 291 } 289 292 return m; ··· 295 292 static int rt6_score_route(struct rt6_info *rt, int oif, 296 293 int strict) 297 294 { 298 - int m = rt6_check_dev(rt, oif); 295 + int m, n; 296 + 297 + m = rt6_check_dev(rt, oif); 299 298 if (!m && (strict & RT6_SELECT_F_IFACE)) 300 299 return -1; 301 300 #ifdef CONFIG_IPV6_ROUTER_PREF 302 301 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2; 303 302 #endif 304 - if (rt6_check_neigh(rt)) 303 + n = rt6_check_neigh(rt); 304 + if (n > 1) 305 305 m |= 16; 306 - else if (strict & RT6_SELECT_F_REACHABLE) 306 + else if (!n && strict & RT6_SELECT_F_REACHABLE) 307 307 return -1; 308 308 return m; 309 309 }
+2 -1
net/irda/irlap.c
··· 884 884 if (now) { 885 885 /* Send down empty frame to trigger speed change */ 886 886 skb = dev_alloc_skb(0); 887 - irlap_queue_xmit(self, skb); 887 + if (skb) 888 + irlap_queue_xmit(self, skb); 888 889 } 889 890 } 890 891
-8
net/sysctl_net.c
··· 37 37 .mode = 0555, 38 38 .child = core_table, 39 39 }, 40 - #ifdef CONFIG_NET 41 - { 42 - .ctl_name = NET_ETHER, 43 - .procname = "ethernet", 44 - .mode = 0555, 45 - .child = ether_table, 46 - }, 47 - #endif 48 40 #ifdef CONFIG_INET 49 41 { 50 42 .ctl_name = NET_IPV4,
+4 -2
security/selinux/hooks.c
··· 4422 4422 4423 4423 /* Set up any superblocks initialized prior to the policy load. */ 4424 4424 printk(KERN_INFO "SELinux: Setting up existing superblocks.\n"); 4425 + spin_lock(&sb_lock); 4425 4426 spin_lock(&sb_security_lock); 4426 4427 next_sb: 4427 4428 if (!list_empty(&superblock_security_head)) { ··· 4431 4430 struct superblock_security_struct, 4432 4431 list); 4433 4432 struct super_block *sb = sbsec->sb; 4434 - spin_lock(&sb_lock); 4435 4433 sb->s_count++; 4436 - spin_unlock(&sb_lock); 4437 4434 spin_unlock(&sb_security_lock); 4435 + spin_unlock(&sb_lock); 4438 4436 down_read(&sb->s_umount); 4439 4437 if (sb->s_root) 4440 4438 superblock_doinit(sb, NULL); 4441 4439 drop_super(sb); 4440 + spin_lock(&sb_lock); 4442 4441 spin_lock(&sb_security_lock); 4443 4442 list_del_init(&sbsec->list); 4444 4443 goto next_sb; 4445 4444 } 4446 4445 spin_unlock(&sb_security_lock); 4446 + spin_unlock(&sb_lock); 4447 4447 } 4448 4448 4449 4449 /* SELinux requires early initialization in order to label