Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' into upstream

+1323 -794
+5 -4
Documentation/serial/driver
··· 214 214 The interaction of the iflag bits is as follows (parity error 215 215 given as an example): 216 216 Parity error INPCK IGNPAR 217 - None n/a n/a character received 218 - Yes n/a 0 character discarded 219 - Yes 0 1 character received, marked as 217 + n/a 0 n/a character received, marked as 220 218 TTY_NORMAL 221 - Yes 1 1 character received, marked as 219 + None 1 n/a character received, marked as 220 + TTY_NORMAL 221 + Yes 1 0 character received, marked as 222 222 TTY_PARITY 223 + Yes 1 1 character discarded 223 224 224 225 Other flags may be used (eg, xon/xoff characters) if your 225 226 hardware supports hardware "soft" flow control.
+17
MAINTAINERS
··· 568 568 W: http://www.penguinppc.org/ppc64/ 569 569 S: Supported 570 570 571 + BROADCOM BNX2 GIGABIT ETHERNET DRIVER 572 + P: Michael Chan 573 + M: mchan@broadcom.com 574 + L: netdev@vger.kernel.org 575 + S: Supported 576 + 577 + BROADCOM TG3 GIGABIT ETHERNET DRIVER 578 + P: Michael Chan 579 + M: mchan@broadcom.com 580 + L: netdev@vger.kernel.org 581 + S: Supported 582 + 571 583 BTTV VIDEO4LINUX DRIVER 572 584 P: Mauro Carvalho Chehab 573 585 M: mchehab@infradead.org ··· 1893 1881 M: rgooch@atnf.csiro.au 1894 1882 L: linux-kernel@vger.kernel.org 1895 1883 W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html 1884 + S: Maintained 1885 + 1886 + MULTIMEDIA CARD SUBSYSTEM 1887 + P: Russell King 1888 + M: rmk+mmc@arm.linux.org.uk 1896 1889 S: Maintained 1897 1890 1898 1891 MULTISOUND SOUND DRIVER
+2 -2
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 17 4 - EXTRAVERSION =-rc5 5 - NAME=Lordi Rules 4 + EXTRAVERSION =-rc6 5 + NAME=Crazed Snow-Weasel 6 6 7 7 # *DOCUMENTATION* 8 8 # To see a list of typical targets execute "make help"
-1
arch/alpha/kernel/alpha_ksyms.c
··· 182 182 EXPORT_SYMBOL(smp_call_function); 183 183 EXPORT_SYMBOL(smp_call_function_on_cpu); 184 184 EXPORT_SYMBOL(_atomic_dec_and_lock); 185 - EXPORT_SYMBOL(cpu_present_mask); 186 185 #endif /* CONFIG_SMP */ 187 186 188 187 /*
+3 -3
arch/alpha/kernel/process.c
··· 94 94 if (cpuid != boot_cpuid) { 95 95 flags |= 0x00040000UL; /* "remain halted" */ 96 96 *pflags = flags; 97 - clear_bit(cpuid, &cpu_present_mask); 97 + cpu_clear(cpuid, cpu_present_map); 98 98 halt(); 99 99 } 100 100 #endif ··· 120 120 121 121 #ifdef CONFIG_SMP 122 122 /* Wait for the secondaries to halt. */ 123 - cpu_clear(boot_cpuid, cpu_possible_map); 124 - while (cpus_weight(cpu_possible_map)) 123 + cpu_clear(boot_cpuid, cpu_present_map); 124 + while (cpus_weight(cpu_present_map)) 125 125 barrier(); 126 126 #endif 127 127
+4 -10
arch/alpha/kernel/smp.c
··· 68 68 static int smp_secondary_alive __initdata = 0; 69 69 70 70 /* Which cpus ids came online. */ 71 - cpumask_t cpu_present_mask; 72 71 cpumask_t cpu_online_map; 73 72 74 73 EXPORT_SYMBOL(cpu_online_map); ··· 438 439 if ((cpu->flags & 0x1cc) == 0x1cc) { 439 440 smp_num_probed++; 440 441 /* Assume here that "whami" == index */ 441 - cpu_set(i, cpu_present_mask); 442 + cpu_set(i, cpu_present_map); 442 443 cpu->pal_revision = boot_cpu_palrev; 443 444 } 444 445 ··· 449 450 } 450 451 } else { 451 452 smp_num_probed = 1; 452 - cpu_set(boot_cpuid, cpu_present_mask); 453 453 } 454 454 455 - printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n", 456 - smp_num_probed, cpu_possible_map.bits[0]); 455 + printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n", 456 + smp_num_probed, cpu_present_map.bits[0]); 457 457 } 458 458 459 459 /* ··· 471 473 472 474 /* Nothing to do on a UP box, or when told not to. */ 473 475 if (smp_num_probed == 1 || max_cpus == 0) { 474 - cpu_present_mask = cpumask_of_cpu(boot_cpuid); 476 + cpu_present_map = cpumask_of_cpu(boot_cpuid); 475 477 printk(KERN_INFO "SMP mode deactivated.\n"); 476 478 return; 477 479 } ··· 484 486 void __devinit 485 487 smp_prepare_boot_cpu(void) 486 488 { 487 - /* 488 - * Mark the boot cpu (current cpu) as online 489 - */ 490 - cpu_set(smp_processor_id(), cpu_online_map); 491 489 } 492 490 493 491 int __devinit
+1 -1
arch/alpha/kernel/sys_titan.c
··· 66 66 register int bcpu = boot_cpuid; 67 67 68 68 #ifdef CONFIG_SMP 69 - cpumask_t cpm = cpu_present_mask; 69 + cpumask_t cpm = cpu_present_map; 70 70 volatile unsigned long *dim0, *dim1, *dim2, *dim3; 71 71 unsigned long mask0, mask1, mask2, mask3, dummy; 72 72
+1 -1
arch/arm/Kconfig.debug
··· 101 101 help 102 102 Choice for UART for kernel low-level using S3C2410 UARTS, 103 103 should be between zero and two. The port must have been 104 - initalised by the boot-loader before use. 104 + initialised by the boot-loader before use. 105 105 106 106 The uncompressor code port configuration is now handled 107 107 by CONFIG_S3C2410_LOWLEVEL_UART_PORT.
+15 -3
arch/arm/mach-ixp23xx/core.c
··· 178 178 179 179 static void ixp23xx_irq_mask(unsigned int irq) 180 180 { 181 - volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 181 + volatile unsigned long *intr_reg; 182 182 183 + if (irq >= 56) 184 + irq += 8; 185 + 186 + intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 183 187 *intr_reg &= ~(1 << (irq % 32)); 184 188 } 185 189 ··· 203 199 */ 204 200 static void ixp23xx_irq_level_unmask(unsigned int irq) 205 201 { 206 - volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 202 + volatile unsigned long *intr_reg; 207 203 208 204 ixp23xx_irq_ack(irq); 209 205 206 + if (irq >= 56) 207 + irq += 8; 208 + 209 + intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 210 210 *intr_reg |= (1 << (irq % 32)); 211 211 } 212 212 213 213 static void ixp23xx_irq_edge_unmask(unsigned int irq) 214 214 { 215 - volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 215 + volatile unsigned long *intr_reg; 216 216 217 + if (irq >= 56) 218 + irq += 8; 219 + 220 + intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 217 221 *intr_reg |= (1 << (irq % 32)); 218 222 } 219 223
+1 -1
arch/arm/mach-ixp4xx/Kconfig
··· 141 141 2) If > 64MB of memory space is required, the IXP4xx can be 142 142 configured to use indirect registers to access PCI This allows 143 143 for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus. 144 - The disadvantadge of this is that every PCI access requires 144 + The disadvantage of this is that every PCI access requires 145 145 three local register accesses plus a spinlock, but in some 146 146 cases the performance hit is acceptable. In addition, you cannot 147 147 mmap() PCI devices in this case due to the indirect nature
+1
arch/arm/mach-pxa/mainstone.c
··· 493 493 MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)") 494 494 /* Maintainer: MontaVista Software Inc. */ 495 495 .phys_io = 0x40000000, 496 + .boot_params = 0xa0000100, /* BLOB boot parameter setting */ 496 497 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 497 498 .map_io = mainstone_map_io, 498 499 .init_irq = mainstone_init_irq,
+1 -1
arch/arm/mach-s3c2410/Kconfig
··· 170 170 depends on ARCH_S3C2410 && PM 171 171 help 172 172 Say Y here if you want verbose debugging from the PM Suspend and 173 - Resume code. See `Documentation/arm/Samsing-S3C24XX/Suspend.txt` 173 + Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt> 174 174 for more information. 175 175 176 176 config S3C2410_PM_CHECK
+2 -2
arch/arm/mm/mm-armv.c
··· 376 376 ecc_mask = 0; 377 377 } 378 378 379 - if (cpu_arch <= CPU_ARCH_ARMv5TEJ) { 379 + if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) { 380 380 for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 381 381 if (mem_types[i].prot_l1) 382 382 mem_types[i].prot_l1 |= PMD_BIT4; ··· 631 631 pgd = init_mm.pgd; 632 632 633 633 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; 634 - if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ) 634 + if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) 635 635 base_pmdval |= PMD_BIT4; 636 636 637 637 for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
+2 -1
arch/arm/mm/proc-xsc3.S
··· 427 427 #endif 428 428 mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg 429 429 mrc p15, 0, r0, c1, c0, 0 @ get control register 430 - bic r0, r0, #0x0200 @ .... ..R. .... .... 431 430 bic r0, r0, #0x0002 @ .... .... .... ..A. 432 431 orr r0, r0, #0x0005 @ .... .... .... .C.M 433 432 #if BTB_ENABLE 433 + bic r0, r0, #0x0200 @ .... ..R. .... .... 434 434 orr r0, r0, #0x3900 @ ..VI Z..S .... .... 435 435 #else 436 + bic r0, r0, #0x0a00 @ .... Z.R. .... .... 436 437 orr r0, r0, #0x3100 @ ..VI ...S .... .... 437 438 #endif 438 439 #if L2_CACHE_ENABLE
-8
arch/i386/kernel/acpi/boot.c
··· 1066 1066 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), 1067 1067 }, 1068 1068 }, 1069 - { 1070 - .callback = disable_acpi_pci, 1071 - .ident = "HP xw9300", 1072 - .matches = { 1073 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1074 - DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"), 1075 - }, 1076 - }, 1077 1069 {} 1078 1070 }; 1079 1071
+10 -6
arch/i386/mach-generic/probe.c
··· 93 93 int i; 94 94 for (i = 0; apic_probe[i]; ++i) { 95 95 if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) { 96 - genapic = apic_probe[i]; 97 - printk(KERN_INFO "Switched to APIC driver `%s'.\n", 98 - genapic->name); 96 + if (!cmdline_apic) { 97 + genapic = apic_probe[i]; 98 + printk(KERN_INFO "Switched to APIC driver `%s'.\n", 99 + genapic->name); 100 + } 99 101 return 1; 100 102 } 101 103 } ··· 109 107 int i; 110 108 for (i = 0; apic_probe[i]; ++i) { 111 109 if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { 112 - genapic = apic_probe[i]; 113 - printk(KERN_INFO "Switched to APIC driver `%s'.\n", 114 - genapic->name); 110 + if (!cmdline_apic) { 111 + genapic = apic_probe[i]; 112 + printk(KERN_INFO "Switched to APIC driver `%s'.\n", 113 + genapic->name); 114 + } 115 115 return 1; 116 116 } 117 117 }
+47 -49
arch/mips/Kconfig
··· 13 13 default SGI_IP22 14 14 15 15 config MIPS_MTX1 16 - bool "Support for 4G Systems MTX-1 board" 16 + bool "4G Systems MTX-1 board" 17 17 select DMA_NONCOHERENT 18 18 select HW_HAS_PCI 19 19 select SOC_AU1500 ··· 120 120 select SYS_SUPPORTS_LITTLE_ENDIAN 121 121 122 122 config MIPS_COBALT 123 - bool "Support for Cobalt Server" 123 + bool "Cobalt Server" 124 124 select DMA_NONCOHERENT 125 125 select HW_HAS_PCI 126 126 select I8259 ··· 132 132 select SYS_SUPPORTS_LITTLE_ENDIAN 133 133 134 134 config MACH_DECSTATION 135 - bool "Support for DECstations" 135 + bool "DECstations" 136 136 select BOOT_ELF32 137 137 select DMA_NONCOHERENT 138 138 select EARLY_PRINTK ··· 158 158 otherwise choose R3000. 159 159 160 160 config MIPS_EV64120 161 - bool "Support for Galileo EV64120 Evaluation board (EXPERIMENTAL)" 161 + bool "Galileo EV64120 Evaluation board (EXPERIMENTAL)" 162 162 depends on EXPERIMENTAL 163 163 select DMA_NONCOHERENT 164 164 select HW_HAS_PCI ··· 175 175 kernel for this platform. 176 176 177 177 config MIPS_EV96100 178 - bool "Support for Galileo EV96100 Evaluation board (EXPERIMENTAL)" 178 + bool "Galileo EV96100 Evaluation board (EXPERIMENTAL)" 179 179 depends on EXPERIMENTAL 180 180 select DMA_NONCOHERENT 181 181 select HW_HAS_PCI ··· 195 195 here if you wish to build a kernel for this platform. 196 196 197 197 config MIPS_IVR 198 - bool "Support for Globespan IVR board" 198 + bool "Globespan IVR board" 199 199 select DMA_NONCOHERENT 200 200 select HW_HAS_PCI 201 201 select ITE_BOARD_GEN ··· 211 211 build a kernel for this platform. 212 212 213 213 config MIPS_ITE8172 214 - bool "Support for ITE 8172G board" 214 + bool "ITE 8172G board" 215 215 select DMA_NONCOHERENT 216 216 select HW_HAS_PCI 217 217 select ITE_BOARD_GEN ··· 228 228 a kernel for this platform. 
229 229 230 230 config MACH_JAZZ 231 - bool "Support for the Jazz family of machines" 231 + bool "Jazz family of machines" 232 232 select ARC 233 233 select ARC32 234 234 select ARCH_MAY_HAVE_PC_FDC ··· 246 246 Olivetti M700-10 workstations. 247 247 248 248 config LASAT 249 - bool "Support for LASAT Networks platforms" 249 + bool "LASAT Networks platforms" 250 250 select DMA_NONCOHERENT 251 251 select HW_HAS_PCI 252 252 select MIPS_GT64120 ··· 258 258 select SYS_SUPPORTS_LITTLE_ENDIAN 259 259 260 260 config MIPS_ATLAS 261 - bool "Support for MIPS Atlas board" 261 + bool "MIPS Atlas board" 262 262 select BOOT_ELF32 263 263 select DMA_NONCOHERENT 264 264 select IRQ_CPU ··· 283 283 board. 284 284 285 285 config MIPS_MALTA 286 - bool "Support for MIPS Malta board" 286 + bool "MIPS Malta board" 287 287 select ARCH_MAY_HAVE_PC_FDC 288 288 select BOOT_ELF32 289 289 select HAVE_STD_PC_SERIAL_PORT ··· 311 311 board. 312 312 313 313 config MIPS_SEAD 314 - bool "Support for MIPS SEAD board (EXPERIMENTAL)" 314 + bool "MIPS SEAD board (EXPERIMENTAL)" 315 315 depends on EXPERIMENTAL 316 316 select IRQ_CPU 317 317 select DMA_NONCOHERENT ··· 328 328 board. 329 329 330 330 config MIPS_SIM 331 - bool 'Support for MIPS simulator (MIPSsim)' 331 + bool 'MIPS simulator (MIPSsim)' 332 332 select DMA_NONCOHERENT 333 333 select IRQ_CPU 334 334 select SYS_HAS_CPU_MIPS32_R1 ··· 341 341 emulator. 342 342 343 343 config MOMENCO_JAGUAR_ATX 344 - bool "Support for Momentum Jaguar board" 344 + bool "Momentum Jaguar board" 345 345 select BOOT_ELF32 346 346 select DMA_NONCOHERENT 347 347 select HW_HAS_PCI ··· 361 361 Momentum Computer <http://www.momenco.com/>. 362 362 363 363 config MOMENCO_OCELOT 364 - bool "Support for Momentum Ocelot board" 364 + bool "Momentum Ocelot board" 365 365 select DMA_NONCOHERENT 366 366 select HW_HAS_PCI 367 367 select IRQ_CPU ··· 378 378 Momentum Computer <http://www.momenco.com/>. 
379 379 380 380 config MOMENCO_OCELOT_3 381 - bool "Support for Momentum Ocelot-3 board" 381 + bool "Momentum Ocelot-3 board" 382 382 select BOOT_ELF32 383 383 select DMA_NONCOHERENT 384 384 select HW_HAS_PCI ··· 397 397 PMC-Sierra Rm79000 core. 398 398 399 399 config MOMENCO_OCELOT_C 400 - bool "Support for Momentum Ocelot-C board" 400 + bool "Momentum Ocelot-C board" 401 401 select DMA_NONCOHERENT 402 402 select HW_HAS_PCI 403 403 select IRQ_CPU ··· 414 414 Momentum Computer <http://www.momenco.com/>. 415 415 416 416 config MOMENCO_OCELOT_G 417 - bool "Support for Momentum Ocelot-G board" 417 + bool "Momentum Ocelot-G board" 418 418 select DMA_NONCOHERENT 419 419 select HW_HAS_PCI 420 420 select IRQ_CPU ··· 431 431 Momentum Computer <http://www.momenco.com/>. 432 432 433 433 config MIPS_XXS1500 434 - bool "Support for MyCable XXS1500 board" 434 + bool "MyCable XXS1500 board" 435 435 select DMA_NONCOHERENT 436 436 select SOC_AU1500 437 437 select SYS_SUPPORTS_LITTLE_ENDIAN 438 438 439 439 config PNX8550_V2PCI 440 - bool "Support for Philips PNX8550 based Viper2-PCI board" 440 + bool "Philips PNX8550 based Viper2-PCI board" 441 441 select PNX8550 442 442 select SYS_SUPPORTS_LITTLE_ENDIAN 443 443 444 444 config PNX8550_JBS 445 - bool "Support for Philips PNX8550 based JBS board" 445 + bool "Philips PNX8550 based JBS board" 446 446 select PNX8550 447 447 select SYS_SUPPORTS_LITTLE_ENDIAN 448 448 449 449 config DDB5074 450 - bool "Support for NEC DDB Vrc-5074 (EXPERIMENTAL)" 450 + bool "NEC DDB Vrc-5074 (EXPERIMENTAL)" 451 451 depends on EXPERIMENTAL 452 452 select DDB5XXX_COMMON 453 453 select DMA_NONCOHERENT ··· 465 465 evaluation board. 466 466 467 467 config DDB5476 468 - bool "Support for NEC DDB Vrc-5476" 468 + bool "NEC DDB Vrc-5476" 469 469 select DDB5XXX_COMMON 470 470 select DMA_NONCOHERENT 471 471 select HAVE_STD_PC_SERIAL_PORT ··· 486 486 IDE controller, PS2 keyboard, PS2 mouse, etc. 
487 487 488 488 config DDB5477 489 - bool "Support for NEC DDB Vrc-5477" 489 + bool "NEC DDB Vrc-5477" 490 490 select DDB5XXX_COMMON 491 491 select DMA_NONCOHERENT 492 492 select HW_HAS_PCI ··· 504 504 ether port USB, AC97, PCI, etc. 505 505 506 506 config MACH_VR41XX 507 - bool "Support for NEC VR4100 series based machines" 507 + bool "NEC VR41XX-based machines" 508 508 select SYS_HAS_CPU_VR41XX 509 509 select SYS_SUPPORTS_32BIT_KERNEL 510 510 select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL 511 511 512 512 config PMC_YOSEMITE 513 - bool "Support for PMC-Sierra Yosemite eval board" 513 + bool "PMC-Sierra Yosemite eval board" 514 514 select DMA_COHERENT 515 515 select HW_HAS_PCI 516 516 select IRQ_CPU ··· 527 527 manufactured by PMC-Sierra. 528 528 529 529 config QEMU 530 - bool "Support for Qemu" 530 + bool "Qemu" 531 531 select DMA_COHERENT 532 532 select GENERIC_ISA_DMA 533 533 select HAVE_STD_PC_SERIAL_PORT ··· 547 547 can be found at http://www.linux-mips.org/wiki/Qemu. 548 548 549 549 config SGI_IP22 550 - bool "Support for SGI IP22 (Indy/Indigo2)" 550 + bool "SGI IP22 (Indy/Indigo2)" 551 551 select ARC 552 552 select ARC32 553 553 select BOOT_ELF32 ··· 567 567 that runs on these, say Y here. 568 568 569 569 config SGI_IP27 570 - bool "Support for SGI IP27 (Origin200/2000)" 570 + bool "SGI IP27 (Origin200/2000)" 571 571 select ARC 572 572 select ARC64 573 573 select BOOT_ELF64 ··· 583 583 here. 584 584 585 585 config SGI_IP32 586 - bool "Support for SGI IP32 (O2) (EXPERIMENTAL)" 586 + bool "SGI IP32 (O2) (EXPERIMENTAL)" 587 587 depends on EXPERIMENTAL 588 588 select ARC 589 589 select ARC32 ··· 604 604 If you want this kernel to run on SGI O2 workstation, say Y here. 
605 605 606 606 config SIBYTE_BIGSUR 607 - bool "Support for Sibyte BCM91480B-BigSur" 607 + bool "Sibyte BCM91480B-BigSur" 608 608 select BOOT_ELF32 609 609 select DMA_COHERENT 610 610 select PCI_DOMAINS ··· 615 615 select SYS_SUPPORTS_LITTLE_ENDIAN 616 616 617 617 config SIBYTE_SWARM 618 - bool "Support for Sibyte BCM91250A-SWARM" 618 + bool "Sibyte BCM91250A-SWARM" 619 619 select BOOT_ELF32 620 620 select DMA_COHERENT 621 621 select SIBYTE_SB1250 ··· 626 626 select SYS_SUPPORTS_LITTLE_ENDIAN 627 627 628 628 config SIBYTE_SENTOSA 629 - bool "Support for Sibyte BCM91250E-Sentosa" 629 + bool "Sibyte BCM91250E-Sentosa" 630 630 depends on EXPERIMENTAL 631 631 select BOOT_ELF32 632 632 select DMA_COHERENT ··· 637 637 select SYS_SUPPORTS_LITTLE_ENDIAN 638 638 639 639 config SIBYTE_RHONE 640 - bool "Support for Sibyte BCM91125E-Rhone" 640 + bool "Sibyte BCM91125E-Rhone" 641 641 depends on EXPERIMENTAL 642 642 select BOOT_ELF32 643 643 select DMA_COHERENT ··· 648 648 select SYS_SUPPORTS_LITTLE_ENDIAN 649 649 650 650 config SIBYTE_CARMEL 651 - bool "Support for Sibyte BCM91120x-Carmel" 651 + bool "Sibyte BCM91120x-Carmel" 652 652 depends on EXPERIMENTAL 653 653 select BOOT_ELF32 654 654 select DMA_COHERENT ··· 659 659 select SYS_SUPPORTS_LITTLE_ENDIAN 660 660 661 661 config SIBYTE_PTSWARM 662 - bool "Support for Sibyte BCM91250PT-PTSWARM" 662 + bool "Sibyte BCM91250PT-PTSWARM" 663 663 depends on EXPERIMENTAL 664 664 select BOOT_ELF32 665 665 select DMA_COHERENT ··· 671 671 select SYS_SUPPORTS_LITTLE_ENDIAN 672 672 673 673 config SIBYTE_LITTLESUR 674 - bool "Support for Sibyte BCM91250C2-LittleSur" 674 + bool "Sibyte BCM91250C2-LittleSur" 675 675 depends on EXPERIMENTAL 676 676 select BOOT_ELF32 677 677 select DMA_COHERENT ··· 683 683 select SYS_SUPPORTS_LITTLE_ENDIAN 684 684 685 685 config SIBYTE_CRHINE 686 - bool "Support for Sibyte BCM91120C-CRhine" 686 + bool "Sibyte BCM91120C-CRhine" 687 687 depends on EXPERIMENTAL 688 688 select BOOT_ELF32 689 689 select DMA_COHERENT 
··· 694 694 select SYS_SUPPORTS_LITTLE_ENDIAN 695 695 696 696 config SIBYTE_CRHONE 697 - bool "Support for Sibyte BCM91125C-CRhone" 697 + bool "Sibyte BCM91125C-CRhone" 698 698 depends on EXPERIMENTAL 699 699 select BOOT_ELF32 700 700 select DMA_COHERENT ··· 706 706 select SYS_SUPPORTS_LITTLE_ENDIAN 707 707 708 708 config SNI_RM200_PCI 709 - bool "Support for SNI RM200 PCI" 709 + bool "SNI RM200 PCI" 710 710 select ARC 711 711 select ARC32 712 712 select ARCH_MAY_HAVE_PC_FDC ··· 732 732 support this machine type. 733 733 734 734 config TOSHIBA_JMR3927 735 - bool "Support for Toshiba JMR-TX3927 board" 735 + bool "Toshiba JMR-TX3927 board" 736 736 select DMA_NONCOHERENT 737 737 select HW_HAS_PCI 738 738 select MIPS_TX3927 ··· 743 743 select TOSHIBA_BOARDS 744 744 745 745 config TOSHIBA_RBTX4927 746 - bool "Support for Toshiba TBTX49[23]7 board" 746 + bool "Toshiba TBTX49[23]7 board" 747 747 select DMA_NONCOHERENT 748 748 select HAS_TXX9_SERIAL 749 749 select HW_HAS_PCI ··· 760 760 support this machine type 761 761 762 762 config TOSHIBA_RBTX4938 763 - bool "Support for Toshiba RBTX4938 board" 763 + bool "Toshiba RBTX4938 board" 764 764 select HAVE_STD_PC_SERIAL_PORT 765 765 select DMA_NONCOHERENT 766 766 select GENERIC_ISA_DMA ··· 1411 1411 1412 1412 config PAGE_SIZE_16KB 1413 1413 bool "16kB" 1414 - depends on EXPERIMENTAL && !CPU_R3000 && !CPU_TX39XX 1414 + depends on !CPU_R3000 && !CPU_TX39XX 1415 1415 help 1416 1416 Using 16kB page size will result in higher performance kernel at 1417 1417 the price of higher memory consumption. This option is available on 1418 - all non-R3000 family processor. Not that at the time of this 1419 - writing this option is still high experimental; there are also 1420 - issues with compatibility of user applications. 1418 + all non-R3000 family processors. Note that you will need a suitable 1419 + Linux distribution to support this. 
1421 1420 1422 1421 config PAGE_SIZE_64KB 1423 1422 bool "64kB" ··· 1425 1426 Using 64kB page size will result in higher performance kernel at 1426 1427 the price of higher memory consumption. This option is available on 1427 1428 all non-R3000 family processor. Not that at the time of this 1428 - writing this option is still high experimental; there are also 1429 - issues with compatibility of user applications. 1429 + writing this option is still high experimental. 1430 1430 1431 1431 endchoice 1432 1432
+1
arch/mips/au1000/common/irq.c
··· 68 68 69 69 extern void set_debug_traps(void); 70 70 extern irq_cpustat_t irq_stat [NR_CPUS]; 71 + extern void mips_timer_interrupt(struct pt_regs *regs); 71 72 72 73 static void setup_local_irq(unsigned int irq, int type, int int_req); 73 74 static unsigned int startup_irq(unsigned int irq);
+9 -15
arch/mips/au1000/common/prom.c
··· 1 1 /* 2 2 * 3 3 * BRIEF MODULE DESCRIPTION 4 - * PROM library initialisation code, assuming a version of 5 - * pmon is the boot code. 4 + * PROM library initialisation code, assuming YAMON is the boot loader. 6 5 * 7 - * Copyright 2000,2001 MontaVista Software Inc. 6 + * Copyright 2000, 2001, 2006 MontaVista Software Inc. 8 7 * Author: MontaVista Software, Inc. 9 8 * ppopov@mvista.com or source@mvista.com 10 9 * ··· 48 49 49 50 typedef struct 50 51 { 51 - char *name; 52 - /* char *val; */ 53 - }t_env_var; 52 + char *name; 53 + char *val; 54 + } t_env_var; 54 55 55 56 56 57 char * prom_getcmdline(void) ··· 84 85 { 85 86 /* 86 87 * Return a pointer to the given environment variable. 87 - * Environment variables are stored in the form of "memsize=64". 88 88 */ 89 89 90 90 t_env_var *env = (t_env_var *)prom_envp; 91 - int i; 92 91 93 - i = strlen(envname); 94 - 95 - while(env->name) { 96 - if(strncmp(envname, env->name, i) == 0) { 97 - return(env->name + strlen(envname) + 1); 98 - } 92 + while (env->name) { 93 + if (strcmp(envname, env->name) == 0) 94 + return env->val; 99 95 env++; 100 96 } 101 - return(NULL); 97 + return NULL; 102 98 } 103 99 104 100 inline unsigned char str2hexnum(unsigned char c)
+5
arch/mips/au1000/common/sleeper.S
··· 112 112 mtc0 k0, CP0_PAGEMASK 113 113 lw k0, 0x14(sp) 114 114 mtc0 k0, CP0_CONFIG 115 + 116 + /* We need to catch the early Alchemy SOCs with 117 + * the write-only Config[OD] bit and set it back to one... 118 + */ 119 + jal au1x00_fixup_config_od 115 120 lw $1, PT_R1(sp) 116 121 lw $2, PT_R2(sp) 117 122 lw $3, PT_R3(sp)
+1
arch/mips/au1000/common/time.c
··· 116 116 117 117 null: 118 118 ack_r4ktimer(0); 119 + irq_exit(); 119 120 } 120 121 121 122 #ifdef CONFIG_PM
+1 -1
arch/mips/ddb5xxx/ddb5476/dbg_io.c
··· 86 86 /* disable interrupts */ 87 87 UART16550_WRITE(OFS_INTR_ENABLE, 0); 88 88 89 - /* set up buad rate */ 89 + /* set up baud rate */ 90 90 { 91 91 uint32 divisor; 92 92
+1 -1
arch/mips/ddb5xxx/ddb5477/kgdb_io.c
··· 86 86 /* disable interrupts */ 87 87 UART16550_WRITE(OFS_INTR_ENABLE, 0); 88 88 89 - /* set up buad rate */ 89 + /* set up baud rate */ 90 90 { 91 91 uint32 divisor; 92 92
+1 -1
arch/mips/gt64120/ev64120/serialGT.c
··· 149 149 #else 150 150 /* 151 151 * Note: Set baud rate, hardcoded here for rate of 115200 152 - * since became unsure of above "buad rate" algorithm (??). 152 + * since became unsure of above "baud rate" algorithm (??). 153 153 */ 154 154 outreg(channel, LCR, 0x83); 155 155 outreg(channel, DLM, 0x00); // See note above
+1 -1
arch/mips/gt64120/momenco_ocelot/dbg_io.c
··· 73 73 /* disable interrupts */ 74 74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 75 75 76 - /* set up buad rate */ 76 + /* set up baud rate */ 77 77 { 78 78 uint32 divisor; 79 79
+1 -1
arch/mips/ite-boards/generic/dbg_io.c
··· 72 72 /* disable interrupts */ 73 73 UART16550_WRITE(OFS_INTR_ENABLE, 0); 74 74 75 - /* set up buad rate */ 75 + /* set up baud rate */ 76 76 { 77 77 uint32 divisor; 78 78
+2 -2
arch/mips/kernel/asm-offsets.c
··· 272 272 text("/* Linux sigcontext offsets. */"); 273 273 offset("#define SC_REGS ", struct sigcontext, sc_regs); 274 274 offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs); 275 - offset("#define SC_MDHI ", struct sigcontext, sc_hi); 276 - offset("#define SC_MDLO ", struct sigcontext, sc_lo); 275 + offset("#define SC_MDHI ", struct sigcontext, sc_mdhi); 276 + offset("#define SC_MDLO ", struct sigcontext, sc_mdlo); 277 277 offset("#define SC_PC ", struct sigcontext, sc_pc); 278 278 offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr); 279 279 linefeed;
+4 -4
arch/mips/kernel/cpu-bugs64.c
··· 206 206 "daddi %0, %1, %3\n\t" 207 207 ".set pop" 208 208 : "=r" (v), "=&r" (tmp) 209 - : "I" (0xffffffffffffdb9a), "I" (0x1234)); 209 + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); 210 210 set_except_vector(12, handler); 211 211 local_irq_restore(flags); 212 212 ··· 224 224 "dsrl %1, %1, 1\n\t" 225 225 "daddi %0, %1, %3" 226 226 : "=r" (v), "=&r" (tmp) 227 - : "I" (0xffffffffffffdb9a), "I" (0x1234)); 227 + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); 228 228 set_except_vector(12, handler); 229 229 local_irq_restore(flags); 230 230 ··· 280 280 "daddu %1, %2\n\t" 281 281 ".set pop" 282 282 : "=&r" (v), "=&r" (w), "=&r" (tmp) 283 - : "I" (0xffffffffffffdb9a), "I" (0x1234)); 283 + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); 284 284 285 285 if (v == w) { 286 286 printk("no.\n"); ··· 296 296 "addiu %1, $0, %4\n\t" 297 297 "daddu %1, %2" 298 298 : "=&r" (v), "=&r" (w), "=&r" (tmp) 299 - : "I" (0xffffffffffffdb9a), "I" (0x1234)); 299 + : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); 300 300 301 301 if (v == w) { 302 302 printk("yes.\n");
+14 -1
arch/mips/kernel/cpu-probe.c
··· 121 121 case CPU_24K: 122 122 case CPU_25KF: 123 123 case CPU_34K: 124 + case CPU_74K: 124 125 case CPU_PR4450: 125 126 cpu_wait = r4k_wait; 126 127 printk(" available.\n"); ··· 433 432 MIPS_CPU_LLSC; 434 433 c->tlbsize = 64; 435 434 break; 435 + case PRID_IMP_R14000: 436 + c->cputype = CPU_R14000; 437 + c->isa_level = MIPS_CPU_ISA_IV; 438 + c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | 439 + MIPS_CPU_FPU | MIPS_CPU_32FPR | 440 + MIPS_CPU_COUNTER | MIPS_CPU_WATCH | 441 + MIPS_CPU_LLSC; 442 + c->tlbsize = 64; 443 + break; 436 444 } 437 445 } 438 446 ··· 603 593 case PRID_IMP_34K: 604 594 c->cputype = CPU_34K; 605 595 break; 596 + case PRID_IMP_74K: 597 + c->cputype = CPU_74K; 598 + break; 606 599 } 607 600 } 608 601 ··· 655 642 case PRID_IMP_SB1: 656 643 c->cputype = CPU_SB1; 657 644 /* FPU in pass1 is known to have issues. */ 658 - if ((c->processor_id & 0xff) < 0x20) 645 + if ((c->processor_id & 0xff) < 0x02) 659 646 c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); 660 647 break; 661 648 case PRID_IMP_SB1A:
+1 -1
arch/mips/kernel/entry.S
··· 101 101 EMT 102 102 1: 103 103 mfc0 v1, CP0_TCSTATUS 104 - /* We set IXMT above, XOR should cler it here */ 104 + /* We set IXMT above, XOR should clear it here */ 105 105 xori v1, v1, TCSTATUS_IXMT 106 106 or v1, v0, v1 107 107 mtc0 v1, CP0_TCSTATUS
+5 -3
arch/mips/kernel/gdb-low.S
··· 54 54 */ 55 55 mfc0 k0, CP0_CAUSE 56 56 andi k0, k0, 0x7c 57 - add k1, k1, k0 58 - PTR_L k0, saved_vectors(k1) 59 - jr k0 57 + #ifdef CONFIG_64BIT 58 + dsll k0, k0, 1 59 + #endif 60 + PTR_L k1, saved_vectors(k0) 61 + jr k1 60 62 nop 61 63 1: 62 64 move k0, sp
+6
arch/mips/kernel/module.c
··· 288 288 sym = (Elf_Sym *)sechdrs[symindex].sh_addr 289 289 + ELF_MIPS_R_SYM(rel[i]); 290 290 if (!sym->st_value) { 291 + /* Ignore unresolved weak symbol */ 292 + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) 293 + continue; 291 294 printk(KERN_WARNING "%s: Unknown symbol %s\n", 292 295 me->name, strtab + sym->st_name); 293 296 return -ENOENT; ··· 328 325 sym = (Elf_Sym *)sechdrs[symindex].sh_addr 329 326 + ELF_MIPS_R_SYM(rel[i]); 330 327 if (!sym->st_value) { 328 + /* Ignore unresolved weak symbol */ 329 + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) 330 + continue; 331 331 printk(KERN_WARNING "%s: Unknown symbol %s\n", 332 332 me->name, strtab + sym->st_name); 333 333 return -ENOENT;
+2
arch/mips/kernel/proc.c
··· 42 42 [CPU_R8000] = "R8000", 43 43 [CPU_R10000] = "R10000", 44 44 [CPU_R12000] = "R12000", 45 + [CPU_R14000] = "R14000", 45 46 [CPU_R4300] = "R4300", 46 47 [CPU_R4650] = "R4650", 47 48 [CPU_R4700] = "R4700", ··· 75 74 [CPU_24K] = "MIPS 24K", 76 75 [CPU_25KF] = "MIPS 25Kf", 77 76 [CPU_34K] = "MIPS 34K", 77 + [CPU_74K] = "MIPS 74K", 78 78 [CPU_VR4111] = "NEC VR4111", 79 79 [CPU_VR4121] = "NEC VR4121", 80 80 [CPU_VR4122] = "NEC VR4122",
+1 -1
arch/mips/kernel/scall64-o32.S
··· 209 209 PTR sys_fork 210 210 PTR sys_read 211 211 PTR sys_write 212 - PTR sys_open /* 4005 */ 212 + PTR compat_sys_open /* 4005 */ 213 213 PTR sys_close 214 214 PTR sys_waitpid 215 215 PTR sys_creat
+10 -8
arch/mips/kernel/setup.c
··· 246 246 #ifdef CONFIG_64BIT 247 247 /* HACK: Guess if the sign extension was forgotten */ 248 248 if (start > 0x0000000080000000 && start < 0x00000000ffffffff) 249 - start |= 0xffffffff00000000; 249 + start |= 0xffffffff00000000UL; 250 250 #endif 251 251 252 252 end = start + size; ··· 355 355 } 356 356 #endif 357 357 358 - memory_present(0, first_usable_pfn, max_low_pfn); 359 - 360 358 /* Initialize the boot-time allocator with low memory only. */ 361 359 bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn); 362 360 ··· 408 410 409 411 /* Register lowmem ranges */ 410 412 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size)); 413 + memory_present(0, curr_pfn, curr_pfn + size - 1); 411 414 } 412 415 413 416 /* Reserve the bootmap memory. */ ··· 418 419 #ifdef CONFIG_BLK_DEV_INITRD 419 420 initrd_below_start_ok = 1; 420 421 if (initrd_start) { 421 - unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start); 422 + unsigned long initrd_size = ((unsigned char *)initrd_end) - 423 + ((unsigned char *)initrd_start); 424 + const int width = sizeof(long) * 2; 425 + 422 426 printk("Initial ramdisk at: 0x%p (%lu bytes)\n", 423 427 (void *)initrd_start, initrd_size); 424 428 425 429 if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) { 426 430 printk("initrd extends beyond end of memory " 427 431 "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n", 428 - sizeof(long) * 2, 429 - (unsigned long long)CPHYSADDR(initrd_end), 430 - sizeof(long) * 2, 431 - (unsigned long long)PFN_PHYS(max_low_pfn)); 432 + width, 433 + (unsigned long long) CPHYSADDR(initrd_end), 434 + width, 435 + (unsigned long long) PFN_PHYS(max_low_pfn)); 432 436 initrd_start = initrd_end = 0; 433 437 initrd_reserve_bootmem = 0; 434 438 }
-30
arch/mips/kernel/signal-common.h
··· 31 31 save_gp_reg(31); 32 32 #undef save_gp_reg 33 33 34 - #ifdef CONFIG_32BIT 35 34 err |= __put_user(regs->hi, &sc->sc_mdhi); 36 35 err |= __put_user(regs->lo, &sc->sc_mdlo); 37 36 if (cpu_has_dsp) { ··· 42 43 err |= __put_user(mflo3(), &sc->sc_lo3); 43 44 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 44 45 } 45 - #endif 46 - #ifdef CONFIG_64BIT 47 - err |= __put_user(regs->hi, &sc->sc_hi[0]); 48 - err |= __put_user(regs->lo, &sc->sc_lo[0]); 49 - if (cpu_has_dsp) { 50 - err |= __put_user(mfhi1(), &sc->sc_hi[1]); 51 - err |= __put_user(mflo1(), &sc->sc_lo[1]); 52 - err |= __put_user(mfhi2(), &sc->sc_hi[2]); 53 - err |= __put_user(mflo2(), &sc->sc_lo[2]); 54 - err |= __put_user(mfhi3(), &sc->sc_hi[3]); 55 - err |= __put_user(mflo3(), &sc->sc_lo[3]); 56 - err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 57 - } 58 - #endif 59 46 60 47 err |= __put_user(!!used_math(), &sc->sc_used_math); 61 48 ··· 77 92 current_thread_info()->restart_block.fn = do_no_restart_syscall; 78 93 79 94 err |= __get_user(regs->cp0_epc, &sc->sc_pc); 80 - #ifdef CONFIG_32BIT 81 95 err |= __get_user(regs->hi, &sc->sc_mdhi); 82 96 err |= __get_user(regs->lo, &sc->sc_mdlo); 83 97 if (cpu_has_dsp) { ··· 88 104 err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); 89 105 err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); 90 106 } 91 - #endif 92 - #ifdef CONFIG_64BIT 93 - err |= __get_user(regs->hi, &sc->sc_hi[0]); 94 - err |= __get_user(regs->lo, &sc->sc_lo[0]); 95 - if (cpu_has_dsp) { 96 - err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg); 97 - err |= __get_user(treg, &sc->sc_lo[1]); mthi1(treg); 98 - err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg); 99 - err |= __get_user(treg, &sc->sc_lo[2]); mthi2(treg); 100 - err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg); 101 - err |= __get_user(treg, &sc->sc_lo[3]); mthi3(treg); 102 - err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); 103 - } 104 - #endif 105 107 106 108 #define restore_gp_reg(i) do { \ 107 109 err |= 
__get_user(regs->regs[i], &sc->sc_regs[i]); \
+4 -1
arch/mips/kernel/smp.c
··· 247 247 current_thread_info()->cpu = 0; 248 248 smp_tune_scheduling(); 249 249 plat_prepare_cpus(max_cpus); 250 + #ifndef CONFIG_HOTPLUG_CPU 251 + cpu_present_map = cpu_possible_map; 252 + #endif 250 253 } 251 254 252 255 /* preload SMP state for boot cpu */ ··· 445 442 int cpu; 446 443 int ret; 447 444 448 - for_each_cpu(cpu) { 445 + for_each_present_cpu(cpu) { 449 446 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); 450 447 if (ret) 451 448 printk(KERN_WARNING "topology_init: register_cpu %d "
+1 -26
arch/mips/kernel/syscall.c
··· 276 276 277 277 asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3) 278 278 { 279 - int tmp, len; 280 - char __user *name; 279 + int tmp; 281 280 282 281 switch(cmd) { 283 - case SETNAME: { 284 - char nodename[__NEW_UTS_LEN + 1]; 285 - 286 - if (!capable(CAP_SYS_ADMIN)) 287 - return -EPERM; 288 - 289 - name = (char __user *) arg1; 290 - 291 - len = strncpy_from_user(nodename, name, __NEW_UTS_LEN); 292 - if (len < 0) 293 - return -EFAULT; 294 - 295 - down_write(&uts_sem); 296 - strncpy(system_utsname.nodename, nodename, len); 297 - nodename[__NEW_UTS_LEN] = '\0'; 298 - strlcpy(system_utsname.nodename, nodename, 299 - sizeof(system_utsname.nodename)); 300 - up_write(&uts_sem); 301 - return 0; 302 - } 303 - 304 282 case MIPS_ATOMIC_SET: 305 283 printk(KERN_CRIT "How did I get here?\n"); 306 284 return -EINVAL; ··· 291 313 case FLUSH_CACHE: 292 314 __flush_cache_all(); 293 315 return 0; 294 - 295 - case MIPS_RDNVRAM: 296 - return -EIO; 297 316 } 298 317 299 318 return -EINVAL;
+18 -2
arch/mips/kernel/traps.c
··· 819 819 820 820 asmlinkage void do_mcheck(struct pt_regs *regs) 821 821 { 822 + const int field = 2 * sizeof(unsigned long); 823 + int multi_match = regs->cp0_status & ST0_TS; 824 + 822 825 show_regs(regs); 823 - dump_tlb_all(); 826 + 827 + if (multi_match) { 828 + printk("Index : %0x\n", read_c0_index()); 829 + printk("Pagemask: %0x\n", read_c0_pagemask()); 830 + printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); 831 + printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); 832 + printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); 833 + printk("\n"); 834 + dump_tlb_all(); 835 + } 836 + 837 + show_code((unsigned int *) regs->cp0_epc); 838 + 824 839 /* 825 840 * Some chips may have other causes of machine check (e.g. SB1 826 841 * graduation timer) 827 842 */ 828 843 panic("Caught Machine Check exception - %scaused by multiple " 829 844 "matching entries in the TLB.", 830 - (regs->cp0_status & ST0_TS) ? "" : "not "); 845 + (multi_match) ? "" : "not "); 831 846 } 832 847 833 848 asmlinkage void do_mt(struct pt_regs *regs) ··· 917 902 { 918 903 switch (current_cpu_data.cputype) { 919 904 case CPU_24K: 905 + case CPU_34K: 920 906 case CPU_5KC: 921 907 write_c0_ecc(0x80000000); 922 908 back_to_back_c0_hazard();
+5 -15
arch/mips/kernel/vmlinux.lds.S
··· 151 151 152 152 /* This is the MIPS specific mdebug section. */ 153 153 .mdebug : { *(.mdebug) } 154 - /* These are needed for ELF backends which have not yet been 155 - converted to the new style linker. */ 156 - .stab 0 : { *(.stab) } 157 - .stabstr 0 : { *(.stabstr) } 158 - /* DWARF debug sections. 159 - Symbols in the .debug DWARF section are relative to the beginning of the 160 - section so we begin .debug at 0. It's not clear yet what needs to happen 161 - for the others. */ 162 - .debug 0 : { *(.debug) } 163 - .debug_srcinfo 0 : { *(.debug_srcinfo) } 164 - .debug_aranges 0 : { *(.debug_aranges) } 165 - .debug_pubnames 0 : { *(.debug_pubnames) } 166 - .debug_sfnames 0 : { *(.debug_sfnames) } 167 - .line 0 : { *(.line) } 154 + 155 + STABS_DEBUG 156 + 157 + DWARF_DEBUG 158 + 168 159 /* These must appear regardless of . */ 169 160 .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) } 170 161 .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) } 171 - .comment : { *(.comment) } 172 162 .note : { *(.note) } 173 163 }
+3 -1
arch/mips/math-emu/dp_fint.c
··· 29 29 30 30 ieee754dp ieee754dp_fint(int x) 31 31 { 32 - COMPXDP; 32 + u64 xm; 33 + int xe; 34 + int xs; 33 35 34 36 CLEARCX; 35 37
+3 -1
arch/mips/math-emu/dp_flong.c
··· 29 29 30 30 ieee754dp ieee754dp_flong(s64 x) 31 31 { 32 - COMPXDP; 32 + u64 xm; 33 + int xe; 34 + int xs; 33 35 34 36 CLEARCX; 35 37
+3 -1
arch/mips/math-emu/sp_fint.c
··· 29 29 30 30 ieee754sp ieee754sp_fint(int x) 31 31 { 32 - COMPXSP; 32 + unsigned xm; 33 + int xe; 34 + int xs; 33 35 34 36 CLEARCX; 35 37
+3 -1
arch/mips/math-emu/sp_flong.c
··· 29 29 30 30 ieee754sp ieee754sp_flong(s64 x) 31 31 { 32 - COMPXDP; /* <--- need 64-bit mantissa temp */ 32 + u64 xm; /* <--- need 64-bit mantissa temp */ 33 + int xe; 34 + int xs; 33 35 34 36 CLEARCX; 35 37
+69 -9
arch/mips/mm/c-r4k.c
··· 29 29 #include <asm/war.h> 30 30 #include <asm/cacheflush.h> /* for run_uncached() */ 31 31 32 + 33 + /* 34 + * Special Variant of smp_call_function for use by cache functions: 35 + * 36 + * o No return value 37 + * o collapses to normal function call on UP kernels 38 + * o collapses to normal function call on systems with a single shared 39 + * primary cache. 40 + */ 41 + static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, 42 + int retry, int wait) 43 + { 44 + preempt_disable(); 45 + 46 + #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 47 + smp_call_function(func, info, retry, wait); 48 + #endif 49 + func(info); 50 + preempt_enable(); 51 + } 52 + 32 53 /* 33 54 * Must die. 34 55 */ ··· 320 299 if (!cpu_has_dc_aliases) 321 300 return; 322 301 323 - on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1); 302 + r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1); 324 303 } 325 304 326 305 static inline void local_r4k___flush_cache_all(void * args) ··· 335 314 case CPU_R4400MC: 336 315 case CPU_R10000: 337 316 case CPU_R12000: 317 + case CPU_R14000: 338 318 r4k_blast_scache(); 339 319 } 340 320 } 341 321 342 322 static void r4k___flush_cache_all(void) 343 323 { 344 - on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); 324 + r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); 345 325 } 346 326 347 327 static inline void local_r4k_flush_cache_range(void * args) ··· 363 341 static void r4k_flush_cache_range(struct vm_area_struct *vma, 364 342 unsigned long start, unsigned long end) 365 343 { 366 - on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); 344 + r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); 367 345 } 368 346 369 347 static inline void local_r4k_flush_cache_mm(void * args) ··· 392 370 if (!cpu_has_dc_aliases) 393 371 return; 394 372 395 - on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); 373 + r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); 396 374 } 397 375 398 376 struct flush_cache_page_args { 
··· 483 461 args.addr = addr; 484 462 args.pfn = pfn; 485 463 486 - on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); 464 + r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); 487 465 } 488 466 489 467 static inline void local_r4k_flush_data_cache_page(void * addr) ··· 493 471 494 472 static void r4k_flush_data_cache_page(unsigned long addr) 495 473 { 496 - on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1); 474 + r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1); 497 475 } 498 476 499 477 struct flush_icache_range_args { ··· 536 514 args.start = start; 537 515 args.end = end; 538 516 539 - on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); 517 + r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); 540 518 instruction_hazard(); 541 519 } 542 520 ··· 612 590 args.vma = vma; 613 591 args.page = page; 614 592 615 - on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1); 593 + r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1); 616 594 } 617 595 618 596 ··· 711 689 712 690 static void r4k_flush_cache_sigtramp(unsigned long addr) 713 691 { 714 - on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); 692 + r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); 715 693 } 716 694 717 695 static void r4k_flush_icache_all(void) ··· 834 812 835 813 case CPU_R10000: 836 814 case CPU_R12000: 815 + case CPU_R14000: 837 816 icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29)); 838 817 c->icache.linesz = 64; 839 818 c->icache.ways = 2; ··· 988 965 c->dcache.flags |= MIPS_CACHE_PINDEX; 989 966 case CPU_R10000: 990 967 case CPU_R12000: 968 + case CPU_R14000: 991 969 case CPU_SB1: 992 970 break; 993 971 case CPU_24K: 972 + case CPU_34K: 994 973 if (!(read_c0_config7() & (1 << 16))) 995 974 default: 996 975 if (c->dcache.waysize > PAGE_SIZE) ··· 1116 1091 1117 1092 case CPU_R10000: 1118 1093 case CPU_R12000: 1094 + case CPU_R14000: 1119 1095 scache_size = 0x80000 << ((config & R10K_CONF_SS) 
>> 16); 1120 1096 c->scache.linesz = 64 << ((config >> 13) & 1); 1121 1097 c->scache.ways = 2; ··· 1161 1135 c->options |= MIPS_CPU_SUBSET_CACHES; 1162 1136 } 1163 1137 1138 + void au1x00_fixup_config_od(void) 1139 + { 1140 + /* 1141 + * c0_config.od (bit 19) was write only (and read as 0) 1142 + * on the early revisions of Alchemy SOCs. It disables the bus 1143 + * transaction overlapping and needs to be set to fix various errata. 1144 + */ 1145 + switch (read_c0_prid()) { 1146 + case 0x00030100: /* Au1000 DA */ 1147 + case 0x00030201: /* Au1000 HA */ 1148 + case 0x00030202: /* Au1000 HB */ 1149 + case 0x01030200: /* Au1500 AB */ 1150 + /* 1151 + * Au1100 errata actually keeps silence about this bit, so we set it 1152 + * just in case for those revisions that require it to be set according 1153 + * to arch/mips/au1000/common/cputable.c 1154 + */ 1155 + case 0x02030200: /* Au1100 AB */ 1156 + case 0x02030201: /* Au1100 BA */ 1157 + case 0x02030202: /* Au1100 BC */ 1158 + set_c0_config(1 << 19); 1159 + break; 1160 + } 1161 + } 1162 + 1164 1163 static inline void coherency_setup(void) 1165 1164 { 1166 1165 change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT); ··· 1205 1154 case CPU_R4400SC: 1206 1155 case CPU_R4400MC: 1207 1156 clear_c0_config(CONF_CU); 1157 + break; 1158 + /* 1159 + * We need to catch the ealry Alchemy SOCs with 1160 + * the write-only co_config.od bit and set it back to one... 1161 + */ 1162 + case CPU_AU1000: /* rev. DA, HA, HB */ 1163 + case CPU_AU1100: /* rev. AB, BA, BC ?? */ 1164 + case CPU_AU1500: /* rev. AB */ 1165 + au1x00_fixup_config_od(); 1208 1166 break; 1209 1167 } 1210 1168 }
+1 -1
arch/mips/mm/init.c
··· 227 227 for (tmp = 0; tmp < max_low_pfn; tmp++) 228 228 if (page_is_ram(tmp)) { 229 229 ram++; 230 - if (PageReserved(mem_map+tmp)) 230 + if (PageReserved(pfn_to_page(tmp))) 231 231 reservedpages++; 232 232 } 233 233
+1
arch/mips/mm/pg-r4k.c
··· 357 357 358 358 case CPU_R10000: 359 359 case CPU_R12000: 360 + case CPU_R14000: 360 361 pref_src_mode = Pref_LoadStreamed; 361 362 pref_dst_mode = Pref_StoreStreamed; 362 363 break;
+2
arch/mips/mm/tlbex.c
··· 875 875 876 876 case CPU_R10000: 877 877 case CPU_R12000: 878 + case CPU_R14000: 878 879 case CPU_4KC: 879 880 case CPU_SB1: 880 881 case CPU_SB1A: ··· 907 906 case CPU_4KEC: 908 907 case CPU_24K: 909 908 case CPU_34K: 909 + case CPU_74K: 910 910 i_ehb(p); 911 911 tlbw(p); 912 912 break;
+1 -1
arch/mips/momentum/jaguar_atx/dbg_io.c
··· 73 73 /* disable interrupts */ 74 74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 75 75 76 - /* set up buad rate */ 76 + /* set up baud rate */ 77 77 { 78 78 uint32 divisor; 79 79
+1 -1
arch/mips/momentum/ocelot_c/dbg_io.c
··· 73 73 /* disable interrupts */ 74 74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 75 75 76 - /* set up buad rate */ 76 + /* set up baud rate */ 77 77 { 78 78 uint32 divisor; 79 79
+1 -1
arch/mips/momentum/ocelot_g/dbg_io.c
··· 73 73 /* disable interrupts */ 74 74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 75 75 76 - /* set up buad rate */ 76 + /* set up baud rate */ 77 77 { 78 78 uint32 divisor; 79 79
+5 -4
arch/mips/oprofile/common.c
··· 14 14 15 15 #include "op_impl.h" 16 16 17 - extern struct op_mips_model op_model_mipsxx __attribute__((weak)); 18 - extern struct op_mips_model op_model_rm9000 __attribute__((weak)); 17 + extern struct op_mips_model op_model_mipsxx_ops __attribute__((weak)); 18 + extern struct op_mips_model op_model_rm9000_ops __attribute__((weak)); 19 19 20 20 static struct op_mips_model *model; 21 21 ··· 80 80 case CPU_24K: 81 81 case CPU_25KF: 82 82 case CPU_34K: 83 + case CPU_74K: 83 84 case CPU_SB1: 84 85 case CPU_SB1A: 85 - lmodel = &op_model_mipsxx; 86 + lmodel = &op_model_mipsxx_ops; 86 87 break; 87 88 88 89 case CPU_RM9000: 89 - lmodel = &op_model_rm9000; 90 + lmodel = &op_model_rm9000_ops; 90 91 break; 91 92 }; 92 93
+19 -15
arch/mips/oprofile/op_model_mipsxx.c
··· 23 23 24 24 #define M_COUNTER_OVERFLOW (1UL << 31) 25 25 26 - struct op_mips_model op_model_mipsxx; 26 + struct op_mips_model op_model_mipsxx_ops; 27 27 28 28 static struct mipsxx_register_config { 29 29 unsigned int control[4]; ··· 34 34 35 35 static void mipsxx_reg_setup(struct op_counter_config *ctr) 36 36 { 37 - unsigned int counters = op_model_mipsxx.num_counters; 37 + unsigned int counters = op_model_mipsxx_ops.num_counters; 38 38 int i; 39 39 40 40 /* Compute the performance counter control word. */ ··· 62 62 63 63 static void mipsxx_cpu_setup (void *args) 64 64 { 65 - unsigned int counters = op_model_mipsxx.num_counters; 65 + unsigned int counters = op_model_mipsxx_ops.num_counters; 66 66 67 67 switch (counters) { 68 68 case 4: ··· 83 83 /* Start all counters on current CPU */ 84 84 static void mipsxx_cpu_start(void *args) 85 85 { 86 - unsigned int counters = op_model_mipsxx.num_counters; 86 + unsigned int counters = op_model_mipsxx_ops.num_counters; 87 87 88 88 switch (counters) { 89 89 case 4: ··· 100 100 /* Stop all counters on current CPU */ 101 101 static void mipsxx_cpu_stop(void *args) 102 102 { 103 - unsigned int counters = op_model_mipsxx.num_counters; 103 + unsigned int counters = op_model_mipsxx_ops.num_counters; 104 104 105 105 switch (counters) { 106 106 case 4: ··· 116 116 117 117 static int mipsxx_perfcount_handler(struct pt_regs *regs) 118 118 { 119 - unsigned int counters = op_model_mipsxx.num_counters; 119 + unsigned int counters = op_model_mipsxx_ops.num_counters; 120 120 unsigned int control; 121 121 unsigned int counter; 122 122 int handled = 0; ··· 187 187 188 188 reset_counters(counters); 189 189 190 - op_model_mipsxx.num_counters = counters; 190 + op_model_mipsxx_ops.num_counters = counters; 191 191 switch (current_cpu_data.cputype) { 192 192 case CPU_20KC: 193 - op_model_mipsxx.cpu_type = "mips/20K"; 193 + op_model_mipsxx_ops.cpu_type = "mips/20K"; 194 194 break; 195 195 196 196 case CPU_24K: 197 - op_model_mipsxx.cpu_type = 
"mips/24K"; 197 + op_model_mipsxx_ops.cpu_type = "mips/24K"; 198 198 break; 199 199 200 200 case CPU_25KF: 201 - op_model_mipsxx.cpu_type = "mips/25K"; 201 + op_model_mipsxx_ops.cpu_type = "mips/25K"; 202 202 break; 203 203 204 204 #ifndef CONFIG_SMP 205 205 case CPU_34K: 206 - op_model_mipsxx.cpu_type = "mips/34K"; 206 + op_model_mipsxx_ops.cpu_type = "mips/34K"; 207 + break; 208 + 209 + case CPU_74K: 210 + op_model_mipsxx_ops.cpu_type = "mips/74K"; 207 211 break; 208 212 #endif 209 213 210 214 case CPU_5KC: 211 - op_model_mipsxx.cpu_type = "mips/5K"; 215 + op_model_mipsxx_ops.cpu_type = "mips/5K"; 212 216 break; 213 217 214 218 case CPU_SB1: 215 219 case CPU_SB1A: 216 - op_model_mipsxx.cpu_type = "mips/sb1"; 220 + op_model_mipsxx_ops.cpu_type = "mips/sb1"; 217 221 break; 218 222 219 223 default: ··· 233 229 234 230 static void mipsxx_exit(void) 235 231 { 236 - reset_counters(op_model_mipsxx.num_counters); 232 + reset_counters(op_model_mipsxx_ops.num_counters); 237 233 238 234 perf_irq = null_perf_irq; 239 235 } 240 236 241 - struct op_mips_model op_model_mipsxx = { 237 + struct op_mips_model op_model_mipsxx_ops = { 242 238 .reg_setup = mipsxx_reg_setup, 243 239 .cpu_setup = mipsxx_cpu_setup, 244 240 .init = mipsxx_init,
+1 -1
arch/mips/oprofile/op_model_rm9000.c
··· 126 126 free_irq(rm9000_perfcount_irq, NULL); 127 127 } 128 128 129 - struct op_mips_model op_model_rm9000 = { 129 + struct op_mips_model op_model_rm9000_ops = { 130 130 .reg_setup = rm9000_reg_setup, 131 131 .cpu_setup = rm9000_cpu_setup, 132 132 .init = rm9000_init,
+2 -2
arch/mips/sgi-ip32/ip32-irq.c
··· 31 31 /* issue a PIO read to make sure no PIO writes are pending */ 32 32 static void inline flush_crime_bus(void) 33 33 { 34 - volatile unsigned long junk = crime->control; 34 + crime->control; 35 35 } 36 36 37 37 static void inline flush_mace_bus(void) 38 38 { 39 - volatile unsigned long junk = mace->perif.ctrl.misc; 39 + mace->perif.ctrl.misc; 40 40 } 41 41 42 42 #undef DEBUG_IRQ
+12
arch/powerpc/platforms/powermac/low_i2c.c
··· 1157 1157 /* some quirks for platform function decoding */ 1158 1158 enum { 1159 1159 pmac_i2c_quirk_invmask = 0x00000001u, 1160 + pmac_i2c_quirk_skip = 0x00000002u, 1160 1161 }; 1161 1162 1162 1163 static void pmac_i2c_devscan(void (*callback)(struct device_node *dev, ··· 1173 1172 /* XXX Study device-tree's & apple drivers are get the quirks 1174 1173 * right ! 1175 1174 */ 1175 + /* Workaround: It seems that running the clockspreading 1176 + * properties on the eMac will cause lockups during boot. 1177 + * The machine seems to work fine without that. So for now, 1178 + * let's make sure i2c-hwclock doesn't match about "imic" 1179 + * clocks and we'll figure out if we really need to do 1180 + * something special about those later. 1181 + */ 1182 + { "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip }, 1183 + { "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip }, 1176 1184 { "i2c-hwclock", NULL, pmac_i2c_quirk_invmask }, 1177 1185 { "i2c-cpu-voltage", NULL, 0}, 1178 1186 { "temp-monitor", NULL, 0 }, ··· 1208 1198 if (p->compatible && 1209 1199 !device_is_compatible(np, p->compatible)) 1210 1200 continue; 1201 + if (p->quirks & pmac_i2c_quirk_skip) 1202 + break; 1211 1203 callback(np, p->quirks); 1212 1204 break; 1213 1205 }
+11 -7
arch/powerpc/platforms/powermac/pfunc_core.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/spinlock.h> 13 13 #include <linux/module.h> 14 + #include <linux/mutex.h> 14 15 15 16 #include <asm/semaphore.h> 16 17 #include <asm/prom.h> ··· 547 546 548 547 static LIST_HEAD(pmf_devices); 549 548 static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED; 549 + static DEFINE_MUTEX(pmf_irq_mutex); 550 550 551 551 static void pmf_release_device(struct kref *kref) 552 552 { ··· 866 864 867 865 spin_lock_irqsave(&pmf_lock, flags); 868 866 func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN); 869 - if (func == NULL) { 870 - spin_unlock_irqrestore(&pmf_lock, flags); 867 + if (func) 868 + func = pmf_get_function(func); 869 + spin_unlock_irqrestore(&pmf_lock, flags); 870 + if (func == NULL) 871 871 return -ENODEV; 872 - } 872 + mutex_lock(&pmf_irq_mutex); 873 873 if (list_empty(&func->irq_clients)) 874 874 func->dev->handlers->irq_enable(func); 875 875 list_add(&client->link, &func->irq_clients); 876 876 client->func = func; 877 - spin_unlock_irqrestore(&pmf_lock, flags); 877 + mutex_unlock(&pmf_irq_mutex); 878 878 879 879 return 0; 880 880 } ··· 885 881 void pmf_unregister_irq_client(struct pmf_irq_client *client) 886 882 { 887 883 struct pmf_function *func = client->func; 888 - unsigned long flags; 889 884 890 885 BUG_ON(func == NULL); 891 886 892 - spin_lock_irqsave(&pmf_lock, flags); 887 + mutex_lock(&pmf_irq_mutex); 893 888 client->func = NULL; 894 889 list_del(&client->link); 895 890 if (list_empty(&func->irq_clients)) 896 891 func->dev->handlers->irq_disable(func); 897 - spin_unlock_irqrestore(&pmf_lock, flags); 892 + mutex_unlock(&pmf_irq_mutex); 893 + pmf_put_function(func); 898 894 } 899 895 EXPORT_SYMBOL_GPL(pmf_unregister_irq_client); 900 896
+1 -1
arch/ppc/kernel/asm-offsets.c
··· 134 134 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 135 135 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); 136 136 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 137 - DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, flags)); 137 + DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); 138 138 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 139 139 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 140 140
+30
arch/sparc64/kernel/head.S
··· 10 10 #include <linux/config.h> 11 11 #include <linux/version.h> 12 12 #include <linux/errno.h> 13 + #include <linux/threads.h> 13 14 #include <asm/thread_info.h> 14 15 #include <asm/asi.h> 15 16 #include <asm/pstate.h> ··· 493 492 mov %l6, %o1 ! OpenPROM stack 494 493 call prom_init 495 494 mov %l7, %o0 ! OpenPROM cif handler 495 + 496 + /* Initialize current_thread_info()->cpu as early as possible. 497 + * In order to do that accurately we have to patch up the get_cpuid() 498 + * assembler sequences. And that, in turn, requires that we know 499 + * if we are on a Starfire box or not. While we're here, patch up 500 + * the sun4v sequences as well. 501 + */ 502 + call check_if_starfire 503 + nop 504 + call per_cpu_patch 505 + nop 506 + call sun4v_patch 507 + nop 508 + 509 + #ifdef CONFIG_SMP 510 + call hard_smp_processor_id 511 + nop 512 + cmp %o0, NR_CPUS 513 + blu,pt %xcc, 1f 514 + nop 515 + call boot_cpu_id_too_large 516 + nop 517 + /* Not reached... */ 518 + 519 + 1: 520 + #else 521 + mov 0, %o0 522 + #endif 523 + stb %o0, [%g6 + TI_CPU] 496 524 497 525 /* Off we go.... */ 498 526 call start_kernel
+11 -12
arch/sparc64/kernel/setup.c
··· 220 220 221 221 static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; 222 222 223 - static void __init per_cpu_patch(void) 223 + void __init per_cpu_patch(void) 224 224 { 225 225 struct cpuid_patch_entry *p; 226 226 unsigned long ver; ··· 280 280 } 281 281 } 282 282 283 - static void __init sun4v_patch(void) 283 + void __init sun4v_patch(void) 284 284 { 285 285 struct sun4v_1insn_patch_entry *p1; 286 286 struct sun4v_2insn_patch_entry *p2; ··· 315 315 } 316 316 } 317 317 318 + #ifdef CONFIG_SMP 319 + void __init boot_cpu_id_too_large(int cpu) 320 + { 321 + prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", 322 + cpu, NR_CPUS); 323 + prom_halt(); 324 + } 325 + #endif 326 + 318 327 void __init setup_arch(char **cmdline_p) 319 328 { 320 329 /* Initialize PROM console and command line. */ ··· 340 331 #elif defined(CONFIG_PROM_CONSOLE) 341 332 conswitchp = &prom_con; 342 333 #endif 343 - 344 - /* Work out if we are starfire early on */ 345 - check_if_starfire(); 346 - 347 - /* Now we know enough to patch the get_cpuid sequences 348 - * used by trap code. 349 - */ 350 - per_cpu_patch(); 351 - 352 - sun4v_patch(); 353 334 354 335 boot_flags_init(*cmdline_p); 355 336
+3 -13
arch/sparc64/kernel/smp.c
··· 1264 1264 boot_cpu_id = hard_smp_processor_id(); 1265 1265 current_tick_offset = timer_tick_offset; 1266 1266 1267 - cpu_set(boot_cpu_id, cpu_online_map); 1268 1267 prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1; 1269 1268 } 1270 1269 ··· 1344 1345 1345 1346 void __devinit smp_prepare_boot_cpu(void) 1346 1347 { 1347 - int cpu = hard_smp_processor_id(); 1348 - 1349 - if (cpu >= NR_CPUS) { 1350 - prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); 1351 - prom_halt(); 1352 - } 1353 - 1354 - current_thread_info()->cpu = cpu; 1355 - __local_per_cpu_offset = __per_cpu_offset(cpu); 1356 - 1357 - cpu_set(smp_processor_id(), cpu_online_map); 1358 - cpu_set(smp_processor_id(), phys_cpu_present_map); 1359 1348 } 1360 1349 1361 1350 int __devinit __cpu_up(unsigned int cpu) ··· 1420 1433 1421 1434 for (i = 0; i < NR_CPUS; i++, ptr += size) 1422 1435 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 1436 + 1437 + /* Setup %g5 for the boot cpu. */ 1438 + __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); 1423 1439 }
+3 -2
arch/sparc64/lib/checksum.S
··· 165 165 sll %g1, 8, %g1 166 166 or %o5, %g1, %o4 167 167 168 - 1: add %o2, %o4, %o2 168 + 1: addcc %o2, %o4, %o2 169 + addc %g0, %o2, %o2 169 170 170 171 csum_partial_finish: 171 172 retl 172 - mov %o2, %o0 173 + srl %o2, 0, %o0
+3 -2
arch/sparc64/lib/csum_copy.S
··· 221 221 sll %g1, 8, %g1 222 222 or %o5, %g1, %o4 223 223 224 - 1: add %o3, %o4, %o3 224 + 1: addcc %o3, %o4, %o3 225 + addc %g0, %o3, %o3 225 226 226 227 70: 227 228 retl 228 - mov %o3, %o0 229 + srl %o3, 0, %o0 229 230 230 231 95: mov 0, GLOBAL_SPARE 231 232 brlez,pn %o2, 4f
+4
arch/um/Makefile-i386
··· 33 33 # prevent gcc from keeping the stack 16 byte aligned. Taken from i386. 34 34 cflags-y += $(call cc-option,-mpreferred-stack-boundary=2) 35 35 36 + # Prevent sprintf in nfsd from being converted to strcpy and resulting in 37 + # an unresolved reference. 38 + cflags-y += -ffreestanding 39 + 36 40 CFLAGS += $(cflags-y) 37 41 USER_CFLAGS += $(cflags-y)
+2 -11
arch/um/include/kern_util.h
··· 120 120 extern void free_irq(unsigned int, void *); 121 121 extern int cpu(void); 122 122 123 + extern void time_init_kern(void); 124 + 123 125 /* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */ 124 126 extern int __cant_sleep(void); 125 127 extern void segv_handler(int sig, union uml_pt_regs *regs); 126 128 extern void sigio_handler(int sig, union uml_pt_regs *regs); 127 129 128 130 #endif 129 - 130 - /* 131 - * Overrides for Emacs so that we follow Linus's tabbing style. 132 - * Emacs will notice this stuff at the end of the file and automatically 133 - * adjust the settings for this buffer only. This must remain at the end 134 - * of the file. 135 - * --------------------------------------------------------------------------- 136 - * Local variables: 137 - * c-file-style: "linux" 138 - * End: 139 - */
+10
arch/um/kernel/time_kern.c
··· 84 84 } 85 85 } 86 86 87 + 88 + void time_init_kern(void) 89 + { 90 + unsigned long long nsecs; 91 + 92 + nsecs = os_nsecs(); 93 + set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION, 94 + -nsecs % BILLION); 95 + } 96 + 87 97 void do_boot_timer_handler(struct sigcontext * sc) 88 98 { 89 99 struct pt_regs regs;
+1 -1
arch/um/os-Linux/main.c
··· 59 59 initcall_t *call; 60 60 61 61 call = &__uml_initcall_start; 62 - while (call < &__uml_initcall_end){; 62 + while (call < &__uml_initcall_end){ 63 63 (*call)(); 64 64 call++; 65 65 }
+1 -9
arch/um/os-Linux/time.c
··· 81 81 set_interval(ITIMER_REAL); 82 82 } 83 83 84 - extern void ktime_get_ts(struct timespec *ts); 85 - #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) 86 - 87 84 void time_init(void) 88 85 { 89 - struct timespec now; 90 - 91 86 if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR) 92 87 panic("Couldn't set SIGVTALRM handler"); 93 88 set_interval(ITIMER_VIRTUAL); 94 - 95 - do_posix_clock_monotonic_gettime(&now); 96 - wall_to_monotonic.tv_sec = -now.tv_sec; 97 - wall_to_monotonic.tv_nsec = -now.tv_nsec; 89 + time_init_kern(); 98 90 } 99 91 100 92 unsigned long long os_nsecs(void)
+5 -4
arch/um/sys-i386/syscalls.c
··· 99 99 100 100 switch (call) { 101 101 case SEMOP: 102 - return sys_semtimedop(first, (struct sembuf *) ptr, second, 103 - NULL); 102 + return sys_semtimedop(first, (struct sembuf __user *) ptr, 103 + second, NULL); 104 104 case SEMTIMEDOP: 105 - return sys_semtimedop(first, (struct sembuf *) ptr, second, 106 - (const struct timespec *) fifth); 105 + return sys_semtimedop(first, (struct sembuf __user *) ptr, 106 + second, 107 + (const struct timespec __user *) fifth); 107 108 case SEMGET: 108 109 return sys_semget (first, second, third); 109 110 case SEMCTL: {
+14 -10
arch/um/sys-x86_64/signal.c
··· 21 21 #include "skas.h" 22 22 23 23 static int copy_sc_from_user_skas(struct pt_regs *regs, 24 - struct sigcontext *from) 24 + struct sigcontext __user *from) 25 25 { 26 26 int err = 0; 27 27 ··· 54 54 return(err); 55 55 } 56 56 57 - int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, 57 + int copy_sc_to_user_skas(struct sigcontext __user *to, 58 + struct _fpstate __user *to_fp, 58 59 struct pt_regs *regs, unsigned long mask, 59 60 unsigned long sp) 60 61 { ··· 107 106 #endif 108 107 109 108 #ifdef CONFIG_MODE_TT 110 - int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from, 109 + int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from, 111 110 int fpsize) 112 111 { 113 - struct _fpstate *to_fp, *from_fp; 112 + struct _fpstate *to_fp; 113 + struct _fpstate __user *from_fp; 114 114 unsigned long sigs; 115 115 int err; 116 116 ··· 126 124 return(err); 127 125 } 128 126 129 - int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, 127 + int copy_sc_to_user_tt(struct sigcontext __user *to, struct _fpstate __user *fp, 130 128 struct sigcontext *from, int fpsize, unsigned long sp) 131 129 { 132 - struct _fpstate *to_fp, *from_fp; 130 + struct _fpstate __user *to_fp; 131 + struct _fpstate *from_fp; 133 132 int err; 134 133 135 - to_fp = (fp ? fp : (struct _fpstate *) (to + 1)); 134 + to_fp = (fp ? 
fp : (struct _fpstate __user *) (to + 1)); 136 135 from_fp = from->fpstate; 137 136 err = copy_to_user(to, from, sizeof(*to)); 138 137 /* The SP in the sigcontext is the updated one for the signal ··· 161 158 return(ret); 162 159 } 163 160 164 - static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, 161 + static int copy_sc_to_user(struct sigcontext __user *to, 162 + struct _fpstate __user *fp, 165 163 struct pt_regs *from, unsigned long mask, 166 164 unsigned long sp) 167 165 { ··· 173 169 174 170 struct rt_sigframe 175 171 { 176 - char *pretcode; 172 + char __user *pretcode; 177 173 struct ucontext uc; 178 174 struct siginfo info; 179 175 }; ··· 192 188 193 189 frame = (struct rt_sigframe __user *) 194 190 round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8; 195 - frame = (struct rt_sigframe *) ((unsigned long) frame - 128); 191 + frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128); 196 192 197 193 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) 198 194 goto out;
+1 -1
arch/um/sys-x86_64/syscalls.c
··· 45 45 case ARCH_GET_GS: 46 46 ret = arch_prctl(code, (unsigned long) &tmp); 47 47 if(!ret) 48 - ret = put_user(tmp, &addr); 48 + ret = put_user(tmp, (long __user *)addr); 49 49 break; 50 50 default: 51 51 ret = -EINVAL;
+2 -2
arch/x86_64/ia32/ia32_binfmt.c
··· 339 339 struct mm_struct *mm = current->mm; 340 340 int i, ret; 341 341 342 - stack_base = IA32_STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE; 342 + stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE; 343 343 mm->arg_start = bprm->p + stack_base; 344 344 345 345 bprm->p += stack_base; ··· 357 357 { 358 358 mpnt->vm_mm = mm; 359 359 mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p; 360 - mpnt->vm_end = IA32_STACK_TOP; 360 + mpnt->vm_end = stack_top; 361 361 if (executable_stack == EXSTACK_ENABLE_X) 362 362 mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC; 363 363 else if (executable_stack == EXSTACK_DISABLE_X)
+1 -1
arch/x86_64/kernel/e820.c
··· 149 149 addr = start; 150 150 if (addr > ei->addr + ei->size) 151 151 continue; 152 - while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size) 152 + while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size) 153 153 ; 154 154 last = addr + size; 155 155 if (last > ei->addr + ei->size)
+1 -6
arch/x86_64/kernel/entry.S
··· 281 281 ja 1f 282 282 movq %r10,%rcx /* fixup for C */ 283 283 call *sys_call_table(,%rax,8) 284 - movq %rax,RAX-ARGOFFSET(%rsp) 285 - 1: SAVE_REST 286 - movq %rsp,%rdi 287 - call syscall_trace_leave 288 - RESTORE_TOP_OF_STACK %rbx 289 - RESTORE_REST 284 + 1: movq %rax,RAX-ARGOFFSET(%rsp) 290 285 /* Use IRET because user could have changed frame */ 291 286 jmp int_ret_from_sys_call 292 287 CFI_ENDPROC
+4
arch/x86_64/kernel/pci-dma.c
··· 54 54 else 55 55 #endif 56 56 node = numa_node_id(); 57 + 58 + if (node < first_node(node_online_map)) 59 + node = first_node(node_online_map); 60 + 57 61 page = alloc_pages_node(node, gfp, order); 58 62 return page ? page_address(page) : NULL; 59 63 }
+2 -4
arch/x86_64/kernel/pci-gart.c
··· 631 631 printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n"); 632 632 if (end_pfn > MAX_DMA32_PFN) { 633 633 printk(KERN_ERR "WARNING more than 4GB of memory " 634 - "but IOMMU not compiled in.\n" 635 - KERN_ERR "WARNING 32bit PCI may malfunction.\n" 636 - KERN_ERR "You might want to enable " 637 - "CONFIG_GART_IOMMU\n"); 634 + "but IOMMU not available.\n" 635 + KERN_ERR "WARNING 32bit PCI may malfunction.\n"); 638 636 } 639 637 return -1; 640 638 }
+1 -1
arch/x86_64/kernel/pmtimer.c
··· 68 68 offset_delay = delta % (USEC_PER_SEC / HZ); 69 69 70 70 rdtscll(tsc); 71 - vxtime.last_tsc = tsc - offset_delay * cpu_khz; 71 + vxtime.last_tsc = tsc - offset_delay * (u64)cpu_khz / 1000; 72 72 73 73 /* don't calculate delay for first run, 74 74 or if we've got less then a tick */
+1 -1
arch/x86_64/kernel/setup.c
··· 1051 1051 for now. */ 1052 1052 node = apicid_to_node[hard_smp_processor_id()]; 1053 1053 if (node == NUMA_NO_NODE) 1054 - node = 0; 1054 + node = first_node(node_online_map); 1055 1055 numa_set_node(cpu, node); 1056 1056 1057 1057 if (acpi_numa > 0)
+3 -1
arch/x86_64/mm/srat.c
··· 399 399 /* First clean up the node list */ 400 400 for (i = 0; i < MAX_NUMNODES; i++) { 401 401 cutoff_node(i, start, end); 402 - if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) 402 + if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) { 403 403 unparse_node(i); 404 + node_set_offline(i); 405 + } 404 406 } 405 407 406 408 if (acpi_numa <= 0)
+54 -23
block/cfq-iosched.c
··· 33 33 34 34 #define CFQ_KEY_ASYNC (0) 35 35 36 - static DEFINE_RWLOCK(cfq_exit_lock); 36 + static DEFINE_SPINLOCK(cfq_exit_lock); 37 37 38 38 /* 39 39 * for the hash of cfqq inside the cfqd ··· 133 133 mempool_t *crq_pool; 134 134 135 135 int rq_in_driver; 136 + int hw_tag; 136 137 137 138 /* 138 139 * schedule slice state info ··· 501 500 502 501 /* 503 502 * if queue was preempted, just add to front to be fair. busy_rr 504 - * isn't sorted. 503 + * isn't sorted, but insert at the back for fairness. 505 504 */ 506 505 if (preempted || list == &cfqd->busy_rr) { 507 - list_add(&cfqq->cfq_list, list); 506 + if (preempted) 507 + list = list->prev; 508 + 509 + list_add_tail(&cfqq->cfq_list, list); 508 510 return; 509 511 } 510 512 ··· 668 664 struct cfq_data *cfqd = q->elevator->elevator_data; 669 665 670 666 cfqd->rq_in_driver++; 667 + 668 + /* 669 + * If the depth is larger 1, it really could be queueing. But lets 670 + * make the mark a little higher - idling could still be good for 671 + * low queueing, and a low queueing number could also just indicate 672 + * a SCSI mid layer like behaviour where limit+1 is often seen. 673 + */ 674 + if (!cfqd->hw_tag && cfqd->rq_in_driver > 4) 675 + cfqd->hw_tag = 1; 671 676 } 672 677 673 678 static void cfq_deactivate_request(request_queue_t *q, struct request *rq) ··· 890 877 */ 891 878 if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) 892 879 cfqq = list_entry_cfqq(cfqd->cur_rr.next); 880 + 881 + /* 882 + * If no new queues are available, check if the busy list has some 883 + * before falling back to idle io. 
884 + */ 885 + if (!cfqq && !list_empty(&cfqd->busy_rr)) 886 + cfqq = list_entry_cfqq(cfqd->busy_rr.next); 893 887 894 888 /* 895 889 * if we have idle queues and no rt or be queues had pending ··· 1304 1284 /* 1305 1285 * put the reference this task is holding to the various queues 1306 1286 */ 1307 - read_lock_irqsave(&cfq_exit_lock, flags); 1287 + spin_lock_irqsave(&cfq_exit_lock, flags); 1308 1288 1309 1289 n = rb_first(&ioc->cic_root); 1310 1290 while (n != NULL) { ··· 1314 1294 n = rb_next(n); 1315 1295 } 1316 1296 1317 - read_unlock_irqrestore(&cfq_exit_lock, flags); 1297 + spin_unlock_irqrestore(&cfq_exit_lock, flags); 1318 1298 } 1319 1299 1320 1300 static struct cfq_io_context * ··· 1420 1400 struct cfq_io_context *cic; 1421 1401 struct rb_node *n; 1422 1402 1423 - write_lock(&cfq_exit_lock); 1403 + spin_lock(&cfq_exit_lock); 1424 1404 1425 1405 n = rb_first(&ioc->cic_root); 1426 1406 while (n != NULL) { 1427 1407 cic = rb_entry(n, struct cfq_io_context, rb_node); 1428 - 1408 + 1429 1409 changed_ioprio(cic); 1430 1410 n = rb_next(n); 1431 1411 } 1432 1412 1433 - write_unlock(&cfq_exit_lock); 1413 + spin_unlock(&cfq_exit_lock); 1434 1414 1435 1415 return 0; 1436 1416 } ··· 1478 1458 * set ->slice_left to allow preemption for a new process 1479 1459 */ 1480 1460 cfqq->slice_left = 2 * cfqd->cfq_slice_idle; 1481 - cfq_mark_cfqq_idle_window(cfqq); 1461 + if (!cfqd->hw_tag) 1462 + cfq_mark_cfqq_idle_window(cfqq); 1482 1463 cfq_mark_cfqq_prio_changed(cfqq); 1483 1464 cfq_init_prio_data(cfqq); 1484 1465 } ··· 1496 1475 static void 1497 1476 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic) 1498 1477 { 1499 - read_lock(&cfq_exit_lock); 1478 + spin_lock(&cfq_exit_lock); 1500 1479 rb_erase(&cic->rb_node, &ioc->cic_root); 1501 - read_unlock(&cfq_exit_lock); 1480 + list_del_init(&cic->queue_list); 1481 + spin_unlock(&cfq_exit_lock); 1502 1482 kmem_cache_free(cfq_ioc_pool, cic); 1503 1483 atomic_dec(&ioc_count); 1504 1484 } ··· 1567 1545 BUG(); 
1568 1546 } 1569 1547 1570 - read_lock(&cfq_exit_lock); 1548 + spin_lock(&cfq_exit_lock); 1571 1549 rb_link_node(&cic->rb_node, parent, p); 1572 1550 rb_insert_color(&cic->rb_node, &ioc->cic_root); 1573 1551 list_add(&cic->queue_list, &cfqd->cic_list); 1574 - read_unlock(&cfq_exit_lock); 1552 + spin_unlock(&cfq_exit_lock); 1575 1553 } 1576 1554 1577 1555 /* ··· 1670 1648 { 1671 1649 int enable_idle = cfq_cfqq_idle_window(cfqq); 1672 1650 1673 - if (!cic->ioc->task || !cfqd->cfq_slice_idle) 1651 + if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag) 1674 1652 enable_idle = 0; 1675 1653 else if (sample_valid(cic->ttime_samples)) { 1676 1654 if (cic->ttime_mean > cfqd->cfq_slice_idle) ··· 1761 1739 1762 1740 cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); 1763 1741 1742 + cic = crq->io_context; 1743 + 1764 1744 /* 1765 1745 * we never wait for an async request and we don't allow preemption 1766 1746 * of an async request. so just return early 1767 1747 */ 1768 - if (!cfq_crq_is_sync(crq)) 1748 + if (!cfq_crq_is_sync(crq)) { 1749 + /* 1750 + * sync process issued an async request, if it's waiting 1751 + * then expire it and kick rq handling. 
1752 + */ 1753 + if (cic == cfqd->active_cic && 1754 + del_timer(&cfqd->idle_slice_timer)) { 1755 + cfq_slice_expired(cfqd, 0); 1756 + cfq_start_queueing(cfqd, cfqq); 1757 + } 1769 1758 return; 1770 - 1771 - cic = crq->io_context; 1759 + } 1772 1760 1773 1761 cfq_update_io_thinktime(cfqd, cic); 1774 1762 cfq_update_io_seektime(cfqd, cic, crq); ··· 2196 2164 * race with a non-idle queue, reset timer 2197 2165 */ 2198 2166 end = cfqd->last_end_request + CFQ_IDLE_GRACE; 2199 - if (!time_after_eq(jiffies, end)) { 2200 - cfqd->idle_class_timer.expires = end; 2201 - add_timer(&cfqd->idle_class_timer); 2202 - } else 2167 + if (!time_after_eq(jiffies, end)) 2168 + mod_timer(&cfqd->idle_class_timer, end); 2169 + else 2203 2170 cfq_schedule_dispatch(cfqd); 2204 2171 2205 2172 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); ··· 2218 2187 2219 2188 cfq_shutdown_timer_wq(cfqd); 2220 2189 2221 - write_lock(&cfq_exit_lock); 2190 + spin_lock(&cfq_exit_lock); 2222 2191 spin_lock_irq(q->queue_lock); 2223 2192 2224 2193 if (cfqd->active_queue) ··· 2241 2210 } 2242 2211 2243 2212 spin_unlock_irq(q->queue_lock); 2244 - write_unlock(&cfq_exit_lock); 2213 + spin_unlock(&cfq_exit_lock); 2245 2214 2246 2215 cfq_shutdown_timer_wq(cfqd); 2247 2216
+1 -4
drivers/base/power/suspend.c
··· 8 8 * 9 9 */ 10 10 11 - #include <linux/vt_kern.h> 12 11 #include <linux/device.h> 13 12 #include <linux/kallsyms.h> 14 13 #include <linux/pm.h> ··· 65 66 return error; 66 67 } 67 68 69 + 68 70 /** 69 71 * device_suspend - Save state and stop all devices in system. 70 72 * @state: Power state to put each device in. ··· 84 84 int device_suspend(pm_message_t state) 85 85 { 86 86 int error = 0; 87 - 88 - if (!is_console_suspend_safe()) 89 - return -EINVAL; 90 87 91 88 down(&dpm_sem); 92 89 down(&dpm_list_sem);
+1 -1
drivers/char/agp/Kconfig
··· 86 86 87 87 config AGP_SIS 88 88 tristate "SiS chipset support" 89 - depends on AGP && X86_32 89 + depends on AGP 90 90 help 91 91 This option gives you AGP support for the GLX component of 92 92 X on Silicon Integrated Systems [SiS] chipsets.
+3
drivers/char/agp/amd64-agp.c
··· 617 617 pci_set_power_state(pdev, PCI_D0); 618 618 pci_restore_state(pdev); 619 619 620 + if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) 621 + nforce3_agp_init(pdev); 622 + 620 623 return amd_8151_configure(); 621 624 } 622 625
+7
drivers/char/agp/via-agp.c
··· 345 345 .chipset_name = "PT880", 346 346 }, 347 347 348 + /* PT880 Ultra */ 349 + { 350 + .device_id = PCI_DEVICE_ID_VIA_PT880ULTRA, 351 + .chipset_name = "PT880 Ultra", 352 + }, 353 + 348 354 /* PT890 */ 349 355 { 350 356 .device_id = PCI_DEVICE_ID_VIA_8783_0, ··· 517 511 ID(PCI_DEVICE_ID_VIA_8763_0), 518 512 ID(PCI_DEVICE_ID_VIA_8378_0), 519 513 ID(PCI_DEVICE_ID_VIA_PT880), 514 + ID(PCI_DEVICE_ID_VIA_PT880ULTRA), 520 515 ID(PCI_DEVICE_ID_VIA_8783_0), 521 516 ID(PCI_DEVICE_ID_VIA_PX8X0_0), 522 517 ID(PCI_DEVICE_ID_VIA_3269_0),
+22 -16
drivers/char/ipmi/ipmi_si_intf.c
··· 1184 1184 static void port_cleanup(struct smi_info *info) 1185 1185 { 1186 1186 unsigned int addr = info->io.addr_data; 1187 - int mapsize; 1187 + int idx; 1188 1188 1189 1189 if (addr) { 1190 - mapsize = ((info->io_size * info->io.regspacing) 1191 - - (info->io.regspacing - info->io.regsize)); 1192 - 1193 - release_region (addr, mapsize); 1190 + for (idx = 0; idx < info->io_size; idx++) { 1191 + release_region(addr + idx * info->io.regspacing, 1192 + info->io.regsize); 1193 + } 1194 1194 } 1195 1195 } 1196 1196 1197 1197 static int port_setup(struct smi_info *info) 1198 1198 { 1199 1199 unsigned int addr = info->io.addr_data; 1200 - int mapsize; 1200 + int idx; 1201 1201 1202 1202 if (!addr) 1203 1203 return -ENODEV; ··· 1225 1225 return -EINVAL; 1226 1226 } 1227 1227 1228 - /* Calculate the total amount of memory to claim. This is an 1229 - * unusual looking calculation, but it avoids claiming any 1230 - * more memory than it has to. It will claim everything 1231 - * between the first address to the end of the last full 1232 - * register. */ 1233 - mapsize = ((info->io_size * info->io.regspacing) 1234 - - (info->io.regspacing - info->io.regsize)); 1235 - 1236 - if (request_region(addr, mapsize, DEVICE_NAME) == NULL) 1237 - return -EIO; 1228 + /* Some BIOSes reserve disjoint I/O regions in their ACPI 1229 + * tables. This causes problems when trying to register the 1230 + * entire I/O region. Therefore we must register each I/O 1231 + * port separately. 1232 + */ 1233 + for (idx = 0; idx < info->io_size; idx++) { 1234 + if (request_region(addr + idx * info->io.regspacing, 1235 + info->io.regsize, DEVICE_NAME) == NULL) { 1236 + /* Undo allocations */ 1237 + while (idx--) { 1238 + release_region(addr + idx * info->io.regspacing, 1239 + info->io.regsize); 1240 + } 1241 + return -EIO; 1242 + } 1243 + } 1238 1244 return 0; 1239 1245 } 1240 1246
+1 -1
drivers/char/pcmcia/cm4000_cs.c
··· 149 149 #define ZERO_DEV(dev) \ 150 150 memset(&dev->atr_csum,0, \ 151 151 sizeof(struct cm4000_dev) - \ 152 - /*link*/ sizeof(struct pcmcia_device) - \ 152 + /*link*/ sizeof(struct pcmcia_device *) - \ 153 153 /*node*/ sizeof(dev_node_t) - \ 154 154 /*atr*/ MAX_ATR*sizeof(char) - \ 155 155 /*rbuf*/ 512*sizeof(char) - \
+29 -60
drivers/char/tpm/tpm_bios.c
··· 105 105 "Non-Host Info" 106 106 }; 107 107 108 + struct tcpa_pc_event { 109 + u32 event_id; 110 + u32 event_size; 111 + u8 event_data[0]; 112 + }; 113 + 108 114 enum tcpa_pc_event_ids { 109 115 SMBIOS = 1, 110 116 BIS_CERT, ··· 120 114 NVRAM, 121 115 OPTION_ROM_EXEC, 122 116 OPTION_ROM_CONFIG, 123 - OPTION_ROM_MICROCODE, 117 + OPTION_ROM_MICROCODE = 10, 124 118 S_CRTM_VERSION, 125 119 S_CRTM_CONTENTS, 126 120 POST_CONTENTS, 121 + HOST_TABLE_OF_DEVICES, 127 122 }; 128 123 129 124 static const char* tcpa_pc_event_id_strings[] = { 130 - "" 125 + "", 131 126 "SMBIOS", 132 127 "BIS Certificate", 133 128 "POST BIOS ", ··· 137 130 "NVRAM", 138 131 "Option ROM", 139 132 "Option ROM config", 140 - "Option ROM microcode", 133 + "", 134 + "Option ROM microcode ", 141 135 "S-CRTM Version", 142 - "S-CRTM Contents", 143 - "S-CRTM POST Contents", 144 - "POST Contents", 136 + "S-CRTM Contents ", 137 + "POST Contents ", 138 + "Table of Devices", 145 139 }; 146 140 147 141 /* returns pointer to start of pos. 
entry of tcg log */ ··· 214 206 const char *name = ""; 215 207 char data[40] = ""; 216 208 int i, n_len = 0, d_len = 0; 217 - u32 event_id; 209 + struct tcpa_pc_event *pc_event; 218 210 219 211 switch(event->event_type) { 220 212 case PREBOOT: ··· 243 235 } 244 236 break; 245 237 case EVENT_TAG: 246 - event_id = be32_to_cpu(*((u32 *)event_entry)); 238 + pc_event = (struct tcpa_pc_event *)event_entry; 247 239 248 240 /* ToDo Row data -> Base64 */ 249 241 250 - switch (event_id) { 242 + switch (pc_event->event_id) { 251 243 case SMBIOS: 252 244 case BIS_CERT: 253 245 case CMOS: 254 246 case NVRAM: 255 247 case OPTION_ROM_EXEC: 256 248 case OPTION_ROM_CONFIG: 257 - case OPTION_ROM_MICROCODE: 258 249 case S_CRTM_VERSION: 259 - case S_CRTM_CONTENTS: 260 - case POST_CONTENTS: 261 - name = tcpa_pc_event_id_strings[event_id]; 250 + name = tcpa_pc_event_id_strings[pc_event->event_id]; 262 251 n_len = strlen(name); 263 252 break; 253 + /* hash data */ 264 254 case POST_BIOS_ROM: 265 255 case ESCD: 266 - name = tcpa_pc_event_id_strings[event_id]; 256 + case OPTION_ROM_MICROCODE: 257 + case S_CRTM_CONTENTS: 258 + case POST_CONTENTS: 259 + name = tcpa_pc_event_id_strings[pc_event->event_id]; 267 260 n_len = strlen(name); 268 261 for (i = 0; i < 20; i++) 269 - d_len += sprintf(data, "%02x", 270 - event_entry[8 + i]); 262 + d_len += sprintf(&data[2*i], "%02x", 263 + pc_event->event_data[i]); 271 264 break; 272 265 default: 273 266 break; ··· 284 275 285 276 static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v) 286 277 { 278 + struct tcpa_event *event = v; 279 + char *data = v; 280 + int i; 287 281 288 - char *eventname; 289 - char data[4]; 290 - u32 help; 291 - int i, len; 292 - struct tcpa_event *event = (struct tcpa_event *) v; 293 - unsigned char *event_entry = 294 - (unsigned char *) (v + sizeof(struct tcpa_event)); 295 - 296 - eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL); 297 - if (!eventname) { 298 - printk(KERN_ERR "%s: ERROR - No Memory for event 
name\n ", 299 - __func__); 300 - return -ENOMEM; 301 - } 302 - 303 - /* 1st: PCR used is in little-endian format (4 bytes) */ 304 - help = le32_to_cpu(event->pcr_index); 305 - memcpy(data, &help, 4); 306 - for (i = 0; i < 4; i++) 282 + for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) 307 283 seq_putc(m, data[i]); 308 284 309 - /* 2nd: SHA1 (20 bytes) */ 310 - for (i = 0; i < 20; i++) 311 - seq_putc(m, event->pcr_value[i]); 312 - 313 - /* 3rd: event type identifier (4 bytes) */ 314 - help = le32_to_cpu(event->event_type); 315 - memcpy(data, &help, 4); 316 - for (i = 0; i < 4; i++) 317 - seq_putc(m, data[i]); 318 - 319 - len = 0; 320 - 321 - len += get_event_name(eventname, event, event_entry); 322 - 323 - /* 4th: filename <= 255 + \'0' delimiter */ 324 - if (len > TCG_EVENT_NAME_LEN_MAX) 325 - len = TCG_EVENT_NAME_LEN_MAX; 326 - 327 - for (i = 0; i < len; i++) 328 - seq_putc(m, eventname[i]); 329 - 330 - /* 5th: delimiter */ 331 - seq_putc(m, '\0'); 332 - 333 - kfree(eventname); 334 285 return 0; 335 286 } 336 287
-8
drivers/char/vt.c
··· 3238 3238 } 3239 3239 } 3240 3240 3241 - int is_console_suspend_safe(void) 3242 - { 3243 - /* It is unsafe to suspend devices while X has control of the 3244 - * hardware. Make sure we are running on a kernel-controlled console. 3245 - */ 3246 - return vc_cons[fg_console].d->vc_mode == KD_TEXT; 3247 - } 3248 - 3249 3241 /* 3250 3242 * Visible symbols for modules 3251 3243 */
+11 -5
drivers/ide/pci/sgiioc4.c
··· 345 345 static u8 346 346 sgiioc4_INB(unsigned long port) 347 347 { 348 - u8 reg = (u8) inb(port); 348 + u8 reg = (u8) readb((void __iomem *) port); 349 349 350 350 if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */ 351 351 if (reg & 0x51) { /* Not busy...check for interrupt */ 352 352 unsigned long other_ir = port - 0x110; 353 - unsigned int intr_reg = (u32) inl(other_ir); 353 + unsigned int intr_reg = (u32) readl((void __iomem *) other_ir); 354 354 355 355 /* Clear the Interrupt, Error bits on the IOC4 */ 356 356 if (intr_reg & 0x03) { 357 - outl(0x03, other_ir); 358 - intr_reg = (u32) inl(other_ir); 357 + writel(0x03, (void __iomem *) other_ir); 358 + intr_reg = (u32) readl((void __iomem *) other_ir); 359 359 } 360 360 } 361 361 } ··· 606 606 hwif->ide_dma_host_off = &sgiioc4_ide_dma_host_off; 607 607 hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq; 608 608 hwif->ide_dma_timeout = &__ide_dma_timeout; 609 + 610 + /* 611 + * The IOC4 uses MMIO rather than Port IO. 612 + * It also needs special workarounds for INB. 613 + */ 614 + default_hwif_mmiops(hwif); 609 615 hwif->INB = &sgiioc4_INB; 610 616 } 611 617 ··· 749 743 module_init(ioc4_ide_init); 750 744 module_exit(ioc4_ide_exit); 751 745 752 - MODULE_AUTHOR("Aniket Malatpure - Silicon Graphics Inc. (SGI)"); 746 + MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon"); 753 747 MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card"); 754 748 MODULE_LICENSE("GPL");
+1 -1
drivers/ieee1394/sbp2.c
··· 845 845 &sbp2_highlevel, ud->ne->host, &sbp2_ops, 846 846 sizeof(struct sbp2_status_block), sizeof(quadlet_t), 847 847 0x010000000000ULL, CSR1212_ALL_SPACE_END); 848 - if (!scsi_id->status_fifo_addr) { 848 + if (scsi_id->status_fifo_addr == ~0ULL) { 849 849 SBP2_ERR("failed to allocate status FIFO address range"); 850 850 goto failed_alloc; 851 851 }
+1
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 275 275 spin_lock_irqsave(&priv->tx_lock, flags); 276 276 ++priv->tx_tail; 277 277 if (netif_queue_stopped(dev) && 278 + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) && 278 279 priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1) 279 280 netif_wake_queue(dev); 280 281 spin_unlock_irqrestore(&priv->tx_lock, flags);
+6 -5
drivers/input/joystick/sidewinder.c
··· 589 589 struct sw *sw; 590 590 struct input_dev *input_dev; 591 591 int i, j, k, l; 592 - int err; 592 + int err = 0; 593 593 unsigned char *buf = NULL; /* [SW_LENGTH] */ 594 594 unsigned char *idbuf = NULL; /* [SW_LENGTH] */ 595 595 unsigned char m = 1; ··· 776 776 goto fail4; 777 777 } 778 778 779 - return 0; 779 + out: kfree(buf); 780 + kfree(idbuf); 781 + 782 + return err; 780 783 781 784 fail4: input_free_device(sw->dev[i]); 782 785 fail3: while (--i >= 0) ··· 787 784 fail2: gameport_close(gameport); 788 785 fail1: gameport_set_drvdata(gameport, NULL); 789 786 kfree(sw); 790 - kfree(buf); 791 - kfree(idbuf); 792 - return err; 787 + goto out; 793 788 } 794 789 795 790 static void sw_disconnect(struct gameport *gameport)
+6 -6
drivers/input/keyboard/corgikbd.c
··· 245 245 if (hinge_count >= HINGE_STABLE_COUNT) { 246 246 spin_lock_irqsave(&corgikbd_data->lock, flags); 247 247 248 - input_report_switch(corgikbd_data->input, SW_0, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0)); 249 - input_report_switch(corgikbd_data->input, SW_1, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0)); 250 - input_report_switch(corgikbd_data->input, SW_2, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0)); 248 + input_report_switch(corgikbd_data->input, SW_LID, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0)); 249 + input_report_switch(corgikbd_data->input, SW_TABLET_MODE, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0)); 250 + input_report_switch(corgikbd_data->input, SW_HEADPHONE_INSERT, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0)); 251 251 input_sync(corgikbd_data->input); 252 252 253 253 spin_unlock_irqrestore(&corgikbd_data->lock, flags); ··· 340 340 for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++) 341 341 set_bit(corgikbd->keycode[i], input_dev->keybit); 342 342 clear_bit(0, input_dev->keybit); 343 - set_bit(SW_0, input_dev->swbit); 344 - set_bit(SW_1, input_dev->swbit); 345 - set_bit(SW_2, input_dev->swbit); 343 + set_bit(SW_LID, input_dev->swbit); 344 + set_bit(SW_TABLET_MODE, input_dev->swbit); 345 + set_bit(SW_HEADPHONE_INSERT, input_dev->swbit); 346 346 347 347 input_register_device(corgikbd->input); 348 348
+6 -6
drivers/input/keyboard/spitzkbd.c
··· 299 299 if (hinge_count >= HINGE_STABLE_COUNT) { 300 300 spin_lock_irqsave(&spitzkbd_data->lock, flags); 301 301 302 - input_report_switch(spitzkbd_data->input, SW_0, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0)); 303 - input_report_switch(spitzkbd_data->input, SW_1, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0)); 304 - input_report_switch(spitzkbd_data->input, SW_2, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0)); 302 + input_report_switch(spitzkbd_data->input, SW_LID, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0)); 303 + input_report_switch(spitzkbd_data->input, SW_TABLET_MODE, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0)); 304 + input_report_switch(spitzkbd_data->input, SW_HEADPHONE_INSERT, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0)); 305 305 input_sync(spitzkbd_data->input); 306 306 307 307 spin_unlock_irqrestore(&spitzkbd_data->lock, flags); ··· 398 398 for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++) 399 399 set_bit(spitzkbd->keycode[i], input_dev->keybit); 400 400 clear_bit(0, input_dev->keybit); 401 - set_bit(SW_0, input_dev->swbit); 402 - set_bit(SW_1, input_dev->swbit); 403 - set_bit(SW_2, input_dev->swbit); 401 + set_bit(SW_LID, input_dev->swbit); 402 + set_bit(SW_TABLET_MODE, input_dev->swbit); 403 + set_bit(SW_HEADPHONE_INSERT, input_dev->swbit); 404 404 405 405 input_register_device(input_dev); 406 406
+19
drivers/input/misc/wistron_btns.c
··· 318 318 { KE_END, 0 } 319 319 }; 320 320 321 + static struct key_entry keymap_aopen_1559as[] = { 322 + { KE_KEY, 0x01, KEY_HELP }, 323 + { KE_KEY, 0x06, KEY_PROG3 }, 324 + { KE_KEY, 0x11, KEY_PROG1 }, 325 + { KE_KEY, 0x12, KEY_PROG2 }, 326 + { KE_WIFI, 0x30, 0 }, 327 + { KE_KEY, 0x31, KEY_MAIL }, 328 + { KE_KEY, 0x36, KEY_WWW }, 329 + }; 330 + 321 331 /* 322 332 * If your machine is not here (which is currently rather likely), please send 323 333 * a list of buttons and their key codes (reported when loading this module ··· 378 368 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 240"), 379 369 }, 380 370 .driver_data = keymap_acer_travelmate_240 371 + }, 372 + { 373 + .callback = dmi_matched, 374 + .ident = "AOpen 1559AS", 375 + .matches = { 376 + DMI_MATCH(DMI_PRODUCT_NAME, "E2U"), 377 + DMI_MATCH(DMI_BOARD_NAME, "E2U"), 378 + }, 379 + .driver_data = keymap_aopen_1559as 381 380 }, 382 381 { NULL, } 383 382 };
+2 -2
drivers/input/mouse/alps.c
··· 100 100 } 101 101 102 102 if (priv->i->flags & ALPS_OLDPROTO) { 103 - left = packet[2] & 0x08; 104 - right = packet[2] & 0x10; 103 + left = packet[2] & 0x10; 104 + right = packet[2] & 0x08; 105 105 middle = 0; 106 106 x = packet[1] | ((packet[0] & 0x07) << 7); 107 107 y = packet[4] | ((packet[3] & 0x07) << 7);
+24
drivers/input/mouse/lifebook.c
··· 22 22 23 23 static struct dmi_system_id lifebook_dmi_table[] = { 24 24 { 25 + .ident = "LifeBook B", 26 + .matches = { 27 + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"), 28 + }, 29 + }, 30 + { 25 31 .ident = "Lifebook B", 26 32 .matches = { 27 33 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK B Series"), 34 + }, 35 + }, 36 + { 37 + .ident = "Lifebook B213x/B2150", 38 + .matches = { 39 + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B2131/B2133/B2150"), 40 + }, 41 + }, 42 + { 43 + .ident = "Zephyr", 44 + .matches = { 45 + DMI_MATCH(DMI_PRODUCT_NAME, "ZEPHYR"), 46 + }, 47 + }, 48 + { 49 + .ident = "CF-18", 50 + .matches = { 51 + DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), 28 52 }, 29 53 }, 30 54 {
+6
drivers/input/mouse/logips2pp.c
··· 19 19 #define PS2PP_KIND_WHEEL 1 20 20 #define PS2PP_KIND_MX 2 21 21 #define PS2PP_KIND_TP3 3 22 + #define PS2PP_KIND_TRACKMAN 4 22 23 23 24 /* Logitech mouse features */ 24 25 #define PS2PP_WHEEL 0x01 ··· 224 223 { 73, 0, PS2PP_SIDE_BTN }, 225 224 { 75, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 226 225 { 76, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 226 + { 79, PS2PP_KIND_TRACKMAN, PS2PP_WHEEL }, /* TrackMan with wheel */ 227 227 { 80, PS2PP_KIND_WHEEL, PS2PP_SIDE_BTN | PS2PP_WHEEL }, 228 228 { 81, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, 229 229 { 83, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, ··· 298 296 299 297 case PS2PP_KIND_TP3: 300 298 psmouse->name = "TouchPad 3"; 299 + break; 300 + 301 + case PS2PP_KIND_TRACKMAN: 302 + psmouse->name = "TrackMan"; 301 303 break; 302 304 303 305 default:
+10 -1
drivers/md/md.c
··· 167 167 } 168 168 EXPORT_SYMBOL_GPL(md_new_event); 169 169 170 + /* Alternate version that can be called from interrupts 171 + * when calling sysfs_notify isn't needed. 172 + */ 173 + void md_new_event_inintr(mddev_t *mddev) 174 + { 175 + atomic_inc(&md_event_count); 176 + wake_up(&md_event_waiters); 177 + } 178 + 170 179 /* 171 180 * Enables to iterate over all existing md arrays 172 181 * all_mddevs_lock protects this list. ··· 4158 4149 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4159 4150 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4160 4151 md_wakeup_thread(mddev->thread); 4161 - md_new_event(mddev); 4152 + md_new_event_inintr(mddev); 4162 4153 } 4163 4154 4164 4155 /* seq_file implementation /proc/mdstat */
+21 -6
drivers/message/fusion/mptbase.c
··· 1605 1605 } 1606 1606 #endif 1607 1607 1608 + static int 1609 + mpt_signal_reset(int index, MPT_ADAPTER *ioc, int reset_phase) 1610 + { 1611 + if ((MptDriverClass[index] == MPTSPI_DRIVER && 1612 + ioc->bus_type != SPI) || 1613 + (MptDriverClass[index] == MPTFC_DRIVER && 1614 + ioc->bus_type != FC) || 1615 + (MptDriverClass[index] == MPTSAS_DRIVER && 1616 + ioc->bus_type != SAS)) 1617 + /* make sure we only call the relevant reset handler 1618 + * for the bus */ 1619 + return 0; 1620 + return (MptResetHandlers[index])(ioc, reset_phase); 1621 + } 1622 + 1608 1623 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1609 1624 /* 1610 1625 * mpt_do_ioc_recovery - Initialize or recover MPT adapter. ··· 1900 1885 if ((ret == 0) && MptResetHandlers[ii]) { 1901 1886 dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n", 1902 1887 ioc->name, ii)); 1903 - rc += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_POST_RESET); 1888 + rc += mpt_signal_reset(ii, ioc, MPT_IOC_POST_RESET); 1904 1889 handlers++; 1905 1890 } 1906 1891 1907 1892 if (alt_ioc_ready && MptResetHandlers[ii]) { 1908 1893 drsprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n", 1909 1894 ioc->name, ioc->alt_ioc->name, ii)); 1910 - rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET); 1895 + rc += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_POST_RESET); 1911 1896 handlers++; 1912 1897 } 1913 1898 } ··· 3282 3267 if (MptResetHandlers[ii]) { 3283 3268 dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n", 3284 3269 ioc->name, ii)); 3285 - r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_PRE_RESET); 3270 + r += mpt_signal_reset(ii, ioc, MPT_IOC_PRE_RESET); 3286 3271 if (ioc->alt_ioc) { 3287 3272 dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n", 3288 3273 ioc->name, ioc->alt_ioc->name, ii)); 3289 - r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_PRE_RESET); 3274 + r += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_PRE_RESET); 3290 
3275 } 3291 3276 } 3292 3277 } ··· 5721 5706 if (MptResetHandlers[ii]) { 5722 5707 dtmprintk((MYIOC_s_INFO_FMT "Calling IOC reset_setup handler #%d\n", 5723 5708 ioc->name, ii)); 5724 - r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_SETUP_RESET); 5709 + r += mpt_signal_reset(ii, ioc, MPT_IOC_SETUP_RESET); 5725 5710 if (ioc->alt_ioc) { 5726 5711 dtmprintk((MYIOC_s_INFO_FMT "Calling alt-%s setup reset handler #%d\n", 5727 5712 ioc->name, ioc->alt_ioc->name, ii)); 5728 - r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_SETUP_RESET); 5713 + r += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_SETUP_RESET); 5729 5714 } 5730 5715 } 5731 5716 }
+1 -1
drivers/mmc/Kconfig
··· 84 84 85 85 config MMC_AU1X 86 86 tristate "Alchemy AU1XX0 MMC Card Interface support" 87 - depends on SOC_AU1X00 && MMC 87 + depends on MMC && SOC_AU1200 88 88 help 89 89 This selects the AMD Alchemy(R) Multimedia card interface. 90 90 If you have a Alchemy platform with a MMC slot, say Y or M here.
+16
drivers/net/forcedeth.c
··· 2675 2675 return ret; 2676 2676 } 2677 2677 2678 + #ifdef NETIF_F_TSO 2679 + static int nv_set_tso(struct net_device *dev, u32 value) 2680 + { 2681 + struct fe_priv *np = netdev_priv(dev); 2682 + 2683 + if ((np->driver_data & DEV_HAS_CHECKSUM)) 2684 + return ethtool_op_set_tso(dev, value); 2685 + else 2686 + return value ? -EOPNOTSUPP : 0; 2687 + } 2688 + #endif 2689 + 2678 2690 static struct ethtool_ops ops = { 2679 2691 .get_drvinfo = nv_get_drvinfo, 2680 2692 .get_link = ethtool_op_get_link, ··· 2698 2686 .get_regs = nv_get_regs, 2699 2687 .nway_reset = nv_nway_reset, 2700 2688 .get_perm_addr = ethtool_op_get_perm_addr, 2689 + #ifdef NETIF_F_TSO 2690 + .get_tso = ethtool_op_get_tso, 2691 + .set_tso = nv_set_tso 2692 + #endif 2701 2693 }; 2702 2694 2703 2695 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+1 -1
drivers/net/netconsole.c
··· 107 107 108 108 if(!configured) { 109 109 printk("netconsole: not configured, aborting\n"); 110 - return -EINVAL; 110 + return 0; 111 111 } 112 112 113 113 if(netpoll_setup(&np))
+1 -1
drivers/net/pcmcia/nmclan_cs.c
··· 1204 1204 1205 1205 dev->last_rx = jiffies; 1206 1206 lp->linux_stats.rx_packets++; 1207 - lp->linux_stats.rx_bytes += skb->len; 1207 + lp->linux_stats.rx_bytes += pkt_len; 1208 1208 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ 1209 1209 continue; 1210 1210 } else {
+3
drivers/net/pppoe.c
··· 861 861 * give dev_queue_xmit something it can free. 862 862 */ 863 863 skb2 = skb_clone(skb, GFP_ATOMIC); 864 + 865 + if (skb2 == NULL) 866 + goto abort; 864 867 } 865 868 866 869 ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr));
+6
drivers/pcmcia/ds.c
··· 1143 1143 { 1144 1144 struct pcmcia_socket *s = pcmcia_get_socket(skt); 1145 1145 1146 + if (!s) { 1147 + printk(KERN_ERR "PCMCIA obtaining reference to socket %p " \ 1148 + "failed, event 0x%x lost!\n", skt, event); 1149 + return -ENODEV; 1150 + } 1151 + 1146 1152 ds_dbg(1, "ds_event(0x%06x, %d, 0x%p)\n", 1147 1153 event, priority, skt); 1148 1154
+36 -36
drivers/rtc/rtc-m48t86.c
··· 48 48 struct platform_device *pdev = to_platform_device(dev); 49 49 struct m48t86_ops *ops = pdev->dev.platform_data; 50 50 51 - reg = ops->readb(M48T86_REG_B); 51 + reg = ops->readbyte(M48T86_REG_B); 52 52 53 53 if (reg & M48T86_REG_B_DM) { 54 54 /* data (binary) mode */ 55 - tm->tm_sec = ops->readb(M48T86_REG_SEC); 56 - tm->tm_min = ops->readb(M48T86_REG_MIN); 57 - tm->tm_hour = ops->readb(M48T86_REG_HOUR) & 0x3F; 58 - tm->tm_mday = ops->readb(M48T86_REG_DOM); 55 + tm->tm_sec = ops->readbyte(M48T86_REG_SEC); 56 + tm->tm_min = ops->readbyte(M48T86_REG_MIN); 57 + tm->tm_hour = ops->readbyte(M48T86_REG_HOUR) & 0x3F; 58 + tm->tm_mday = ops->readbyte(M48T86_REG_DOM); 59 59 /* tm_mon is 0-11 */ 60 - tm->tm_mon = ops->readb(M48T86_REG_MONTH) - 1; 61 - tm->tm_year = ops->readb(M48T86_REG_YEAR) + 100; 62 - tm->tm_wday = ops->readb(M48T86_REG_DOW); 60 + tm->tm_mon = ops->readbyte(M48T86_REG_MONTH) - 1; 61 + tm->tm_year = ops->readbyte(M48T86_REG_YEAR) + 100; 62 + tm->tm_wday = ops->readbyte(M48T86_REG_DOW); 63 63 } else { 64 64 /* bcd mode */ 65 - tm->tm_sec = BCD2BIN(ops->readb(M48T86_REG_SEC)); 66 - tm->tm_min = BCD2BIN(ops->readb(M48T86_REG_MIN)); 67 - tm->tm_hour = BCD2BIN(ops->readb(M48T86_REG_HOUR) & 0x3F); 68 - tm->tm_mday = BCD2BIN(ops->readb(M48T86_REG_DOM)); 65 + tm->tm_sec = BCD2BIN(ops->readbyte(M48T86_REG_SEC)); 66 + tm->tm_min = BCD2BIN(ops->readbyte(M48T86_REG_MIN)); 67 + tm->tm_hour = BCD2BIN(ops->readbyte(M48T86_REG_HOUR) & 0x3F); 68 + tm->tm_mday = BCD2BIN(ops->readbyte(M48T86_REG_DOM)); 69 69 /* tm_mon is 0-11 */ 70 - tm->tm_mon = BCD2BIN(ops->readb(M48T86_REG_MONTH)) - 1; 71 - tm->tm_year = BCD2BIN(ops->readb(M48T86_REG_YEAR)) + 100; 72 - tm->tm_wday = BCD2BIN(ops->readb(M48T86_REG_DOW)); 70 + tm->tm_mon = BCD2BIN(ops->readbyte(M48T86_REG_MONTH)) - 1; 71 + tm->tm_year = BCD2BIN(ops->readbyte(M48T86_REG_YEAR)) + 100; 72 + tm->tm_wday = BCD2BIN(ops->readbyte(M48T86_REG_DOW)); 73 73 } 74 74 75 75 /* correct the hour if the clock is in 12h mode */ 76 76 
if (!(reg & M48T86_REG_B_H24)) 77 - if (ops->readb(M48T86_REG_HOUR) & 0x80) 77 + if (ops->readbyte(M48T86_REG_HOUR) & 0x80) 78 78 tm->tm_hour += 12; 79 79 80 80 return 0; ··· 86 86 struct platform_device *pdev = to_platform_device(dev); 87 87 struct m48t86_ops *ops = pdev->dev.platform_data; 88 88 89 - reg = ops->readb(M48T86_REG_B); 89 + reg = ops->readbyte(M48T86_REG_B); 90 90 91 91 /* update flag and 24h mode */ 92 92 reg |= M48T86_REG_B_SET | M48T86_REG_B_H24; 93 - ops->writeb(reg, M48T86_REG_B); 93 + ops->writebyte(reg, M48T86_REG_B); 94 94 95 95 if (reg & M48T86_REG_B_DM) { 96 96 /* data (binary) mode */ 97 - ops->writeb(tm->tm_sec, M48T86_REG_SEC); 98 - ops->writeb(tm->tm_min, M48T86_REG_MIN); 99 - ops->writeb(tm->tm_hour, M48T86_REG_HOUR); 100 - ops->writeb(tm->tm_mday, M48T86_REG_DOM); 101 - ops->writeb(tm->tm_mon + 1, M48T86_REG_MONTH); 102 - ops->writeb(tm->tm_year % 100, M48T86_REG_YEAR); 103 - ops->writeb(tm->tm_wday, M48T86_REG_DOW); 97 + ops->writebyte(tm->tm_sec, M48T86_REG_SEC); 98 + ops->writebyte(tm->tm_min, M48T86_REG_MIN); 99 + ops->writebyte(tm->tm_hour, M48T86_REG_HOUR); 100 + ops->writebyte(tm->tm_mday, M48T86_REG_DOM); 101 + ops->writebyte(tm->tm_mon + 1, M48T86_REG_MONTH); 102 + ops->writebyte(tm->tm_year % 100, M48T86_REG_YEAR); 103 + ops->writebyte(tm->tm_wday, M48T86_REG_DOW); 104 104 } else { 105 105 /* bcd mode */ 106 - ops->writeb(BIN2BCD(tm->tm_sec), M48T86_REG_SEC); 107 - ops->writeb(BIN2BCD(tm->tm_min), M48T86_REG_MIN); 108 - ops->writeb(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR); 109 - ops->writeb(BIN2BCD(tm->tm_mday), M48T86_REG_DOM); 110 - ops->writeb(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH); 111 - ops->writeb(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR); 112 - ops->writeb(BIN2BCD(tm->tm_wday), M48T86_REG_DOW); 106 + ops->writebyte(BIN2BCD(tm->tm_sec), M48T86_REG_SEC); 107 + ops->writebyte(BIN2BCD(tm->tm_min), M48T86_REG_MIN); 108 + ops->writebyte(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR); 109 + ops->writebyte(BIN2BCD(tm->tm_mday), 
M48T86_REG_DOM); 110 + ops->writebyte(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH); 111 + ops->writebyte(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR); 112 + ops->writebyte(BIN2BCD(tm->tm_wday), M48T86_REG_DOW); 113 113 } 114 114 115 115 /* update ended */ 116 116 reg &= ~M48T86_REG_B_SET; 117 - ops->writeb(reg, M48T86_REG_B); 117 + ops->writebyte(reg, M48T86_REG_B); 118 118 119 119 return 0; 120 120 } ··· 125 125 struct platform_device *pdev = to_platform_device(dev); 126 126 struct m48t86_ops *ops = pdev->dev.platform_data; 127 127 128 - reg = ops->readb(M48T86_REG_B); 128 + reg = ops->readbyte(M48T86_REG_B); 129 129 130 130 seq_printf(seq, "mode\t\t: %s\n", 131 131 (reg & M48T86_REG_B_DM) ? "binary" : "bcd"); 132 132 133 - reg = ops->readb(M48T86_REG_D); 133 + reg = ops->readbyte(M48T86_REG_D); 134 134 135 135 seq_printf(seq, "battery\t\t: %s\n", 136 136 (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted"); ··· 157 157 platform_set_drvdata(dev, rtc); 158 158 159 159 /* read battery status */ 160 - reg = ops->readb(M48T86_REG_D); 160 + reg = ops->readbyte(M48T86_REG_D); 161 161 dev_info(&dev->dev, "battery %s\n", 162 162 (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted"); 163 163
+2 -2
drivers/s390/cio/css.h
··· 45 45 union { 46 46 __u8 fc; /* SPID function code */ 47 47 struct path_state ps; /* SNID path state */ 48 - } inf; 48 + } __attribute__ ((packed)) inf; 49 49 union { 50 50 __u32 cpu_addr : 16; /* CPU address */ 51 51 struct extended_cssid ext_cssid; 52 - } pgid_high; 52 + } __attribute__ ((packed)) pgid_high; 53 53 __u32 cpu_id : 24; /* CPU identification */ 54 54 __u32 cpu_model : 16; /* CPU model */ 55 55 __u32 tod_high; /* high word TOD clock */
+1 -1
drivers/s390/cio/device_fsm.c
··· 749 749 /* Unit check but no sense data. Need basic sense. */ 750 750 if (ccw_device_do_sense(cdev, irb) != 0) 751 751 goto call_handler_unsol; 752 - memcpy(irb, &cdev->private->irb, sizeof(struct irb)); 752 + memcpy(&cdev->private->irb, irb, sizeof(struct irb)); 753 753 cdev->private->state = DEV_STATE_W4SENSE; 754 754 cdev->private->intparm = 0; 755 755 return;
+1
drivers/scsi/libata-core.c
··· 4297 4297 int ata_device_resume(struct ata_port *ap, struct ata_device *dev) 4298 4298 { 4299 4299 if (ap->flags & ATA_FLAG_SUSPENDED) { 4300 + ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000); 4300 4301 ap->flags &= ~ATA_FLAG_SUSPENDED; 4301 4302 ata_set_mode(ap); 4302 4303 }
+7
drivers/scsi/ppa.c
··· 982 982 return -ENODEV; 983 983 } 984 984 985 + static int ppa_adjust_queue(struct scsi_device *device) 986 + { 987 + blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); 988 + return 0; 989 + } 990 + 985 991 static struct scsi_host_template ppa_template = { 986 992 .module = THIS_MODULE, 987 993 .proc_name = "ppa", ··· 1003 997 .cmd_per_lun = 1, 1004 998 .use_clustering = ENABLE_CLUSTERING, 1005 999 .can_queue = 1, 1000 + .slave_alloc = ppa_adjust_queue, 1006 1001 }; 1007 1002 1008 1003 /***************************************************************************
+3 -3
drivers/scsi/sata_sil24.c
··· 454 454 */ 455 455 msleep(10); 456 456 457 - prb->ctrl = PRB_CTRL_SRST; 457 + prb->ctrl = cpu_to_le16(PRB_CTRL_SRST); 458 458 prb->fis[1] = 0; /* no PM yet */ 459 459 460 460 writel((u32)paddr, port + PORT_CMD_ACTIVATE); ··· 551 551 552 552 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 553 553 if (qc->tf.flags & ATA_TFLAG_WRITE) 554 - prb->ctrl = PRB_CTRL_PACKET_WRITE; 554 + prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE); 555 555 else 556 - prb->ctrl = PRB_CTRL_PACKET_READ; 556 + prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ); 557 557 } else 558 558 prb->ctrl = 0; 559 559
+1
drivers/scsi/scsi_devinfo.c
··· 165 165 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 166 166 {"HP", "C1557A", NULL, BLIST_FORCELUN}, 167 167 {"HP", "C3323-300", "4269", BLIST_NOTQ}, 168 + {"HP", "C5713A", NULL, BLIST_NOREPORTLUN}, 168 169 {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, 169 170 {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 170 171 {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
+1 -1
drivers/scsi/scsi_lib.c
··· 367 367 int nsegs, unsigned bufflen, gfp_t gfp) 368 368 { 369 369 struct request_queue *q = rq->q; 370 - int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT; 370 + int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 371 371 unsigned int data_len = 0, len, bytes, off; 372 372 struct page *page; 373 373 struct bio *bio = NULL;
+2 -2
drivers/scsi/scsi_transport_sas.c
··· 955 955 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 956 956 struct sas_phy *parent = dev_to_phy(rphy->dev.parent); 957 957 958 - if (rphy->scsi_target_id == -1) 958 + if (rphy->identify.device_type != SAS_END_DEVICE || 959 + rphy->scsi_target_id == -1) 959 960 continue; 960 961 961 962 if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && ··· 978 977 #define SETUP_TEMPLATE(attrb, field, perm, test) \ 979 978 i->private_##attrb[count] = class_device_attr_##field; \ 980 979 i->private_##attrb[count].attr.mode = perm; \ 981 - i->private_##attrb[count].store = NULL; \ 982 980 i->attrb[count] = &i->private_##attrb[count]; \ 983 981 if (test) \ 984 982 count++
+14 -7
drivers/video/au1100fb.c
··· 214 214 */ 215 215 int au1100fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fbi) 216 216 { 217 - struct au1100fb_device *fbdev = to_au1100fb_device(fbi); 218 - u32 *palette = fbdev->regs->lcd_pallettebase; 217 + struct au1100fb_device *fbdev; 218 + u32 *palette; 219 219 u32 value; 220 + 221 + fbdev = to_au1100fb_device(fbi); 222 + palette = fbdev->regs->lcd_pallettebase; 220 223 221 224 if (regno > (AU1100_LCD_NBR_PALETTE_ENTRIES - 1)) 222 225 return -EINVAL; ··· 319 316 */ 320 317 int au1100fb_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) 321 318 { 322 - struct au1100fb_device *fbdev = to_au1100fb_device(fbi); 319 + struct au1100fb_device *fbdev; 323 320 int dy; 321 + 322 + fbdev = to_au1100fb_device(fbi); 324 323 325 324 print_dbg("fb_pan_display %p %p", var, fbi); 326 325 ··· 387 382 */ 388 383 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) 389 384 { 390 - struct au1100fb_device *fbdev = to_au1100fb_device(fbi); 385 + struct au1100fb_device *fbdev; 391 386 unsigned int len; 392 387 unsigned long start=0, off; 388 + 389 + fbdev = to_au1100fb_device(fbi); 393 390 394 391 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { 395 392 return -EINVAL; ··· 474 467 475 468 if (!request_mem_region(au1100fb_fix.mmio_start, au1100fb_fix.mmio_len, 476 469 DRIVER_NAME)) { 477 - print_err("fail to lock memory region at 0x%08x", 470 + print_err("fail to lock memory region at 0x%08lx", 478 471 au1100fb_fix.mmio_start); 479 472 return -EBUSY; 480 473 } ··· 602 595 return 0; 603 596 } 604 597 605 - int au1100fb_drv_suspend(struct device *dev, u32 state, u32 level) 598 + int au1100fb_drv_suspend(struct device *dev, pm_message_t state) 606 599 { 607 600 /* TODO */ 608 601 return 0; 609 602 } 610 603 611 - int au1100fb_drv_resume(struct device *dev, u32 level) 604 + int au1100fb_drv_resume(struct device *dev) 612 605 { 613 606 /* TODO */ 614 607 return 0;
+1 -1
drivers/video/console/fbcon.c
··· 2631 2631 scr_memcpyw((u16 *) q, (u16 *) p, 2632 2632 vc->vc_size_row); 2633 2633 } 2634 - softback_in = p; 2634 + softback_in = softback_curr = p; 2635 2635 update_region(vc, vc->vc_origin, 2636 2636 logo_lines * vc->vc_cols); 2637 2637 }
+1 -3
drivers/video/maxinefb.c
··· 55 55 }; 56 56 57 57 static struct fb_fix_screeninfo maxinefb_fix = { 58 - .id = "Maxine onboard graphics 1024x768x8", 58 + .id = "Maxine", 59 59 .smem_len = (1024*768), 60 60 .type = FB_TYPE_PACKED_PIXELS, 61 61 .visual = FB_VISUAL_PSEUDOCOLOR, ··· 107 107 108 108 static struct fb_ops maxinefb_ops = { 109 109 .owner = THIS_MODULE, 110 - .fb_get_fix = gen_get_fix, 111 - .fb_get_var = gen_get_var, 112 110 .fb_setcolreg = maxinefb_setcolreg, 113 111 .fb_fillrect = cfb_fillrect, 114 112 .fb_copyarea = cfb_copyarea,
+7
fs/cifs/CHANGES
··· 1 + Version 1.43 2 + ------------ 3 + POSIX locking to servers which support CIFS POSIX Extensions 4 + (disabled by default controlled by proc/fs/cifs/Experimental). 5 + Handle conversion of long share names (especially Asian languages) 6 + to Unicode during mount. 7 + 1 8 Version 1.42 2 9 ------------ 3 10 Fix slow oplock break when mounted to different servers at the same time and
+1 -1
fs/cifs/cifsfs.h
··· 99 99 extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 100 100 extern int cifs_ioctl (struct inode * inode, struct file * filep, 101 101 unsigned int command, unsigned long arg); 102 - #define CIFS_VERSION "1.42" 102 + #define CIFS_VERSION "1.43" 103 103 #endif /* _CIFSFS_H */
+1 -1
fs/cifs/cifsproto.h
··· 267 267 const int waitFlag); 268 268 extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, 269 269 const __u16 smb_file_id, const int get_flag, 270 - const __u64 len, const __u64 offset, 270 + const __u64 len, struct file_lock *, 271 271 const __u16 lock_type, const int waitFlag); 272 272 extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon); 273 273 extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
+35 -5
fs/cifs/cifssmb.c
··· 1355 1355 int 1356 1356 CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, 1357 1357 const __u16 smb_file_id, const int get_flag, const __u64 len, 1358 - const __u64 lkoffset, const __u16 lock_type, const int waitFlag) 1358 + struct file_lock *pLockData, const __u16 lock_type, 1359 + const int waitFlag) 1359 1360 { 1360 1361 struct smb_com_transaction2_sfi_req *pSMB = NULL; 1361 1362 struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; ··· 1367 1366 __u16 params, param_offset, offset, byte_count, count; 1368 1367 1369 1368 cFYI(1, ("Posix Lock")); 1369 + 1370 + if(pLockData == NULL) 1371 + return EINVAL; 1372 + 1370 1373 rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB); 1371 1374 1372 1375 if (rc) ··· 1409 1404 1410 1405 parm_data->lock_type = cpu_to_le16(lock_type); 1411 1406 if(waitFlag) 1412 - parm_data->lock_flags = 1; 1407 + parm_data->lock_flags = cpu_to_le16(1); 1413 1408 parm_data->pid = cpu_to_le32(current->tgid); 1414 - parm_data->start = lkoffset; 1415 - parm_data->length = len; /* normalize negative numbers */ 1409 + parm_data->start = cpu_to_le64(pLockData->fl_start); 1410 + parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ 1416 1411 1417 1412 pSMB->DataOffset = cpu_to_le16(offset); 1418 1413 pSMB->Fid = smb_file_id; ··· 1424 1419 (struct smb_hdr *) pSMBr, &bytes_returned, 0); 1425 1420 if (rc) { 1426 1421 cFYI(1, ("Send error in Posix Lock = %d", rc)); 1427 - } 1422 + } else if (get_flag) { 1423 + /* lock structure can be returned on get */ 1424 + __u16 data_offset; 1425 + __u16 data_count; 1426 + rc = validate_t2((struct smb_t2_rsp *)pSMBr); 1428 1427 1428 + if (rc || (pSMBr->ByteCount < sizeof(struct cifs_posix_lock))) { 1429 + rc = -EIO; /* bad smb */ 1430 + goto plk_err_exit; 1431 + } 1432 + if(pLockData == NULL) { 1433 + rc = -EINVAL; 1434 + goto plk_err_exit; 1435 + } 1436 + data_offset = le16_to_cpu(pSMBr->t2.DataOffset); 1437 + data_count = le16_to_cpu(pSMBr->t2.DataCount); 1438 + 
if(data_count < sizeof(struct cifs_posix_lock)) { 1439 + rc = -EIO; 1440 + goto plk_err_exit; 1441 + } 1442 + parm_data = (struct cifs_posix_lock *) 1443 + ((char *)&pSMBr->hdr.Protocol + data_offset); 1444 + if(parm_data->lock_type == cpu_to_le16(CIFS_UNLCK)) 1445 + pLockData->fl_type = F_UNLCK; 1446 + } 1447 + 1448 + plk_err_exit: 1429 1449 if (pSMB) 1430 1450 cifs_small_buf_release(pSMB); 1431 1451
+88 -9
fs/cifs/connect.c
··· 2148 2148 /* We look for obvious messed up bcc or strings in response so we do not go off 2149 2149 the end since (at least) WIN2K and Windows XP have a major bug in not null 2150 2150 terminating last Unicode string in response */ 2151 + if(ses->serverOS) 2152 + kfree(ses->serverOS); 2151 2153 ses->serverOS = kzalloc(2 * (len + 1), GFP_KERNEL); 2152 2154 if(ses->serverOS == NULL) 2153 2155 goto sesssetup_nomem; ··· 2162 2160 if (remaining_words > 0) { 2163 2161 len = UniStrnlen((wchar_t *)bcc_ptr, 2164 2162 remaining_words-1); 2163 + if(ses->serverNOS) 2164 + kfree(ses->serverNOS); 2165 2165 ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL); 2166 2166 if(ses->serverNOS == NULL) 2167 2167 goto sesssetup_nomem; ··· 2181 2177 if (remaining_words > 0) { 2182 2178 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2183 2179 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ 2180 + if(ses->serverDomain) 2181 + kfree(ses->serverDomain); 2184 2182 ses->serverDomain = 2185 2183 kzalloc(2*(len+1),GFP_KERNEL); 2186 2184 if(ses->serverDomain == NULL) ··· 2193 2187 ses->serverDomain[2*len] = 0; 2194 2188 ses->serverDomain[1+(2*len)] = 0; 2195 2189 } /* else no more room so create dummy domain string */ 2196 - else 2190 + else { 2191 + if(ses->serverDomain) 2192 + kfree(ses->serverDomain); 2197 2193 ses->serverDomain = 2198 2194 kzalloc(2, GFP_KERNEL); 2195 + } 2199 2196 } else { /* no room so create dummy domain and NOS string */ 2200 2197 /* if these kcallocs fail not much we 2201 2198 can do, but better to not fail the 2202 2199 sesssetup itself */ 2200 + if(ses->serverDomain) 2201 + kfree(ses->serverDomain); 2203 2202 ses->serverDomain = 2204 2203 kzalloc(2, GFP_KERNEL); 2204 + if(ses->serverNOS) 2205 + kfree(ses->serverNOS); 2205 2206 ses->serverNOS = 2206 2207 kzalloc(2, GFP_KERNEL); 2207 2208 } ··· 2217 2204 if (((long) bcc_ptr + len) - (long) 2218 2205 pByteArea(smb_buffer_response) 2219 2206 <= BCC(smb_buffer_response)) { 2207 + 
if(ses->serverOS) 2208 + kfree(ses->serverOS); 2220 2209 ses->serverOS = kzalloc(len + 1,GFP_KERNEL); 2221 2210 if(ses->serverOS == NULL) 2222 2211 goto sesssetup_nomem; ··· 2229 2214 bcc_ptr++; 2230 2215 2231 2216 len = strnlen(bcc_ptr, 1024); 2217 + if(ses->serverNOS) 2218 + kfree(ses->serverNOS); 2232 2219 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL); 2233 2220 if(ses->serverNOS == NULL) 2234 2221 goto sesssetup_nomem; ··· 2240 2223 bcc_ptr++; 2241 2224 2242 2225 len = strnlen(bcc_ptr, 1024); 2226 + if(ses->serverDomain) 2227 + kfree(ses->serverDomain); 2243 2228 ses->serverDomain = kzalloc(len + 1,GFP_KERNEL); 2244 2229 if(ses->serverDomain == NULL) 2245 2230 goto sesssetup_nomem; ··· 2446 2427 /* We look for obvious messed up bcc or strings in response so we do not go off 2447 2428 the end since (at least) WIN2K and Windows XP have a major bug in not null 2448 2429 terminating last Unicode string in response */ 2430 + if(ses->serverOS) 2431 + kfree(ses->serverOS); 2449 2432 ses->serverOS = 2450 2433 kzalloc(2 * (len + 1), GFP_KERNEL); 2451 2434 cifs_strfromUCS_le(ses->serverOS, ··· 2462 2441 len = UniStrnlen((wchar_t *)bcc_ptr, 2463 2442 remaining_words 2464 2443 - 1); 2444 + if(ses->serverNOS) 2445 + kfree(ses->serverNOS); 2465 2446 ses->serverNOS = 2466 2447 kzalloc(2 * (len + 1), 2467 2448 GFP_KERNEL); ··· 2477 2454 remaining_words -= len + 1; 2478 2455 if (remaining_words > 0) { 2479 2456 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2480 - /* last string is not always null terminated (for e.g. 
for Windows XP & 2000) */ 2457 + /* last string not null terminated (e.g.Windows XP/2000) */ 2458 + if(ses->serverDomain) 2459 + kfree(ses->serverDomain); 2481 2460 ses->serverDomain = kzalloc(2*(len+1),GFP_KERNEL); 2482 2461 cifs_strfromUCS_le(ses->serverDomain, 2483 2462 (__le16 *)bcc_ptr, ··· 2488 2463 ses->serverDomain[2*len] = 0; 2489 2464 ses->serverDomain[1+(2*len)] = 0; 2490 2465 } /* else no more room so create dummy domain string */ 2491 - else 2466 + else { 2467 + if(ses->serverDomain) 2468 + kfree(ses->serverDomain); 2492 2469 ses->serverDomain = 2493 2470 kzalloc(2,GFP_KERNEL); 2494 - } else { /* no room so create dummy domain and NOS string */ 2471 + } 2472 + } else {/* no room use dummy domain&NOS */ 2473 + if(ses->serverDomain) 2474 + kfree(ses->serverDomain); 2495 2475 ses->serverDomain = kzalloc(2, GFP_KERNEL); 2476 + if(ses->serverNOS) 2477 + kfree(ses->serverNOS); 2496 2478 ses->serverNOS = kzalloc(2, GFP_KERNEL); 2497 2479 } 2498 2480 } else { /* ASCII */ ··· 2508 2476 if (((long) bcc_ptr + len) - (long) 2509 2477 pByteArea(smb_buffer_response) 2510 2478 <= BCC(smb_buffer_response)) { 2479 + if(ses->serverOS) 2480 + kfree(ses->serverOS); 2511 2481 ses->serverOS = kzalloc(len + 1, GFP_KERNEL); 2512 2482 strncpy(ses->serverOS, bcc_ptr, len); 2513 2483 ··· 2518 2484 bcc_ptr++; 2519 2485 2520 2486 len = strnlen(bcc_ptr, 1024); 2487 + if(ses->serverNOS) 2488 + kfree(ses->serverNOS); 2521 2489 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL); 2522 2490 strncpy(ses->serverNOS, bcc_ptr, len); 2523 2491 bcc_ptr += len; ··· 2527 2491 bcc_ptr++; 2528 2492 2529 2493 len = strnlen(bcc_ptr, 1024); 2494 + if(ses->serverDomain) 2495 + kfree(ses->serverDomain); 2530 2496 ses->serverDomain = kzalloc(len + 1, GFP_KERNEL); 2531 2497 strncpy(ses->serverDomain, bcc_ptr, len); 2532 2498 bcc_ptr += len; ··· 2766 2728 /* We look for obvious messed up bcc or strings in response so we do not go off 2767 2729 the end since (at least) WIN2K and Windows XP have a major bug in 
not null 2768 2730 terminating last Unicode string in response */ 2731 + if(ses->serverOS) 2732 + kfree(ses->serverOS); 2769 2733 ses->serverOS = 2770 2734 kzalloc(2 * (len + 1), GFP_KERNEL); 2771 2735 cifs_strfromUCS_le(ses->serverOS, ··· 2783 2743 bcc_ptr, 2784 2744 remaining_words 2785 2745 - 1); 2746 + if(ses->serverNOS) 2747 + kfree(ses->serverNOS); 2786 2748 ses->serverNOS = 2787 2749 kzalloc(2 * (len + 1), 2788 2750 GFP_KERNEL); ··· 2802 2760 if (remaining_words > 0) { 2803 2761 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2804 2762 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ 2763 + if(ses->serverDomain) 2764 + kfree(ses->serverDomain); 2805 2765 ses->serverDomain = 2806 2766 kzalloc(2 * 2807 2767 (len + ··· 2821 2777 [1 + (2 * len)] 2822 2778 = 0; 2823 2779 } /* else no more room so create dummy domain string */ 2824 - else 2780 + else { 2781 + if(ses->serverDomain) 2782 + kfree(ses->serverDomain); 2825 2783 ses->serverDomain = 2826 2784 kzalloc(2, 2827 2785 GFP_KERNEL); 2786 + } 2828 2787 } else { /* no room so create dummy domain and NOS string */ 2788 + if(ses->serverDomain); 2789 + kfree(ses->serverDomain); 2829 2790 ses->serverDomain = 2830 2791 kzalloc(2, GFP_KERNEL); 2792 + if(ses->serverNOS) 2793 + kfree(ses->serverNOS); 2831 2794 ses->serverNOS = 2832 2795 kzalloc(2, GFP_KERNEL); 2833 2796 } ··· 2843 2792 if (((long) bcc_ptr + len) - (long) 2844 2793 pByteArea(smb_buffer_response) 2845 2794 <= BCC(smb_buffer_response)) { 2795 + if(ses->serverOS) 2796 + kfree(ses->serverOS); 2846 2797 ses->serverOS = 2847 2798 kzalloc(len + 1, 2848 2799 GFP_KERNEL); ··· 2856 2803 bcc_ptr++; 2857 2804 2858 2805 len = strnlen(bcc_ptr, 1024); 2806 + if(ses->serverNOS) 2807 + kfree(ses->serverNOS); 2859 2808 ses->serverNOS = 2860 2809 kzalloc(len + 1, 2861 2810 GFP_KERNEL); ··· 2867 2812 bcc_ptr++; 2868 2813 2869 2814 len = strnlen(bcc_ptr, 1024); 2815 + if(ses->serverDomain) 2816 + kfree(ses->serverDomain); 2870 2817 
ses->serverDomain = 2871 2818 kzalloc(len + 1, 2872 2819 GFP_KERNEL); ··· 3173 3116 /* We look for obvious messed up bcc or strings in response so we do not go off 3174 3117 the end since (at least) WIN2K and Windows XP have a major bug in not null 3175 3118 terminating last Unicode string in response */ 3119 + if(ses->serverOS) 3120 + kfree(ses->serverOS); 3176 3121 ses->serverOS = 3177 3122 kzalloc(2 * (len + 1), GFP_KERNEL); 3178 3123 cifs_strfromUCS_le(ses->serverOS, ··· 3190 3131 bcc_ptr, 3191 3132 remaining_words 3192 3133 - 1); 3134 + if(ses->serverNOS) 3135 + kfree(ses->serverNOS); 3193 3136 ses->serverNOS = 3194 3137 kzalloc(2 * (len + 1), 3195 3138 GFP_KERNEL); ··· 3208 3147 if (remaining_words > 0) { 3209 3148 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 3210 3149 /* last string not always null terminated (e.g. for Windows XP & 2000) */ 3150 + if(ses->serverDomain) 3151 + kfree(ses->serverDomain); 3211 3152 ses->serverDomain = 3212 3153 kzalloc(2 * 3213 3154 (len + ··· 3235 3172 len)] 3236 3173 = 0; 3237 3174 } /* else no more room so create dummy domain string */ 3238 - else 3175 + else { 3176 + if(ses->serverDomain) 3177 + kfree(ses->serverDomain); 3239 3178 ses->serverDomain = kzalloc(2,GFP_KERNEL); 3179 + } 3240 3180 } else { /* no room so create dummy domain and NOS string */ 3181 + if(ses->serverDomain) 3182 + kfree(ses->serverDomain); 3241 3183 ses->serverDomain = kzalloc(2, GFP_KERNEL); 3184 + if(ses->serverNOS) 3185 + kfree(ses->serverNOS); 3242 3186 ses->serverNOS = kzalloc(2, GFP_KERNEL); 3243 3187 } 3244 3188 } else { /* ASCII */ ··· 3253 3183 if (((long) bcc_ptr + len) - 3254 3184 (long) pByteArea(smb_buffer_response) 3255 3185 <= BCC(smb_buffer_response)) { 3186 + if(ses->serverOS) 3187 + kfree(ses->serverOS); 3256 3188 ses->serverOS = kzalloc(len + 1,GFP_KERNEL); 3257 3189 strncpy(ses->serverOS,bcc_ptr, len); 3258 3190 ··· 3263 3191 bcc_ptr++; 3264 3192 3265 3193 len = strnlen(bcc_ptr, 1024); 3194 + if(ses->serverNOS) 3195 + 
kfree(ses->serverNOS); 3266 3196 ses->serverNOS = kzalloc(len+1,GFP_KERNEL); 3267 3197 strncpy(ses->serverNOS, bcc_ptr, len); 3268 3198 bcc_ptr += len; ··· 3272 3198 bcc_ptr++; 3273 3199 3274 3200 len = strnlen(bcc_ptr, 1024); 3201 + if(ses->serverDomain) 3202 + kfree(ses->serverDomain); 3275 3203 ses->serverDomain = kzalloc(len+1,GFP_KERNEL); 3276 3204 strncpy(ses->serverDomain, bcc_ptr, len); 3277 3205 bcc_ptr += len; ··· 3358 3282 bcc_ptr++; /* align */ 3359 3283 } 3360 3284 3361 - if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 3285 + if(ses->server->secMode & 3286 + (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 3362 3287 smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 3363 3288 3364 3289 if (ses->capabilities & CAP_STATUS32) { ··· 3371 3294 if (ses->capabilities & CAP_UNICODE) { 3372 3295 smb_buffer->Flags2 |= SMBFLG2_UNICODE; 3373 3296 length = 3374 - cifs_strtoUCS((__le16 *) bcc_ptr, tree, 100, nls_codepage); 3375 - bcc_ptr += 2 * length; /* convert num of 16 bit words to bytes */ 3297 + cifs_strtoUCS((__le16 *) bcc_ptr, tree, 3298 + 6 /* max utf8 char length in bytes */ * 3299 + (/* server len*/ + 256 /* share len */), nls_codepage); 3300 + bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ 3376 3301 bcc_ptr += 2; /* skip trailing null */ 3377 3302 } else { /* ASCII */ 3378 3303 strcpy(bcc_ptr, tree);
+8 -4
fs/cifs/file.c
··· 84 84 return FILE_OVERWRITE_IF; 85 85 else if ((flags & O_CREAT) == O_CREAT) 86 86 return FILE_OPEN_IF; 87 + else if ((flags & O_TRUNC) == O_TRUNC) 88 + return FILE_OVERWRITE; 87 89 else 88 90 return FILE_OPEN; 89 91 } ··· 658 656 else 659 657 posix_lock_type = CIFS_WRLCK; 660 658 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */, 661 - length, pfLock->fl_start, 659 + length, pfLock, 662 660 posix_lock_type, wait_flag); 663 661 FreeXid(xid); 664 662 return rc; ··· 706 704 return -EOPNOTSUPP; 707 705 } 708 706 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */, 709 - length, pfLock->fl_start, 707 + length, pfLock, 710 708 posix_lock_type, wait_flag); 711 709 } else 712 710 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, ··· 906 904 if (rc != 0) 907 905 break; 908 906 } 909 - if(experimEnabled || (pTcon->ses->server->secMode & 910 - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0) { 907 + if(experimEnabled || (pTcon->ses->server && 908 + ((pTcon->ses->server->secMode & 909 + (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 910 + == 0))) { 911 911 struct kvec iov[2]; 912 912 unsigned int len; 913 913
-1
fs/ext3/resize.c
··· 767 767 if (input->group != sbi->s_groups_count) { 768 768 ext3_warning(sb, __FUNCTION__, 769 769 "multiple resizers run on filesystem!"); 770 - unlock_super(sb); 771 770 err = -EBUSY; 772 771 goto exit_journal; 773 772 }
+10 -9
fs/namei.c
··· 1080 1080 nd->flags = flags; 1081 1081 nd->depth = 0; 1082 1082 1083 - read_lock(&current->fs->lock); 1084 1083 if (*name=='/') { 1084 + read_lock(&current->fs->lock); 1085 1085 if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) { 1086 1086 nd->mnt = mntget(current->fs->altrootmnt); 1087 1087 nd->dentry = dget(current->fs->altroot); ··· 1092 1092 } 1093 1093 nd->mnt = mntget(current->fs->rootmnt); 1094 1094 nd->dentry = dget(current->fs->root); 1095 + read_unlock(&current->fs->lock); 1095 1096 } else if (dfd == AT_FDCWD) { 1097 + read_lock(&current->fs->lock); 1096 1098 nd->mnt = mntget(current->fs->pwdmnt); 1097 1099 nd->dentry = dget(current->fs->pwd); 1100 + read_unlock(&current->fs->lock); 1098 1101 } else { 1099 1102 struct dentry *dentry; 1100 1103 1101 1104 file = fget_light(dfd, &fput_needed); 1102 1105 retval = -EBADF; 1103 1106 if (!file) 1104 - goto unlock_fail; 1107 + goto out_fail; 1105 1108 1106 1109 dentry = file->f_dentry; 1107 1110 1108 1111 retval = -ENOTDIR; 1109 1112 if (!S_ISDIR(dentry->d_inode->i_mode)) 1110 - goto fput_unlock_fail; 1113 + goto fput_fail; 1111 1114 1112 1115 retval = file_permission(file, MAY_EXEC); 1113 1116 if (retval) 1114 - goto fput_unlock_fail; 1117 + goto fput_fail; 1115 1118 1116 1119 nd->mnt = mntget(file->f_vfsmnt); 1117 1120 nd->dentry = dget(dentry); 1118 1121 1119 1122 fput_light(file, fput_needed); 1120 1123 } 1121 - read_unlock(&current->fs->lock); 1122 1124 current->total_link_count = 0; 1123 1125 retval = link_path_walk(name, nd); 1124 1126 out: ··· 1129 1127 nd->dentry->d_inode)) 1130 1128 audit_inode(name, nd->dentry->d_inode, flags); 1131 1129 } 1130 + out_fail: 1132 1131 return retval; 1133 1132 1134 - fput_unlock_fail: 1133 + fput_fail: 1135 1134 fput_light(file, fput_needed); 1136 - unlock_fail: 1137 - read_unlock(&current->fs->lock); 1138 - return retval; 1135 + goto out_fail; 1139 1136 } 1140 1137 1141 1138 int fastcall path_lookup(const char *name, unsigned int flags,
+1 -3
include/asm-alpha/smp.h
··· 45 45 #define hard_smp_processor_id() __hard_smp_processor_id() 46 46 #define raw_smp_processor_id() (current_thread_info()->cpu) 47 47 48 - extern cpumask_t cpu_present_mask; 49 - extern cpumask_t cpu_online_map; 50 48 extern int smp_num_cpus; 51 - #define cpu_possible_map cpu_present_mask 49 + #define cpu_possible_map cpu_present_map 52 50 53 51 int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu); 54 52
+1 -1
include/asm-arm/arch-ixp23xx/memory.h
··· 49 49 { 50 50 extern unsigned int processor_id; 51 51 52 - if (((processor_id & 15) >= 2) || machine_is_roadrunner()) 52 + if (((processor_id & 15) >= 4) || machine_is_roadrunner()) 53 53 return 1; 54 54 55 55 return 0;
+1 -1
include/asm-arm/arch-l7200/serial_l7200.h
··· 28 28 #define UARTDR 0x00 /* Tx/Rx data */ 29 29 #define RXSTAT 0x04 /* Rx status */ 30 30 #define H_UBRLCR 0x08 /* mode register high */ 31 - #define M_UBRLCR 0x0C /* mode reg mid (MSB of buad)*/ 31 + #define M_UBRLCR 0x0C /* mode reg mid (MSB of baud)*/ 32 32 #define L_UBRLCR 0x10 /* mode reg low (LSB of baud)*/ 33 33 #define UARTCON 0x14 /* control register */ 34 34 #define UARTFLG 0x18 /* flag register */
+1 -1
include/asm-arm/arch-l7200/uncompress.h
··· 6 6 * Changelog: 7 7 * 05-01-2000 SJH Created 8 8 * 05-13-2000 SJH Filled in function bodies 9 - * 07-26-2000 SJH Removed hard coded buad rate 9 + * 07-26-2000 SJH Removed hard coded baud rate 10 10 */ 11 11 12 12 #include <asm/hardware.h>
+6
include/asm-arm/system.h
··· 127 127 } 128 128 #endif 129 129 130 + #if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) 131 + #define cpu_is_xscale() 0 132 + #else 133 + #define cpu_is_xscale() 1 134 + #endif 135 + 130 136 #define set_cr(x) \ 131 137 __asm__ __volatile__( \ 132 138 "mcr p15, 0, %0, c1, c0, 0 @ set CR" \
+1 -10
include/asm-generic/pgtable.h
··· 159 159 #define lazy_mmu_prot_update(pte) do { } while (0) 160 160 #endif 161 161 162 - #ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE 162 + #ifndef __HAVE_ARCH_MOVE_PTE 163 163 #define move_pte(pte, prot, old_addr, new_addr) (pte) 164 - #else 165 - #define move_pte(pte, prot, old_addr, new_addr) \ 166 - ({ \ 167 - pte_t newpte = (pte); \ 168 - if (pte_present(pte) && pfn_valid(pte_pfn(pte)) && \ 169 - pte_page(pte) == ZERO_PAGE(old_addr)) \ 170 - newpte = mk_pte(ZERO_PAGE(new_addr), (prot)); \ 171 - newpte; \ 172 - }) 173 164 #endif 174 165 175 166 /*
+1
include/asm-mips/addrspace.h
··· 129 129 #if defined (CONFIG_CPU_R4300) \ 130 130 || defined (CONFIG_CPU_R4X00) \ 131 131 || defined (CONFIG_CPU_R5000) \ 132 + || defined (CONFIG_CPU_RM7000) \ 132 133 || defined (CONFIG_CPU_NEVADA) \ 133 134 || defined (CONFIG_CPU_TX49XX) \ 134 135 || defined (CONFIG_CPU_MIPS64)
+5 -1
include/asm-mips/cpu.h
··· 51 51 #define PRID_IMP_R4300 0x0b00 52 52 #define PRID_IMP_VR41XX 0x0c00 53 53 #define PRID_IMP_R12000 0x0e00 54 + #define PRID_IMP_R14000 0x0f00 54 55 #define PRID_IMP_R8000 0x1000 55 56 #define PRID_IMP_PR4450 0x1200 56 57 #define PRID_IMP_R4600 0x2000 ··· 88 87 #define PRID_IMP_24K 0x9300 89 88 #define PRID_IMP_34K 0x9500 90 89 #define PRID_IMP_24KE 0x9600 90 + #define PRID_IMP_74K 0x9700 91 91 92 92 /* 93 93 * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE ··· 198 196 #define CPU_34K 60 199 197 #define CPU_PR4450 61 200 198 #define CPU_SB1A 62 201 - #define CPU_LAST 62 199 + #define CPU_74K 63 200 + #define CPU_R14000 64 201 + #define CPU_LAST 64 202 202 203 203 /* 204 204 * ISA Level encodings
+12 -10
include/asm-mips/delay.h
··· 19 19 { 20 20 if (sizeof(long) == 4) 21 21 __asm__ __volatile__ ( 22 - ".set\tnoreorder\n" 23 - "1:\tbnez\t%0,1b\n\t" 24 - "subu\t%0,1\n\t" 25 - ".set\treorder" 22 + " .set noreorder \n" 23 + " .align 3 \n" 24 + "1: bnez %0, 1b \n" 25 + " subu %0, 1 \n" 26 + " .set reorder \n" 26 27 : "=r" (loops) 27 28 : "0" (loops)); 28 29 else if (sizeof(long) == 8) 29 30 __asm__ __volatile__ ( 30 - ".set\tnoreorder\n" 31 - "1:\tbnez\t%0,1b\n\t" 32 - "dsubu\t%0,1\n\t" 33 - ".set\treorder" 34 - :"=r" (loops) 35 - :"0" (loops)); 31 + " .set noreorder \n" 32 + " .align 3 \n" 33 + "1: bnez %0, 1b \n" 34 + " dsubu %0, 1 \n" 35 + " .set reorder \n" 36 + : "=r" (loops) 37 + : "0" (loops)); 36 38 } 37 39 38 40
+116 -25
include/asm-mips/futex.h
··· 7 7 #include <linux/futex.h> 8 8 #include <asm/errno.h> 9 9 #include <asm/uaccess.h> 10 + #include <asm/war.h> 10 11 11 12 #ifdef CONFIG_SMP 12 13 #define __FUTEX_SMP_SYNC " sync \n" ··· 17 16 18 17 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 19 18 { \ 20 - __asm__ __volatile__( \ 21 - " .set push \n" \ 22 - " .set noat \n" \ 23 - " .set mips3 \n" \ 24 - "1: ll %1, (%3) # __futex_atomic_op1 \n" \ 25 - " .set mips0 \n" \ 26 - " " insn " \n" \ 27 - " .set mips3 \n" \ 28 - "2: sc $1, (%3) \n" \ 29 - " beqzl $1, 1b \n" \ 30 - __FUTEX_SMP_SYNC \ 31 - "3: \n" \ 32 - " .set pop \n" \ 33 - " .set mips0 \n" \ 34 - " .section .fixup,\"ax\" \n" \ 35 - "4: li %0, %5 \n" \ 36 - " j 2b \n" \ 37 - " .previous \n" \ 38 - " .section __ex_table,\"a\" \n" \ 39 - " "__UA_ADDR "\t1b, 4b \n" \ 40 - " "__UA_ADDR "\t2b, 4b \n" \ 41 - " .previous \n" \ 42 - : "=r" (ret), "=r" (oldval) \ 43 - : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \ 19 + if (cpu_has_llsc && R10000_LLSC_WAR) { \ 20 + __asm__ __volatile__( \ 21 + " .set push \n" \ 22 + " .set noat \n" \ 23 + " .set mips3 \n" \ 24 + "1: ll %1, (%3) # __futex_atomic_op \n" \ 25 + " .set mips0 \n" \ 26 + " " insn " \n" \ 27 + " .set mips3 \n" \ 28 + "2: sc $1, (%3) \n" \ 29 + " beqzl $1, 1b \n" \ 30 + __FUTEX_SMP_SYNC \ 31 + "3: \n" \ 32 + " .set pop \n" \ 33 + " .set mips0 \n" \ 34 + " .section .fixup,\"ax\" \n" \ 35 + "4: li %0, %5 \n" \ 36 + " j 2b \n" \ 37 + " .previous \n" \ 38 + " .section __ex_table,\"a\" \n" \ 39 + " "__UA_ADDR "\t1b, 4b \n" \ 40 + " "__UA_ADDR "\t2b, 4b \n" \ 41 + " .previous \n" \ 42 + : "=r" (ret), "=r" (oldval) \ 43 + : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \ 44 + } else if (cpu_has_llsc) { \ 45 + __asm__ __volatile__( \ 46 + " .set push \n" \ 47 + " .set noat \n" \ 48 + " .set mips3 \n" \ 49 + "1: ll %1, (%3) # __futex_atomic_op \n" \ 50 + " .set mips0 \n" \ 51 + " " insn " \n" \ 52 + " .set mips3 \n" \ 53 + "2: sc $1, (%3) \n" \ 54 + " beqz $1, 1b \n" \ 55 + 
__FUTEX_SMP_SYNC \ 56 + "3: \n" \ 57 + " .set pop \n" \ 58 + " .set mips0 \n" \ 59 + " .section .fixup,\"ax\" \n" \ 60 + "4: li %0, %5 \n" \ 61 + " j 2b \n" \ 62 + " .previous \n" \ 63 + " .section __ex_table,\"a\" \n" \ 64 + " "__UA_ADDR "\t1b, 4b \n" \ 65 + " "__UA_ADDR "\t2b, 4b \n" \ 66 + " .previous \n" \ 67 + : "=r" (ret), "=r" (oldval) \ 68 + : "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \ 69 + } else \ 70 + ret = -ENOSYS; \ 44 71 } 45 72 46 73 static inline int ··· 131 102 static inline int 132 103 futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) 133 104 { 134 - return -ENOSYS; 105 + int retval; 106 + 107 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 108 + return -EFAULT; 109 + 110 + if (cpu_has_llsc && R10000_LLSC_WAR) { 111 + __asm__ __volatile__( 112 + "# futex_atomic_cmpxchg_inatomic \n" 113 + " .set push \n" 114 + " .set noat \n" 115 + " .set mips3 \n" 116 + "1: ll %0, %2 \n" 117 + " bne %0, %z3, 3f \n" 118 + " .set mips0 \n" 119 + " move $1, %z4 \n" 120 + " .set mips3 \n" 121 + "2: sc $1, %1 \n" 122 + " beqzl $1, 1b \n" 123 + __FUTEX_SMP_SYNC 124 + "3: \n" 125 + " .set pop \n" 126 + " .section .fixup,\"ax\" \n" 127 + "4: li %0, %5 \n" 128 + " j 3b \n" 129 + " .previous \n" 130 + " .section __ex_table,\"a\" \n" 131 + " "__UA_ADDR "\t1b, 4b \n" 132 + " "__UA_ADDR "\t2b, 4b \n" 133 + " .previous \n" 134 + : "=&r" (retval), "=R" (*uaddr) 135 + : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 136 + : "memory"); 137 + } else if (cpu_has_llsc) { 138 + __asm__ __volatile__( 139 + "# futex_atomic_cmpxchg_inatomic \n" 140 + " .set push \n" 141 + " .set noat \n" 142 + " .set mips3 \n" 143 + "1: ll %0, %2 \n" 144 + " bne %0, %z3, 3f \n" 145 + " .set mips0 \n" 146 + " move $1, %z4 \n" 147 + " .set mips3 \n" 148 + "2: sc $1, %1 \n" 149 + " beqz $1, 1b \n" 150 + __FUTEX_SMP_SYNC 151 + "3: \n" 152 + " .set pop \n" 153 + " .section .fixup,\"ax\" \n" 154 + "4: li %0, %5 \n" 155 + " j 3b \n" 156 + " .previous \n" 157 + " 
.section __ex_table,\"a\" \n" 158 + " "__UA_ADDR "\t1b, 4b \n" 159 + " "__UA_ADDR "\t2b, 4b \n" 160 + " .previous \n" 161 + : "=&r" (retval), "=R" (*uaddr) 162 + : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 163 + : "memory"); 164 + } else 165 + return -ENOSYS; 166 + 167 + return retval; 135 168 } 136 169 137 170 #endif
+28 -5
include/asm-mips/inst.h
··· 6 6 * for more details. 7 7 * 8 8 * Copyright (C) 1996, 2000 by Ralf Baechle 9 + * Copyright (C) 2006 by Thiemo Seufer 9 10 */ 10 11 #ifndef _ASM_INST_H 11 12 #define _ASM_INST_H ··· 22 21 cop0_op, cop1_op, cop2_op, cop1x_op, 23 22 beql_op, bnel_op, blezl_op, bgtzl_op, 24 23 daddi_op, daddiu_op, ldl_op, ldr_op, 25 - major_1c_op, jalx_op, major_1e_op, major_1f_op, 24 + spec2_op, jalx_op, mdmx_op, spec3_op, 26 25 lb_op, lh_op, lwl_op, lw_op, 27 26 lbu_op, lhu_op, lwr_op, lwu_op, 28 27 sb_op, sh_op, swl_op, sw_op, 29 28 sdl_op, sdr_op, swr_op, cache_op, 30 29 ll_op, lwc1_op, lwc2_op, pref_op, 31 30 lld_op, ldc1_op, ldc2_op, ld_op, 32 - sc_op, swc1_op, swc2_op, rdhwr_op, 31 + sc_op, swc1_op, swc2_op, major_3b_op, 33 32 scd_op, sdc1_op, sdc2_op, sd_op 34 33 }; 35 34 ··· 38 37 */ 39 38 enum spec_op { 40 39 sll_op, movc_op, srl_op, sra_op, 41 - sllv_op, srlv_op, srav_op, spec1_unused_op, /* Opcode 0x07 is unused */ 40 + sllv_op, pmon_op, srlv_op, srav_op, 42 41 jr_op, jalr_op, movz_op, movn_op, 43 42 syscall_op, break_op, spim_op, sync_op, 44 43 mfhi_op, mthi_op, mflo_op, mtlo_op, ··· 53 52 teq_op, spec5_unused_op, tne_op, spec6_unused_op, 54 53 dsll_op, spec7_unused_op, dsrl_op, dsra_op, 55 54 dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op 55 + }; 56 + 57 + /* 58 + * func field of spec2 opcode. 59 + */ 60 + enum spec2_op { 61 + madd_op, maddu_op, mul_op, spec2_3_unused_op, 62 + msub_op, msubu_op, /* more unused ops */ 63 + clz_op = 0x20, clo_op, 64 + dclz_op = 0x24, dclo_op, 65 + sdbpp_op = 0x3f 66 + }; 67 + 68 + /* 69 + * func field of spec3 opcode. 70 + */ 71 + enum spec3_op { 72 + ext_op, dextm_op, dextu_op, dext_op, 73 + ins_op, dinsm_op, dinsu_op, dins_op, 74 + bshfl_op = 0x20, 75 + dbshfl_op = 0x24, 76 + rdhwr_op = 0x3f 56 77 }; 57 78 58 79 /* ··· 174 151 * func field for mad opcodes (MIPS IV). 
175 152 */ 176 153 enum mad_func { 177 - madd_op = 0x08, msub_op = 0x0a, 178 - nmadd_op = 0x0c, nmsub_op = 0x0e 154 + madd_fp_op = 0x08, msub_fp_op = 0x0a, 155 + nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e 179 156 }; 180 157 181 158 /*
+1 -1
include/asm-mips/mipsregs.h
··· 291 291 #define ST0_DL (_ULCAST_(1) << 24) 292 292 293 293 /* 294 - * Enable the MIPS DSP ASE 294 + * Enable the MIPS MDMX and DSP ASEs 295 295 */ 296 296 #define ST0_MX 0x01000000 297 297
+2
include/asm-mips/page.h
··· 139 139 140 140 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 141 141 142 + #ifndef CONFIG_SPARSEMEM 142 143 #ifndef CONFIG_NEED_MULTIPLE_NODES 143 144 #define pfn_valid(pfn) ((pfn) < max_mapnr) 145 + #endif 144 146 #endif 145 147 146 148 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+41 -22
include/asm-mips/pgtable-32.h
··· 177 177 ((swp_entry_t) { ((type) << 10) | ((offset) << 15) }) 178 178 179 179 /* 180 - * Bits 0, 1, 2, 9 and 10 are taken, split up the 27 bits of offset 181 - * into this range: 180 + * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range: 182 181 */ 183 - #define PTE_FILE_MAX_BITS 27 182 + #define PTE_FILE_MAX_BITS 28 184 183 185 - #define pte_to_pgoff(_pte) \ 186 - ((((_pte).pte >> 3) & 0x3f ) + (((_pte).pte >> 11) << 8 )) 184 + #define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \ 185 + (((_pte).pte >> 2 ) & 0x38) | \ 186 + (((_pte).pte >> 10) << 6 )) 187 187 188 - #define pgoff_to_pte(off) \ 189 - ((pte_t) { (((off) & 0x3f) << 3) + (((off) >> 8) << 11) + _PAGE_FILE }) 188 + #define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \ 189 + (((off) & 0x38) << 2 ) | \ 190 + (((off) >> 6 ) << 10) | \ 191 + _PAGE_FILE }) 190 192 191 193 #else 192 194 193 195 /* Swap entries must have VALID and GLOBAL bits cleared. */ 194 - #define __swp_type(x) (((x).val >> 8) & 0x1f) 195 - #define __swp_offset(x) ((x).val >> 13) 196 + #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 197 + #define __swp_type(x) (((x).val >> 2) & 0x1f) 198 + #define __swp_offset(x) ((x).val >> 7) 196 199 #define __swp_entry(type,offset) \ 197 - ((swp_entry_t) { ((type) << 8) | ((offset) << 13) }) 200 + ((swp_entry_t) { ((type) << 2) | ((offset) << 7) }) 201 + #else 202 + #define __swp_type(x) (((x).val >> 8) & 0x1f) 203 + #define __swp_offset(x) ((x).val >> 13) 204 + #define __swp_entry(type,offset) \ 205 + ((swp_entry_t) { ((type) << 8) | ((offset) << 13) }) 206 + #endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */ 198 207 208 + #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 199 209 /* 200 - * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset 201 - * into this range: 210 + * Bits 0 and 1 of pte_high are taken, use the rest for the page offset... 
202 211 */ 203 - #define PTE_FILE_MAX_BITS 27 212 + #define PTE_FILE_MAX_BITS 30 204 213 205 - #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) 206 - /* fixme */ 207 - #define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f)) 208 - #define pgoff_to_pte(off) \ 209 - ((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)}) 214 + #define pte_to_pgoff(_pte) ((_pte).pte_high >> 2) 215 + #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 }) 210 216 211 217 #else 212 - #define pte_to_pgoff(_pte) \ 213 - ((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 )) 218 + /* 219 + * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range: 220 + */ 221 + #define PTE_FILE_MAX_BITS 28 214 222 215 - #define pgoff_to_pte(off) \ 216 - ((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE }) 223 + #define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \ 224 + (((_pte).pte >> 2) & 0x8) | \ 225 + (((_pte).pte >> 8) << 4)) 226 + 227 + #define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \ 228 + (((off) & 0x8) << 2) | \ 229 + (((off) >> 4) << 8) | \ 230 + _PAGE_FILE }) 217 231 #endif 218 232 219 233 #endif 220 234 235 + #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 236 + #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high }) 237 + #define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val }) 238 + #else 221 239 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 222 240 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 241 + #endif 223 242 224 243 #endif /* _ASM_PGTABLE_32_H */
+5 -8
include/asm-mips/pgtable-64.h
··· 224 224 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 225 225 226 226 /* 227 - * Bits 0, 1, 2, 7 and 8 are taken, split up the 32 bits of offset 228 - * into this range: 227 + * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to 228 + * make things easier, and only use the upper 56 bits for the page offset... 229 229 */ 230 - #define PTE_FILE_MAX_BITS 32 230 + #define PTE_FILE_MAX_BITS 56 231 231 232 - #define pte_to_pgoff(_pte) \ 233 - ((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 )) 234 - 235 - #define pgoff_to_pte(off) \ 236 - ((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE }) 232 + #define pte_to_pgoff(_pte) ((_pte).pte >> 8) 233 + #define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE }) 237 234 238 235 #endif /* _ASM_PGTABLE_64_H */
+61 -42
include/asm-mips/pgtable.h
··· 70 70 #define ZERO_PAGE(vaddr) \ 71 71 (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) 72 72 73 - #define __HAVE_ARCH_MULTIPLE_ZERO_PAGE 73 + #define __HAVE_ARCH_MOVE_PTE 74 + #define move_pte(pte, prot, old_addr, new_addr) \ 75 + ({ \ 76 + pte_t newpte = (pte); \ 77 + if (pte_present(pte) && pfn_valid(pte_pfn(pte)) && \ 78 + pte_page(pte) == ZERO_PAGE(old_addr)) \ 79 + newpte = mk_pte(ZERO_PAGE(new_addr), (prot)); \ 80 + newpte; \ 81 + }) 74 82 75 83 extern void paging_init(void); 76 84 ··· 90 82 #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) 91 83 #define pmd_page_kernel(pmd) pmd_val(pmd) 92 84 93 - #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) 94 - #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 95 - 96 85 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) 86 + 87 + #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) 88 + #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) 89 + 97 90 static inline void set_pte(pte_t *ptep, pte_t pte) 98 91 { 99 92 ptep->pte_high = pte.pte_high; ··· 102 93 ptep->pte_low = pte.pte_low; 103 94 //printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low); 104 95 105 - if (pte_val(pte) & _PAGE_GLOBAL) { 96 + if (pte.pte_low & _PAGE_GLOBAL) { 106 97 pte_t *buddy = ptep_buddy(ptep); 107 98 /* 108 99 * Make sure the buddy is global too (if it's !none, 109 100 * it better already be global) 110 101 */ 111 - if (pte_none(*buddy)) 112 - buddy->pte_low |= _PAGE_GLOBAL; 102 + if (pte_none(*buddy)) { 103 + buddy->pte_low |= _PAGE_GLOBAL; 104 + buddy->pte_high |= _PAGE_GLOBAL; 105 + } 113 106 } 114 107 } 115 108 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 116 109 117 110 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 118 111 { 112 + pte_t null = __pte(0); 113 + 119 114 /* Preserve global status for the pair */ 120 - if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) 121 - 
set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL)); 122 - else 123 - set_pte_at(mm, addr, ptep, __pte(0)); 115 + if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) 116 + null.pte_low = null.pte_high = _PAGE_GLOBAL; 117 + 118 + set_pte_at(mm, addr, ptep, null); 124 119 } 125 120 #else 121 + 122 + #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) 123 + #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 124 + 126 125 /* 127 126 * Certain architectures need to do special things when pte's 128 127 * within a page table are directly modified. Thus, the following ··· 191 174 */ 192 175 static inline int pte_user(pte_t pte) { BUG(); return 0; } 193 176 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) 194 - static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_READ; } 195 - static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_WRITE; } 196 - static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_MODIFIED; } 197 - static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } 198 - static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } 177 + static inline int pte_read(pte_t pte) { return pte.pte_low & _PAGE_READ; } 178 + static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; } 179 + static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; } 180 + static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } 181 + static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; } 182 + 199 183 static inline pte_t pte_wrprotect(pte_t pte) 200 184 { 201 - (pte).pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); 202 - (pte).pte_high &= ~_PAGE_SILENT_WRITE; 185 + pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); 186 + pte.pte_high &= ~_PAGE_SILENT_WRITE; 203 187 return pte; 204 188 } 205 189 206 190 static inline pte_t pte_rdprotect(pte_t pte) 207 191 { 208 - (pte).pte_low &= ~(_PAGE_READ | 
_PAGE_SILENT_READ); 209 - (pte).pte_high &= ~_PAGE_SILENT_READ; 192 + pte.pte_low &= ~(_PAGE_READ | _PAGE_SILENT_READ); 193 + pte.pte_high &= ~_PAGE_SILENT_READ; 210 194 return pte; 211 195 } 212 196 213 197 static inline pte_t pte_mkclean(pte_t pte) 214 198 { 215 - (pte).pte_low &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); 216 - (pte).pte_high &= ~_PAGE_SILENT_WRITE; 199 + pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); 200 + pte.pte_high &= ~_PAGE_SILENT_WRITE; 217 201 return pte; 218 202 } 219 203 220 204 static inline pte_t pte_mkold(pte_t pte) 221 205 { 222 - (pte).pte_low &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); 223 - (pte).pte_high &= ~_PAGE_SILENT_READ; 206 + pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ); 207 + pte.pte_high &= ~_PAGE_SILENT_READ; 224 208 return pte; 225 209 } 226 210 227 211 static inline pte_t pte_mkwrite(pte_t pte) 228 212 { 229 - (pte).pte_low |= _PAGE_WRITE; 230 - if ((pte).pte_low & _PAGE_MODIFIED) { 231 - (pte).pte_low |= _PAGE_SILENT_WRITE; 232 - (pte).pte_high |= _PAGE_SILENT_WRITE; 213 + pte.pte_low |= _PAGE_WRITE; 214 + if (pte.pte_low & _PAGE_MODIFIED) { 215 + pte.pte_low |= _PAGE_SILENT_WRITE; 216 + pte.pte_high |= _PAGE_SILENT_WRITE; 233 217 } 234 218 return pte; 235 219 } 236 220 237 221 static inline pte_t pte_mkread(pte_t pte) 238 222 { 239 - (pte).pte_low |= _PAGE_READ; 240 - if ((pte).pte_low & _PAGE_ACCESSED) { 241 - (pte).pte_low |= _PAGE_SILENT_READ; 242 - (pte).pte_high |= _PAGE_SILENT_READ; 223 + pte.pte_low |= _PAGE_READ; 224 + if (pte.pte_low & _PAGE_ACCESSED) { 225 + pte.pte_low |= _PAGE_SILENT_READ; 226 + pte.pte_high |= _PAGE_SILENT_READ; 243 227 } 244 228 return pte; 245 229 } 246 230 247 231 static inline pte_t pte_mkdirty(pte_t pte) 248 232 { 249 - (pte).pte_low |= _PAGE_MODIFIED; 250 - if ((pte).pte_low & _PAGE_WRITE) { 251 - (pte).pte_low |= _PAGE_SILENT_WRITE; 252 - (pte).pte_high |= _PAGE_SILENT_WRITE; 233 + pte.pte_low |= _PAGE_MODIFIED; 234 + if (pte.pte_low & _PAGE_WRITE) { 235 + pte.pte_low |= 
_PAGE_SILENT_WRITE; 236 + pte.pte_high |= _PAGE_SILENT_WRITE; 253 237 } 254 238 return pte; 255 239 } 256 240 257 241 static inline pte_t pte_mkyoung(pte_t pte) 258 242 { 259 - (pte).pte_low |= _PAGE_ACCESSED; 260 - if ((pte).pte_low & _PAGE_READ) 261 - (pte).pte_low |= _PAGE_SILENT_READ; 262 - (pte).pte_high |= _PAGE_SILENT_READ; 243 + pte.pte_low |= _PAGE_ACCESSED; 244 + if (pte.pte_low & _PAGE_READ) 245 + pte.pte_low |= _PAGE_SILENT_READ; 246 + pte.pte_high |= _PAGE_SILENT_READ; 263 247 return pte; 264 248 } 265 249 #else ··· 353 335 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) 354 336 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 355 337 { 356 - pte.pte_low &= _PAGE_CHG_MASK; 357 - pte.pte_low |= pgprot_val(newprot); 338 + pte.pte_low &= _PAGE_CHG_MASK; 339 + pte.pte_high &= ~0x3f; 340 + pte.pte_low |= pgprot_val(newprot); 358 341 pte.pte_high |= pgprot_val(newprot) & 0x3f; 359 342 return pte; 360 343 }
+8 -2
include/asm-mips/sigcontext.h
··· 55 55 struct sigcontext { 56 56 unsigned long sc_regs[32]; 57 57 unsigned long sc_fpregs[32]; 58 - unsigned long sc_hi[4]; 59 - unsigned long sc_lo[4]; 58 + unsigned long sc_mdhi; 59 + unsigned long sc_hi1; 60 + unsigned long sc_hi2; 61 + unsigned long sc_hi3; 62 + unsigned long sc_mdlo; 63 + unsigned long sc_lo1; 64 + unsigned long sc_lo2; 65 + unsigned long sc_lo3; 60 66 unsigned long sc_pc; 61 67 unsigned int sc_fpc_csr; 62 68 unsigned int sc_used_math;
+2 -3
include/asm-mips/smp.h
··· 48 48 #define SMP_CALL_FUNCTION 0x2 49 49 50 50 extern cpumask_t phys_cpu_present_map; 51 - extern cpumask_t cpu_online_map; 52 51 #define cpu_possible_map phys_cpu_present_map 53 52 54 53 extern cpumask_t cpu_callout_map; ··· 85 86 extern void plat_smp_setup(void); 86 87 87 88 /* 88 - * Called after init_IRQ but before __cpu_up. 89 + * Called in smp_prepare_cpus. 89 90 */ 90 - extern void prom_prepare_cpus(unsigned int max_cpus); 91 + extern void plat_prepare_cpus(unsigned int max_cpus); 91 92 92 93 /* 93 94 * Last chance for the board code to finish SMP initialization before
+14
include/asm-mips/sparsemem.h
··· 1 + #ifndef _MIPS_SPARSEMEM_H 2 + #define _MIPS_SPARSEMEM_H 3 + #ifdef CONFIG_SPARSEMEM 4 + 5 + /* 6 + * SECTION_SIZE_BITS 2^N: how big each section will be 7 + * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space 8 + */ 9 + #define SECTION_SIZE_BITS 28 10 + #define MAX_PHYSMEM_BITS 35 11 + 12 + #endif /* CONFIG_SPARSEMEM */ 13 + #endif /* _MIPS_SPARSEMEM_H */ 14 +
+2 -2
include/asm-s390/lowcore.h
··· 98 98 #define __LC_KERNEL_ASCE 0xD58 99 99 #define __LC_USER_ASCE 0xD60 100 100 #define __LC_PANIC_STACK 0xD68 101 - #define __LC_CPUID 0xD90 102 - #define __LC_CPUADDR 0xD98 101 + #define __LC_CPUID 0xD80 102 + #define __LC_CPUADDR 0xD88 103 103 #define __LC_IPLDEV 0xDB8 104 104 #define __LC_JIFFY_TIMER 0xDC0 105 105 #define __LC_CURRENT 0xDD8
+17
include/asm-sparc64/pgtable.h
··· 689 689 #define pte_clear(mm,addr,ptep) \ 690 690 set_pte_at((mm), (addr), (ptep), __pte(0UL)) 691 691 692 + #ifdef DCACHE_ALIASING_POSSIBLE 693 + #define __HAVE_ARCH_MOVE_PTE 694 + #define move_pte(pte, prot, old_addr, new_addr) \ 695 + ({ \ 696 + pte_t newpte = (pte); \ 697 + if (tlb_type != hypervisor && pte_present(pte)) { \ 698 + unsigned long this_pfn = pte_pfn(pte); \ 699 + \ 700 + if (pfn_valid(this_pfn) && \ 701 + (((old_addr) ^ (new_addr)) & (1 << 13))) \ 702 + flush_dcache_page_all(current->mm, \ 703 + pfn_to_page(this_pfn)); \ 704 + } \ 705 + newpte; \ 706 + }) 707 + #endif 708 + 692 709 extern pgd_t swapper_pg_dir[2048]; 693 710 extern pmd_t swapper_low_pmd_dir[2048]; 694 711
+6
include/asm-um/irqflags.h
··· 1 + #ifndef __UM_IRQFLAGS_H 2 + #define __UM_IRQFLAGS_H 3 + 4 + /* Empty for now */ 5 + 6 + #endif
+3 -3
include/asm-um/uaccess.h
··· 41 41 42 42 #define __get_user(x, ptr) \ 43 43 ({ \ 44 - const __typeof__(ptr) __private_ptr = ptr; \ 44 + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \ 45 45 __typeof__(x) __private_val; \ 46 46 int __private_ret = -EFAULT; \ 47 47 (x) = (__typeof__(*(__private_ptr)))0; \ 48 - if (__copy_from_user((void *) &__private_val, (__private_ptr), \ 48 + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\ 49 49 sizeof(*(__private_ptr))) == 0) { \ 50 50 (x) = (__typeof__(*(__private_ptr))) __private_val; \ 51 51 __private_ret = 0; \ ··· 62 62 63 63 #define __put_user(x, ptr) \ 64 64 ({ \ 65 - __typeof__(ptr) __private_ptr = ptr; \ 65 + __typeof__(*(ptr)) __user *__private_ptr = ptr; \ 66 66 __typeof__(*(__private_ptr)) __private_val; \ 67 67 int __private_ret = -EFAULT; \ 68 68 __private_val = (__typeof__(*(__private_ptr))) (x); \
+1 -1
include/asm-x86_64/elf.h
··· 159 159 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 160 160 161 161 /* 1GB for 64bit, 8MB for 32bit */ 162 - #define STACK_RND_MASK (is_compat_task() ? 0x7ff : 0x3fffff) 162 + #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) 163 163 164 164 #endif 165 165
+5 -8
include/linux/input.h
··· 345 345 #define KEY_SAVE 234 346 346 #define KEY_DOCUMENTS 235 347 347 348 + #define KEY_BATTERY 236 349 + 348 350 #define KEY_UNKNOWN 240 349 351 350 352 #define BTN_MISC 0x100 ··· 579 577 * Switch events 580 578 */ 581 579 582 - #define SW_0 0x00 583 - #define SW_1 0x01 584 - #define SW_2 0x02 585 - #define SW_3 0x03 586 - #define SW_4 0x04 587 - #define SW_5 0x05 588 - #define SW_6 0x06 589 - #define SW_7 0x07 580 + #define SW_LID 0x00 /* set = lid shut */ 581 + #define SW_TABLET_MODE 0x01 /* set = tablet mode */ 582 + #define SW_HEADPHONE_INSERT 0x02 /* set = inserted */ 590 583 #define SW_MAX 0x0f 591 584 592 585 /*
+2 -2
include/linux/m48t86.h
··· 11 11 12 12 struct m48t86_ops 13 13 { 14 - void (*writeb)(unsigned char value, unsigned long addr); 15 - unsigned char (*readb)(unsigned long addr); 14 + void (*writebyte)(unsigned char value, unsigned long addr); 15 + unsigned char (*readbyte)(unsigned long addr); 16 16 };
+1
include/linux/mmzone.h
··· 15 15 #include <linux/seqlock.h> 16 16 #include <linux/nodemask.h> 17 17 #include <asm/atomic.h> 18 + #include <asm/page.h> 18 19 19 20 /* Free memory management - zoned buddy allocator. */ 20 21 #ifndef CONFIG_FORCE_MAX_ZONEORDER
+1
include/linux/pci_ids.h
··· 1240 1240 #define PCI_DEVICE_ID_VIA_8380_0 0x0204 1241 1241 #define PCI_DEVICE_ID_VIA_3238_0 0x0238 1242 1242 #define PCI_DEVICE_ID_VIA_PT880 0x0258 1243 + #define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308 1243 1244 #define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259 1244 1245 #define PCI_DEVICE_ID_VIA_3269_0 0x0269 1245 1246 #define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
-5
include/linux/vt_kern.h
··· 73 73 int vt_waitactive(int vt); 74 74 void change_console(struct vc_data *new_vc); 75 75 void reset_vc(struct vc_data *vc); 76 - #ifdef CONFIG_VT 77 - int is_console_suspend_safe(void); 78 - #else 79 - static inline int is_console_suspend_safe(void) { return 1; } 80 - #endif 81 76 82 77 /* 83 78 * vc_screen.c shares this temporary buffer with the console write code so that
+2 -1
include/net/compat.h
··· 3 3 4 4 #include <linux/config.h> 5 5 6 + struct sock; 7 + 6 8 #if defined(CONFIG_COMPAT) 7 9 8 10 #include <linux/compat.h> ··· 25 23 compat_int_t cmsg_type; 26 24 }; 27 25 28 - struct sock; 29 26 extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *); 30 27 31 28 #else /* defined(CONFIG_COMPAT) */
+6
kernel/hrtimer.c
··· 456 456 457 457 return ret; 458 458 } 459 + EXPORT_SYMBOL_GPL(hrtimer_start); 459 460 460 461 /** 461 462 * hrtimer_try_to_cancel - try to deactivate a timer ··· 485 484 return ret; 486 485 487 486 } 487 + EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel); 488 488 489 489 /** 490 490 * hrtimer_cancel - cancel a timer and wait for the handler to finish. ··· 506 504 cpu_relax(); 507 505 } 508 506 } 507 + EXPORT_SYMBOL_GPL(hrtimer_cancel); 509 508 510 509 /** 511 510 * hrtimer_get_remaining - get remaining time for the timer ··· 525 522 526 523 return rem; 527 524 } 525 + EXPORT_SYMBOL_GPL(hrtimer_get_remaining); 528 526 529 527 #ifdef CONFIG_NO_IDLE_HZ 530 528 /** ··· 584 580 timer->base = &bases[clock_id]; 585 581 timer->node.rb_parent = HRTIMER_INACTIVE; 586 582 } 583 + EXPORT_SYMBOL_GPL(hrtimer_init); 587 584 588 585 /** 589 586 * hrtimer_get_res - get the timer resolution for a clock ··· 604 599 605 600 return 0; 606 601 } 602 + EXPORT_SYMBOL_GPL(hrtimer_get_res); 607 603 608 604 /* 609 605 * Expire the per base hrtimer-queue:
+4 -4
mm/memory_hotplug.c
··· 91 91 if (start_pfn < zone->zone_start_pfn) 92 92 zone->zone_start_pfn = start_pfn; 93 93 94 - if (end_pfn > old_zone_end_pfn) 95 - zone->spanned_pages = end_pfn - zone->zone_start_pfn; 94 + zone->spanned_pages = max(old_zone_end_pfn, end_pfn) - 95 + zone->zone_start_pfn; 96 96 97 97 zone_span_writeunlock(zone); 98 98 } ··· 106 106 if (start_pfn < pgdat->node_start_pfn) 107 107 pgdat->node_start_pfn = start_pfn; 108 108 109 - if (end_pfn > old_pgdat_end_pfn) 110 - pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn; 109 + pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) - 110 + pgdat->node_start_pfn; 111 111 } 112 112 113 113 int online_pages(unsigned long pfn, unsigned long nr_pages)
+13 -14
mm/slab.c
··· 207 207 #define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2) 208 208 #define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) 209 209 210 - /* Max number of objs-per-slab for caches which use off-slab slabs. 211 - * Needed to avoid a possible looping condition in cache_grow(). 212 - */ 213 - static unsigned long offslab_limit; 214 - 215 210 /* 216 211 * struct slab 217 212 * ··· 1351 1356 NULL, NULL); 1352 1357 } 1353 1358 1354 - /* Inc off-slab bufctl limit until the ceiling is hit. */ 1355 - if (!(OFF_SLAB(sizes->cs_cachep))) { 1356 - offslab_limit = sizes->cs_size - sizeof(struct slab); 1357 - offslab_limit /= sizeof(kmem_bufctl_t); 1358 - } 1359 - 1360 1359 sizes->cs_dmacachep = kmem_cache_create(names->name_dma, 1361 1360 sizes->cs_size, 1362 1361 ARCH_KMALLOC_MINALIGN, ··· 1769 1780 static size_t calculate_slab_order(struct kmem_cache *cachep, 1770 1781 size_t size, size_t align, unsigned long flags) 1771 1782 { 1783 + unsigned long offslab_limit; 1772 1784 size_t left_over = 0; 1773 1785 int gfporder; 1774 1786 ··· 1781 1791 if (!num) 1782 1792 continue; 1783 1793 1784 - /* More than offslab_limit objects will cause problems */ 1785 - if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit) 1786 - break; 1794 + if (flags & CFLGS_OFF_SLAB) { 1795 + /* 1796 + * Max number of objs-per-slab for caches which 1797 + * use off-slab slabs. Needed to avoid a possible 1798 + * looping condition in cache_grow(). 1799 + */ 1800 + offslab_limit = size - sizeof(struct slab); 1801 + offslab_limit /= sizeof(kmem_bufctl_t); 1802 + 1803 + if (num > offslab_limit) 1804 + break; 1805 + } 1787 1806 1788 1807 /* Found something acceptable - save it away */ 1789 1808 cachep->num = num;
+7 -12
net/bridge/br_if.c
··· 300 300 rtnl_lock(); 301 301 if (strchr(dev->name, '%')) { 302 302 ret = dev_alloc_name(dev, dev->name); 303 - if (ret < 0) 304 - goto err1; 303 + if (ret < 0) { 304 + free_netdev(dev); 305 + goto out; 306 + } 305 307 } 306 308 307 309 ret = register_netdevice(dev); 308 310 if (ret) 309 - goto err2; 311 + goto out; 310 312 311 313 ret = br_sysfs_addbr(dev); 312 314 if (ret) 313 - goto err3; 314 - rtnl_unlock(); 315 - return 0; 316 - 317 - err3: 318 - unregister_netdev(dev); 319 - err2: 320 - free_netdev(dev); 321 - err1: 315 + unregister_netdevice(dev); 316 + out: 322 317 rtnl_unlock(); 323 318 return ret; 324 319 }
-1
net/ethernet/Makefile
··· 3 3 # 4 4 5 5 obj-y += eth.o 6 - obj-$(CONFIG_SYSCTL) += sysctl_net_ether.o 7 6 obj-$(subst m,y,$(CONFIG_IPX)) += pe2.o 8 7 obj-$(subst m,y,$(CONFIG_ATALK)) += pe2.o
-14
net/ethernet/sysctl_net_ether.c
··· 1 - /* -*- linux-c -*- 2 - * sysctl_net_ether.c: sysctl interface to net Ethernet subsystem. 3 - * 4 - * Begun April 1, 1996, Mike Shaver. 5 - * Added /proc/sys/net/ether directory entry (empty =) ). [MS] 6 - */ 7 - 8 - #include <linux/mm.h> 9 - #include <linux/sysctl.h> 10 - #include <linux/if_ether.h> 11 - 12 - ctl_table ether_table[] = { 13 - {0} 14 - };
+2 -2
net/ipv4/netfilter/Kconfig
··· 170 170 Documentation/modules.txt. If unsure, say `N'. 171 171 172 172 config IP_NF_H323 173 - tristate 'H.323 protocol support' 174 - depends on IP_NF_CONNTRACK 173 + tristate 'H.323 protocol support (EXPERIMENTAL)' 174 + depends on IP_NF_CONNTRACK && EXPERIMENTAL 175 175 help 176 176 H.323 is a VoIP signalling protocol from ITU-T. As one of the most 177 177 important VoIP protocols, it is widely used by voice hardware and
+1
net/ipv4/netfilter/ip_conntrack_core.c
··· 1318 1318 .tuple.dst.u.tcp.port; 1319 1319 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] 1320 1320 .tuple.dst.ip; 1321 + memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); 1321 1322 1322 1323 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", 1323 1324 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+2 -2
net/ipv4/netfilter/ip_conntrack_helper_pptp.c
··· 469 469 DEBUGP("%s but no session\n", pptp_msg_name[msg]); 470 470 break; 471 471 } 472 - if (info->sstate != PPTP_CALL_IN_REP 473 - && info->sstate != PPTP_CALL_IN_CONF) { 472 + if (info->cstate != PPTP_CALL_IN_REP 473 + && info->cstate != PPTP_CALL_IN_CONF) { 474 474 DEBUGP("%s but never sent IN_CALL_REPLY\n", 475 475 pptp_msg_name[msg]); 476 476 break;
+1
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
··· 348 348 .tuple.dst.u.tcp.port; 349 349 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] 350 350 .tuple.dst.u3.ip; 351 + memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); 351 352 352 353 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n", 353 354 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+2 -1
net/ipv4/tcp_highspeed.c
··· 135 135 136 136 /* Do additive increase */ 137 137 if (tp->snd_cwnd < tp->snd_cwnd_clamp) { 138 - tp->snd_cwnd_cnt += ca->ai; 138 + /* cwnd = cwnd + a(w) / cwnd */ 139 + tp->snd_cwnd_cnt += ca->ai + 1; 139 140 if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 140 141 tp->snd_cwnd_cnt -= tp->snd_cwnd; 141 142 tp->snd_cwnd++;
+5 -7
net/ipv4/tcp_output.c
··· 642 642 * eventually). The difference is that pulled data not copied, but 643 643 * immediately discarded. 644 644 */ 645 - static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len) 645 + static void __pskb_trim_head(struct sk_buff *skb, int len) 646 646 { 647 647 int i, k, eat; 648 648 ··· 667 667 skb->tail = skb->data; 668 668 skb->data_len -= len; 669 669 skb->len = skb->data_len; 670 - return skb->tail; 671 670 } 672 671 673 672 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) ··· 675 676 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 676 677 return -ENOMEM; 677 678 678 - if (len <= skb_headlen(skb)) { 679 + /* If len == headlen, we avoid __skb_pull to preserve alignment. */ 680 + if (unlikely(len < skb_headlen(skb))) 679 681 __skb_pull(skb, len); 680 - } else { 681 - if (__pskb_trim_head(skb, len-skb_headlen(skb)) == NULL) 682 - return -ENOMEM; 683 - } 682 + else 683 + __pskb_trim_head(skb, len - skb_headlen(skb)); 684 684 685 685 TCP_SKB_CB(skb)->seq += len; 686 686 skb->ip_summed = CHECKSUM_HW;
+2 -1
net/irda/irlap.c
··· 884 884 if (now) { 885 885 /* Send down empty frame to trigger speed change */ 886 886 skb = dev_alloc_skb(0); 887 - irlap_queue_xmit(self, skb); 887 + if (skb) 888 + irlap_queue_xmit(self, skb); 888 889 } 889 890 } 890 891
-8
net/sysctl_net.c
··· 37 37 .mode = 0555, 38 38 .child = core_table, 39 39 }, 40 - #ifdef CONFIG_NET 41 - { 42 - .ctl_name = NET_ETHER, 43 - .procname = "ethernet", 44 - .mode = 0555, 45 - .child = ether_table, 46 - }, 47 - #endif 48 40 #ifdef CONFIG_INET 49 41 { 50 42 .ctl_name = NET_IPV4,
+4 -2
security/selinux/hooks.c
··· 4422 4422 4423 4423 /* Set up any superblocks initialized prior to the policy load. */ 4424 4424 printk(KERN_INFO "SELinux: Setting up existing superblocks.\n"); 4425 + spin_lock(&sb_lock); 4425 4426 spin_lock(&sb_security_lock); 4426 4427 next_sb: 4427 4428 if (!list_empty(&superblock_security_head)) { ··· 4431 4430 struct superblock_security_struct, 4432 4431 list); 4433 4432 struct super_block *sb = sbsec->sb; 4434 - spin_lock(&sb_lock); 4435 4433 sb->s_count++; 4436 - spin_unlock(&sb_lock); 4437 4434 spin_unlock(&sb_security_lock); 4435 + spin_unlock(&sb_lock); 4438 4436 down_read(&sb->s_umount); 4439 4437 if (sb->s_root) 4440 4438 superblock_doinit(sb, NULL); 4441 4439 drop_super(sb); 4440 + spin_lock(&sb_lock); 4442 4441 spin_lock(&sb_security_lock); 4443 4442 list_del_init(&sbsec->list); 4444 4443 goto next_sb; 4445 4444 } 4446 4445 spin_unlock(&sb_security_lock); 4446 + spin_unlock(&sb_lock); 4447 4447 } 4448 4448 4449 4449 /* SELinux requires early initialization in order to label