Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus: (29 commits)
MIPS: Call oops_enter, oops_exit in die
staging/octeon: Software should check the checksum of no tcp/udp packets
MIPS: Octeon: Enable C0_UserLocal probing.
MIPS: No branches in delay slots for huge pages in handle_tlbl
MIPS: Don't clobber CP0_STATUS value for CONFIG_MIPS_MT_SMTC
MIPS: Octeon: Select CONFIG_HOLES_IN_ZONE
MIPS: PM: Use struct syscore_ops instead of sysdevs for PM (v2)
MIPS: Compat: Use 32-bit wrapper for compat_sys_futex.
MIPS: Do not use EXTRA_CFLAGS
MIPS: Alchemy: DB1200: Disable cascade IRQ in handler
SERIAL: Lantiq: Set timeout in uart_port
MIPS: Lantiq: Fix setting the PCI bus speed on AR9
MIPS: Lantiq: Fix external interrupt sources
MIPS: tlbex: Fix build error in R3000 code.
MIPS: Alchemy: Include Au1100 in PM code.
MIPS: Alchemy: Fix typo in MAC0 registration
MIPS: MSP71xx: Fix build error.
MIPS: Handle __put_user() sleeping.
MIPS: Allow forced irq threading
MIPS: i8259: Mark cascade interrupt non-threaded
...

+171 -130
+6
arch/mips/Kconfig
··· 24 24 select GENERIC_IRQ_PROBE 25 25 select GENERIC_IRQ_SHOW 26 26 select HAVE_ARCH_JUMP_LABEL 27 + select IRQ_FORCED_THREADING 27 28 28 29 menu "Machine selection" 29 30 ··· 723 722 select SYS_SUPPORTS_HIGHMEM 724 723 select SYS_SUPPORTS_HOTPLUG_CPU 725 724 select SYS_HAS_CPU_CAVIUM_OCTEON 725 + select HOLES_IN_ZONE 726 726 help 727 727 The Octeon simulator is software performance model of the Cavium 728 728 Octeon Processor. It supports simulating Octeon processors on x86 ··· 746 744 select ZONE_DMA32 747 745 select USB_ARCH_HAS_OHCI 748 746 select USB_ARCH_HAS_EHCI 747 + select HOLES_IN_ZONE 749 748 help 750 749 This option supports all of the Octeon reference boards from Cavium 751 750 Networks. It builds a kernel that dynamically determines the Octeon ··· 974 971 bool 975 972 976 973 config GENERIC_GPIO 974 + bool 975 + 976 + config HOLES_IN_ZONE 977 977 bool 978 978 979 979 #
+1 -1
arch/mips/alchemy/common/platform.c
··· 492 492 memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); 493 493 494 494 ret = platform_device_register(&au1xxx_eth0_device); 495 - if (!ret) 495 + if (ret) 496 496 printk(KERN_INFO "Alchemy: failed to register MAC0\n"); 497 497 498 498
+14 -8
arch/mips/alchemy/common/power.c
··· 158 158 159 159 void au_sleep(void) 160 160 { 161 - int cpuid = alchemy_get_cputype(); 162 - if (cpuid != ALCHEMY_CPU_UNKNOWN) { 163 - save_core_regs(); 164 - if (cpuid <= ALCHEMY_CPU_AU1500) 165 - alchemy_sleep_au1000(); 166 - else if (cpuid <= ALCHEMY_CPU_AU1200) 167 - alchemy_sleep_au1550(); 168 - restore_core_regs(); 161 + save_core_regs(); 162 + 163 + switch (alchemy_get_cputype()) { 164 + case ALCHEMY_CPU_AU1000: 165 + case ALCHEMY_CPU_AU1500: 166 + case ALCHEMY_CPU_AU1100: 167 + alchemy_sleep_au1000(); 168 + break; 169 + case ALCHEMY_CPU_AU1550: 170 + case ALCHEMY_CPU_AU1200: 171 + alchemy_sleep_au1550(); 172 + break; 169 173 } 174 + 175 + restore_core_regs(); 170 176 } 171 177 172 178 #endif /* CONFIG_PM */
+4
arch/mips/alchemy/devboards/bcsr.c
··· 89 89 { 90 90 unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT); 91 91 92 + disable_irq_nosync(irq); 93 + 92 94 for ( ; bisr; bisr &= bisr - 1) 93 95 generic_handle_irq(bcsr_csc_base + __ffs(bisr)); 96 + 97 + enable_irq(irq); 94 98 } 95 99 96 100 /* NOTE: both the enable and mask bits must be cleared, otherwise the
-7
arch/mips/alchemy/devboards/db1200/setup.c
··· 23 23 unsigned long freq0, clksrc, div, pfc; 24 24 unsigned short whoami; 25 25 26 - /* Set Config[OD] (disable overlapping bus transaction): 27 - * This gets rid of a _lot_ of spurious interrupts (especially 28 - * wrt. IDE); but incurs ~10% performance hit in some 29 - * cpu-bound applications. 30 - */ 31 - set_c0_config(1 << 19); 32 - 33 26 bcsr_init(DB1200_BCSR_PHYS_ADDR, 34 27 DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); 35 28
+2 -1
arch/mips/ar7/irq.c
··· 98 98 99 99 static struct irqaction ar7_cascade_action = { 100 100 .handler = no_action, 101 - .name = "AR7 cascade interrupt" 101 + .name = "AR7 cascade interrupt", 102 + .flags = IRQF_NO_THREAD, 102 103 }; 103 104 104 105 static void __init ar7_irq_init(int base)
+1
arch/mips/bcm63xx/irq.c
··· 222 222 static struct irqaction cpu_ip2_cascade_action = { 223 223 .handler = no_action, 224 224 .name = "cascade_ip2", 225 + .flags = IRQF_NO_THREAD, 225 226 }; 226 227 227 228 void __init arch_init_irq(void)
+1
arch/mips/cobalt/irq.c
··· 48 48 static struct irqaction cascade = { 49 49 .handler = no_action, 50 50 .name = "cascade", 51 + .flags = IRQF_NO_THREAD, 51 52 }; 52 53 53 54 void __init arch_init_irq(void)
+4
arch/mips/dec/setup.c
··· 101 101 static struct irqaction ioirq = { 102 102 .handler = no_action, 103 103 .name = "cascade", 104 + .flags = IRQF_NO_THREAD, 104 105 }; 105 106 static struct irqaction fpuirq = { 106 107 .handler = no_action, 107 108 .name = "fpu", 109 + .flags = IRQF_NO_THREAD, 108 110 }; 109 111 110 112 static struct irqaction busirq = { 111 113 .flags = IRQF_DISABLED, 112 114 .name = "bus error", 115 + .flags = IRQF_NO_THREAD, 113 116 }; 114 117 115 118 static struct irqaction haltirq = { 116 119 .handler = dec_intr_halt, 117 120 .name = "halt", 121 + .flags = IRQF_NO_THREAD, 118 122 }; 119 123 120 124
+1 -1
arch/mips/emma/markeins/irq.c
··· 169 169 170 170 static struct irqaction irq_cascade = { 171 171 .handler = no_action, 172 - .flags = 0, 172 + .flags = IRQF_NO_THREAD, 173 173 .name = "cascade", 174 174 .dev_id = NULL, 175 175 .next = NULL,
-1
arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
··· 54 54 #define cpu_has_mips_r2_exec_hazard 0 55 55 #define cpu_has_dsp 0 56 56 #define cpu_has_mipsmt 0 57 - #define cpu_has_userlocal 0 58 57 #define cpu_has_vint 0 59 58 #define cpu_has_veic 0 60 59 #define cpu_hwrena_impl_bits 0xc0000000
-1
arch/mips/include/asm/mach-powertv/dma-coherence.h
··· 13 13 #define __ASM_MACH_POWERTV_DMA_COHERENCE_H 14 14 15 15 #include <linux/sched.h> 16 - #include <linux/version.h> 17 16 #include <linux/device.h> 18 17 #include <asm/mach-powertv/asic.h> 19 18
+2 -2
arch/mips/include/asm/stackframe.h
··· 195 195 * to cover the pipeline delay. 196 196 */ 197 197 .set mips32 198 - mfc0 v1, CP0_TCSTATUS 198 + mfc0 k0, CP0_TCSTATUS 199 199 .set mips0 200 - LONG_S v1, PT_TCSTATUS(sp) 200 + LONG_S k0, PT_TCSTATUS(sp) 201 201 #endif /* CONFIG_MIPS_MT_SMTC */ 202 202 LONG_S $4, PT_R4(sp) 203 203 LONG_S $5, PT_R5(sp)
+24 -32
arch/mips/jz4740/gpio.c
··· 18 18 #include <linux/init.h> 19 19 20 20 #include <linux/spinlock.h> 21 - #include <linux/sysdev.h> 21 + #include <linux/syscore_ops.h> 22 22 #include <linux/io.h> 23 23 #include <linux/gpio.h> 24 24 #include <linux/delay.h> ··· 86 86 spinlock_t lock; 87 87 88 88 struct gpio_chip gpio_chip; 89 - struct sys_device sysdev; 90 89 }; 91 90 92 91 static struct jz_gpio_chip jz4740_gpio_chips[]; ··· 458 459 JZ4740_GPIO_CHIP(D), 459 460 }; 460 461 461 - static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev) 462 + static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip) 462 463 { 463 - return container_of(dev, struct jz_gpio_chip, sysdev); 464 - } 465 - 466 - static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state) 467 - { 468 - struct jz_gpio_chip *chip = sysdev_to_chip(dev); 469 - 470 464 chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK); 471 465 writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET); 472 466 writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR); 467 + } 468 + 469 + static int jz4740_gpio_suspend(void) 470 + { 471 + int i; 472 + 473 + for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++) 474 + jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]); 473 475 474 476 return 0; 475 477 } 476 478 477 - static int jz4740_gpio_resume(struct sys_device *dev) 479 + static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip) 478 480 { 479 - struct jz_gpio_chip *chip = sysdev_to_chip(dev); 480 481 uint32_t mask = chip->suspend_mask; 481 482 482 483 writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR); 483 484 writel(mask, chip->base + JZ_REG_GPIO_MASK_SET); 484 - 485 - return 0; 486 485 } 487 486 488 - static struct sysdev_class jz4740_gpio_sysdev_class = { 489 - .name = "gpio", 487 + static void jz4740_gpio_resume(void) 488 + { 489 + int i; 490 + 491 + for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--) 492 + jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]); 493 + } 494 + 495 + static struct syscore_ops jz4740_gpio_syscore_ops = { 490 496 .suspend = jz4740_gpio_suspend, 491 497 .resume = jz4740_gpio_resume, 492 498 }; 493 499 494 - static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) 500 + static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) 495 501 { 496 - int ret, irq; 497 - 498 - chip->sysdev.id = id; 499 - chip->sysdev.cls = &jz4740_gpio_sysdev_class; 500 - ret = sysdev_register(&chip->sysdev); 501 - 502 - if (ret) 503 - return ret; 502 + int irq; 504 503 505 504 spin_lock_init(&chip->lock); 506 505 ··· 516 519 irq_set_chip_and_handler(irq, &jz_gpio_irq_chip, 517 520 handle_level_irq); 518 521 } 519 - 520 - return 0; 521 522 } 522 523 523 524 static int __init jz4740_gpio_init(void) 524 525 { 525 526 unsigned int i; 526 - int ret; 527 - 528 - ret = sysdev_class_register(&jz4740_gpio_sysdev_class); 529 - if (ret) 530 - return ret; 531 527 532 528 for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i) 533 529 jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i); 530 + 531 + register_syscore_ops(&jz4740_gpio_syscore_ops); 534 532 535 533 printk(KERN_INFO "JZ4740 GPIO initialized\n"); 536 534
+20 -19
arch/mips/kernel/ftrace.c
··· 19 19 20 20 #include <asm-generic/sections.h> 21 21 22 + #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) 23 + #define MCOUNT_OFFSET_INSNS 5 24 + #else 25 + #define MCOUNT_OFFSET_INSNS 4 26 + #endif 27 + 28 + /* 29 + * Check if the address is in kernel space 30 + * 31 + * Clone core_kernel_text() from kernel/extable.c, but doesn't call 32 + * init_kernel_text() for Ftrace doesn't trace functions in init sections. 33 + */ 34 + static inline int in_kernel_space(unsigned long ip) 35 + { 36 + if (ip >= (unsigned long)_stext && 37 + ip <= (unsigned long)_etext) 38 + return 1; 39 + return 0; 40 + } 41 + 22 42 #ifdef CONFIG_DYNAMIC_FTRACE 23 43 24 44 #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ ··· 72 52 buf = (u32 *)&insn_j_ftrace_graph_caller; 73 53 uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); 74 54 #endif 75 - } 76 - 77 - /* 78 - * Check if the address is in kernel space 79 - * 80 - * Clone core_kernel_text() from kernel/extable.c, but doesn't call 81 - * init_kernel_text() for Ftrace doesn't trace functions in init sections. 82 - */ 83 - static inline int in_kernel_space(unsigned long ip) 84 - { 85 - if (ip >= (unsigned long)_stext && 86 - ip <= (unsigned long)_etext) 87 - return 1; 88 - return 0; 89 55 } 90 56 91 57 static int ftrace_modify_code(unsigned long ip, unsigned int new_code) ··· 118 112 * 1: offset = 4 instructions 119 113 */ 120 114 121 - #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) 122 - #define MCOUNT_OFFSET_INSNS 5 123 - #else 124 - #define MCOUNT_OFFSET_INSNS 4 125 - #endif 126 115 #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) 127 116 128 117 int ftrace_make_nop(struct module *mod,
+2 -1
arch/mips/kernel/i8259.c
··· 229 229 */ 230 230 if (i8259A_auto_eoi >= 0) { 231 231 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 232 - outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ 232 + outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ 233 233 } 234 234 } 235 235 ··· 295 295 static struct irqaction irq2 = { 296 296 .handler = no_action, 297 297 .name = "cascade", 298 + .flags = IRQF_NO_THREAD, 298 299 }; 299 300 300 301 static struct resource pic1_io_resource = {
+7
arch/mips/kernel/linux32.c
··· 349 349 return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), 350 350 dfd, pathname); 351 351 } 352 + 353 + SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val, 354 + struct compat_timespec __user *, utime, u32 __user *, uaddr2, 355 + u32, val3) 356 + { 357 + return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3); 358 + }
+1 -1
arch/mips/kernel/scall64-n32.S
··· 315 315 PTR sys_fremovexattr 316 316 PTR sys_tkill 317 317 PTR sys_ni_syscall 318 - PTR compat_sys_futex 318 + PTR sys_32_futex 319 319 PTR compat_sys_sched_setaffinity /* 6195 */ 320 320 PTR compat_sys_sched_getaffinity 321 321 PTR sys_cacheflush
+1 -1
arch/mips/kernel/scall64-o32.S
··· 441 441 PTR sys_fremovexattr /* 4235 */ 442 442 PTR sys_tkill 443 443 PTR sys_sendfile64 444 - PTR compat_sys_futex 444 + PTR sys_32_futex 445 445 PTR compat_sys_sched_setaffinity 446 446 PTR compat_sys_sched_getaffinity /* 4240 */ 447 447 PTR compat_sys_io_setup
+3
arch/mips/kernel/signal.c
··· 8 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 9 9 */ 10 10 #include <linux/cache.h> 11 + #include <linux/irqflags.h> 11 12 #include <linux/sched.h> 12 13 #include <linux/mm.h> 13 14 #include <linux/personality.h> ··· 659 658 asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, 660 659 __u32 thread_info_flags) 661 660 { 661 + local_irq_enable(); 662 + 662 663 /* deal with pending signal delivery */ 663 664 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 664 665 do_signal(regs);
+12 -4
arch/mips/kernel/traps.c
··· 14 14 #include <linux/bug.h> 15 15 #include <linux/compiler.h> 16 16 #include <linux/init.h> 17 + #include <linux/kernel.h> 17 18 #include <linux/mm.h> 18 19 #include <linux/module.h> 19 20 #include <linux/sched.h> ··· 365 364 return (regs->cp0_cause >> 2) & 0x1f; 366 365 } 367 366 368 - static DEFINE_SPINLOCK(die_lock); 367 + static DEFINE_RAW_SPINLOCK(die_lock); 369 368 370 369 void __noreturn die(const char *str, struct pt_regs *regs) 371 370 { 372 371 static int die_counter; 373 372 int sig = SIGSEGV; 374 373 #ifdef CONFIG_MIPS_MT_SMTC 375 - unsigned long dvpret = dvpe(); 374 + unsigned long dvpret; 376 375 #endif /* CONFIG_MIPS_MT_SMTC */ 376 + 377 + oops_enter(); 377 378 378 379 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) 379 380 sig = 0; 380 381 381 382 console_verbose(); 382 - spin_lock_irq(&die_lock); 383 + raw_spin_lock_irq(&die_lock); 384 + #ifdef CONFIG_MIPS_MT_SMTC 385 + dvpret = dvpe(); 386 + #endif /* CONFIG_MIPS_MT_SMTC */ 383 387 bust_spinlocks(1); 384 388 #ifdef CONFIG_MIPS_MT_SMTC 385 389 mips_mt_regdump(dvpret); ··· 393 387 printk("%s[#%d]:\n", str, ++die_counter); 394 388 show_registers(regs); 395 389 add_taint(TAINT_DIE); 396 - spin_unlock_irq(&die_lock); 390 + raw_spin_unlock_irq(&die_lock); 391 + 392 + oops_exit(); 397 393 398 394 if (in_interrupt()) 399 395 panic("Fatal exception in interrupt");
+1 -1
arch/mips/kernel/vpe.c
··· 192 192 } 193 193 spin_unlock(&vpecontrol.tc_list_lock); 194 194 195 - return NULL; 195 + return res; 196 196 } 197 197 198 198 /* allocate a vpe and associate it with this minor (or index) */
+2 -4
arch/mips/lantiq/irq.c
··· 123 123 static unsigned int ltq_startup_eiu_irq(struct irq_data *d) 124 124 { 125 125 int i; 126 - int irq_nr = d->irq - INT_NUM_IRQ0; 127 126 128 127 ltq_enable_irq(d); 129 128 for (i = 0; i < MAX_EIU; i++) { 130 - if (irq_nr == ltq_eiu_irq[i]) { 129 + if (d->irq == ltq_eiu_irq[i]) { 131 130 /* low level - we should really handle set_type */ 132 131 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | 133 132 (0x6 << (i * 4)), LTQ_EIU_EXIN_C); ··· 146 147 static void ltq_shutdown_eiu_irq(struct irq_data *d) 147 148 { 148 149 int i; 149 - int irq_nr = d->irq - INT_NUM_IRQ0; 150 150 151 151 ltq_disable_irq(d); 152 152 for (i = 0; i < MAX_EIU; i++) { 153 - if (irq_nr == ltq_eiu_irq[i]) { 153 + if (d->irq == ltq_eiu_irq[i]) { 154 154 /* disable */ 155 155 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), 156 156 LTQ_EIU_EXIN_INEN);
-1
arch/mips/lantiq/xway/ebu.c
··· 10 10 11 11 #include <linux/kernel.h> 12 12 #include <linux/module.h> 13 - #include <linux/version.h> 14 13 #include <linux/ioport.h> 15 14 16 15 #include <lantiq_soc.h>
-1
arch/mips/lantiq/xway/pmu.c
··· 8 8 9 9 #include <linux/kernel.h> 10 10 #include <linux/module.h> 11 - #include <linux/version.h> 12 11 #include <linux/ioport.h> 13 12 14 13 #include <lantiq_soc.h>
+1
arch/mips/lasat/interrupt.c
··· 105 105 static struct irqaction cascade = { 106 106 .handler = no_action, 107 107 .name = "cascade", 108 + .flags = IRQF_NO_THREAD, 108 109 }; 109 110 110 111 void __init arch_init_irq(void)
+1
arch/mips/loongson/fuloong-2e/irq.c
··· 42 42 static struct irqaction cascade_irqaction = { 43 43 .handler = no_action, 44 44 .name = "cascade", 45 + .flags = IRQF_NO_THREAD, 45 46 }; 46 47 47 48 void __init mach_init_irq(void)
+2 -1
arch/mips/loongson/lemote-2f/irq.c
··· 96 96 struct irqaction ip6_irqaction = { 97 97 .handler = ip6_action, 98 98 .name = "cascade", 99 - .flags = IRQF_SHARED, 99 + .flags = IRQF_SHARED | IRQF_NO_THREAD, 100 100 }; 101 101 102 102 struct irqaction cascade_irqaction = { 103 103 .handler = no_action, 104 104 .name = "cascade", 105 + .flags = IRQF_NO_THREAD, 105 106 }; 106 107 107 108 void __init mach_init_irq(void)
+25 -23
arch/mips/mm/mmap.c
··· 6 6 * Copyright (C) 2011 Wind River Systems, 7 7 * written by Ralf Baechle <ralf@linux-mips.org> 8 8 */ 9 + #include <linux/compiler.h> 9 10 #include <linux/errno.h> 10 11 #include <linux/mm.h> 11 12 #include <linux/mman.h> ··· 16 15 #include <linux/sched.h> 17 16 18 17 unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ 19 - 20 18 EXPORT_SYMBOL(shm_align_mask); 21 19 22 20 /* gap between mmap and stack */ 23 21 #define MIN_GAP (128*1024*1024UL) 24 22 #define MAX_GAP ((TASK_SIZE)/6*5) 25 23 26 24 static int mmap_is_legacy(void) 27 25 { ··· 57 57 return base - off; 58 58 } 59 59 60 - #define COLOUR_ALIGN(addr,pgoff) \ 60 + #define COLOUR_ALIGN(addr, pgoff) \ 61 61 ((((addr) + shm_align_mask) & ~shm_align_mask) + \ 62 62 (((pgoff) << PAGE_SHIFT) & shm_align_mask)) 63 63 64 64 enum mmap_allocation_direction {UP, DOWN}; 65 65 66 - static unsigned long arch_get_unmapped_area_foo(struct file *filp, 66 + static unsigned long arch_get_unmapped_area_common(struct file *filp, 67 67 unsigned long addr0, unsigned long len, unsigned long pgoff, 68 68 unsigned long flags, enum mmap_allocation_direction dir) 69 69 { ··· 103 103 104 104 vma = find_vma(mm, addr); 105 105 if (TASK_SIZE - len >= addr && 106 - (!vma || addr + len <= vma->vm_start)) 106 + (!vma || addr + len <= vma->vm_start)) 107 107 return addr; 108 108 } 109 109 110 110 if (dir == UP) { 111 111 addr = mm->mmap_base; 112 - if (do_color_align) 113 - addr = COLOUR_ALIGN(addr, pgoff); 114 - else 115 - addr = PAGE_ALIGN(addr); 112 + if (do_color_align) 113 + addr = COLOUR_ALIGN(addr, pgoff); 114 + else 115 + addr = PAGE_ALIGN(addr); 116 116 117 117 for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ ··· 131 131 mm->free_area_cache = mm->mmap_base; 132 132 } 133 133 134 - /* either no address requested or can't fit in requested address hole */ 134 + /* 135 + * either no address requested, or the mapping can't fit into 136 + * the requested address hole 137 + */ 135 138 addr = mm->free_area_cache; 136 - if (do_color_align) { 137 - unsigned long base = 138 - COLOUR_ALIGN_DOWN(addr - len, pgoff); 139 - 139 + if (do_color_align) { 140 + unsigned long base = 141 + COLOUR_ALIGN_DOWN(addr - len, pgoff); 140 142 addr = base + len; 141 - } 143 + } 142 144 143 145 /* make sure it can fit in the remaining address space */ 144 146 if (likely(addr > len)) { 145 147 vma = find_vma(mm, addr - len); 146 148 if (!vma || addr <= vma->vm_start) { 147 - /* remember the address as a hint for next time */ 148 - return mm->free_area_cache = addr-len; 149 + /* cache the address as a hint for next time */ 150 + return mm->free_area_cache = addr - len; 149 151 } 150 152 } 151 153 152 154 if (unlikely(mm->mmap_base < len)) 153 155 goto bottomup; 154 156 155 - addr = mm->mmap_base-len; 157 + addr = mm->mmap_base - len; 156 158 if (do_color_align) 157 159 addr = COLOUR_ALIGN_DOWN(addr, pgoff); 158 160 ··· 165 163 * return with success: 166 164 */ 167 165 vma = find_vma(mm, addr); 168 - if (likely(!vma || addr+len <= vma->vm_start)) { 169 - /* remember the address as a hint for next time */ 166 + if (likely(!vma || addr + len <= vma->vm_start)) { 167 + /* cache the address as a hint for next time */ 170 168 return mm->free_area_cache = addr; 171 169 } 172 170 ··· 175 173 mm->cached_hole_size = vma->vm_start - addr; 176 174 177 175 /* try just below the current vma->vm_start */ 178 - addr = vma->vm_start-len; 176 + addr = vma->vm_start - len; 179 177 if (do_color_align) 180 178 addr = COLOUR_ALIGN_DOWN(addr, pgoff); 181 179 } while (likely(len < vma->vm_start)); ··· 203 201 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, 204 202 unsigned long len, unsigned long pgoff, unsigned long flags) 205 203 { 206 - return arch_get_unmapped_area_foo(filp, 204 + return arch_get_unmapped_area_common(filp, 207 205 addr0, len, pgoff, flags, UP); 208 206 } 209 207 ··· 215 213 unsigned long addr0, unsigned long len, unsigned long pgoff, 216 214 unsigned long flags) 217 215 { 218 - return arch_get_unmapped_area_foo(filp, 216 + return arch_get_unmapped_area_common(filp, 219 217 addr0, len, pgoff, flags, DOWN); 220 218 } 221 219
+3 -3
arch/mips/mm/tlbex.c
··· 1759 1759 u32 *p = handle_tlbm; 1760 1760 struct uasm_label *l = labels; 1761 1761 struct uasm_reloc *r = relocs; 1762 - struct work_registers wr; 1763 1762 1764 1763 memset(handle_tlbm, 0, sizeof(handle_tlbm)); 1765 1764 memset(labels, 0, sizeof(labels)); 1766 1765 memset(relocs, 0, sizeof(relocs)); 1767 1766 1768 1767 build_r3000_tlbchange_handler_head(&p, K0, K1); 1769 - build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); 1768 + build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); 1770 1769 uasm_i_nop(&p); /* load delay */ 1771 1770 build_make_write(&p, &r, K0, K1); 1772 1771 build_r3000_pte_reload_tlbwi(&p, K0, K1); ··· 1962 1963 uasm_i_andi(&p, wr.r3, wr.r3, 2); 1963 1964 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); 1964 1965 } 1965 - 1966 + if (PM_DEFAULT_MASK == 0) 1967 + uasm_i_nop(&p); 1966 1968 /* 1967 1969 * We clobbered C0_PAGEMASK, restore it. On the other branch 1968 1970 * it is restored in build_huge_tlb_write_entry.
+4 -2
arch/mips/mti-malta/malta-int.c
··· 350 350 351 351 static struct irqaction i8259irq = { 352 352 .handler = no_action, 353 - .name = "XT-PIC cascade" 353 + .name = "XT-PIC cascade", 354 + .flags = IRQF_NO_THREAD, 354 355 }; 355 356 356 357 static struct irqaction corehi_irqaction = { 357 358 .handler = no_action, 358 - .name = "CoreHi" 359 + .name = "CoreHi", 360 + .flags = IRQF_NO_THREAD, 359 361 }; 360 362 361 363 static msc_irqmap_t __initdata msc_irqmap[] = {
+1 -1
arch/mips/netlogic/xlr/Makefile
··· 2 2 obj-$(CONFIG_SMP) += smp.o smpboot.o 3 3 obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o 4 4 5 - EXTRA_CFLAGS += -Werror 5 + ccflags-y += -Werror
+7 -2
arch/mips/pci/pci-lantiq.c
··· 171 171 u32 temp_buffer; 172 172 173 173 /* set clock to 33Mhz */ 174 - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); 175 - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); 174 + if (ltq_is_ar9()) { 175 + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR); 176 + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR); 177 + } else { 178 + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); 179 + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); 180 + } 176 181 177 182 /* external or internal clock ? */ 178 183 if (conf->clock) {
+1 -1
arch/mips/pci/pci-rc32434.c
··· 215 215 rc32434_pcibridge_init(); 216 216 217 217 io_map_base = ioremap(rc32434_res_pci_io1.start, 218 - resource_size(&rcrc32434_res_pci_io1)); 218 + resource_size(&rc32434_res_pci_io1)); 219 219 220 220 if (!io_map_base) 221 221 return -ENOMEM;
+4 -2
arch/mips/pmc-sierra/msp71xx/msp_irq.c
··· 108 108 109 109 static struct irqaction cic_cascade_msp = { 110 110 .handler = no_action, 111 - .name = "MSP CIC cascade" 111 + .name = "MSP CIC cascade", 112 + .flags = IRQF_NO_THREAD, 112 113 }; 113 114 114 115 static struct irqaction per_cascade_msp = { 115 116 .handler = no_action, 116 - .name = "MSP PER cascade" 117 + .name = "MSP PER cascade", 118 + .flags = IRQF_NO_THREAD, 117 119 }; 118 120 119 121 void __init arch_init_irq(void)
+1 -1
arch/mips/pnx8550/common/int.c
··· 167 167 168 168 static struct irqaction gic_action = { 169 169 .handler = no_action, 170 - .flags = IRQF_DISABLED, 170 + .flags = IRQF_DISABLED | IRQF_NO_THREAD, 171 171 .name = "GIC", 172 172 }; 173 173
+5 -5
arch/mips/sgi-ip22/ip22-int.c
··· 155 155 156 156 static struct irqaction local0_cascade = { 157 157 .handler = no_action, 158 - .flags = IRQF_DISABLED, 158 + .flags = IRQF_DISABLED | IRQF_NO_THREAD, 159 159 .name = "local0 cascade", 160 160 }; 161 161 162 162 static struct irqaction local1_cascade = { 163 163 .handler = no_action, 164 - .flags = IRQF_DISABLED, 164 + .flags = IRQF_DISABLED | IRQF_NO_THREAD, 165 165 .name = "local1 cascade", 166 166 }; 167 167 168 168 static struct irqaction buserr = { 169 169 .handler = no_action, 170 - .flags = IRQF_DISABLED, 170 + .flags = IRQF_DISABLED | IRQF_NO_THREAD, 171 171 .name = "Bus Error", 172 172 }; 173 173 174 174 static struct irqaction map0_cascade = { 175 175 .handler = no_action, 176 - .flags = IRQF_DISABLED, 176 + .flags = IRQF_DISABLED | IRQF_NO_THREAD, 177 177 .name = "mapable0 cascade", 178 178 }; 179 179 180 180 #ifdef USE_LIO3_IRQ 181 181 static struct irqaction map1_cascade = { 182 182 .handler = no_action, 183 - .flags = IRQF_DISABLED, 183 + .flags = IRQF_DISABLED | IRQF_NO_THREAD, 184 184 .name = "mapable1 cascade", 185 185 }; 186 186 #define SGI_INTERRUPTS SGINT_END
+1
arch/mips/sni/rm200.c
··· 359 359 static struct irqaction sni_rm200_irq2 = { 360 360 .handler = no_action, 361 361 .name = "cascade", 362 + .flags = IRQF_NO_THREAD, 362 363 }; 363 364 364 365 static struct resource sni_rm200_pic1_resource = {
+1
arch/mips/vr41xx/common/irq.c
··· 34 34 static struct irqaction cascade_irqaction = { 35 35 .handler = no_action, 36 36 .name = "cascade", 37 + .flags = IRQF_NO_THREAD, 37 38 }; 38 39 39 40 int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
+2 -1
drivers/staging/octeon/ethernet-rx.c
··· 411 411 skb->protocol = eth_type_trans(skb, dev); 412 412 skb->dev = dev; 413 413 414 - if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) 414 + if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || 415 + work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) 415 416 skb->ip_summed = CHECKSUM_NONE; 416 417 else 417 418 skb->ip_summed = CHECKSUM_UNNECESSARY;
+3 -1
drivers/tty/serial/lantiq.c
··· 478 478 spin_unlock_irqrestore(&ltq_asc_lock, flags); 479 479 480 480 /* Don't rewrite B0 */ 481 - if (tty_termios_baud_rate(new)) 481 + if (tty_termios_baud_rate(new)) 482 482 tty_termios_encode_baud_rate(new, baud, baud); 483 + 484 + uart_update_timeout(port, cflag, baud); 483 485 } 484 486 485 487 static const char*