Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sh/evt2irq-migration' into sh-latest

Conflicts:
arch/sh/kernel/cpu/sh3/setup-sh770x.c
arch/sh/kernel/cpu/sh3/setup-sh7710.c
arch/sh/kernel/cpu/sh3/setup-sh7720.c
arch/sh/kernel/cpu/sh4/setup-sh7750.c
arch/sh/kernel/cpu/sh4a/setup-sh7343.c
arch/sh/kernel/cpu/sh4a/setup-sh7366.c
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
arch/sh/kernel/cpu/sh4a/setup-sh7723.c
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
arch/sh/kernel/cpu/sh4a/setup-sh7757.c
arch/sh/kernel/cpu/sh4a/setup-sh7763.c
arch/sh/kernel/cpu/sh4a/setup-sh7770.c
arch/sh/kernel/cpu/sh4a/setup-sh7785.c
arch/sh/kernel/cpu/sh4a/setup-sh7786.c

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

+1925 -1293
+10
Documentation/feature-removal-schedule.txt
··· 539 539 Why: setitimer is not returning -EFAULT if user pointer is NULL. This 540 540 violates the spec. 541 541 Who: Sasikantha Babu <sasikanth.v19@gmail.com> 542 + 543 + ---------------------------- 544 + 545 + What: V4L2_CID_HCENTER, V4L2_CID_VCENTER V4L2 controls 546 + When: 3.7 547 + Why: The V4L2_CID_VCENTER, V4L2_CID_HCENTER controls have been deprecated 548 + for about 4 years and they are not used by any mainline driver. 549 + There are newer controls (V4L2_CID_PAN*, V4L2_CID_TILT*) that provide 550 + similar functionality. 551 + Who: Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+3
MAINTAINERS
··· 1968 1968 F: drivers/net/ethernet/ti/cpmac.c 1969 1969 1970 1970 CPU FREQUENCY DRIVERS 1971 + M: Rafael J. Wysocki <rjw@sisk.pl> 1971 1972 L: cpufreq@vger.kernel.org 1973 + L: linux-pm@vger.kernel.org 1972 1974 S: Maintained 1973 1975 F: drivers/cpufreq/ 1974 1976 F: include/linux/cpufreq.h ··· 4036 4034 F: drivers/scsi/53c700* 4037 4035 4038 4036 LED SUBSYSTEM 4037 + M: Bryan Wu <bryan.wu@canonical.com> 4039 4038 M: Richard Purdie <rpurdie@rpsys.net> 4040 4039 S: Maintained 4041 4040 F: drivers/leds/
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 4 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Saber-toothed Squirrel 6 6 7 7 # *DOCUMENTATION*
+3
arch/arm/mach-exynos/Kconfig
··· 232 232 config MACH_UNIVERSAL_C210 233 233 bool "Mobile UNIVERSAL_C210 Board" 234 234 select CPU_EXYNOS4210 235 + select S5P_HRT 236 + select CLKSRC_MMIO 237 + select HAVE_SCHED_CLOCK 235 238 select S5P_GPIO_INT 236 239 select S5P_DEV_FIMC0 237 240 select S5P_DEV_FIMC1
+1 -1
arch/arm/mach-exynos/clock-exynos5.c
··· 678 678 .name = "dma", 679 679 .devname = "dma-pl330.1", 680 680 .enable = exynos5_clk_ip_fsys_ctrl, 681 - .ctrlbit = (1 << 1), 681 + .ctrlbit = (1 << 2), 682 682 }; 683 683 684 684 static struct clk exynos5_clk_mdma1 = {
+3 -1
arch/arm/mach-exynos/mach-universal_c210.c
··· 40 40 #include <plat/pd.h> 41 41 #include <plat/regs-fb-v4.h> 42 42 #include <plat/fimc-core.h> 43 + #include <plat/s5p-time.h> 43 44 #include <plat/camport.h> 44 45 #include <plat/mipi_csis.h> 45 46 ··· 1064 1063 exynos_init_io(NULL, 0); 1065 1064 s3c24xx_init_clocks(24000000); 1066 1065 s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); 1066 + s5p_set_timer_source(S5P_PWM2, S5P_PWM4); 1067 1067 } 1068 1068 1069 1069 static void s5p_tv_setup(void) ··· 1115 1113 .map_io = universal_map_io, 1116 1114 .handle_irq = gic_handle_irq, 1117 1115 .init_machine = universal_machine_init, 1118 - .timer = &exynos4_timer, 1116 + .timer = &s5p_timer, 1119 1117 .reserve = &universal_reserve, 1120 1118 .restart = exynos4_restart, 1121 1119 MACHINE_END
+4 -2
arch/arm/mach-prima2/irq.c
··· 42 42 static __init void sirfsoc_irq_init(void) 43 43 { 44 44 sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32); 45 - sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32); 45 + sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, 46 + SIRFSOC_INTENAL_IRQ_END + 1 - 32); 46 47 47 48 writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0); 48 49 writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1); ··· 69 68 if (!sirfsoc_intc_base) 70 69 panic("unable to map intc cpu registers\n"); 71 70 72 - irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL); 71 + irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0, 72 + &irq_domain_simple_ops, NULL); 73 73 74 74 of_node_put(np); 75 75
+2 -20
arch/arm/mach-shmobile/board-ag5evm.c
··· 365 365 }; 366 366 367 367 /* SDHI0 */ 368 - static irqreturn_t ag5evm_sdhi0_gpio_cd(int irq, void *arg) 369 - { 370 - struct device *dev = arg; 371 - struct sh_mobile_sdhi_info *info = dev->platform_data; 372 - struct tmio_mmc_data *pdata = info->pdata; 373 - 374 - tmio_mmc_cd_wakeup(pdata); 375 - 376 - return IRQ_HANDLED; 377 - } 378 - 379 368 static struct sh_mobile_sdhi_info sdhi0_info = { 380 369 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 381 370 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 382 - .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, 371 + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, 383 372 .tmio_caps = MMC_CAP_SD_HIGHSPEED, 384 373 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, 374 + .cd_gpio = GPIO_PORT251, 385 375 }; 386 376 387 377 static struct resource sdhi0_resources[] = { ··· 547 557 lcd_backlight_reset(); 548 558 549 559 /* enable SDHI0 on CN15 [SD I/F] */ 550 - gpio_request(GPIO_FN_SDHICD0, NULL); 551 560 gpio_request(GPIO_FN_SDHIWP0, NULL); 552 561 gpio_request(GPIO_FN_SDHICMD0, NULL); 553 562 gpio_request(GPIO_FN_SDHICLK0, NULL); ··· 554 565 gpio_request(GPIO_FN_SDHID0_2, NULL); 555 566 gpio_request(GPIO_FN_SDHID0_1, NULL); 556 567 gpio_request(GPIO_FN_SDHID0_0, NULL); 557 - 558 - if (!request_irq(intcs_evt2irq(0x3c0), ag5evm_sdhi0_gpio_cd, 559 - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 560 - "sdhi0 cd", &sdhi0_device.dev)) 561 - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; 562 - else 563 - pr_warn("Unable to setup SDHI0 GPIO IRQ\n"); 564 568 565 569 /* enable SDHI1 on CN4 [WLAN I/F] */ 566 570 gpio_request(GPIO_FN_SDHICLK1, NULL);
+2 -20
arch/arm/mach-shmobile/board-mackerel.c
··· 1011 1011 } 1012 1012 1013 1013 /* SDHI0 */ 1014 - static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg) 1015 - { 1016 - struct device *dev = arg; 1017 - struct sh_mobile_sdhi_info *info = dev->platform_data; 1018 - struct tmio_mmc_data *pdata = info->pdata; 1019 - 1020 - tmio_mmc_cd_wakeup(pdata); 1021 - 1022 - return IRQ_HANDLED; 1023 - } 1024 - 1025 1014 static struct sh_mobile_sdhi_info sdhi0_info = { 1026 1015 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 1027 1016 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 1017 + .tmio_flags = TMIO_MMC_USE_GPIO_CD, 1028 1018 .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, 1019 + .cd_gpio = GPIO_PORT172, 1029 1020 }; 1030 1021 1031 1022 static struct resource sdhi0_resources[] = { ··· 1375 1384 { 1376 1385 u32 srcr4; 1377 1386 struct clk *clk; 1378 - int ret; 1379 1387 1380 1388 /* External clock source */ 1381 1389 clk_set_rate(&sh7372_dv_clki_clk, 27000000); ··· 1471 1481 irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); 1472 1482 1473 1483 /* enable SDHI0 */ 1474 - gpio_request(GPIO_FN_SDHICD0, NULL); 1475 1484 gpio_request(GPIO_FN_SDHIWP0, NULL); 1476 1485 gpio_request(GPIO_FN_SDHICMD0, NULL); 1477 1486 gpio_request(GPIO_FN_SDHICLK0, NULL); ··· 1478 1489 gpio_request(GPIO_FN_SDHID0_2, NULL); 1479 1490 gpio_request(GPIO_FN_SDHID0_1, NULL); 1480 1491 gpio_request(GPIO_FN_SDHID0_0, NULL); 1481 - 1482 - ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd, 1483 - IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev); 1484 - if (!ret) 1485 - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; 1486 - else 1487 - pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret); 1488 1492 1489 1493 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) 1490 1494 /* enable SDHI1 */
+55 -1
arch/arm/mach-shmobile/headsmp.S
··· 16 16 17 17 __CPUINIT 18 18 19 + /* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks! 20 + * 21 + * The secondary kernel init calls v7_flush_dcache_all before it enables 22 + * the L1; however, the L1 comes out of reset in an undefined state, so 23 + * the clean + invalidate performed by v7_flush_dcache_all causes a bunch 24 + * of cache lines with uninitialized data and uninitialized tags to get 25 + * written out to memory, which does really unpleasant things to the main 26 + * processor. We fix this by performing an invalidate, rather than a 27 + * clean + invalidate, before jumping into the kernel. 28 + * 29 + * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs 30 + * to be called for both secondary cores startup and primary core resume 31 + * procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S. 32 + */ 33 + ENTRY(v7_invalidate_l1) 34 + mov r0, #0 35 + mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 36 + mcr p15, 2, r0, c0, c0, 0 37 + mrc p15, 1, r0, c0, c0, 0 38 + 39 + ldr r1, =0x7fff 40 + and r2, r1, r0, lsr #13 41 + 42 + ldr r1, =0x3ff 43 + 44 + and r3, r1, r0, lsr #3 @ NumWays - 1 45 + add r2, r2, #1 @ NumSets 46 + 47 + and r0, r0, #0x7 48 + add r0, r0, #4 @ SetShift 49 + 50 + clz r1, r3 @ WayShift 51 + add r4, r3, #1 @ NumWays 52 + 1: sub r2, r2, #1 @ NumSets-- 53 + mov r3, r4 @ Temp = NumWays 54 + 2: subs r3, r3, #1 @ Temp-- 55 + mov r5, r3, lsl r1 56 + mov r6, r2, lsl r0 57 + orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) 58 + mcr p15, 0, r5, c7, c6, 2 59 + bgt 2b 60 + cmp r2, #0 61 + bgt 1b 62 + dsb 63 + isb 64 + mov pc, lr 65 + ENDPROC(v7_invalidate_l1) 66 + 67 + ENTRY(shmobile_invalidate_start) 68 + bl v7_invalidate_l1 69 + b secondary_startup 70 + ENDPROC(shmobile_invalidate_start) 71 + 19 72 /* 20 73 * Reset vector for secondary CPUs. 21 74 * This will be mapped at address 0 by SBAR register. 
··· 77 24 .align 12 78 25 ENTRY(shmobile_secondary_vector) 79 26 ldr pc, 1f 80 - 1: .long secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET 27 + 1: .long shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET 28 + ENDPROC(shmobile_secondary_vector)
+1 -1
arch/arm/mach-shmobile/include/mach/common.h
··· 4 4 extern void shmobile_earlytimer_init(void); 5 5 extern struct sys_timer shmobile_timer; 6 6 struct twd_local_timer; 7 - void shmobile_twd_init(struct twd_local_timer *twd_local_timer); 8 7 extern void shmobile_setup_console(void); 9 8 extern void shmobile_secondary_vector(void); 10 9 extern int shmobile_platform_cpu_kill(unsigned int cpu); ··· 81 82 extern void r8a7779_secondary_init(unsigned int cpu); 82 83 extern int r8a7779_boot_secondary(unsigned int cpu); 83 84 extern void r8a7779_smp_prepare_cpus(void); 85 + extern void r8a7779_register_twd(void); 84 86 85 87 #endif /* __ARCH_MACH_COMMON_H */
+4
arch/arm/mach-shmobile/setup-r8a7779.c
··· 262 262 ARRAY_SIZE(r8a7779_late_devices)); 263 263 } 264 264 265 + /* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ 266 + void __init __weak r8a7779_register_twd(void) { } 267 + 265 268 static void __init r8a7779_earlytimer_init(void) 266 269 { 267 270 r8a7779_clock_init(); 268 271 shmobile_earlytimer_init(); 272 + r8a7779_register_twd(); 269 273 } 270 274 271 275 void __init r8a7779_add_early_devices(void)
+4
arch/arm/mach-shmobile/setup-sh73a0.c
··· 688 688 ARRAY_SIZE(sh73a0_late_devices)); 689 689 } 690 690 691 + /* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ 692 + void __init __weak sh73a0_register_twd(void) { } 693 + 691 694 static void __init sh73a0_earlytimer_init(void) 692 695 { 693 696 sh73a0_clock_init(); 694 697 shmobile_earlytimer_init(); 698 + sh73a0_register_twd(); 695 699 } 696 700 697 701 void __init sh73a0_add_early_devices(void)
+7 -1
arch/arm/mach-shmobile/smp-r8a7779.c
··· 64 64 static DEFINE_SPINLOCK(scu_lock); 65 65 static unsigned long tmp; 66 66 67 + #ifdef CONFIG_HAVE_ARM_TWD 67 68 static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); 69 + 70 + void __init r8a7779_register_twd(void) 71 + { 72 + twd_local_timer_register(&twd_local_timer); 73 + } 74 + #endif 68 75 69 76 static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) 70 77 { ··· 91 84 { 92 85 void __iomem *scu_base = scu_base_addr(); 93 86 94 - shmobile_twd_init(&twd_local_timer); 95 87 return scu_get_core_count(scu_base); 96 88 } 97 89
+6 -1
arch/arm/mach-shmobile/smp-sh73a0.c
··· 42 42 static DEFINE_SPINLOCK(scu_lock); 43 43 static unsigned long tmp; 44 44 45 + #ifdef CONFIG_HAVE_ARM_TWD 45 46 static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); 47 + void __init sh73a0_register_twd(void) 48 + { 49 + twd_local_timer_register(&twd_local_timer); 50 + } 51 + #endif 46 52 47 53 static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) 48 54 { ··· 68 62 { 69 63 void __iomem *scu_base = scu_base_addr(); 70 64 71 - shmobile_twd_init(&twd_local_timer); 72 65 return scu_get_core_count(scu_base); 73 66 } 74 67
-9
arch/arm/mach-shmobile/timer.c
··· 46 46 { 47 47 } 48 48 49 - void __init shmobile_twd_init(struct twd_local_timer *twd_local_timer) 50 - { 51 - #ifdef CONFIG_HAVE_ARM_TWD 52 - int err = twd_local_timer_register(twd_local_timer); 53 - if (err) 54 - pr_err("twd_local_timer_register failed %d\n", err); 55 - #endif 56 - } 57 - 58 49 struct sys_timer shmobile_timer = { 59 50 .init = shmobile_timer_init, 60 51 };
+2 -2
arch/arm/mach-tegra/flowctrl.c
··· 53 53 54 54 void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value) 55 55 { 56 - return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); 56 + return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); 57 57 } 58 58 59 59 void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) 60 60 { 61 - return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); 61 + return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); 62 62 }
+3 -1
arch/arm/mm/fault.c
··· 247 247 return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); 248 248 249 249 check_stack: 250 - if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) 250 + /* Don't allow expansion below FIRST_USER_ADDRESS */ 251 + if (vma->vm_flags & VM_GROWSDOWN && 252 + addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr)) 251 253 goto good_area; 252 254 out: 253 255 return fault;
+2 -1
arch/arm/mm/mmu.c
··· 489 489 */ 490 490 for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 491 491 mem_types[i].prot_pte |= PTE_EXT_AF; 492 - mem_types[i].prot_sect |= PMD_SECT_AF; 492 + if (mem_types[i].prot_sect) 493 + mem_types[i].prot_sect |= PMD_SECT_AF; 493 494 } 494 495 kern_pgprot |= PTE_EXT_AF; 495 496 vecs_pgprot |= PTE_EXT_AF;
+8 -16
arch/arm/vfp/vfpmodule.c
··· 11 11 #include <linux/types.h> 12 12 #include <linux/cpu.h> 13 13 #include <linux/cpu_pm.h> 14 + #include <linux/hardirq.h> 14 15 #include <linux/kernel.h> 15 16 #include <linux/notifier.h> 16 17 #include <linux/signal.h> ··· 433 432 434 433 static void vfp_enable(void *unused) 435 434 { 436 - u32 access = get_copro_access(); 435 + u32 access; 436 + 437 + BUG_ON(preemptible()); 438 + access = get_copro_access(); 437 439 438 440 /* 439 441 * Enable full access to VFP (cp10 and cp11) ··· 577 573 * entry. 578 574 */ 579 575 hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK); 580 - 581 - /* 582 - * Disable VFP in the hwstate so that we can detect if it gets 583 - * used. 584 - */ 585 - hwstate->fpexc &= ~FPEXC_EN; 586 576 return 0; 587 577 } 588 578 ··· 589 591 unsigned long fpexc; 590 592 int err = 0; 591 593 592 - /* 593 - * If VFP has been used, then disable it to avoid corrupting 594 - * the new thread state. 595 - */ 596 - if (hwstate->fpexc & FPEXC_EN) 597 - vfp_flush_hwstate(thread); 594 + /* Disable VFP to avoid corrupting the new thread state. */ 595 + vfp_flush_hwstate(thread); 598 596 599 597 /* 600 598 * Copy the floating point registers. There can be unused ··· 651 657 unsigned int cpu_arch = cpu_architecture(); 652 658 653 659 if (cpu_arch >= CPU_ARCH_ARMv6) 654 - vfp_enable(NULL); 660 + on_each_cpu(vfp_enable, NULL, 1); 655 661 656 662 /* 657 663 * First check that there is a VFP that we can use. ··· 671 677 printk("no double precision support\n"); 672 678 } else { 673 679 hotcpu_notifier(vfp_hotplug, 0); 674 - 675 - smp_call_function(vfp_enable, NULL, 1); 676 680 677 681 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ 678 682 printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+3 -3
arch/m68k/platform/520x/config.c
··· 22 22 23 23 /***************************************************************************/ 24 24 25 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 25 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 26 26 27 27 static void __init m520x_qspi_init(void) 28 28 { ··· 35 35 writew(par, MCF_GPIO_PAR_UART); 36 36 } 37 37 38 - #endif /* CONFIG_SPI_COLDFIRE_QSPI */ 38 + #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ 39 39 40 40 /***************************************************************************/ 41 41 ··· 79 79 mach_sched_init = hw_timer_init; 80 80 m520x_uarts_init(); 81 81 m520x_fec_init(); 82 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 82 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 83 83 m520x_qspi_init(); 84 84 #endif 85 85 }
+3 -3
arch/m68k/platform/523x/config.c
··· 22 22 23 23 /***************************************************************************/ 24 24 25 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 25 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 26 26 27 27 static void __init m523x_qspi_init(void) 28 28 { ··· 36 36 writew(par, MCFGPIO_PAR_TIMER); 37 37 } 38 38 39 - #endif /* CONFIG_SPI_COLDFIRE_QSPI */ 39 + #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ 40 40 41 41 /***************************************************************************/ 42 42 ··· 58 58 { 59 59 mach_sched_init = hw_timer_init; 60 60 m523x_fec_init(); 61 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 61 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 62 62 m523x_qspi_init(); 63 63 #endif 64 64 }
+3 -3
arch/m68k/platform/5249/config.c
··· 51 51 52 52 /***************************************************************************/ 53 53 54 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 54 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 55 55 56 56 static void __init m5249_qspi_init(void) 57 57 { ··· 61 61 mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI); 62 62 } 63 63 64 - #endif /* CONFIG_SPI_COLDFIRE_QSPI */ 64 + #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ 65 65 66 66 /***************************************************************************/ 67 67 ··· 90 90 #ifdef CONFIG_M5249C3 91 91 m5249_smc91x_init(); 92 92 #endif 93 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 93 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 94 94 m5249_qspi_init(); 95 95 #endif 96 96 }
+3 -3
arch/m68k/platform/527x/config.c
··· 23 23 24 24 /***************************************************************************/ 25 25 26 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 26 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 27 27 28 28 static void __init m527x_qspi_init(void) 29 29 { ··· 42 42 #endif 43 43 } 44 44 45 - #endif /* CONFIG_SPI_COLDFIRE_QSPI */ 45 + #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ 46 46 47 47 /***************************************************************************/ 48 48 ··· 90 90 mach_sched_init = hw_timer_init; 91 91 m527x_uarts_init(); 92 92 m527x_fec_init(); 93 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 93 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 94 94 m527x_qspi_init(); 95 95 #endif 96 96 }
+3 -3
arch/m68k/platform/528x/config.c
··· 24 24 25 25 /***************************************************************************/ 26 26 27 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 27 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 28 28 29 29 static void __init m528x_qspi_init(void) 30 30 { ··· 32 32 __raw_writeb(0x07, MCFGPIO_PQSPAR); 33 33 } 34 34 35 - #endif /* CONFIG_SPI_COLDFIRE_QSPI */ 35 + #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ 36 36 37 37 /***************************************************************************/ 38 38 ··· 98 98 mach_sched_init = hw_timer_init; 99 99 m528x_uarts_init(); 100 100 m528x_fec_init(); 101 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 101 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 102 102 m528x_qspi_init(); 103 103 #endif 104 104 }
+3 -3
arch/m68k/platform/532x/config.c
··· 30 30 31 31 /***************************************************************************/ 32 32 33 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 33 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 34 34 35 35 static void __init m532x_qspi_init(void) 36 36 { ··· 38 38 writew(0x01f0, MCF_GPIO_PAR_QSPI); 39 39 } 40 40 41 - #endif /* CONFIG_SPI_COLDFIRE_QSPI */ 41 + #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ 42 42 43 43 /***************************************************************************/ 44 44 ··· 77 77 mach_sched_init = hw_timer_init; 78 78 m532x_uarts_init(); 79 79 m532x_fec_init(); 80 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 80 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 81 81 m532x_qspi_init(); 82 82 #endif 83 83
+3 -3
arch/m68k/platform/coldfire/device.c
··· 121 121 #endif /* MCFFEC_BASE1 */ 122 122 #endif /* CONFIG_FEC */ 123 123 124 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 124 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 125 125 /* 126 126 * The ColdFire QSPI module is an SPI protocol hardware block used 127 127 * on a number of different ColdFire CPUs. ··· 274 274 .resource = mcf_qspi_resources, 275 275 .dev.platform_data = &mcf_qspi_data, 276 276 }; 277 - #endif /* CONFIG_SPI_COLDFIRE_QSPI */ 277 + #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ 278 278 279 279 static struct platform_device *mcf_devices[] __initdata = { 280 280 &mcf_uart, ··· 284 284 &mcf_fec1, 285 285 #endif 286 286 #endif 287 - #ifdef CONFIG_SPI_COLDFIRE_QSPI 287 + #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) 288 288 &mcf_qspi, 289 289 #endif 290 290 };
+6 -3
arch/mn10300/kernel/smp.c
··· 24 24 #include <linux/sched.h> 25 25 #include <linux/profile.h> 26 26 #include <linux/smp.h> 27 + #include <linux/cpu.h> 27 28 #include <asm/tlbflush.h> 28 29 #include <asm/bitops.h> 29 30 #include <asm/processor.h> ··· 39 38 #include "internal.h" 40 39 41 40 #ifdef CONFIG_HOTPLUG_CPU 42 - #include <linux/cpu.h> 43 41 #include <asm/cacheflush.h> 44 42 45 43 static unsigned long sleep_mode[NR_CPUS]; ··· 874 874 875 875 cpu = smp_processor_id(); 876 876 877 - local_irq_enable(); 877 + notify_cpu_starting(cpu); 878 878 879 + ipi_call_lock(); 879 880 set_cpu_online(cpu, true); 880 - smp_wmb(); 881 + ipi_call_unlock(); 882 + 883 + local_irq_enable(); 881 884 } 882 885 883 886 /**
+2 -1
arch/parisc/include/asm/hardware.h
··· 2 2 #define _PARISC_HARDWARE_H 3 3 4 4 #include <linux/mod_devicetable.h> 5 - #include <asm/pdc.h> 6 5 7 6 #define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID 8 7 #define HVERSION_ANY_ID PA_HVERSION_ANY_ID ··· 94 95 #define HPHW_MC 15 95 96 #define HPHW_FAULTY 31 96 97 98 + struct parisc_device_id; 97 99 98 100 /* hardware.c: */ 99 101 extern const char *parisc_hardware_description(struct parisc_device_id *id); 100 102 extern enum cpu_type parisc_get_cpu_type(unsigned long hversion); 101 103 102 104 struct pci_dev; 105 + struct hardware_path; 103 106 104 107 /* drivers.c: */ 105 108 extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
+6
arch/parisc/include/asm/page.h
··· 160 160 161 161 #include <asm-generic/memory_model.h> 162 162 #include <asm-generic/getorder.h> 163 + #include <asm/pdc.h> 164 + 165 + #define PAGE0 ((struct zeropage *)__PAGE_OFFSET) 166 + 167 + /* DEFINITION OF THE ZERO-PAGE (PAG0) */ 168 + /* based on work by Jason Eckhardt (jason@equator.com) */ 163 169 164 170 #endif /* _PARISC_PAGE_H */
-7
arch/parisc/include/asm/pdc.h
··· 343 343 344 344 #ifdef __KERNEL__ 345 345 346 - #include <asm/page.h> /* for __PAGE_OFFSET */ 347 - 348 346 extern int pdc_type; 349 347 350 348 /* Values for pdc_type */ ··· 674 676 } 675 677 676 678 #endif /* __KERNEL__ */ 677 - 678 - #define PAGE0 ((struct zeropage *)__PAGE_OFFSET) 679 - 680 - /* DEFINITION OF THE ZERO-PAGE (PAG0) */ 681 - /* based on work by Jason Eckhardt (jason@equator.com) */ 682 679 683 680 /* flags of the device_path */ 684 681 #define PF_AUTOBOOT 0x80
+2
arch/parisc/include/asm/pgtable.h
··· 44 44 45 45 #endif /* !__ASSEMBLY__ */ 46 46 47 + #include <asm/page.h> 48 + 47 49 #define pte_ERROR(e) \ 48 50 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 49 51 #define pmd_ERROR(e) \
+2
arch/parisc/include/asm/spinlock.h
··· 1 1 #ifndef __ASM_SPINLOCK_H 2 2 #define __ASM_SPINLOCK_H 3 3 4 + #include <asm/barrier.h> 5 + #include <asm/ldcw.h> 4 6 #include <asm/processor.h> 5 7 #include <asm/spinlock_types.h> 6 8
+1
arch/parisc/kernel/pdc_cons.c
··· 50 50 #include <linux/init.h> 51 51 #include <linux/major.h> 52 52 #include <linux/tty.h> 53 + #include <asm/page.h> /* for PAGE0 */ 53 54 #include <asm/pdc.h> /* for iodc_call() proto and friends */ 54 55 55 56 static DEFINE_SPINLOCK(pdc_console_lock);
+7 -1
arch/parisc/kernel/smp.c
··· 31 31 #include <linux/delay.h> 32 32 #include <linux/bitops.h> 33 33 #include <linux/ftrace.h> 34 + #include <linux/cpu.h> 34 35 35 36 #include <linux/atomic.h> 36 37 #include <asm/current.h> ··· 296 295 297 296 printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); 298 297 machine_halt(); 299 - } 298 + } 299 + 300 + notify_cpu_starting(cpunum); 301 + 302 + ipi_call_lock(); 300 303 set_cpu_online(cpunum, true); 304 + ipi_call_unlock(); 301 305 302 306 /* Initialise the idle task for this CPU */ 303 307 atomic_inc(&init_mm.mm_count);
+1
arch/parisc/kernel/time.c
··· 29 29 #include <asm/uaccess.h> 30 30 #include <asm/io.h> 31 31 #include <asm/irq.h> 32 + #include <asm/page.h> 32 33 #include <asm/param.h> 33 34 #include <asm/pdc.h> 34 35 #include <asm/led.h>
+4 -3
arch/powerpc/include/asm/kvm_book3s.h
··· 81 81 u64 sdr1; 82 82 u64 hior; 83 83 u64 msr_mask; 84 - u64 vsid_next; 85 84 #ifdef CONFIG_PPC_BOOK3S_32 86 85 u32 vsid_pool[VSID_POOL_SIZE]; 86 + u32 vsid_next; 87 87 #else 88 - u64 vsid_first; 89 - u64 vsid_max; 88 + u64 proto_vsid_first; 89 + u64 proto_vsid_max; 90 + u64 proto_vsid_next; 90 91 #endif 91 92 int context_id[SID_CONTEXTS]; 92 93
+31 -13
arch/powerpc/kernel/entry_64.S
··· 588 588 fast_exc_return_irq: 589 589 restore: 590 590 /* 591 - * This is the main kernel exit path, we first check if we 592 - * have to change our interrupt state. 591 + * This is the main kernel exit path. First we check if we 592 + * are about to re-enable interrupts 593 593 */ 594 594 ld r5,SOFTE(r1) 595 595 lbz r6,PACASOFTIRQEN(r13) 596 - cmpwi cr1,r5,0 597 - cmpw cr0,r5,r6 598 - beq cr0,4f 596 + cmpwi cr0,r5,0 597 + beq restore_irq_off 599 598 600 - /* We do, handle disable first, which is easy */ 601 - bne cr1,3f; 602 - li r0,0 603 - stb r0,PACASOFTIRQEN(r13); 604 - TRACE_DISABLE_INTS 605 - b 4f 599 + /* We are enabling, were we already enabled ? Yes, just return */ 600 + cmpwi cr0,r6,1 601 + beq cr0,do_restore 606 602 607 - 3: /* 603 + /* 608 604 * We are about to soft-enable interrupts (we are hard disabled 609 605 * at this point). We check if there's anything that needs to 610 606 * be replayed first. ··· 622 626 /* 623 627 * Final return path. BookE is handled in a different file 624 628 */ 625 - 4: 629 + do_restore: 626 630 #ifdef CONFIG_PPC_BOOK3E 627 631 b .exception_return_book3e 628 632 #else ··· 696 700 #endif /* CONFIG_PPC_BOOK3E */ 697 701 698 702 /* 703 + * We are returning to a context with interrupts soft disabled. 704 + * 705 + * However, we may also about to hard enable, so we need to 706 + * make sure that in this case, we also clear PACA_IRQ_HARD_DIS 707 + * or that bit can get out of sync and bad things will happen 708 + */ 709 + restore_irq_off: 710 + ld r3,_MSR(r1) 711 + lbz r7,PACAIRQHAPPENED(r13) 712 + andi. r0,r3,MSR_EE 713 + beq 1f 714 + rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS 715 + stb r7,PACAIRQHAPPENED(r13) 716 + 1: li r0,0 717 + stb r0,PACASOFTIRQEN(r13); 718 + TRACE_DISABLE_INTS 719 + b do_restore 720 + 721 + /* 699 722 * Something did happen, check if a re-emit is needed 700 723 * (this also clears paca->irq_happened) 701 724 */ ··· 763 748 #endif /* CONFIG_PPC_BOOK3E */ 764 749 1: b .ret_from_except /* What else to do here ? 
*/ 765 750 751 + 752 + 753 + 3: 766 754 do_work: 767 755 #ifdef CONFIG_PREEMPT 768 756 andi. r0,r3,MSR_PR /* Returning to user mode? */
+13
arch/powerpc/kernel/irq.c
··· 229 229 */ 230 230 if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) 231 231 __hard_irq_disable(); 232 + #ifdef CONFIG_TRACE_IRQFLAG 233 + else { 234 + /* 235 + * We should already be hard disabled here. We had bugs 236 + * where that wasn't the case so let's dbl check it and 237 + * warn if we are wrong. Only do that when IRQ tracing 238 + * is enabled as mfmsr() can be costly. 239 + */ 240 + if (WARN_ON(mfmsr() & MSR_EE)) 241 + __hard_irq_disable(); 242 + } 243 + #endif /* CONFIG_TRACE_IRQFLAG */ 244 + 232 245 set_soft_enabled(0); 233 246 234 247 /*
+7 -6
arch/powerpc/kvm/book3s_64_mmu_host.c
··· 194 194 backwards_map = !backwards_map; 195 195 196 196 /* Uh-oh ... out of mappings. Let's flush! */ 197 - if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) { 198 - vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; 197 + if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) { 198 + vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first; 199 199 memset(vcpu_book3s->sid_map, 0, 200 200 sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); 201 201 kvmppc_mmu_pte_flush(vcpu, 0, 0); 202 202 kvmppc_mmu_flush_segments(vcpu); 203 203 } 204 - map->host_vsid = vcpu_book3s->vsid_next++; 204 + map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M); 205 205 206 206 map->guest_vsid = gvsid; 207 207 map->valid = true; ··· 319 319 return -1; 320 320 vcpu3s->context_id[0] = err; 321 321 322 - vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1; 323 - vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; 324 - vcpu3s->vsid_next = vcpu3s->vsid_first; 322 + vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) 323 + << USER_ESID_BITS) - 1; 324 + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; 325 + vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; 325 326 326 327 kvmppc_mmu_hpte_init(vcpu); 327 328
+1
arch/powerpc/kvm/book3s_hv_rm_mmu.c
··· 463 463 /* insert R and C bits from PTE */ 464 464 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); 465 465 args[j] |= rcbits << (56 - 5); 466 + hp[0] = 0; 466 467 continue; 467 468 } 468 469
+28 -14
arch/powerpc/kvm/book3s_segment.S
··· 197 197 /* Save guest PC and MSR */ 198 198 #ifdef CONFIG_PPC64 199 199 BEGIN_FTR_SECTION 200 - andi. r0,r12,0x2 200 + andi. r0, r12, 0x2 201 + cmpwi cr1, r0, 0 201 202 beq 1f 202 203 mfspr r3,SPRN_HSRR0 203 204 mfspr r4,SPRN_HSRR1 ··· 251 250 beq ld_last_prev_inst 252 251 cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT 253 252 beq- ld_last_inst 253 + #ifdef CONFIG_PPC64 254 + BEGIN_FTR_SECTION 255 + cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST 256 + beq- ld_last_inst 257 + END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 258 + #endif 254 259 255 260 b no_ld_last_inst 256 261 ··· 323 316 * Having set up SRR0/1 with the address where we want 324 317 * to continue with relocation on (potentially in module 325 318 * space), we either just go straight there with rfi[d], 326 - * or we jump to an interrupt handler with bctr if there 327 - * is an interrupt to be handled first. In the latter 328 - * case, the rfi[d] at the end of the interrupt handler 329 - * will get us back to where we want to continue. 319 + * or we jump to an interrupt handler if there is an 320 + * interrupt to be handled first. In the latter case, 321 + * the rfi[d] at the end of the interrupt handler will 322 + * get us back to where we want to continue. 
330 323 */ 331 - 332 - cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 333 - beq 1f 334 - cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER 335 - beq 1f 336 - cmpwi r12, BOOK3S_INTERRUPT_PERFMON 337 - 1: mtctr r12 338 324 339 325 /* Register usage at this point: 340 326 * 341 327 * R1 = host R1 342 328 * R2 = host R2 329 + * R10 = raw exit handler id 343 330 * R12 = exit handler id 344 331 * R13 = shadow vcpu (32-bit) or PACA (64-bit) 345 332 * SVCPU.* = guest * ··· 343 342 PPC_LL r6, HSTATE_HOST_MSR(r13) 344 343 PPC_LL r8, HSTATE_VMHANDLER(r13) 345 344 346 - /* Restore host msr -> SRR1 */ 345 + #ifdef CONFIG_PPC64 346 + BEGIN_FTR_SECTION 347 + beq cr1, 1f 348 + mtspr SPRN_HSRR1, r6 349 + mtspr SPRN_HSRR0, r8 350 + END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 351 + #endif 352 + 1: /* Restore host msr -> SRR1 */ 347 353 mtsrr1 r6 348 354 /* Load highmem handler address */ 349 355 mtsrr0 r8 350 356 351 357 /* RFI into the highmem handler, or jump to interrupt handler */ 352 - beqctr 358 + cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 359 + beqa BOOK3S_INTERRUPT_EXTERNAL 360 + cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER 361 + beqa BOOK3S_INTERRUPT_DECREMENTER 362 + cmpwi r12, BOOK3S_INTERRUPT_PERFMON 363 + beqa BOOK3S_INTERRUPT_PERFMON 364 + 353 365 RFI 354 366 kvmppc_handler_trampoline_exit_end:
+2 -1
arch/sh/boards/board-edosk7705.c
··· 13 13 #include <linux/platform_device.h> 14 14 #include <linux/interrupt.h> 15 15 #include <linux/smc91x.h> 16 + #include <linux/sh_intc.h> 16 17 #include <asm/machvec.h> 17 18 #include <asm/sizes.h> 18 19 ··· 21 20 #define SMC_IO_OFFSET 0x300 22 21 #define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET) 23 22 24 - #define ETHERNET_IRQ 0x09 23 + #define ETHERNET_IRQ evt2irq(0x320) 25 24 26 25 static void __init sh_edosk7705_init_irq(void) 27 26 {
+7 -8
arch/sh/boards/board-edosk7760.c
··· 23 23 #include <linux/platform_device.h> 24 24 #include <linux/smc91x.h> 25 25 #include <linux/interrupt.h> 26 + #include <linux/sh_intc.h> 26 27 #include <linux/i2c.h> 27 28 #include <linux/mtd/physmap.h> 28 29 #include <asm/machvec.h> ··· 40 39 #define SMC_IOBASE 0xA2000000 41 40 #define SMC_IO_OFFSET 0x300 42 41 #define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET) 43 - 44 - #define ETHERNET_IRQ 5 45 42 46 43 /* NOR flash */ 47 44 static struct mtd_partition edosk7760_nor_flash_partitions[] = { ··· 98 99 .end = SH7760_I2C1_MMIOEND, 99 100 .flags = IORESOURCE_MEM, 100 101 },{ 101 - .start = SH7760_I2C1_IRQ, 102 - .end = SH7760_I2C1_IRQ, 102 + .start = evt2irq(0x9e0), 103 + .end = evt2irq(0x9e0), 103 104 .flags = IORESOURCE_IRQ, 104 105 }, 105 106 }; ··· 121 122 .end = SH7760_I2C0_MMIOEND, 122 123 .flags = IORESOURCE_MEM, 123 124 }, { 124 - .start = SH7760_I2C0_IRQ, 125 - .end = SH7760_I2C0_IRQ, 125 + .start = evt2irq(0x9c0), 126 + .end = evt2irq(0x9c0), 126 127 .flags = IORESOURCE_IRQ, 127 128 }, 128 129 }; ··· 149 150 .flags = IORESOURCE_MEM, 150 151 }, 151 152 [1] = { 152 - .start = ETHERNET_IRQ, 153 - .end = ETHERNET_IRQ, 153 + .start = evt2irq(0x2a0), 154 + .end = evt2irq(0x2a0), 154 155 .flags = IORESOURCE_IRQ , 155 156 } 156 157 };
+2 -1
arch/sh/boards/board-espt.c
··· 14 14 #include <linux/mtd/physmap.h> 15 15 #include <linux/io.h> 16 16 #include <linux/sh_eth.h> 17 + #include <linux/sh_intc.h> 17 18 #include <asm/machvec.h> 18 19 #include <asm/sizes.h> 19 20 ··· 72 71 .flags = IORESOURCE_MEM, 73 72 }, { 74 73 75 - .start = 57, /* irq number */ 74 + .start = evt2irq(0x920), /* irq number */ 76 75 .flags = IORESOURCE_IRQ, 77 76 }, 78 77 };
+13 -12
arch/sh/boards/board-magicpanelr2.c
··· 19 19 #include <linux/mtd/partitions.h> 20 20 #include <linux/mtd/physmap.h> 21 21 #include <linux/mtd/map.h> 22 + #include <linux/sh_intc.h> 22 23 #include <mach/magicpanelr2.h> 23 24 #include <asm/heartbeat.h> 24 25 #include <cpu/sh7720.h> ··· 246 245 .flags = IORESOURCE_MEM, 247 246 }, 248 247 [1] = { 249 - .start = 35, 250 - .end = 35, 248 + .start = evt2irq(0x660), 249 + .end = evt2irq(0x660), 251 250 .flags = IORESOURCE_IRQ, 252 251 }, 253 252 }; ··· 359 358 { 360 359 plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */ 361 360 362 - irq_set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */ 363 - irq_set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */ 364 - irq_set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */ 365 - irq_set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */ 366 - irq_set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */ 367 - irq_set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */ 361 + irq_set_irq_type(evt2irq(0x600), IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */ 362 + irq_set_irq_type(evt2irq(0x620), IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */ 363 + irq_set_irq_type(evt2irq(0x640), IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */ 364 + irq_set_irq_type(evt2irq(0x660), IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */ 365 + irq_set_irq_type(evt2irq(0x680), IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */ 366 + irq_set_irq_type(evt2irq(0x6a0), IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */ 368 367 369 - intc_set_priority(32, 13); /* IRQ0 CAN1 */ 370 - intc_set_priority(33, 13); /* IRQ0 CAN2 */ 371 - intc_set_priority(34, 13); /* IRQ0 CAN3 */ 372 - intc_set_priority(35, 6); /* IRQ3 SMSC9115 */ 368 + intc_set_priority(evt2irq(0x600), 13); /* IRQ0 CAN1 */ 369 + intc_set_priority(evt2irq(0x620), 13); /* IRQ0 CAN2 */ 370 + intc_set_priority(evt2irq(0x640), 13); /* IRQ0 CAN3 */ 371 + intc_set_priority(evt2irq(0x660), 6); /* IRQ3 SMSC9115 */ 373 372 } 374 373 375 374 /*
+14 -13
arch/sh/boards/board-sh7757lcr.c
··· 19 19 #include <linux/mmc/sh_mmcif.h> 20 20 #include <linux/mmc/sh_mobile_sdhi.h> 21 21 #include <linux/sh_eth.h> 22 + #include <linux/sh_intc.h> 22 23 #include <linux/usb/renesas_usbhs.h> 23 24 #include <cpu/sh7757.h> 24 25 #include <asm/heartbeat.h> ··· 66 65 .end = 0xfef001ff, 67 66 .flags = IORESOURCE_MEM, 68 67 }, { 69 - .start = 84, 70 - .end = 84, 68 + .start = evt2irq(0xc80), 69 + .end = evt2irq(0xc80), 71 70 .flags = IORESOURCE_IRQ, 72 71 }, 73 72 }; ··· 95 94 .end = 0xfef009ff, 96 95 .flags = IORESOURCE_MEM, 97 96 }, { 98 - .start = 84, 99 - .end = 84, 97 + .start = evt2irq(0xc80), 98 + .end = evt2irq(0xc80), 100 99 .flags = IORESOURCE_IRQ, 101 100 }, 102 101 }; ··· 140 139 .end = 0xfee01fff, 141 140 .flags = IORESOURCE_MEM, 142 141 }, { 143 - .start = 315, 144 - .end = 315, 142 + .start = evt2irq(0x2960), 143 + .end = evt2irq(0x2960), 145 144 .flags = IORESOURCE_IRQ, 146 145 }, 147 146 }; ··· 175 174 .end = 0xfee01fff, 176 175 .flags = IORESOURCE_MEM, 177 176 }, { 178 - .start = 316, 179 - .end = 316, 177 + .start = evt2irq(0x2980), 178 + .end = evt2irq(0x2980), 180 179 .flags = IORESOURCE_IRQ, 181 180 }, 182 181 }; ··· 207 206 .flags = IORESOURCE_MEM, 208 207 }, 209 208 [1] = { 210 - .start = 211, 209 + .start = evt2irq(0x1c60), 211 210 .flags = IORESOURCE_IRQ, 212 211 }, 213 212 [2] = { 214 - .start = 212, 213 + .start = evt2irq(0x1c80), 215 214 .flags = IORESOURCE_IRQ, 216 215 }, 217 216 }; ··· 249 248 .flags = IORESOURCE_MEM, 250 249 }, 251 250 [1] = { 252 - .start = 20, 251 + .start = evt2irq(0x480), 253 252 .flags = IORESOURCE_IRQ, 254 253 }, 255 254 }; ··· 285 284 .flags = IORESOURCE_MEM, 286 285 }, 287 286 [1] = { 288 - .start = 50, 289 - .end = 50, 287 + .start = evt2irq(0x840), 288 + .end = evt2irq(0x840), 290 289 .flags = IORESOURCE_IRQ, 291 290 }, 292 291 };
+8 -7
arch/sh/boards/board-sh7785lcr.c
··· 20 20 #include <linux/i2c-pca-platform.h> 21 21 #include <linux/i2c-algo-pca.h> 22 22 #include <linux/usb/r8a66597.h> 23 + #include <linux/sh_intc.h> 23 24 #include <linux/irq.h> 24 25 #include <linux/io.h> 25 26 #include <linux/clk.h> ··· 106 105 .flags = IORESOURCE_MEM, 107 106 }, 108 107 [1] = { 109 - .start = 2, 110 - .end = 2, 108 + .start = evt2irq(0x240), 109 + .end = evt2irq(0x240), 111 110 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 112 111 }, 113 112 }; ··· 136 135 .flags = IORESOURCE_MEM, 137 136 }, 138 137 [2] = { 139 - .start = 10, 138 + .start = evt2irq(0x340), 140 139 .flags = IORESOURCE_IRQ, 141 140 }, 142 141 }; ··· 224 223 .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, 225 224 }, 226 225 [1] = { 227 - .start = 12, 228 - .end = 12, 226 + .start = evt2irq(0x380), 227 + .end = evt2irq(0x380), 229 228 .flags = IORESOURCE_IRQ, 230 229 }, 231 230 }; ··· 237 236 .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, 238 237 }, 239 238 [1] = { 240 - .start = 12, 241 - .end = 12, 239 + .start = evt2irq(0x380), 240 + .end = evt2irq(0x380), 242 241 .flags = IORESOURCE_IRQ, 243 242 }, 244 243 };
+2 -1
arch/sh/boards/board-urquell.c
··· 20 20 #include <linux/gpio.h> 21 21 #include <linux/irq.h> 22 22 #include <linux/clk.h> 23 + #include <linux/sh_intc.h> 23 24 #include <mach/urquell.h> 24 25 #include <cpu/sh7786.h> 25 26 #include <asm/heartbeat.h> ··· 79 78 .flags = IORESOURCE_MEM, 80 79 }, 81 80 [1] = { 82 - .start = 11, 81 + .start = evt2irq(0x360), 83 82 .flags = IORESOURCE_IRQ, 84 83 }, 85 84 };
+8 -7
arch/sh/boards/mach-ap325rxa/setup.c
··· 23 23 #include <linux/smsc911x.h> 24 24 #include <linux/gpio.h> 25 25 #include <linux/videodev2.h> 26 + #include <linux/sh_intc.h> 26 27 #include <media/ov772x.h> 27 28 #include <media/soc_camera.h> 28 29 #include <media/soc_camera_platform.h> ··· 48 47 .flags = IORESOURCE_MEM, 49 48 }, 50 49 [1] = { 51 - .start = 35, 52 - .end = 35, 50 + .start = evt2irq(0x660), 51 + .end = evt2irq(0x660), 53 52 .flags = IORESOURCE_IRQ, 54 53 } 55 54 }; ··· 167 166 __raw_writew(0, FPGA_BKLREG); 168 167 gpio_set_value(GPIO_PTS3, 1); 169 168 } 170 - 169 + 171 170 return 0; 172 171 } 173 172 ··· 237 236 .flags = IORESOURCE_MEM, 238 237 }, 239 238 [1] = { 240 - .start = 28, 239 + .start = evt2irq(0x580), 241 240 .flags = IORESOURCE_IRQ, 242 241 }, 243 242 }; ··· 405 404 .flags = IORESOURCE_MEM, 406 405 }, 407 406 [1] = { 408 - .start = 52, 407 + .start = evt2irq(0x880), 409 408 .flags = IORESOURCE_IRQ, 410 409 }, 411 410 [2] = { ··· 431 430 .flags = IORESOURCE_MEM, 432 431 }, 433 432 [1] = { 434 - .start = 100, 433 + .start = evt2irq(0xe80), 435 434 .flags = IORESOURCE_IRQ, 436 435 }, 437 436 }; ··· 458 457 .flags = IORESOURCE_MEM, 459 458 }, 460 459 [1] = { 461 - .start = 23, 460 + .start = evt2irq(0x4e0), 462 461 .flags = IORESOURCE_IRQ, 463 462 }, 464 463 };
+23 -21
arch/sh/boards/mach-ecovec24/setup.c
··· 29 29 #include <linux/input.h> 30 30 #include <linux/input/sh_keysc.h> 31 31 #include <linux/sh_eth.h> 32 + #include <linux/sh_intc.h> 32 33 #include <linux/videodev2.h> 33 34 #include <video/sh_mobile_lcdc.h> 34 35 #include <sound/sh_fsi.h> ··· 138 137 .flags = IORESOURCE_MEM, 139 138 }, 140 139 [1] = { 141 - .start = 91, 140 + .start = evt2irq(0xd60), 142 141 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 143 142 }, 144 143 }; ··· 179 178 .flags = IORESOURCE_MEM, 180 179 }, 181 180 [1] = { 182 - .start = 65, 183 - .end = 65, 181 + .start = evt2irq(0xa20), 182 + .end = evt2irq(0xa20), 184 183 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 185 184 }, 186 185 }; ··· 215 214 .flags = IORESOURCE_MEM, 216 215 }, 217 216 [1] = { 218 - .start = 66, 219 - .end = 66, 217 + .start = evt2irq(0xa40), 218 + .end = evt2irq(0xa40), 220 219 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 221 220 }, 222 221 }; ··· 262 261 .flags = IORESOURCE_MEM, 263 262 }, 264 263 [1] = { 265 - .start = 66, 266 - .end = 66, 264 + .start = evt2irq(0xa40), 265 + .end = evt2irq(0xa40), 267 266 .flags = IORESOURCE_IRQ, 268 267 }, 269 268 }; ··· 349 348 .flags = IORESOURCE_MEM, 350 349 }, 351 350 [1] = { 352 - .start = 106, 351 + .start = evt2irq(0xf40), 353 352 .flags = IORESOURCE_IRQ, 354 353 }, 355 354 }; ··· 376 375 .flags = IORESOURCE_MEM, 377 376 }, 378 377 [1] = { 379 - .start = 52, 378 + .start = evt2irq(0x880), 380 379 .flags = IORESOURCE_IRQ, 381 380 }, 382 381 [2] = { ··· 407 406 .flags = IORESOURCE_MEM, 408 407 }, 409 408 [1] = { 410 - .start = 63, 409 + .start = evt2irq(0x9e0), 411 410 .flags = IORESOURCE_IRQ, 412 411 }, 413 412 [2] = { ··· 438 437 }, 439 438 { 440 439 I2C_BOARD_INFO("lis3lv02d", 0x1c), 441 - .irq = 33, 440 + .irq = evt2irq(0x620), 442 441 } 443 442 }; 444 443 ··· 464 463 .flags = IORESOURCE_MEM, 465 464 }, 466 465 [1] = { 467 - .start = 79, 466 + .start = evt2irq(0xbe0), 468 467 .flags = IORESOURCE_IRQ, 469 468 }, 470 469 }; ··· 480 479 }; 481 480 482 481 /* 
TouchScreen */ 483 - #define IRQ0 32 482 + #define IRQ0 evt2irq(0x600) 483 + 484 484 static int ts_get_pendown_state(void) 485 485 { 486 486 int val = 0; ··· 546 544 .flags = IORESOURCE_MEM, 547 545 }, 548 546 [1] = { 549 - .start = 100, 547 + .start = evt2irq(0xe80), 550 548 .flags = IORESOURCE_IRQ, 551 549 }, 552 550 }; ··· 590 588 .flags = IORESOURCE_MEM, 591 589 }, 592 590 [1] = { 593 - .start = 23, 591 + .start = evt2irq(0x4e0), 594 592 .flags = IORESOURCE_IRQ, 595 593 }, 596 594 }; ··· 655 653 .flags = IORESOURCE_MEM, 656 654 }, 657 655 [1] = { 658 - .start = 84, 656 + .start = evt2irq(0xc80), 659 657 .flags = IORESOURCE_IRQ, 660 658 }, 661 659 }; ··· 796 794 .flags = IORESOURCE_MEM, 797 795 }, 798 796 [1] = { 799 - .start = 108, 797 + .start = evt2irq(0xf80), 800 798 .flags = IORESOURCE_IRQ, 801 799 }, 802 800 }; ··· 820 818 .flags = IORESOURCE_MEM, 821 819 }, 822 820 [1] = { 823 - .start = 20, 821 + .start = evt2irq(0x480), 824 822 .flags = IORESOURCE_IRQ, 825 823 }, 826 824 }; ··· 857 855 .flags = IORESOURCE_MEM, 858 856 }, 859 857 [1] = { 860 - .start = 55, 858 + .start = evt2irq(0x8e0), 861 859 .flags = IORESOURCE_IRQ, 862 860 }, 863 861 }; ··· 893 891 }, 894 892 [1] = { 895 893 /* MMC2I */ 896 - .start = 29, 894 + .start = evt2irq(0x5a0), 897 895 .flags = IORESOURCE_IRQ, 898 896 }, 899 897 [2] = { 900 898 /* MMC3I */ 901 - .start = 30, 899 + .start = evt2irq(0x5c0), 902 900 .flags = IORESOURCE_IRQ, 903 901 }, 904 902 };
+2 -1
arch/sh/boards/mach-hp6xx/setup.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/platform_device.h> 15 15 #include <linux/irq.h> 16 + #include <linux/sh_intc.h> 16 17 #include <sound/sh_dac_audio.h> 17 18 #include <asm/hd64461.h> 18 19 #include <asm/io.h> ··· 36 35 .flags = IORESOURCE_MEM, 37 36 }, 38 37 [2] = { 39 - .start = 77, 38 + .start = evt2irq(0xba0), 40 39 .flags = IORESOURCE_IRQ, 41 40 }, 42 41 };
+8 -7
arch/sh/boards/mach-kfr2r09/setup.c
··· 23 23 #include <linux/i2c.h> 24 24 #include <linux/usb/r8a66597.h> 25 25 #include <linux/videodev2.h> 26 + #include <linux/sh_intc.h> 26 27 #include <media/rj54n1cb0c.h> 27 28 #include <media/soc_camera.h> 28 29 #include <media/sh_mobile_ceu.h> ··· 111 110 .flags = IORESOURCE_MEM, 112 111 }, 113 112 [1] = { 114 - .start = 79, 113 + .start = evt2irq(0xbe0), 115 114 .flags = IORESOURCE_IRQ, 116 115 }, 117 116 }; ··· 176 175 .flags = IORESOURCE_MEM, 177 176 }, 178 177 [1] = { 179 - .start = 106, 178 + .start = evt2irq(0xf40), 180 179 .flags = IORESOURCE_IRQ, 181 180 }, 182 181 }; ··· 201 200 .flags = IORESOURCE_MEM, 202 201 }, 203 202 [1] = { 204 - .start = 65, 205 - .end = 65, 203 + .start = evtirq(0xa20), 204 + .end = evtirq(0xa20), 206 205 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 207 206 }, 208 207 }; ··· 231 230 .flags = IORESOURCE_MEM, 232 231 }, 233 232 [1] = { 234 - .start = 52, 235 - .end = 52, 233 + .start = evt2irq(0x880), 234 + .end = evt2irq(0x880), 236 235 .flags = IORESOURCE_IRQ, 237 236 }, 238 237 [2] = { ··· 349 348 .flags = IORESOURCE_MEM, 350 349 }, 351 350 [1] = { 352 - .start = 100, 351 + .start = evt2irq(0xe80), 353 352 .flags = IORESOURCE_IRQ, 354 353 }, 355 354 };
+7 -6
arch/sh/boards/mach-migor/setup.c
··· 22 22 #include <linux/clk.h> 23 23 #include <linux/gpio.h> 24 24 #include <linux/videodev2.h> 25 + #include <linux/sh_intc.h> 25 26 #include <video/sh_mobile_lcdc.h> 26 27 #include <media/sh_mobile_ceu.h> 27 28 #include <media/ov772x.h> ··· 55 54 .flags = IORESOURCE_MEM, 56 55 }, 57 56 [1] = { 58 - .start = 32, /* IRQ0 */ 57 + .start = evt2irq(0x600), /* IRQ0 */ 59 58 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 60 59 }, 61 60 }; ··· 89 88 .flags = IORESOURCE_MEM, 90 89 }, 91 90 [1] = { 92 - .start = 79, 91 + .start = evt2irq(0xbe0), 93 92 .flags = IORESOURCE_IRQ, 94 93 }, 95 94 }; ··· 286 285 .flags = IORESOURCE_MEM, 287 286 }, 288 287 [1] = { 289 - .start = 28, 288 + .start = evt2irq(0x580), 290 289 .flags = IORESOURCE_IRQ, 291 290 }, 292 291 }; ··· 369 368 .flags = IORESOURCE_MEM, 370 369 }, 371 370 [1] = { 372 - .start = 52, 371 + .start = evt2irq(0x880), 373 372 .flags = IORESOURCE_IRQ, 374 373 }, 375 374 [2] = { ··· 395 394 .flags = IORESOURCE_MEM, 396 395 }, 397 396 [1] = { 398 - .start = 100, 397 + .start = evt2irq(0xe80), 399 398 .flags = IORESOURCE_IRQ, 400 399 }, 401 400 }; ··· 421 420 }, 422 421 { 423 422 I2C_BOARD_INFO("migor_ts", 0x51), 424 - .irq = 38, /* IRQ6 */ 423 + .irq = evt2irq(0x6c0), /* IRQ6 */ 425 424 }, 426 425 { 427 426 I2C_BOARD_INFO("wm8978", 0x1a),
+2 -1
arch/sh/boards/mach-se/7722/setup.c
··· 16 16 #include <linux/input.h> 17 17 #include <linux/input/sh_keysc.h> 18 18 #include <linux/smc91x.h> 19 + #include <linux/sh_intc.h> 19 20 #include <mach-se/mach/se7722.h> 20 21 #include <mach-se/mach/mrshpc.h> 21 22 #include <asm/machvec.h> ··· 115 114 .flags = IORESOURCE_MEM, 116 115 }, 117 116 [1] = { 118 - .start = 79, 117 + .start = evt2irq(0xbe0), 119 118 .flags = IORESOURCE_IRQ, 120 119 }, 121 120 };
+16 -15
arch/sh/boards/mach-se/7724/setup.c
··· 24 24 #include <linux/input/sh_keysc.h> 25 25 #include <linux/usb/r8a66597.h> 26 26 #include <linux/sh_eth.h> 27 + #include <linux/sh_intc.h> 27 28 #include <linux/videodev2.h> 28 29 #include <video/sh_mobile_lcdc.h> 29 30 #include <media/sh_mobile_ceu.h> ··· 198 197 .flags = IORESOURCE_MEM, 199 198 }, 200 199 [1] = { 201 - .start = 106, 200 + .start = evt2irq(0xf40), 202 201 .flags = IORESOURCE_IRQ, 203 202 }, 204 203 }; ··· 225 224 .flags = IORESOURCE_MEM, 226 225 }, 227 226 [1] = { 228 - .start = 52, 227 + .start = evt2irq(0x880), 229 228 .flags = IORESOURCE_IRQ, 230 229 }, 231 230 [2] = { ··· 256 255 .flags = IORESOURCE_MEM, 257 256 }, 258 257 [1] = { 259 - .start = 63, 258 + .start = evt2irq(0x9e0), 260 259 .flags = IORESOURCE_IRQ, 261 260 }, 262 261 [2] = { ··· 290 289 .flags = IORESOURCE_MEM, 291 290 }, 292 291 [1] = { 293 - .start = 108, 292 + .start = evt2irq(0xf80), 294 293 .flags = IORESOURCE_IRQ, 295 294 }, 296 295 }; ··· 344 343 .flags = IORESOURCE_MEM, 345 344 }, 346 345 [1] = { 347 - .start = 79, 346 + .start = evt2irq(0xbe0), 348 347 .flags = IORESOURCE_IRQ, 349 348 }, 350 349 }; ··· 367 366 .flags = IORESOURCE_MEM, 368 367 }, 369 368 [1] = { 370 - .start = 91, 369 + .start = evt2irq(0xd60), 371 370 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 372 371 }, 373 372 }; ··· 398 397 .flags = IORESOURCE_MEM, 399 398 }, 400 399 [1] = { 401 - .start = 65, 402 - .end = 65, 400 + .start = evt2irq(0xa20), 401 + .end = evt2irq(0xa20), 403 402 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 404 403 }, 405 404 }; ··· 427 426 .flags = IORESOURCE_MEM, 428 427 }, 429 428 [1] = { 430 - .start = 66, 431 - .end = 66, 429 + .start = evt2irq(0xa40), 430 + .end = evt2irq(0xa40), 432 431 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 433 432 }, 434 433 }; ··· 453 452 .flags = IORESOURCE_MEM, 454 453 }, 455 454 [1] = { 456 - .start = 100, 455 + .start = evt2irq(0xe80), 457 456 .flags = IORESOURCE_IRQ, 458 457 }, 459 458 }; ··· 482 481 .flags = IORESOURCE_MEM, 483 482 }, 
484 483 [1] = { 485 - .start = 23, 484 + .start = evt2irq(0x4e0), 486 485 .flags = IORESOURCE_IRQ, 487 486 }, 488 487 }; ··· 512 511 .flags = IORESOURCE_MEM, 513 512 }, 514 513 [1] = { 515 - .start = 20, 514 + .start = evt2irq(0x480), 516 515 .flags = IORESOURCE_IRQ, 517 516 }, 518 517 }; ··· 550 549 .flags = IORESOURCE_MEM, 551 550 }, 552 551 [1] = { 553 - .start = 55, 552 + .start = evt2irq(0x8e0), 554 553 .flags = IORESOURCE_IRQ, 555 554 }, 556 555 }; ··· 596 595 #define EEPROM_DATA 0xBA20600C 597 596 #define EEPROM_STAT 0xBA206010 598 597 #define EEPROM_STRT 0xBA206014 598 + 599 599 static int __init sh_eth_is_eeprom_ready(void) 600 600 { 601 601 int t = 10000; ··· 652 650 extern char ms7724se_sdram_enter_end; 653 651 extern char ms7724se_sdram_leave_start; 654 652 extern char ms7724se_sdram_leave_end; 655 - 656 653 657 654 static int __init arch_setup(void) 658 655 {
+3 -2
arch/sh/boards/mach-sh7763rdp/setup.c
··· 18 18 #include <linux/fb.h> 19 19 #include <linux/io.h> 20 20 #include <linux/sh_eth.h> 21 + #include <linux/sh_intc.h> 21 22 #include <mach/sh7763rdp.h> 22 23 #include <asm/sh7760fb.h> 23 24 ··· 68 67 * SH-Ether 69 68 * 70 69 * SH Ether of SH7763 has multi IRQ handling. 71 - * (57,58,59 -> 57) 70 + * (0x920,0x940,0x960 -> 0x920) 72 71 */ 73 72 static struct resource sh_eth_resources[] = { 74 73 { ··· 80 79 .end = 0xFEE01FFF, 81 80 .flags = IORESOURCE_MEM, 82 81 }, { 83 - .start = 57, /* irq number */ 82 + .start = evt2irq(0x920), /* irq number */ 84 83 .flags = IORESOURCE_IRQ, 85 84 }, 86 85 };
+2 -1
arch/sh/drivers/pci/fixups-landisk.c
··· 14 14 #include <linux/init.h> 15 15 #include <linux/delay.h> 16 16 #include <linux/pci.h> 17 + #include <linux/sh_intc.h> 17 18 #include "pci-sh4.h" 18 19 19 20 #define PCIMCR_MRSET_OFF 0xBFFFFFFF ··· 28 27 * slot2: pin1-4 = irq7,8,5,6 29 28 * slot3: pin1-4 = irq8,5,6,7 30 29 */ 31 - int irq = ((slot + pin - 1) & 0x3) + 5; 30 + int irq = ((slot + pin - 1) & 0x3) + evt2irq(0x2a0); 32 31 33 32 if ((slot | (pin - 1)) > 0x3) { 34 33 printk(KERN_WARNING "PCI: Bad IRQ mapping request for slot %d pin %c\n",
+2 -5
arch/sh/drivers/pci/fixups-r7780rp.c
··· 12 12 */ 13 13 #include <linux/pci.h> 14 14 #include <linux/io.h> 15 + #include <linux/sh_intc.h> 15 16 #include "pci-sh4.h" 16 - 17 - static char irq_tab[] __initdata = { 18 - 65, 66, 67, 68, 19 - }; 20 17 21 18 int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) 22 19 { 23 - return irq_tab[slot]; 20 + return evt2irq(0xa20) + slot; 24 21 }
+14 -4
arch/sh/drivers/pci/fixups-sdk7780.c
··· 13 13 */ 14 14 #include <linux/pci.h> 15 15 #include <linux/io.h> 16 + #include <linux/sh_intc.h> 16 17 #include "pci-sh4.h" 18 + 19 + #define IRQ_INTA evt2irq(0xa20) 20 + #define IRQ_INTB evt2irq(0xa40) 21 + #define IRQ_INTC evt2irq(0xa60) 22 + #define IRQ_INTD evt2irq(0xa80) 17 23 18 24 /* IDSEL [16][17][18][19][20][21][22][23][24][25][26][27][28][29][30][31] */ 19 25 static char sdk7780_irq_tab[4][16] __initdata = { 20 26 /* INTA */ 21 - { 65, 68, 67, 68, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 27 + { IRQ_INTA, IRQ_INTD, IRQ_INTC, IRQ_INTD, -1, -1, -1, -1, -1, -1, 28 + -1, -1, -1, -1, -1, -1 }, 22 29 /* INTB */ 23 - { 66, 65, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 30 + { IRQ_INTB, IRQ_INTA, -1, IRQ_INTA, -1, -1, -1, -1, -1, -1, -1, -1, 31 + -1, -1, -1, -1 }, 24 32 /* INTC */ 25 - { 67, 66, -1, 66, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 33 + { IRQ_INTC, IRQ_INTB, -1, IRQ_INTB, -1, -1, -1, -1, -1, -1, -1, -1, 34 + -1, -1, -1, -1 }, 26 35 /* INTD */ 27 - { 68, 67, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 36 + { IRQ_INTD, IRQ_INTC, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 37 + -1, -1, -1 }, 28 38 }; 29 39 30 40 int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
+3 -2
arch/sh/drivers/pci/fixups-se7751.c
··· 4 4 #include <linux/delay.h> 5 5 #include <linux/pci.h> 6 6 #include <linux/io.h> 7 + #include <linux/sh_intc.h> 7 8 #include "pci-sh4.h" 8 9 9 10 int __init pcibios_map_platform_irq(const struct pci_dev *, u8 slot, u8 pin) 10 11 { 11 12 switch (slot) { 12 - case 0: return 13; 13 - case 1: return 13; /* AMD Ethernet controller */ 13 + case 0: return evt2irq(0x3a0); 14 + case 1: return evt2irq(0x3a0); /* AMD Ethernet controller */ 14 15 case 2: return -1; 15 16 case 3: return -1; 16 17 case 4: return -1;
+10 -9
arch/sh/drivers/pci/fixups-sh03.c
··· 2 2 #include <linux/init.h> 3 3 #include <linux/types.h> 4 4 #include <linux/pci.h> 5 + #include <linux/sh_intc.h> 5 6 6 7 int __init pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin) 7 8 { ··· 10 9 11 10 if (dev->bus->number == 0) { 12 11 switch (slot) { 13 - case 4: return 5; /* eth0 */ 14 - case 8: return 5; /* eth1 */ 15 - case 6: return 2; /* PCI bridge */ 12 + case 4: return evt2irq(0x2a0); /* eth0 */ 13 + case 8: return evt2irq(0x2a0); /* eth1 */ 14 + case 6: return evt2irq(0x240); /* PCI bridge */ 16 15 default: 17 16 printk(KERN_ERR "PCI: Bad IRQ mapping request " 18 17 "for slot %d\n", slot); 19 - return 2; 18 + return evt2irq(0x240); 20 19 } 21 20 } else { 22 21 switch (pin) { 23 - case 0: irq = 2; break; 24 - case 1: irq = 2; break; 25 - case 2: irq = 2; break; 26 - case 3: irq = 2; break; 27 - case 4: irq = 2; break; 22 + case 0: irq = evt2irq(0x240); break; 23 + case 1: irq = evt2irq(0x240); break; 24 + case 2: irq = evt2irq(0x240); break; 25 + case 3: irq = evt2irq(0x240); break; 26 + case 4: irq = evt2irq(0x240); break; 28 27 default: irq = -1; break; 29 28 } 30 29 }
+6 -5
arch/sh/drivers/pci/fixups-snapgear.c
··· 16 16 #include <linux/types.h> 17 17 #include <linux/init.h> 18 18 #include <linux/pci.h> 19 + #include <linux/sh_intc.h> 19 20 #include "pci-sh4.h" 20 21 21 22 int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) ··· 25 24 26 25 switch (slot) { 27 26 case 8: /* the PCI bridge */ break; 28 - case 11: irq = 8; break; /* USB */ 29 - case 12: irq = 11; break; /* PCMCIA */ 30 - case 13: irq = 5; break; /* eth0 */ 31 - case 14: irq = 8; break; /* eth1 */ 32 - case 15: irq = 11; break; /* safenet (unused) */ 27 + case 11: irq = evt2irq(0x300); break; /* USB */ 28 + case 12: irq = evt2irq(0x360); break; /* PCMCIA */ 29 + case 13: irq = evt2irq(0x2a0); break; /* eth0 */ 30 + case 14: irq = evt2irq(0x300); break; /* eth1 */ 31 + case 15: irq = evt2irq(0x360); break; /* safenet (unused) */ 33 32 } 34 33 35 34 printk("PCI: Mapping SnapGear IRQ for slot %d, pin %c to irq %d\n",
+2 -1
arch/sh/drivers/pci/pcie-sh7786.c
··· 18 18 #include <linux/slab.h> 19 19 #include <linux/clk.h> 20 20 #include <linux/sh_clk.h> 21 + #include <linux/sh_intc.h> 21 22 #include "pcie-sh7786.h" 22 23 #include <asm/sizes.h> 23 24 ··· 469 468 470 469 int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) 471 470 { 472 - return 71; 471 + return evt2irq(0xae0); 473 472 } 474 473 475 474 static int __init sh7786_pcie_core_init(void)
-2
arch/sh/include/asm/i2c-sh7760.h
··· 9 9 10 10 #define SH7760_I2C0_MMIO 0xFE140000 11 11 #define SH7760_I2C0_MMIOEND 0xFE14003B 12 - #define SH7760_I2C0_IRQ 62 13 12 14 13 #define SH7760_I2C1_MMIO 0xFE150000 15 14 #define SH7760_I2C1_MMIOEND 0xFE15003B 16 - #define SH7760_I2C1_IRQ 63 17 15 18 16 struct sh7760_i2c_platdata { 19 17 unsigned int speed_khz;
+4 -2
arch/sh/include/cpu-sh3/cpu/dma.h
··· 1 1 #ifndef __ASM_CPU_SH3_DMA_H 2 2 #define __ASM_CPU_SH3_DMA_H 3 3 4 + #include <linux/sh_intc.h> 5 + 4 6 #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 5 7 defined(CONFIG_CPU_SUBTYPE_SH7721) || \ 6 8 defined(CONFIG_CPU_SUBTYPE_SH7710) || \ ··· 12 10 #define SH_DMAC_BASE0 0xa4000020 13 11 #endif 14 12 15 - #define DMTE0_IRQ 48 16 - #define DMTE4_IRQ 76 13 + #define DMTE0_IRQ evt2irq(0x800) 14 + #define DMTE4_IRQ evt2irq(0xb80) 17 15 18 16 /* Definitions for the SuperH DMAC */ 19 17 #define TM_BURST 0x00000020
+46 -44
arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
··· 1 1 #ifndef __ASM_SH_CPU_SH4_DMA_SH7780_H 2 2 #define __ASM_SH_CPU_SH4_DMA_SH7780_H 3 3 4 + #include <linux/sh_intc.h> 5 + 4 6 #if defined(CONFIG_CPU_SUBTYPE_SH7343) || \ 5 7 defined(CONFIG_CPU_SUBTYPE_SH7730) 6 - #define DMTE0_IRQ 48 7 - #define DMTE4_IRQ 76 8 - #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 8 + #define DMTE0_IRQ evt2irq(0x800) 9 + #define DMTE4_IRQ evt2irq(0xb80) 10 + #define DMAE0_IRQ evt2irq(0xbc0) /* DMA Error IRQ*/ 9 11 #define SH_DMAC_BASE0 0xFE008020 10 12 #define SH_DMARS_BASE0 0xFE009000 11 13 #elif defined(CONFIG_CPU_SUBTYPE_SH7722) 12 - #define DMTE0_IRQ 48 13 - #define DMTE4_IRQ 76 14 - #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 14 + #define DMTE0_IRQ evt2irq(0x800) 15 + #define DMTE4_IRQ evt2irq(0xb80) 16 + #define DMAE0_IRQ evt2irq(0xbc0) /* DMA Error IRQ*/ 15 17 #define SH_DMAC_BASE0 0xFE008020 16 18 #define SH_DMARS_BASE0 0xFE009000 17 19 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 18 20 defined(CONFIG_CPU_SUBTYPE_SH7764) 19 - #define DMTE0_IRQ 34 20 - #define DMTE4_IRQ 44 21 - #define DMAE0_IRQ 38 21 + #define DMTE0_IRQ evt2irq(0x640) 22 + #define DMTE4_IRQ evt2irq(0x780) 23 + #define DMAE0_IRQ evt2irq(0x6c0) 22 24 #define SH_DMAC_BASE0 0xFF608020 23 25 #define SH_DMARS_BASE0 0xFF609000 24 26 #elif defined(CONFIG_CPU_SUBTYPE_SH7723) 25 - #define DMTE0_IRQ 48 /* DMAC0A*/ 26 - #define DMTE4_IRQ 76 /* DMAC0B */ 27 - #define DMTE6_IRQ 40 28 - #define DMTE8_IRQ 42 /* DMAC1A */ 29 - #define DMTE9_IRQ 43 30 - #define DMTE10_IRQ 72 /* DMAC1B */ 31 - #define DMTE11_IRQ 73 32 - #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 33 - #define DMAE1_IRQ 74 /* DMA Error IRQ*/ 27 + #define DMTE0_IRQ evt2irq(0x800) /* DMAC0A*/ 28 + #define DMTE4_IRQ evt2irq(0xb80) /* DMAC0B */ 29 + #define DMTE6_IRQ evt2irq(0x700) 30 + #define DMTE8_IRQ evt2irq(0x740) /* DMAC1A */ 31 + #define DMTE9_IRQ evt2irq(0x760) 32 + #define DMTE10_IRQ evt2irq(0xb00) /* DMAC1B */ 33 + #define DMTE11_IRQ evt2irq(0xb20) 34 + #define DMAE0_IRQ evt2irq(0xbc0) /* DMA Error IRQ*/ 35 + #define 
DMAE1_IRQ evt2irq(0xb40) /* DMA Error IRQ*/ 34 36 #define SH_DMAC_BASE0 0xFE008020 35 37 #define SH_DMAC_BASE1 0xFDC08020 36 38 #define SH_DMARS_BASE0 0xFDC09000 37 39 #elif defined(CONFIG_CPU_SUBTYPE_SH7724) 38 - #define DMTE0_IRQ 48 /* DMAC0A*/ 39 - #define DMTE4_IRQ 76 /* DMAC0B */ 40 - #define DMTE6_IRQ 40 41 - #define DMTE8_IRQ 42 /* DMAC1A */ 42 - #define DMTE9_IRQ 43 43 - #define DMTE10_IRQ 72 /* DMAC1B */ 44 - #define DMTE11_IRQ 73 45 - #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 46 - #define DMAE1_IRQ 74 /* DMA Error IRQ*/ 40 + #define DMTE0_IRQ evt2irq(0x800) /* DMAC0A*/ 41 + #define DMTE4_IRQ evt2irq(0xb80) /* DMAC0B */ 42 + #define DMTE6_IRQ evt2irq(0x700) 43 + #define DMTE8_IRQ evt2irq(0x740) /* DMAC1A */ 44 + #define DMTE9_IRQ evt2irq(0x760) 45 + #define DMTE10_IRQ evt2irq(0xb00) /* DMAC1B */ 46 + #define DMTE11_IRQ evt2irq(0xb20) 47 + #define DMAE0_IRQ evt2irq(0xbc0) /* DMA Error IRQ*/ 48 + #define DMAE1_IRQ evt2irq(0xb40) /* DMA Error IRQ*/ 47 49 #define SH_DMAC_BASE0 0xFE008020 48 50 #define SH_DMAC_BASE1 0xFDC08020 49 51 #define SH_DMARS_BASE0 0xFE009000 50 52 #define SH_DMARS_BASE1 0xFDC09000 51 53 #elif defined(CONFIG_CPU_SUBTYPE_SH7780) 52 - #define DMTE0_IRQ 34 53 - #define DMTE4_IRQ 44 54 - #define DMTE6_IRQ 46 55 - #define DMTE8_IRQ 92 56 - #define DMTE9_IRQ 93 57 - #define DMTE10_IRQ 94 58 - #define DMTE11_IRQ 95 59 - #define DMAE0_IRQ 38 /* DMA Error IRQ */ 54 + #define DMTE0_IRQ evt2irq(0x640) 55 + #define DMTE4_IRQ evt2irq(0x780) 56 + #define DMTE6_IRQ evt2irq(0x7c0) 57 + #define DMTE8_IRQ evt2irq(0xd80) 58 + #define DMTE9_IRQ evt2irq(0xda0) 59 + #define DMTE10_IRQ evt2irq(0xdc0) 60 + #define DMTE11_IRQ evt2irq(0xde0) 61 + #define DMAE0_IRQ evt2irq(0x6c0) /* DMA Error IRQ */ 60 62 #define SH_DMAC_BASE0 0xFC808020 61 63 #define SH_DMAC_BASE1 0xFC818020 62 64 #define SH_DMARS_BASE0 0xFC809000 63 65 #else /* SH7785 */ 64 - #define DMTE0_IRQ 33 65 - #define DMTE4_IRQ 37 66 - #define DMTE6_IRQ 52 67 - #define DMTE8_IRQ 54 68 - #define DMTE9_IRQ 
55 69 - #define DMTE10_IRQ 56 70 - #define DMTE11_IRQ 57 71 - #define DMAE0_IRQ 39 /* DMA Error IRQ0 */ 72 - #define DMAE1_IRQ 58 /* DMA Error IRQ1 */ 66 + #define DMTE0_IRQ evt2irq(0x620) 67 + #define DMTE4_IRQ evt2irq(0x6a0) 68 + #define DMTE6_IRQ evt2irq(0x880) 69 + #define DMTE8_IRQ evt2irq(0x8c0) 70 + #define DMTE9_IRQ evt2irq(0x8e0) 71 + #define DMTE10_IRQ evt2irq(0x900) 72 + #define DMTE11_IRQ evt2irq(0x920) 73 + #define DMAE0_IRQ evt2irq(0x6e0) /* DMA Error IRQ0 */ 74 + #define DMAE1_IRQ evt2irq(0x940) /* DMA Error IRQ1 */ 73 75 #define SH_DMAC_BASE0 0xFC808020 74 76 #define SH_DMAC_BASE1 0xFCC08020 75 77 #define SH_DMARS_BASE0 0xFC809000
+7 -4
arch/sh/include/cpu-sh4/cpu/dma.h
··· 8 8 #include <cpu/dma-sh4a.h> 9 9 10 10 #else /* CONFIG_CPU_SH4A */ 11 + 12 + #include <linux/sh_intc.h> 13 + 11 14 /* 12 15 * SH7750/SH7751/SH7760 13 16 */ 14 - #define DMTE0_IRQ 34 15 - #define DMTE4_IRQ 44 16 - #define DMTE6_IRQ 46 17 - #define DMAE0_IRQ 38 17 + #define DMTE0_IRQ evt2irq(0x640) 18 + #define DMTE4_IRQ evt2irq(0x780) 19 + #define DMTE6_IRQ evt2irq(0x7c0) 20 + #define DMAE0_IRQ evt2irq(0x6c0) 18 21 19 22 #define SH_DMAC_BASE0 0xffa00000 20 23 #define SH_DMAC_BASE1 0xffa00070
+4 -3
arch/sh/include/mach-common/mach/hp6xx.h
··· 9 9 * for more details. 10 10 * 11 11 */ 12 + #include <linux/sh_intc.h> 12 13 13 - #define HP680_BTN_IRQ 32 /* IRQ0_IRQ */ 14 - #define HP680_TS_IRQ 35 /* IRQ3_IRQ */ 15 - #define HP680_HD64461_IRQ 36 /* IRQ4_IRQ */ 14 + #define HP680_BTN_IRQ evt2irq(0x600) /* IRQ0_IRQ */ 15 + #define HP680_TS_IRQ evt2irq(0x660) /* IRQ3_IRQ */ 16 + #define HP680_HD64461_IRQ evt2irq(0x680) /* IRQ4_IRQ */ 16 17 17 18 #define DAC_LCD_BRIGHTNESS 0 18 19 #define DAC_SPEAKER_VOLUME 1
+7 -6
arch/sh/include/mach-common/mach/lboxre2.h
··· 11 11 * for more details. 12 12 * 13 13 */ 14 + #include <linux/sh_intc.h> 14 15 15 - #define IRQ_CF1 9 /* CF1 */ 16 - #define IRQ_CF0 10 /* CF0 */ 17 - #define IRQ_INTD 11 /* INTD */ 18 - #define IRQ_ETH1 12 /* Ether1 */ 19 - #define IRQ_ETH0 13 /* Ether0 */ 20 - #define IRQ_INTA 14 /* INTA */ 16 + #define IRQ_CF1 evt2irq(0x320) /* CF1 */ 17 + #define IRQ_CF0 evt2irq(0x340) /* CF0 */ 18 + #define IRQ_INTD evt2irq(0x360) /* INTD */ 19 + #define IRQ_ETH1 evt2irq(0x380) /* Ether1 */ 20 + #define IRQ_ETH0 evt2irq(0x3a0) /* Ether0 */ 21 + #define IRQ_INTA evt2irq(0x3c0) /* INTA */ 21 22 22 23 void init_lboxre2_IRQ(void); 23 24
+3 -2
arch/sh/include/mach-common/mach/sdk7780.h
··· 11 11 * License. See the file "COPYING" in the main directory of this archive 12 12 * for more details. 13 13 */ 14 + #include <linux/sh_intc.h> 14 15 #include <asm/addrspace.h> 15 16 16 17 /* Box specific addresses. */ ··· 68 67 69 68 #define SDK7780_NR_IRL 15 70 69 /* IDE/ATA interrupt */ 71 - #define IRQ_CFCARD 14 70 + #define IRQ_CFCARD evt2irq(0x3c0) 72 71 /* SMC interrupt */ 73 - #define IRQ_ETHERNET 6 72 + #define IRQ_ETHERNET evt2irq(0x2c0) 74 73 75 74 76 75 /* arch/sh/boards/renesas/sdk7780/irq.c */
+7 -5
arch/sh/include/mach-common/mach/titan.h
··· 4 4 #ifndef _ASM_SH_TITAN_H 5 5 #define _ASM_SH_TITAN_H 6 6 7 + #include <linux/sh_intc.h> 8 + 7 9 #define __IO_PREFIX titan 8 10 #include <asm/io_generic.h> 9 11 10 12 /* IRQ assignments */ 11 - #define TITAN_IRQ_WAN 2 /* eth0 (WAN) */ 12 - #define TITAN_IRQ_LAN 5 /* eth1 (LAN) */ 13 - #define TITAN_IRQ_MPCIA 8 /* mPCI A */ 14 - #define TITAN_IRQ_MPCIB 11 /* mPCI B */ 15 - #define TITAN_IRQ_USB 11 /* USB */ 13 + #define TITAN_IRQ_WAN evt2irq(0x240) /* eth0 (WAN) */ 14 + #define TITAN_IRQ_LAN evt2irq(0x2a0) /* eth1 (LAN) */ 15 + #define TITAN_IRQ_MPCIA evt2irq(0x300) /* mPCI A */ 16 + #define TITAN_IRQ_MPCIB evt2irq(0x360) /* mPCI B */ 17 + #define TITAN_IRQ_USB evt2irq(0x360) /* USB */ 16 18 17 19 #endif /* __ASM_SH_TITAN_H */
+10 -9
arch/sh/include/mach-landisk/mach/iodata_landisk.h
··· 8 8 * 9 9 * IO-DATA LANDISK support 10 10 */ 11 + #include <linux/sh_intc.h> 11 12 12 13 /* Box specific addresses. */ 13 14 ··· 26 25 #define PA_PIDE_OFFSET 0x40 /* CF IDE Offset */ 27 26 #define PA_SIDE_OFFSET 0x40 /* HDD IDE Offset */ 28 27 29 - #define IRQ_PCIINTA 5 /* PCI INTA IRQ */ 30 - #define IRQ_PCIINTB 6 /* PCI INTB IRQ */ 31 - #define IRQ_PCIINTC 7 /* PCI INTC IRQ */ 32 - #define IRQ_PCIINTD 8 /* PCI INTD IRQ */ 33 - #define IRQ_ATA 9 /* ATA IRQ */ 34 - #define IRQ_FATA 10 /* FATA IRQ */ 35 - #define IRQ_POWER 11 /* Power Switch IRQ */ 36 - #define IRQ_BUTTON 12 /* USL-5P Button IRQ */ 37 - #define IRQ_FAULT 13 /* USL-5P Fault IRQ */ 28 + #define IRQ_PCIINTA evt2irq(0x2a0) /* PCI INTA IRQ */ 29 + #define IRQ_PCIINTB evt2irq(0x2c0) /* PCI INTB IRQ */ 30 + #define IRQ_PCIINTC evt2irq(0x2e0) /* PCI INTC IRQ */ 31 + #define IRQ_PCIINTD evt2irq(0x300) /* PCI INTD IRQ */ 32 + #define IRQ_ATA evt2irq(0x320) /* ATA IRQ */ 33 + #define IRQ_FATA evt2irq(0x340) /* FATA IRQ */ 34 + #define IRQ_POWER evt2irq(0x360) /* Power Switch IRQ */ 35 + #define IRQ_BUTTON evt2irq(0x380) /* USL-5P Button IRQ */ 36 + #define IRQ_FAULT evt2irq(0x3a0) /* USL-5P Fault IRQ */ 38 37 39 38 void init_landisk_IRQ(void); 40 39
+10 -9
arch/sh/include/mach-se/mach/se.h
··· 8 8 * 9 9 * Hitachi SolutionEngine support 10 10 */ 11 + #include <linux/sh_intc.h> 11 12 12 13 /* Box specific addresses. */ 13 14 ··· 83 82 #define INTC_IPRD 0xa4000018UL 84 83 #define INTC_IPRE 0xa400001aUL 85 84 86 - #define IRQ0_IRQ 32 87 - #define IRQ1_IRQ 33 85 + #define IRQ0_IRQ evt2irq(0x600) 86 + #define IRQ1_IRQ evt2irq(0x620) 88 87 #endif 89 88 90 89 #if defined(CONFIG_CPU_SUBTYPE_SH7705) 91 - #define IRQ_STNIC 12 92 - #define IRQ_CFCARD 14 90 + #define IRQ_STNIC evt2irq(0x380) 91 + #define IRQ_CFCARD evt2irq(0x3c0) 93 92 #else 94 - #define IRQ_STNIC 10 95 - #define IRQ_CFCARD 7 93 + #define IRQ_STNIC evt2irq(0x340) 94 + #define IRQ_CFCARD evt2irq(0x2e0) 96 95 #endif 97 96 98 97 /* SH Ether support (SH7710/SH7712) */ ··· 106 105 # define PHY_ID 0x01 107 106 #endif 108 107 /* Ether IRQ */ 109 - #define SH_ETH0_IRQ 80 110 - #define SH_ETH1_IRQ 81 111 - #define SH_TSU_IRQ 82 108 + #define SH_ETH0_IRQ evt2irq(0xc00) 109 + #define SH_ETH1_IRQ evt2irq(0xc20) 110 + #define SH_TSU_IRQ evt2irq(0xc40) 112 111 113 112 void init_se_IRQ(void); 114 113
+5 -4
arch/sh/include/mach-se/mach/se7343.h
··· 8 8 * 9 9 * SH-Mobile SolutionEngine 7343 support 10 10 */ 11 + #include <linux/sh_intc.h> 11 12 12 13 /* Box specific addresses. */ 13 14 ··· 119 118 #define FPGA_IN 0xb1400000 120 119 #define FPGA_OUT 0xb1400002 121 120 122 - #define IRQ0_IRQ 32 123 - #define IRQ1_IRQ 33 124 - #define IRQ4_IRQ 36 125 - #define IRQ5_IRQ 37 121 + #define IRQ0_IRQ evt2irq(0x600) 122 + #define IRQ1_IRQ evt2irq(0x620) 123 + #define IRQ4_IRQ evt2irq(0x680) 124 + #define IRQ5_IRQ evt2irq(0x6a0) 126 125 127 126 #define SE7343_FPGA_IRQ_MRSHPC0 0 128 127 #define SE7343_FPGA_IRQ_MRSHPC1 1
+4 -2
arch/sh/include/mach-se/mach/se7721.h
··· 11 11 12 12 #ifndef __ASM_SH_SE7721_H 13 13 #define __ASM_SH_SE7721_H 14 + 15 + #include <linux/sh_intc.h> 14 16 #include <asm/addrspace.h> 15 17 16 18 /* Box specific addresses. */ ··· 51 49 #define MRSHPC_PCIC_INFO (PA_MRSHPC + 30) 52 50 53 51 #define PA_LED 0xB6800000 /* 8bit LED */ 54 - #define PA_FPGA 0xB7000000 /* FPGA base address */ 52 + #define PA_FPGA 0xB7000000 /* FPGA base address */ 55 53 56 - #define MRSHPC_IRQ0 10 54 + #define MRSHPC_IRQ0 evt2irq(0x340) 57 55 58 56 #define FPGA_ILSR1 (PA_FPGA + 0x02) 59 57 #define FPGA_ILSR2 (PA_FPGA + 0x03)
+5 -4
arch/sh/include/mach-se/mach/se7722.h
··· 13 13 * for more details. 14 14 * 15 15 */ 16 + #include <linux/sh_intc.h> 16 17 #include <asm/addrspace.h> 17 18 18 19 /* Box specific addresses. */ ··· 32 31 33 32 #define PA_PERIPHERAL 0xB0000000 34 33 35 - #define PA_PCIC PA_PERIPHERAL /* MR-SHPC-01 PCMCIA */ 34 + #define PA_PCIC PA_PERIPHERAL /* MR-SHPC-01 PCMCIA */ 36 35 #define PA_MRSHPC (PA_PERIPHERAL + 0x003fffe0) /* MR-SHPC-01 PCMCIA controller */ 37 36 #define PA_MRSHPC_MW1 (PA_PERIPHERAL + 0x00400000) /* MR-SHPC-01 memory window base */ 38 37 #define PA_MRSHPC_MW2 (PA_PERIPHERAL + 0x00500000) /* MR-SHPC-01 attribute window base */ ··· 52 51 #define MRSHPC_PCIC_INFO (PA_MRSHPC + 30) 53 52 54 53 #define PA_LED (PA_PERIPHERAL + 0x00800000) /* 8bit LED */ 55 - #define PA_FPGA (PA_PERIPHERAL + 0x01800000) /* FPGA base address */ 54 + #define PA_FPGA (PA_PERIPHERAL + 0x01800000) /* FPGA base address */ 56 55 57 56 #define PA_LAN (PA_AREA6_IO + 0) /* SMC LAN91C111 */ 58 57 /* GPIO */ ··· 78 77 #define PORT_HIZCRC 0xA405015CUL 79 78 80 79 /* IRQ */ 81 - #define IRQ0_IRQ 32 82 - #define IRQ1_IRQ 33 80 + #define IRQ0_IRQ evt2irq(0x600) 81 + #define IRQ1_IRQ evt2irq(0x620) 83 82 84 83 #define IRQ01_MODE 0xb1800000 85 84 #define IRQ01_STS 0xb1800004
+4 -3
arch/sh/include/mach-se/mach/se7724.h
··· 18 18 * for more details. 19 19 * 20 20 */ 21 + #include <linux/sh_intc.h> 21 22 #include <asm/addrspace.h> 22 23 23 24 /* SH Eth */ ··· 36 35 #define IRQ2_MR (0xba200028) 37 36 38 37 /* IRQ */ 39 - #define IRQ0_IRQ 32 40 - #define IRQ1_IRQ 33 41 - #define IRQ2_IRQ 34 38 + #define IRQ0_IRQ evt2irq(0x600) 39 + #define IRQ1_IRQ evt2irq(0x620) 40 + #define IRQ2_IRQ evt2irq(0x640) 42 41 43 42 /* Bits in IRQ012 registers */ 44 43 #define SE7724_FPGA_IRQ_BASE 220
+2 -1
arch/sh/include/mach-se/mach/se7751.h
··· 11 11 * Modified for 7751 Solution Engine by 12 12 * Ian da Silva and Jeremy Siegel, 2001. 13 13 */ 14 + #include <linux/sh_intc.h> 14 15 15 16 /* Box specific addresses. */ 16 17 ··· 64 63 #define BCR_ILCRF (PA_BCR + 10) 65 64 #define BCR_ILCRG (PA_BCR + 12) 66 65 67 - #define IRQ_79C973 13 66 + #define IRQ_79C973 evt2irq(0x3a0) 68 67 69 68 void init_7751se_IRQ(void); 70 69
+4 -3
arch/sh/include/mach-se/mach/se7780.h
··· 12 12 * License. See the file "COPYING" in the main directory of this archive 13 13 * for more details. 14 14 */ 15 + #include <linux/sh_intc.h> 15 16 #include <asm/addrspace.h> 16 17 17 18 /* Box specific addresses. */ ··· 81 80 #define IRQPOS_PCCPW (0 * 4) 82 81 83 82 /* IDE interrupt */ 84 - #define IRQ_IDE0 67 /* iVDR */ 83 + #define IRQ_IDE0 evt2irq(0xa60) /* iVDR */ 85 84 86 85 /* SMC interrupt */ 87 - #define SMC_IRQ 8 86 + #define SMC_IRQ evt2irq(0x300) 88 87 89 88 /* SM501 interrupt */ 90 - #define SM501_IRQ 0 89 + #define SM501_IRQ evt2irq(0x200) 91 90 92 91 /* interrupt pin */ 93 92 #define IRQPIN_EXTINT1 0 /* IRQ0 pin */
+5 -4
arch/sh/kernel/cpu/sh3/setup-sh7705.c
··· 14 14 #include <linux/serial.h> 15 15 #include <linux/serial_sci.h> 16 16 #include <linux/sh_timer.h> 17 + #include <linux/sh_intc.h> 17 18 #include <asm/rtc.h> 18 19 #include <cpu/serial.h> 19 20 ··· 115 114 .flags = IORESOURCE_IO, 116 115 }, 117 116 [1] = { 118 - .start = 20, 117 + .start = evt2irq(0x480), 119 118 .flags = IORESOURCE_IRQ, 120 119 }, 121 120 }; ··· 147 146 .flags = IORESOURCE_MEM, 148 147 }, 149 148 [1] = { 150 - .start = 16, 149 + .start = evt2irq(0x400), 151 150 .flags = IORESOURCE_IRQ, 152 151 }, 153 152 }; ··· 175 174 .flags = IORESOURCE_MEM, 176 175 }, 177 176 [1] = { 178 - .start = 17, 177 + .start = evt2irq(0x420), 179 178 .flags = IORESOURCE_IRQ, 180 179 }, 181 180 }; ··· 202 201 .flags = IORESOURCE_MEM, 203 202 }, 204 203 [1] = { 205 - .start = 18, 204 + .start = evt2irq(0x440), 206 205 .flags = IORESOURCE_IRQ, 207 206 }, 208 207 };
+6 -5
arch/sh/kernel/cpu/sh3/setup-sh770x.c
··· 19 19 #include <linux/serial.h> 20 20 #include <linux/serial_sci.h> 21 21 #include <linux/sh_timer.h> 22 + #include <linux/sh_intc.h> 22 23 #include <cpu/serial.h> 23 24 24 25 enum { ··· 96 95 .flags = IORESOURCE_IO, 97 96 }, 98 97 [1] = { 99 - .start = 20, 98 + .start = evt2irq(0x480), 100 99 .flags = IORESOURCE_IRQ, 101 100 }, 102 101 }; ··· 115 114 .scscr = SCSCR_TE | SCSCR_RE, 116 115 .scbrr_algo_id = SCBRR_ALGO_2, 117 116 .type = PORT_SCI, 118 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x4E0)), 117 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x4e0)), 119 118 .ops = &sh770x_sci_port_ops, 120 119 .regshift = 1, 121 120 }; ··· 185 184 .flags = IORESOURCE_MEM, 186 185 }, 187 186 [1] = { 188 - .start = 16, 187 + .start = evt2irq(0x400), 189 188 .flags = IORESOURCE_IRQ, 190 189 }, 191 190 }; ··· 213 212 .flags = IORESOURCE_MEM, 214 213 }, 215 214 [1] = { 216 - .start = 17, 215 + .start = evt2irq(0x420), 217 216 .flags = IORESOURCE_IRQ, 218 217 }, 219 218 }; ··· 240 239 .flags = IORESOURCE_MEM, 241 240 }, 242 241 [1] = { 243 - .start = 18, 242 + .start = evt2irq(0x440), 244 243 .flags = IORESOURCE_IRQ, 245 244 }, 246 245 };
+6 -5
arch/sh/kernel/cpu/sh3/setup-sh7710.c
··· 14 14 #include <linux/serial.h> 15 15 #include <linux/serial_sci.h> 16 16 #include <linux/sh_timer.h> 17 + #include <linux/sh_intc.h> 17 18 #include <asm/rtc.h> 18 19 19 20 enum { ··· 78 77 .flags = IORESOURCE_IO, 79 78 }, 80 79 [1] = { 81 - .start = 20, 80 + .start = evt2irq(0x480), 82 81 .flags = IORESOURCE_IRQ, 83 82 }, 84 83 }; ··· 122 121 SCSCR_CKE1 | SCSCR_CKE0, 123 122 .scbrr_algo_id = SCBRR_ALGO_2, 124 123 .type = PORT_SCIF, 125 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x900)), 124 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x900)), 126 125 }; 127 126 128 127 static struct platform_device scif1_device = { ··· 146 145 .flags = IORESOURCE_MEM, 147 146 }, 148 147 [1] = { 149 - .start = 16, 148 + .start = evt2irq(0x400), 150 149 .flags = IORESOURCE_IRQ, 151 150 }, 152 151 }; ··· 174 173 .flags = IORESOURCE_MEM, 175 174 }, 176 175 [1] = { 177 - .start = 17, 176 + .start = evt2irq(0x420), 178 177 .flags = IORESOURCE_IRQ, 179 178 }, 180 179 }; ··· 201 200 .flags = IORESOURCE_MEM, 202 201 }, 203 202 [1] = { 204 - .start = 18, 203 + .start = evt2irq(0x440), 205 204 .flags = IORESOURCE_IRQ, 206 205 }, 207 206 };
+17 -15
arch/sh/kernel/cpu/sh3/setup-sh7720.c
··· 19 19 #include <linux/io.h> 20 20 #include <linux/serial_sci.h> 21 21 #include <linux/sh_timer.h> 22 + #include <linux/sh_intc.h> 22 23 #include <asm/rtc.h> 23 24 #include <cpu/serial.h> 24 25 ··· 31 30 }, 32 31 [1] = { 33 32 /* Shared Period/Carry/Alarm IRQ */ 34 - .start = 20, 33 + .start = evt2irq(0x480), 35 34 .flags = IORESOURCE_IRQ, 36 35 }, 37 36 }; ··· 56 55 .scscr = SCSCR_RE | SCSCR_TE, 57 56 .scbrr_algo_id = SCBRR_ALGO_4, 58 57 .type = PORT_SCIF, 59 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC00)), 58 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc00)), 60 59 .ops = &sh7720_sci_port_ops, 61 60 .regtype = SCIx_SH7705_SCIF_REGTYPE, 62 61 }; ··· 75 74 .scscr = SCSCR_RE | SCSCR_TE, 76 75 .scbrr_algo_id = SCBRR_ALGO_4, 77 76 .type = PORT_SCIF, 78 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC20)), 77 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc20)), 79 78 .ops = &sh7720_sci_port_ops, 80 79 .regtype = SCIx_SH7705_SCIF_REGTYPE, 81 80 }; ··· 95 94 .flags = IORESOURCE_MEM, 96 95 }, 97 96 [1] = { 98 - .start = 67, 99 - .end = 67, 97 + .start = evt2irq(0xa60), 98 + .end = evt2irq(0xa60), 100 99 .flags = IORESOURCE_IRQ, 101 100 }, 102 101 }; 103 102 104 103 static u64 usb_ohci_dma_mask = 0xffffffffUL; 104 + 105 105 static struct platform_device usb_ohci_device = { 106 106 .name = "sh_ohci", 107 107 .id = -1, ··· 123 121 }, 124 122 [1] = { 125 123 .name = "sh_udc", 126 - .start = 65, 127 - .end = 65, 124 + .start = evt2irq(0xa20), 125 + .end = evt2irq(0xa20), 128 126 .flags = IORESOURCE_IRQ, 129 127 }, 130 128 }; ··· 154 152 .flags = IORESOURCE_MEM, 155 153 }, 156 154 [1] = { 157 - .start = 104, 155 + .start = evt2irq(0xf00), 158 156 .flags = IORESOURCE_IRQ, 159 157 }, 160 158 }; ··· 181 179 .flags = IORESOURCE_MEM, 182 180 }, 183 181 [1] = { 184 - .start = 104, 182 + .start = evt2irq(0xf00), 185 183 .flags = IORESOURCE_IRQ, 186 184 }, 187 185 }; ··· 208 206 .flags = IORESOURCE_MEM, 209 207 }, 210 208 [1] = { 211 - .start = 104, 209 + .start = evt2irq(0xf00), 212 210 .flags = IORESOURCE_IRQ, 213 211 
}, 214 212 }; ··· 235 233 .flags = IORESOURCE_MEM, 236 234 }, 237 235 [1] = { 238 - .start = 104, 236 + .start = evt2irq(0xf00), 239 237 .flags = IORESOURCE_IRQ, 240 238 }, 241 239 }; ··· 262 260 .flags = IORESOURCE_MEM, 263 261 }, 264 262 [1] = { 265 - .start = 104, 263 + .start = evt2irq(0xf00), 266 264 .flags = IORESOURCE_IRQ, 267 265 }, 268 266 }; ··· 290 288 .flags = IORESOURCE_MEM, 291 289 }, 292 290 [1] = { 293 - .start = 16, 291 + .start = evt2irq(0x400), 294 292 .flags = IORESOURCE_IRQ, 295 293 }, 296 294 }; ··· 318 316 .flags = IORESOURCE_MEM, 319 317 }, 320 318 [1] = { 321 - .start = 17, 319 + .start = evt2irq(0x420), 322 320 .flags = IORESOURCE_IRQ, 323 321 }, 324 322 }; ··· 345 343 .flags = IORESOURCE_MEM, 346 344 }, 347 345 [1] = { 348 - .start = 18, 346 + .start = evt2irq(0x440), 349 347 .flags = IORESOURCE_IRQ, 350 348 }, 351 349 };
+8 -4
arch/sh/kernel/cpu/sh4/setup-sh4-202.c
··· 13 13 #include <linux/serial.h> 14 14 #include <linux/serial_sci.h> 15 15 #include <linux/sh_timer.h> 16 + #include <linux/sh_intc.h> 16 17 #include <linux/io.h> 17 18 18 19 static struct plat_sci_port scif0_platform_data = { ··· 22 21 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 23 22 .scbrr_algo_id = SCBRR_ALGO_2, 24 23 .type = PORT_SCIF, 25 - .irqs = { 40, 41, 43, 42 }, 24 + .irqs = { evt2irq(0x700), 25 + evt2irq(0x720), 26 + evt2irq(0x760), 27 + evt2irq(0x740) }, 26 28 }; 27 29 28 30 static struct platform_device scif0_device = { ··· 49 45 .flags = IORESOURCE_MEM, 50 46 }, 51 47 [1] = { 52 - .start = 16, 48 + .start = evt2irq(0x400), 53 49 .flags = IORESOURCE_IRQ, 54 50 }, 55 51 }; ··· 77 73 .flags = IORESOURCE_MEM, 78 74 }, 79 75 [1] = { 80 - .start = 17, 76 + .start = evt2irq(0x420), 81 77 .flags = IORESOURCE_IRQ, 82 78 }, 83 79 }; ··· 104 100 .flags = IORESOURCE_MEM, 105 101 }, 106 102 [1] = { 107 - .start = 18, 103 + .start = evt2irq(0x440), 108 104 .flags = IORESOURCE_IRQ, 109 105 }, 110 106 };
+8 -7
arch/sh/kernel/cpu/sh4/setup-sh7750.c
··· 13 13 #include <linux/serial.h> 14 14 #include <linux/io.h> 15 15 #include <linux/sh_timer.h> 16 + #include <linux/sh_intc.h> 16 17 #include <linux/serial_sci.h> 17 18 #include <generated/machtypes.h> 18 19 ··· 25 24 }, 26 25 [1] = { 27 26 /* Shared Period/Carry/Alarm IRQ */ 28 - .start = 20, 27 + .start = evt2irq(0x480), 29 28 .flags = IORESOURCE_IRQ, 30 29 }, 31 30 }; ··· 44 43 .scscr = SCSCR_TE | SCSCR_RE, 45 44 .scbrr_algo_id = SCBRR_ALGO_2, 46 45 .type = PORT_SCI, 47 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xE40)), 46 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x4e0)), 48 47 .regshift = 2, 49 48 }; 50 49 ··· 86 85 .flags = IORESOURCE_MEM, 87 86 }, 88 87 [1] = { 89 - .start = 16, 88 + .start = evt2irq(0x400), 90 89 .flags = IORESOURCE_IRQ, 91 90 }, 92 91 }; ··· 114 113 .flags = IORESOURCE_MEM, 115 114 }, 116 115 [1] = { 117 - .start = 17, 116 + .start = evt2irq(0x420), 118 117 .flags = IORESOURCE_IRQ, 119 118 }, 120 119 }; ··· 141 140 .flags = IORESOURCE_MEM, 142 141 }, 143 142 [1] = { 144 - .start = 18, 143 + .start = evt2irq(0x440), 145 144 .flags = IORESOURCE_IRQ, 146 145 }, 147 146 }; ··· 173 172 .flags = IORESOURCE_MEM, 174 173 }, 175 174 [1] = { 176 - .start = 72, 175 + .start = evt2irq(0xb00), 177 176 .flags = IORESOURCE_IRQ, 178 177 }, 179 178 }; ··· 200 199 .flags = IORESOURCE_MEM, 201 200 }, 202 201 [1] = { 203 - .start = 76, 202 + .start = evt2irq(0xb80), 204 203 .flags = IORESOURCE_IRQ, 205 204 }, 206 205 };
+19 -7
arch/sh/kernel/cpu/sh4/setup-sh7760.c
··· 11 11 #include <linux/init.h> 12 12 #include <linux/serial.h> 13 13 #include <linux/sh_timer.h> 14 + #include <linux/sh_intc.h> 14 15 #include <linux/serial_sci.h> 15 16 #include <linux/io.h> 16 17 ··· 133 132 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 134 133 .scbrr_algo_id = SCBRR_ALGO_2, 135 134 .type = PORT_SCIF, 136 - .irqs = { 52, 53, 55, 54 }, 135 + .irqs = { evt2irq(0x880), 136 + evt2irq(0x8a0), 137 + evt2irq(0x8e0), 138 + evt2irq(0x8c0) }, 137 139 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 138 140 }; 139 141 ··· 154 150 .type = PORT_SCIF, 155 151 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 156 152 .scbrr_algo_id = SCBRR_ALGO_2, 157 - .irqs = { 72, 73, 75, 74 }, 153 + .irqs = { evt2irq(0xb00), 154 + evt2irq(0xb20), 155 + evt2irq(0xb60), 156 + evt2irq(0xb40) }, 158 157 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 159 158 }; 160 159 ··· 175 168 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 176 169 .scbrr_algo_id = SCBRR_ALGO_2, 177 170 .type = PORT_SCIF, 178 - .irqs = { 76, 77, 79, 78 }, 171 + .irqs = { evt2irq(0xb80), 172 + evt2irq(0xba0), 173 + evt2irq(0xbe0), 174 + evt2irq(0xbc0) }, 179 175 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 180 176 }; 181 177 ··· 196 186 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 197 187 .scbrr_algo_id = SCBRR_ALGO_2, 198 188 .type = PORT_SCI, 199 - .irqs = { 80, 81, 82, 0 }, 189 + .irqs = { evt2irq(0xc00), 190 + evt2irq(0xc20), 191 + evt2irq(0xc40), }, 200 192 .regshift = 2, 201 193 }; 202 194 ··· 223 211 .flags = IORESOURCE_MEM, 224 212 }, 225 213 [1] = { 226 - .start = 16, 214 + .start = evt2irq(0x400), 227 215 .flags = IORESOURCE_IRQ, 228 216 }, 229 217 }; ··· 251 239 .flags = IORESOURCE_MEM, 252 240 }, 253 241 [1] = { 254 - .start = 17, 242 + .start = evt2irq(0x420), 255 243 .flags = IORESOURCE_IRQ, 256 244 }, 257 245 }; ··· 278 266 .flags = IORESOURCE_MEM, 279 267 }, 280 268 [1] = { 281 - .start = 18, 269 + .start = evt2irq(0x440), 282 270 .flags = IORESOURCE_IRQ, 283 271 }, 284 272 };
+16 -15
arch/sh/kernel/cpu/sh4a/setup-sh7343.c
··· 13 13 #include <linux/serial_sci.h> 14 14 #include <linux/uio_driver.h> 15 15 #include <linux/sh_timer.h> 16 + #include <linux/sh_intc.h> 16 17 #include <asm/clock.h> 17 18 18 19 /* Serial */ ··· 23 22 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, 24 23 .scbrr_algo_id = SCBRR_ALGO_2, 25 24 .type = PORT_SCIF, 26 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC00)), 25 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc00)), 27 26 }; 28 27 29 28 static struct platform_device scif0_device = { ··· 40 39 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, 41 40 .scbrr_algo_id = SCBRR_ALGO_2, 42 41 .type = PORT_SCIF, 43 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC20)), 42 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc20)), 44 43 }; 45 44 46 45 static struct platform_device scif1_device = { ··· 57 56 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, 58 57 .scbrr_algo_id = SCBRR_ALGO_2, 59 58 .type = PORT_SCIF, 60 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC40)), 59 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc40)), 61 60 }; 62 61 63 62 static struct platform_device scif2_device = { ··· 74 73 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1, 75 74 .scbrr_algo_id = SCBRR_ALGO_2, 76 75 .type = PORT_SCIF, 77 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC60)), 76 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc60)), 78 77 }; 79 78 80 79 static struct platform_device scif3_device = { ··· 93 92 .flags = IORESOURCE_MEM, 94 93 }, 95 94 [1] = { 96 - .start = 96, 97 - .end = 99, 95 + .start = evt2irq(0xe00), 96 + .end = evt2irq(0xe60), 98 97 .flags = IORESOURCE_IRQ, 99 98 }, 100 99 }; ··· 114 113 .flags = IORESOURCE_MEM, 115 114 }, 116 115 [1] = { 117 - .start = 44, 118 - .end = 47, 116 + .start = evt2irq(0x780), 117 + .end = evt2irq(0x7e0), 119 118 .flags = IORESOURCE_IRQ, 120 119 }, 121 120 }; ··· 130 129 static struct uio_info vpu_platform_data = { 131 130 .name = "VPU4", 132 131 .version = "0", 133 - .irq = 60, 132 + .irq = evt2irq(0x980), 134 133 }; 135 134 136 135 static struct resource vpu_resources[] = { ··· 158 157 static struct uio_info veu_platform_data = { 159 158 .name = 
"VEU", 160 159 .version = "0", 161 - .irq = 54, 160 + .irq = evt2irq(0x8c0), 162 161 }; 163 162 164 163 static struct resource veu_resources[] = { ··· 186 185 static struct uio_info jpu_platform_data = { 187 186 .name = "JPU", 188 187 .version = "0", 189 - .irq = 27, 188 + .irq = evt2irq(0x560), 190 189 }; 191 190 192 191 static struct resource jpu_resources[] = { ··· 225 224 .flags = IORESOURCE_MEM, 226 225 }, 227 226 [1] = { 228 - .start = 104, 227 + .start = evt2irq(0xf00), 229 228 .flags = IORESOURCE_IRQ, 230 229 }, 231 230 }; ··· 253 252 .flags = IORESOURCE_MEM, 254 253 }, 255 254 [1] = { 256 - .start = 16, 255 + .start = evt2irq(0x400), 257 256 .flags = IORESOURCE_IRQ, 258 257 }, 259 258 }; ··· 281 280 .flags = IORESOURCE_MEM, 282 281 }, 283 282 [1] = { 284 - .start = 17, 283 + .start = evt2irq(0x420), 285 284 .flags = IORESOURCE_IRQ, 286 285 }, 287 286 }; ··· 308 307 .flags = IORESOURCE_MEM, 309 308 }, 310 309 [1] = { 311 - .start = 18, 310 + .start = evt2irq(0x440), 312 311 .flags = IORESOURCE_IRQ, 313 312 }, 314 313 };
+12 -11
arch/sh/kernel/cpu/sh4a/setup-sh7366.c
··· 15 15 #include <linux/serial_sci.h> 16 16 #include <linux/uio_driver.h> 17 17 #include <linux/sh_timer.h> 18 + #include <linux/sh_intc.h> 18 19 #include <linux/usb/r8a66597.h> 19 20 #include <asm/clock.h> 20 21 ··· 26 25 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 27 26 .scbrr_algo_id = SCBRR_ALGO_2, 28 27 .type = PORT_SCIF, 29 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC00)), 28 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc00)), 30 29 }; 31 30 32 31 static struct platform_device scif0_device = { ··· 45 44 .flags = IORESOURCE_MEM, 46 45 }, 47 46 [1] = { 48 - .start = 96, 49 - .end = 99, 47 + .start = evt2irq(0xe00), 48 + .end = evt2irq(0xe60), 50 49 .flags = IORESOURCE_IRQ, 51 50 }, 52 51 }; ··· 69 68 .flags = IORESOURCE_MEM, 70 69 }, 71 70 [1] = { 72 - .start = 65, 73 - .end = 65, 71 + .start = evt2irq(0xa20), 72 + .end = evt2irq(0xa20), 74 73 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 75 74 }, 76 75 }; ··· 90 89 static struct uio_info vpu_platform_data = { 91 90 .name = "VPU5", 92 91 .version = "0", 93 - .irq = 60, 92 + .irq = evt2irq(0x980), 94 93 }; 95 94 96 95 static struct resource vpu_resources[] = { ··· 118 117 static struct uio_info veu0_platform_data = { 119 118 .name = "VEU", 120 119 .version = "0", 121 - .irq = 54, 120 + .irq = evt2irq(0x8c0), 122 121 }; 123 122 124 123 static struct resource veu0_resources[] = { ··· 146 145 static struct uio_info veu1_platform_data = { 147 146 .name = "VEU", 148 147 .version = "0", 149 - .irq = 27, 148 + .irq = evt2irq(0x560), 150 149 }; 151 150 152 151 static struct resource veu1_resources[] = { ··· 185 184 .flags = IORESOURCE_MEM, 186 185 }, 187 186 [1] = { 188 - .start = 104, 187 + .start = evt2irq(0xf00), 189 188 .flags = IORESOURCE_IRQ, 190 189 }, 191 190 }; ··· 241 240 .flags = IORESOURCE_MEM, 242 241 }, 243 242 [1] = { 244 - .start = 17, 243 + .start = evt2irq(0x420), 245 244 .flags = IORESOURCE_IRQ, 246 245 }, 247 246 }; ··· 268 267 .flags = IORESOURCE_MEM, 269 268 }, 270 269 [1] = { 271 - .start = 18, 270 + .start = 
evt2irq(0x440), 272 271 .flags = IORESOURCE_IRQ, 273 272 }, 274 273 };
+24 -23
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
··· 13 13 #include <linux/serial.h> 14 14 #include <linux/serial_sci.h> 15 15 #include <linux/sh_timer.h> 16 + #include <linux/sh_intc.h> 16 17 #include <linux/uio_driver.h> 17 18 #include <linux/usb/m66592.h> 18 19 ··· 148 147 }, 149 148 { 150 149 .name = "error_irq", 151 - .start = 78, 152 - .end = 78, 150 + .start = evt2irq(0xbc0), 151 + .end = evt2irq(0xbc0), 153 152 .flags = IORESOURCE_IRQ, 154 153 }, 155 154 { 156 155 /* IRQ for channels 0-3 */ 157 - .start = 48, 158 - .end = 51, 156 + .start = evt2irq(0x800), 157 + .end = evt2irq(0x860), 159 158 .flags = IORESOURCE_IRQ, 160 159 }, 161 160 { 162 161 /* IRQ for channels 4-5 */ 163 - .start = 76, 164 - .end = 77, 162 + .start = evt2irq(0xb80), 163 + .end = evt2irq(0xba0), 165 164 .flags = IORESOURCE_IRQ, 166 165 }, 167 166 }; ··· 183 182 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 184 183 .scbrr_algo_id = SCBRR_ALGO_2, 185 184 .type = PORT_SCIF, 186 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC00)), 185 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc00)), 187 186 .ops = &sh7722_sci_port_ops, 188 187 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 189 188 }; ··· 202 201 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 203 202 .scbrr_algo_id = SCBRR_ALGO_2, 204 203 .type = PORT_SCIF, 205 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC20)), 204 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc20)), 206 205 .ops = &sh7722_sci_port_ops, 207 206 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 208 207 }; ··· 221 220 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 222 221 .scbrr_algo_id = SCBRR_ALGO_2, 223 222 .type = PORT_SCIF, 224 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC40)), 223 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc40)), 225 224 .ops = &sh7722_sci_port_ops, 226 225 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 227 226 }; ··· 242 241 }, 243 242 [1] = { 244 243 /* Period IRQ */ 245 - .start = 45, 244 + .start = evt2irq(0x7a0), 246 245 .flags = IORESOURCE_IRQ, 247 246 }, 248 247 [2] = { 249 248 /* Carry IRQ */ 250 - .start = 46, 249 + .start = evt2irq(0x7c0), 251 250 .flags = 
IORESOURCE_IRQ, 252 251 }, 253 252 [3] = { 254 253 /* Alarm IRQ */ 255 - .start = 44, 254 + .start = evt2irq(0x780), 256 255 .flags = IORESOURCE_IRQ, 257 256 }, 258 257 }; ··· 276 275 .flags = IORESOURCE_MEM, 277 276 }, 278 277 [1] = { 279 - .start = 65, 280 - .end = 65, 278 + .start = evt2irq(0xa20), 279 + .end = evt2irq(0xa20), 281 280 .flags = IORESOURCE_IRQ, 282 281 }, 283 282 }; ··· 302 301 .flags = IORESOURCE_MEM, 303 302 }, 304 303 [1] = { 305 - .start = 96, 306 - .end = 99, 304 + .start = evt2irq(0xe00), 305 + .end = evt2irq(0xe60), 307 306 .flags = IORESOURCE_IRQ, 308 307 }, 309 308 }; ··· 318 317 static struct uio_info vpu_platform_data = { 319 318 .name = "VPU4", 320 319 .version = "0", 321 - .irq = 60, 320 + .irq = evt2irq(0x980), 322 321 }; 323 322 324 323 static struct resource vpu_resources[] = { ··· 346 345 static struct uio_info veu_platform_data = { 347 346 .name = "VEU", 348 347 .version = "0", 349 - .irq = 54, 348 + .irq = evt2irq(0x8c0), 350 349 }; 351 350 352 351 static struct resource veu_resources[] = { ··· 374 373 static struct uio_info jpu_platform_data = { 375 374 .name = "JPU", 376 375 .version = "0", 377 - .irq = 27, 376 + .irq = evt2irq(0x560), 378 377 }; 379 378 380 379 static struct resource jpu_resources[] = { ··· 413 412 .flags = IORESOURCE_MEM, 414 413 }, 415 414 [1] = { 416 - .start = 104, 415 + .start = evt2irq(0xf00), 417 416 .flags = IORESOURCE_IRQ, 418 417 }, 419 418 }; ··· 441 440 .flags = IORESOURCE_MEM, 442 441 }, 443 442 [1] = { 444 - .start = 16, 443 + .start = evt2irq(0x400), 445 444 .flags = IORESOURCE_IRQ, 446 445 }, 447 446 }; ··· 469 468 .flags = IORESOURCE_MEM, 470 469 }, 471 470 [1] = { 472 - .start = 17, 471 + .start = evt2irq(0x420), 473 472 .flags = IORESOURCE_IRQ, 474 473 }, 475 474 }; ··· 526 525 .flags = IORESOURCE_MEM, 527 526 }, 528 527 [1] = { 529 - .start = 108, 528 + .start = evt2irq(0xf80), 530 529 .flags = IORESOURCE_IRQ, 531 530 }, 532 531 };
+24 -23
arch/sh/kernel/cpu/sh4a/setup-sh7723.c
··· 15 15 #include <linux/uio_driver.h> 16 16 #include <linux/usb/r8a66597.h> 17 17 #include <linux/sh_timer.h> 18 + #include <linux/sh_intc.h> 18 19 #include <linux/io.h> 19 20 #include <asm/clock.h> 20 21 #include <asm/mmzone.h> ··· 29 28 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 30 29 .scbrr_algo_id = SCBRR_ALGO_2, 31 30 .type = PORT_SCIF, 32 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC00)), 31 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc00)), 33 32 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 34 33 }; 35 34 ··· 48 47 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 49 48 .scbrr_algo_id = SCBRR_ALGO_2, 50 49 .type = PORT_SCIF, 51 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC20)), 50 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc20)), 52 51 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 53 52 }; 54 53 ··· 67 66 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 68 67 .scbrr_algo_id = SCBRR_ALGO_2, 69 68 .type = PORT_SCIF, 70 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC40)), 69 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc40)), 71 70 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 72 71 }; 73 72 ··· 86 85 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 87 86 .scbrr_algo_id = SCBRR_ALGO_3, 88 87 .type = PORT_SCIFA, 89 - .irqs = { 56, 56, 56, 56 }, 88 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x900)), 90 89 }; 91 90 92 91 static struct platform_device scif3_device = { ··· 104 103 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 105 104 .scbrr_algo_id = SCBRR_ALGO_3, 106 105 .type = PORT_SCIFA, 107 - .irqs = { 88, 88, 88, 88 }, 106 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xd00)), 108 107 }; 109 108 110 109 static struct platform_device scif4_device = { ··· 122 121 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 123 122 .scbrr_algo_id = SCBRR_ALGO_3, 124 123 .type = PORT_SCIFA, 125 - .irqs = { 109, 109, 109, 109 }, 124 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xfa0)), 126 125 }; 127 126 128 127 static struct platform_device scif5_device = { ··· 136 135 static struct uio_info vpu_platform_data = { 137 136 .name = "VPU5", 138 137 .version = "0", 139 - .irq = 60, 138 + .irq = 
evt2irq(0x980), 140 139 }; 141 140 142 141 static struct resource vpu_resources[] = { ··· 164 163 static struct uio_info veu0_platform_data = { 165 164 .name = "VEU2H", 166 165 .version = "0", 167 - .irq = 54, 166 + .irq = evt2irq(0x8c0), 168 167 }; 169 168 170 169 static struct resource veu0_resources[] = { ··· 192 191 static struct uio_info veu1_platform_data = { 193 192 .name = "VEU2H", 194 193 .version = "0", 195 - .irq = 27, 194 + .irq = evt2irq(0x560), 196 195 }; 197 196 198 197 static struct resource veu1_resources[] = { ··· 231 230 .flags = IORESOURCE_MEM, 232 231 }, 233 232 [1] = { 234 - .start = 104, 233 + .start = evt2irq(0xf00), 235 234 .flags = IORESOURCE_IRQ, 236 235 }, 237 236 }; ··· 259 258 .flags = IORESOURCE_MEM, 260 259 }, 261 260 [1] = { 262 - .start = 16, 261 + .start = evt2irq(0x400), 263 262 .flags = IORESOURCE_IRQ, 264 263 }, 265 264 }; ··· 287 286 .flags = IORESOURCE_MEM, 288 287 }, 289 288 [1] = { 290 - .start = 17, 289 + .start = evt2irq(0x420), 291 290 .flags = IORESOURCE_IRQ, 292 291 }, 293 292 }; ··· 314 313 .flags = IORESOURCE_MEM, 315 314 }, 316 315 [1] = { 317 - .start = 18, 316 + .start = evt2irq(0x440), 318 317 .flags = IORESOURCE_IRQ, 319 318 }, 320 319 }; ··· 341 340 .flags = IORESOURCE_MEM, 342 341 }, 343 342 [1] = { 344 - .start = 57, 343 + .start = evt2irq(0x920), 345 344 .flags = IORESOURCE_IRQ, 346 345 }, 347 346 }; ··· 368 367 .flags = IORESOURCE_MEM, 369 368 }, 370 369 [1] = { 371 - .start = 58, 370 + .start = evt2irq(0x940), 372 371 .flags = IORESOURCE_IRQ, 373 372 }, 374 373 }; ··· 395 394 .flags = IORESOURCE_MEM, 396 395 }, 397 396 [1] = { 398 - .start = 57, 397 + .start = evt2irq(0x920), 399 398 .flags = IORESOURCE_IRQ, 400 399 }, 401 400 }; ··· 418 417 }, 419 418 [1] = { 420 419 /* Period IRQ */ 421 - .start = 69, 420 + .start = evt2irq(0xaa0), 422 421 .flags = IORESOURCE_IRQ, 423 422 }, 424 423 [2] = { 425 424 /* Carry IRQ */ 426 - .start = 70, 425 + .start = evt2irq(0xac0), 427 426 .flags = IORESOURCE_IRQ, 428 427 
}, 429 428 [3] = { 430 429 /* Alarm IRQ */ 431 - .start = 68, 430 + .start = evt2irq(0xa80), 432 431 .flags = IORESOURCE_IRQ, 433 432 }, 434 433 }; ··· 451 450 .flags = IORESOURCE_MEM, 452 451 }, 453 452 [1] = { 454 - .start = 65, 455 - .end = 65, 453 + .start = evt2irq(0xa20), 454 + .end = evt2irq(0xa20), 456 455 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 457 456 }, 458 457 }; ··· 477 476 .flags = IORESOURCE_MEM, 478 477 }, 479 478 [1] = { 480 - .start = 96, 481 - .end = 99, 479 + .start = evt2irq(0xe00), 480 + .end = evt2irq(0xe60), 482 481 .flags = IORESOURCE_IRQ, 483 482 }, 484 483 };
+39 -38
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
··· 20 20 #include <linux/uio_driver.h> 21 21 #include <linux/sh_dma.h> 22 22 #include <linux/sh_timer.h> 23 + #include <linux/sh_intc.h> 23 24 #include <linux/io.h> 24 25 #include <linux/notifier.h> 25 26 ··· 216 215 }, 217 216 { 218 217 .name = "error_irq", 219 - .start = 78, 220 - .end = 78, 218 + .start = evt2irq(0xbc0), 219 + .end = evt2irq(0xbc0), 221 220 .flags = IORESOURCE_IRQ, 222 221 }, 223 222 { 224 223 /* IRQ for channels 0-3 */ 225 - .start = 48, 226 - .end = 51, 224 + .start = evt2irq(0x800), 225 + .end = evt2irq(0x860), 227 226 .flags = IORESOURCE_IRQ, 228 227 }, 229 228 { 230 229 /* IRQ for channels 4-5 */ 231 - .start = 76, 232 - .end = 77, 230 + .start = evt2irq(0xb80), 231 + .end = evt2irq(0xba0), 233 232 .flags = IORESOURCE_IRQ, 234 233 }, 235 234 }; ··· 250 249 }, 251 250 { 252 251 .name = "error_irq", 253 - .start = 74, 254 - .end = 74, 252 + .start = evt2irq(0xb40), 253 + .end = evt2irq(0xb40), 255 254 .flags = IORESOURCE_IRQ, 256 255 }, 257 256 { 258 257 /* IRQ for channels 0-3 */ 259 - .start = 40, 260 - .end = 43, 258 + .start = evt2irq(0x700), 259 + .end = evt2irq(0x760), 261 260 .flags = IORESOURCE_IRQ, 262 261 }, 263 262 { 264 263 /* IRQ for channels 4-5 */ 265 - .start = 72, 266 - .end = 73, 264 + .start = evt2irq(0xb00), 265 + .end = evt2irq(0xb20), 267 266 .flags = IORESOURCE_IRQ, 268 267 }, 269 268 }; ··· 296 295 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 297 296 .scbrr_algo_id = SCBRR_ALGO_2, 298 297 .type = PORT_SCIF, 299 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC00)), 298 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc00)), 300 299 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 301 300 }; 302 301 ··· 315 314 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 316 315 .scbrr_algo_id = SCBRR_ALGO_2, 317 316 .type = PORT_SCIF, 318 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC20)), 317 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc20)), 319 318 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 320 319 }; 321 320 ··· 334 333 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 335 334 
.scbrr_algo_id = SCBRR_ALGO_2, 336 335 .type = PORT_SCIF, 337 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xC40)), 336 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xc40)), 338 337 .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, 339 338 }; 340 339 ··· 353 352 .scscr = SCSCR_RE | SCSCR_TE, 354 353 .scbrr_algo_id = SCBRR_ALGO_3, 355 354 .type = PORT_SCIFA, 356 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x900)), 355 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x900)), 357 356 }; 358 357 359 358 static struct platform_device scif3_device = { ··· 371 370 .scscr = SCSCR_RE | SCSCR_TE, 372 371 .scbrr_algo_id = SCBRR_ALGO_3, 373 372 .type = PORT_SCIFA, 374 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xD00)), 373 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xd00)), 375 374 }; 376 375 377 376 static struct platform_device scif4_device = { ··· 389 388 .scscr = SCSCR_RE | SCSCR_TE, 390 389 .scbrr_algo_id = SCBRR_ALGO_3, 391 390 .type = PORT_SCIFA, 392 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xFA0)), 391 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xfa0)), 393 392 }; 394 393 395 394 static struct platform_device scif5_device = { ··· 409 408 }, 410 409 [1] = { 411 410 /* Period IRQ */ 412 - .start = 69, 411 + .start = evt2irq(0xaa0), 413 412 .flags = IORESOURCE_IRQ, 414 413 }, 415 414 [2] = { 416 415 /* Carry IRQ */ 417 - .start = 70, 416 + .start = evt2irq(0xac0), 418 417 .flags = IORESOURCE_IRQ, 419 418 }, 420 419 [3] = { 421 420 /* Alarm IRQ */ 422 - .start = 68, 421 + .start = evt2irq(0xa80), 423 422 .flags = IORESOURCE_IRQ, 424 423 }, 425 424 }; ··· 440 439 .flags = IORESOURCE_MEM, 441 440 }, 442 441 [1] = { 443 - .start = 96, 444 - .end = 99, 442 + .start = evt2irq(0xe00), 443 + .end = evt2irq(0xe60), 445 444 .flags = IORESOURCE_IRQ, 446 445 }, 447 446 }; ··· 462 461 .flags = IORESOURCE_MEM, 463 462 }, 464 463 [1] = { 465 - .start = 92, 466 - .end = 95, 464 + .start = evt2irq(0xd80), 465 + .end = evt2irq(0xde0), 467 466 .flags = IORESOURCE_IRQ, 468 467 }, 469 468 }; ··· 479 478 static struct uio_info vpu_platform_data = { 480 479 .name = "VPU5F", 481 480 
.version = "0", 482 - .irq = 60, 481 + .irq = evt2irq(0x980), 483 482 }; 484 483 485 484 static struct resource vpu_resources[] = { ··· 508 507 static struct uio_info veu0_platform_data = { 509 508 .name = "VEU3F0", 510 509 .version = "0", 511 - .irq = 83, 510 + .irq = evt2irq(0xc60), 512 511 }; 513 512 514 513 static struct resource veu0_resources[] = { ··· 537 536 static struct uio_info veu1_platform_data = { 538 537 .name = "VEU3F1", 539 538 .version = "0", 540 - .irq = 54, 539 + .irq = evt2irq(0x8c0), 541 540 }; 542 541 543 542 static struct resource veu1_resources[] = { ··· 634 633 .flags = IORESOURCE_MEM, 635 634 }, 636 635 [1] = { 637 - .start = 104, 636 + .start = evt2irq(0xf00), 638 637 .flags = IORESOURCE_IRQ, 639 638 }, 640 639 }; ··· 662 661 .flags = IORESOURCE_MEM, 663 662 }, 664 663 [1] = { 665 - .start = 16, 664 + .start = evt2irq(0x400), 666 665 .flags = IORESOURCE_IRQ, 667 666 }, 668 667 }; ··· 690 689 .flags = IORESOURCE_MEM, 691 690 }, 692 691 [1] = { 693 - .start = 17, 692 + .start = evt2irq(0x420), 694 693 .flags = IORESOURCE_IRQ, 695 694 }, 696 695 }; ··· 717 716 .flags = IORESOURCE_MEM, 718 717 }, 719 718 [1] = { 720 - .start = 18, 719 + .start = evt2irq(0x440), 721 720 .flags = IORESOURCE_IRQ, 722 721 }, 723 722 }; ··· 745 744 .flags = IORESOURCE_MEM, 746 745 }, 747 746 [1] = { 748 - .start = 57, 747 + .start = evt2irq(0x920), 749 748 .flags = IORESOURCE_IRQ, 750 749 }, 751 750 }; ··· 772 771 .flags = IORESOURCE_MEM, 773 772 }, 774 773 [1] = { 775 - .start = 58, 774 + .start = evt2irq(0x940), 776 775 .flags = IORESOURCE_IRQ, 777 776 }, 778 777 }; ··· 799 798 .flags = IORESOURCE_MEM, 800 799 }, 801 800 [1] = { 802 - .start = 57, 801 + .start = evt2irq(0x920), 803 802 .flags = IORESOURCE_IRQ, 804 803 }, 805 804 }; ··· 818 817 static struct uio_info jpu_platform_data = { 819 818 .name = "JPU", 820 819 .version = "0", 821 - .irq = 27, 820 + .irq = evt2irq(0x560), 822 821 }; 823 822 824 823 static struct resource jpu_resources[] = { ··· 847 846 
static struct uio_info spu0_platform_data = { 848 847 .name = "SPU2DSP0", 849 848 .version = "0", 850 - .irq = 86, 849 + .irq = evt2irq(0xcc0), 851 850 }; 852 851 853 852 static struct resource spu0_resources[] = { ··· 876 875 static struct uio_info spu1_platform_data = { 877 876 .name = "SPU2DSP1", 878 877 .version = "0", 879 - .irq = 87, 878 + .irq = evt2irq(0xce0), 880 879 }; 881 880 882 881 static struct resource spu1_resources[] = {
+43 -43
arch/sh/kernel/cpu/sh4a/setup-sh7757.c
··· 18 18 #include <linux/dma-mapping.h> 19 19 #include <linux/sh_timer.h> 20 20 #include <linux/sh_dma.h> 21 - 21 + #include <linux/sh_intc.h> 22 22 #include <cpu/dma-register.h> 23 23 #include <cpu/sh7757.h> 24 24 ··· 45 45 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 46 46 .scbrr_algo_id = SCBRR_ALGO_2, 47 47 .type = PORT_SCIF, 48 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xB80)), 48 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xb80)), 49 49 }; 50 50 51 51 static struct platform_device scif3_device = { ··· 86 86 .flags = IORESOURCE_MEM, 87 87 }, 88 88 [1] = { 89 - .start = 28, 89 + .start = evt2irq(0x580), 90 90 .flags = IORESOURCE_IRQ, 91 91 }, 92 92 }; ··· 114 114 .flags = IORESOURCE_MEM, 115 115 }, 116 116 [1] = { 117 - .start = 29, 117 + .start = evt2irq(0x5a0), 118 118 .flags = IORESOURCE_IRQ, 119 119 }, 120 120 }; ··· 136 136 .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, 137 137 }, 138 138 [1] = { 139 - .start = 86, 139 + .start = evt2irq(0xcc0), 140 140 .flags = IORESOURCE_IRQ, 141 141 }, 142 142 }; ··· 466 466 }, 467 467 { 468 468 .name = "error_irq", 469 - .start = 34, 470 - .end = 34, 469 + .start = evt2irq(0x640), 470 + .end = evt2irq(0x640), 471 471 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 472 472 }, 473 473 }; ··· 488 488 }, 489 489 { 490 490 .name = "error_irq", 491 - .start = 34, 492 - .end = 34, 491 + .start = evt2irq(0x640), 492 + .end = evt2irq(0x640), 493 493 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 494 494 }, 495 495 { 496 496 /* IRQ for channels 4 */ 497 - .start = 46, 498 - .end = 46, 497 + .start = evt2irq(0x7c0), 498 + .end = evt2irq(0x7c0), 499 499 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 500 500 }, 501 501 { 502 502 /* IRQ for channels 5 */ 503 - .start = 46, 504 - .end = 46, 503 + .start = evt2irq(0x7c0), 504 + .end = evt2irq(0x7c0), 505 505 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 506 506 }, 507 507 { 508 508 /* IRQ for channels 6 */ 509 - .start = 88, 510 - .end = 88, 509 + .start = evt2irq(0xd00), 510 + .end 
= evt2irq(0xd00), 511 511 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 512 512 }, 513 513 { 514 514 /* IRQ for channels 7 */ 515 - .start = 88, 516 - .end = 88, 515 + .start = evt2irq(0xd00), 516 + .end = evt2irq(0xd00), 517 517 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 518 518 }, 519 519 { 520 520 /* IRQ for channels 8 */ 521 - .start = 88, 522 - .end = 88, 521 + .start = evt2irq(0xd00), 522 + .end = evt2irq(0xd00), 523 523 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 524 524 }, 525 525 { 526 526 /* IRQ for channels 9 */ 527 - .start = 88, 528 - .end = 88, 527 + .start = evt2irq(0xd00), 528 + .end = evt2irq(0xd00), 529 529 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 530 530 }, 531 531 { 532 532 /* IRQ for channels 10 */ 533 - .start = 88, 534 - .end = 88, 533 + .start = evt2irq(0xd00), 534 + .end = evt2irq(0xd00), 535 535 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 536 536 }, 537 537 { 538 538 /* IRQ for channels 11 */ 539 - .start = 88, 540 - .end = 88, 539 + .start = evt2irq(0xd00), 540 + .end = evt2irq(0xd00), 541 541 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 542 542 }, 543 543 }; ··· 558 558 }, 559 559 { 560 560 .name = "error_irq", 561 - .start = 323, 562 - .end = 323, 561 + .start = evt2irq(0x2a60), 562 + .end = evt2irq(0x2a60), 563 563 .flags = IORESOURCE_IRQ, 564 564 }, 565 565 { 566 566 /* IRQ for channels 12 to 16 */ 567 - .start = 272, 568 - .end = 276, 567 + .start = evt2irq(0x2400), 568 + .end = evt2irq(0x2480), 569 569 .flags = IORESOURCE_IRQ, 570 570 }, 571 571 { 572 572 /* IRQ for channel 17 */ 573 - .start = 279, 574 - .end = 279, 573 + .start = evt2irq(0x24e0), 574 + .end = evt2irq(0x24e0), 575 575 .flags = IORESOURCE_IRQ, 576 576 }, 577 577 }; ··· 592 592 }, 593 593 { 594 594 .name = "error_irq", 595 - .start = 324, 596 - .end = 324, 595 + .start = evt2irq(0x2a80), 596 + .end = evt2irq(0x2a80), 597 597 .flags = IORESOURCE_IRQ, 598 598 }, 599 599 { 600 600 /* IRQ for channels 18 to 22 */ 601 - 
.start = 280, 602 - .end = 284, 601 + .start = evt2irq(0x2500), 602 + .end = evt2irq(0x2580), 603 603 .flags = IORESOURCE_IRQ, 604 604 }, 605 605 { 606 606 /* IRQ for channel 23 */ 607 - .start = 288, 608 - .end = 288, 607 + .start = evt2irq(0x2600), 608 + .end = evt2irq(0x2600), 609 609 .flags = IORESOURCE_IRQ, 610 610 }, 611 611 }; ··· 668 668 .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, 669 669 }, 670 670 { 671 - .start = 54, 671 + .start = evt2irq(0x8c0), 672 672 .flags = IORESOURCE_IRQ, 673 673 }, 674 674 }; ··· 687 687 .flags = IORESOURCE_MEM, 688 688 }, 689 689 { 690 - .start = 220, 690 + .start = evt2irq(0x1d80), 691 691 .flags = IORESOURCE_IRQ, 692 692 }, 693 693 }; ··· 706 706 .flags = IORESOURCE_MEM, 707 707 }, 708 708 [1] = { 709 - .start = 57, 710 - .end = 57, 709 + .start = evt2irq(0x920), 710 + .end = evt2irq(0x920), 711 711 .flags = IORESOURCE_IRQ, 712 712 }, 713 713 }; ··· 730 730 .flags = IORESOURCE_MEM, 731 731 }, 732 732 [1] = { 733 - .start = 57, 734 - .end = 57, 733 + .start = evt2irq(0x920), 734 + .end = evt2irq(0x920), 735 735 .flags = IORESOURCE_IRQ, 736 736 }, 737 737 };
+15 -13
arch/sh/kernel/cpu/sh4a/setup-sh7763.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/serial.h> 15 15 #include <linux/sh_timer.h> 16 + #include <linux/sh_intc.h> 16 17 #include <linux/io.h> 17 18 #include <linux/serial_sci.h> 18 19 ··· 41 40 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 42 41 .scbrr_algo_id = SCBRR_ALGO_2, 43 42 .type = PORT_SCIF, 44 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xB80)), 43 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xb80)), 45 44 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 46 45 }; 47 46 ··· 59 58 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 60 59 .scbrr_algo_id = SCBRR_ALGO_2, 61 60 .type = PORT_SCIF, 62 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xF00)), 61 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xf00)), 63 62 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 64 63 }; 65 64 ··· 79 78 }, 80 79 [1] = { 81 80 /* Shared Period/Carry/Alarm IRQ */ 82 - .start = 20, 81 + .start = evt2irq(0x480), 83 82 .flags = IORESOURCE_IRQ, 84 83 }, 85 84 }; ··· 98 97 .flags = IORESOURCE_MEM, 99 98 }, 100 99 [1] = { 101 - .start = 83, 102 - .end = 83, 100 + .start = evt2irq(0xc60), 101 + .end = evt2irq(0xc60), 103 102 .flags = IORESOURCE_IRQ, 104 103 }, 105 104 }; 106 105 107 106 static u64 usb_ohci_dma_mask = 0xffffffffUL; 107 + 108 108 static struct platform_device usb_ohci_device = { 109 109 .name = "sh_ohci", 110 110 .id = -1, ··· 124 122 .flags = IORESOURCE_MEM, 125 123 }, 126 124 [1] = { 127 - .start = 84, 128 - .end = 84, 125 + .start = evt2irq(0xc80), 126 + .end = evt2irq(0xc80), 129 127 .flags = IORESOURCE_IRQ, 130 128 }, 131 129 }; ··· 154 152 .flags = IORESOURCE_MEM, 155 153 }, 156 154 [1] = { 157 - .start = 28, 155 + .start = evt2irq(0x580), 158 156 .flags = IORESOURCE_IRQ, 159 157 }, 160 158 }; ··· 182 180 .flags = IORESOURCE_MEM, 183 181 }, 184 182 [1] = { 185 - .start = 29, 183 + .start = evt2irq(0x5a0), 186 184 .flags = IORESOURCE_IRQ, 187 185 }, 188 186 }; ··· 209 207 .flags = IORESOURCE_MEM, 210 208 }, 211 209 [1] = { 212 - .start = 30, 210 + .start = evt2irq(0x5c0), 213 211 .flags = IORESOURCE_IRQ, 214 212 }, 
215 213 }; ··· 236 234 .flags = IORESOURCE_MEM, 237 235 }, 238 236 [1] = { 239 - .start = 96, 237 + .start = evt2irq(0xe00), 240 238 .flags = IORESOURCE_IRQ, 241 239 }, 242 240 }; ··· 263 261 .flags = IORESOURCE_MEM, 264 262 }, 265 263 [1] = { 266 - .start = 97, 264 + .start = evt2irq(0xe20), 267 265 .flags = IORESOURCE_IRQ, 268 266 }, 269 267 }; ··· 290 288 .flags = IORESOURCE_MEM, 291 289 }, 292 290 [1] = { 293 - .start = 98, 291 + .start = evt2irq(0xe40), 294 292 .flags = IORESOURCE_IRQ, 295 293 }, 296 294 };
+20 -19
arch/sh/kernel/cpu/sh4a/setup-sh7770.c
··· 12 12 #include <linux/serial.h> 13 13 #include <linux/serial_sci.h> 14 14 #include <linux/sh_timer.h> 15 + #include <linux/sh_intc.h> 15 16 #include <linux/io.h> 16 17 17 18 static struct plat_sci_port scif0_platform_data = { ··· 21 20 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 22 21 .scbrr_algo_id = SCBRR_ALGO_2, 23 22 .type = PORT_SCIF, 24 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x9A0)), 23 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x9a0)), 25 24 }; 26 25 27 26 static struct platform_device scif0_device = { ··· 38 37 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 39 38 .scbrr_algo_id = SCBRR_ALGO_2, 40 39 .type = PORT_SCIF, 41 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x9C0)), 40 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x9c0)), 42 41 }; 43 42 44 43 static struct platform_device scif1_device = { ··· 55 54 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 56 55 .scbrr_algo_id = SCBRR_ALGO_2, 57 56 .type = PORT_SCIF, 58 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x9E0)), 57 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x9e0)), 59 58 }; 60 59 61 60 static struct platform_device scif2_device = { ··· 72 71 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 73 72 .scbrr_algo_id = SCBRR_ALGO_2, 74 73 .type = PORT_SCIF, 75 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xA00)), 74 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xa00)), 76 75 }; 77 76 78 77 static struct platform_device scif3_device = { ··· 89 88 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 90 89 .scbrr_algo_id = SCBRR_ALGO_2, 91 90 .type = PORT_SCIF, 92 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xA20)), 91 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xa20)), 93 92 }; 94 93 95 94 static struct platform_device scif4_device = { ··· 106 105 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 107 106 .scbrr_algo_id = SCBRR_ALGO_2, 108 107 .type = PORT_SCIF, 109 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xA40)), 108 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xa40)), 110 109 }; 111 110 112 111 static struct platform_device scif5_device = { ··· 123 122 .scscr = SCSCR_RE | 
SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 124 123 .scbrr_algo_id = SCBRR_ALGO_2, 125 124 .type = PORT_SCIF, 126 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xA60)), 125 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xa60)), 127 126 }; 128 127 129 128 static struct platform_device scif6_device = { ··· 140 139 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 141 140 .scbrr_algo_id = SCBRR_ALGO_2, 142 141 .type = PORT_SCIF, 143 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xA80)), 142 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xa80)), 144 143 }; 145 144 146 145 static struct platform_device scif7_device = { ··· 157 156 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 158 157 .scbrr_algo_id = SCBRR_ALGO_2, 159 158 .type = PORT_SCIF, 160 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xAA0)), 159 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xaa0)), 161 160 }; 162 161 163 162 static struct platform_device scif8_device = { ··· 174 173 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, 175 174 .scbrr_algo_id = SCBRR_ALGO_2, 176 175 .type = PORT_SCIF, 177 - .irqs = SCIx_IRQ_MUXED(evt2irq(0xAC0)), 176 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xac0)), 178 177 }; 179 178 180 179 static struct platform_device scif9_device = { ··· 198 197 .flags = IORESOURCE_MEM, 199 198 }, 200 199 [1] = { 201 - .start = 16, 200 + .start = evt2irq(0x400), 202 201 .flags = IORESOURCE_IRQ, 203 202 }, 204 203 }; ··· 226 225 .flags = IORESOURCE_MEM, 227 226 }, 228 227 [1] = { 229 - .start = 17, 228 + .start = evt2irq(0x420), 230 229 .flags = IORESOURCE_IRQ, 231 230 }, 232 231 }; ··· 253 252 .flags = IORESOURCE_MEM, 254 253 }, 255 254 [1] = { 256 - .start = 18, 255 + .start = evt2irq(0x440), 257 256 .flags = IORESOURCE_IRQ, 258 257 }, 259 258 }; ··· 280 279 .flags = IORESOURCE_MEM, 281 280 }, 282 281 [1] = { 283 - .start = 19, 282 + .start = evt2irq(0x460), 284 283 .flags = IORESOURCE_IRQ, 285 284 }, 286 285 }; ··· 307 306 .flags = IORESOURCE_MEM, 308 307 }, 309 308 [1] = { 310 - .start = 20, 309 + .start = evt2irq(0x480), 311 310 .flags = IORESOURCE_IRQ, 312 311 
}, 313 312 }; ··· 334 333 .flags = IORESOURCE_MEM, 335 334 }, 336 335 [1] = { 337 - .start = 21, 336 + .start = evt2irq(0x4a0), 338 337 .flags = IORESOURCE_IRQ, 339 338 }, 340 339 }; ··· 361 360 .flags = IORESOURCE_MEM, 362 361 }, 363 362 [1] = { 364 - .start = 22, 363 + .start = evt2irq(0x4c0), 365 364 .flags = IORESOURCE_IRQ, 366 365 }, 367 366 }; ··· 388 387 .flags = IORESOURCE_MEM, 389 388 }, 390 389 [1] = { 391 - .start = 23, 390 + .start = evt2irq(0x4e0), 392 391 .flags = IORESOURCE_IRQ, 393 392 }, 394 393 }; ··· 415 414 .flags = IORESOURCE_MEM, 416 415 }, 417 416 [1] = { 418 - .start = 24, 417 + .start = evt2irq(0x500), 419 418 .flags = IORESOURCE_IRQ, 420 419 }, 421 420 };
+22 -15
arch/sh/kernel/cpu/sh4a/setup-sh7780.c
··· 14 14 #include <linux/serial_sci.h> 15 15 #include <linux/sh_dma.h> 16 16 #include <linux/sh_timer.h> 17 + #include <linux/sh_intc.h> 17 18 #include <cpu/dma-register.h> 18 19 19 20 static struct plat_sci_port scif0_platform_data = { ··· 23 22 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 24 23 .scbrr_algo_id = SCBRR_ALGO_1, 25 24 .type = PORT_SCIF, 26 - .irqs = { 40, 40, 40, 40 }, 25 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x700)), 27 26 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 28 27 }; 29 28 ··· 41 40 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 42 41 .scbrr_algo_id = SCBRR_ALGO_1, 43 42 .type = PORT_SCIF, 44 - .irqs = { 76, 76, 76, 76 }, 43 + .irqs = SCIx_IRQ_MUXED(evt2irq(0xb80)), 45 44 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 46 45 }; 47 46 ··· 66 65 .flags = IORESOURCE_MEM, 67 66 }, 68 67 [1] = { 69 - .start = 28, 68 + .start = evt2irq(0x580), 70 69 .flags = IORESOURCE_IRQ, 71 70 }, 72 71 }; ··· 94 93 .flags = IORESOURCE_MEM, 95 94 }, 96 95 [1] = { 97 - .start = 29, 96 + .start = evt2irq(0x5a0), 98 97 .flags = IORESOURCE_IRQ, 99 98 }, 100 99 }; ··· 121 120 .flags = IORESOURCE_MEM, 122 121 }, 123 122 [1] = { 124 - .start = 30, 123 + .start = evt2irq(0x5c0), 125 124 .flags = IORESOURCE_IRQ, 126 125 }, 127 126 }; ··· 148 147 .flags = IORESOURCE_MEM, 149 148 }, 150 149 [1] = { 151 - .start = 96, 150 + .start = evt2irq(0xe00), 152 151 .flags = IORESOURCE_IRQ, 153 152 }, 154 153 }; ··· 175 174 .flags = IORESOURCE_MEM, 176 175 }, 177 176 [1] = { 178 - .start = 97, 177 + .start = evt2irq(0xe20), 179 178 .flags = IORESOURCE_IRQ, 180 179 }, 181 180 }; ··· 202 201 .flags = IORESOURCE_MEM, 203 202 }, 204 203 [1] = { 205 - .start = 98, 204 + .start = evt2irq(0xe40), 206 205 .flags = IORESOURCE_IRQ, 207 206 }, 208 207 }; ··· 225 224 }, 226 225 [1] = { 227 226 /* Shared Period/Carry/Alarm IRQ */ 228 - .start = 20, 227 + .start = evt2irq(0x480), 229 228 .flags = IORESOURCE_IRQ, 230 229 }, 231 230 }; ··· 322 321 .flags = IORESOURCE_MEM, 323 322 }, 324 
323 { 325 - /* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */ 324 + /* 325 + * Real DMA error vector is 0x6c0, and channel 326 + * vectors are 0x640-0x6a0, 0x780-0x7a0 327 + */ 326 328 .name = "error_irq", 327 - .start = 34, 328 - .end = 34, 329 + .start = evt2irq(0x640), 330 + .end = evt2irq(0x640), 329 331 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 330 332 }, 331 333 }; ··· 342 338 }, 343 339 /* DMAC1 has no DMARS */ 344 340 { 345 - /* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */ 341 + /* 342 + * Real DMA error vector is 0x6c0, and channel 343 + * vectors are 0x7c0-0x7e0, 0xd80-0xde0 344 + */ 346 345 .name = "error_irq", 347 - .start = 46, 348 - .end = 46, 346 + .start = evt2irq(0x7c0), 347 + .end = evt2irq(0x7c0), 349 348 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 350 349 }, 351 350 };
+22 -15
arch/sh/kernel/cpu/sh4a/setup-sh7785.c
··· 15 15 #include <linux/mm.h> 16 16 #include <linux/sh_dma.h> 17 17 #include <linux/sh_timer.h> 18 + #include <linux/sh_intc.h> 18 19 #include <asm/mmzone.h> 19 20 #include <cpu/dma-register.h> 20 21 ··· 79 78 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 80 79 .scbrr_algo_id = SCBRR_ALGO_1, 81 80 .type = PORT_SCIF, 82 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x9A0)), 81 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x9a0)), 83 82 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 84 83 }; 85 84 ··· 97 96 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 98 97 .scbrr_algo_id = SCBRR_ALGO_1, 99 98 .type = PORT_SCIF, 100 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x9C0)), 99 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x9c0)), 101 100 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 102 101 }; 103 102 ··· 115 114 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 116 115 .scbrr_algo_id = SCBRR_ALGO_1, 117 116 .type = PORT_SCIF, 118 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x9E0)), 117 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x9e0)), 119 118 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 120 119 }; 121 120 ··· 140 139 .flags = IORESOURCE_MEM, 141 140 }, 142 141 [1] = { 143 - .start = 28, 142 + .start = evt2irq(0x580), 144 143 .flags = IORESOURCE_IRQ, 145 144 }, 146 145 }; ··· 168 167 .flags = IORESOURCE_MEM, 169 168 }, 170 169 [1] = { 171 - .start = 29, 170 + .start = evt2irq(0x5a0), 172 171 .flags = IORESOURCE_IRQ, 173 172 }, 174 173 }; ··· 195 194 .flags = IORESOURCE_MEM, 196 195 }, 197 196 [1] = { 198 - .start = 30, 197 + .start = evt2irq(0x5c0), 199 198 .flags = IORESOURCE_IRQ, 200 199 }, 201 200 }; ··· 222 221 .flags = IORESOURCE_MEM, 223 222 }, 224 223 [1] = { 225 - .start = 96, 224 + .start = evt2irq(0xe00), 226 225 .flags = IORESOURCE_IRQ, 227 226 }, 228 227 }; ··· 249 248 .flags = IORESOURCE_MEM, 250 249 }, 251 250 [1] = { 252 - .start = 97, 251 + .start = evt2irq(0xe20), 253 252 .flags = IORESOURCE_IRQ, 254 253 }, 255 254 }; ··· 276 275 .flags = IORESOURCE_MEM, 277 276 }, 278 277 [1] = { 279 - .start = 98, 
278 + .start = evt2irq(0xe40), 280 279 .flags = IORESOURCE_IRQ, 281 280 }, 282 281 }; ··· 376 375 .flags = IORESOURCE_MEM, 377 376 }, 378 377 { 379 - /* Real DMA error IRQ is 39, and channel IRQs are 33-38 */ 378 + /* 379 + * Real DMA error vector is 0x6e0, and channel 380 + * vectors are 0x620-0x6c0 381 + */ 380 382 .name = "error_irq", 381 - .start = 33, 382 - .end = 33, 383 + .start = evt2irq(0x620), 384 + .end = evt2irq(0x620), 383 385 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 384 386 }, 385 387 }; ··· 396 392 }, 397 393 /* DMAC1 has no DMARS */ 398 394 { 399 - /* Real DMA error IRQ is 58, and channel IRQs are 52-57 */ 395 + /* 396 + * Real DMA error vector is 0x940, and channel 397 + * vectors are 0x880-0x920 398 + */ 400 399 .name = "error_irq", 401 - .start = 52, 402 - .end = 52, 400 + .start = evt2irq(0x880), 401 + .end = evt2irq(0x880), 403 402 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 404 403 }, 405 404 };
+21 -18
arch/sh/kernel/cpu/sh4a/setup-sh7786.c
··· 32 32 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 33 33 .scbrr_algo_id = SCBRR_ALGO_1, 34 34 .type = PORT_SCIF, 35 - .irqs = { 40, 41, 43, 42 }, 35 + .irqs = { evt2irq(0x700), 36 + evt2irq(0x720), 37 + evt2irq(0x760), 38 + evt2irq(0x740) }, 36 39 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 37 40 }; 38 41 ··· 128 125 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1, 129 126 .scbrr_algo_id = SCBRR_ALGO_1, 130 127 .type = PORT_SCIF, 131 - .irqs = SCIx_IRQ_MUXED(evt2irq(0x8A0)), 128 + .irqs = SCIx_IRQ_MUXED(evt2irq(0x8a0)), 132 129 .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, 133 130 }; 134 131 ··· 153 150 .flags = IORESOURCE_MEM, 154 151 }, 155 152 [1] = { 156 - .start = 16, 153 + .start = evt2irq(0x400), 157 154 .flags = IORESOURCE_IRQ, 158 155 }, 159 156 }; ··· 181 178 .flags = IORESOURCE_MEM, 182 179 }, 183 180 [1] = { 184 - .start = 17, 181 + .start = evt2irq(0x420), 185 182 .flags = IORESOURCE_IRQ, 186 183 }, 187 184 }; ··· 208 205 .flags = IORESOURCE_MEM, 209 206 }, 210 207 [1] = { 211 - .start = 18, 208 + .start = evt2irq(0x440), 212 209 .flags = IORESOURCE_IRQ, 213 210 }, 214 211 }; ··· 235 232 .flags = IORESOURCE_MEM, 236 233 }, 237 234 [1] = { 238 - .start = 20, 235 + .start = evt2irq(0x480), 239 236 .flags = IORESOURCE_IRQ, 240 237 }, 241 238 }; ··· 262 259 .flags = IORESOURCE_MEM, 263 260 }, 264 261 [1] = { 265 - .start = 21, 262 + .start = evt2irq(0x4a0), 266 263 .flags = IORESOURCE_IRQ, 267 264 }, 268 265 }; ··· 289 286 .flags = IORESOURCE_MEM, 290 287 }, 291 288 [1] = { 292 - .start = 22, 289 + .start = evt2irq(0x4c0), 293 290 .flags = IORESOURCE_IRQ, 294 291 }, 295 292 }; ··· 316 313 .flags = IORESOURCE_MEM, 317 314 }, 318 315 [1] = { 319 - .start = 45, 316 + .start = evt2irq(0x7a0), 320 317 .flags = IORESOURCE_IRQ, 321 318 }, 322 319 }; ··· 343 340 .flags = IORESOURCE_MEM, 344 341 }, 345 342 [1] = { 346 - .start = 45, 343 + .start = evt2irq(0x7a0), 347 344 .flags = IORESOURCE_IRQ, 348 345 }, 349 346 }; ··· 370 367 .flags = 
IORESOURCE_MEM, 371 368 }, 372 369 [1] = { 373 - .start = 45, 370 + .start = evt2irq(0x7a0), 374 371 .flags = IORESOURCE_IRQ, 375 372 }, 376 373 }; ··· 397 394 .flags = IORESOURCE_MEM, 398 395 }, 399 396 [1] = { 400 - .start = 46, 397 + .start = evt2irq(0x7c0), 401 398 .flags = IORESOURCE_IRQ, 402 399 }, 403 400 }; ··· 424 421 .flags = IORESOURCE_MEM, 425 422 }, 426 423 [1] = { 427 - .start = 46, 424 + .start = evt2irq(0x7c0), 428 425 .flags = IORESOURCE_IRQ, 429 426 }, 430 427 }; ··· 451 448 .flags = IORESOURCE_MEM, 452 449 }, 453 450 [1] = { 454 - .start = 46, 451 + .start = evt2irq(0x7c0), 455 452 .flags = IORESOURCE_IRQ, 456 453 }, 457 454 }; ··· 553 550 .flags = IORESOURCE_MEM, 554 551 }, 555 552 [1] = { 556 - .start = 77, 557 - .end = 77, 553 + .start = evt2irq(0xba0), 554 + .end = evt2irq(0xba0), 558 555 .flags = IORESOURCE_IRQ, 559 556 }, 560 557 }; ··· 577 574 .flags = IORESOURCE_MEM, 578 575 }, 579 576 [1] = { 580 - .start = 77, 581 - .end = 77, 577 + .start = evt2irq(0xba0), 578 + .end = evt2irq(0xba0), 582 579 .flags = IORESOURCE_IRQ, 583 580 }, 584 581 };
+19 -9
arch/sh/kernel/cpu/sh4a/setup-shx3.c
··· 14 14 #include <linux/io.h> 15 15 #include <linux/gpio.h> 16 16 #include <linux/sh_timer.h> 17 + #include <linux/sh_intc.h> 17 18 #include <cpu/shx3.h> 18 19 #include <asm/mmzone.h> 19 20 ··· 33 32 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 34 33 .scbrr_algo_id = SCBRR_ALGO_2, 35 34 .type = PORT_SCIF, 36 - .irqs = { 40, 41, 43, 42 }, 35 + .irqs = { evt2irq(0x700), 36 + evt2irq(0x720), 37 + evt2irq(0x760), 38 + evt2irq(0x740) }, 37 39 }; 38 40 39 41 static struct platform_device scif0_device = { ··· 53 49 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 54 50 .scbrr_algo_id = SCBRR_ALGO_2, 55 51 .type = PORT_SCIF, 56 - .irqs = { 44, 45, 47, 46 }, 52 + .irqs = { evt2irq(0x780), 53 + evt2irq(0x7a0), 54 + evt2irq(0x7e0), 55 + evt2irq(0x7c0) }, 57 56 }; 58 57 59 58 static struct platform_device scif1_device = { ··· 73 66 .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, 74 67 .scbrr_algo_id = SCBRR_ALGO_2, 75 68 .type = PORT_SCIF, 76 - .irqs = { 52, 53, 55, 54 }, 69 + .irqs = { evt2irq(0x880), 70 + evt2irq(0x8a0), 71 + evt2irq(0x8e0), 72 + evt2irq(0x8c0) }, 77 73 }; 78 74 79 75 static struct platform_device scif2_device = { ··· 100 90 .flags = IORESOURCE_MEM, 101 91 }, 102 92 [1] = { 103 - .start = 16, 93 + .start = evt2irq(0x400), 104 94 .flags = IORESOURCE_IRQ, 105 95 }, 106 96 }; ··· 128 118 .flags = IORESOURCE_MEM, 129 119 }, 130 120 [1] = { 131 - .start = 17, 121 + .start = evt2irq(0x420), 132 122 .flags = IORESOURCE_IRQ, 133 123 }, 134 124 }; ··· 155 145 .flags = IORESOURCE_MEM, 156 146 }, 157 147 [1] = { 158 - .start = 18, 148 + .start = evt2irq(0x440), 159 149 .flags = IORESOURCE_IRQ, 160 150 }, 161 151 }; ··· 182 172 .flags = IORESOURCE_MEM, 183 173 }, 184 174 [1] = { 185 - .start = 19, 175 + .start = evt2irq(0x460), 186 176 .flags = IORESOURCE_IRQ, 187 177 }, 188 178 }; ··· 209 199 .flags = IORESOURCE_MEM, 210 200 }, 211 201 [1] = { 212 - .start = 20, 202 + .start = evt2irq(0x480), 213 203 .flags = IORESOURCE_IRQ, 214 204 }, 215 205 }; ··· 236 226 .flags = 
IORESOURCE_MEM, 237 227 }, 238 228 [1] = { 239 - .start = 21, 229 + .start = evt2irq(0x4a0), 240 230 .flags = IORESOURCE_IRQ, 241 231 }, 242 232 };
+1 -1
arch/sparc/kernel/central.c
··· 269 269 return 0; 270 270 } 271 271 272 - subsys_initcall(sunfire_init); 272 + fs_initcall(sunfire_init);
+3 -3
arch/sparc/mm/ultra.S
··· 495 495 stx %o7, [%g1 + GR_SNAP_O7] 496 496 stx %i7, [%g1 + GR_SNAP_I7] 497 497 /* Don't try this at home kids... */ 498 - rdpr %cwp, %g2 499 - sub %g2, 1, %g7 498 + rdpr %cwp, %g3 499 + sub %g3, 1, %g7 500 500 wrpr %g7, %cwp 501 501 mov %i7, %g7 502 - wrpr %g2, %cwp 502 + wrpr %g3, %cwp 503 503 stx %g7, [%g1 + GR_SNAP_RPC] 504 504 sethi %hi(trap_block), %g7 505 505 or %g7, %lo(trap_block), %g7
+7 -2
arch/tile/include/asm/thread_info.h
··· 100 100 101 101 #else /* __ASSEMBLY__ */ 102 102 103 - /* how to get the thread information struct from ASM */ 103 + /* 104 + * How to get the thread information struct from assembly. 105 + * Note that we use different macros since different architectures 106 + * have different semantics in their "mm" instruction and we would 107 + * like to guarantee that the macro expands to exactly one instruction. 108 + */ 104 109 #ifdef __tilegx__ 105 - #define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63 110 + #define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63 106 111 #else 107 112 #define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31 108 113 #endif
+5 -7
arch/tile/kernel/compat_signal.c
··· 403 403 * Set up registers for signal handler. 404 404 * Registers that we don't modify keep the value they had from 405 405 * user-space at the time we took the signal. 406 + * We always pass siginfo and mcontext, regardless of SA_SIGINFO, 407 + * since some things rely on this (e.g. glibc's debug/segfault.c). 406 408 */ 407 409 regs->pc = ptr_to_compat_reg(ka->sa.sa_handler); 408 410 regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ 409 411 regs->sp = ptr_to_compat_reg(frame); 410 412 regs->lr = restorer; 411 413 regs->regs[0] = (unsigned long) usig; 412 - 413 - if (ka->sa.sa_flags & SA_SIGINFO) { 414 - /* Need extra arguments, so mark to restore caller-saves. */ 415 - regs->regs[1] = ptr_to_compat_reg(&frame->info); 416 - regs->regs[2] = ptr_to_compat_reg(&frame->uc); 417 - regs->flags |= PT_FLAGS_CALLER_SAVES; 418 - } 414 + regs->regs[1] = ptr_to_compat_reg(&frame->info); 415 + regs->regs[2] = ptr_to_compat_reg(&frame->uc); 416 + regs->flags |= PT_FLAGS_CALLER_SAVES; 419 417 420 418 /* 421 419 * Notify any tracer that was single-stepping it.
+28 -13
arch/tile/kernel/intvec_32.S
··· 839 839 FEEDBACK_REENTER(interrupt_return) 840 840 841 841 /* 842 + * Use r33 to hold whether we have already loaded the callee-saves 843 + * into ptregs. We don't want to do it twice in this loop, since 844 + * then we'd clobber whatever changes are made by ptrace, etc. 845 + * Get base of stack in r32. 846 + */ 847 + { 848 + GET_THREAD_INFO(r32) 849 + movei r33, 0 850 + } 851 + 852 + .Lretry_work_pending: 853 + /* 842 854 * Disable interrupts so as to make sure we don't 843 855 * miss an interrupt that sets any of the thread flags (like 844 856 * need_resched or sigpending) between sampling and the iret. ··· 859 847 */ 860 848 IRQ_DISABLE(r20, r21) 861 849 TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */ 862 - 863 - /* Get base of stack in r32; note r30/31 are used as arguments here. */ 864 - GET_THREAD_INFO(r32) 865 850 866 851 867 852 /* Check to see if there is any work to do before returning to user. */ ··· 875 866 876 867 /* 877 868 * Make sure we have all the registers saved for signal 878 - * handling or single-step. Call out to C code to figure out 879 - * exactly what we need to do for each flag bit, then if 880 - * necessary, reload the flags and recheck. 869 + * handling, notify-resume, or single-step. Call out to C 870 + * code to figure out exactly what we need to do for each flag bit, 871 + * then if necessary, reload the flags and recheck. 881 872 */ 882 - push_extra_callee_saves r0 883 873 { 884 874 PTREGS_PTR(r0, PTREGS_OFFSET_BASE) 885 - jal do_work_pending 875 + bnz r33, 1f 886 876 } 887 - bnz r0, .Lresume_userspace 877 + push_extra_callee_saves r0 878 + movei r33, 1 879 + 1: jal do_work_pending 880 + bnz r0, .Lretry_work_pending 888 881 889 882 /* 890 883 * In the NMI case we ··· 1191 1180 add r20, r20, tp 1192 1181 lw r21, r20 1193 1182 addi r21, r21, 1 1194 - sw r20, r21 1183 + { 1184 + sw r20, r21 1185 + GET_THREAD_INFO(r31) 1186 + } 1195 1187 1196 1188 /* Trace syscalls, if requested. 
*/ 1197 - GET_THREAD_INFO(r31) 1198 1189 addi r31, r31, THREAD_INFO_FLAGS_OFFSET 1199 1190 lw r30, r31 1200 1191 andi r30, r30, _TIF_SYSCALL_TRACE ··· 1375 1362 3: 1376 1363 /* set PC and continue */ 1377 1364 lw r26, r24 1378 - sw r28, r26 1365 + { 1366 + sw r28, r26 1367 + GET_THREAD_INFO(r0) 1368 + } 1379 1369 1380 1370 /* 1381 1371 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill. ··· 1386 1370 * need to clear it here and can't really impose on all other arches. 1387 1371 * So what's another write between friends? 1388 1372 */ 1389 - GET_THREAD_INFO(r0) 1390 1373 1391 1374 addi r1, r0, THREAD_INFO_FLAGS_OFFSET 1392 1375 {
+28 -10
arch/tile/kernel/intvec_64.S
··· 647 647 FEEDBACK_REENTER(interrupt_return) 648 648 649 649 /* 650 + * Use r33 to hold whether we have already loaded the callee-saves 651 + * into ptregs. We don't want to do it twice in this loop, since 652 + * then we'd clobber whatever changes are made by ptrace, etc. 653 + */ 654 + { 655 + movei r33, 0 656 + move r32, sp 657 + } 658 + 659 + /* Get base of stack in r32. */ 660 + EXTRACT_THREAD_INFO(r32) 661 + 662 + .Lretry_work_pending: 663 + /* 650 664 * Disable interrupts so as to make sure we don't 651 665 * miss an interrupt that sets any of the thread flags (like 652 666 * need_resched or sigpending) between sampling and the iret. ··· 669 655 */ 670 656 IRQ_DISABLE(r20, r21) 671 657 TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */ 672 - 673 - /* Get base of stack in r32; note r30/31 are used as arguments here. */ 674 - GET_THREAD_INFO(r32) 675 658 676 659 677 660 /* Check to see if there is any work to do before returning to user. */ ··· 685 674 686 675 /* 687 676 * Make sure we have all the registers saved for signal 688 - * handling or single-step. Call out to C code to figure out 677 + * handling or notify-resume. Call out to C code to figure out 689 678 * exactly what we need to do for each flag bit, then if 690 679 * necessary, reload the flags and recheck. 691 680 */ 692 - push_extra_callee_saves r0 693 681 { 694 682 PTREGS_PTR(r0, PTREGS_OFFSET_BASE) 695 - jal do_work_pending 683 + bnez r33, 1f 696 684 } 697 - bnez r0, .Lresume_userspace 685 + push_extra_callee_saves r0 686 + movei r33, 1 687 + 1: jal do_work_pending 688 + bnez r0, .Lretry_work_pending 698 689 699 690 /* 700 691 * In the NMI case we ··· 981 968 shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET) 982 969 add r20, r20, tp 983 970 ld4s r21, r20 984 - addi r21, r21, 1 985 - st4 r20, r21 971 + { 972 + addi r21, r21, 1 973 + move r31, sp 974 + } 975 + { 976 + st4 r20, r21 977 + EXTRACT_THREAD_INFO(r31) 978 + } 986 979 987 980 /* Trace syscalls, if requested. 
*/ 988 - GET_THREAD_INFO(r31) 989 981 addi r31, r31, THREAD_INFO_FLAGS_OFFSET 990 982 ld r30, r31 991 983 andi r30, r30, _TIF_SYSCALL_TRACE
+5 -2
arch/tile/kernel/process.c
··· 567 567 */ 568 568 int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) 569 569 { 570 + /* If we enter in kernel mode, do nothing and exit the caller loop. */ 571 + if (!user_mode(regs)) 572 + return 0; 573 + 570 574 if (thread_info_flags & _TIF_NEED_RESCHED) { 571 575 schedule(); 572 576 return 1; ··· 593 589 return 1; 594 590 } 595 591 if (thread_info_flags & _TIF_SINGLESTEP) { 596 - if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0) 597 - single_step_once(regs); 592 + single_step_once(regs); 598 593 return 0; 599 594 } 600 595 panic("work_pending: bad flags %#x\n", thread_info_flags);
+3
arch/x86/include/asm/kvm_para.h
··· 170 170 unsigned int eax, ebx, ecx, edx; 171 171 char signature[13]; 172 172 173 + if (boot_cpu_data.cpuid_level < 0) 174 + return 0; /* So we don't blow up on old processors */ 175 + 173 176 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); 174 177 memcpy(signature + 0, &ebx, 4); 175 178 memcpy(signature + 4, &ecx, 4);
+1 -1
arch/x86/kernel/acpi/boot.c
··· 593 593 #ifdef CONFIG_ACPI_HOTPLUG_CPU 594 594 #include <acpi/processor.h> 595 595 596 - static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 596 + static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 597 597 { 598 598 #ifdef CONFIG_ACPI_NUMA 599 599 int nid;
+8 -6
arch/x86/kernel/microcode_intel.c
··· 147 147 148 148 memset(csig, 0, sizeof(*csig)); 149 149 150 - if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || 151 - cpu_has(c, X86_FEATURE_IA64)) { 152 - pr_err("CPU%d not a capable Intel processor\n", cpu_num); 153 - return -1; 154 - } 155 - 156 150 csig->sig = cpuid_eax(0x00000001); 157 151 158 152 if ((c->x86_model >= 5) || (c->x86 > 6)) { ··· 457 463 458 464 struct microcode_ops * __init init_intel_microcode(void) 459 465 { 466 + struct cpuinfo_x86 *c = &cpu_data(0); 467 + 468 + if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || 469 + cpu_has(c, X86_FEATURE_IA64)) { 470 + pr_err("Intel CPU family 0x%x not supported\n", c->x86); 471 + return NULL; 472 + } 473 + 460 474 return &microcode_intel_ops; 461 475 } 462 476
+4
drivers/acpi/bus.c
··· 250 250 return -ENODEV; 251 251 } 252 252 253 + /* For D3cold we should execute _PS3, not _PS4. */ 254 + if (state == ACPI_STATE_D3_COLD) 255 + object_name[3] = '3'; 256 + 253 257 /* 254 258 * Transition Power 255 259 * ----------------
+6 -3
drivers/acpi/power.c
··· 660 660 661 661 int acpi_power_transition(struct acpi_device *device, int state) 662 662 { 663 - int result; 663 + int result = 0; 664 664 665 665 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) 666 666 return -EINVAL; ··· 679 679 * (e.g. so the device doesn't lose power while transitioning). Then, 680 680 * we dereference all power resources used in the current list. 681 681 */ 682 - result = acpi_power_on_list(&device->power.states[state].resources); 683 - if (!result) 682 + if (state < ACPI_STATE_D3_COLD) 683 + result = acpi_power_on_list( 684 + &device->power.states[state].resources); 685 + 686 + if (!result && device->power.state < ACPI_STATE_D3_COLD) 684 687 acpi_power_off_list( 685 688 &device->power.states[device->power.state].resources); 686 689
+4
drivers/acpi/scan.c
··· 908 908 device->power.states[ACPI_STATE_D3].flags.valid = 1; 909 909 device->power.states[ACPI_STATE_D3].power = 0; 910 910 911 + /* Set D3cold's explicit_set flag if _PS3 exists. */ 912 + if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) 913 + device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1; 914 + 911 915 acpi_bus_init_power(device); 912 916 913 917 return 0;
+1 -1
drivers/block/drbd/drbd_nl.c
··· 2297 2297 return; 2298 2298 } 2299 2299 2300 - if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) { 2300 + if (!capable(CAP_SYS_ADMIN)) { 2301 2301 retcode = ERR_PERM; 2302 2302 goto fail; 2303 2303 }
+7
drivers/char/virtio_console.c
··· 1895 1895 1896 1896 /* Get port open/close status on the host */ 1897 1897 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 1898 + 1899 + /* 1900 + * If a port was open at the time of suspending, we 1901 + * have to let the host know that it's still open. 1902 + */ 1903 + if (port->guest_connected) 1904 + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); 1898 1905 } 1899 1906 return 0; 1900 1907 }
+1
drivers/crypto/Kconfig
··· 164 164 select CRYPTO_ALGAPI 165 165 select CRYPTO_AES 166 166 select CRYPTO_BLKCIPHER2 167 + select CRYPTO_HASH 167 168 help 168 169 This driver allows you to utilize the Cryptographic Engines and 169 170 Security Accelerator (CESA) which can be found on the Marvell Orion
+3 -1
drivers/dma/at_hdmac.c
··· 245 245 dev_vdbg(chan2dev(&atchan->chan_common), 246 246 "descriptor %u complete\n", txd->cookie); 247 247 248 - dma_cookie_complete(txd); 248 + /* mark the descriptor as complete for non cyclic cases only */ 249 + if (!atc_chan_is_cyclic(atchan)) 250 + dma_cookie_complete(txd); 249 251 250 252 /* move children to free_list */ 251 253 list_splice_init(&desc->tx_list, &atchan->free_list);
+3 -1
drivers/dma/ep93xx_dma.c
··· 703 703 desc = ep93xx_dma_get_active(edmac); 704 704 if (desc) { 705 705 if (desc->complete) { 706 - dma_cookie_complete(&desc->txd); 706 + /* mark descriptor complete for non cyclic case only */ 707 + if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) 708 + dma_cookie_complete(&desc->txd); 707 709 list_splice_init(&edmac->active, &list); 708 710 } 709 711 callback = desc->txd.callback;
+2 -1
drivers/dma/pl330.c
··· 2322 2322 /* Pick up ripe tomatoes */ 2323 2323 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 2324 2324 if (desc->status == DONE) { 2325 - dma_cookie_complete(&desc->txd); 2325 + if (pch->cyclic) 2326 + dma_cookie_complete(&desc->txd); 2326 2327 list_move_tail(&desc->node, &list); 2327 2328 } 2328 2329
+3 -6
drivers/gpio/gpio-omap.c
··· 965 965 } 966 966 967 967 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv); 968 - _gpio_rmw(base, bank->regs->irqstatus, l, 969 - bank->regs->irqenable_inv == false); 970 - _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0); 971 - _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0); 968 + _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv); 972 969 if (bank->regs->debounce_en) 973 - _gpio_rmw(base, bank->regs->debounce_en, 0, 1); 970 + __raw_writel(0, base + bank->regs->debounce_en); 974 971 975 972 /* Save OE default value (0xffffffff) in the context */ 976 973 bank->context.oe = __raw_readl(bank->base + bank->regs->direction); 977 974 /* Initialize interface clk ungated, module enabled */ 978 975 if (bank->regs->ctrl) 979 - _gpio_rmw(base, bank->regs->ctrl, 0, 1); 976 + __raw_writel(0, base + bank->regs->ctrl); 980 977 } 981 978 982 979 static __devinit void
+28 -29
drivers/gpio/gpio-pch.c
··· 230 230 231 231 static int pch_irq_type(struct irq_data *d, unsigned int type) 232 232 { 233 - u32 im; 234 - u32 __iomem *im_reg; 235 - u32 ien; 236 - u32 im_pos; 237 - int ch; 238 - unsigned long flags; 239 - u32 val; 240 - int irq = d->irq; 241 233 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 242 234 struct pch_gpio *chip = gc->private; 235 + u32 im, im_pos, val; 236 + u32 __iomem *im_reg; 237 + unsigned long flags; 238 + int ch, irq = d->irq; 243 239 244 240 ch = irq - chip->irq_base; 245 241 if (irq <= chip->irq_base + 7) { ··· 266 270 case IRQ_TYPE_LEVEL_LOW: 267 271 val = PCH_LEVEL_L; 268 272 break; 269 - case IRQ_TYPE_PROBE: 270 - goto end; 271 273 default: 272 - dev_warn(chip->dev, "%s: unknown type(%dd)", 273 - __func__, type); 274 - goto end; 274 + goto unlock; 275 275 } 276 276 277 277 /* Set interrupt mode */ 278 278 im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4)); 279 279 iowrite32(im | (val << (im_pos * 4)), im_reg); 280 280 281 - /* iclr */ 282 - iowrite32(BIT(ch), &chip->reg->iclr); 281 + /* And the handler */ 282 + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 283 + __irq_set_handler_locked(d->irq, handle_level_irq); 284 + else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) 285 + __irq_set_handler_locked(d->irq, handle_edge_irq); 283 286 284 - /* IMASKCLR */ 285 - iowrite32(BIT(ch), &chip->reg->imaskclr); 286 - 287 - /* Enable interrupt */ 288 - ien = ioread32(&chip->reg->ien); 289 - iowrite32(ien | BIT(ch), &chip->reg->ien); 290 - end: 287 + unlock: 291 288 spin_unlock_irqrestore(&chip->spinlock, flags); 292 - 293 289 return 0; 294 290 } 295 291 ··· 301 313 iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask); 302 314 } 303 315 316 + static void pch_irq_ack(struct irq_data *d) 317 + { 318 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 319 + struct pch_gpio *chip = gc->private; 320 + 321 + iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr); 322 + } 323 + 304 324 static 
irqreturn_t pch_gpio_handler(int irq, void *dev_id) 305 325 { 306 326 struct pch_gpio *chip = dev_id; 307 327 u32 reg_val = ioread32(&chip->reg->istatus); 308 - int i; 309 - int ret = IRQ_NONE; 328 + int i, ret = IRQ_NONE; 310 329 311 330 for (i = 0; i < gpio_pins[chip->ioh]; i++) { 312 331 if (reg_val & BIT(i)) { 313 332 dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n", 314 333 __func__, i, irq, reg_val); 315 - iowrite32(BIT(i), &chip->reg->iclr); 316 334 generic_handle_irq(chip->irq_base + i); 317 335 ret = IRQ_HANDLED; 318 336 } ··· 337 343 gc->private = chip; 338 344 ct = gc->chip_types; 339 345 346 + ct->chip.irq_ack = pch_irq_ack; 340 347 ct->chip.irq_mask = pch_irq_mask; 341 348 ct->chip.irq_unmask = pch_irq_unmask; 342 349 ct->chip.irq_set_type = pch_irq_type; ··· 352 357 s32 ret; 353 358 struct pch_gpio *chip; 354 359 int irq_base; 360 + u32 msk; 355 361 356 362 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 357 363 if (chip == NULL) ··· 404 408 } 405 409 chip->irq_base = irq_base; 406 410 411 + /* Mask all interrupts, but enable them */ 412 + msk = (1 << gpio_pins[chip->ioh]) - 1; 413 + iowrite32(msk, &chip->reg->imask); 414 + iowrite32(msk, &chip->reg->ien); 415 + 407 416 ret = request_irq(pdev->irq, pch_gpio_handler, 408 - IRQF_SHARED, KBUILD_MODNAME, chip); 417 + IRQF_SHARED, KBUILD_MODNAME, chip); 409 418 if (ret != 0) { 410 419 dev_err(&pdev->dev, 411 420 "%s request_irq failed\n", __func__); ··· 419 418 420 419 pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]); 421 420 422 - /* Initialize interrupt ien register */ 423 - iowrite32(0, &chip->reg->ien); 424 421 end: 425 422 return 0; 426 423
+12 -6
drivers/gpio/gpio-samsung.c
··· 452 452 }; 453 453 #endif 454 454 455 + #if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5) 455 456 static struct samsung_gpio_cfg exynos_gpio_cfg = { 456 457 .set_pull = exynos_gpio_setpull, 457 458 .get_pull = exynos_gpio_getpull, 458 459 .set_config = samsung_gpio_setcfg_4bit, 459 460 .get_config = samsung_gpio_getcfg_4bit, 460 461 }; 462 + #endif 461 463 462 464 #if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) 463 465 static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = { ··· 2125 2123 * uses the above macro and depends on the banks being listed in order here. 2126 2124 */ 2127 2125 2128 - static struct samsung_gpio_chip exynos4_gpios_1[] = { 2129 2126 #ifdef CONFIG_ARCH_EXYNOS4 2127 + static struct samsung_gpio_chip exynos4_gpios_1[] = { 2130 2128 { 2131 2129 .chip = { 2132 2130 .base = EXYNOS4_GPA0(0), ··· 2224 2222 .label = "GPF3", 2225 2223 }, 2226 2224 }, 2227 - #endif 2228 2225 }; 2226 + #endif 2229 2227 2230 - static struct samsung_gpio_chip exynos4_gpios_2[] = { 2231 2228 #ifdef CONFIG_ARCH_EXYNOS4 2229 + static struct samsung_gpio_chip exynos4_gpios_2[] = { 2232 2230 { 2233 2231 .chip = { 2234 2232 .base = EXYNOS4_GPJ0(0), ··· 2369 2367 .to_irq = samsung_gpiolib_to_irq, 2370 2368 }, 2371 2369 }, 2372 - #endif 2373 2370 }; 2371 + #endif 2374 2372 2375 - static struct samsung_gpio_chip exynos4_gpios_3[] = { 2376 2373 #ifdef CONFIG_ARCH_EXYNOS4 2374 + static struct samsung_gpio_chip exynos4_gpios_3[] = { 2377 2375 { 2378 2376 .chip = { 2379 2377 .base = EXYNOS4_GPZ(0), ··· 2381 2379 .label = "GPZ", 2382 2380 }, 2383 2381 }, 2384 - #endif 2385 2382 }; 2383 + #endif 2386 2384 2387 2385 #ifdef CONFIG_ARCH_EXYNOS5 2388 2386 static struct samsung_gpio_chip exynos5_gpios_1[] = { ··· 2721 2719 { 2722 2720 struct samsung_gpio_chip *chip; 2723 2721 int i, nr_chips; 2722 + #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250) 2724 2723 void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4; 2724 + #endif 
2725 2725 int group = 0; 2726 2726 2727 2727 samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs)); ··· 2975 2971 2976 2972 return 0; 2977 2973 2974 + #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250) 2978 2975 err_ioremap4: 2979 2976 iounmap(gpio_base3); 2980 2977 err_ioremap3: ··· 2984 2979 iounmap(gpio_base1); 2985 2980 err_ioremap1: 2986 2981 return -ENOMEM; 2982 + #endif 2987 2983 } 2988 2984 core_initcall(samsung_gpiolib_init); 2989 2985
+2 -2
drivers/leds/leds-netxbig.c
··· 112 112 return err; 113 113 } 114 114 115 - static void __devexit gpio_ext_free(struct netxbig_gpio_ext *gpio_ext) 115 + static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext) 116 116 { 117 117 int i; 118 118 ··· 294 294 295 295 static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store); 296 296 297 - static void __devexit delete_netxbig_led(struct netxbig_led_data *led_dat) 297 + static void delete_netxbig_led(struct netxbig_led_data *led_dat) 298 298 { 299 299 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) 300 300 device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
+1 -1
drivers/leds/leds-ns2.c
··· 255 255 return ret; 256 256 } 257 257 258 - static void __devexit delete_ns2_led(struct ns2_led_data *led_dat) 258 + static void delete_ns2_led(struct ns2_led_data *led_dat) 259 259 { 260 260 device_remove_file(led_dat->cdev.dev, &dev_attr_sata); 261 261 led_classdev_unregister(&led_dat->cdev);
+1 -1
drivers/md/dm-log-userspace-transfer.c
··· 134 134 { 135 135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); 136 136 137 - if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) 137 + if (!capable(CAP_SYS_ADMIN)) 138 138 return; 139 139 140 140 spin_lock(&receiving_list_lock);
+2 -2
drivers/md/dm-mpath.c
··· 718 718 return 0; 719 719 720 720 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); 721 - request_module("scsi_dh_%s", m->hw_handler_name); 722 - if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { 721 + if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name), 722 + "scsi_dh_%s", m->hw_handler_name)) { 723 723 ti->error = "unknown hardware handler type"; 724 724 ret = -EINVAL; 725 725 goto fail;
+12 -4
drivers/md/dm-thin.c
··· 279 279 280 280 hlist_del(&cell->list); 281 281 282 - bio_list_add(inmates, cell->holder); 283 - bio_list_merge(inmates, &cell->bios); 282 + if (inmates) { 283 + bio_list_add(inmates, cell->holder); 284 + bio_list_merge(inmates, &cell->bios); 285 + } 284 286 285 287 mempool_free(cell, prison->cell_pool); 286 288 } ··· 305 303 */ 306 304 static void __cell_release_singleton(struct cell *cell, struct bio *bio) 307 305 { 308 - hlist_del(&cell->list); 309 306 BUG_ON(cell->holder != bio); 310 307 BUG_ON(!bio_list_empty(&cell->bios)); 308 + 309 + __cell_release(cell, NULL); 311 310 } 312 311 313 312 static void cell_release_singleton(struct cell *cell, struct bio *bio) ··· 1180 1177 static void process_discard(struct thin_c *tc, struct bio *bio) 1181 1178 { 1182 1179 int r; 1180 + unsigned long flags; 1183 1181 struct pool *pool = tc->pool; 1184 1182 struct cell *cell, *cell2; 1185 1183 struct cell_key key, key2; ··· 1222 1218 m->bio = bio; 1223 1219 1224 1220 if (!ds_add_work(&pool->all_io_ds, &m->list)) { 1221 + spin_lock_irqsave(&pool->lock, flags); 1225 1222 list_add(&m->list, &pool->prepared_discards); 1223 + spin_unlock_irqrestore(&pool->lock, flags); 1226 1224 wake_worker(pool); 1227 1225 } 1228 1226 } else { ··· 2632 2626 if (h->all_io_entry) { 2633 2627 INIT_LIST_HEAD(&work); 2634 2628 ds_dec(h->all_io_entry, &work); 2629 + spin_lock_irqsave(&pool->lock, flags); 2635 2630 list_for_each_entry_safe(m, tmp, &work, list) 2636 2631 list_add(&m->list, &pool->prepared_discards); 2632 + spin_unlock_irqrestore(&pool->lock, flags); 2637 2633 } 2638 2634 2639 2635 mempool_free(h, pool->endio_hook_pool); ··· 2767 2759 module_init(dm_thin_init); 2768 2760 module_exit(dm_thin_exit); 2769 2761 2770 - MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target"); 2762 + MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); 2771 2763 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2772 2764 MODULE_LICENSE("GPL");
+2
drivers/md/md.c
··· 391 391 synchronize_rcu(); 392 392 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 393 393 mddev->pers->quiesce(mddev, 1); 394 + 395 + del_timer_sync(&mddev->safemode_timer); 394 396 } 395 397 EXPORT_SYMBOL_GPL(mddev_suspend); 396 398
+32 -24
drivers/md/raid10.c
··· 3164 3164 return size << conf->chunk_shift; 3165 3165 } 3166 3166 3167 + static void calc_sectors(struct r10conf *conf, sector_t size) 3168 + { 3169 + /* Calculate the number of sectors-per-device that will 3170 + * actually be used, and set conf->dev_sectors and 3171 + * conf->stride 3172 + */ 3173 + 3174 + size = size >> conf->chunk_shift; 3175 + sector_div(size, conf->far_copies); 3176 + size = size * conf->raid_disks; 3177 + sector_div(size, conf->near_copies); 3178 + /* 'size' is now the number of chunks in the array */ 3179 + /* calculate "used chunks per device" */ 3180 + size = size * conf->copies; 3181 + 3182 + /* We need to round up when dividing by raid_disks to 3183 + * get the stride size. 3184 + */ 3185 + size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks); 3186 + 3187 + conf->dev_sectors = size << conf->chunk_shift; 3188 + 3189 + if (conf->far_offset) 3190 + conf->stride = 1 << conf->chunk_shift; 3191 + else { 3192 + sector_div(size, conf->near_copies); 3193 + conf->stride = size << conf->chunk_shift; 3194 + } 3195 + } 3167 3196 3168 3197 static struct r10conf *setup_conf(struct mddev *mddev) 3169 3198 { 3170 3199 struct r10conf *conf = NULL; 3171 3200 int nc, fc, fo; 3172 - sector_t stride, size; 3173 3201 int err = -EINVAL; 3174 3202 3175 3203 if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) || ··· 3247 3219 if (!conf->r10bio_pool) 3248 3220 goto out; 3249 3221 3250 - size = mddev->dev_sectors >> conf->chunk_shift; 3251 - sector_div(size, fc); 3252 - size = size * conf->raid_disks; 3253 - sector_div(size, nc); 3254 - /* 'size' is now the number of chunks in the array */ 3255 - /* calculate "used chunks per device" in 'stride' */ 3256 - stride = size * conf->copies; 3257 - 3258 - /* We need to round up when dividing by raid_disks to 3259 - * get the stride size. 
3260 - */ 3261 - stride += conf->raid_disks - 1; 3262 - sector_div(stride, conf->raid_disks); 3263 - 3264 - conf->dev_sectors = stride << conf->chunk_shift; 3265 - 3266 - if (fo) 3267 - stride = 1; 3268 - else 3269 - sector_div(stride, fc); 3270 - conf->stride = stride << conf->chunk_shift; 3271 - 3222 + calc_sectors(conf, mddev->dev_sectors); 3272 3223 3273 3224 spin_lock_init(&conf->device_lock); 3274 3225 INIT_LIST_HEAD(&conf->retry_list); ··· 3475 3468 mddev->recovery_cp = oldsize; 3476 3469 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3477 3470 } 3478 - mddev->dev_sectors = sectors; 3471 + calc_sectors(conf, sectors); 3472 + mddev->dev_sectors = conf->dev_sectors; 3479 3473 mddev->resync_max_sectors = size; 3480 3474 return 0; 3481 3475 }
+4
drivers/media/dvb/dvb-core/dvb_frontend.c
··· 1921 1921 } else { 1922 1922 /* default values */ 1923 1923 switch (c->delivery_system) { 1924 + case SYS_DVBS: 1925 + case SYS_DVBS2: 1926 + case SYS_ISDBS: 1927 + case SYS_TURBO: 1924 1928 case SYS_DVBC_ANNEX_A: 1925 1929 case SYS_DVBC_ANNEX_C: 1926 1930 fepriv->min_delay = HZ / 20;
+16 -16
drivers/media/rc/ene_ir.c
··· 1018 1018 1019 1019 spin_lock_init(&dev->hw_lock); 1020 1020 1021 - /* claim the resources */ 1022 - error = -EBUSY; 1023 - dev->hw_io = pnp_port_start(pnp_dev, 0); 1024 - if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) { 1025 - dev->hw_io = -1; 1026 - dev->irq = -1; 1027 - goto error; 1028 - } 1029 - 1030 - dev->irq = pnp_irq(pnp_dev, 0); 1031 - if (request_irq(dev->irq, ene_isr, 1032 - IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) { 1033 - dev->irq = -1; 1034 - goto error; 1035 - } 1036 - 1037 1021 pnp_set_drvdata(pnp_dev, dev); 1038 1022 dev->pnp_dev = pnp_dev; 1039 1023 ··· 1069 1085 1070 1086 device_set_wakeup_capable(&pnp_dev->dev, true); 1071 1087 device_set_wakeup_enable(&pnp_dev->dev, true); 1088 + 1089 + /* claim the resources */ 1090 + error = -EBUSY; 1091 + dev->hw_io = pnp_port_start(pnp_dev, 0); 1092 + if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) { 1093 + dev->hw_io = -1; 1094 + dev->irq = -1; 1095 + goto error; 1096 + } 1097 + 1098 + dev->irq = pnp_irq(pnp_dev, 0); 1099 + if (request_irq(dev->irq, ene_isr, 1100 + IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) { 1101 + dev->irq = -1; 1102 + goto error; 1103 + } 1072 1104 1073 1105 error = rc_register_device(rdev); 1074 1106 if (error < 0)
+11 -11
drivers/media/rc/fintek-cir.c
··· 197 197 /* 198 198 * Newer reviews of this chipset uses port 8 instead of 5 199 199 */ 200 - if ((chip != 0x0408) || (chip != 0x0804)) 200 + if ((chip != 0x0408) && (chip != 0x0804)) 201 201 fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV2; 202 202 else 203 203 fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV1; ··· 514 514 515 515 spin_lock_init(&fintek->fintek_lock); 516 516 517 - ret = -EBUSY; 518 - /* now claim resources */ 519 - if (!request_region(fintek->cir_addr, 520 - fintek->cir_port_len, FINTEK_DRIVER_NAME)) 521 - goto failure; 522 - 523 - if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED, 524 - FINTEK_DRIVER_NAME, (void *)fintek)) 525 - goto failure; 526 - 527 517 pnp_set_drvdata(pdev, fintek); 528 518 fintek->pdev = pdev; 529 519 ··· 547 557 rdev->timeout = US_TO_NS(1000); 548 558 /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */ 549 559 rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD); 560 + 561 + ret = -EBUSY; 562 + /* now claim resources */ 563 + if (!request_region(fintek->cir_addr, 564 + fintek->cir_port_len, FINTEK_DRIVER_NAME)) 565 + goto failure; 566 + 567 + if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED, 568 + FINTEK_DRIVER_NAME, (void *)fintek)) 569 + goto failure; 550 570 551 571 ret = rc_register_device(rdev); 552 572 if (ret)
+10 -10
drivers/media/rc/ite-cir.c
··· 1515 1515 /* initialize raw event */ 1516 1516 init_ir_raw_event(&itdev->rawir); 1517 1517 1518 - ret = -EBUSY; 1519 - /* now claim resources */ 1520 - if (!request_region(itdev->cir_addr, 1521 - dev_desc->io_region_size, ITE_DRIVER_NAME)) 1522 - goto failure; 1523 - 1524 - if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED, 1525 - ITE_DRIVER_NAME, (void *)itdev)) 1526 - goto failure; 1527 - 1528 1518 /* set driver data into the pnp device */ 1529 1519 pnp_set_drvdata(pdev, itdev); 1530 1520 itdev->pdev = pdev; ··· 1589 1599 rdev->input_id.version = 0; 1590 1600 rdev->driver_name = ITE_DRIVER_NAME; 1591 1601 rdev->map_name = RC_MAP_RC6_MCE; 1602 + 1603 + ret = -EBUSY; 1604 + /* now claim resources */ 1605 + if (!request_region(itdev->cir_addr, 1606 + dev_desc->io_region_size, ITE_DRIVER_NAME)) 1607 + goto failure; 1608 + 1609 + if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED, 1610 + ITE_DRIVER_NAME, (void *)itdev)) 1611 + goto failure; 1592 1612 1593 1613 ret = rc_register_device(rdev); 1594 1614 if (ret)
+18 -18
drivers/media/rc/nuvoton-cir.c
··· 1021 1021 spin_lock_init(&nvt->nvt_lock); 1022 1022 spin_lock_init(&nvt->tx.lock); 1023 1023 1024 - ret = -EBUSY; 1025 - /* now claim resources */ 1026 - if (!request_region(nvt->cir_addr, 1027 - CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) 1028 - goto failure; 1029 - 1030 - if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED, 1031 - NVT_DRIVER_NAME, (void *)nvt)) 1032 - goto failure; 1033 - 1034 - if (!request_region(nvt->cir_wake_addr, 1035 - CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) 1036 - goto failure; 1037 - 1038 - if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED, 1039 - NVT_DRIVER_NAME, (void *)nvt)) 1040 - goto failure; 1041 - 1042 1024 pnp_set_drvdata(pdev, nvt); 1043 1025 nvt->pdev = pdev; 1044 1026 ··· 1066 1084 /* tx bits */ 1067 1085 rdev->tx_resolution = XYZ; 1068 1086 #endif 1087 + 1088 + ret = -EBUSY; 1089 + /* now claim resources */ 1090 + if (!request_region(nvt->cir_addr, 1091 + CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) 1092 + goto failure; 1093 + 1094 + if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED, 1095 + NVT_DRIVER_NAME, (void *)nvt)) 1096 + goto failure; 1097 + 1098 + if (!request_region(nvt->cir_wake_addr, 1099 + CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) 1100 + goto failure; 1101 + 1102 + if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED, 1103 + NVT_DRIVER_NAME, (void *)nvt)) 1104 + goto failure; 1069 1105 1070 1106 ret = rc_register_device(rdev); 1071 1107 if (ret)
+39 -39
drivers/media/rc/winbond-cir.c
··· 991 991 "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n", 992 992 data->wbase, data->ebase, data->sbase, data->irq); 993 993 994 - if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) { 995 - dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", 996 - data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1); 997 - err = -EBUSY; 998 - goto exit_free_data; 999 - } 1000 - 1001 - if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) { 1002 - dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", 1003 - data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1); 1004 - err = -EBUSY; 1005 - goto exit_release_wbase; 1006 - } 1007 - 1008 - if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) { 1009 - dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", 1010 - data->sbase, data->sbase + SP_IOMEM_LEN - 1); 1011 - err = -EBUSY; 1012 - goto exit_release_ebase; 1013 - } 1014 - 1015 - err = request_irq(data->irq, wbcir_irq_handler, 1016 - IRQF_DISABLED, DRVNAME, device); 1017 - if (err) { 1018 - dev_err(dev, "Failed to claim IRQ %u\n", data->irq); 1019 - err = -EBUSY; 1020 - goto exit_release_sbase; 1021 - } 1022 - 1023 994 led_trigger_register_simple("cir-tx", &data->txtrigger); 1024 995 if (!data->txtrigger) { 1025 996 err = -ENOMEM; 1026 - goto exit_free_irq; 997 + goto exit_free_data; 1027 998 } 1028 999 1029 1000 led_trigger_register_simple("cir-rx", &data->rxtrigger); ··· 1033 1062 data->dev->priv = data; 1034 1063 data->dev->dev.parent = &device->dev; 1035 1064 1065 + if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) { 1066 + dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", 1067 + data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1); 1068 + err = -EBUSY; 1069 + goto exit_free_rc; 1070 + } 1071 + 1072 + if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) { 1073 + dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", 1074 + data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1); 1075 + err = -EBUSY; 1076 + goto exit_release_wbase; 1077 + } 1078 + 1079 + if 
(!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) { 1080 + dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", 1081 + data->sbase, data->sbase + SP_IOMEM_LEN - 1); 1082 + err = -EBUSY; 1083 + goto exit_release_ebase; 1084 + } 1085 + 1086 + err = request_irq(data->irq, wbcir_irq_handler, 1087 + IRQF_DISABLED, DRVNAME, device); 1088 + if (err) { 1089 + dev_err(dev, "Failed to claim IRQ %u\n", data->irq); 1090 + err = -EBUSY; 1091 + goto exit_release_sbase; 1092 + } 1093 + 1036 1094 err = rc_register_device(data->dev); 1037 1095 if (err) 1038 - goto exit_free_rc; 1096 + goto exit_free_irq; 1039 1097 1040 1098 device_init_wakeup(&device->dev, 1); 1041 1099 ··· 1072 1072 1073 1073 return 0; 1074 1074 1075 - exit_free_rc: 1076 - rc_free_device(data->dev); 1077 - exit_unregister_led: 1078 - led_classdev_unregister(&data->led); 1079 - exit_unregister_rxtrigger: 1080 - led_trigger_unregister_simple(data->rxtrigger); 1081 - exit_unregister_txtrigger: 1082 - led_trigger_unregister_simple(data->txtrigger); 1083 1075 exit_free_irq: 1084 1076 free_irq(data->irq, device); 1085 1077 exit_release_sbase: ··· 1080 1088 release_region(data->ebase, EHFUNC_IOMEM_LEN); 1081 1089 exit_release_wbase: 1082 1090 release_region(data->wbase, WAKEUP_IOMEM_LEN); 1091 + exit_free_rc: 1092 + rc_free_device(data->dev); 1093 + exit_unregister_led: 1094 + led_classdev_unregister(&data->led); 1095 + exit_unregister_rxtrigger: 1096 + led_trigger_unregister_simple(data->rxtrigger); 1097 + exit_unregister_txtrigger: 1098 + led_trigger_unregister_simple(data->txtrigger); 1083 1099 exit_free_data: 1084 1100 kfree(data); 1085 1101 pnp_set_drvdata(device, NULL);
+4 -4
drivers/media/video/gspca/sonixj.c
··· 2923 2923 * not the JPEG end of frame ('ff d9'). 2924 2924 */ 2925 2925 2926 + /* count the packets and their size */ 2927 + sd->npkt++; 2928 + sd->pktsz += len; 2929 + 2926 2930 /*fixme: assumption about the following code: 2927 2931 * - there can be only one marker in a packet 2928 2932 */ ··· 2948 2944 return; 2949 2945 data += i; 2950 2946 } 2951 - 2952 - /* count the packets and their size */ 2953 - sd->npkt++; 2954 - sd->pktsz += len; 2955 2947 2956 2948 /* search backwards if there is a marker in the packet */ 2957 2949 for (i = len - 1; --i >= 0; ) {
-1
drivers/media/video/marvell-ccic/mmp-driver.c
··· 181 181 INIT_LIST_HEAD(&cam->devlist); 182 182 183 183 mcam = &cam->mcam; 184 - mcam->platform = MHP_Armada610; 185 184 mcam->plat_power_up = mmpcam_power_up; 186 185 mcam->plat_power_down = mmpcam_power_down; 187 186 mcam->dev = &pdev->dev;
+21 -12
drivers/media/video/s5p-fimc/fimc-capture.c
··· 246 246 247 247 } 248 248 249 - static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane) 250 - { 251 - if (!fr || plane >= fr->fmt->memplanes) 252 - return 0; 253 - return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8; 254 - } 255 - 256 - static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt, 249 + static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt, 257 250 unsigned int *num_buffers, unsigned int *num_planes, 258 251 unsigned int sizes[], void *allocators[]) 259 252 { 253 + const struct v4l2_pix_format_mplane *pixm = NULL; 260 254 struct fimc_ctx *ctx = vq->drv_priv; 261 - struct fimc_fmt *fmt = ctx->d_frame.fmt; 255 + struct fimc_frame *frame = &ctx->d_frame; 256 + struct fimc_fmt *fmt = frame->fmt; 257 + unsigned long wh; 262 258 int i; 263 259 264 - if (!fmt) 260 + if (pfmt) { 261 + pixm = &pfmt->fmt.pix_mp; 262 + fmt = fimc_find_format(&pixm->pixelformat, NULL, 263 + FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1); 264 + wh = pixm->width * pixm->height; 265 + } else { 266 + wh = frame->f_width * frame->f_height; 267 + } 268 + 269 + if (fmt == NULL) 265 270 return -EINVAL; 266 271 267 272 *num_planes = fmt->memplanes; 268 273 269 274 for (i = 0; i < fmt->memplanes; i++) { 270 - sizes[i] = get_plane_size(&ctx->d_frame, i); 275 + unsigned int size = (wh * fmt->depth[i]) / 8; 276 + if (pixm) 277 + sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); 278 + else 279 + sizes[i] = size; 271 280 allocators[i] = ctx->fimc_dev->alloc_ctx; 272 281 } 273 282 ··· 1392 1383 fimc_capture_try_crop(ctx, r, crop->pad); 1393 1384 1394 1385 if (crop->which == V4L2_SUBDEV_FORMAT_TRY) { 1395 - mutex_lock(&fimc->lock); 1386 + mutex_unlock(&fimc->lock); 1396 1387 *v4l2_subdev_get_try_crop(fh, crop->pad) = *r; 1397 1388 return 0; 1398 1389 }
+2 -2
drivers/media/video/s5p-fimc/fimc-core.c
··· 1048 1048 * @mask: the color flags to match 1049 1049 * @index: offset in the fimc_formats array, ignored if negative 1050 1050 */ 1051 - struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code, 1051 + struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code, 1052 1052 unsigned int mask, int index) 1053 1053 { 1054 1054 struct fimc_fmt *fmt, *def_fmt = NULL; 1055 1055 unsigned int i; 1056 1056 int id = 0; 1057 1057 1058 - if (index >= ARRAY_SIZE(fimc_formats)) 1058 + if (index >= (int)ARRAY_SIZE(fimc_formats)) 1059 1059 return NULL; 1060 1060 1061 1061 for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
+1 -1
drivers/media/video/s5p-fimc/fimc-core.h
··· 718 718 int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f); 719 719 void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height, 720 720 struct v4l2_pix_format_mplane *pix); 721 - struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code, 721 + struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code, 722 722 unsigned int mask, int index); 723 723 724 724 int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
+6 -2
drivers/media/video/soc_camera.c
··· 530 530 if (icl->reset) 531 531 icl->reset(icd->pdev); 532 532 533 + /* Don't mess with the host during probe */ 534 + mutex_lock(&ici->host_lock); 533 535 ret = ici->ops->add(icd); 536 + mutex_unlock(&ici->host_lock); 534 537 if (ret < 0) { 535 538 dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret); 536 539 goto eiciadd; ··· 959 956 { 960 957 struct soc_camera_device *icd; 961 958 962 - mutex_lock(&list_lock); 959 + mutex_lock(&ici->host_lock); 963 960 964 961 list_for_each_entry(icd, &devices, list) { 965 962 if (icd->iface == ici->nr) { ··· 970 967 } 971 968 } 972 969 973 - mutex_unlock(&list_lock); 970 + mutex_unlock(&ici->host_lock); 974 971 } 975 972 976 973 #ifdef CONFIG_I2C_BOARDINFO ··· 1316 1313 list_add_tail(&ici->list, &hosts); 1317 1314 mutex_unlock(&list_lock); 1318 1315 1316 + mutex_init(&ici->host_lock); 1319 1317 scan_add_host(ici); 1320 1318 1321 1319 return 0;
+2 -1
drivers/media/video/videobuf2-dma-contig.c
··· 15 15 #include <linux/dma-mapping.h> 16 16 17 17 #include <media/videobuf2-core.h> 18 + #include <media/videobuf2-dma-contig.h> 18 19 #include <media/videobuf2-memops.h> 19 20 20 21 struct vb2_dc_conf { ··· 86 85 { 87 86 struct vb2_dc_buf *buf = buf_priv; 88 87 if (!buf) 89 - return 0; 88 + return NULL; 90 89 91 90 return buf->vaddr; 92 91 }
+1
drivers/media/video/videobuf2-memops.c
··· 55 55 56 56 return vma_copy; 57 57 } 58 + EXPORT_SYMBOL_GPL(vb2_get_vma); 58 59 59 60 /** 60 61 * vb2_put_userptr() - release a userspace virtual memory area
+1 -1
drivers/mtd/mtdchar.c
··· 376 376 * Make a fake call to mtd_read_fact_prot_reg() to check if OTP 377 377 * operations are supported. 378 378 */ 379 - if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP) 379 + if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP) 380 380 return -EOPNOTSUPP; 381 381 382 382 switch (mode) {
+6 -11
drivers/mtd/nand/ams-delta.c
··· 212 212 /* Link the private data with the MTD structure */ 213 213 ams_delta_mtd->priv = this; 214 214 215 - if (!request_mem_region(res->start, resource_size(res), 216 - dev_name(&pdev->dev))) { 217 - dev_err(&pdev->dev, "request_mem_region failed\n"); 218 - err = -EBUSY; 219 - goto out_free; 220 - } 215 + /* 216 + * Don't try to request the memory region from here, 217 + * it should have been already requested from the 218 + * gpio-omap driver and requesting it again would fail. 219 + */ 221 220 222 221 io_base = ioremap(res->start, resource_size(res)); 223 222 if (io_base == NULL) { 224 223 dev_err(&pdev->dev, "ioremap failed\n"); 225 224 err = -EIO; 226 - goto out_release_io; 225 + goto out_free; 227 226 } 228 227 229 228 this->priv = io_base; ··· 270 271 platform_set_drvdata(pdev, NULL); 271 272 gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); 272 273 iounmap(io_base); 273 - out_release_io: 274 - release_mem_region(res->start, resource_size(res)); 275 274 out_free: 276 275 kfree(ams_delta_mtd); 277 276 out: ··· 282 285 static int __devexit ams_delta_cleanup(struct platform_device *pdev) 283 286 { 284 287 void __iomem *io_base = platform_get_drvdata(pdev); 285 - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 286 288 287 289 /* Release resources, unregister device */ 288 290 nand_release(ams_delta_mtd); ··· 289 293 gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); 290 294 gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); 291 295 iounmap(io_base); 292 - release_mem_region(res->start, resource_size(res)); 293 296 294 297 /* Free the MTD device structure */ 295 298 kfree(ams_delta_mtd);
+12 -6
drivers/net/bonding/bond_3ad.c
··· 2173 2173 * received frames (loopback). Since only the payload is given to this 2174 2174 * function, it check for loopback. 2175 2175 */ 2176 - static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length) 2176 + static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length) 2177 2177 { 2178 2178 struct port *port; 2179 + int ret = RX_HANDLER_ANOTHER; 2179 2180 2180 2181 if (length >= sizeof(struct lacpdu)) { 2181 2182 ··· 2185 2184 if (!port->slave) { 2186 2185 pr_warning("%s: Warning: port of slave %s is uninitialized\n", 2187 2186 slave->dev->name, slave->dev->master->name); 2188 - return; 2187 + return ret; 2189 2188 } 2190 2189 2191 2190 switch (lacpdu->subtype) { 2192 2191 case AD_TYPE_LACPDU: 2192 + ret = RX_HANDLER_CONSUMED; 2193 2193 pr_debug("Received LACPDU on port %d\n", 2194 2194 port->actor_port_number); 2195 2195 /* Protect against concurrent state machines */ ··· 2200 2198 break; 2201 2199 2202 2200 case AD_TYPE_MARKER: 2201 + ret = RX_HANDLER_CONSUMED; 2203 2202 // No need to convert fields to Little Endian since we don't use the marker's fields. 
2204 2203 2205 2204 switch (((struct bond_marker *)lacpdu)->tlv_type) { ··· 2222 2219 } 2223 2220 } 2224 2221 } 2222 + return ret; 2225 2223 } 2226 2224 2227 2225 /** ··· 2460 2456 return NETDEV_TX_OK; 2461 2457 } 2462 2458 2463 - void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 2459 + int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 2464 2460 struct slave *slave) 2465 2461 { 2462 + int ret = RX_HANDLER_ANOTHER; 2466 2463 if (skb->protocol != PKT_TYPE_LACPDU) 2467 - return; 2464 + return ret; 2468 2465 2469 2466 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2470 - return; 2467 + return ret; 2471 2468 2472 2469 read_lock(&bond->lock); 2473 - bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2470 + ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2474 2471 read_unlock(&bond->lock); 2472 + return ret; 2475 2473 } 2476 2474 2477 2475 /*
+1 -1
drivers/net/bonding/bond_3ad.h
··· 274 274 void bond_3ad_handle_link_change(struct slave *slave, char link); 275 275 int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 276 276 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 277 - void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 277 + int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, 278 278 struct slave *slave); 279 279 int bond_3ad_set_carrier(struct bonding *bond); 280 280 void bond_3ad_update_lacp_rate(struct bonding *bond);
+7 -5
drivers/net/bonding/bond_alb.c
··· 342 342 _unlock_rx_hashtbl_bh(bond); 343 343 } 344 344 345 - static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond, 345 + static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond, 346 346 struct slave *slave) 347 347 { 348 348 struct arp_pkt *arp; 349 349 350 350 if (skb->protocol != cpu_to_be16(ETH_P_ARP)) 351 - return; 351 + goto out; 352 352 353 353 arp = (struct arp_pkt *) skb->data; 354 354 if (!arp) { 355 355 pr_debug("Packet has no ARP data\n"); 356 - return; 356 + goto out; 357 357 } 358 358 359 359 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev))) 360 - return; 360 + goto out; 361 361 362 362 if (skb->len < sizeof(struct arp_pkt)) { 363 363 pr_debug("Packet is too small to be an ARP\n"); 364 - return; 364 + goto out; 365 365 } 366 366 367 367 if (arp->op_code == htons(ARPOP_REPLY)) { ··· 369 369 rlb_update_entry_from_arp(bond, arp); 370 370 pr_debug("Server received an ARP Reply from client\n"); 371 371 } 372 + out: 373 + return RX_HANDLER_ANOTHER; 372 374 } 373 375 374 376 /* Caller must hold bond lock for read */
+11 -5
drivers/net/bonding/bond_main.c
··· 1444 1444 struct sk_buff *skb = *pskb; 1445 1445 struct slave *slave; 1446 1446 struct bonding *bond; 1447 - void (*recv_probe)(struct sk_buff *, struct bonding *, 1447 + int (*recv_probe)(struct sk_buff *, struct bonding *, 1448 1448 struct slave *); 1449 + int ret = RX_HANDLER_ANOTHER; 1449 1450 1450 1451 skb = skb_share_check(skb, GFP_ATOMIC); 1451 1452 if (unlikely(!skb)) ··· 1465 1464 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 1466 1465 1467 1466 if (likely(nskb)) { 1468 - recv_probe(nskb, bond, slave); 1467 + ret = recv_probe(nskb, bond, slave); 1469 1468 dev_kfree_skb(nskb); 1469 + if (ret == RX_HANDLER_CONSUMED) { 1470 + consume_skb(skb); 1471 + return ret; 1472 + } 1470 1473 } 1471 1474 } 1472 1475 ··· 1492 1487 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); 1493 1488 } 1494 1489 1495 - return RX_HANDLER_ANOTHER; 1490 + return ret; 1496 1491 } 1497 1492 1498 1493 /* enslave device <slave> to bond device <master> */ ··· 2728 2723 } 2729 2724 } 2730 2725 2731 - static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, 2726 + static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, 2732 2727 struct slave *slave) 2733 2728 { 2734 2729 struct arphdr *arp; ··· 2736 2731 __be32 sip, tip; 2737 2732 2738 2733 if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) 2739 - return; 2734 + return RX_HANDLER_ANOTHER; 2740 2735 2741 2736 read_lock(&bond->lock); 2742 2737 ··· 2781 2776 2782 2777 out_unlock: 2783 2778 read_unlock(&bond->lock); 2779 + return RX_HANDLER_ANOTHER; 2784 2780 } 2785 2781 2786 2782 /*
+1 -1
drivers/net/bonding/bonding.h
··· 218 218 struct slave *primary_slave; 219 219 bool force_primary; 220 220 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ 221 - void (*recv_probe)(struct sk_buff *, struct bonding *, 221 + int (*recv_probe)(struct sk_buff *, struct bonding *, 222 222 struct slave *); 223 223 rwlock_t lock; 224 224 rwlock_t curr_slave_lock;
+22 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 9122 9122 return bnx2x_prev_mcp_done(bp); 9123 9123 } 9124 9124 9125 + /* previous driver DMAE transaction may have occurred when pre-boot stage ended 9126 + * and boot began, or when kdump kernel was loaded. Either case would invalidate 9127 + * the addresses of the transaction, resulting in was-error bit set in the pci 9128 + * causing all hw-to-host pcie transactions to timeout. If this happened we want 9129 + * to clear the interrupt which detected this from the pglueb and the was done 9130 + * bit 9131 + */ 9132 + static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) 9133 + { 9134 + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); 9135 + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 9136 + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); 9137 + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); 9138 + } 9139 + } 9140 + 9125 9141 static int __devinit bnx2x_prev_unload(struct bnx2x *bp) 9126 9142 { 9127 9143 int time_counter = 10; 9128 9144 u32 rc, fw, hw_lock_reg, hw_lock_val; 9129 9145 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 9130 9146 9131 - /* Release previously held locks */ 9147 + /* clear hw from errors which may have resulted from an interrupted 9148 + * dmae transaction. 9149 + */ 9150 + bnx2x_prev_interrupted_dmae(bp); 9151 + 9152 + /* Release previously held locks */ 9132 9153 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 9133 9154 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : 9134 9155 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
+2
drivers/net/ethernet/ibm/ehea/ehea_main.c
··· 3335 3335 goto out_shutdown_ports; 3336 3336 } 3337 3337 3338 + /* Handle any events that might be pending. */ 3339 + tasklet_hi_schedule(&adapter->neq_tasklet); 3338 3340 3339 3341 ret = 0; 3340 3342 goto out;
+7 -3
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 493 493 static void e1000_down_and_stop(struct e1000_adapter *adapter) 494 494 { 495 495 set_bit(__E1000_DOWN, &adapter->flags); 496 - cancel_work_sync(&adapter->reset_task); 496 + 497 + /* Only kill reset task if adapter is not resetting */ 498 + if (!test_bit(__E1000_RESETTING, &adapter->flags)) 499 + cancel_work_sync(&adapter->reset_task); 500 + 497 501 cancel_delayed_work_sync(&adapter->watchdog_task); 498 502 cancel_delayed_work_sync(&adapter->phy_info_task); 499 503 cancel_delayed_work_sync(&adapter->fifo_stall_task); ··· 3384 3380 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3385 3381 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3386 3382 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; 3387 - struct my_u { u64 a; u64 b; }; 3383 + struct my_u { __le64 a; __le64 b; }; 3388 3384 struct my_u *u = (struct my_u *)tx_desc; 3389 3385 const char *type; 3390 3386 ··· 3428 3424 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3429 3425 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3430 3426 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; 3431 - struct my_u { u64 a; u64 b; }; 3427 + struct my_u { __le64 a; __le64 b; }; 3432 3428 struct my_u *u = (struct my_u *)rx_desc; 3433 3429 const char *type; 3434 3430
+8 -16
drivers/net/ethernet/intel/igb/igb_main.c
··· 1111 1111 adapter->flags |= IGB_FLAG_HAS_MSI; 1112 1112 out: 1113 1113 /* Notify the stack of the (possibly) reduced queue counts. */ 1114 + rtnl_lock(); 1114 1115 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 1115 - return netif_set_real_num_rx_queues(adapter->netdev, 1116 - adapter->num_rx_queues); 1116 + err = netif_set_real_num_rx_queues(adapter->netdev, 1117 + adapter->num_rx_queues); 1118 + rtnl_unlock(); 1119 + return err; 1117 1120 } 1118 1121 1119 1122 /** ··· 2774 2771 2775 2772 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 2776 2773 wr32(E1000_TXDCTL(reg_idx), txdctl); 2777 - 2778 - netdev_tx_reset_queue(txring_txq(ring)); 2779 2774 } 2780 2775 2781 2776 /** ··· 3282 3281 buffer_info = &tx_ring->tx_buffer_info[i]; 3283 3282 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); 3284 3283 } 3284 + 3285 + netdev_tx_reset_queue(txring_txq(tx_ring)); 3285 3286 3286 3287 size = sizeof(struct igb_tx_buffer) * tx_ring->count; 3287 3288 memset(tx_ring->tx_buffer_info, 0, size); ··· 6799 6796 pci_enable_wake(pdev, PCI_D3hot, 0); 6800 6797 pci_enable_wake(pdev, PCI_D3cold, 0); 6801 6798 6802 - if (!rtnl_is_locked()) { 6803 - /* 6804 - * shut up ASSERT_RTNL() warning in 6805 - * netif_set_real_num_tx/rx_queues. 6806 - */ 6807 - rtnl_lock(); 6808 - err = igb_init_interrupt_scheme(adapter); 6809 - rtnl_unlock(); 6810 - } else { 6811 - err = igb_init_interrupt_scheme(adapter); 6812 - } 6813 - if (err) { 6799 + if (igb_init_interrupt_scheme(adapter)) { 6814 6800 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 6815 6801 return -ENOMEM; 6816 6802 }
-3
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 574 574 extern struct ixgbe_info ixgbe_X540_info; 575 575 #ifdef CONFIG_IXGBE_DCB 576 576 extern const struct dcbnl_rtnl_ops dcbnl_ops; 577 - extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 578 - struct ixgbe_dcb_config *dst_dcb_cfg, 579 - int tc_max); 580 577 #endif 581 578 582 579 extern char ixgbe_driver_name[];
+20 -23
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
··· 44 44 #define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ 45 45 #define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ 46 46 47 - int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg, 48 - struct ixgbe_dcb_config *dcfg, int tc_max) 47 + static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) 49 48 { 49 + struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; 50 + struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; 50 51 struct tc_configuration *src = NULL; 51 52 struct tc_configuration *dst = NULL; 52 53 int i, j; 53 54 int tx = DCB_TX_CONFIG; 54 55 int rx = DCB_RX_CONFIG; 55 56 int changes = 0; 57 + #ifdef IXGBE_FCOE 58 + struct dcb_app app = { 59 + .selector = DCB_APP_IDTYPE_ETHTYPE, 60 + .protocol = ETH_P_FCOE, 61 + }; 62 + u8 up = dcb_getapp(adapter->netdev, &app); 56 63 57 - if (!scfg || !dcfg) 58 - return changes; 64 + if (up && !(up & (1 << adapter->fcoe.up))) 65 + changes |= BIT_APP_UPCHG; 66 + #endif 59 67 60 68 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { 61 69 src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; ··· 340 332 struct ixgbe_adapter *adapter = netdev_priv(netdev); 341 333 int ret = DCB_NO_HW_CHG; 342 334 int i; 343 - #ifdef IXGBE_FCOE 344 - struct dcb_app app = { 345 - .selector = DCB_APP_IDTYPE_ETHTYPE, 346 - .protocol = ETH_P_FCOE, 347 - }; 348 - u8 up; 349 - 350 - /* In IEEE mode, use the IEEE Ethertype selector value */ 351 - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { 352 - app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE; 353 - up = dcb_ieee_getapp_mask(netdev, &app); 354 - } else { 355 - up = dcb_getapp(netdev, &app); 356 - } 357 - #endif 358 335 359 336 /* Fail command if not in CEE mode */ 360 337 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) 361 338 return ret; 362 339 363 - adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, 364 - &adapter->dcb_cfg, 340 + adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, 365 341 MAX_TRAFFIC_CLASS); 366 342 if 
(!adapter->dcb_set_bitmap) 367 343 return ret; ··· 432 440 * FCoE is using changes. This happens if the APP info 433 441 * changes or the up2tc mapping is updated. 434 442 */ 435 - if ((up && !(up & (1 << adapter->fcoe.up))) || 436 - (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) { 443 + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 444 + struct dcb_app app = { 445 + .selector = DCB_APP_IDTYPE_ETHTYPE, 446 + .protocol = ETH_P_FCOE, 447 + }; 448 + u8 up = dcb_getapp(netdev, &app); 449 + 437 450 adapter->fcoe.up = ffs(up) - 1; 438 451 ixgbe_dcbnl_devreset(netdev); 439 452 ret = DCB_HW_CHG_RST;
+2
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 1780 1780 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); 1781 1781 } 1782 1782 1783 + netdev_tx_reset_queue(txring_txq(tx_ring)); 1784 + 1783 1785 /* re-map buffers to ring, store next to clean values */ 1784 1786 ixgbe_alloc_rx_buffers(rx_ring, count); 1785 1787 rx_ring->next_to_clean = rx_ntc;
+6 -4
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 2671 2671 /* enable queue */ 2672 2672 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); 2673 2673 2674 - netdev_tx_reset_queue(txring_txq(ring)); 2675 - 2676 2674 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 2677 2675 if (hw->mac.type == ixgbe_mac_82598EB && 2678 2676 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) ··· 4165 4167 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 4166 4168 } 4167 4169 4170 + netdev_tx_reset_queue(txring_txq(tx_ring)); 4171 + 4168 4172 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4169 4173 memset(tx_ring->tx_buffer_info, 0, size); 4170 4174 ··· 4418 4418 adapter->dcb_cfg.pfc_mode_enable = false; 4419 4419 adapter->dcb_set_bitmap = 0x00; 4420 4420 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; 4421 - ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 4422 - MAX_TRAFFIC_CLASS); 4421 + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, 4422 + sizeof(adapter->temp_dcb_cfg)); 4423 4423 4424 4424 #endif 4425 4425 ··· 4866 4866 netif_device_detach(netdev); 4867 4867 4868 4868 if (netif_running(netdev)) { 4869 + rtnl_lock(); 4869 4870 ixgbe_down(adapter); 4870 4871 ixgbe_free_irq(adapter); 4871 4872 ixgbe_free_all_tx_resources(adapter); 4872 4873 ixgbe_free_all_rx_resources(adapter); 4874 + rtnl_unlock(); 4873 4875 } 4874 4876 4875 4877 ixgbe_clear_interrupt_scheme(adapter);
+4 -3
drivers/net/ethernet/micrel/ks8851.c
··· 618 618 netif_dbg(ks, intr, ks->netdev, 619 619 "%s: status 0x%04x\n", __func__, status); 620 620 621 - if (status & IRQ_LCI) { 622 - /* should do something about checking link status */ 621 + if (status & IRQ_LCI) 623 622 handled |= IRQ_LCI; 624 - } 625 623 626 624 if (status & IRQ_LDI) { 627 625 u16 pmecr = ks8851_rdreg16(ks, KS_PMECR); ··· 681 683 } 682 684 683 685 mutex_unlock(&ks->lock); 686 + 687 + if (status & IRQ_LCI) 688 + mii_check_link(&ks->mii); 684 689 685 690 if (status & IRQ_TXI) 686 691 netif_wake_queue(ks->netdev);
-2
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
··· 584 584 /** 585 585 * struct pch_gbe_adapter - board specific private data structure 586 586 * @stats_lock: Spinlock structure for status 587 - * @tx_queue_lock: Spinlock structure for transmit 588 587 * @ethtool_lock: Spinlock structure for ethtool 589 588 * @irq_sem: Semaphore for interrupt 590 589 * @netdev: Pointer of network device structure ··· 608 609 609 610 struct pch_gbe_adapter { 610 611 spinlock_t stats_lock; 611 - spinlock_t tx_queue_lock; 612 612 spinlock_t ethtool_lock; 613 613 atomic_t irq_sem; 614 614 struct net_device *netdev;
+11 -14
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
··· 640 640 */ 641 641 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter) 642 642 { 643 - int size; 644 - 645 - size = (int)sizeof(struct pch_gbe_tx_ring); 646 - adapter->tx_ring = kzalloc(size, GFP_KERNEL); 643 + adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL); 647 644 if (!adapter->tx_ring) 648 645 return -ENOMEM; 649 - size = (int)sizeof(struct pch_gbe_rx_ring); 650 - adapter->rx_ring = kzalloc(size, GFP_KERNEL); 646 + 647 + adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL); 651 648 if (!adapter->rx_ring) { 652 649 kfree(adapter->tx_ring); 653 650 return -ENOMEM; ··· 1159 1162 struct sk_buff *tmp_skb; 1160 1163 unsigned int frame_ctrl; 1161 1164 unsigned int ring_num; 1162 - unsigned long flags; 1163 1165 1164 1166 /*-- Set frame control --*/ 1165 1167 frame_ctrl = 0; ··· 1207 1211 } 1208 1212 } 1209 1213 } 1210 - spin_lock_irqsave(&tx_ring->tx_lock, flags); 1214 + 1211 1215 ring_num = tx_ring->next_to_use; 1212 1216 if (unlikely((ring_num + 1) == tx_ring->count)) 1213 1217 tx_ring->next_to_use = 0; 1214 1218 else 1215 1219 tx_ring->next_to_use = ring_num + 1; 1216 1220 1217 - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 1221 + 1218 1222 buffer_info = &tx_ring->buffer_info[ring_num]; 1219 1223 tmp_skb = buffer_info->skb; 1220 1224 ··· 1514 1518 &rx_ring->rx_buff_pool_logic, 1515 1519 GFP_KERNEL); 1516 1520 if (!rx_ring->rx_buff_pool) { 1517 - pr_err("Unable to allocate memory for the receive poll buffer\n"); 1521 + pr_err("Unable to allocate memory for the receive pool buffer\n"); 1518 1522 return -ENOMEM; 1519 1523 } 1520 1524 memset(rx_ring->rx_buff_pool, 0, size); ··· 1633 1637 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", 1634 1638 cleaned_count); 1635 1639 /* Recover from running out of Tx resources in xmit_frame */ 1640 + spin_lock(&tx_ring->tx_lock); 1636 1641 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) { 1637 1642 netif_wake_queue(adapter->netdev); 1638 1643 
adapter->stats.tx_restart_count++; 1639 1644 pr_debug("Tx wake queue\n"); 1640 1645 } 1641 - spin_lock(&adapter->tx_queue_lock); 1646 + 1642 1647 tx_ring->next_to_clean = i; 1643 - spin_unlock(&adapter->tx_queue_lock); 1648 + 1644 1649 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); 1650 + spin_unlock(&tx_ring->tx_lock); 1645 1651 return cleaned; 1646 1652 } 1647 1653 ··· 2035 2037 return -ENOMEM; 2036 2038 } 2037 2039 spin_lock_init(&adapter->hw.miim_lock); 2038 - spin_lock_init(&adapter->tx_queue_lock); 2039 2040 spin_lock_init(&adapter->stats_lock); 2040 2041 spin_lock_init(&adapter->ethtool_lock); 2041 2042 atomic_set(&adapter->irq_sem, 0); ··· 2139 2142 tx_ring->next_to_use, tx_ring->next_to_clean); 2140 2143 return NETDEV_TX_BUSY; 2141 2144 } 2142 - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2143 2145 2144 2146 /* CRC,ITAG no support */ 2145 2147 pch_gbe_tx_queue(adapter, tx_ring, skb); 2148 + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); 2146 2149 return NETDEV_TX_OK; 2147 2150 } 2148 2151
+10 -6
drivers/net/ethernet/realtek/r8169.c
··· 61 61 #define R8169_MSG_DEFAULT \ 62 62 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) 63 63 64 - #define TX_BUFFS_AVAIL(tp) \ 65 - (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) 64 + #define TX_SLOTS_AVAIL(tp) \ 65 + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) 66 + 67 + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ 68 + #define TX_FRAGS_READY_FOR(tp,nr_frags) \ 69 + (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) 66 70 67 71 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 68 72 The RTL chips use a 64 element hash table based on the Ethernet CRC. */ ··· 5119 5115 u32 opts[2]; 5120 5116 int frags; 5121 5117 5122 - if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { 5118 + if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { 5123 5119 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 5124 5120 goto err_stop_0; 5125 5121 } ··· 5173 5169 5174 5170 mmiowb(); 5175 5171 5176 - if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { 5172 + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { 5177 5173 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 5178 5174 * not miss a ring update when it notices a stopped queue. 5179 5175 */ ··· 5187 5183 * can't. 5188 5184 */ 5189 5185 smp_mb(); 5190 - if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) 5186 + if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) 5191 5187 netif_wake_queue(dev); 5192 5188 } 5193 5189 ··· 5310 5306 */ 5311 5307 smp_mb(); 5312 5308 if (netif_queue_stopped(dev) && 5313 - (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { 5309 + TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { 5314 5310 netif_wake_queue(dev); 5315 5311 } 5316 5312 /*
+1 -1
drivers/net/ethernet/sfc/efx.c
··· 1349 1349 } 1350 1350 1351 1351 /* RSS might be usable on VFs even if it is disabled on the PF */ 1352 - efx->rss_spread = (efx->n_rx_channels > 1 ? 1352 + efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ? 1353 1353 efx->n_rx_channels : efx_vf_size(efx)); 1354 1354 1355 1355 return 0;
+1 -1
drivers/net/macvlan.c
··· 259 259 260 260 xmit_world: 261 261 skb->ip_summed = ip_summed; 262 - skb_set_dev(skb, vlan->lowerdev); 262 + skb->dev = vlan->lowerdev; 263 263 return dev_queue_xmit(skb); 264 264 } 265 265
+37 -4
drivers/net/macvtap.c
··· 1 1 #include <linux/etherdevice.h> 2 2 #include <linux/if_macvlan.h> 3 + #include <linux/if_vlan.h> 3 4 #include <linux/interrupt.h> 4 5 #include <linux/nsproxy.h> 5 6 #include <linux/compat.h> ··· 760 759 struct macvlan_dev *vlan; 761 760 int ret; 762 761 int vnet_hdr_len = 0; 762 + int vlan_offset = 0; 763 + int copied; 763 764 764 765 if (q->flags & IFF_VNET_HDR) { 765 766 struct virtio_net_hdr vnet_hdr; ··· 776 773 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) 777 774 return -EFAULT; 778 775 } 776 + copied = vnet_hdr_len; 779 777 780 - len = min_t(int, skb->len, len); 778 + if (!vlan_tx_tag_present(skb)) 779 + len = min_t(int, skb->len, len); 780 + else { 781 + int copy; 782 + struct { 783 + __be16 h_vlan_proto; 784 + __be16 h_vlan_TCI; 785 + } veth; 786 + veth.h_vlan_proto = htons(ETH_P_8021Q); 787 + veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); 781 788 782 - ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len); 789 + vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 790 + len = min_t(int, skb->len + VLAN_HLEN, len); 783 791 792 + copy = min_t(int, vlan_offset, len); 793 + ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); 794 + len -= copy; 795 + copied += copy; 796 + if (ret || !len) 797 + goto done; 798 + 799 + copy = min_t(int, sizeof(veth), len); 800 + ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); 801 + len -= copy; 802 + copied += copy; 803 + if (ret || !len) 804 + goto done; 805 + } 806 + 807 + ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); 808 + copied += len; 809 + 810 + done: 784 811 rcu_read_lock_bh(); 785 812 vlan = rcu_dereference_bh(q->vlan); 786 813 if (vlan) 787 - macvlan_count_rx(vlan, len, ret == 0, 0); 814 + macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); 788 815 rcu_read_unlock_bh(); 789 816 790 - return ret ? ret : (len + vnet_hdr_len); 817 + return ret ? 
ret : copied; 791 818 } 792 819 793 820 static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+28 -2
drivers/net/usb/cdc_ether.c
··· 83 83 struct cdc_state *info = (void *) &dev->data; 84 84 int status; 85 85 int rndis; 86 + bool android_rndis_quirk = false; 86 87 struct usb_driver *driver = driver_of(intf); 87 88 struct usb_cdc_mdlm_desc *desc = NULL; 88 89 struct usb_cdc_mdlm_detail_desc *detail = NULL; ··· 196 195 info->control, 197 196 info->u->bSlaveInterface0, 198 197 info->data); 198 + /* fall back to hard-wiring for RNDIS */ 199 + if (rndis) { 200 + android_rndis_quirk = true; 201 + goto next_desc; 202 + } 199 203 goto bad_desc; 200 204 } 201 205 if (info->control != intf) { ··· 277 271 /* Microsoft ActiveSync based and some regular RNDIS devices lack the 278 272 * CDC descriptors, so we'll hard-wire the interfaces and not check 279 273 * for descriptors. 274 + * 275 + * Some Android RNDIS devices have a CDC Union descriptor pointing 276 + * to non-existing interfaces. Ignore that and attempt the same 277 + * hard-wired 0 and 1 interfaces. 280 278 */ 281 - if (rndis && !info->u) { 279 + if (rndis && (!info->u || android_rndis_quirk)) { 282 280 info->control = usb_ifnum_to_if(dev->udev, 0); 283 281 info->data = usb_ifnum_to_if(dev->udev, 1); 284 - if (!info->control || !info->data) { 282 + if (!info->control || !info->data || info->control != intf) { 285 283 dev_dbg(&intf->dev, 286 284 "rndis: master #0/%p slave #1/%p\n", 287 285 info->control, ··· 485 475 /*-------------------------------------------------------------------------*/ 486 476 487 477 #define HUAWEI_VENDOR_ID 0x12D1 478 + #define NOVATEL_VENDOR_ID 0x1410 488 479 489 480 static const struct usb_device_id products [] = { 490 481 /* ··· 603 592 * because of bugs/quirks in a given product (like Zaurus, above). 604 593 */ 605 594 { 595 + /* Novatel USB551L */ 596 + /* This match must come *before* the generic CDC-ETHER match so that 597 + * we get FLAG_WWAN set on the device, since it's descriptors are 598 + * generic CDC-ETHER. 
599 + */ 600 + .match_flags = USB_DEVICE_ID_MATCH_VENDOR 601 + | USB_DEVICE_ID_MATCH_PRODUCT 602 + | USB_DEVICE_ID_MATCH_INT_INFO, 603 + .idVendor = NOVATEL_VENDOR_ID, 604 + .idProduct = 0xB001, 605 + .bInterfaceClass = USB_CLASS_COMM, 606 + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 607 + .bInterfaceProtocol = USB_CDC_PROTO_NONE, 608 + .driver_info = (unsigned long)&wwan_info, 609 + }, { 606 610 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 607 611 USB_CDC_PROTO_NONE), 608 612 .driver_info = (unsigned long) &cdc_info,
+38 -16
drivers/net/usb/usbnet.c
··· 282 282 } 283 283 EXPORT_SYMBOL_GPL(usbnet_change_mtu); 284 284 285 + /* The caller must hold list->lock */ 286 + static void __usbnet_queue_skb(struct sk_buff_head *list, 287 + struct sk_buff *newsk, enum skb_state state) 288 + { 289 + struct skb_data *entry = (struct skb_data *) newsk->cb; 290 + 291 + __skb_queue_tail(list, newsk); 292 + entry->state = state; 293 + } 294 + 285 295 /*-------------------------------------------------------------------------*/ 286 296 287 297 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from 288 298 * completion callbacks. 2.5 should have fixed those bugs... 289 299 */ 290 300 291 - static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list) 301 + static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, 302 + struct sk_buff_head *list, enum skb_state state) 292 303 { 293 304 unsigned long flags; 305 + enum skb_state old_state; 306 + struct skb_data *entry = (struct skb_data *) skb->cb; 294 307 295 308 spin_lock_irqsave(&list->lock, flags); 309 + old_state = entry->state; 310 + entry->state = state; 296 311 __skb_unlink(skb, list); 297 312 spin_unlock(&list->lock); 298 313 spin_lock(&dev->done.lock); ··· 315 300 if (dev->done.qlen == 1) 316 301 tasklet_schedule(&dev->bh); 317 302 spin_unlock_irqrestore(&dev->done.lock, flags); 303 + return old_state; 318 304 } 319 305 320 306 /* some work can't be done in tasklets, so we use keventd ··· 356 340 entry = (struct skb_data *) skb->cb; 357 341 entry->urb = urb; 358 342 entry->dev = dev; 359 - entry->state = rx_start; 360 343 entry->length = 0; 361 344 362 345 usb_fill_bulk_urb (urb, dev->udev, dev->in, ··· 387 372 tasklet_schedule (&dev->bh); 388 373 break; 389 374 case 0: 390 - __skb_queue_tail (&dev->rxq, skb); 375 + __usbnet_queue_skb(&dev->rxq, skb, rx_start); 391 376 } 392 377 } else { 393 378 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); ··· 438 423 struct skb_data *entry = (struct skb_data *) skb->cb; 439 424 struct 
usbnet *dev = entry->dev; 440 425 int urb_status = urb->status; 426 + enum skb_state state; 441 427 442 428 skb_put (skb, urb->actual_length); 443 - entry->state = rx_done; 429 + state = rx_done; 444 430 entry->urb = NULL; 445 431 446 432 switch (urb_status) { 447 433 /* success */ 448 434 case 0: 449 435 if (skb->len < dev->net->hard_header_len) { 450 - entry->state = rx_cleanup; 436 + state = rx_cleanup; 451 437 dev->net->stats.rx_errors++; 452 438 dev->net->stats.rx_length_errors++; 453 439 netif_dbg(dev, rx_err, dev->net, ··· 487 471 "rx throttle %d\n", urb_status); 488 472 } 489 473 block: 490 - entry->state = rx_cleanup; 474 + state = rx_cleanup; 491 475 entry->urb = urb; 492 476 urb = NULL; 493 477 break; ··· 498 482 // FALLTHROUGH 499 483 500 484 default: 501 - entry->state = rx_cleanup; 485 + state = rx_cleanup; 502 486 dev->net->stats.rx_errors++; 503 487 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); 504 488 break; 505 489 } 506 490 507 - defer_bh(dev, skb, &dev->rxq); 491 + state = defer_bh(dev, skb, &dev->rxq, state); 508 492 509 493 if (urb) { 510 494 if (netif_running (dev->net) && 511 - !test_bit (EVENT_RX_HALT, &dev->flags)) { 495 + !test_bit (EVENT_RX_HALT, &dev->flags) && 496 + state != unlink_start) { 512 497 rx_submit (dev, urb, GFP_ATOMIC); 513 498 usb_mark_last_busy(dev->udev); 514 499 return; ··· 596 579 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) 597 580 { 598 581 unsigned long flags; 599 - struct sk_buff *skb, *skbnext; 582 + struct sk_buff *skb; 600 583 int count = 0; 601 584 602 585 spin_lock_irqsave (&q->lock, flags); 603 - skb_queue_walk_safe(q, skb, skbnext) { 586 + while (!skb_queue_empty(q)) { 604 587 struct skb_data *entry; 605 588 struct urb *urb; 606 589 int retval; 607 590 608 - entry = (struct skb_data *) skb->cb; 591 + skb_queue_walk(q, skb) { 592 + entry = (struct skb_data *) skb->cb; 593 + if (entry->state != unlink_start) 594 + goto found; 595 + } 596 + break; 597 + found: 598 + 
entry->state = unlink_start; 609 599 urb = entry->urb; 610 600 611 601 /* ··· 1063 1039 } 1064 1040 1065 1041 usb_autopm_put_interface_async(dev->intf); 1066 - entry->state = tx_done; 1067 - defer_bh(dev, skb, &dev->txq); 1042 + (void) defer_bh(dev, skb, &dev->txq, tx_done); 1068 1043 } 1069 1044 1070 1045 /*-------------------------------------------------------------------------*/ ··· 1119 1096 entry = (struct skb_data *) skb->cb; 1120 1097 entry->urb = urb; 1121 1098 entry->dev = dev; 1122 - entry->state = tx_start; 1123 1099 entry->length = length; 1124 1100 1125 1101 usb_fill_bulk_urb (urb, dev->udev, dev->out, ··· 1177 1155 break; 1178 1156 case 0: 1179 1157 net->trans_start = jiffies; 1180 - __skb_queue_tail (&dev->txq, skb); 1158 + __usbnet_queue_skb(&dev->txq, skb, tx_start); 1181 1159 if (dev->txq.qlen >= TX_QLEN (dev)) 1182 1160 netif_stop_queue (net); 1183 1161 }
+2
drivers/net/virtio_net.c
··· 492 492 * We synchronize against interrupts via NAPI_STATE_SCHED */ 493 493 if (napi_schedule_prep(&vi->napi)) { 494 494 virtqueue_disable_cb(vi->rvq); 495 + local_bh_disable(); 495 496 __napi_schedule(&vi->napi); 497 + local_bh_enable(); 496 498 } 497 499 } 498 500
+2 -2
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 373 373 else 374 374 spur_subchannel_sd = 0; 375 375 376 - spur_freq_sd = (freq_offset << 9) / 11; 376 + spur_freq_sd = ((freq_offset + 10) << 9) / 11; 377 377 378 378 } else { 379 379 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL, ··· 382 382 else 383 383 spur_subchannel_sd = 1; 384 384 385 - spur_freq_sd = (freq_offset << 9) / 11; 385 + spur_freq_sd = ((freq_offset - 10) << 9) / 11; 386 386 387 387 } 388 388
+1
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
··· 2637 2637 /* after stopping the bus, exit thread */ 2638 2638 brcmf_sdbrcm_bus_stop(bus->sdiodev->dev); 2639 2639 bus->dpc_tsk = NULL; 2640 + spin_lock_irqsave(&bus->dpc_tl_lock, flags); 2640 2641 break; 2641 2642 } 2642 2643
+14 -7
drivers/net/wireless/iwlwifi/iwl-agn-rx.c
··· 773 773 struct sk_buff *skb; 774 774 __le16 fc = hdr->frame_control; 775 775 struct iwl_rxon_context *ctx; 776 - struct page *p; 777 - int offset; 776 + unsigned int hdrlen, fraglen; 778 777 779 778 /* We only process data packets if the interface is open */ 780 779 if (unlikely(!priv->is_open)) { ··· 787 788 iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 788 789 return; 789 790 790 - skb = dev_alloc_skb(128); 791 + /* Dont use dev_alloc_skb(), we'll have enough headroom once 792 + * ieee80211_hdr pulled. 793 + */ 794 + skb = alloc_skb(128, GFP_ATOMIC); 791 795 if (!skb) { 792 - IWL_ERR(priv, "dev_alloc_skb failed\n"); 796 + IWL_ERR(priv, "alloc_skb failed\n"); 793 797 return; 794 798 } 799 + hdrlen = min_t(unsigned int, len, skb_tailroom(skb)); 800 + memcpy(skb_put(skb, hdrlen), hdr, hdrlen); 801 + fraglen = len - hdrlen; 795 802 796 - offset = (void *)hdr - rxb_addr(rxb); 797 - p = rxb_steal_page(rxb); 798 - skb_add_rx_frag(skb, 0, p, offset, len, len); 803 + if (fraglen) { 804 + int offset = (void *)hdr + hdrlen - rxb_addr(rxb); 799 805 806 + skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, 807 + fraglen, rxb->truesize); 808 + } 800 809 iwl_update_stats(priv, false, fc, len); 801 810 802 811 /*
+2 -1
drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
··· 374 374 if (WARN_ON(!rxb)) 375 375 return; 376 376 377 + rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order; 377 378 dma_unmap_page(trans->dev, rxb->page_dma, 378 - PAGE_SIZE << hw_params(trans).rx_page_order, 379 + rxcb.truesize, 379 380 DMA_FROM_DEVICE); 380 381 381 382 rxcb._page = rxb->page;
+1
drivers/net/wireless/iwlwifi/iwl-trans.h
··· 260 260 261 261 struct iwl_rx_cmd_buffer { 262 262 struct page *_page; 263 + unsigned int truesize; 263 264 }; 264 265 265 266 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
+8 -8
drivers/net/wireless/rtlwifi/pci.c
··· 1851 1851 /*like read eeprom and so on */ 1852 1852 rtlpriv->cfg->ops->read_eeprom_info(hw); 1853 1853 1854 - if (rtlpriv->cfg->ops->init_sw_vars(hw)) { 1855 - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); 1856 - err = -ENODEV; 1857 - goto fail3; 1858 - } 1859 - 1860 - rtlpriv->cfg->ops->init_sw_leds(hw); 1861 - 1862 1854 /*aspm */ 1863 1855 rtl_pci_init_aspm(hw); 1864 1856 ··· 1868 1876 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n"); 1869 1877 goto fail3; 1870 1878 } 1879 + 1880 + if (rtlpriv->cfg->ops->init_sw_vars(hw)) { 1881 + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); 1882 + err = -ENODEV; 1883 + goto fail3; 1884 + } 1885 + 1886 + rtlpriv->cfg->ops->init_sw_leds(hw); 1871 1887 1872 1888 err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group); 1873 1889 if (err) {
+5 -5
drivers/net/wireless/rtlwifi/usb.c
··· 971 971 rtlpriv->cfg->ops->read_chip_version(hw); 972 972 /*like read eeprom and so on */ 973 973 rtlpriv->cfg->ops->read_eeprom_info(hw); 974 - if (rtlpriv->cfg->ops->init_sw_vars(hw)) { 975 - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); 976 - goto error_out; 977 - } 978 - rtlpriv->cfg->ops->init_sw_leds(hw); 979 974 err = _rtl_usb_init(hw); 980 975 if (err) 981 976 goto error_out; ··· 982 987 "Can't allocate sw for mac80211\n"); 983 988 goto error_out; 984 989 } 990 + if (rtlpriv->cfg->ops->init_sw_vars(hw)) { 991 + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); 992 + goto error_out; 993 + } 994 + rtlpriv->cfg->ops->init_sw_leds(hw); 985 995 986 996 return 0; 987 997 error_out:
+1
drivers/parisc/sba_iommu.c
··· 44 44 #include <asm/ropes.h> 45 45 #include <asm/mckinley.h> /* for proc_mckinley_root */ 46 46 #include <asm/runway.h> /* for proc_runway_root */ 47 + #include <asm/page.h> /* for PAGE0 */ 47 48 #include <asm/pdc.h> /* for PDC_MODEL_* */ 48 49 #include <asm/pdcpat.h> /* for is_pdc_pat() */ 49 50 #include <asm/parisc-device.h>
+1 -1
drivers/pci/pci-acpi.c
··· 223 223 [PCI_D0] = ACPI_STATE_D0, 224 224 [PCI_D1] = ACPI_STATE_D1, 225 225 [PCI_D2] = ACPI_STATE_D2, 226 - [PCI_D3hot] = ACPI_STATE_D3_HOT, 226 + [PCI_D3hot] = ACPI_STATE_D3, 227 227 [PCI_D3cold] = ACPI_STATE_D3 228 228 }; 229 229 int error = -EINVAL;
+1
drivers/ptp/ptp_pch.c
··· 30 30 #include <linux/module.h> 31 31 #include <linux/pci.h> 32 32 #include <linux/ptp_clock_kernel.h> 33 + #include <linux/slab.h> 33 34 34 35 #define STATION_ADDR_LEN 20 35 36 #define PCI_DEVICE_ID_PCH_1588 0x8819
+1 -1
drivers/remoteproc/remoteproc_core.c
··· 354 354 { 355 355 struct rproc *rproc = rvdev->rproc; 356 356 357 - for (i--; i > 0; i--) { 357 + for (i--; i >= 0; i--) { 358 358 struct rproc_vring *rvring = &rvdev->vring[i]; 359 359 int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); 360 360
+3
drivers/scsi/hosts.c
··· 218 218 219 219 if (!shost->shost_gendev.parent) 220 220 shost->shost_gendev.parent = dev ? dev : &platform_bus; 221 + if (!dma_dev) 222 + dma_dev = shost->shost_gendev.parent; 223 + 221 224 shost->dma_dev = dma_dev; 222 225 223 226 error = device_add(&shost->shost_gendev);
+3
drivers/scsi/qla2xxx/qla_bsg.c
··· 1367 1367 struct qla_hw_data *ha = vha->hw; 1368 1368 int rval = 0; 1369 1369 1370 + if (ha->flags.isp82xx_reset_hdlr_active) 1371 + return -EBUSY; 1372 + 1370 1373 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1371 1374 if (rval) 1372 1375 return rval;
+1 -1
drivers/scsi/qla2xxx/qla_dbg.c
··· 15 15 * | Mailbox commands | 0x113e | 0x112c-0x112e | 16 16 * | | | 0x113a | 17 17 * | Device Discovery | 0x2086 | 0x2020-0x2022 | 18 - * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 | 18 + * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | 19 19 * | | | 0x302d-0x302e | 20 20 * | DPC Thread | 0x401c | | 21 21 * | Async Events | 0x505d | 0x502b-0x502f |
+13 -2
drivers/scsi/qla2xxx/qla_isr.c
··· 1715 1715 res = DID_ERROR << 16; 1716 1716 break; 1717 1717 } 1718 - } else { 1718 + } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 1719 + lscsi_status != SAM_STAT_BUSY) { 1720 + /* 1721 + * scsi status of task set and busy are considered to be 1722 + * task not completed. 1723 + */ 1724 + 1719 1725 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 1720 1726 "Dropped frame(s) detected (0x%x " 1721 - "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); 1727 + "of 0x%x bytes).\n", resid, 1728 + scsi_bufflen(cp)); 1722 1729 1723 1730 res = DID_ERROR << 16 | lscsi_status; 1724 1731 goto check_scsi_status; 1732 + } else { 1733 + ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 1734 + "scsi_status: 0x%x, lscsi_status: 0x%x\n", 1735 + scsi_status, lscsi_status); 1725 1736 } 1726 1737 1727 1738 res = DID_OK << 16 | lscsi_status;
+1
drivers/scsi/qla2xxx/qla_nx.c
··· 3125 3125 ql_log(ql_log_info, vha, 0x00b7, 3126 3126 "HW State: COLD/RE-INIT.\n"); 3127 3127 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3128 + qla82xx_set_rst_ready(ha); 3128 3129 if (ql2xmdenable) { 3129 3130 if (qla82xx_md_collect(vha)) 3130 3131 ql_log(ql_log_warn, vha, 0xb02c,
+17 -1
drivers/scsi/qla2xxx/qla_os.c
··· 3577 3577 continue; 3578 3578 /* Attempt a retry. */ 3579 3579 status = 1; 3580 - } else 3580 + } else { 3581 3581 status = qla2x00_fabric_login(vha, 3582 3582 fcport, &next_loopid); 3583 + if (status == QLA_SUCCESS) { 3584 + int status2; 3585 + uint8_t opts; 3586 + 3587 + opts = 0; 3588 + if (fcport->flags & 3589 + FCF_FCP2_DEVICE) 3590 + opts |= BIT_1; 3591 + status2 = 3592 + qla2x00_get_port_database( 3593 + vha, fcport, 3594 + opts); 3595 + if (status2 != QLA_SUCCESS) 3596 + status = 1; 3597 + } 3598 + } 3583 3599 } else 3584 3600 status = qla2x00_local_device_login(vha, 3585 3601 fcport);
+3
drivers/scsi/qla2xxx/qla_sup.c
··· 1017 1017 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) 1018 1018 return; 1019 1019 1020 + if (ha->flags.isp82xx_reset_hdlr_active) 1021 + return; 1022 + 1020 1023 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 1021 1024 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); 1022 1025 if (hdr.version == __constant_cpu_to_le16(0xffff))
+3 -3
drivers/scsi/qla2xxx/qla_version.h
··· 7 7 /* 8 8 * Driver version 9 9 */ 10 - #define QLA2XXX_VERSION "8.03.07.13-k" 10 + #define QLA2XXX_VERSION "8.04.00.03-k" 11 11 12 12 #define QLA_DRIVER_MAJOR_VER 8 13 - #define QLA_DRIVER_MINOR_VER 3 14 - #define QLA_DRIVER_PATCH_VER 7 13 + #define QLA_DRIVER_MINOR_VER 4 14 + #define QLA_DRIVER_PATCH_VER 0 15 15 #define QLA_DRIVER_BETA_VER 3
+13 -11
drivers/scsi/virtio_scsi.c
··· 175 175 176 176 if (cmd->comp) 177 177 complete_all(cmd->comp); 178 - mempool_free(cmd, virtscsi_cmd_pool); 178 + else 179 + mempool_free(cmd, virtscsi_cmd_pool); 179 180 } 180 181 181 182 static void virtscsi_ctrl_done(struct virtqueue *vq) ··· 312 311 static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) 313 312 { 314 313 DECLARE_COMPLETION_ONSTACK(comp); 315 - int ret; 314 + int ret = FAILED; 316 315 317 316 cmd->comp = &comp; 318 - ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, 319 - sizeof cmd->req.tmf, sizeof cmd->resp.tmf, 320 - GFP_NOIO); 321 - if (ret < 0) 322 - return FAILED; 317 + if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, 318 + sizeof cmd->req.tmf, sizeof cmd->resp.tmf, 319 + GFP_NOIO) < 0) 320 + goto out; 323 321 324 322 wait_for_completion(&comp); 325 - if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK && 326 - cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) 327 - return FAILED; 323 + if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || 324 + cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) 325 + ret = SUCCESS; 328 326 329 - return SUCCESS; 327 + out: 328 + mempool_free(cmd, virtscsi_cmd_pool); 329 + return ret; 330 330 } 331 331 332 332 static int virtscsi_device_reset(struct scsi_cmnd *sc)
+16 -6
drivers/target/target_core_file.c
··· 169 169 inode = file->f_mapping->host; 170 170 if (S_ISBLK(inode->i_mode)) { 171 171 struct request_queue *q; 172 + unsigned long long dev_size; 172 173 /* 173 174 * Setup the local scope queue_limits from struct request_queue->limits 174 175 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. ··· 184 183 * one (1) logical sector from underlying struct block_device 185 184 */ 186 185 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); 187 - fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - 186 + dev_size = (i_size_read(file->f_mapping->host) - 188 187 fd_dev->fd_block_size); 189 188 190 189 pr_debug("FILEIO: Using size: %llu bytes from struct" 191 190 " block_device blocks: %llu logical_block_size: %d\n", 192 - fd_dev->fd_dev_size, 193 - div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), 191 + dev_size, div_u64(dev_size, fd_dev->fd_block_size), 194 192 fd_dev->fd_block_size); 195 193 } else { 196 194 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { ··· 605 605 static sector_t fd_get_blocks(struct se_device *dev) 606 606 { 607 607 struct fd_dev *fd_dev = dev->dev_ptr; 608 - unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, 609 - dev->se_sub_dev->se_dev_attrib.block_size); 608 + struct file *f = fd_dev->fd_file; 609 + struct inode *i = f->f_mapping->host; 610 + unsigned long long dev_size; 611 + /* 612 + * When using a file that references an underlying struct block_device, 613 + * ensure dev_size is always based on the current inode size in order 614 + * to handle underlying block_device resize operations. 615 + */ 616 + if (S_ISBLK(i->i_mode)) 617 + dev_size = (i_size_read(i) - fd_dev->fd_block_size); 618 + else 619 + dev_size = fd_dev->fd_dev_size; 610 620 611 - return blocks_long; 621 + return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); 612 622 } 613 623 614 624 static struct se_subsystem_api fileio_template = {
+3
drivers/target/target_core_pr.c
··· 220 220 if (dev->dev_reserved_node_acl != sess->se_node_acl) 221 221 goto out_unlock; 222 222 223 + if (dev->dev_res_bin_isid != sess->sess_bin_isid) 224 + goto out_unlock; 225 + 223 226 dev->dev_reserved_node_acl = NULL; 224 227 dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 225 228 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
-22
drivers/target/target_core_tpg.c
··· 60 60 int i; 61 61 struct se_dev_entry *deve; 62 62 struct se_lun *lun; 63 - struct se_lun_acl *acl, *acl_tmp; 64 63 65 64 spin_lock_irq(&nacl->device_list_lock); 66 65 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { ··· 80 81 core_update_device_list_for_node(lun, NULL, deve->mapped_lun, 81 82 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 82 83 83 - spin_lock(&lun->lun_acl_lock); 84 - list_for_each_entry_safe(acl, acl_tmp, 85 - &lun->lun_acl_list, lacl_list) { 86 - if (!strcmp(acl->initiatorname, nacl->initiatorname) && 87 - (acl->mapped_lun == deve->mapped_lun)) 88 - break; 89 - } 90 - 91 - if (!acl) { 92 - pr_err("Unable to locate struct se_lun_acl for %s," 93 - " mapped_lun: %u\n", nacl->initiatorname, 94 - deve->mapped_lun); 95 - spin_unlock(&lun->lun_acl_lock); 96 - spin_lock_irq(&nacl->device_list_lock); 97 - continue; 98 - } 99 - 100 - list_del(&acl->lacl_list); 101 - spin_unlock(&lun->lun_acl_lock); 102 - 103 84 spin_lock_irq(&nacl->device_list_lock); 104 - kfree(acl); 105 85 } 106 86 spin_unlock_irq(&nacl->device_list_lock); 107 87 }
+1 -1
drivers/tty/vt/keyboard.c
··· 2044 2044 kbd->default_ledflagstate = ((arg >> 4) & 7); 2045 2045 set_leds(); 2046 2046 spin_unlock_irqrestore(&kbd_event_lock, flags); 2047 - break; 2047 + return 0; 2048 2048 2049 2049 /* the ioctls below only set the lights, not the functions */ 2050 2050 /* for those, see KDGKBLED and KDSKBLED above */
+6 -1
drivers/vhost/net.c
··· 24 24 #include <linux/if_arp.h> 25 25 #include <linux/if_tun.h> 26 26 #include <linux/if_macvlan.h> 27 + #include <linux/if_vlan.h> 27 28 28 29 #include <net/sock.h> 29 30 ··· 284 283 285 284 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); 286 285 head = skb_peek(&sk->sk_receive_queue); 287 - if (likely(head)) 286 + if (likely(head)) { 288 287 len = head->len; 288 + if (vlan_tx_tag_present(head)) 289 + len += VLAN_HLEN; 290 + } 291 + 289 292 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags); 290 293 return len; 291 294 }
+2
drivers/video/console/sticore.c
··· 22 22 #include <linux/font.h> 23 23 24 24 #include <asm/hardware.h> 25 + #include <asm/page.h> 25 26 #include <asm/parisc-device.h> 27 + #include <asm/pdc.h> 26 28 #include <asm/cacheflush.h> 27 29 #include <asm/grfioctl.h> 28 30
+1 -1
drivers/video/uvesafb.c
··· 73 73 struct uvesafb_task *utask; 74 74 struct uvesafb_ktask *task; 75 75 76 - if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) 76 + if (!capable(CAP_SYS_ADMIN)) 77 77 return; 78 78 79 79 if (msg->seq >= UVESAFB_TASKS_MAX)
+1
drivers/virtio/virtio_balloon.c
··· 390 390 /* There might be pages left in the balloon: free them. */ 391 391 while (vb->num_pages) 392 392 leak_balloon(vb, vb->num_pages); 393 + update_balloon_size(vb); 393 394 394 395 /* Now we reset the device so we can clean up the queues. */ 395 396 vdev->config->reset(vdev);
+1 -1
fs/cifs/cifsfs.c
··· 699 699 * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate 700 700 * the cached file length 701 701 */ 702 - if (origin != SEEK_SET || origin != SEEK_CUR) { 702 + if (origin != SEEK_SET && origin != SEEK_CUR) { 703 703 int rc; 704 704 struct inode *inode = file->f_path.dentry->d_inode; 705 705
+2 -1
fs/cifs/connect.c
··· 164 164 { Opt_sign, "sign" }, 165 165 { Opt_seal, "seal" }, 166 166 { Opt_direct, "direct" }, 167 - { Opt_direct, "forceddirectio" }, 167 + { Opt_direct, "directio" }, 168 + { Opt_direct, "forcedirectio" }, 168 169 { Opt_strictcache, "strictcache" }, 169 170 { Opt_noac, "noac" }, 170 171 { Opt_fsc, "fsc" },
+1 -1
fs/jffs2/gc.c
··· 234 234 return 0; 235 235 236 236 jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n"); 237 - spin_lock(&c->erase_completion_lock); 238 237 mutex_lock(&c->alloc_sem); 238 + spin_lock(&c->erase_completion_lock); 239 239 } 240 240 241 241 /* First, work out which block we're garbage-collecting */
+10 -2
fs/proc/task_mmu.c
··· 747 747 else if (pte_present(pte)) 748 748 *pme = make_pme(PM_PFRAME(pte_pfn(pte)) 749 749 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 750 + else 751 + *pme = make_pme(PM_NOT_PRESENT); 750 752 } 751 753 752 754 #ifdef CONFIG_TRANSPARENT_HUGEPAGE ··· 763 761 if (pmd_present(pmd)) 764 762 *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) 765 763 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 764 + else 765 + *pme = make_pme(PM_NOT_PRESENT); 766 766 } 767 767 #else 768 768 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, ··· 805 801 806 802 /* check to see if we've left 'vma' behind 807 803 * and need a new, higher one */ 808 - if (vma && (addr >= vma->vm_end)) 804 + if (vma && (addr >= vma->vm_end)) { 809 805 vma = find_vma(walk->mm, addr); 806 + pme = make_pme(PM_NOT_PRESENT); 807 + } 810 808 811 809 /* check that 'vma' actually covers this address, 812 810 * and that it isn't a huge page vma */ ··· 836 830 if (pte_present(pte)) 837 831 *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) 838 832 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 833 + else 834 + *pme = make_pme(PM_NOT_PRESENT); 839 835 } 840 836 841 837 /* This function walks within one hugetlb entry in the single call */ ··· 847 839 { 848 840 struct pagemapread *pm = walk->private; 849 841 int err = 0; 850 - pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); 842 + pagemap_entry_t pme; 851 843 852 844 for (; addr != end; addr += PAGE_SIZE) { 853 845 int offset = (addr & ~hmask) >> PAGE_SHIFT;
+6 -5
include/linux/etherdevice.h
··· 159 159 * @addr1: Pointer to a six-byte array containing the Ethernet address 160 160 * @addr2: Pointer other six-byte array containing the Ethernet address 161 161 * 162 - * Compare two ethernet addresses, returns 0 if equal 162 + * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise. 163 + * Unlike memcmp(), it doesn't return a value suitable for sorting. 163 164 */ 164 165 static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2) 165 166 { ··· 185 184 * @addr1: Pointer to an array of 8 bytes 186 185 * @addr2: Pointer to an other array of 8 bytes 187 186 * 188 - * Compare two ethernet addresses, returns 0 if equal. 189 - * Same result than "memcmp(addr1, addr2, ETH_ALEN)" but without conditional 190 - * branches, and possibly long word memory accesses on CPU allowing cheap 191 - * unaligned memory reads. 187 + * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise. 188 + * Unlike memcmp(), it doesn't return a value suitable for sorting. 189 + * The function doesn't need any conditional branches and possibly uses 190 + * word memory accesses on CPU allowing cheap unaligned memory reads. 192 191 * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2} 193 192 * 194 193 * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits.
+2
include/linux/ftrace_event.h
··· 179 179 TRACE_EVENT_FL_RECORDED_CMD_BIT, 180 180 TRACE_EVENT_FL_CAP_ANY_BIT, 181 181 TRACE_EVENT_FL_NO_SET_FILTER_BIT, 182 + TRACE_EVENT_FL_IGNORE_ENABLE_BIT, 182 183 }; 183 184 184 185 enum { ··· 188 187 TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), 189 188 TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), 190 189 TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), 190 + TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), 191 191 }; 192 192 193 193 struct ftrace_event_call {
-9
include/linux/netdevice.h
··· 1403 1403 return 0; 1404 1404 } 1405 1405 1406 - #ifndef CONFIG_NET_NS 1407 - static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev) 1408 - { 1409 - skb->dev = dev; 1410 - } 1411 - #else /* CONFIG_NET_NS */ 1412 - void skb_set_dev(struct sk_buff *skb, struct net_device *dev); 1413 - #endif 1414 - 1415 1406 static inline bool netdev_uses_trailer_tags(struct net_device *dev) 1416 1407 { 1417 1408 #ifdef CONFIG_NET_DSA_TAG_TRAILER
+16
include/linux/netfilter/ipset/ip_set_ahash.h
··· 99 99 #endif 100 100 }; 101 101 102 + static size_t 103 + htable_size(u8 hbits) 104 + { 105 + size_t hsize; 106 + 107 + /* We must fit both into u32 in jhash and size_t */ 108 + if (hbits > 31) 109 + return 0; 110 + hsize = jhash_size(hbits); 111 + if ((((size_t)-1) - sizeof(struct htable))/sizeof(struct hbucket) 112 + < hsize) 113 + return 0; 114 + 115 + return hsize * sizeof(struct hbucket) + sizeof(struct htable); 116 + } 117 + 102 118 /* Compute htable_bits from the user input parameter hashsize */ 103 119 static u8 104 120 htable_bits(u32 hashsize)
+2 -1
include/linux/usb/usbnet.h
··· 191 191 enum skb_state { 192 192 illegal = 0, 193 193 tx_start, tx_done, 194 - rx_start, rx_done, rx_cleanup 194 + rx_start, rx_done, rx_cleanup, 195 + unlink_start 195 196 }; 196 197 197 198 struct skb_data { /* skb->cb is one of these */
+2 -1
include/media/soc_camera.h
··· 59 59 struct soc_camera_host { 60 60 struct v4l2_device v4l2_dev; 61 61 struct list_head list; 62 - unsigned char nr; /* Host number */ 62 + struct mutex host_lock; /* Protect during probing */ 63 + unsigned char nr; /* Host number */ 63 64 void *priv; 64 65 const char *drv_name; 65 66 struct soc_camera_host_ops *ops;
+1
include/net/bluetooth/bluetooth.h
··· 191 191 struct list_head accept_q; 192 192 struct sock *parent; 193 193 u32 defer_setup; 194 + bool suspended; 194 195 }; 195 196 196 197 struct bt_sock_list {
+13
include/net/sctp/sctp.h
··· 704 704 addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); 705 705 } 706 706 707 + /* The cookie is always 0 since this is how it's used in the 708 + * pmtu code. 709 + */ 710 + static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) 711 + { 712 + if (t->dst && !dst_check(t->dst, 0)) { 713 + dst_release(t->dst); 714 + t->dst = NULL; 715 + } 716 + 717 + return t->dst; 718 + } 719 + 707 720 #endif /* __net_sctp_h__ */
+46 -17
kernel/compat.c
··· 372 372 373 373 #ifdef __ARCH_WANT_SYS_SIGPROCMASK 374 374 375 - asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, 376 - compat_old_sigset_t __user *oset) 375 + /* 376 + * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the 377 + * blocked set of signals to the supplied signal set 378 + */ 379 + static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set) 377 380 { 378 - old_sigset_t s; 379 - long ret; 380 - mm_segment_t old_fs; 381 + memcpy(blocked->sig, &set, sizeof(set)); 382 + } 381 383 382 - if (set && get_user(s, set)) 383 - return -EFAULT; 384 - old_fs = get_fs(); 385 - set_fs(KERNEL_DS); 386 - ret = sys_sigprocmask(how, 387 - set ? (old_sigset_t __user *) &s : NULL, 388 - oset ? (old_sigset_t __user *) &s : NULL); 389 - set_fs(old_fs); 390 - if (ret == 0) 391 - if (oset) 392 - ret = put_user(s, oset); 393 - return ret; 384 + asmlinkage long compat_sys_sigprocmask(int how, 385 + compat_old_sigset_t __user *nset, 386 + compat_old_sigset_t __user *oset) 387 + { 388 + old_sigset_t old_set, new_set; 389 + sigset_t new_blocked; 390 + 391 + old_set = current->blocked.sig[0]; 392 + 393 + if (nset) { 394 + if (get_user(new_set, nset)) 395 + return -EFAULT; 396 + new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); 397 + 398 + new_blocked = current->blocked; 399 + 400 + switch (how) { 401 + case SIG_BLOCK: 402 + sigaddsetmask(&new_blocked, new_set); 403 + break; 404 + case SIG_UNBLOCK: 405 + sigdelsetmask(&new_blocked, new_set); 406 + break; 407 + case SIG_SETMASK: 408 + compat_sig_setmask(&new_blocked, new_set); 409 + break; 410 + default: 411 + return -EINVAL; 412 + } 413 + 414 + set_current_blocked(&new_blocked); 415 + } 416 + 417 + if (oset) { 418 + if (put_user(old_set, oset)) 419 + return -EFAULT; 420 + } 421 + 422 + return 0; 394 423 } 395 424 396 425 #endif
+3
kernel/fork.c
··· 47 47 #include <linux/audit.h> 48 48 #include <linux/memcontrol.h> 49 49 #include <linux/ftrace.h> 50 + #include <linux/proc_fs.h> 50 51 #include <linux/profile.h> 51 52 #include <linux/rmap.h> 52 53 #include <linux/ksm.h> ··· 1465 1464 if (p->io_context) 1466 1465 exit_io_context(p); 1467 1466 bad_fork_cleanup_namespaces: 1467 + if (unlikely(clone_flags & CLONE_NEWPID)) 1468 + pid_ns_release_proc(p->nsproxy->pid_ns); 1468 1469 exit_task_namespaces(p); 1469 1470 bad_fork_cleanup_mm: 1470 1471 if (p->mm)
+1
kernel/irq/chip.c
··· 518 518 out_unlock: 519 519 raw_spin_unlock(&desc->lock); 520 520 } 521 + EXPORT_SYMBOL(handle_edge_irq); 521 522 522 523 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER 523 524 /**
+1
kernel/irq/irqdesc.c
··· 112 112 { 113 113 return radix_tree_lookup(&irq_desc_tree, irq); 114 114 } 115 + EXPORT_SYMBOL(irq_to_desc); 115 116 116 117 static void delete_irq_desc(unsigned int irq) 117 118 {
+2
kernel/sched/core.c
··· 6382 6382 if (!sg) 6383 6383 return -ENOMEM; 6384 6384 6385 + sg->next = sg; 6386 + 6385 6387 *per_cpu_ptr(sdd->sg, j) = sg; 6386 6388 6387 6389 sgp = kzalloc_node(sizeof(struct sched_group_power),
+4 -1
kernel/trace/trace_events.c
··· 294 294 if (!call->name || !call->class || !call->class->reg) 295 295 continue; 296 296 297 + if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 298 + continue; 299 + 297 300 if (match && 298 301 strcmp(match, call->name) != 0 && 299 302 strcmp(match, call->class->system) != 0) ··· 1167 1164 return -1; 1168 1165 } 1169 1166 1170 - if (call->class->reg) 1167 + if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 1171 1168 trace_create_file("enable", 0644, call->dir, call, 1172 1169 enable); 1173 1170
+1
kernel/trace/trace_export.c
··· 180 180 .event.type = etype, \ 181 181 .class = &event_class_ftrace_##call, \ 182 182 .print_fmt = print, \ 183 + .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ 183 184 }; \ 184 185 struct ftrace_event_call __used \ 185 186 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
-1
mm/hugetlb.c
··· 2498 2498 if (outside_reserve) { 2499 2499 BUG_ON(huge_pte_none(pte)); 2500 2500 if (unmap_ref_private(mm, vma, old_page, address)) { 2501 - BUG_ON(page_count(old_page) != 1); 2502 2501 BUG_ON(huge_pte_none(pte)); 2503 2502 spin_lock(&mm->page_table_lock); 2504 2503 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+6
mm/memcontrol.c
··· 4507 4507 swap_buffers: 4508 4508 /* Swap primary and spare array */ 4509 4509 thresholds->spare = thresholds->primary; 4510 + /* If all events are unregistered, free the spare array */ 4511 + if (!new) { 4512 + kfree(thresholds->spare); 4513 + thresholds->spare = NULL; 4514 + } 4515 + 4510 4516 rcu_assign_pointer(thresholds->primary, new); 4511 4517 4512 4518 /* To be sure that nobody uses thresholds */
+1 -2
mm/nobootmem.c
··· 82 82 83 83 static void __init __free_pages_memory(unsigned long start, unsigned long end) 84 84 { 85 - int i; 86 - unsigned long start_aligned, end_aligned; 85 + unsigned long i, start_aligned, end_aligned; 87 86 int order = ilog2(BITS_PER_LONG); 88 87 89 88 start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+1 -1
mm/page_alloc.c
··· 5203 5203 int ret; 5204 5204 5205 5205 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5206 - if (!write || (ret == -EINVAL)) 5206 + if (!write || (ret < 0)) 5207 5207 return ret; 5208 5208 for_each_populated_zone(zone) { 5209 5209 for_each_possible_cpu(cpu) {
+12
mm/percpu.c
··· 1650 1650 areas[group] = ptr; 1651 1651 1652 1652 base = min(ptr, base); 1653 + } 1654 + 1655 + /* 1656 + * Copy data and free unused parts. This should happen after all 1657 + * allocations are complete; otherwise, we may end up with 1658 + * overlapping groups. 1659 + */ 1660 + for (group = 0; group < ai->nr_groups; group++) { 1661 + struct pcpu_group_info *gi = &ai->groups[group]; 1662 + void *ptr = areas[group]; 1653 1663 1654 1664 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 1655 1665 if (gi->cpu_map[i] == NR_CPUS) { ··· 1895 1885 fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 1896 1886 if (!ai || !fc) 1897 1887 panic("Failed to allocate memory for percpu areas."); 1888 + /* kmemleak tracks the percpu allocations separately */ 1889 + kmemleak_free(fc); 1898 1890 1899 1891 ai->dyn_size = unit_size; 1900 1892 ai->unit_size = unit_size;
+1 -1
net/8021q/vlan_dev.c
··· 157 157 skb = __vlan_hwaccel_put_tag(skb, vlan_tci); 158 158 } 159 159 160 - skb_set_dev(skb, vlan_dev_priv(dev)->real_dev); 160 + skb->dev = vlan_dev_priv(dev)->real_dev; 161 161 len = skb->len; 162 162 if (netpoll_tx_running(dev)) 163 163 return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+1 -1
net/bluetooth/af_bluetooth.c
··· 450 450 sk->sk_state == BT_CONFIG) 451 451 return mask; 452 452 453 - if (sock_writeable(sk)) 453 + if (!bt_sk(sk)->suspended && sock_writeable(sk)) 454 454 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 455 455 else 456 456 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+8
net/bluetooth/hci_core.c
··· 2784 2784 if (conn) { 2785 2785 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 2786 2786 2787 + hci_dev_lock(hdev); 2788 + if (test_bit(HCI_MGMT, &hdev->dev_flags) && 2789 + !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2790 + mgmt_device_connected(hdev, &conn->dst, conn->type, 2791 + conn->dst_type, 0, NULL, 0, 2792 + conn->dev_class); 2793 + hci_dev_unlock(hdev); 2794 + 2787 2795 /* Send to upper protocol */ 2788 2796 l2cap_recv_acldata(conn, skb, flags); 2789 2797 return;
+9 -2
net/bluetooth/hci_event.c
··· 2039 2039 2040 2040 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2041 2041 2042 + if (ev->status && conn->state == BT_CONNECTED) { 2043 + hci_acl_disconn(conn, 0x13); 2044 + hci_conn_put(conn); 2045 + goto unlock; 2046 + } 2047 + 2042 2048 if (conn->state == BT_CONFIG) { 2043 2049 if (!ev->status) 2044 2050 conn->state = BT_CONNECTED; ··· 2055 2049 hci_encrypt_cfm(conn, ev->status, ev->encrypt); 2056 2050 } 2057 2051 2052 + unlock: 2058 2053 hci_dev_unlock(hdev); 2059 2054 } 2060 2055 ··· 2109 2102 goto unlock; 2110 2103 } 2111 2104 2112 - if (!ev->status) { 2105 + if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 2113 2106 struct hci_cp_remote_name_req cp; 2114 2107 memset(&cp, 0, sizeof(cp)); 2115 2108 bacpy(&cp.bdaddr, &conn->dst); ··· 2878 2871 if (conn->state != BT_CONFIG) 2879 2872 goto unlock; 2880 2873 2881 - if (!ev->status) { 2874 + if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 2882 2875 struct hci_cp_remote_name_req cp; 2883 2876 memset(&cp, 0, sizeof(cp)); 2884 2877 bacpy(&cp.bdaddr, &conn->dst);
+5
net/bluetooth/l2cap_core.c
··· 4589 4589 4590 4590 if (!status && (chan->state == BT_CONNECTED || 4591 4591 chan->state == BT_CONFIG)) { 4592 + struct sock *sk = chan->sk; 4593 + 4594 + bt_sk(sk)->suspended = false; 4595 + sk->sk_state_change(sk); 4596 + 4592 4597 l2cap_check_encryption(chan, encrypt); 4593 4598 l2cap_chan_unlock(chan); 4594 4599 continue;
+8 -4
net/bluetooth/l2cap_sock.c
··· 592 592 sk->sk_state = BT_CONFIG; 593 593 chan->state = BT_CONFIG; 594 594 595 - /* or for ACL link, under defer_setup time */ 596 - } else if (sk->sk_state == BT_CONNECT2 && 597 - bt_sk(sk)->defer_setup) { 598 - err = l2cap_chan_check_security(chan); 595 + /* or for ACL link */ 596 + } else if ((sk->sk_state == BT_CONNECT2 && 597 + bt_sk(sk)->defer_setup) || 598 + sk->sk_state == BT_CONNECTED) { 599 + if (!l2cap_chan_check_security(chan)) 600 + bt_sk(sk)->suspended = true; 601 + else 602 + sk->sk_state_change(sk); 599 603 } else { 600 604 err = -EINVAL; 601 605 }
+5 -31
net/core/dev.c
··· 1617 1617 return NET_RX_DROP; 1618 1618 } 1619 1619 skb->skb_iif = 0; 1620 - skb_set_dev(skb, dev); 1620 + skb->dev = dev; 1621 + skb_dst_drop(skb); 1621 1622 skb->tstamp.tv64 = 0; 1622 1623 skb->pkt_type = PACKET_HOST; 1623 1624 skb->protocol = eth_type_trans(skb, dev); 1625 + skb->mark = 0; 1626 + secpath_reset(skb); 1627 + nf_reset(skb); 1624 1628 return netif_rx(skb); 1625 1629 } 1626 1630 EXPORT_SYMBOL_GPL(dev_forward_skb); ··· 1872 1868 } 1873 1869 } 1874 1870 EXPORT_SYMBOL(netif_device_attach); 1875 - 1876 - /** 1877 - * skb_dev_set -- assign a new device to a buffer 1878 - * @skb: buffer for the new device 1879 - * @dev: network device 1880 - * 1881 - * If an skb is owned by a device already, we have to reset 1882 - * all data private to the namespace a device belongs to 1883 - * before assigning it a new device. 1884 - */ 1885 - #ifdef CONFIG_NET_NS 1886 - void skb_set_dev(struct sk_buff *skb, struct net_device *dev) 1887 - { 1888 - skb_dst_drop(skb); 1889 - if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) { 1890 - secpath_reset(skb); 1891 - nf_reset(skb); 1892 - skb_init_secmark(skb); 1893 - skb->mark = 0; 1894 - skb->priority = 0; 1895 - skb->nf_trace = 0; 1896 - skb->ipvs_property = 0; 1897 - #ifdef CONFIG_NET_SCHED 1898 - skb->tc_index = 0; 1899 - #endif 1900 - } 1901 - skb->dev = dev; 1902 - } 1903 - EXPORT_SYMBOL(skb_set_dev); 1904 - #endif /* CONFIG_NET_NS */ 1905 1871 1906 1872 static void skb_warn_bad_offload(const struct sk_buff *skb) 1907 1873 {
+8 -2
net/core/pktgen.c
··· 1931 1931 { 1932 1932 struct net_device *dev = ptr; 1933 1933 1934 - if (!net_eq(dev_net(dev), &init_net)) 1934 + if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting) 1935 1935 return NOTIFY_DONE; 1936 1936 1937 1937 /* It is OK that we do not hold the group lock right now, ··· 3755 3755 { 3756 3756 struct pktgen_thread *t; 3757 3757 struct list_head *q, *n; 3758 + struct list_head list; 3758 3759 3759 3760 /* Stop all interfaces & threads */ 3760 3761 pktgen_exiting = true; 3761 3762 3762 - list_for_each_safe(q, n, &pktgen_threads) { 3763 + mutex_lock(&pktgen_thread_lock); 3764 + list_splice(&list, &pktgen_threads); 3765 + mutex_unlock(&pktgen_thread_lock); 3766 + 3767 + list_for_each_safe(q, n, &list) { 3763 3768 t = list_entry(q, struct pktgen_thread, th_list); 3769 + list_del(&t->th_list); 3764 3770 kthread_stop(t->tsk); 3765 3771 kfree(t); 3766 3772 }
+2
net/ipv4/fib_trie.c
··· 1370 1370 1371 1371 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) 1372 1372 continue; 1373 + if (fi->fib_dead) 1374 + continue; 1373 1375 if (fa->fa_info->fib_scope < flp->flowi4_scope) 1374 1376 continue; 1375 1377 fib_alias_accessed(fa);
+1 -2
net/ipv4/tcp.c
··· 851 851 wait_for_sndbuf: 852 852 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 853 853 wait_for_memory: 854 - if (copied) 855 - tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 854 + tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 856 855 857 856 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 858 857 goto do_error;
+7 -3
net/netfilter/ipset/ip_set_hash_ip.c
··· 364 364 { 365 365 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 366 366 u8 netmask, hbits; 367 + size_t hsize; 367 368 struct ip_set_hash *h; 368 369 369 370 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) ··· 406 405 h->timeout = IPSET_NO_TIMEOUT; 407 406 408 407 hbits = htable_bits(hashsize); 409 - h->table = ip_set_alloc( 410 - sizeof(struct htable) 411 - + jhash_size(hbits) * sizeof(struct hbucket)); 408 + hsize = htable_size(hbits); 409 + if (hsize == 0) { 410 + kfree(h); 411 + return -ENOMEM; 412 + } 413 + h->table = ip_set_alloc(hsize); 412 414 if (!h->table) { 413 415 kfree(h); 414 416 return -ENOMEM;
+7 -3
net/netfilter/ipset/ip_set_hash_ipport.c
··· 449 449 struct ip_set_hash *h; 450 450 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 451 451 u8 hbits; 452 + size_t hsize; 452 453 453 454 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 454 455 return -IPSET_ERR_INVALID_FAMILY; ··· 477 476 h->timeout = IPSET_NO_TIMEOUT; 478 477 479 478 hbits = htable_bits(hashsize); 480 - h->table = ip_set_alloc( 481 - sizeof(struct htable) 482 - + jhash_size(hbits) * sizeof(struct hbucket)); 479 + hsize = htable_size(hbits); 480 + if (hsize == 0) { 481 + kfree(h); 482 + return -ENOMEM; 483 + } 484 + h->table = ip_set_alloc(hsize); 483 485 if (!h->table) { 484 486 kfree(h); 485 487 return -ENOMEM;
+7 -3
net/netfilter/ipset/ip_set_hash_ipportip.c
··· 467 467 struct ip_set_hash *h; 468 468 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 469 469 u8 hbits; 470 + size_t hsize; 470 471 471 472 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 472 473 return -IPSET_ERR_INVALID_FAMILY; ··· 495 494 h->timeout = IPSET_NO_TIMEOUT; 496 495 497 496 hbits = htable_bits(hashsize); 498 - h->table = ip_set_alloc( 499 - sizeof(struct htable) 500 - + jhash_size(hbits) * sizeof(struct hbucket)); 497 + hsize = htable_size(hbits); 498 + if (hsize == 0) { 499 + kfree(h); 500 + return -ENOMEM; 501 + } 502 + h->table = ip_set_alloc(hsize); 501 503 if (!h->table) { 502 504 kfree(h); 503 505 return -ENOMEM;
+7 -3
net/netfilter/ipset/ip_set_hash_ipportnet.c
··· 616 616 struct ip_set_hash *h; 617 617 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 618 618 u8 hbits; 619 + size_t hsize; 619 620 620 621 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 621 622 return -IPSET_ERR_INVALID_FAMILY; ··· 646 645 h->timeout = IPSET_NO_TIMEOUT; 647 646 648 647 hbits = htable_bits(hashsize); 649 - h->table = ip_set_alloc( 650 - sizeof(struct htable) 651 - + jhash_size(hbits) * sizeof(struct hbucket)); 648 + hsize = htable_size(hbits); 649 + if (hsize == 0) { 650 + kfree(h); 651 + return -ENOMEM; 652 + } 653 + h->table = ip_set_alloc(hsize); 652 654 if (!h->table) { 653 655 kfree(h); 654 656 return -ENOMEM;
+7 -3
net/netfilter/ipset/ip_set_hash_net.c
··· 460 460 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 461 461 struct ip_set_hash *h; 462 462 u8 hbits; 463 + size_t hsize; 463 464 464 465 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 465 466 return -IPSET_ERR_INVALID_FAMILY; ··· 490 489 h->timeout = IPSET_NO_TIMEOUT; 491 490 492 491 hbits = htable_bits(hashsize); 493 - h->table = ip_set_alloc( 494 - sizeof(struct htable) 495 - + jhash_size(hbits) * sizeof(struct hbucket)); 492 + hsize = htable_size(hbits); 493 + if (hsize == 0) { 494 + kfree(h); 495 + return -ENOMEM; 496 + } 497 + h->table = ip_set_alloc(hsize); 496 498 if (!h->table) { 497 499 kfree(h); 498 500 return -ENOMEM;
+7 -3
net/netfilter/ipset/ip_set_hash_netiface.c
··· 722 722 struct ip_set_hash *h; 723 723 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 724 724 u8 hbits; 725 + size_t hsize; 725 726 726 727 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 727 728 return -IPSET_ERR_INVALID_FAMILY; ··· 753 752 h->ahash_max = AHASH_MAX_SIZE; 754 753 755 754 hbits = htable_bits(hashsize); 756 - h->table = ip_set_alloc( 757 - sizeof(struct htable) 758 - + jhash_size(hbits) * sizeof(struct hbucket)); 755 + hsize = htable_size(hbits); 756 + if (hsize == 0) { 757 + kfree(h); 758 + return -ENOMEM; 759 + } 760 + h->table = ip_set_alloc(hsize); 759 761 if (!h->table) { 760 762 kfree(h); 761 763 return -ENOMEM;
+7 -3
net/netfilter/ipset/ip_set_hash_netport.c
··· 572 572 struct ip_set_hash *h; 573 573 u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM; 574 574 u8 hbits; 575 + size_t hsize; 575 576 576 577 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) 577 578 return -IPSET_ERR_INVALID_FAMILY; ··· 602 601 h->timeout = IPSET_NO_TIMEOUT; 603 602 604 603 hbits = htable_bits(hashsize); 605 - h->table = ip_set_alloc( 606 - sizeof(struct htable) 607 - + jhash_size(hbits) * sizeof(struct hbucket)); 604 + hsize = htable_size(hbits); 605 + if (hsize == 0) { 606 + kfree(h); 607 + return -ENOMEM; 608 + } 609 + h->table = ip_set_alloc(hsize); 608 610 if (!h->table) { 609 611 kfree(h); 610 612 return -ENOMEM;
+18 -11
net/openvswitch/datapath.c
··· 321 321 return -ENOMEM; 322 322 323 323 nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); 324 - if (!skb) 324 + if (!nskb) 325 325 return -ENOMEM; 326 326 327 327 nskb->vlan_tci = 0; ··· 421 421 return validate_actions(actions, key, depth + 1); 422 422 } 423 423 424 + static int validate_tp_port(const struct sw_flow_key *flow_key) 425 + { 426 + if (flow_key->eth.type == htons(ETH_P_IP)) { 427 + if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst) 428 + return 0; 429 + } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { 430 + if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst) 431 + return 0; 432 + } 433 + 434 + return -EINVAL; 435 + } 436 + 424 437 static int validate_set(const struct nlattr *a, 425 438 const struct sw_flow_key *flow_key) 426 439 { ··· 475 462 if (flow_key->ip.proto != IPPROTO_TCP) 476 463 return -EINVAL; 477 464 478 - if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) 479 - return -EINVAL; 480 - 481 - break; 465 + return validate_tp_port(flow_key); 482 466 483 467 case OVS_KEY_ATTR_UDP: 484 468 if (flow_key->ip.proto != IPPROTO_UDP) 485 469 return -EINVAL; 486 470 487 - if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) 488 - return -EINVAL; 489 - break; 471 + return validate_tp_port(flow_key); 490 472 491 473 default: 492 474 return -EINVAL; ··· 1649 1641 reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, 1650 1642 OVS_VPORT_CMD_NEW); 1651 1643 if (IS_ERR(reply)) { 1652 - err = PTR_ERR(reply); 1653 1644 netlink_set_err(init_net.genl_sock, 0, 1654 - ovs_dp_vport_multicast_group.id, err); 1655 - return 0; 1645 + ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); 1646 + goto exit_unlock; 1656 1647 } 1657 1648 1658 1649 genl_notify(reply, genl_info_net(info), info->snd_pid,
+2 -1
net/openvswitch/flow.c
··· 183 183 u8 tcp_flags = 0; 184 184 185 185 if (flow->key.eth.type == htons(ETH_P_IP) && 186 - flow->key.ip.proto == IPPROTO_TCP) { 186 + flow->key.ip.proto == IPPROTO_TCP && 187 + likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { 187 188 u8 *tcp = (u8 *)tcp_hdr(skb); 188 189 tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; 189 190 }
+1 -3
net/sctp/output.c
··· 377 377 */ 378 378 skb_set_owner_w(nskb, sk); 379 379 380 - /* The 'obsolete' field of dst is set to 2 when a dst is freed. */ 381 - if (!dst || (dst->obsolete > 1)) { 382 - dst_release(dst); 380 + if (!sctp_transport_dst_check(tp)) { 383 381 sctp_transport_route(tp, NULL, sctp_sk(sk)); 384 382 if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { 385 383 sctp_assoc_sync_pmtu(asoc);
-17
net/sctp/transport.c
··· 226 226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 227 227 } 228 228 229 - /* this is a complete rip-off from __sk_dst_check 230 - * the cookie is always 0 since this is how it's used in the 231 - * pmtu code 232 - */ 233 - static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) 234 - { 235 - struct dst_entry *dst = t->dst; 236 - 237 - if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) { 238 - dst_release(t->dst); 239 - t->dst = NULL; 240 - return NULL; 241 - } 242 - 243 - return dst; 244 - } 245 - 246 229 void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 247 230 { 248 231 struct dst_entry *dst;
+4 -3
net/sunrpc/auth_gss/gss_mech_switch.c
··· 242 242 int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr) 243 243 { 244 244 struct gss_api_mech *pos = NULL; 245 - int i = 0; 245 + int j, i = 0; 246 246 247 247 spin_lock(&registered_mechs_lock); 248 248 list_for_each_entry(pos, &registered_mechs, gm_list) { 249 - array_ptr[i] = pos->gm_pfs->pseudoflavor; 250 - i++; 249 + for (j=0; j < pos->gm_pf_num; j++) { 250 + array_ptr[i++] = pos->gm_pfs[j].pseudoflavor; 251 + } 251 252 } 252 253 spin_unlock(&registered_mechs_lock); 253 254 return i;
+1 -1
sound/pci/echoaudio/echoaudio_dsp.c
··· 475 475 const struct firmware *fw; 476 476 int box_type, err; 477 477 478 - if (snd_BUG_ON(!chip->dsp_code_to_load || !chip->comm_page)) 478 + if (snd_BUG_ON(!chip->comm_page)) 479 479 return -EPERM; 480 480 481 481 /* See if the ASIC is present and working - only if the DSP is already loaded */
-4
sound/pci/hda/hda_codec.c
··· 5444 5444 list_for_each_entry(codec, &bus->codec_list, list) { 5445 5445 if (hda_codec_is_power_on(codec)) 5446 5446 hda_call_codec_suspend(codec); 5447 - else /* forcibly change the power to D3 even if not used */ 5448 - hda_set_power_state(codec, 5449 - codec->afg ? codec->afg : codec->mfg, 5450 - AC_PWRST_D3); 5451 5447 if (codec->patch_ops.post_suspend) 5452 5448 codec->patch_ops.post_suspend(codec); 5453 5449 }
+17 -3
sound/pci/hda/hda_intel.c
··· 783 783 { 784 784 struct azx *chip = bus->private_data; 785 785 unsigned long timeout; 786 + unsigned long loopcounter; 786 787 int do_poll = 0; 787 788 788 789 again: 789 790 timeout = jiffies + msecs_to_jiffies(1000); 790 - for (;;) { 791 + 792 + for (loopcounter = 0;; loopcounter++) { 791 793 if (chip->polling_mode || do_poll) { 792 794 spin_lock_irq(&chip->reg_lock); 793 795 azx_update_rirb(chip); ··· 805 803 } 806 804 if (time_after(jiffies, timeout)) 807 805 break; 808 - if (bus->needs_damn_long_delay) 806 + if (bus->needs_damn_long_delay || loopcounter > 3000) 809 807 msleep(2); /* temporary workaround */ 810 808 else { 811 809 udelay(10); ··· 2353 2351 * power management 2354 2352 */ 2355 2353 2354 + static int snd_hda_codecs_inuse(struct hda_bus *bus) 2355 + { 2356 + struct hda_codec *codec; 2357 + 2358 + list_for_each_entry(codec, &bus->codec_list, list) { 2359 + if (snd_hda_codec_needs_resume(codec)) 2360 + return 1; 2361 + } 2362 + return 0; 2363 + } 2364 + 2356 2365 static int azx_suspend(struct pci_dev *pci, pm_message_t state) 2357 2366 { 2358 2367 struct snd_card *card = pci_get_drvdata(pci); ··· 2410 2397 return -EIO; 2411 2398 azx_init_pci(chip); 2412 2399 2413 - azx_init_chip(chip, 1); 2400 + if (snd_hda_codecs_inuse(chip->bus)) 2401 + azx_init_chip(chip, 1); 2414 2402 2415 2403 snd_hda_resume(chip->bus); 2416 2404 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+10 -6
sound/pci/hda/patch_realtek.c
··· 5405 5405 SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", 5406 5406 ALC882_FIXUP_ACER_ASPIRE_4930G), 5407 5407 SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), 5408 + SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G", 5409 + ALC882_FIXUP_ACER_ASPIRE_4930G), 5408 5410 SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), 5409 5411 SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G), 5410 5412 SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736), ··· 5440 5438 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), 5441 5439 5442 5440 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 5441 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 5443 5442 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), 5444 5443 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3", ALC889_FIXUP_CD), 5445 5444 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), ··· 5641 5638 snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PROC_COEF, tmp | 0x80); 5642 5639 } 5643 5640 #endif 5644 - alc_auto_parse_customize_define(codec); 5645 - 5646 5641 alc_fix_pll_init(codec, 0x20, 0x0a, 10); 5647 5642 5648 5643 alc_pick_fixup(codec, NULL, alc262_fixup_tbl, alc262_fixups); 5649 5644 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); 5645 + 5646 + alc_auto_parse_customize_define(codec); 5650 5647 5651 5648 /* automatic parse from the BIOS config */ 5652 5649 err = alc262_parse_auto_config(codec); ··· 6252 6249 6253 6250 spec->mixer_nid = 0x0b; 6254 6251 6255 - alc_auto_parse_customize_define(codec); 6256 - 6257 6252 err = alc_codec_rename_from_preset(codec); 6258 6253 if (err < 0) 6259 6254 goto error; ··· 6283 6282 alc_pick_fixup(codec, alc269_fixup_models, 6284 6283 alc269_fixup_tbl, alc269_fixups); 6285 6284 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); 6285 + 6286 + alc_auto_parse_customize_define(codec); 6286 6287 
6287 6288 /* automatic parse from the BIOS config */ 6288 6289 err = alc269_parse_auto_config(codec); ··· 6862 6859 /* handle multiple HPs as is */ 6863 6860 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 6864 6861 6865 - alc_auto_parse_customize_define(codec); 6866 - 6867 6862 alc_fix_pll_init(codec, 0x20, 0x04, 15); 6868 6863 6869 6864 err = alc_codec_rename_from_preset(codec); ··· 6878 6877 alc_pick_fixup(codec, alc662_fixup_models, 6879 6878 alc662_fixup_tbl, alc662_fixups); 6880 6879 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); 6880 + 6881 + alc_auto_parse_customize_define(codec); 6882 + 6881 6883 /* automatic parse from the BIOS config */ 6882 6884 err = alc662_parse_auto_config(codec); 6883 6885 if (err < 0)
+3 -3
sound/pci/hda/patch_sigmatel.c
··· 4415 4415 def_conf = get_defcfg_connect(def_conf); 4416 4416 /* skip any ports that don't have jacks since presence 4417 4417 * detection is useless */ 4418 - if (def_conf != AC_JACK_PORT_COMPLEX) { 4419 - if (def_conf != AC_JACK_PORT_NONE) 4420 - stac_toggle_power_map(codec, nid, 1); 4418 + if (def_conf != AC_JACK_PORT_NONE && 4419 + !is_jack_detectable(codec, nid)) { 4420 + stac_toggle_power_map(codec, nid, 1); 4421 4421 continue; 4422 4422 } 4423 4423 if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) {
+1
sound/pci/rme9652/hdsp.c
··· 5170 5170 strcpy(hw->name, "HDSP hwdep interface"); 5171 5171 5172 5172 hw->ops.ioctl = snd_hdsp_hwdep_ioctl; 5173 + hw->ops.ioctl_compat = snd_hdsp_hwdep_ioctl; 5173 5174 5174 5175 return 0; 5175 5176 }
+8 -8
sound/soc/codecs/cs42l73.c
··· 568 568 attn_tlv), 569 569 570 570 SOC_SINGLE_TLV("SPK-IP Mono Volume", 571 - CS42L73_SPKMIPMA, 0, 0x3E, 1, attn_tlv), 571 + CS42L73_SPKMIPMA, 0, 0x3F, 1, attn_tlv), 572 572 SOC_SINGLE_TLV("SPK-XSP Mono Volume", 573 - CS42L73_SPKMXSPA, 0, 0x3E, 1, attn_tlv), 573 + CS42L73_SPKMXSPA, 0, 0x3F, 1, attn_tlv), 574 574 SOC_SINGLE_TLV("SPK-ASP Mono Volume", 575 - CS42L73_SPKMASPA, 0, 0x3E, 1, attn_tlv), 575 + CS42L73_SPKMASPA, 0, 0x3F, 1, attn_tlv), 576 576 SOC_SINGLE_TLV("SPK-VSP Mono Volume", 577 - CS42L73_SPKMVSPMA, 0, 0x3E, 1, attn_tlv), 577 + CS42L73_SPKMVSPMA, 0, 0x3F, 1, attn_tlv), 578 578 579 579 SOC_SINGLE_TLV("ESL-IP Mono Volume", 580 - CS42L73_ESLMIPMA, 0, 0x3E, 1, attn_tlv), 580 + CS42L73_ESLMIPMA, 0, 0x3F, 1, attn_tlv), 581 581 SOC_SINGLE_TLV("ESL-XSP Mono Volume", 582 - CS42L73_ESLMXSPA, 0, 0x3E, 1, attn_tlv), 582 + CS42L73_ESLMXSPA, 0, 0x3F, 1, attn_tlv), 583 583 SOC_SINGLE_TLV("ESL-ASP Mono Volume", 584 - CS42L73_ESLMASPA, 0, 0x3E, 1, attn_tlv), 584 + CS42L73_ESLMASPA, 0, 0x3F, 1, attn_tlv), 585 585 SOC_SINGLE_TLV("ESL-VSP Mono Volume", 586 - CS42L73_ESLMVSPMA, 0, 0x3E, 1, attn_tlv), 586 + CS42L73_ESLMVSPMA, 0, 0x3F, 1, attn_tlv), 587 587 588 588 SOC_ENUM("IP Digital Swap/Mono Select", ip_swap_enum), 589 589
+1 -1
sound/soc/codecs/wm8994.c
··· 1144 1144 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, 1145 1145 WM8994_AIF2DACL_ENA | 1146 1146 WM8994_AIF2DACR_ENA, 0); 1147 - snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, 1147 + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, 1148 1148 WM8994_AIF2ADCL_ENA | 1149 1149 WM8994_AIF2ADCR_ENA, 0); 1150 1150
+1 -1
sound/soc/sh/migor.c
··· 35 35 return codec_freq; 36 36 } 37 37 38 - static struct clk_ops siumckb_clk_ops = { 38 + static struct sh_clk_ops siumckb_clk_ops = { 39 39 .recalc = siumckb_recalc, 40 40 }; 41 41
+2 -2
tools/perf/Makefile
··· 774 774 # over the general rule for .o 775 775 776 776 $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS 777 - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $< 777 + $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $< 778 778 779 779 $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS 780 - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $< 780 + $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $< 781 781 782 782 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS 783 783 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+35 -5
tools/perf/builtin-stat.c
··· 283 283 { 284 284 struct perf_event_attr *attr = &evsel->attr; 285 285 struct xyarray *group_fd = NULL; 286 + bool exclude_guest_missing = false; 287 + int ret; 286 288 287 289 if (group && evsel != first) 288 290 group_fd = first->fd; ··· 295 293 296 294 attr->inherit = !no_inherit; 297 295 298 - if (system_wide) 299 - return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, 296 + retry: 297 + if (exclude_guest_missing) 298 + evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; 299 + 300 + if (system_wide) { 301 + ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus, 300 302 group, group_fd); 303 + if (ret) 304 + goto check_ret; 305 + return 0; 306 + } 307 + 301 308 if (!target_pid && !target_tid && (!group || evsel == first)) { 302 309 attr->disabled = 1; 303 310 attr->enable_on_exec = 1; 304 311 } 305 312 306 - return perf_evsel__open_per_thread(evsel, evsel_list->threads, 307 - group, group_fd); 313 + ret = perf_evsel__open_per_thread(evsel, evsel_list->threads, 314 + group, group_fd); 315 + if (!ret) 316 + return 0; 317 + /* fall through */ 318 + check_ret: 319 + if (ret && errno == EINVAL) { 320 + if (!exclude_guest_missing && 321 + (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { 322 + pr_debug("Old kernel, cannot exclude " 323 + "guest or host samples.\n"); 324 + exclude_guest_missing = true; 325 + goto retry; 326 + } 327 + } 328 + return ret; 308 329 } 309 330 310 331 /* ··· 488 463 489 464 list_for_each_entry(counter, &evsel_list->entries, node) { 490 465 if (create_perf_stat_counter(counter, first) < 0) { 466 + /* 467 + * PPC returns ENXIO for HW counters until 2.6.37 468 + * (behavior changed with commit b0a873e). 469 + */ 491 470 if (errno == EINVAL || errno == ENOSYS || 492 - errno == ENOENT || errno == EOPNOTSUPP) { 471 + errno == ENOENT || errno == EOPNOTSUPP || 472 + errno == ENXIO) { 493 473 if (verbose) 494 474 ui__warning("%s event is not supported by the kernel.\n", 495 475 event_name(counter));
+1 -1
tools/perf/util/header.c
··· 296 296 if (mkdir_p(filename, 0755)) 297 297 goto out_free; 298 298 299 - snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id); 299 + snprintf(filename + len, size - len, "/%s", sbuild_id); 300 300 301 301 if (access(filename, F_OK)) { 302 302 if (is_kallsyms) {