Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm: (25 commits)
[ARM] 5519/1: amba probe: pass "struct amba_id *" instead of void *
[ARM] 5517/1: integrator: don't put clock lookups in __initdata
[ARM] 5518/1: versatile: don't put clock lookups in __initdata
[ARM] mach-l7200: fix spelling of SYS_CLOCK_OFF
[ARM] Double check memmap is actually valid with a memmap has unexpected holes V2
[ARM] realview: fix broadcast tick support
[ARM] realview: remove useless smp_cross_call_done()
[ARM] smp: fix cpumask usage in ARM SMP code
[ARM] 5513/1: Eurotech VIPER SBC: fix compilation error
[ARM] 5509/1: ep93xx: clkdev enable UARTS
ARM: OMAP2/3: Change omapfb to use clkdev for dispc and rfbi, v2
ARM: OMAP3: Fix HW SAVEANDRESTORE shift define
ARM: OMAP3: Fix number of GPIO lines for 34xx
[ARM] S3C: Do not set clk->owner field if unset
[ARM] S3C2410: mach-bast.c registering i2c data too early
[ARM] S3C24XX: Fix unused code warning in arch/arm/plat-s3c24xx/dma.c
[ARM] S3C64XX: fix GPIO debug
[ARM] S3C64XX: GPIO include cleanup
[ARM] nwfpe: fix 'floatx80_is_nan' sparse warning
[ARM] nwfpe: Add decleration for ExtendedCPDO
...

+217 -164
+3 -3
arch/arm/Kconfig
··· 273 273 select HAVE_CLK 274 274 select COMMON_CLKDEV 275 275 select ARCH_REQUIRE_GPIOLIB 276 + select ARCH_HAS_HOLES_MEMORYMODEL 276 277 help 277 278 This enables support for the Cirrus EP93xx series of CPUs. 278 279 ··· 977 976 UNPREDICTABLE (in fact it can be predicted that it won't work 978 977 at all). If in doubt say Y. 979 978 980 - config ARCH_FLATMEM_HAS_HOLES 979 + config ARCH_HAS_HOLES_MEMORYMODEL 981 980 bool 982 - default y 983 - depends on FLATMEM 981 + default n 984 982 985 983 # Discontigmem is deprecated 986 984 config ARCH_DISCONTIGMEM_ENABLE
+2 -2
arch/arm/common/gic.c
··· 253 253 } 254 254 255 255 #ifdef CONFIG_SMP 256 - void gic_raise_softirq(cpumask_t cpumask, unsigned int irq) 256 + void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) 257 257 { 258 - unsigned long map = *cpus_addr(cpumask); 258 + unsigned long map = *cpus_addr(*mask); 259 259 260 260 /* this always happens on GIC0 */ 261 261 writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
+1 -1
arch/arm/include/asm/hardware/gic.h
··· 36 36 void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start); 37 37 void gic_cpu_init(unsigned int gic_nr, void __iomem *base); 38 38 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 39 - void gic_raise_softirq(cpumask_t cpumask, unsigned int irq); 39 + void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); 40 40 #endif 41 41 42 42 #endif
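
gic_raise_softirq() now takes a const struct cpumask * instead of a cpumask_t by value, in line with the newer cpumask API. A minimal before/after sketch of a call site (illustrative only, not taken from this merge; the realview smp_cross_call() below uses the new form):

    /* old: the cpumask is copied by value */
    gic_raise_softirq(cpumask_of_cpu(cpu), 1);

    /* new: a const pointer to a precomputed mask is passed */
    gic_raise_softirq(cpumask_of(cpu), 1);
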
+4 -8
arch/arm/include/asm/smp.h
··· 53 53 /* 54 54 * Raise an IPI cross call on CPUs in callmap. 55 55 */ 56 - extern void smp_cross_call(cpumask_t callmap); 57 - 58 - /* 59 - * Broadcast a timer interrupt to the other CPUs. 60 - */ 61 - extern void smp_send_timer(void); 56 + extern void smp_cross_call(const struct cpumask *mask); 62 57 63 58 /* 64 59 * Broadcast a clock event to other CPUs. 65 60 */ 66 - extern void smp_timer_broadcast(cpumask_t mask); 61 + extern void smp_timer_broadcast(const struct cpumask *mask); 67 62 68 63 /* 69 64 * Boot a secondary CPU, and assign it the specified idle task. ··· 97 102 extern void platform_cpu_enable(unsigned int cpu); 98 103 99 104 extern void arch_send_call_function_single_ipi(int cpu); 100 - extern void arch_send_call_function_ipi(cpumask_t mask); 105 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 106 + #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 101 107 102 108 /* 103 109 * Local timer interrupt handling function (can be IPI'ed).
+16 -30
arch/arm/kernel/smp.c
··· 326 326 per_cpu(cpu_data, cpu).idle = current; 327 327 } 328 328 329 - static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) 329 + static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg) 330 330 { 331 331 unsigned long flags; 332 332 unsigned int cpu; 333 333 334 334 local_irq_save(flags); 335 335 336 - for_each_cpu_mask(cpu, callmap) { 336 + for_each_cpu(cpu, mask) { 337 337 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 338 338 339 339 spin_lock(&ipi->lock); ··· 344 344 /* 345 345 * Call the platform specific cross-CPU call function. 346 346 */ 347 - smp_cross_call(callmap); 347 + smp_cross_call(mask); 348 348 349 349 local_irq_restore(flags); 350 350 } 351 351 352 - void arch_send_call_function_ipi(cpumask_t mask) 352 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 353 353 { 354 354 send_ipi_message(mask, IPI_CALL_FUNC); 355 355 } 356 356 357 357 void arch_send_call_function_single_ipi(int cpu) 358 358 { 359 - send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); 359 + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); 360 360 } 361 361 362 362 void show_ipi_list(struct seq_file *p) ··· 498 498 499 499 void smp_send_reschedule(int cpu) 500 500 { 501 - send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); 501 + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); 502 502 } 503 503 504 - void smp_send_timer(void) 505 - { 506 - cpumask_t mask = cpu_online_map; 507 - cpu_clear(smp_processor_id(), mask); 508 - send_ipi_message(mask, IPI_TIMER); 509 - } 510 - 511 - void smp_timer_broadcast(cpumask_t mask) 504 + void smp_timer_broadcast(const struct cpumask *mask) 512 505 { 513 506 send_ipi_message(mask, IPI_TIMER); 514 507 } ··· 510 517 { 511 518 cpumask_t mask = cpu_online_map; 512 519 cpu_clear(smp_processor_id(), mask); 513 - send_ipi_message(mask, IPI_CPU_STOP); 520 + send_ipi_message(&mask, IPI_CPU_STOP); 514 521 } 515 522 516 523 /* ··· 521 528 return -EINVAL; 522 529 } 523 530 524 - static int 525 - on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask) 531 + static void 532 + on_each_cpu_mask(void (*func)(void *), void *info, int wait, 533 + const struct cpumask *mask) 526 534 { 527 - int ret = 0; 528 - 529 535 preempt_disable(); 530 536 531 - ret = smp_call_function_mask(mask, func, info, wait); 532 - if (cpu_isset(smp_processor_id(), mask)) 537 + smp_call_function_many(mask, func, info, wait); 538 + if (cpumask_test_cpu(smp_processor_id(), mask)) 533 539 func(info); 534 540 535 541 preempt_enable(); 536 - 537 - return ret; 538 542 } 539 543 540 544 /**********************************************************************/ ··· 592 602 593 603 void flush_tlb_mm(struct mm_struct *mm) 594 604 { 595 - cpumask_t mask = mm->cpu_vm_mask; 596 - 597 - on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask); 605 + on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask); 598 606 } 599 607 600 608 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 601 609 { 602 - cpumask_t mask = vma->vm_mm->cpu_vm_mask; 603 610 struct tlb_args ta; 604 611 605 612 ta.ta_vma = vma; 606 613 ta.ta_start = uaddr; 607 614 608 - on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask); 615 + on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask); 609 616 } 610 617 611 618 void flush_tlb_kernel_page(unsigned long kaddr) ··· 617 630 void flush_tlb_range(struct vm_area_struct *vma, 618 631 unsigned long start, unsigned long end) 619 632 { 620 - cpumask_t mask = vma->vm_mm->cpu_vm_mask; 621 633 struct tlb_args ta; 622 634 623 635 ta.ta_vma = vma; 624 636 ta.ta_start = start; 625 637 ta.ta_end = end; 626 638 627 - on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask); 639 + on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask); 628 640 } 629 641 630 642 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
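
This hunk is a wholesale move from the old by-value cpumask helpers to the pointer-based API; smp_call_function_mask() is gone, so on_each_cpu_mask() is rebuilt on smp_call_function_many(). The recurring substitutions, collected as a quick reference (the right-hand forms take a const struct cpumask * where the left took a cpumask_t by value; illustrative, not exhaustive):

    for_each_cpu_mask(cpu, callmap)        ->  for_each_cpu(cpu, mask)
    cpumask_of_cpu(cpu)                    ->  cpumask_of(cpu)
    cpu_isset(cpu, mask)                   ->  cpumask_test_cpu(cpu, &mask)
    smp_call_function_mask(mask, f, i, w)  ->  smp_call_function_many(&mask, f, i, w)
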
+61 -8
arch/arm/mach-ep93xx/clock.c
··· 21 21 #include <asm/div64.h> 22 22 #include <mach/hardware.h> 23 23 24 + 25 + /* 26 + * The EP93xx has two external crystal oscillators. To generate the 27 + * required high-frequency clocks, the processor uses two phase-locked- 28 + * loops (PLLs) to multiply the incoming external clock signal to much 29 + * higher frequencies that are then divided down by programmable dividers 30 + * to produce the needed clocks. The PLLs operate independently of one 31 + * another. 32 + */ 33 + #define EP93XX_EXT_CLK_RATE 14745600 34 + #define EP93XX_EXT_RTC_RATE 32768 35 + 36 + 24 37 struct clk { 25 38 unsigned long rate; 26 39 int users; 40 + int sw_locked; 27 41 u32 enable_reg; 28 42 u32 enable_mask; 43 + 44 + unsigned long (*get_rate)(struct clk *clk); 29 45 }; 30 46 31 - static struct clk clk_uart = { 32 - .rate = 14745600, 47 + 48 + static unsigned long get_uart_rate(struct clk *clk); 49 + 50 + 51 + static struct clk clk_uart1 = { 52 + .sw_locked = 1, 53 + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, 54 + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U1EN, 55 + .get_rate = get_uart_rate, 56 + }; 57 + static struct clk clk_uart2 = { 58 + .sw_locked = 1, 59 + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, 60 + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U2EN, 61 + .get_rate = get_uart_rate, 62 + }; 63 + static struct clk clk_uart3 = { 64 + .sw_locked = 1, 65 + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, 66 + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U3EN, 67 + .get_rate = get_uart_rate, 33 68 }; 34 69 static struct clk clk_pll1; 35 70 static struct clk clk_f; ··· 130 95 { .dev_id = dev, .con_id = con, .clk = ck } 131 96 132 97 static struct clk_lookup clocks[] = { 133 - INIT_CK("apb:uart1", NULL, &clk_uart), 134 - INIT_CK("apb:uart2", NULL, &clk_uart), 135 - INIT_CK("apb:uart3", NULL, &clk_uart), 98 + INIT_CK("apb:uart1", NULL, &clk_uart1), 99 + INIT_CK("apb:uart2", NULL, &clk_uart2), 100 + INIT_CK("apb:uart3", NULL, &clk_uart3), 136 101 INIT_CK(NULL, "pll1", &clk_pll1), 137 102 INIT_CK(NULL, "fclk", &clk_f), 138 103 INIT_CK(NULL, "hclk", &clk_h), ··· 160 125 u32 value; 161 126 162 127 value = __raw_readl(clk->enable_reg); 128 + if (clk->sw_locked) 129 + __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); 163 130 __raw_writel(value | clk->enable_mask, clk->enable_reg); 164 131 } 165 132 ··· 175 138 u32 value; 176 139 177 140 value = __raw_readl(clk->enable_reg); 141 + if (clk->sw_locked) 142 + __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); 178 143 __raw_writel(value & ~clk->enable_mask, clk->enable_reg); 179 144 } 180 145 } 181 146 EXPORT_SYMBOL(clk_disable); 182 147 148 + static unsigned long get_uart_rate(struct clk *clk) 149 + { 150 + u32 value; 151 + 152 + value = __raw_readl(EP93XX_SYSCON_CLOCK_CONTROL); 153 + if (value & EP93XX_SYSCON_CLOCK_UARTBAUD) 154 + return EP93XX_EXT_CLK_RATE; 155 + else 156 + return EP93XX_EXT_CLK_RATE / 2; 157 + } 158 + 183 159 unsigned long clk_get_rate(struct clk *clk) 184 160 { 161 + if (clk->get_rate) 162 + return clk->get_rate(clk); 163 + 185 164 return clk->rate; 186 165 } 187 166 EXPORT_SYMBOL(clk_get_rate); ··· 215 162 unsigned long long rate; 216 163 int i; 217 164 218 - rate = 14745600; 165 + rate = EP93XX_EXT_CLK_RATE; 219 166 rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */ 220 167 rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */ 221 168 do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */ ··· 248 195 249 196 value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1); 250 197 if (!(value & 0x00800000)) { /* PLL1 bypassed? */ 251 - clk_pll1.rate = 14745600; 198 + clk_pll1.rate = EP93XX_EXT_CLK_RATE; 252 199 } else { 253 200 clk_pll1.rate = calc_pll_rate(value); 254 201 } ··· 259 206 260 207 value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2); 261 208 if (!(value & 0x00080000)) { /* PLL2 bypassed? */ 262 - clk_pll2.rate = 14745600; 209 + clk_pll2.rate = EP93XX_EXT_CLK_RATE; 263 210 } else if (value & 0x00040000) { /* PLL2 enabled? */ 264 211 clk_pll2.rate = calc_pll_rate(value); 265 212 } else {
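
The three UARTs now have individually gated, software-locked clocks whose rate is read back from EP93XX_SYSCON_CLOCK_CONTROL. A hypothetical consumer sketch, assuming a device whose dev_id matches the "apb:uart1" lookup above (not part of this patch):

    static int uart_clk_example(struct device *dev)
    {
            struct clk *clk = clk_get(dev, NULL);   /* matched via clkdev dev_id */

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            clk_enable(clk);        /* writes the 0xaa swlock key, then sets U1EN */

            /* get_uart_rate(): 14745600 Hz, or 7372800 Hz if UARTBAUD is clear */
            dev_info(dev, "uart clock: %lu Hz\n", clk_get_rate(clk));

            clk_disable(clk);
            clk_put(clk);
            return 0;
    }
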
+4 -1
arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
··· 159 159 #define EP93XX_SYSCON_CLOCK_SET1 EP93XX_SYSCON_REG(0x20) 160 160 #define EP93XX_SYSCON_CLOCK_SET2 EP93XX_SYSCON_REG(0x24) 161 161 #define EP93XX_SYSCON_DEVICE_CONFIG EP93XX_SYSCON_REG(0x80) 162 - #define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE 0x00800000 162 + #define EP93XX_SYSCON_DEVICE_CONFIG_U3EN (1<<24) 163 + #define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE (1<<23) 164 + #define EP93XX_SYSCON_DEVICE_CONFIG_U2EN (1<<20) 165 + #define EP93XX_SYSCON_DEVICE_CONFIG_U1EN (1<<18) 163 166 #define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0) 164 167 165 168 #define EP93XX_WATCHDOG_BASE (EP93XX_APB_VIRT_BASE + 0x00140000)
+1 -1
arch/arm/mach-integrator/core.c
··· 121 121 .rate = 14745600, 122 122 }; 123 123 124 - static struct clk_lookup lookups[] __initdata = { 124 + static struct clk_lookup lookups[] = { 125 125 { /* UART0 */ 126 126 .dev_id = "mb:16", 127 127 .clk = &uartclk,
+1 -1
arch/arm/mach-l7200/include/mach/sys-clock.h
··· 18 18 19 19 /* IO_START and IO_BASE are defined in hardware.h */ 20 20 21 - #define SYS_CLOCK_START (IO_START + SYS_CLCOK_OFF) /* Physical address */ 21 + #define SYS_CLOCK_START (IO_START + SYS_CLOCK_OFF) /* Physical address */ 22 22 #define SYS_CLOCK_BASE (IO_BASE + SYS_CLOCK_OFF) /* Virtual address */ 23 23 24 24 /* Define the interface to the SYS_CLOCK */
+5 -5
arch/arm/mach-omap2/clock24xx.c
··· 103 103 CLK(NULL, "mdm_ick", &mdm_ick, CK_243X), 104 104 CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X), 105 105 /* DSS domain clocks */ 106 - CLK(NULL, "dss_ick", &dss_ick, CK_243X | CK_242X), 107 - CLK(NULL, "dss1_fck", &dss1_fck, CK_243X | CK_242X), 108 - CLK(NULL, "dss2_fck", &dss2_fck, CK_243X | CK_242X), 109 - CLK(NULL, "dss_54m_fck", &dss_54m_fck, CK_243X | CK_242X), 106 + CLK("omapfb", "ick", &dss_ick, CK_243X | CK_242X), 107 + CLK("omapfb", "dss1_fck", &dss1_fck, CK_243X | CK_242X), 108 + CLK("omapfb", "dss2_fck", &dss2_fck, CK_243X | CK_242X), 109 + CLK("omapfb", "tv_fck", &dss_54m_fck, CK_243X | CK_242X), 110 110 /* L3 domain clocks */ 111 111 CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X | CK_242X), 112 112 CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X | CK_242X), ··· 206 206 CLK(NULL, "aes_ick", &aes_ick, CK_243X | CK_242X), 207 207 CLK(NULL, "pka_ick", &pka_ick, CK_243X | CK_242X), 208 208 CLK(NULL, "usb_fck", &usb_fck, CK_243X | CK_242X), 209 - CLK(NULL, "usbhs_ick", &usbhs_ick, CK_243X), 209 + CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X), 210 210 CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X), 211 211 CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X), 212 212 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X),
+6 -6
arch/arm/mach-omap2/clock34xx.c
··· 157 157 CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck, CK_343X), 158 158 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck, CK_343X), 159 159 CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X), 160 - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick, CK_343X), 160 + CLK("musb_hdrc", "ick", &hsotgusb_ick, CK_343X), 161 161 CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X), 162 162 CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X), 163 163 CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X), ··· 197 197 CLK("omap_rng", "ick", &rng_ick, CK_343X), 198 198 CLK(NULL, "sha11_ick", &sha11_ick, CK_343X), 199 199 CLK(NULL, "des1_ick", &des1_ick, CK_343X), 200 - CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck, CK_343X), 201 - CLK(NULL, "dss_tv_fck", &dss_tv_fck, CK_343X), 202 - CLK(NULL, "dss_96m_fck", &dss_96m_fck, CK_343X), 203 - CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck, CK_343X), 204 - CLK(NULL, "dss_ick", &dss_ick, CK_343X), 200 + CLK("omapfb", "dss1_fck", &dss1_alwon_fck, CK_343X), 201 + CLK("omapfb", "tv_fck", &dss_tv_fck, CK_343X), 202 + CLK("omapfb", "video_fck", &dss_96m_fck, CK_343X), 203 + CLK("omapfb", "dss2_fck", &dss2_alwon_fck, CK_343X), 204 + CLK("omapfb", "ick", &dss_ick, CK_343X), 205 205 CLK(NULL, "cam_mclk", &cam_mclk, CK_343X), 206 206 CLK(NULL, "cam_ick", &cam_ick, CK_343X), 207 207 CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),
+6 -6
arch/arm/mach-omap2/clock34xx.h
··· 2182 2182 2183 2183 static struct clk gpio1_dbck = { 2184 2184 .name = "gpio1_dbck", 2185 - .ops = &clkops_omap2_dflt_wait, 2185 + .ops = &clkops_omap2_dflt, 2186 2186 .parent = &wkup_32k_fck, 2187 2187 .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), 2188 2188 .enable_bit = OMAP3430_EN_GPIO1_SHIFT, ··· 2427 2427 2428 2428 static struct clk gpio6_dbck = { 2429 2429 .name = "gpio6_dbck", 2430 - .ops = &clkops_omap2_dflt_wait, 2430 + .ops = &clkops_omap2_dflt, 2431 2431 .parent = &per_32k_alwon_fck, 2432 2432 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2433 2433 .enable_bit = OMAP3430_EN_GPIO6_SHIFT, ··· 2437 2437 2438 2438 static struct clk gpio5_dbck = { 2439 2439 .name = "gpio5_dbck", 2440 - .ops = &clkops_omap2_dflt_wait, 2440 + .ops = &clkops_omap2_dflt, 2441 2441 .parent = &per_32k_alwon_fck, 2442 2442 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2443 2443 .enable_bit = OMAP3430_EN_GPIO5_SHIFT, ··· 2447 2447 2448 2448 static struct clk gpio4_dbck = { 2449 2449 .name = "gpio4_dbck", 2450 - .ops = &clkops_omap2_dflt_wait, 2450 + .ops = &clkops_omap2_dflt, 2451 2451 .parent = &per_32k_alwon_fck, 2452 2452 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2453 2453 .enable_bit = OMAP3430_EN_GPIO4_SHIFT, ··· 2457 2457 2458 2458 static struct clk gpio3_dbck = { 2459 2459 .name = "gpio3_dbck", 2460 - .ops = &clkops_omap2_dflt_wait, 2460 + .ops = &clkops_omap2_dflt, 2461 2461 .parent = &per_32k_alwon_fck, 2462 2462 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2463 2463 .enable_bit = OMAP3430_EN_GPIO3_SHIFT, ··· 2467 2467 2468 2468 static struct clk gpio2_dbck = { 2469 2469 .name = "gpio2_dbck", 2470 - .ops = &clkops_omap2_dflt_wait, 2470 + .ops = &clkops_omap2_dflt, 2471 2471 .parent = &per_32k_alwon_fck, 2472 2472 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2473 2473 .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
+4 -2
arch/arm/mach-omap2/devices.c
··· 354 354 platform_device_register(&omap2_mcspi1); 355 355 platform_device_register(&omap2_mcspi2); 356 356 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) 357 - platform_device_register(&omap2_mcspi3); 357 + if (cpu_is_omap2430() || cpu_is_omap343x()) 358 + platform_device_register(&omap2_mcspi3); 358 359 #endif 359 360 #ifdef CONFIG_ARCH_OMAP3 360 - platform_device_register(&omap2_mcspi4); 361 + if (cpu_is_omap343x()) 362 + platform_device_register(&omap2_mcspi4); 361 363 #endif 362 364 } 363 365
+1 -1
arch/arm/mach-omap2/prm-regbits-34xx.h
··· 409 409 /* PM_PREPWSTST_CAM specific bits */ 410 410 411 411 /* PM_PWSTCTRL_USBHOST specific bits */ 412 - #define OMAP3430ES2_SAVEANDRESTORE_SHIFT (1 << 4) 412 + #define OMAP3430ES2_SAVEANDRESTORE_SHIFT 4 413 413 414 414 /* RM_RSTST_PER specific bits */ 415 415
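
The _SHIFT suffix denotes a bit position, so the old value of (1 << 4) would have turned a caller's shift into bit 16 instead of bit 4. Intended usage, for illustration only (not a hunk from this merge):

    v |= 1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT;     /* sets bit 4, as the name implies */
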
+1 -1
arch/arm/mach-omap2/usb-tusb6010.c
··· 187 187 unsigned sysclk_ps; 188 188 int status; 189 189 190 - if (!refclk_psec || sysclk_ps == 0) 190 + if (!refclk_psec || fclk_ps == 0) 191 191 return -ENODEV; 192 192 193 193 sysclk_ps = is_refclk ? refclk_psec : TUSB6010_OSCCLK_60;
+1
arch/arm/mach-pxa/viper.c
··· 46 46 #include <mach/audio.h> 47 47 #include <mach/pxafb.h> 48 48 #include <mach/i2c.h> 49 + #include <mach/regs-uart.h> 49 50 #include <mach/viper.h> 50 51 51 52 #include <asm/setup.h>
-8
arch/arm/mach-realview/core.c
··· 750 750 { 751 751 u32 val; 752 752 753 - #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 754 - /* 755 - * The dummy clock device has to be registered before the main device 756 - * so that the latter will broadcast the clock events 757 - */ 758 - local_timer_setup(); 759 - #endif 760 - 761 753 /* 762 754 * set clock frequency: 763 755 * REALVIEW_REFCLK is 32KHz
+2 -9
arch/arm/mach-realview/include/mach/smp.h
··· 15 15 /* 16 16 * We use IRQ1 as the IPI 17 17 */ 18 - static inline void smp_cross_call(cpumask_t callmap) 18 + static inline void smp_cross_call(const struct cpumask *mask) 19 19 { 20 - gic_raise_softirq(callmap, 1); 21 - } 22 - 23 - /* 24 - * Do nothing on MPcore. 25 - */ 26 - static inline void smp_cross_call_done(cpumask_t callmap) 27 - { 20 + gic_raise_softirq(mask, 1); 28 21 } 29 22 30 23 #endif
+4 -2
arch/arm/mach-realview/localtimer.c
··· 189 189 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); 190 190 191 191 clk->name = "dummy_timer"; 192 - clk->features = CLOCK_EVT_FEAT_DUMMY; 193 - clk->rating = 200; 192 + clk->features = CLOCK_EVT_FEAT_ONESHOT | 193 + CLOCK_EVT_FEAT_PERIODIC | 194 + CLOCK_EVT_FEAT_DUMMY; 195 + clk->rating = 400; 194 196 clk->mult = 1; 195 197 clk->set_mode = dummy_timer_set_mode; 196 198 clk->broadcast = smp_timer_broadcast;
+3 -12
arch/arm/mach-realview/platsmp.c
··· 78 78 trace_hardirqs_off(); 79 79 80 80 /* 81 - * the primary core may have used a "cross call" soft interrupt 82 - * to get this processor out of WFI in the BootMonitor - make 83 - * sure that we are no longer being sent this soft interrupt 84 - */ 85 - smp_cross_call_done(cpumask_of_cpu(cpu)); 86 - 87 - /* 88 81 * if any interrupts are already enabled for the primary 89 82 * core (e.g. timer irq), then they will not have been enabled 90 83 * for us: do so ··· 129 136 * Use smp_cross_call() for this, since there's little 130 137 * point duplicating the code here 131 138 */ 132 - smp_cross_call(cpumask_of_cpu(cpu)); 139 + smp_cross_call(cpumask_of(cpu)); 133 140 134 141 timeout = jiffies + (1 * HZ); 135 142 while (time_before(jiffies, timeout)) { ··· 217 224 if (max_cpus > ncores) 218 225 max_cpus = ncores; 219 226 220 - #ifdef CONFIG_LOCAL_TIMERS 227 + #if defined(CONFIG_LOCAL_TIMERS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) 221 228 /* 222 - * Enable the local timer for primary CPU. If the device is 223 - * dummy (!CONFIG_LOCAL_TIMERS), it was already registers in 224 - * realview_timer_init 229 + * Enable the local timer or broadcast device for the boot CPU. 225 230 */ 226 231 local_timer_setup(); 227 232 #endif
+1 -2
arch/arm/mach-s3c2410/mach-bast.c
··· 588 588 589 589 s3c_device_nand.dev.platform_data = &bast_nand_info; 590 590 591 - s3c_i2c0_set_platdata(&bast_i2c_info); 592 - 593 591 s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc)); 594 592 s3c24xx_init_clocks(0); 595 593 s3c24xx_init_uarts(bast_uartcfgs, ARRAY_SIZE(bast_uartcfgs)); ··· 600 602 sysdev_class_register(&bast_pm_sysclass); 601 603 sysdev_register(&bast_pm_sysdev); 602 604 605 + s3c_i2c0_set_platdata(&bast_i2c_info); 603 606 s3c24xx_fb_set_platdata(&bast_fb_info); 604 607 platform_add_devices(bast_devices, ARRAY_SIZE(bast_devices)); 605 608
+1 -1
arch/arm/mach-versatile/core.c
··· 413 413 .rate = 24000000, 414 414 }; 415 415 416 - static struct clk_lookup lookups[] __initdata = { 416 + static struct clk_lookup lookups[] = { 417 417 { /* UART0 */ 418 418 .dev_id = "dev:f1", 419 419 .clk = &ref24_clk,
+4
arch/arm/nwfpe/fpa11.h
··· 114 114 extern unsigned int DoubleCPDO(struct roundingData *roundData, 115 115 const unsigned int opcode, FPREG * rFd); 116 116 117 + /* extneded_cpdo.c */ 118 + extern unsigned int ExtendedCPDO(struct roundingData *roundData, 119 + const unsigned int opcode, FPREG * rFd); 120 + 117 121 #endif
-4
arch/arm/nwfpe/fpa11_cprt.c
··· 27 27 #include "fpmodule.inl" 28 28 #include "softfloat.h" 29 29 30 - #ifdef CONFIG_FPE_NWFPE_XP 31 - extern flag floatx80_is_nan(floatx80); 32 - #endif 33 - 34 30 unsigned int PerformFLT(const unsigned int opcode); 35 31 unsigned int PerformFIX(const unsigned int opcode); 36 32
+2
arch/arm/nwfpe/softfloat.h
··· 226 226 char floatx80_lt_quiet( floatx80, floatx80 ); 227 227 char floatx80_is_signaling_nan( floatx80 ); 228 228 229 + extern flag floatx80_is_nan(floatx80); 230 + 229 231 #endif 230 232 231 233 static inline flag extractFloat32Sign(float32 a)
+3 -2
arch/arm/plat-omap/fb.c
··· 206 206 config_invalid = 1; 207 207 return; 208 208 } 209 - if (rg.paddr) 209 + if (rg.paddr) { 210 210 reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT); 211 - reserved += rg.size; 211 + reserved += rg.size; 212 + } 212 213 omapfb_config.mem_desc.region[i] = rg; 213 214 configured_regions++; 214 215 }
+1 -1
arch/arm/plat-omap/gpio.c
··· 307 307 return 0; 308 308 if (cpu_is_omap24xx() && gpio < 128) 309 309 return 0; 310 - if (cpu_is_omap34xx() && gpio < 160) 310 + if (cpu_is_omap34xx() && gpio < 192) 311 311 return 0; 312 312 return -1; 313 313 }
-2
arch/arm/plat-s3c/clock.c
··· 306 306 307 307 int s3c24xx_register_clock(struct clk *clk) 308 308 { 309 - clk->owner = THIS_MODULE; 310 - 311 309 if (clk->enable == NULL) 312 310 clk->enable = clk_null_enable; 313 311
+1 -1
arch/arm/plat-s3c24xx/dma.c
··· 1235 1235 1236 1236 EXPORT_SYMBOL(s3c2410_dma_getposition); 1237 1237 1238 - static struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) 1238 + static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) 1239 1239 { 1240 1240 return container_of(dev, struct s3c2410_dma_chan, dev); 1241 1241 }
+1 -1
arch/arm/plat-s3c64xx/gpiolib.c
··· 57 57 #if 1 58 58 #define gpio_dbg(x...) do { } while(0) 59 59 #else 60 - #define gpio_dbg(x...) printk(KERN_DEBUG ## x) 60 + #define gpio_dbg(x...) printk(KERN_DEBUG x) 61 61 #endif 62 62 63 63 /* The s3c64xx_gpiolib_4bit routines are to control the gpio banks where
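
KERN_DEBUG is a string-literal prefix, so it is joined to the format string by ordinary adjacent-literal concatenation; the token-paste operator ## cannot glue a string literal to the expansion of x and breaks the build whenever the debug branch is selected. With the fix, a call such as the following (hypothetical arguments) expands cleanly:

    gpio_dbg("base %p, offset %d\n", base, offset);
    /* becomes: printk(KERN_DEBUG "base %p, offset %d\n", base, offset); */
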
+10 -10
arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h
··· 61 61 #define S3C64XX_GPH7_ADDR_CF1 (0x06 << 28) 62 62 #define S3C64XX_GPH7_EINT_G6_7 (0x07 << 28) 63 63 64 - #define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 32) 65 - #define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 32) 66 - #define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 32) 67 - #define S3C64XX_GPH8_ADDR_CF2 (0x06 << 32) 68 - #define S3C64XX_GPH8_EINT_G6_8 (0x07 << 32) 64 + #define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 0) 65 + #define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 0) 66 + #define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 0) 67 + #define S3C64XX_GPH8_ADDR_CF2 (0x06 << 0) 68 + #define S3C64XX_GPH8_EINT_G6_8 (0x07 << 0) 69 69 70 - #define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 36) 71 - #define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 36) 72 - #define S3C64XX_GPH9_I2S_V40_DI (0x05 << 36) 73 - #define S3C64XX_GPH9_EINT_G6_9 (0x07 << 36) 74 - 70 + #define S3C64XX_GPH9_OUTPUT (0x01 << 4) 71 + #define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 4) 72 + #define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 4) 73 + #define S3C64XX_GPH9_I2S_V40_DI (0x05 << 4) 74 + #define S3C64XX_GPH9_EINT_G6_9 (0x07 << 4)
+1 -1
drivers/input/serio/ambakmi.c
··· 107 107 clk_disable(kmi->clk); 108 108 } 109 109 110 - static int amba_kmi_probe(struct amba_device *dev, void *id) 110 + static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id) 111 111 { 112 112 struct amba_kmi_port *kmi; 113 113 struct serio *io;
+1 -1
drivers/mmc/host/mmci.c
··· 490 490 mod_timer(&host->timer, jiffies + HZ); 491 491 } 492 492 493 - static int __devinit mmci_probe(struct amba_device *dev, void *id) 493 + static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) 494 494 { 495 495 struct mmc_platform_data *plat = dev->dev.platform_data; 496 496 struct mmci_host *host;
+1 -1
drivers/rtc/rtc-pl030.c
··· 102 102 .set_alarm = pl030_set_alarm, 103 103 }; 104 104 105 - static int pl030_probe(struct amba_device *dev, void *id) 105 + static int pl030_probe(struct amba_device *dev, struct amba_id *id) 106 106 { 107 107 struct pl030_rtc *rtc; 108 108 int ret;
+1 -1
drivers/rtc/rtc-pl031.c
··· 127 127 return 0; 128 128 } 129 129 130 - static int pl031_probe(struct amba_device *adev, void *id) 130 + static int pl031_probe(struct amba_device *adev, struct amba_id *id) 131 131 { 132 132 int ret; 133 133 struct pl031_local *ldata;
+1 -1
drivers/serial/amba-pl010.c
··· 665 665 .cons = AMBA_CONSOLE, 666 666 }; 667 667 668 - static int pl010_probe(struct amba_device *dev, void *id) 668 + static int pl010_probe(struct amba_device *dev, struct amba_id *id) 669 669 { 670 670 struct uart_amba_port *uap; 671 671 void __iomem *base;
+1 -1
drivers/serial/amba-pl011.c
··· 729 729 .cons = AMBA_CONSOLE, 730 730 }; 731 731 732 - static int pl011_probe(struct amba_device *dev, void *id) 732 + static int pl011_probe(struct amba_device *dev, struct amba_id *id) 733 733 { 734 734 struct uart_amba_port *uap; 735 735 void __iomem *base;
+1 -1
drivers/video/amba-clcd.c
··· 437 437 return ret; 438 438 } 439 439 440 - static int clcdfb_probe(struct amba_device *dev, void *id) 440 + static int clcdfb_probe(struct amba_device *dev, struct amba_id *id) 441 441 { 442 442 struct clcd_board *board = dev->dev.platform_data; 443 443 struct clcd_fb *fb;
+8 -6
drivers/video/omap/dispc.c
··· 880 880 881 881 static int get_dss_clocks(void) 882 882 { 883 - if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) { 884 - dev_err(dispc.fbdev->dev, "can't get dss_ick\n"); 883 + dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick"); 884 + if (IS_ERR(dispc.dss_ick)) { 885 + dev_err(dispc.fbdev->dev, "can't get ick\n"); 885 886 return PTR_ERR(dispc.dss_ick); 886 887 } 887 888 888 - if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) { 889 + dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck"); 890 + if (IS_ERR(dispc.dss1_fck)) { 889 891 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); 890 892 clk_put(dispc.dss_ick); 891 893 return PTR_ERR(dispc.dss1_fck); 892 894 } 893 895 894 - if (IS_ERR((dispc.dss_54m_fck = 895 - clk_get(dispc.fbdev->dev, "dss_54m_fck")))) { 896 - dev_err(dispc.fbdev->dev, "can't get dss_54m_fck\n"); 896 + dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck"); 897 + if (IS_ERR(dispc.dss_54m_fck)) { 898 + dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); 897 899 clk_put(dispc.dss_ick); 898 900 clk_put(dispc.dss1_fck); 899 901 return PTR_ERR(dispc.dss_54m_fck);
+5 -3
drivers/video/omap/rfbi.c
··· 83 83 84 84 static int rfbi_get_clocks(void) 85 85 { 86 - if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) { 87 - dev_err(rfbi.fbdev->dev, "can't get dss_ick\n"); 86 + rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick"); 87 + if (IS_ERR(rfbi.dss_ick)) { 88 + dev_err(rfbi.fbdev->dev, "can't get ick\n"); 88 89 return PTR_ERR(rfbi.dss_ick); 89 90 } 90 91 91 - if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) { 92 + rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck"); 93 + if (IS_ERR(rfbi.dss1_fck)) { 92 94 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); 93 95 clk_put(rfbi.dss_ick); 94 96 return PTR_ERR(rfbi.dss1_fck);
+1 -1
include/linux/amba/bus.h
··· 28 28 29 29 struct amba_driver { 30 30 struct device_driver drv; 31 - int (*probe)(struct amba_device *, void *); 31 + int (*probe)(struct amba_device *, struct amba_id *); 32 32 int (*remove)(struct amba_device *); 33 33 void (*shutdown)(struct amba_device *); 34 34 int (*suspend)(struct amba_device *, pm_message_t);
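
Passing the matched entry as a struct amba_id * spares every probe routine a cast and gives it typed access to the match data. A hypothetical driver skeleton under that assumption (the field names and ID value are illustrative, not taken from this merge):

    static int foo_probe(struct amba_device *dev, struct amba_id *id)
    {
            /* revision-specific setup could key off the matched ID */
            dev_info(&dev->dev, "matched %08x/%08x\n", id->id, id->mask);
            return 0;
    }

    static struct amba_id foo_ids[] = {
            { .id = 0x00041010, .mask = 0x000fffff },       /* PL010-style ID, for illustration */
            { 0, 0 },
    };

    static struct amba_driver foo_driver = {
            .drv            = { .name = "foo" },
            .id_table       = foo_ids,
            .probe          = foo_probe,
    };
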
+26
include/linux/mmzone.h
··· 1097 1097 #define pfn_valid_within(pfn) (1) 1098 1098 #endif 1099 1099 1100 + #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL 1101 + /* 1102 + * pfn_valid() is meant to be able to tell if a given PFN has valid memmap 1103 + * associated with it or not. In FLATMEM, it is expected that holes always 1104 + * have valid memmap as long as there is valid PFNs either side of the hole. 1105 + * In SPARSEMEM, it is assumed that a valid section has a memmap for the 1106 + * entire section. 1107 + * 1108 + * However, an ARM, and maybe other embedded architectures in the future 1109 + * free memmap backing holes to save memory on the assumption the memmap is 1110 + * never used. The page_zone linkages are then broken even though pfn_valid() 1111 + * returns true. A walker of the full memmap must then do this additional 1112 + * check to ensure the memmap they are looking at is sane by making sure 1113 + * the zone and PFN linkages are still valid. This is expensive, but walkers 1114 + * of the full memmap are extremely rare. 1115 + */ 1116 + int memmap_valid_within(unsigned long pfn, 1117 + struct page *page, struct zone *zone); 1118 + #else 1119 + static inline int memmap_valid_within(unsigned long pfn, 1120 + struct page *page, struct zone *zone) 1121 + { 1122 + return 1; 1123 + } 1124 + #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ 1125 + 1100 1126 #endif /* !__GENERATING_BOUNDS.H */ 1101 1127 #endif /* !__ASSEMBLY__ */ 1102 1128 #endif /* _LINUX_MMZONE_H */
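
A walker of the full memmap is expected to pair the usual pfn_valid() test with the new helper, exactly as the vmstat change further down does. A minimal sketch of the pattern (the loop bounds and zone are placeholders):

    for (pfn = start_pfn; pfn < end_pfn; pfn++) {
            struct page *page;

            if (!pfn_valid(pfn))
                    continue;
            page = pfn_to_page(pfn);

            /* skip pages whose backing memmap sits under an unexpected hole */
            if (!memmap_valid_within(pfn, page, zone))
                    continue;

            /* ... inspect page ... */
    }
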
+15
mm/mmzone.c
··· 6 6 7 7 8 8 #include <linux/stddef.h> 9 + #include <linux/mm.h> 9 10 #include <linux/mmzone.h> 10 11 #include <linux/module.h> 11 12 ··· 73 72 *zone = zonelist_zone(z); 74 73 return z; 75 74 } 75 + 76 + #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL 77 + int memmap_valid_within(unsigned long pfn, 78 + struct page *page, struct zone *zone) 79 + { 80 + if (page_to_pfn(page) != pfn) 81 + return 0; 82 + 83 + if (page_zone(page) != zone) 84 + return 0; 85 + 86 + return 1; 87 + } 88 + #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+4 -15
mm/vmstat.c
··· 509 509 continue; 510 510 511 511 page = pfn_to_page(pfn); 512 - #ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES 513 - /* 514 - * Ordinarily, memory holes in flatmem still have a valid 515 - * memmap for the PFN range. However, an architecture for 516 - * embedded systems (e.g. ARM) can free up the memmap backing 517 - * holes to save memory on the assumption the memmap is 518 - * never used. The page_zone linkages are then broken even 519 - * though pfn_valid() returns true. Skip the page if the 520 - * linkages are broken. Even if this test passed, the impact 521 - * is that the counters for the movable type are off but 522 - * fragmentation monitoring is likely meaningless on small 523 - * systems. 524 - */ 525 - if (page_zone(page) != zone) 512 + 513 + /* Watch for unexpected holes punched in the memmap */ 514 + if (!memmap_valid_within(pfn, page, zone)) 526 515 continue; 527 - #endif 516 + 528 517 mtype = get_pageblock_migratetype(page); 529 518 530 519 if (mtype < MIGRATE_TYPES)
+1 -1
sound/arm/aaci.c
··· 1074 1074 return i; 1075 1075 } 1076 1076 1077 - static int __devinit aaci_probe(struct amba_device *dev, void *id) 1077 + static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id) 1078 1078 { 1079 1079 struct aaci *aaci; 1080 1080 int ret, i;