Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm: (25 commits)
[ARM] 5519/1: amba probe: pass "struct amba_id *" instead of void *
[ARM] 5517/1: integrator: don't put clock lookups in __initdata
[ARM] 5518/1: versatile: don't put clock lookups in __initdata
[ARM] mach-l7200: fix spelling of SYS_CLOCK_OFF
[ARM] Double check memmap is actually valid with a memmap has unexpected holes V2
[ARM] realview: fix broadcast tick support
[ARM] realview: remove useless smp_cross_call_done()
[ARM] smp: fix cpumask usage in ARM SMP code
[ARM] 5513/1: Eurotech VIPER SBC: fix compilation error
[ARM] 5509/1: ep93xx: clkdev enable UARTS
ARM: OMAP2/3: Change omapfb to use clkdev for dispc and rfbi, v2
ARM: OMAP3: Fix HW SAVEANDRESTORE shift define
ARM: OMAP3: Fix number of GPIO lines for 34xx
[ARM] S3C: Do not set clk->owner field if unset
[ARM] S3C2410: mach-bast.c registering i2c data too early
[ARM] S3C24XX: Fix unused code warning in arch/arm/plat-s3c24xx/dma.c
[ARM] S3C64XX: fix GPIO debug
[ARM] S3C64XX: GPIO include cleanup
[ARM] nwfpe: fix 'floatx80_is_nan' sparse warning
[ARM] nwfpe: Add declaration for ExtendedCPDO
...

+217 -164
+3 -3
arch/arm/Kconfig
··· 273 select HAVE_CLK 274 select COMMON_CLKDEV 275 select ARCH_REQUIRE_GPIOLIB 276 help 277 This enables support for the Cirrus EP93xx series of CPUs. 278 ··· 977 UNPREDICTABLE (in fact it can be predicted that it won't work 978 at all). If in doubt say Y. 979 980 - config ARCH_FLATMEM_HAS_HOLES 981 bool 982 - default y 983 - depends on FLATMEM 984 985 # Discontigmem is deprecated 986 config ARCH_DISCONTIGMEM_ENABLE
··· 273 select HAVE_CLK 274 select COMMON_CLKDEV 275 select ARCH_REQUIRE_GPIOLIB 276 + select ARCH_HAS_HOLES_MEMORYMODEL 277 help 278 This enables support for the Cirrus EP93xx series of CPUs. 279 ··· 976 UNPREDICTABLE (in fact it can be predicted that it won't work 977 at all). If in doubt say Y. 978 979 + config ARCH_HAS_HOLES_MEMORYMODEL 980 bool 981 + default n 982 983 # Discontigmem is deprecated 984 config ARCH_DISCONTIGMEM_ENABLE
+2 -2
arch/arm/common/gic.c
··· 253 } 254 255 #ifdef CONFIG_SMP 256 - void gic_raise_softirq(cpumask_t cpumask, unsigned int irq) 257 { 258 - unsigned long map = *cpus_addr(cpumask); 259 260 /* this always happens on GIC0 */ 261 writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
··· 253 } 254 255 #ifdef CONFIG_SMP 256 + void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) 257 { 258 + unsigned long map = *cpus_addr(*mask); 259 260 /* this always happens on GIC0 */ 261 writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
+1 -1
arch/arm/include/asm/hardware/gic.h
··· 36 void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start); 37 void gic_cpu_init(unsigned int gic_nr, void __iomem *base); 38 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 39 - void gic_raise_softirq(cpumask_t cpumask, unsigned int irq); 40 #endif 41 42 #endif
··· 36 void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start); 37 void gic_cpu_init(unsigned int gic_nr, void __iomem *base); 38 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 39 + void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); 40 #endif 41 42 #endif
+4 -8
arch/arm/include/asm/smp.h
··· 53 /* 54 * Raise an IPI cross call on CPUs in callmap. 55 */ 56 - extern void smp_cross_call(cpumask_t callmap); 57 - 58 - /* 59 - * Broadcast a timer interrupt to the other CPUs. 60 - */ 61 - extern void smp_send_timer(void); 62 63 /* 64 * Broadcast a clock event to other CPUs. 65 */ 66 - extern void smp_timer_broadcast(cpumask_t mask); 67 68 /* 69 * Boot a secondary CPU, and assign it the specified idle task. ··· 97 extern void platform_cpu_enable(unsigned int cpu); 98 99 extern void arch_send_call_function_single_ipi(int cpu); 100 - extern void arch_send_call_function_ipi(cpumask_t mask); 101 102 /* 103 * Local timer interrupt handling function (can be IPI'ed).
··· 53 /* 54 * Raise an IPI cross call on CPUs in callmap. 55 */ 56 + extern void smp_cross_call(const struct cpumask *mask); 57 58 /* 59 * Broadcast a clock event to other CPUs. 60 */ 61 + extern void smp_timer_broadcast(const struct cpumask *mask); 62 63 /* 64 * Boot a secondary CPU, and assign it the specified idle task. ··· 102 extern void platform_cpu_enable(unsigned int cpu); 103 104 extern void arch_send_call_function_single_ipi(int cpu); 105 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 106 + #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 107 108 /* 109 * Local timer interrupt handling function (can be IPI'ed).
+16 -30
arch/arm/kernel/smp.c
··· 326 per_cpu(cpu_data, cpu).idle = current; 327 } 328 329 - static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) 330 { 331 unsigned long flags; 332 unsigned int cpu; 333 334 local_irq_save(flags); 335 336 - for_each_cpu_mask(cpu, callmap) { 337 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 338 339 spin_lock(&ipi->lock); ··· 344 /* 345 * Call the platform specific cross-CPU call function. 346 */ 347 - smp_cross_call(callmap); 348 349 local_irq_restore(flags); 350 } 351 352 - void arch_send_call_function_ipi(cpumask_t mask) 353 { 354 send_ipi_message(mask, IPI_CALL_FUNC); 355 } 356 357 void arch_send_call_function_single_ipi(int cpu) 358 { 359 - send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); 360 } 361 362 void show_ipi_list(struct seq_file *p) ··· 498 499 void smp_send_reschedule(int cpu) 500 { 501 - send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); 502 } 503 504 - void smp_send_timer(void) 505 - { 506 - cpumask_t mask = cpu_online_map; 507 - cpu_clear(smp_processor_id(), mask); 508 - send_ipi_message(mask, IPI_TIMER); 509 - } 510 - 511 - void smp_timer_broadcast(cpumask_t mask) 512 { 513 send_ipi_message(mask, IPI_TIMER); 514 } ··· 510 { 511 cpumask_t mask = cpu_online_map; 512 cpu_clear(smp_processor_id(), mask); 513 - send_ipi_message(mask, IPI_CPU_STOP); 514 } 515 516 /* ··· 521 return -EINVAL; 522 } 523 524 - static int 525 - on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask) 526 { 527 - int ret = 0; 528 - 529 preempt_disable(); 530 531 - ret = smp_call_function_mask(mask, func, info, wait); 532 - if (cpu_isset(smp_processor_id(), mask)) 533 func(info); 534 535 preempt_enable(); 536 - 537 - return ret; 538 } 539 540 /**********************************************************************/ ··· 592 593 void flush_tlb_mm(struct mm_struct *mm) 594 { 595 - cpumask_t mask = mm->cpu_vm_mask; 596 - 597 - on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask); 598 } 599 600 void flush_tlb_page(struct 
vm_area_struct *vma, unsigned long uaddr) 601 { 602 - cpumask_t mask = vma->vm_mm->cpu_vm_mask; 603 struct tlb_args ta; 604 605 ta.ta_vma = vma; 606 ta.ta_start = uaddr; 607 608 - on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask); 609 } 610 611 void flush_tlb_kernel_page(unsigned long kaddr) ··· 617 void flush_tlb_range(struct vm_area_struct *vma, 618 unsigned long start, unsigned long end) 619 { 620 - cpumask_t mask = vma->vm_mm->cpu_vm_mask; 621 struct tlb_args ta; 622 623 ta.ta_vma = vma; 624 ta.ta_start = start; 625 ta.ta_end = end; 626 627 - on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask); 628 } 629 630 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
··· 326 per_cpu(cpu_data, cpu).idle = current; 327 } 328 329 + static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg) 330 { 331 unsigned long flags; 332 unsigned int cpu; 333 334 local_irq_save(flags); 335 336 + for_each_cpu(cpu, mask) { 337 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); 338 339 spin_lock(&ipi->lock); ··· 344 /* 345 * Call the platform specific cross-CPU call function. 346 */ 347 + smp_cross_call(mask); 348 349 local_irq_restore(flags); 350 } 351 352 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 353 { 354 send_ipi_message(mask, IPI_CALL_FUNC); 355 } 356 357 void arch_send_call_function_single_ipi(int cpu) 358 { 359 + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); 360 } 361 362 void show_ipi_list(struct seq_file *p) ··· 498 499 void smp_send_reschedule(int cpu) 500 { 501 + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); 502 } 503 504 + void smp_timer_broadcast(const struct cpumask *mask) 505 { 506 send_ipi_message(mask, IPI_TIMER); 507 } ··· 517 { 518 cpumask_t mask = cpu_online_map; 519 cpu_clear(smp_processor_id(), mask); 520 + send_ipi_message(&mask, IPI_CPU_STOP); 521 } 522 523 /* ··· 528 return -EINVAL; 529 } 530 531 + static void 532 + on_each_cpu_mask(void (*func)(void *), void *info, int wait, 533 + const struct cpumask *mask) 534 { 535 preempt_disable(); 536 537 + smp_call_function_many(mask, func, info, wait); 538 + if (cpumask_test_cpu(smp_processor_id(), mask)) 539 func(info); 540 541 preempt_enable(); 542 } 543 544 /**********************************************************************/ ··· 602 603 void flush_tlb_mm(struct mm_struct *mm) 604 { 605 + on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask); 606 } 607 608 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 609 { 610 struct tlb_args ta; 611 612 ta.ta_vma = vma; 613 ta.ta_start = uaddr; 614 615 + on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask); 616 } 617 618 void 
flush_tlb_kernel_page(unsigned long kaddr) ··· 630 void flush_tlb_range(struct vm_area_struct *vma, 631 unsigned long start, unsigned long end) 632 { 633 struct tlb_args ta; 634 635 ta.ta_vma = vma; 636 ta.ta_start = start; 637 ta.ta_end = end; 638 639 + on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask); 640 } 641 642 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+61 -8
arch/arm/mach-ep93xx/clock.c
··· 21 #include <asm/div64.h> 22 #include <mach/hardware.h> 23 24 struct clk { 25 unsigned long rate; 26 int users; 27 u32 enable_reg; 28 u32 enable_mask; 29 }; 30 31 - static struct clk clk_uart = { 32 - .rate = 14745600, 33 }; 34 static struct clk clk_pll1; 35 static struct clk clk_f; ··· 130 { .dev_id = dev, .con_id = con, .clk = ck } 131 132 static struct clk_lookup clocks[] = { 133 - INIT_CK("apb:uart1", NULL, &clk_uart), 134 - INIT_CK("apb:uart2", NULL, &clk_uart), 135 - INIT_CK("apb:uart3", NULL, &clk_uart), 136 INIT_CK(NULL, "pll1", &clk_pll1), 137 INIT_CK(NULL, "fclk", &clk_f), 138 INIT_CK(NULL, "hclk", &clk_h), ··· 160 u32 value; 161 162 value = __raw_readl(clk->enable_reg); 163 __raw_writel(value | clk->enable_mask, clk->enable_reg); 164 } 165 ··· 175 u32 value; 176 177 value = __raw_readl(clk->enable_reg); 178 __raw_writel(value & ~clk->enable_mask, clk->enable_reg); 179 } 180 } 181 EXPORT_SYMBOL(clk_disable); 182 183 unsigned long clk_get_rate(struct clk *clk) 184 { 185 return clk->rate; 186 } 187 EXPORT_SYMBOL(clk_get_rate); ··· 215 unsigned long long rate; 216 int i; 217 218 - rate = 14745600; 219 rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */ 220 rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */ 221 do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */ ··· 248 249 value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1); 250 if (!(value & 0x00800000)) { /* PLL1 bypassed? */ 251 - clk_pll1.rate = 14745600; 252 } else { 253 clk_pll1.rate = calc_pll_rate(value); 254 } ··· 259 260 value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2); 261 if (!(value & 0x00080000)) { /* PLL2 bypassed? */ 262 - clk_pll2.rate = 14745600; 263 } else if (value & 0x00040000) { /* PLL2 enabled? */ 264 clk_pll2.rate = calc_pll_rate(value); 265 } else {
··· 21 #include <asm/div64.h> 22 #include <mach/hardware.h> 23 24 + 25 + /* 26 + * The EP93xx has two external crystal oscillators. To generate the 27 + * required high-frequency clocks, the processor uses two phase-locked- 28 + * loops (PLLs) to multiply the incoming external clock signal to much 29 + * higher frequencies that are then divided down by programmable dividers 30 + * to produce the needed clocks. The PLLs operate independently of one 31 + * another. 32 + */ 33 + #define EP93XX_EXT_CLK_RATE 14745600 34 + #define EP93XX_EXT_RTC_RATE 32768 35 + 36 + 37 struct clk { 38 unsigned long rate; 39 int users; 40 + int sw_locked; 41 u32 enable_reg; 42 u32 enable_mask; 43 + 44 + unsigned long (*get_rate)(struct clk *clk); 45 }; 46 47 + 48 + static unsigned long get_uart_rate(struct clk *clk); 49 + 50 + 51 + static struct clk clk_uart1 = { 52 + .sw_locked = 1, 53 + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, 54 + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U1EN, 55 + .get_rate = get_uart_rate, 56 + }; 57 + static struct clk clk_uart2 = { 58 + .sw_locked = 1, 59 + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, 60 + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U2EN, 61 + .get_rate = get_uart_rate, 62 + }; 63 + static struct clk clk_uart3 = { 64 + .sw_locked = 1, 65 + .enable_reg = EP93XX_SYSCON_DEVICE_CONFIG, 66 + .enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U3EN, 67 + .get_rate = get_uart_rate, 68 }; 69 static struct clk clk_pll1; 70 static struct clk clk_f; ··· 95 { .dev_id = dev, .con_id = con, .clk = ck } 96 97 static struct clk_lookup clocks[] = { 98 + INIT_CK("apb:uart1", NULL, &clk_uart1), 99 + INIT_CK("apb:uart2", NULL, &clk_uart2), 100 + INIT_CK("apb:uart3", NULL, &clk_uart3), 101 INIT_CK(NULL, "pll1", &clk_pll1), 102 INIT_CK(NULL, "fclk", &clk_f), 103 INIT_CK(NULL, "hclk", &clk_h), ··· 125 u32 value; 126 127 value = __raw_readl(clk->enable_reg); 128 + if (clk->sw_locked) 129 + __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); 130 __raw_writel(value | clk->enable_mask, 
clk->enable_reg); 131 } 132 ··· 138 u32 value; 139 140 value = __raw_readl(clk->enable_reg); 141 + if (clk->sw_locked) 142 + __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); 143 __raw_writel(value & ~clk->enable_mask, clk->enable_reg); 144 } 145 } 146 EXPORT_SYMBOL(clk_disable); 147 148 + static unsigned long get_uart_rate(struct clk *clk) 149 + { 150 + u32 value; 151 + 152 + value = __raw_readl(EP93XX_SYSCON_CLOCK_CONTROL); 153 + if (value & EP93XX_SYSCON_CLOCK_UARTBAUD) 154 + return EP93XX_EXT_CLK_RATE; 155 + else 156 + return EP93XX_EXT_CLK_RATE / 2; 157 + } 158 + 159 unsigned long clk_get_rate(struct clk *clk) 160 { 161 + if (clk->get_rate) 162 + return clk->get_rate(clk); 163 + 164 return clk->rate; 165 } 166 EXPORT_SYMBOL(clk_get_rate); ··· 162 unsigned long long rate; 163 int i; 164 165 + rate = EP93XX_EXT_CLK_RATE; 166 rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */ 167 rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */ 168 do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */ ··· 195 196 value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1); 197 if (!(value & 0x00800000)) { /* PLL1 bypassed? */ 198 + clk_pll1.rate = EP93XX_EXT_CLK_RATE; 199 } else { 200 clk_pll1.rate = calc_pll_rate(value); 201 } ··· 206 207 value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2); 208 if (!(value & 0x00080000)) { /* PLL2 bypassed? */ 209 + clk_pll2.rate = EP93XX_EXT_CLK_RATE; 210 } else if (value & 0x00040000) { /* PLL2 enabled? */ 211 clk_pll2.rate = calc_pll_rate(value); 212 } else {
+4 -1
arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
··· 159 #define EP93XX_SYSCON_CLOCK_SET1 EP93XX_SYSCON_REG(0x20) 160 #define EP93XX_SYSCON_CLOCK_SET2 EP93XX_SYSCON_REG(0x24) 161 #define EP93XX_SYSCON_DEVICE_CONFIG EP93XX_SYSCON_REG(0x80) 162 - #define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE 0x00800000 163 #define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0) 164 165 #define EP93XX_WATCHDOG_BASE (EP93XX_APB_VIRT_BASE + 0x00140000)
··· 159 #define EP93XX_SYSCON_CLOCK_SET1 EP93XX_SYSCON_REG(0x20) 160 #define EP93XX_SYSCON_CLOCK_SET2 EP93XX_SYSCON_REG(0x24) 161 #define EP93XX_SYSCON_DEVICE_CONFIG EP93XX_SYSCON_REG(0x80) 162 + #define EP93XX_SYSCON_DEVICE_CONFIG_U3EN (1<<24) 163 + #define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE (1<<23) 164 + #define EP93XX_SYSCON_DEVICE_CONFIG_U2EN (1<<20) 165 + #define EP93XX_SYSCON_DEVICE_CONFIG_U1EN (1<<18) 166 #define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0) 167 168 #define EP93XX_WATCHDOG_BASE (EP93XX_APB_VIRT_BASE + 0x00140000)
+1 -1
arch/arm/mach-integrator/core.c
··· 121 .rate = 14745600, 122 }; 123 124 - static struct clk_lookup lookups[] __initdata = { 125 { /* UART0 */ 126 .dev_id = "mb:16", 127 .clk = &uartclk,
··· 121 .rate = 14745600, 122 }; 123 124 + static struct clk_lookup lookups[] = { 125 { /* UART0 */ 126 .dev_id = "mb:16", 127 .clk = &uartclk,
+1 -1
arch/arm/mach-l7200/include/mach/sys-clock.h
··· 18 19 /* IO_START and IO_BASE are defined in hardware.h */ 20 21 - #define SYS_CLOCK_START (IO_START + SYS_CLCOK_OFF) /* Physical address */ 22 #define SYS_CLOCK_BASE (IO_BASE + SYS_CLOCK_OFF) /* Virtual address */ 23 24 /* Define the interface to the SYS_CLOCK */
··· 18 19 /* IO_START and IO_BASE are defined in hardware.h */ 20 21 + #define SYS_CLOCK_START (IO_START + SYS_CLOCK_OFF) /* Physical address */ 22 #define SYS_CLOCK_BASE (IO_BASE + SYS_CLOCK_OFF) /* Virtual address */ 23 24 /* Define the interface to the SYS_CLOCK */
+5 -5
arch/arm/mach-omap2/clock24xx.c
··· 103 CLK(NULL, "mdm_ick", &mdm_ick, CK_243X), 104 CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X), 105 /* DSS domain clocks */ 106 - CLK(NULL, "dss_ick", &dss_ick, CK_243X | CK_242X), 107 - CLK(NULL, "dss1_fck", &dss1_fck, CK_243X | CK_242X), 108 - CLK(NULL, "dss2_fck", &dss2_fck, CK_243X | CK_242X), 109 - CLK(NULL, "dss_54m_fck", &dss_54m_fck, CK_243X | CK_242X), 110 /* L3 domain clocks */ 111 CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X | CK_242X), 112 CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X | CK_242X), ··· 206 CLK(NULL, "aes_ick", &aes_ick, CK_243X | CK_242X), 207 CLK(NULL, "pka_ick", &pka_ick, CK_243X | CK_242X), 208 CLK(NULL, "usb_fck", &usb_fck, CK_243X | CK_242X), 209 - CLK(NULL, "usbhs_ick", &usbhs_ick, CK_243X), 210 CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X), 211 CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X), 212 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X),
··· 103 CLK(NULL, "mdm_ick", &mdm_ick, CK_243X), 104 CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X), 105 /* DSS domain clocks */ 106 + CLK("omapfb", "ick", &dss_ick, CK_243X | CK_242X), 107 + CLK("omapfb", "dss1_fck", &dss1_fck, CK_243X | CK_242X), 108 + CLK("omapfb", "dss2_fck", &dss2_fck, CK_243X | CK_242X), 109 + CLK("omapfb", "tv_fck", &dss_54m_fck, CK_243X | CK_242X), 110 /* L3 domain clocks */ 111 CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X | CK_242X), 112 CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X | CK_242X), ··· 206 CLK(NULL, "aes_ick", &aes_ick, CK_243X | CK_242X), 207 CLK(NULL, "pka_ick", &pka_ick, CK_243X | CK_242X), 208 CLK(NULL, "usb_fck", &usb_fck, CK_243X | CK_242X), 209 + CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X), 210 CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X), 211 CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X), 212 CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X),
+6 -6
arch/arm/mach-omap2/clock34xx.c
··· 157 CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck, CK_343X), 158 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck, CK_343X), 159 CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X), 160 - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick, CK_343X), 161 CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X), 162 CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X), 163 CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X), ··· 197 CLK("omap_rng", "ick", &rng_ick, CK_343X), 198 CLK(NULL, "sha11_ick", &sha11_ick, CK_343X), 199 CLK(NULL, "des1_ick", &des1_ick, CK_343X), 200 - CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck, CK_343X), 201 - CLK(NULL, "dss_tv_fck", &dss_tv_fck, CK_343X), 202 - CLK(NULL, "dss_96m_fck", &dss_96m_fck, CK_343X), 203 - CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck, CK_343X), 204 - CLK(NULL, "dss_ick", &dss_ick, CK_343X), 205 CLK(NULL, "cam_mclk", &cam_mclk, CK_343X), 206 CLK(NULL, "cam_ick", &cam_ick, CK_343X), 207 CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),
··· 157 CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck, CK_343X), 158 CLK(NULL, "ssi_sst_fck", &ssi_sst_fck, CK_343X), 159 CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X), 160 + CLK("musb_hdrc", "ick", &hsotgusb_ick, CK_343X), 161 CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X), 162 CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X), 163 CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X), ··· 197 CLK("omap_rng", "ick", &rng_ick, CK_343X), 198 CLK(NULL, "sha11_ick", &sha11_ick, CK_343X), 199 CLK(NULL, "des1_ick", &des1_ick, CK_343X), 200 + CLK("omapfb", "dss1_fck", &dss1_alwon_fck, CK_343X), 201 + CLK("omapfb", "tv_fck", &dss_tv_fck, CK_343X), 202 + CLK("omapfb", "video_fck", &dss_96m_fck, CK_343X), 203 + CLK("omapfb", "dss2_fck", &dss2_alwon_fck, CK_343X), 204 + CLK("omapfb", "ick", &dss_ick, CK_343X), 205 CLK(NULL, "cam_mclk", &cam_mclk, CK_343X), 206 CLK(NULL, "cam_ick", &cam_ick, CK_343X), 207 CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),
+6 -6
arch/arm/mach-omap2/clock34xx.h
··· 2182 2183 static struct clk gpio1_dbck = { 2184 .name = "gpio1_dbck", 2185 - .ops = &clkops_omap2_dflt_wait, 2186 .parent = &wkup_32k_fck, 2187 .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), 2188 .enable_bit = OMAP3430_EN_GPIO1_SHIFT, ··· 2427 2428 static struct clk gpio6_dbck = { 2429 .name = "gpio6_dbck", 2430 - .ops = &clkops_omap2_dflt_wait, 2431 .parent = &per_32k_alwon_fck, 2432 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2433 .enable_bit = OMAP3430_EN_GPIO6_SHIFT, ··· 2437 2438 static struct clk gpio5_dbck = { 2439 .name = "gpio5_dbck", 2440 - .ops = &clkops_omap2_dflt_wait, 2441 .parent = &per_32k_alwon_fck, 2442 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2443 .enable_bit = OMAP3430_EN_GPIO5_SHIFT, ··· 2447 2448 static struct clk gpio4_dbck = { 2449 .name = "gpio4_dbck", 2450 - .ops = &clkops_omap2_dflt_wait, 2451 .parent = &per_32k_alwon_fck, 2452 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2453 .enable_bit = OMAP3430_EN_GPIO4_SHIFT, ··· 2457 2458 static struct clk gpio3_dbck = { 2459 .name = "gpio3_dbck", 2460 - .ops = &clkops_omap2_dflt_wait, 2461 .parent = &per_32k_alwon_fck, 2462 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2463 .enable_bit = OMAP3430_EN_GPIO3_SHIFT, ··· 2467 2468 static struct clk gpio2_dbck = { 2469 .name = "gpio2_dbck", 2470 - .ops = &clkops_omap2_dflt_wait, 2471 .parent = &per_32k_alwon_fck, 2472 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2473 .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
··· 2182 2183 static struct clk gpio1_dbck = { 2184 .name = "gpio1_dbck", 2185 + .ops = &clkops_omap2_dflt, 2186 .parent = &wkup_32k_fck, 2187 .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), 2188 .enable_bit = OMAP3430_EN_GPIO1_SHIFT, ··· 2427 2428 static struct clk gpio6_dbck = { 2429 .name = "gpio6_dbck", 2430 + .ops = &clkops_omap2_dflt, 2431 .parent = &per_32k_alwon_fck, 2432 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2433 .enable_bit = OMAP3430_EN_GPIO6_SHIFT, ··· 2437 2438 static struct clk gpio5_dbck = { 2439 .name = "gpio5_dbck", 2440 + .ops = &clkops_omap2_dflt, 2441 .parent = &per_32k_alwon_fck, 2442 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2443 .enable_bit = OMAP3430_EN_GPIO5_SHIFT, ··· 2447 2448 static struct clk gpio4_dbck = { 2449 .name = "gpio4_dbck", 2450 + .ops = &clkops_omap2_dflt, 2451 .parent = &per_32k_alwon_fck, 2452 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2453 .enable_bit = OMAP3430_EN_GPIO4_SHIFT, ··· 2457 2458 static struct clk gpio3_dbck = { 2459 .name = "gpio3_dbck", 2460 + .ops = &clkops_omap2_dflt, 2461 .parent = &per_32k_alwon_fck, 2462 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2463 .enable_bit = OMAP3430_EN_GPIO3_SHIFT, ··· 2467 2468 static struct clk gpio2_dbck = { 2469 .name = "gpio2_dbck", 2470 + .ops = &clkops_omap2_dflt, 2471 .parent = &per_32k_alwon_fck, 2472 .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), 2473 .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
+4 -2
arch/arm/mach-omap2/devices.c
··· 354 platform_device_register(&omap2_mcspi1); 355 platform_device_register(&omap2_mcspi2); 356 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) 357 - platform_device_register(&omap2_mcspi3); 358 #endif 359 #ifdef CONFIG_ARCH_OMAP3 360 - platform_device_register(&omap2_mcspi4); 361 #endif 362 } 363
··· 354 platform_device_register(&omap2_mcspi1); 355 platform_device_register(&omap2_mcspi2); 356 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) 357 + if (cpu_is_omap2430() || cpu_is_omap343x()) 358 + platform_device_register(&omap2_mcspi3); 359 #endif 360 #ifdef CONFIG_ARCH_OMAP3 361 + if (cpu_is_omap343x()) 362 + platform_device_register(&omap2_mcspi4); 363 #endif 364 } 365
+1 -1
arch/arm/mach-omap2/prm-regbits-34xx.h
··· 409 /* PM_PREPWSTST_CAM specific bits */ 410 411 /* PM_PWSTCTRL_USBHOST specific bits */ 412 - #define OMAP3430ES2_SAVEANDRESTORE_SHIFT (1 << 4) 413 414 /* RM_RSTST_PER specific bits */ 415
··· 409 /* PM_PREPWSTST_CAM specific bits */ 410 411 /* PM_PWSTCTRL_USBHOST specific bits */ 412 + #define OMAP3430ES2_SAVEANDRESTORE_SHIFT 4 413 414 /* RM_RSTST_PER specific bits */ 415
+1 -1
arch/arm/mach-omap2/usb-tusb6010.c
··· 187 unsigned sysclk_ps; 188 int status; 189 190 - if (!refclk_psec || sysclk_ps == 0) 191 return -ENODEV; 192 193 sysclk_ps = is_refclk ? refclk_psec : TUSB6010_OSCCLK_60;
··· 187 unsigned sysclk_ps; 188 int status; 189 190 + if (!refclk_psec || fclk_ps == 0) 191 return -ENODEV; 192 193 sysclk_ps = is_refclk ? refclk_psec : TUSB6010_OSCCLK_60;
+1
arch/arm/mach-pxa/viper.c
··· 46 #include <mach/audio.h> 47 #include <mach/pxafb.h> 48 #include <mach/i2c.h> 49 #include <mach/viper.h> 50 51 #include <asm/setup.h>
··· 46 #include <mach/audio.h> 47 #include <mach/pxafb.h> 48 #include <mach/i2c.h> 49 + #include <mach/regs-uart.h> 50 #include <mach/viper.h> 51 52 #include <asm/setup.h>
-8
arch/arm/mach-realview/core.c
··· 750 { 751 u32 val; 752 753 - #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 754 - /* 755 - * The dummy clock device has to be registered before the main device 756 - * so that the latter will broadcast the clock events 757 - */ 758 - local_timer_setup(); 759 - #endif 760 - 761 /* 762 * set clock frequency: 763 * REALVIEW_REFCLK is 32KHz
··· 750 { 751 u32 val; 752 753 /* 754 * set clock frequency: 755 * REALVIEW_REFCLK is 32KHz
+2 -9
arch/arm/mach-realview/include/mach/smp.h
··· 15 /* 16 * We use IRQ1 as the IPI 17 */ 18 - static inline void smp_cross_call(cpumask_t callmap) 19 { 20 - gic_raise_softirq(callmap, 1); 21 - } 22 - 23 - /* 24 - * Do nothing on MPcore. 25 - */ 26 - static inline void smp_cross_call_done(cpumask_t callmap) 27 - { 28 } 29 30 #endif
··· 15 /* 16 * We use IRQ1 as the IPI 17 */ 18 + static inline void smp_cross_call(const struct cpumask *mask) 19 { 20 + gic_raise_softirq(mask, 1); 21 } 22 23 #endif
+4 -2
arch/arm/mach-realview/localtimer.c
··· 189 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); 190 191 clk->name = "dummy_timer"; 192 - clk->features = CLOCK_EVT_FEAT_DUMMY; 193 - clk->rating = 200; 194 clk->mult = 1; 195 clk->set_mode = dummy_timer_set_mode; 196 clk->broadcast = smp_timer_broadcast;
··· 189 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); 190 191 clk->name = "dummy_timer"; 192 + clk->features = CLOCK_EVT_FEAT_ONESHOT | 193 + CLOCK_EVT_FEAT_PERIODIC | 194 + CLOCK_EVT_FEAT_DUMMY; 195 + clk->rating = 400; 196 clk->mult = 1; 197 clk->set_mode = dummy_timer_set_mode; 198 clk->broadcast = smp_timer_broadcast;
+3 -12
arch/arm/mach-realview/platsmp.c
··· 78 trace_hardirqs_off(); 79 80 /* 81 - * the primary core may have used a "cross call" soft interrupt 82 - * to get this processor out of WFI in the BootMonitor - make 83 - * sure that we are no longer being sent this soft interrupt 84 - */ 85 - smp_cross_call_done(cpumask_of_cpu(cpu)); 86 - 87 - /* 88 * if any interrupts are already enabled for the primary 89 * core (e.g. timer irq), then they will not have been enabled 90 * for us: do so ··· 129 * Use smp_cross_call() for this, since there's little 130 * point duplicating the code here 131 */ 132 - smp_cross_call(cpumask_of_cpu(cpu)); 133 134 timeout = jiffies + (1 * HZ); 135 while (time_before(jiffies, timeout)) { ··· 217 if (max_cpus > ncores) 218 max_cpus = ncores; 219 220 - #ifdef CONFIG_LOCAL_TIMERS 221 /* 222 - * Enable the local timer for primary CPU. If the device is 223 - * dummy (!CONFIG_LOCAL_TIMERS), it was already registers in 224 - * realview_timer_init 225 */ 226 local_timer_setup(); 227 #endif
··· 78 trace_hardirqs_off(); 79 80 /* 81 * if any interrupts are already enabled for the primary 82 * core (e.g. timer irq), then they will not have been enabled 83 * for us: do so ··· 136 * Use smp_cross_call() for this, since there's little 137 * point duplicating the code here 138 */ 139 + smp_cross_call(cpumask_of(cpu)); 140 141 timeout = jiffies + (1 * HZ); 142 while (time_before(jiffies, timeout)) { ··· 224 if (max_cpus > ncores) 225 max_cpus = ncores; 226 227 + #if defined(CONFIG_LOCAL_TIMERS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) 228 /* 229 + * Enable the local timer or broadcast device for the boot CPU. 230 */ 231 local_timer_setup(); 232 #endif
+1 -2
arch/arm/mach-s3c2410/mach-bast.c
··· 588 589 s3c_device_nand.dev.platform_data = &bast_nand_info; 590 591 - s3c_i2c0_set_platdata(&bast_i2c_info); 592 - 593 s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc)); 594 s3c24xx_init_clocks(0); 595 s3c24xx_init_uarts(bast_uartcfgs, ARRAY_SIZE(bast_uartcfgs)); ··· 600 sysdev_class_register(&bast_pm_sysclass); 601 sysdev_register(&bast_pm_sysdev); 602 603 s3c24xx_fb_set_platdata(&bast_fb_info); 604 platform_add_devices(bast_devices, ARRAY_SIZE(bast_devices)); 605
··· 588 589 s3c_device_nand.dev.platform_data = &bast_nand_info; 590 591 s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc)); 592 s3c24xx_init_clocks(0); 593 s3c24xx_init_uarts(bast_uartcfgs, ARRAY_SIZE(bast_uartcfgs)); ··· 602 sysdev_class_register(&bast_pm_sysclass); 603 sysdev_register(&bast_pm_sysdev); 604 605 + s3c_i2c0_set_platdata(&bast_i2c_info); 606 s3c24xx_fb_set_platdata(&bast_fb_info); 607 platform_add_devices(bast_devices, ARRAY_SIZE(bast_devices)); 608
+1 -1
arch/arm/mach-versatile/core.c
··· 413 .rate = 24000000, 414 }; 415 416 - static struct clk_lookup lookups[] __initdata = { 417 { /* UART0 */ 418 .dev_id = "dev:f1", 419 .clk = &ref24_clk,
··· 413 .rate = 24000000, 414 }; 415 416 + static struct clk_lookup lookups[] = { 417 { /* UART0 */ 418 .dev_id = "dev:f1", 419 .clk = &ref24_clk,
+4
arch/arm/nwfpe/fpa11.h
··· 114 extern unsigned int DoubleCPDO(struct roundingData *roundData, 115 const unsigned int opcode, FPREG * rFd); 116 117 #endif
··· 114 extern unsigned int DoubleCPDO(struct roundingData *roundData, 115 const unsigned int opcode, FPREG * rFd); 116 117 + /* extended_cpdo.c */ 118 + extern unsigned int ExtendedCPDO(struct roundingData *roundData, 119 + const unsigned int opcode, FPREG * rFd); 120 + 121 #endif
-4
arch/arm/nwfpe/fpa11_cprt.c
··· 27 #include "fpmodule.inl" 28 #include "softfloat.h" 29 30 - #ifdef CONFIG_FPE_NWFPE_XP 31 - extern flag floatx80_is_nan(floatx80); 32 - #endif 33 - 34 unsigned int PerformFLT(const unsigned int opcode); 35 unsigned int PerformFIX(const unsigned int opcode); 36
··· 27 #include "fpmodule.inl" 28 #include "softfloat.h" 29 30 unsigned int PerformFLT(const unsigned int opcode); 31 unsigned int PerformFIX(const unsigned int opcode); 32
+2
arch/arm/nwfpe/softfloat.h
··· 226 char floatx80_lt_quiet( floatx80, floatx80 ); 227 char floatx80_is_signaling_nan( floatx80 ); 228 229 #endif 230 231 static inline flag extractFloat32Sign(float32 a)
··· 226 char floatx80_lt_quiet( floatx80, floatx80 ); 227 char floatx80_is_signaling_nan( floatx80 ); 228 229 + extern flag floatx80_is_nan(floatx80); 230 + 231 #endif 232 233 static inline flag extractFloat32Sign(float32 a)
+3 -2
arch/arm/plat-omap/fb.c
··· 206 config_invalid = 1; 207 return; 208 } 209 - if (rg.paddr) 210 reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT); 211 - reserved += rg.size; 212 omapfb_config.mem_desc.region[i] = rg; 213 configured_regions++; 214 }
··· 206 config_invalid = 1; 207 return; 208 } 209 + if (rg.paddr) { 210 reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT); 211 + reserved += rg.size; 212 + } 213 omapfb_config.mem_desc.region[i] = rg; 214 configured_regions++; 215 }
+1 -1
arch/arm/plat-omap/gpio.c
··· 307 return 0; 308 if (cpu_is_omap24xx() && gpio < 128) 309 return 0; 310 - if (cpu_is_omap34xx() && gpio < 160) 311 return 0; 312 return -1; 313 }
··· 307 return 0; 308 if (cpu_is_omap24xx() && gpio < 128) 309 return 0; 310 + if (cpu_is_omap34xx() && gpio < 192) 311 return 0; 312 return -1; 313 }
-2
arch/arm/plat-s3c/clock.c
··· 306 307 int s3c24xx_register_clock(struct clk *clk) 308 { 309 - clk->owner = THIS_MODULE; 310 - 311 if (clk->enable == NULL) 312 clk->enable = clk_null_enable; 313
··· 306 307 int s3c24xx_register_clock(struct clk *clk) 308 { 309 if (clk->enable == NULL) 310 clk->enable = clk_null_enable; 311
+1 -1
arch/arm/plat-s3c24xx/dma.c
··· 1235 1236 EXPORT_SYMBOL(s3c2410_dma_getposition); 1237 1238 - static struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) 1239 { 1240 return container_of(dev, struct s3c2410_dma_chan, dev); 1241 }
··· 1235 1236 EXPORT_SYMBOL(s3c2410_dma_getposition); 1237 1238 + static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) 1239 { 1240 return container_of(dev, struct s3c2410_dma_chan, dev); 1241 }
+1 -1
arch/arm/plat-s3c64xx/gpiolib.c
··· 57 #if 1 58 #define gpio_dbg(x...) do { } while(0) 59 #else 60 - #define gpio_dbg(x...) printk(KERN_DEBUG ## x) 61 #endif 62 63 /* The s3c64xx_gpiolib_4bit routines are to control the gpio banks where
··· 57 #if 1 58 #define gpio_dbg(x...) do { } while(0) 59 #else 60 + #define gpio_dbg(x...) printk(KERN_DEBUG x) 61 #endif 62 63 /* The s3c64xx_gpiolib_4bit routines are to control the gpio banks where
+10 -10
arch/arm/plat-s3c64xx/include/plat/gpio-bank-h.h
··· 61 #define S3C64XX_GPH7_ADDR_CF1 (0x06 << 28) 62 #define S3C64XX_GPH7_EINT_G6_7 (0x07 << 28) 63 64 - #define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 32) 65 - #define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 32) 66 - #define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 32) 67 - #define S3C64XX_GPH8_ADDR_CF2 (0x06 << 32) 68 - #define S3C64XX_GPH8_EINT_G6_8 (0x07 << 32) 69 70 - #define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 36) 71 - #define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 36) 72 - #define S3C64XX_GPH9_I2S_V40_DI (0x05 << 36) 73 - #define S3C64XX_GPH9_EINT_G6_9 (0x07 << 36) 74 -
··· 61 #define S3C64XX_GPH7_ADDR_CF1 (0x06 << 28) 62 #define S3C64XX_GPH7_EINT_G6_7 (0x07 << 28) 63 64 + #define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 0) 65 + #define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 0) 66 + #define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 0) 67 + #define S3C64XX_GPH8_ADDR_CF2 (0x06 << 0) 68 + #define S3C64XX_GPH8_EINT_G6_8 (0x07 << 0) 69 70 + #define S3C64XX_GPH9_OUTPUT (0x01 << 4) 71 + #define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 4) 72 + #define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 4) 73 + #define S3C64XX_GPH9_I2S_V40_DI (0x05 << 4) 74 + #define S3C64XX_GPH9_EINT_G6_9 (0x07 << 4)
+1 -1
drivers/input/serio/ambakmi.c
··· 107 clk_disable(kmi->clk); 108 } 109 110 - static int amba_kmi_probe(struct amba_device *dev, void *id) 111 { 112 struct amba_kmi_port *kmi; 113 struct serio *io;
··· 107 clk_disable(kmi->clk); 108 } 109 110 + static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id) 111 { 112 struct amba_kmi_port *kmi; 113 struct serio *io;
+1 -1
drivers/mmc/host/mmci.c
··· 490 mod_timer(&host->timer, jiffies + HZ); 491 } 492 493 - static int __devinit mmci_probe(struct amba_device *dev, void *id) 494 { 495 struct mmc_platform_data *plat = dev->dev.platform_data; 496 struct mmci_host *host;
··· 490 mod_timer(&host->timer, jiffies + HZ); 491 } 492 493 + static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) 494 { 495 struct mmc_platform_data *plat = dev->dev.platform_data; 496 struct mmci_host *host;
+1 -1
drivers/rtc/rtc-pl030.c
··· 102 .set_alarm = pl030_set_alarm, 103 }; 104 105 - static int pl030_probe(struct amba_device *dev, void *id) 106 { 107 struct pl030_rtc *rtc; 108 int ret;
··· 102 .set_alarm = pl030_set_alarm, 103 }; 104 105 + static int pl030_probe(struct amba_device *dev, struct amba_id *id) 106 { 107 struct pl030_rtc *rtc; 108 int ret;
+1 -1
drivers/rtc/rtc-pl031.c
··· 127 return 0; 128 } 129 130 - static int pl031_probe(struct amba_device *adev, void *id) 131 { 132 int ret; 133 struct pl031_local *ldata;
··· 127 return 0; 128 } 129 130 + static int pl031_probe(struct amba_device *adev, struct amba_id *id) 131 { 132 int ret; 133 struct pl031_local *ldata;
+1 -1
drivers/serial/amba-pl010.c
··· 665 .cons = AMBA_CONSOLE, 666 }; 667 668 - static int pl010_probe(struct amba_device *dev, void *id) 669 { 670 struct uart_amba_port *uap; 671 void __iomem *base;
··· 665 .cons = AMBA_CONSOLE, 666 }; 667 668 + static int pl010_probe(struct amba_device *dev, struct amba_id *id) 669 { 670 struct uart_amba_port *uap; 671 void __iomem *base;
+1 -1
drivers/serial/amba-pl011.c
··· 729 .cons = AMBA_CONSOLE, 730 }; 731 732 - static int pl011_probe(struct amba_device *dev, void *id) 733 { 734 struct uart_amba_port *uap; 735 void __iomem *base;
··· 729 .cons = AMBA_CONSOLE, 730 }; 731 732 + static int pl011_probe(struct amba_device *dev, struct amba_id *id) 733 { 734 struct uart_amba_port *uap; 735 void __iomem *base;
+1 -1
drivers/video/amba-clcd.c
··· 437 return ret; 438 } 439 440 - static int clcdfb_probe(struct amba_device *dev, void *id) 441 { 442 struct clcd_board *board = dev->dev.platform_data; 443 struct clcd_fb *fb;
··· 437 return ret; 438 } 439 440 + static int clcdfb_probe(struct amba_device *dev, struct amba_id *id) 441 { 442 struct clcd_board *board = dev->dev.platform_data; 443 struct clcd_fb *fb;
+8 -6
drivers/video/omap/dispc.c
··· 880 881 static int get_dss_clocks(void) 882 { 883 - if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) { 884 - dev_err(dispc.fbdev->dev, "can't get dss_ick\n"); 885 return PTR_ERR(dispc.dss_ick); 886 } 887 888 - if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) { 889 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); 890 clk_put(dispc.dss_ick); 891 return PTR_ERR(dispc.dss1_fck); 892 } 893 894 - if (IS_ERR((dispc.dss_54m_fck = 895 - clk_get(dispc.fbdev->dev, "dss_54m_fck")))) { 896 - dev_err(dispc.fbdev->dev, "can't get dss_54m_fck\n"); 897 clk_put(dispc.dss_ick); 898 clk_put(dispc.dss1_fck); 899 return PTR_ERR(dispc.dss_54m_fck);
··· 880 881 static int get_dss_clocks(void) 882 { 883 + dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick"); 884 + if (IS_ERR(dispc.dss_ick)) { 885 + dev_err(dispc.fbdev->dev, "can't get ick\n"); 886 return PTR_ERR(dispc.dss_ick); 887 } 888 889 + dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck"); 890 + if (IS_ERR(dispc.dss1_fck)) { 891 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); 892 clk_put(dispc.dss_ick); 893 return PTR_ERR(dispc.dss1_fck); 894 } 895 896 + dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck"); 897 + if (IS_ERR(dispc.dss_54m_fck)) { 898 + dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); 899 clk_put(dispc.dss_ick); 900 clk_put(dispc.dss1_fck); 901 return PTR_ERR(dispc.dss_54m_fck);
+5 -3
drivers/video/omap/rfbi.c
··· 83 84 static int rfbi_get_clocks(void) 85 { 86 - if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) { 87 - dev_err(rfbi.fbdev->dev, "can't get dss_ick\n"); 88 return PTR_ERR(rfbi.dss_ick); 89 } 90 91 - if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) { 92 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); 93 clk_put(rfbi.dss_ick); 94 return PTR_ERR(rfbi.dss1_fck);
··· 83 84 static int rfbi_get_clocks(void) 85 { 86 + rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick"); 87 + if (IS_ERR(rfbi.dss_ick)) { 88 + dev_err(rfbi.fbdev->dev, "can't get ick\n"); 89 return PTR_ERR(rfbi.dss_ick); 90 } 91 92 + rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck"); 93 + if (IS_ERR(rfbi.dss1_fck)) { 94 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); 95 clk_put(rfbi.dss_ick); 96 return PTR_ERR(rfbi.dss1_fck);
+1 -1
include/linux/amba/bus.h
··· 28 29 struct amba_driver { 30 struct device_driver drv; 31 - int (*probe)(struct amba_device *, void *); 32 int (*remove)(struct amba_device *); 33 void (*shutdown)(struct amba_device *); 34 int (*suspend)(struct amba_device *, pm_message_t);
··· 28 29 struct amba_driver { 30 struct device_driver drv; 31 + int (*probe)(struct amba_device *, struct amba_id *); 32 int (*remove)(struct amba_device *); 33 void (*shutdown)(struct amba_device *); 34 int (*suspend)(struct amba_device *, pm_message_t);
+26
include/linux/mmzone.h
··· 1097 #define pfn_valid_within(pfn) (1) 1098 #endif 1099 1100 #endif /* !__GENERATING_BOUNDS.H */ 1101 #endif /* !__ASSEMBLY__ */ 1102 #endif /* _LINUX_MMZONE_H */
··· 1097 #define pfn_valid_within(pfn) (1) 1098 #endif 1099 1100 + #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL 1101 + /* 1102 + * pfn_valid() is meant to be able to tell if a given PFN has valid memmap 1103 + * associated with it or not. In FLATMEM, it is expected that holes always 1104 + * have valid memmap as long as there are valid PFNs either side of the hole. 1105 + * In SPARSEMEM, it is assumed that a valid section has a memmap for the 1106 + * entire section. 1107 + * 1108 + * However, ARM, and maybe other embedded architectures in the future, 1109 + * free memmap backing holes to save memory on the assumption the memmap is 1110 + * never used. The page_zone linkages are then broken even though pfn_valid() 1111 + * returns true. A walker of the full memmap must then do this additional 1112 + * check to ensure the memmap they are looking at is sane by making sure 1113 + * the zone and PFN linkages are still valid. This is expensive, but walkers 1114 + * of the full memmap are extremely rare. 1115 + */ 1116 + int memmap_valid_within(unsigned long pfn, 1117 + struct page *page, struct zone *zone); 1118 + #else 1119 + static inline int memmap_valid_within(unsigned long pfn, 1120 + struct page *page, struct zone *zone) 1121 + { 1122 + return 1; 1123 + } 1124 + #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ 1125 + 1126 #endif /* !__GENERATING_BOUNDS.H */ 1127 #endif /* !__ASSEMBLY__ */ 1128 #endif /* _LINUX_MMZONE_H */
+15
mm/mmzone.c
··· 6 7 8 #include <linux/stddef.h> 9 #include <linux/mmzone.h> 10 #include <linux/module.h> 11 ··· 73 *zone = zonelist_zone(z); 74 return z; 75 }
··· 6 7 8 #include <linux/stddef.h> 9 + #include <linux/mm.h> 10 #include <linux/mmzone.h> 11 #include <linux/module.h> 12 ··· 72 *zone = zonelist_zone(z); 73 return z; 74 } 75 + 76 + #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL 77 + int memmap_valid_within(unsigned long pfn, 78 + struct page *page, struct zone *zone) 79 + { 80 + if (page_to_pfn(page) != pfn) 81 + return 0; 82 + 83 + if (page_zone(page) != zone) 84 + return 0; 85 + 86 + return 1; 87 + } 88 + #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+4 -15
mm/vmstat.c
··· 509 continue; 510 511 page = pfn_to_page(pfn); 512 - #ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES 513 - /* 514 - * Ordinarily, memory holes in flatmem still have a valid 515 - * memmap for the PFN range. However, an architecture for 516 - * embedded systems (e.g. ARM) can free up the memmap backing 517 - * holes to save memory on the assumption the memmap is 518 - * never used. The page_zone linkages are then broken even 519 - * though pfn_valid() returns true. Skip the page if the 520 - * linkages are broken. Even if this test passed, the impact 521 - * is that the counters for the movable type are off but 522 - * fragmentation monitoring is likely meaningless on small 523 - * systems. 524 - */ 525 - if (page_zone(page) != zone) 526 continue; 527 - #endif 528 mtype = get_pageblock_migratetype(page); 529 530 if (mtype < MIGRATE_TYPES)
··· 509 continue; 510 511 page = pfn_to_page(pfn); 512 + 513 + /* Watch for unexpected holes punched in the memmap */ 514 + if (!memmap_valid_within(pfn, page, zone)) 515 continue; 516 + 517 mtype = get_pageblock_migratetype(page); 518 519 if (mtype < MIGRATE_TYPES)
+1 -1
sound/arm/aaci.c
··· 1074 return i; 1075 } 1076 1077 - static int __devinit aaci_probe(struct amba_device *dev, void *id) 1078 { 1079 struct aaci *aaci; 1080 int ret, i;
··· 1074 return i; 1075 } 1076 1077 + static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id) 1078 { 1079 struct aaci *aaci; 1080 int ret, i;