Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm: (37 commits)
ARM: 5673/1: U300 fix initsection compile warning
ARM: Fix broken highmem support
mx31moboard: invert sdhc ro signal sense
ARM: S3C24XX: Fix clkout mpx error
ARM: S3C64XX: serial: Fix a typo in Kconfig
IXP4xx: Fix IO_SPACE_LIMIT for 2.6.31-rc core PCI changes
OMAP3: RX51: Updated rx51_defconfig
OMAP2/3: mmc-twl4030: Free up MMC regulators while cleaning up
OMAP3: RX51: Define TWL4030 USB transceiver in board file
OMAP3: Overo: Fix smsc911x platform device resource value
OMAP3: Fix omap3 sram virtual address overlap vmalloc space after increasing vmalloc size
OMAP2/3: DMA errata correction
OMAP: Fix testing of cpu defines for mach-omap1
OMAP3: Overo: add missing pen-down GPIO definition
OMAP: GPIO: clear/restore level/edge detect settings on mask/unmask
OMAP3: PM: Fix wrong sequence in suspend.
OMAP: PM: CPUfreq: obey min/max settings of policy
OMAP2/3/4: UART: allow in-order port traversal
OMAP2/3/4: UART: Allow per-UART disabling wakeup for serial ports
OMAP3: Fixed crash bug with serial + suspend
...

+1026 -397
+4 -3
arch/arm/configs/rx51_defconfig
···
  #
  CONFIG_ZBOOT_ROM_TEXT=0x0
  CONFIG_ZBOOT_ROM_BSS=0x0
- CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=rootfs root=ubi0:rootfs rootfstype=ubifs rootflags=bulk_read,no_chk_data_crc rw console=ttyMTD,log console=tty0"
+ CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=rootfs root=ubi0:rootfs rootfstype=ubifs rootflags=bulk_read,no_chk_data_crc rw console=ttyMTD,log console=tty0 console=ttyS2,115200n8"
  # CONFIG_XIP_KERNEL is not set
  # CONFIG_KEXEC is not set
···
  # CONFIG_USB_GPIO_VBUS is not set
  # CONFIG_ISP1301_OMAP is not set
  CONFIG_TWL4030_USB=y
- CONFIG_MMC=m
+ CONFIG_MMC=y
  # CONFIG_MMC_DEBUG is not set
  # CONFIG_MMC_UNSAFE_RESUME is not set
···
  # on-CPU RTC drivers
  #
  # CONFIG_DMADEVICES is not set
- # CONFIG_REGULATOR is not set
+ CONFIG_REGULATOR=y
+ CONFIG_REGULATOR_TWL4030=y
  # CONFIG_UIO is not set
  # CONFIG_STAGING is not set
···
+2 -1
arch/arm/include/asm/setup.h
···
  struct membank {
          unsigned long start;
          unsigned long size;
-         int node;
+         unsigned short node;
+         unsigned short highmem;
  };

  struct meminfo {
+1 -1
arch/arm/mach-ixp4xx/include/mach/io.h
···
  #include <mach/hardware.h>

- #define IO_SPACE_LIMIT 0xffff0000
+ #define IO_SPACE_LIMIT 0x0000ffff

  extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
  extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
+1 -1
arch/arm/mach-mx3/mx31moboard-devboard.c
···
  static int devboard_sdhc2_get_ro(struct device *dev)
  {
-         return gpio_get_value(SDHC2_WP);
+         return !gpio_get_value(SDHC2_WP);
  }

  static int devboard_sdhc2_init(struct device *dev, irq_handler_t detect_irq,
+1 -1
arch/arm/mach-mx3/mx31moboard-marxbot.c
···
  static int marxbot_sdhc2_get_ro(struct device *dev)
  {
-         return gpio_get_value(SDHC2_WP);
+         return !gpio_get_value(SDHC2_WP);
  }

  static int marxbot_sdhc2_init(struct device *dev, irq_handler_t detect_irq,
+1 -1
arch/arm/mach-mx3/mx31moboard.c
···
  static int moboard_sdhc1_get_ro(struct device *dev)
  {
-         return gpio_get_value(SDHC1_WP);
+         return !gpio_get_value(SDHC1_WP);
  }

  static int moboard_sdhc1_init(struct device *dev, irq_handler_t detect_irq,
-9
arch/arm/mach-mx3/pcm037_eet.c
···
  #include "devices.h"

  static unsigned int pcm037_eet_pins[] = {
-         /* SPI #1 */
-         MX31_PIN_CSPI1_MISO__MISO,
-         MX31_PIN_CSPI1_MOSI__MOSI,
-         MX31_PIN_CSPI1_SCLK__SCLK,
-         MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
-         MX31_PIN_CSPI1_SS0__SS0,
-         MX31_PIN_CSPI1_SS1__SS1,
-         MX31_PIN_CSPI1_SS2__SS2,
-
          /* Reserve and hardwire GPIO 57 high - S6E63D6 chipselect */
          IOMUX_MODE(MX31_PIN_KEY_COL7, IOMUX_CONFIG_GPIO),
          /* GPIO keys */
+1 -1
arch/arm/mach-omap2/board-2430sdp.c
···
  static void __init omap_2430sdp_init_irq(void)
  {
-         omap2_init_common_hw(NULL);
+         omap2_init_common_hw(NULL, NULL);
          omap_init_irq();
          omap_gpio_init();
  }
+1 -1
arch/arm/mach-omap2/board-3430sdp.c
···
  static void __init omap_3430sdp_init_irq(void)
  {
-         omap2_init_common_hw(hyb18m512160af6_sdrc_params);
+         omap2_init_common_hw(hyb18m512160af6_sdrc_params, NULL);
          omap_init_irq();
          omap_gpio_init();
  }
+1 -1
arch/arm/mach-omap2/board-4430sdp.c
···
  static void __init omap_4430sdp_init_irq(void)
  {
-         omap2_init_common_hw(NULL);
+         omap2_init_common_hw(NULL, NULL);
  #ifdef CONFIG_OMAP_32K_TIMER
          omap2_gp_clockevent_set_gptimer(1);
  #endif
+1 -1
arch/arm/mach-omap2/board-apollon.c
···
  static void __init omap_apollon_init_irq(void)
  {
-         omap2_init_common_hw(NULL);
+         omap2_init_common_hw(NULL, NULL);
          omap_init_irq();
          omap_gpio_init();
          apollon_init_smc91x();
+1 -1
arch/arm/mach-omap2/board-generic.c
···
  static void __init omap_generic_init_irq(void)
  {
-         omap2_init_common_hw(NULL);
+         omap2_init_common_hw(NULL, NULL);
          omap_init_irq();
  }
+1 -1
arch/arm/mach-omap2/board-h4.c
···
  static void __init omap_h4_init_irq(void)
  {
-         omap2_init_common_hw(NULL);
+         omap2_init_common_hw(NULL, NULL);
          omap_init_irq();
          omap_gpio_init();
          h4_init_flash();
+1 -1
arch/arm/mach-omap2/board-ldp.c
···
  static void __init omap_ldp_init_irq(void)
  {
-         omap2_init_common_hw(NULL);
+         omap2_init_common_hw(NULL, NULL);
          omap_init_irq();
          omap_gpio_init();
          ldp_init_smsc911x();
+6 -1
arch/arm/mach-omap2/board-omap3beagle.c
···
  static void __init omap3_beagle_init_irq(void)
  {
-         omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
+         omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+                              mt46h32m32lf6_sdrc_params);
          omap_init_irq();
  #ifdef CONFIG_OMAP_32K_TIMER
          omap2_gp_clockevent_set_gptimer(12);
···
          usb_musb_init();
          omap3beagle_flash_init();
+
+         /* Ensure SDRC pins are mux'd for self-refresh */
+         omap_cfg_reg(H16_34XX_SDRC_CKE0);
+         omap_cfg_reg(H17_34XX_SDRC_CKE1);
  }

  static void __init omap3_beagle_map_io(void)
+1 -1
arch/arm/mach-omap2/board-omap3evm.c
···
  static void __init omap3_evm_init_irq(void)
  {
-         omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
+         omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL);
          omap_init_irq();
          omap_gpio_init();
          omap3evm_init_smc911x();
+7 -1
arch/arm/mach-omap2/board-omap3pandora.c
···
  #include <mach/mcspi.h>
  #include <mach/usb.h>
  #include <mach/keypad.h>
+ #include <mach/mux.h>

  #include "sdram-micron-mt46h32m32lf-6.h"
  #include "mmc-twl4030.h"
···
  static void __init omap3pandora_init_irq(void)
  {
-         omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
+         omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+                              mt46h32m32lf6_sdrc_params);
          omap_init_irq();
          omap_gpio_init();
  }
···
          omap3pandora_ads7846_init();
          pandora_keys_gpio_init();
          usb_musb_init();
+
+         /* Ensure SDRC pins are mux'd for self-refresh */
+         omap_cfg_reg(H16_34XX_SDRC_CKE0);
+         omap_cfg_reg(H17_34XX_SDRC_CKE1);
  }

  static void __init omap3pandora_map_io(void)
+9 -2
arch/arm/mach-omap2/board-overo.c
···
  #include <mach/gpmc.h>
  #include <mach/hardware.h>
  #include <mach/nand.h>
+ #include <mach/mux.h>
  #include <mach/usb.h>

  #include "sdram-micron-mt46h32m32lf-6.h"
···
  #define OVERO_GPIO_BT_XGATE	15
  #define OVERO_GPIO_W2W_NRESET	16
+ #define OVERO_GPIO_PENDOWN	114
  #define OVERO_GPIO_BT_NRESET	164
  #define OVERO_GPIO_USBH_CPEN	168
  #define OVERO_GPIO_USBH_NRESET	183
···
          .name = "smsc911x",
          .id = -1,
          .num_resources = ARRAY_SIZE(overo_smsc911x_resources),
-         .resource = &overo_smsc911x_resources,
+         .resource = overo_smsc911x_resources,
          .dev = {
                  .platform_data = &overo_smsc911x_config,
          },
···
  static void __init overo_init_irq(void)
  {
-         omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
+         omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+                              mt46h32m32lf6_sdrc_params);
          omap_init_irq();
          omap_gpio_init();
  }
···
          usb_musb_init();
          overo_ads7846_init();
          overo_init_smsc911x();
+
+         /* Ensure SDRC pins are mux'd for self-refresh */
+         omap_cfg_reg(H16_34XX_SDRC_CKE0);
+         omap_cfg_reg(H17_34XX_SDRC_CKE1);

          if ((gpio_request(OVERO_GPIO_W2W_NRESET,
                            "OVERO_GPIO_W2W_NRESET") == 0) &&
+5
arch/arm/mach-omap2/board-rx51-peripherals.c
···
          .setup = rx51_twlgpio_setup,
  };

+ static struct twl4030_usb_data rx51_usb_data = {
+         .usb_mode = T2_USB_MODE_ULPI,
+ };
+
  static struct twl4030_platform_data rx51_twldata = {
          .irq_base = TWL4030_IRQ_BASE,
          .irq_end = TWL4030_IRQ_END,
···
          .gpio = &rx51_gpio_data,
          .keypad = &rx51_kp_data,
          .madc = &rx51_madc_data,
+         .usb = &rx51_usb_data,

          .vaux1 = &rx51_vaux1,
          .vaux2 = &rx51_vaux2,
+5 -1
arch/arm/mach-omap2/board-rx51.c
···
  static void __init rx51_init_irq(void)
  {
-         omap2_init_common_hw(NULL);
+         omap2_init_common_hw(NULL, NULL);
          omap_init_irq();
          omap_gpio_init();
  }
···
          omap_serial_init();
          usb_musb_init();
          rx51_peripherals_init();
+
+         /* Ensure SDRC pins are mux'd for self-refresh */
+         omap_cfg_reg(H16_34XX_SDRC_CKE0);
+         omap_cfg_reg(H17_34XX_SDRC_CKE1);
  }

  static void __init rx51_map_io(void)
+1 -1
arch/arm/mach-omap2/board-zoom2.c
···
  static void __init omap_zoom2_init_irq(void)
  {
-         omap2_init_common_hw(NULL);
+         omap2_init_common_hw(NULL, NULL);
          omap_init_irq();
          omap_gpio_init();
  }
+83 -81
arch/arm/mach-omap2/clock.c
···
  #include <mach/clock.h>
  #include <mach/clockdomain.h>
  #include <mach/cpu.h>
+ #include <mach/prcm.h>
  #include <asm/div64.h>

  #include <mach/sdrc.h>
···
  #include "cm.h"
  #include "cm-regbits-24xx.h"
  #include "cm-regbits-34xx.h"
-
- #define MAX_CLOCK_ENABLE_WAIT 100000

  /* DPLL rate rounding: minimum DPLL multiplier, divider values */
  #define DPLL_MIN_MULTIPLIER 1
···
  }

  /**
-  * omap2_wait_clock_ready - wait for clock to enable
-  * @reg: physical address of clock IDLEST register
-  * @mask: value to mask against to determine if the clock is active
-  * @name: name of the clock (for printk)
+  * omap2_clk_dflt_find_companion - find companion clock to @clk
+  * @clk: struct clk * to find the companion clock of
+  * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
+  * @other_bit: u8 ** to return the companion clock bit shift in
   *
-  * Returns 1 if the clock enabled in time, or 0 if it failed to enable
-  * in roughly MAX_CLOCK_ENABLE_WAIT microseconds.
-  */
- int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name)
- {
-         int i = 0;
-         int ena = 0;
-
-         /*
-          * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
-          * 34xx reverses this, just to keep us on our toes
-          */
-         if (cpu_mask & (RATE_IN_242X | RATE_IN_243X))
-                 ena = mask;
-         else if (cpu_mask & RATE_IN_343X)
-                 ena = 0;
-
-         /* Wait for lock */
-         while (((__raw_readl(reg) & mask) != ena) &&
-                (i++ < MAX_CLOCK_ENABLE_WAIT)) {
-                 udelay(1);
-         }
-
-         if (i <= MAX_CLOCK_ENABLE_WAIT)
-                 pr_debug("Clock %s stable after %d loops\n", name, i);
-         else
-                 printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
-                        name, MAX_CLOCK_ENABLE_WAIT);
-
-
-         return (i < MAX_CLOCK_ENABLE_WAIT) ? 1 : 0;
- };
-
-
- /*
-  * Note: We don't need special code here for INVERT_ENABLE
-  * for the time being since INVERT_ENABLE only applies to clocks enabled by
+  * Note: We don't need special code here for INVERT_ENABLE for the
+  * time being since INVERT_ENABLE only applies to clocks enabled by
   * CM_CLKEN_PLL
+  *
+  * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's
+  * just a matter of XORing the bits.
+  *
+  * Some clocks don't have companion clocks. For example, modules with
+  * only an interface clock (such as MAILBOXES) don't have a companion
+  * clock. Right now, this code relies on the hardware exporting a bit
+  * in the correct companion register that indicates that the
+  * nonexistent 'companion clock' is active. Future patches will
+  * associate this type of code with per-module data structures to
+  * avoid this issue, and remove the casts. No return value.
   */
- static void omap2_clk_wait_ready(struct clk *clk)
+ void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
+                                    u8 *other_bit)
  {
-         void __iomem *reg, *other_reg, *st_reg;
-         u32 bit;
-
-         /*
-          * REVISIT: This code is pretty ugly. It would be nice to generalize
-          * it and pull it into struct clk itself somehow.
-          */
-         reg = clk->enable_reg;
+         u32 r;

          /*
           * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes
           * it's just a matter of XORing the bits.
           */
-         other_reg = (void __iomem *)((u32)reg ^ (CM_FCLKEN ^ CM_ICLKEN));
+         r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));

-         /* Check if both functional and interface clocks
-          * are running. */
-         bit = 1 << clk->enable_bit;
-         if (!(__raw_readl(other_reg) & bit))
-                 return;
-         st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */
-
-         omap2_wait_clock_ready(st_reg, bit, clk->name);
+         *other_reg = (__force void __iomem *)r;
+         *other_bit = clk->enable_bit;
  }

- static int omap2_dflt_clk_enable(struct clk *clk)
+ /**
+  * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
+  * @clk: struct clk * to find IDLEST info for
+  * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
+  * @idlest_bit: u8 ** to return the CM_IDLEST bit shift in
+  *
+  * Return the CM_IDLEST register address and bit shift corresponding
+  * to the module that "owns" this clock. This default code assumes
+  * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
+  * the IDLEST register address ID corresponds to the CM_*CLKEN
+  * register address ID (e.g., that CM_FCLKEN2 corresponds to
+  * CM_IDLEST2). This is not true for all modules. No return value.
+  */
+ void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
+                                 u8 *idlest_bit)
+ {
+         u32 r;
+
+         r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+         *idlest_reg = (__force void __iomem *)r;
+         *idlest_bit = clk->enable_bit;
+ }
+
+ /**
+  * omap2_module_wait_ready - wait for an OMAP module to leave IDLE
+  * @clk: struct clk * belonging to the module
+  *
+  * If the necessary clocks for the OMAP hardware IP block that
+  * corresponds to clock @clk are enabled, then wait for the module to
+  * indicate readiness (i.e., to leave IDLE). This code does not
+  * belong in the clock code and will be moved in the medium term to
+  * module-dependent code. No return value.
+  */
+ static void omap2_module_wait_ready(struct clk *clk)
+ {
+         void __iomem *companion_reg, *idlest_reg;
+         u8 other_bit, idlest_bit;
+
+         /* Not all modules have multiple clocks that their IDLEST depends on */
+         if (clk->ops->find_companion) {
+                 clk->ops->find_companion(clk, &companion_reg, &other_bit);
+                 if (!(__raw_readl(companion_reg) & (1 << other_bit)))
+                         return;
+         }
+
+         clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit);
+
+         omap2_cm_wait_idlest(idlest_reg, (1 << idlest_bit), clk->name);
+ }
+
+ int omap2_dflt_clk_enable(struct clk *clk)
  {
          u32 v;

          if (unlikely(clk->enable_reg == NULL)) {
-                 printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
+                 pr_err("clock.c: Enable for %s without enable code\n",
                         clk->name);
                  return 0; /* REVISIT: -EINVAL */
          }
···
          __raw_writel(v, clk->enable_reg);
          v = __raw_readl(clk->enable_reg); /* OCP barrier */

+         if (clk->ops->find_idlest)
+                 omap2_module_wait_ready(clk);
+
          return 0;
  }

- static int omap2_dflt_clk_enable_wait(struct clk *clk)
- {
-         int ret;
-
-         if (!clk->enable_reg) {
-                 printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
-                        clk->name);
-                 return 0; /* REVISIT: -EINVAL */
-         }
-
-         ret = omap2_dflt_clk_enable(clk);
-         if (ret == 0)
-                 omap2_clk_wait_ready(clk);
-         return ret;
- }
-
- static void omap2_dflt_clk_disable(struct clk *clk)
+ void omap2_dflt_clk_disable(struct clk *clk)
  {
          u32 v;

···
  }

  const struct clkops clkops_omap2_dflt_wait = {
-         .enable = omap2_dflt_clk_enable_wait,
+         .enable = omap2_dflt_clk_enable,
          .disable = omap2_dflt_clk_disable,
+         .find_companion = omap2_clk_dflt_find_companion,
+         .find_idlest = omap2_clk_dflt_find_idlest,
  };

  const struct clkops clkops_omap2_dflt = {
+6
arch/arm/mach-omap2/clock.h
···
  u32 omap2_get_dpll_rate(struct clk *clk);
  int omap2_wait_clock_ready(void __iomem *reg, u32 cval, const char *name);
  void omap2_clk_prepare_for_reboot(void);
+ int omap2_dflt_clk_enable(struct clk *clk);
+ void omap2_dflt_clk_disable(struct clk *clk);
+ void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
+                                    u8 *other_bit);
+ void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
+                                 u8 *idlest_bit);

  extern const struct clkops clkops_omap2_dflt_wait;
  extern const struct clkops clkops_omap2_dflt;
+35 -2
arch/arm/mach-omap2/clock24xx.c
···
  #include <mach/clock.h>
  #include <mach/sram.h>
+ #include <mach/prcm.h>
  #include <asm/div64.h>
  #include <asm/clkdev.h>
···
  static const struct clkops clkops_oscck;
  static const struct clkops clkops_fixed;
+
+ static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
+                                            void __iomem **idlest_reg,
+                                            u8 *idlest_bit);
+
+ /* 2430 I2CHS has non-standard IDLEST register */
+ static const struct clkops clkops_omap2430_i2chs_wait = {
+         .enable = omap2_dflt_clk_enable,
+         .disable = omap2_dflt_clk_disable,
+         .find_idlest = omap2430_clk_i2chs_find_idlest,
+         .find_companion = omap2_clk_dflt_find_companion,
+ };

  #include "clock24xx.h"
···
   *-------------------------------------------------------------------------*/

  /**
+  * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
+  * @clk: struct clk * being enabled
+  * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+  * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+  *
+  * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
+  * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function
+  * passes back the correct CM_IDLEST register address for I2CHS
+  * modules. No return value.
+  */
+ static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
+                                            void __iomem **idlest_reg,
+                                            u8 *idlest_bit)
+ {
+         *idlest_reg = OMAP_CM_REGADDR(CORE_MOD, CM_IDLEST);
+         *idlest_bit = clk->enable_bit;
+ }
+
+
+ /**
   * omap2xxx_clk_get_core_rate - return the CORE_CLK rate
   * @clk: pointer to the combined dpll_ck + core_ck (currently "dpll_ck")
   *
···
          else if (clk == &apll54_ck)
                  cval = OMAP24XX_ST_54M_APLL;

-         omap2_wait_clock_ready(OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), cval,
-                                clk->name);
+         omap2_cm_wait_idlest(OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), cval,
+                              clk->name);

          /*
           * REVISIT: Should we return an error code if omap2_wait_clock_ready()
+2 -2
arch/arm/mach-omap2/clock24xx.h
···
  static struct clk i2chs2_fck = {
          .name = "i2c_fck",
-         .ops = &clkops_omap2_dflt_wait,
+         .ops = &clkops_omap2430_i2chs_wait,
          .id = 2,
          .parent = &func_96m_ck,
          .clkdm_name = "core_l4_clkdm",
···
  static struct clk i2chs1_fck = {
          .name = "i2c_fck",
-         .ops = &clkops_omap2_dflt_wait,
+         .ops = &clkops_omap2430_i2chs_wait,
          .id = 1,
          .parent = &func_96m_ck,
          .clkdm_name = "core_l4_clkdm",
+138 -15
arch/arm/mach-omap2/clock34xx.c
···
   * OMAP3-specific clock framework functions
   *
   * Copyright (C) 2007-2008 Texas Instruments, Inc.
-  * Copyright (C) 2007-2008 Nokia Corporation
+  * Copyright (C) 2007-2009 Nokia Corporation
   *
   * Written by Paul Walmsley
   * Testing and integration fixes by Jouni Högander
···
  #include "cm-regbits-34xx.h"

  static const struct clkops clkops_noncore_dpll_ops;
+
+ static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
+                                             void __iomem **idlest_reg,
+                                             u8 *idlest_bit);
+ static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
+                                                  void __iomem **idlest_reg,
+                                                  u8 *idlest_bit);
+ static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
+                                                     void __iomem **idlest_reg,
+                                                     u8 *idlest_bit);
+
+ static const struct clkops clkops_omap3430es2_ssi_wait = {
+         .enable = omap2_dflt_clk_enable,
+         .disable = omap2_dflt_clk_disable,
+         .find_idlest = omap3430es2_clk_ssi_find_idlest,
+         .find_companion = omap2_clk_dflt_find_companion,
+ };
+
+ static const struct clkops clkops_omap3430es2_hsotgusb_wait = {
+         .enable = omap2_dflt_clk_enable,
+         .disable = omap2_dflt_clk_disable,
+         .find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
+         .find_companion = omap2_clk_dflt_find_companion,
+ };
+
+ static const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
+         .enable = omap2_dflt_clk_enable,
+         .disable = omap2_dflt_clk_disable,
+         .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
+         .find_companion = omap2_clk_dflt_find_companion,
+ };

  #include "clock34xx.h"
···
          CLK(NULL, "fshostusb_fck", &fshostusb_fck, CK_3430ES1),
          CLK(NULL, "core_12m_fck", &core_12m_fck, CK_343X),
          CLK("omap_hdq.0", "fck", &hdq_fck, CK_343X),
-         CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck, CK_343X),
-         CLK(NULL, "ssi_sst_fck", &ssi_sst_fck, CK_343X),
+         CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1, CK_3430ES1),
+         CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2, CK_3430ES2),
+         CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1),
+         CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2),
          CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X),
-         CLK("musb_hdrc", "ick", &hsotgusb_ick, CK_343X),
+         CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es1, CK_3430ES1),
+         CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es2, CK_3430ES2),
          CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X),
          CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X),
          CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X),
···
          CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_343X),
          CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_343X),
          CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_343X),
-         CLK(NULL, "ssi_ick", &ssi_ick, CK_343X),
+         CLK(NULL, "ssi_ick", &ssi_ick_3430es1, CK_3430ES1),
+         CLK(NULL, "ssi_ick", &ssi_ick_3430es2, CK_3430ES2),
          CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_3430ES1),
          CLK(NULL, "security_l4_ick2", &security_l4_ick2, CK_343X),
          CLK(NULL, "aes1_ick", &aes1_ick, CK_343X),
          CLK("omap_rng", "ick", &rng_ick, CK_343X),
          CLK(NULL, "sha11_ick", &sha11_ick, CK_343X),
          CLK(NULL, "des1_ick", &des1_ick, CK_343X),
-         CLK("omapfb", "dss1_fck", &dss1_alwon_fck, CK_343X),
+         CLK("omapfb", "dss1_fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
+         CLK("omapfb", "dss1_fck", &dss1_alwon_fck_3430es2, CK_3430ES2),
          CLK("omapfb", "tv_fck", &dss_tv_fck, CK_343X),
          CLK("omapfb", "video_fck", &dss_96m_fck, CK_343X),
          CLK("omapfb", "dss2_fck", &dss2_alwon_fck, CK_343X),
-         CLK("omapfb", "ick", &dss_ick, CK_343X),
+         CLK("omapfb", "ick", &dss_ick_3430es1, CK_3430ES1),
+         CLK("omapfb", "ick", &dss_ick_3430es2, CK_3430ES2),
          CLK(NULL, "cam_mclk", &cam_mclk, CK_343X),
          CLK(NULL, "cam_ick", &cam_ick, CK_343X),
          CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),
···
   * 2^MPURATE_BASE_SHIFT MHz for SDRC to stabilize
   */
  #define SDRC_MPURATE_LOOPS 96
+
+ /**
+  * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
+  * @clk: struct clk * being enabled
+  * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+  * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+  *
+  * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
+  * from the CM_{I,F}CLKEN bit. Pass back the correct info via
+  * @idlest_reg and @idlest_bit. No return value.
+  */
+ static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
+                                             void __iomem **idlest_reg,
+                                             u8 *idlest_bit)
+ {
+         u32 r;
+
+         r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+         *idlest_reg = (__force void __iomem *)r;
+         *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
+ }
+
+ /**
+  * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
+  * @clk: struct clk * being enabled
+  * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+  * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+  *
+  * Some OMAP modules on OMAP3 ES2+ chips have both initiator and
+  * target IDLEST bits. For our purposes, we are concerned with the
+  * target IDLEST bits, which exist at a different bit position than
+  * the *CLKEN bit position for these modules (DSS and USBHOST) (The
+  * default find_idlest code assumes that they are at the same
+  * position.) No return value.
+  */
+ static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
+                                                     void __iomem **idlest_reg,
+                                                     u8 *idlest_bit)
+ {
+         u32 r;
+
+         r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+         *idlest_reg = (__force void __iomem *)r;
+         /* USBHOST_IDLE has same shift */
+         *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
+ }
+
+ /**
+  * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
+  * @clk: struct clk * being enabled
+  * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+  * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+  *
+  * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
+  * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
+  * @idlest_reg and @idlest_bit. No return value.
+  */
+ static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
+                                                  void __iomem **idlest_reg,
+                                                  u8 *idlest_bit)
+ {
+         u32 r;
+
+         r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+         *idlest_reg = (__force void __iomem *)r;
+         *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
+ }

  /**
   * omap3_dpll_recalc - recalculate DPLL rate
···
          u32 unlock_dll = 0;
          u32 c;
          unsigned long validrate, sdrcrate, mpurate;
-         struct omap_sdrc_params *sp;
+         struct omap_sdrc_params *sdrc_cs0;
+         struct omap_sdrc_params *sdrc_cs1;
+         int ret;

          if (!clk || !rate)
                  return -EINVAL;
···
          else
                  sdrcrate >>= ((clk->rate / rate) >> 1);

-         sp = omap2_sdrc_get_params(sdrcrate);
-         if (!sp)
+         ret = omap2_sdrc_get_params(sdrcrate, &sdrc_cs0, &sdrc_cs1);
+         if (ret)
                  return -EINVAL;

          if (sdrcrate < MIN_SDRC_DLL_LOCK_FREQ) {
···
          pr_debug("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate,
                   validrate);
-         pr_debug("clock: SDRC timing params used: %08x %08x %08x\n",
-                  sp->rfr_ctrl, sp->actim_ctrla, sp->actim_ctrlb);
+         pr_debug("clock: SDRC CS0 timing params used:"
+                  " RFR %08x CTRLA %08x CTRLB %08x MR %08x\n",
+                  sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
+                  sdrc_cs0->actim_ctrlb, sdrc_cs0->mr);
+         if (sdrc_cs1)
+                 pr_debug("clock: SDRC CS1 timing params used: "
+                          " RFR %08x CTRLA %08x CTRLB %08x MR %08x\n",
+                          sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
+                          sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);

-         omap3_configure_core_dpll(sp->rfr_ctrl, sp->actim_ctrla,
-                                   sp->actim_ctrlb, new_div, unlock_dll, c,
-                                   sp->mr, rate > clk->rate);
+         if (sdrc_cs1)
+                 omap3_configure_core_dpll(
+                                 new_div, unlock_dll, c, rate > clk->rate,
+                                 sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
+                                 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
+                                 sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
+                                 sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);
+         else
+                 omap3_configure_core_dpll(
+                                 new_div, unlock_dll, c, rate > clk->rate,
+                                 sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
+                                 sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
+                                 0, 0, 0, 0);

          return 0;
  }
+74 -11
arch/arm/mach-omap2/clock34xx.h
···
          { .parent = NULL }
  };

- static struct clk ssi_ssr_fck = {
+ static struct clk ssi_ssr_fck_3430es1 = {
          .name = "ssi_ssr_fck",
          .ops = &clkops_omap2_dflt,
          .init = &omap2_init_clksel_parent,
···
          .recalc = &omap2_clksel_recalc,
  };

- static struct clk ssi_sst_fck = {
+ static struct clk ssi_ssr_fck_3430es2 = {
+         .name = "ssi_ssr_fck",
+         .ops = &clkops_omap3430es2_ssi_wait,
+         .init = &omap2_init_clksel_parent,
+         .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+         .enable_bit = OMAP3430_EN_SSI_SHIFT,
+         .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+         .clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
+         .clksel = ssi_ssr_clksel,
+         .clkdm_name = "core_l4_clkdm",
+         .recalc = &omap2_clksel_recalc,
+ };
+
+ static struct clk ssi_sst_fck_3430es1 = {
          .name = "ssi_sst_fck",
          .ops = &clkops_null,
-         .parent = &ssi_ssr_fck,
+         .parent = &ssi_ssr_fck_3430es1,
+         .fixed_div = 2,
+         .recalc = &omap2_fixed_divisor_recalc,
+ };
+
+ static struct clk ssi_sst_fck_3430es2 = {
+         .name = "ssi_sst_fck",
+         .ops = &clkops_null,
+         .parent = &ssi_ssr_fck_3430es2,
          .fixed_div = 2,
          .recalc = &omap2_fixed_divisor_recalc,
  };
···
          .recalc = &followparent_recalc,
  };

- static struct clk hsotgusb_ick = {
+ static struct clk hsotgusb_ick_3430es1 = {
          .name = "hsotgusb_ick",
-         .ops = &clkops_omap2_dflt_wait,
+         .ops = &clkops_omap2_dflt,
+         .parent = &core_l3_ick,
+         .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+         .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
+         .clkdm_name = "core_l3_clkdm",
+         .recalc = &followparent_recalc,
+ };
+
+ static struct clk hsotgusb_ick_3430es2 = {
+         .name = "hsotgusb_ick",
+         .ops = &clkops_omap3430es2_hsotgusb_wait,
          .parent = &core_l3_ick,
          .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
          .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
···
          .recalc = &followparent_recalc,
  };

- static struct clk ssi_ick = {
+ static struct clk ssi_ick_3430es1 = {
          .name = "ssi_ick",
          .ops = &clkops_omap2_dflt,
-         .parent = &ssi_l4_ick,
-         .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-         .enable_bit = OMAP3430_EN_SSI_SHIFT,
+         .parent = &ssi_l4_ick,
+         .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+         .enable_bit = OMAP3430_EN_SSI_SHIFT,
+         .clkdm_name = "core_l4_clkdm",
+         .recalc = &followparent_recalc,
+ };
+
+ static struct clk ssi_ick_3430es2 = {
+         .name = "ssi_ick",
+         .ops = &clkops_omap3430es2_ssi_wait,
+         .parent = &ssi_l4_ick,
+         .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+         .enable_bit = OMAP3430_EN_SSI_SHIFT,
···
  };

  /* DSS */
- static struct clk dss1_alwon_fck = {
+ static struct clk dss1_alwon_fck_3430es1 = {
          .name = "dss1_alwon_fck",
          .ops = &clkops_omap2_dflt,
-         .parent = &dpll4_m4x2_ck,
-         .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-         .enable_bit = OMAP3430_EN_DSS1_SHIFT,
+         .parent = &dpll4_m4x2_ck,
+         .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+         .enable_bit = OMAP3430_EN_DSS1_SHIFT,
+         .clkdm_name = "dss_clkdm",
+         .recalc = &followparent_recalc,
+ };
+
+ static struct clk dss1_alwon_fck_3430es2 = {
+         .name = "dss1_alwon_fck",
+         .ops = &clkops_omap3430es2_dss_usbhost_wait,
+         .parent = &dpll4_m4x2_ck,
+         .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+         .enable_bit = OMAP3430_EN_DSS1_SHIFT,
···
          .recalc = &followparent_recalc,
  };

- static struct clk dss_ick = {
+ static struct clk dss_ick_3430es1 = {
          /* Handles both L3 and L4 clocks */
          .name = "dss_ick",
          .ops = &clkops_omap2_dflt,
-         .parent = &l4_ick,
-         .init = &omap2_init_clk_clkdm,
-         .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
+         .parent = &l4_ick,
+         .init = &omap2_init_clk_clkdm,
+         .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
+         .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
+         .clkdm_name = "dss_clkdm",
+         .recalc = &followparent_recalc,
+ };
+
+ static struct clk dss_ick_3430es2 = {
+         /* Handles both L3 and L4 clocks */
+         .name = "dss_ick",
+         .ops = &clkops_omap3430es2_dss_usbhost_wait,
+         .parent = &l4_ick,
+         .init = &omap2_init_clk_clkdm,
+         .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
···

  static struct clk usbhost_120m_fck = {
          .name = "usbhost_120m_fck",
-         .ops = &clkops_omap2_dflt_wait,
+         .ops = &clkops_omap2_dflt,
          .parent = &dpll5_m2_ck,
          .init = &omap2_init_clk_clkdm,
          .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
···

  static struct clk usbhost_48m_fck = {
          .name = "usbhost_48m_fck",
-         .ops = &clkops_omap2_dflt_wait,
+         .ops = &clkops_omap3430es2_dss_usbhost_wait,
          .parent = &omap_48m_fck,
          .init = &omap2_init_clk_clkdm,
          .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
···
  static struct clk usbhost_ick = {
          /* Handles both L3 and L4 clocks */
          .name = "usbhost_ick",
-         .ops = &clkops_omap2_dflt_wait,
+         .ops = &clkops_omap3430es2_dss_usbhost_wait,
          .parent = &l4_ick,
          .init = &omap2_init_clk_clkdm,
          .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
+3 -3
arch/arm/mach-omap2/cm.h
···
   * These registers appear once per CM module.
   */

- #define OMAP3430_CM_REVISION		OMAP_CM_REGADDR(OCP_MOD, 0x0000)
- #define OMAP3430_CM_SYSCONFIG		OMAP_CM_REGADDR(OCP_MOD, 0x0010)
- #define OMAP3430_CM_POLCTRL		OMAP_CM_REGADDR(OCP_MOD, 0x009c)
+ #define OMAP3430_CM_REVISION		OMAP34XX_CM_REGADDR(OCP_MOD, 0x0000)
+ #define OMAP3430_CM_SYSCONFIG		OMAP34XX_CM_REGADDR(OCP_MOD, 0x0010)
+ #define OMAP3430_CM_POLCTRL		OMAP34XX_CM_REGADDR(OCP_MOD, 0x009c)

  #define OMAP3_CM_CLKOUT_CTRL_OFFSET	0x0070
  #define OMAP3430_CM_CLKOUT_CTRL	OMAP_CM_REGADDR(OMAP3430_CCR_MOD, 0x0070)
+3 -2
arch/arm/mach-omap2/io.c
···
          return v;
  }

- void __init omap2_init_common_hw(struct omap_sdrc_params *sp)
+ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
+                                  struct omap_sdrc_params *sdrc_cs1)
  {
          omap2_mux_init();
  #ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */
          pwrdm_init(powerdomains_omap);
          clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
          omap2_clk_init();
-         omap2_sdrc_init(sp);
+         omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
          _omap2_init_reprogram_sdrc();
  #endif
          gpmc_init();
+6
arch/arm/mach-omap2/mmc-twl4030.c
···
                          if (i != 0)
                                  break;
                          ret = PTR_ERR(reg);
+                         hsmmc[i].vcc = NULL;
                          goto err;
                  }
                  hsmmc[i].vcc = reg;
···
  static void twl_mmc_cleanup(struct device *dev)
  {
          struct omap_mmc_platform_data *mmc = dev->platform_data;
+         int i;

          gpio_free(mmc->slots[0].switch_pin);
+         for(i = 0; i < ARRAY_SIZE(hsmmc); i++) {
+                 regulator_put(hsmmc[i].vcc);
+                 regulator_put(hsmmc[i].vcc_aux);
+         }
  }

  #ifdef CONFIG_PM
+6
arch/arm/mach-omap2/mux.c
···
                  OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
  MUX_CFG_34XX("J25_34XX_GPIO170", 0x1c6,
                  OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
+
+ /* OMAP3 SDRC CKE signals to SDR/DDR ram chips */
+ MUX_CFG_34XX("H16_34XX_SDRC_CKE0", 0x262,
+                 OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT)
+ MUX_CFG_34XX("H17_34XX_SDRC_CKE1", 0x264,
+                 OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT)
  };

  #define OMAP34XX_PINS_SZ	ARRAY_SIZE(omap34xx_pins)
-3
arch/arm/mach-omap2/pm.h
···
  #ifndef __ARCH_ARM_MACH_OMAP2_PM_H
  #define __ARCH_ARM_MACH_OMAP2_PM_H

- extern int omap2_pm_init(void);
- extern int omap3_pm_init(void);
-
  #ifdef CONFIG_PM_DEBUG
  extern void omap2_pm_dump(int mode, int resume, unsigned int us);
  extern int omap2_pm_debug;
+1 -1
arch/arm/mach-omap2/pm24xx.c
···
                            WKUP_MOD, PM_WKEN);
  }

- int __init omap2_pm_init(void)
+ static int __init omap2_pm_init(void)
  {
          u32 l;

+47 -4
arch/arm/mach-omap2/pm34xx.c
···
  struct power_state {
          struct powerdomain *pwrdm;
          u32 next_state;
+ #ifdef CONFIG_SUSPEND
          u32 saved_state;
+ #endif
          struct list_head node;
  };
···
          local_irq_enable();
  }

+ #ifdef CONFIG_SUSPEND
+ static suspend_state_t suspend_state;
+
  static int omap3_pm_prepare(void)
  {
          disable_hlt();
···
  restore:
          /* Restore next_pwrsts */
          list_for_each_entry(pwrst, &pwrst_list, node) {
-                 set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
                  state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
                  if (state > pwrst->next_state) {
                          printk(KERN_INFO "Powerdomain (%s) didn't enter "
                                 pwrst->pwrdm->name, pwrst->next_state);
                          ret = -1;
                  }
+                 set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
          }
          if (ret)
                  printk(KERN_ERR "Could not enter target state in pm_suspend\n");
···
          return ret;
  }

- static int omap3_pm_enter(suspend_state_t state)
+ static int omap3_pm_enter(suspend_state_t unused)
  {
          int ret = 0;

-         switch (state) {
+         switch (suspend_state) {
          case PM_SUSPEND_STANDBY:
          case PM_SUSPEND_MEM:
                  ret = omap3_pm_suspend();
···
          enable_hlt();
  }

+ /* Hooks to enable / disable UART interrupts during suspend */
+ static int omap3_pm_begin(suspend_state_t state)
+ {
+         suspend_state = state;
+         omap_uart_enable_irqs(0);
+         return 0;
+ }
+
+ static void omap3_pm_end(void)
+ {
+         suspend_state = PM_SUSPEND_ON;
+         omap_uart_enable_irqs(1);
+         return;
+ }
+
  static struct platform_suspend_ops omap_pm_ops = {
+         .begin = omap3_pm_begin,
+         .end = omap3_pm_end,
          .prepare = omap3_pm_prepare,
          .enter = omap3_pm_enter,
          .finish = omap3_pm_finish,
          .valid = suspend_valid_only_mem,
  };
+ #endif /* CONFIG_SUSPEND */


  /**
···
          /* Clear any pending PRCM interrupts */
          prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

+         /* Don't attach IVA interrupts */
+         prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
+         prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
+         prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
+         prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
+
+         /* Clear any pending 'reset' flags */
+         prm_write_mod_reg(0xffffffff, MPU_MOD, RM_RSTST);
+         prm_write_mod_reg(0xffffffff, CORE_MOD, RM_RSTST);
+         prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, RM_RSTST);
+         prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, RM_RSTST);
+         prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, RM_RSTST);
+         prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, RM_RSTST);
+         prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, RM_RSTST);
+
+         /* Clear any pending PRCM interrupts */
+         prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+
          omap3_iva_idle();
          omap3_d2d_idle();
  }
···
          return 0;
  }

- int __init omap3_pm_init(void)
+ static int __init omap3_pm_init(void)
  {
          struct power_state *pwrst, *tmp;
          int ret;
···
          _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
                                           omap34xx_cpu_suspend_sz);

+ #ifdef CONFIG_SUSPEND
          suspend_set_ops(&omap_pm_ops);
+ #endif /* CONFIG_SUSPEND */

          pm_idle = omap3_pm_idle;

+43
arch/arm/mach-omap2/prcm.c
···
  #include <linux/init.h>
  #include <linux/clk.h>
  #include <linux/io.h>
+ #include <linux/delay.h>

  #include <mach/common.h>
  #include <mach/prcm.h>
···
  static void __iomem *prm_base;
  static void __iomem *cm_base;
+
+ #define MAX_MODULE_ENABLE_WAIT		100000

  u32 omap_prcm_get_reset_sources(void)
  {
···
          return v;
  }
  EXPORT_SYMBOL(cm_rmw_mod_reg_bits);
+
+ /**
+  * omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
+  * @reg: physical address of module IDLEST register
+  * @mask: value to mask against to determine if the module is active
+  * @name: name of the clock (for printk)
+  *
+  * Returns 1 if the module indicated readiness in time, or 0 if it
+  * failed to enable in roughly MAX_MODULE_ENABLE_WAIT microseconds.
+  */
+ int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, const char *name)
+ {
+         int i = 0;
+         int ena = 0;
+
+         /*
+          * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
+          * 34xx reverses this, just to keep us on our toes
+          */
+         if (cpu_is_omap24xx())
+                 ena = mask;
+         else if (cpu_is_omap34xx())
+                 ena = 0;
+         else
+                 BUG();
+
+         /* Wait for lock */
+         while (((__raw_readl(reg) & mask) != ena) &&
+                (i++ < MAX_MODULE_ENABLE_WAIT))
+                 udelay(1);
+
+         if (i < MAX_MODULE_ENABLE_WAIT)
+                 pr_debug("cm: Module associated with clock %s ready after %d "
+                          "loops\n", name, i);
+         else
+                 pr_err("cm: Module associated with clock %s didn't enable in "
+                        "%d tries\n", name, MAX_MODULE_ENABLE_WAIT);
+
+         return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
+ };

  void __init omap2_set_globals_prcm(struct omap_globals *omap2_globals)
  {
+45 -23
arch/arm/mach-omap2/sdrc.c
··· 32 #include <mach/sdrc.h> 33 #include "sdrc.h" 34 35 - static struct omap_sdrc_params *sdrc_init_params; 36 37 void __iomem *omap2_sdrc_base; 38 void __iomem *omap2_sms_base; ··· 45 /** 46 * omap2_sdrc_get_params - return SDRC register values for a given clock rate 47 * @r: SDRC clock rate (in Hz) 48 * 49 * Return pre-calculated values for the SDRC_ACTIM_CTRLA, 50 - * SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL, and SDRC_MR registers, for a given 51 - * SDRC clock rate 'r'. These parameters control various timing 52 - * delays in the SDRAM controller that are expressed in terms of the 53 - * number of SDRC clock cycles to wait; hence the clock rate 54 - * dependency. Note that sdrc_init_params must be sorted rate 55 - * descending. Also assumes that both chip-selects use the same 56 - * timing parameters. Returns a struct omap_sdrc_params * upon 57 - * success, or NULL upon failure. 58 */ 59 - struct omap_sdrc_params *omap2_sdrc_get_params(unsigned long r) 60 { 61 - struct omap_sdrc_params *sp; 62 63 - if (!sdrc_init_params) 64 - return NULL; 65 66 - sp = sdrc_init_params; 67 68 - while (sp->rate && sp->rate != r) 69 - sp++; 70 71 - if (!sp->rate) 72 - return NULL; 73 74 - return sp; 75 } 76 77 ··· 99 100 /** 101 * omap2_sdrc_init - initialize SMS, SDRC devices on boot 102 - * @sp: pointer to a null-terminated list of struct omap_sdrc_params 103 * 104 * Turn on smart idle modes for SDRAM scheduler and controller. 105 * Program a known-good configuration for the SDRC to deal with buggy 106 * bootloaders. 107 */ 108 - void __init omap2_sdrc_init(struct omap_sdrc_params *sp) 109 { 110 u32 l; 111 ··· 121 l |= (0x2 << 3); 122 sdrc_write_reg(l, SDRC_SYSCONFIG); 123 124 - sdrc_init_params = sp; 125 126 /* XXX Enable SRFRONIDLEREQ here also? */ 127 l = (1 << SDRC_POWER_EXTCLKDIS_SHIFT) | 128 - (1 << SDRC_POWER_PWDENA_SHIFT) | 129 (1 << SDRC_POWER_PAGEPOLICY_SHIFT); 130 sdrc_write_reg(l, SDRC_POWER); 131 }
··· 32 #include <mach/sdrc.h> 33 #include "sdrc.h" 34 35 + static struct omap_sdrc_params *sdrc_init_params_cs0, *sdrc_init_params_cs1; 36 37 void __iomem *omap2_sdrc_base; 38 void __iomem *omap2_sms_base; ··· 45 /** 46 * omap2_sdrc_get_params - return SDRC register values for a given clock rate 47 * @r: SDRC clock rate (in Hz) 48 + * @sdrc_cs0: chip select 0 ram timings ** 49 + * @sdrc_cs1: chip select 1 ram timings ** 50 * 51 * Return pre-calculated values for the SDRC_ACTIM_CTRLA, 52 + * SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL and SDRC_MR registers in sdrc_cs[01] 53 + * structs,for a given SDRC clock rate 'r'. 54 + * These parameters control various timing delays in the SDRAM controller 55 + * that are expressed in terms of the number of SDRC clock cycles to 56 + * wait; hence the clock rate dependency. 57 + * 58 + * Supports 2 different timing parameters for both chip selects. 59 + * 60 + * Note 1: the sdrc_init_params_cs[01] must be sorted rate descending. 61 + * Note 2: If sdrc_init_params_cs_1 is not NULL it must be of same size 62 + * as sdrc_init_params_cs_0. 63 + * 64 + * Fills in the struct omap_sdrc_params * for each chip select. 65 + * Returns 0 upon success or -1 upon failure. 66 */ 67 + int omap2_sdrc_get_params(unsigned long r, 68 + struct omap_sdrc_params **sdrc_cs0, 69 + struct omap_sdrc_params **sdrc_cs1) 70 { 71 + struct omap_sdrc_params *sp0, *sp1; 72 73 + if (!sdrc_init_params_cs0) 74 + return -1; 75 76 + sp0 = sdrc_init_params_cs0; 77 + sp1 = sdrc_init_params_cs1; 78 79 + while (sp0->rate && sp0->rate != r) { 80 + sp0++; 81 + if (sdrc_init_params_cs1) 82 + sp1++; 83 + } 84 85 + if (!sp0->rate) 86 + return -1; 87 88 + *sdrc_cs0 = sp0; 89 + *sdrc_cs1 = sp1; 90 + return 0; 91 } 92 93 ··· 83 84 /** 85 * omap2_sdrc_init - initialize SMS, SDRC devices on boot 86 + * @sdrc_cs[01]: pointers to a null-terminated list of struct omap_sdrc_params 87 + * Support for 2 chip selects timings 88 * 89 * Turn on smart idle modes for SDRAM scheduler and controller. 90 * Program a known-good configuration for the SDRC to deal with buggy 91 * bootloaders. 92 */ 93 + void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0, 94 + struct omap_sdrc_params *sdrc_cs1) 95 { 96 u32 l; 97 ··· 103 l |= (0x2 << 3); 104 sdrc_write_reg(l, SDRC_SYSCONFIG); 105 106 + sdrc_init_params_cs0 = sdrc_cs0; 107 + sdrc_init_params_cs1 = sdrc_cs1; 108 109 /* XXX Enable SRFRONIDLEREQ here also? */ 110 + /* 111 + * PWDENA should not be set due to 34xx erratum 1.150 - PWDENA 112 + * can cause random memory corruption 113 + */ 114 l = (1 << SDRC_POWER_EXTCLKDIS_SHIFT) | 115 (1 << SDRC_POWER_PAGEPOLICY_SHIFT); 116 sdrc_write_reg(l, SDRC_POWER); 117 }
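With the chip-select split, callers of omap2_sdrc_get_params() now receive two parameter pointers and check the return code instead of testing for a NULL pointer. A rough sketch of a lookup, assuming a hypothetical caller (the rate value and function name are illustrative):

#include <linux/errno.h>
#include <mach/sdrc.h>

static int example_lookup_sdrc_timings(unsigned long sdrc_rate)
{
	struct omap_sdrc_params *cs0, *cs1;

	/* Fills cs0/cs1 with the pre-computed timings for this rate;
	 * cs1 may be NULL when no CS1 table was registered. */
	if (omap2_sdrc_get_params(sdrc_rate, &cs0, &cs1))
		return -EINVAL;

	/* ... program the SDRC from cs0 (and cs1, if present) here ... */
	return 0;
}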
+135 -64
arch/arm/mach-omap2/serial.c
··· 54 55 struct plat_serial8250_port *p; 56 struct list_head node; 57 58 #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) 59 int context_valid; ··· 69 #endif 70 }; 71 72 - static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS]; 73 static LIST_HEAD(uart_list); 74 75 - static struct plat_serial8250_port serial_platform_data[] = { 76 { 77 .membase = IO_ADDRESS(OMAP_UART1_BASE), 78 .mapbase = OMAP_UART1_BASE, ··· 81 .regshift = 2, 82 .uartclk = OMAP24XX_BASE_BAUD * 16, 83 }, { 84 .membase = IO_ADDRESS(OMAP_UART2_BASE), 85 .mapbase = OMAP_UART2_BASE, 86 .irq = 73, ··· 95 .regshift = 2, 96 .uartclk = OMAP24XX_BASE_BAUD * 16, 97 }, { 98 .membase = IO_ADDRESS(OMAP_UART3_BASE), 99 .mapbase = OMAP_UART3_BASE, 100 .irq = 74, ··· 229 clk_disable(uart->fck); 230 } 231 232 static void omap_uart_smart_idle_enable(struct omap_uart_state *uart, 233 int enable) 234 { ··· 292 293 static void omap_uart_allow_sleep(struct omap_uart_state *uart) 294 { 295 if (!uart->clocked) 296 return; 297 ··· 343 /* Check for normal UART wakeup */ 344 if (__raw_readl(uart->wk_st) & uart->wk_mask) 345 omap_uart_block_sleep(uart); 346 - 347 return; 348 } 349 } ··· 396 return IRQ_NONE; 397 } 398 399 - static u32 sleep_timeout = DEFAULT_TIMEOUT; 400 - 401 static void omap_uart_idle_init(struct omap_uart_state *uart) 402 { 403 - u32 v; 404 struct plat_serial8250_port *p = uart->p; 405 int ret; 406 407 uart->can_sleep = 0; 408 - uart->timeout = sleep_timeout; 409 setup_timer(&uart->timer, omap_uart_idle_timer, 410 (unsigned long) uart); 411 mod_timer(&uart->timer, jiffies + uart->timeout); ··· 460 uart->padconf = 0; 461 } 462 463 - /* Set wake-enable bit */ 464 - if (uart->wk_en && uart->wk_mask) { 465 - v = __raw_readl(uart->wk_en); 466 - v |= uart->wk_mask; 467 - __raw_writel(v, uart->wk_en); 468 - } 469 - 470 - /* Ensure IOPAD wake-enables are set */ 471 - if (cpu_is_omap34xx() && uart->padconf) { 472 - u16 v; 473 - 474 - v = omap_ctrl_readw(uart->padconf); 475 - v |= OMAP3_PADCONF_WAKEUPENABLE0; 476 - omap_ctrl_writew(v, uart->padconf); 477 - } 478 - 479 p->flags |= UPF_SHARE_IRQ; 480 ret = request_irq(p->irq, omap_uart_interrupt, IRQF_SHARED, 481 "serial idle", (void *)uart); 482 WARN_ON(ret); 483 } 484 485 - static ssize_t sleep_timeout_show(struct kobject *kobj, 486 - struct kobj_attribute *attr, 487 - char *buf) 488 { 489 - return sprintf(buf, "%u\n", sleep_timeout / HZ); 490 } 491 492 - static ssize_t sleep_timeout_store(struct kobject *kobj, 493 - struct kobj_attribute *attr, 494 const char *buf, size_t n) 495 { 496 - struct omap_uart_state *uart; 497 unsigned int value; 498 499 if (sscanf(buf, "%u", &value) != 1) { 500 printk(KERN_ERR "sleep_timeout_store: Invalid value\n"); 501 return -EINVAL; 502 } 503 - sleep_timeout = value * HZ; 504 - list_for_each_entry(uart, &uart_list, node) { 505 - uart->timeout = sleep_timeout; 506 - if (uart->timeout) 507 - mod_timer(&uart->timer, jiffies + uart->timeout); 508 - else 509 - /* A zero value means disable timeout feature */ 510 - omap_uart_block_sleep(uart); 511 - } 512 return n; 513 } 514 515 - static struct kobj_attribute sleep_timeout_attr = 516 - __ATTR(sleep_timeout, 0644, sleep_timeout_show, sleep_timeout_store); 517 - 518 #else 519 static inline void omap_uart_idle_init(struct omap_uart_state *uart) {} 520 #endif /* CONFIG_PM */ 521 522 - static struct platform_device serial_device = { 523 - .name = "serial8250", 524 - .id = PLAT8250_DEV_PLATFORM, 525 - .dev = { 526 - .platform_data = serial_platform_data, 527 }, 528 }; 529 530 void __init omap_serial_init(void) 
531 { 532 - int i, err; 533 const struct omap_uart_config *info; 534 char name[16]; 535 ··· 568 569 if (info == NULL) 570 return; 571 - if (cpu_is_omap44xx()) { 572 - for (i = 0; i < OMAP_MAX_NR_PORTS; i++) 573 - serial_platform_data[i].irq += 32; 574 - } 575 576 for (i = 0; i < OMAP_MAX_NR_PORTS; i++) { 577 - struct plat_serial8250_port *p = serial_platform_data + i; 578 struct omap_uart_state *uart = &omap_uart[i]; 579 580 if (!(info->enabled_uarts & (1 << i))) { 581 p->membase = NULL; ··· 601 uart->num = i; 602 p->private_data = uart; 603 uart->p = p; 604 - list_add(&uart->node, &uart_list); 605 606 omap_uart_enable_clocks(uart); 607 omap_uart_reset(uart); 608 omap_uart_idle_init(uart); 609 } 610 - 611 - err = platform_device_register(&serial_device); 612 - 613 - #ifdef CONFIG_PM 614 - if (!err) 615 - err = sysfs_create_file(&serial_device.dev.kobj, 616 - &sleep_timeout_attr.attr); 617 - #endif 618 - 619 } 620 -
··· 54 55 struct plat_serial8250_port *p; 56 struct list_head node; 57 + struct platform_device pdev; 58 59 #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) 60 int context_valid; ··· 68 #endif 69 }; 70 71 static LIST_HEAD(uart_list); 72 73 + static struct plat_serial8250_port serial_platform_data0[] = { 74 { 75 .membase = IO_ADDRESS(OMAP_UART1_BASE), 76 .mapbase = OMAP_UART1_BASE, ··· 81 .regshift = 2, 82 .uartclk = OMAP24XX_BASE_BAUD * 16, 83 }, { 84 + .flags = 0 85 + } 86 + }; 87 + 88 + static struct plat_serial8250_port serial_platform_data1[] = { 89 + { 90 .membase = IO_ADDRESS(OMAP_UART2_BASE), 91 .mapbase = OMAP_UART2_BASE, 92 .irq = 73, ··· 89 .regshift = 2, 90 .uartclk = OMAP24XX_BASE_BAUD * 16, 91 }, { 92 + .flags = 0 93 + } 94 + }; 95 + 96 + static struct plat_serial8250_port serial_platform_data2[] = { 97 + { 98 .membase = IO_ADDRESS(OMAP_UART3_BASE), 99 .mapbase = OMAP_UART3_BASE, 100 .irq = 74, ··· 217 clk_disable(uart->fck); 218 } 219 220 + static void omap_uart_enable_wakeup(struct omap_uart_state *uart) 221 + { 222 + /* Set wake-enable bit */ 223 + if (uart->wk_en && uart->wk_mask) { 224 + u32 v = __raw_readl(uart->wk_en); 225 + v |= uart->wk_mask; 226 + __raw_writel(v, uart->wk_en); 227 + } 228 + 229 + /* Ensure IOPAD wake-enables are set */ 230 + if (cpu_is_omap34xx() && uart->padconf) { 231 + u16 v = omap_ctrl_readw(uart->padconf); 232 + v |= OMAP3_PADCONF_WAKEUPENABLE0; 233 + omap_ctrl_writew(v, uart->padconf); 234 + } 235 + } 236 + 237 + static void omap_uart_disable_wakeup(struct omap_uart_state *uart) 238 + { 239 + /* Clear wake-enable bit */ 240 + if (uart->wk_en && uart->wk_mask) { 241 + u32 v = __raw_readl(uart->wk_en); 242 + v &= ~uart->wk_mask; 243 + __raw_writel(v, uart->wk_en); 244 + } 245 + 246 + /* Ensure IOPAD wake-enables are cleared */ 247 + if (cpu_is_omap34xx() && uart->padconf) { 248 + u16 v = omap_ctrl_readw(uart->padconf); 249 + v &= ~OMAP3_PADCONF_WAKEUPENABLE0; 250 + omap_ctrl_writew(v, uart->padconf); 251 + } 252 + } 253 + 254 static void omap_uart_smart_idle_enable(struct omap_uart_state *uart, 255 int enable) 256 { ··· 246 247 static void omap_uart_allow_sleep(struct omap_uart_state *uart) 248 { 249 + if (device_may_wakeup(&uart->pdev.dev)) 250 + omap_uart_enable_wakeup(uart); 251 + else 252 + omap_uart_disable_wakeup(uart); 253 + 254 if (!uart->clocked) 255 return; 256 ··· 292 /* Check for normal UART wakeup */ 293 if (__raw_readl(uart->wk_st) & uart->wk_mask) 294 omap_uart_block_sleep(uart); 295 return; 296 } 297 } ··· 346 return IRQ_NONE; 347 } 348 349 static void omap_uart_idle_init(struct omap_uart_state *uart) 350 { 351 struct plat_serial8250_port *p = uart->p; 352 int ret; 353 354 uart->can_sleep = 0; 355 + uart->timeout = DEFAULT_TIMEOUT; 356 setup_timer(&uart->timer, omap_uart_idle_timer, 357 (unsigned long) uart); 358 mod_timer(&uart->timer, jiffies + uart->timeout); ··· 413 uart->padconf = 0; 414 } 415 416 p->flags |= UPF_SHARE_IRQ; 417 ret = request_irq(p->irq, omap_uart_interrupt, IRQF_SHARED, 418 "serial idle", (void *)uart); 419 WARN_ON(ret); 420 } 421 422 + void omap_uart_enable_irqs(int enable) 423 { 424 + int ret; 425 + struct omap_uart_state *uart; 426 + 427 + list_for_each_entry(uart, &uart_list, node) { 428 + if (enable) 429 + ret = request_irq(uart->p->irq, omap_uart_interrupt, 430 + IRQF_SHARED, "serial idle", (void *)uart); 431 + else 432 + free_irq(uart->p->irq, (void *)uart); 433 + } 434 } 435 436 + static ssize_t sleep_timeout_show(struct device *dev, 437 + struct device_attribute *attr, 438 + char *buf) 439 + { 
440 + struct platform_device *pdev = container_of(dev, 441 + struct platform_device, dev); 442 + struct omap_uart_state *uart = container_of(pdev, 443 + struct omap_uart_state, pdev); 444 + 445 + return sprintf(buf, "%u\n", uart->timeout / HZ); 446 + } 447 + 448 + static ssize_t sleep_timeout_store(struct device *dev, 449 + struct device_attribute *attr, 450 const char *buf, size_t n) 451 { 452 + struct platform_device *pdev = container_of(dev, 453 + struct platform_device, dev); 454 + struct omap_uart_state *uart = container_of(pdev, 455 + struct omap_uart_state, pdev); 456 unsigned int value; 457 458 if (sscanf(buf, "%u", &value) != 1) { 459 printk(KERN_ERR "sleep_timeout_store: Invalid value\n"); 460 return -EINVAL; 461 } 462 + 463 + uart->timeout = value * HZ; 464 + if (uart->timeout) 465 + mod_timer(&uart->timer, jiffies + uart->timeout); 466 + else 467 + /* A zero value means disable timeout feature */ 468 + omap_uart_block_sleep(uart); 469 + 470 return n; 471 } 472 473 + DEVICE_ATTR(sleep_timeout, 0644, sleep_timeout_show, sleep_timeout_store); 474 + #define DEV_CREATE_FILE(dev, attr) WARN_ON(device_create_file(dev, attr)) 475 #else 476 static inline void omap_uart_idle_init(struct omap_uart_state *uart) {} 477 + #define DEV_CREATE_FILE(dev, attr) 478 #endif /* CONFIG_PM */ 479 480 + static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS] = { 481 + { 482 + .pdev = { 483 + .name = "serial8250", 484 + .id = PLAT8250_DEV_PLATFORM, 485 + .dev = { 486 + .platform_data = serial_platform_data0, 487 + }, 488 + }, 489 + }, { 490 + .pdev = { 491 + .name = "serial8250", 492 + .id = PLAT8250_DEV_PLATFORM1, 493 + .dev = { 494 + .platform_data = serial_platform_data1, 495 + }, 496 + }, 497 + }, { 498 + .pdev = { 499 + .name = "serial8250", 500 + .id = PLAT8250_DEV_PLATFORM2, 501 + .dev = { 502 + .platform_data = serial_platform_data2, 503 + }, 504 + }, 505 }, 506 }; 507 508 void __init omap_serial_init(void) 509 { 510 + int i; 511 const struct omap_uart_config *info; 512 char name[16]; 513 ··· 496 497 if (info == NULL) 498 return; 499 500 for (i = 0; i < OMAP_MAX_NR_PORTS; i++) { 501 struct omap_uart_state *uart = &omap_uart[i]; 502 + struct platform_device *pdev = &uart->pdev; 503 + struct device *dev = &pdev->dev; 504 + struct plat_serial8250_port *p = dev->platform_data; 505 506 if (!(info->enabled_uarts & (1 << i))) { 507 p->membase = NULL; ··· 531 uart->num = i; 532 p->private_data = uart; 533 uart->p = p; 534 + list_add_tail(&uart->node, &uart_list); 535 + 536 + if (cpu_is_omap44xx()) 537 + p->irq += 32; 538 539 omap_uart_enable_clocks(uart); 540 omap_uart_reset(uart); 541 omap_uart_idle_init(uart); 542 + 543 + if (WARN_ON(platform_device_register(pdev))) 544 + continue; 545 + if ((cpu_is_omap34xx() && uart->padconf) || 546 + (uart->wk_en && uart->wk_mask)) { 547 + device_init_wakeup(dev, true); 548 + DEV_CREATE_FILE(dev, &dev_attr_sleep_timeout); 549 + } 550 } 551 }
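The serial rework also exports omap_uart_enable_irqs(), declared below in mach/serial.h, so the PM code can drop the "serial idle" interrupts across the suspend path. A hedged sketch of how a suspend routine might bracket low-power entry (everything except omap_uart_enable_irqs() is illustrative):

#include <mach/serial.h>

static void example_suspend_uarts(void)
{
	/* Unhook the "serial idle" handlers while wake-up sources are
	 * reprogrammed for suspend... */
	omap_uart_enable_irqs(0);

	/* ... enter the low-power state here ... */

	/* ...and re-request them on the way back up. */
	omap_uart_enable_irqs(1);
}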
+111 -36
arch/arm/mach-omap2/sram34xx.S
··· 36 37 .text 38 39 - /* r4 parameters */ 40 #define SDRC_NO_UNLOCK_DLL 0x0 41 #define SDRC_UNLOCK_DLL 0x1 42 ··· 58 59 /* SDRC_POWER bit settings */ 60 #define SRFRONIDLEREQ_MASK 0x40 61 - #define PWDENA_MASK 0x4 62 63 /* CM_IDLEST1_CORE bit settings */ 64 #define ST_SDRC_MASK 0x2 ··· 70 71 /* 72 * omap3_sram_configure_core_dpll - change DPLL3 M2 divider 73 - * r0 = new SDRC_RFR_CTRL register contents 74 - * r1 = new SDRC_ACTIM_CTRLA register contents 75 - * r2 = new SDRC_ACTIM_CTRLB register contents 76 - * r3 = new M2 divider setting (only 1 and 2 supported right now) 77 - * r4 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for 78 - * SDRC rates < 83MHz 79 - * r5 = number of MPU cycles to wait for SDRC to stabilize after 80 - * reprogramming the SDRC when switching to a slower MPU speed 81 - * r6 = new SDRC_MR_0 register value 82 - * r7 = increasing SDRC rate? (1 = yes, 0 = no) 83 * 84 */ 85 ENTRY(omap3_sram_configure_core_dpll) 86 stmfd sp!, {r1-r12, lr} @ store regs to stack 87 - ldr r4, [sp, #52] @ pull extra args off the stack 88 - ldr r5, [sp, #56] @ load extra args from the stack 89 - ldr r6, [sp, #60] @ load extra args from the stack 90 - ldr r7, [sp, #64] @ load extra args from the stack 91 dsb @ flush buffered writes to interconnect 92 - cmp r7, #1 @ if increasing SDRC clk rate, 93 bleq configure_sdrc @ program the SDRC regs early (for RFR) 94 - cmp r4, #SDRC_UNLOCK_DLL @ set the intended DLL state 95 bleq unlock_dll 96 blne lock_dll 97 bl sdram_in_selfrefresh @ put SDRAM in self refresh, idle SDRC 98 bl configure_core_dpll @ change the DPLL3 M2 divider 99 bl enable_sdrc @ take SDRC out of idle 100 - cmp r4, #SDRC_UNLOCK_DLL @ wait for DLL status to change 101 bleq wait_dll_unlock 102 blne wait_dll_lock 103 - cmp r7, #1 @ if increasing SDRC clk rate, 104 beq return_to_sdram @ return to SDRAM code, otherwise, 105 bl configure_sdrc @ reprogram SDRC regs now 106 - mov r12, r5 107 - bl wait_clk_stable @ wait for SDRC to stabilize 108 return_to_sdram: 109 isb @ prevent speculative exec past here 110 mov r0, #0 @ return value ··· 143 unlock_dll: 144 ldr r11, omap3_sdrc_dlla_ctrl 145 ldr r12, [r11] 146 - and r12, r12, #FIXEDDELAY_MASK 147 orr r12, r12, #FIXEDDELAY_DEFAULT 148 orr r12, r12, #DLLIDLE_MASK 149 str r12, [r11] @ (no OCP barrier needed) ··· 159 ldr r12, [r11] @ read the contents of SDRC_POWER 160 mov r9, r12 @ keep a copy of SDRC_POWER bits 161 orr r12, r12, #SRFRONIDLEREQ_MASK @ enable self refresh on idle 162 - bic r12, r12, #PWDENA_MASK @ clear PWDENA 163 str r12, [r11] @ write back to SDRC_POWER register 164 ldr r12, [r11] @ posted-write barrier for SDRC 165 idle_sdrc: ··· 178 ldr r12, [r11] 179 ldr r10, core_m2_mask_val @ modify m2 for core dpll 180 and r12, r12, r10 181 - orr r12, r12, r3, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT 182 str r12, [r11] 183 ldr r12, [r11] @ posted-write barrier for CM 184 bx lr ··· 216 bne wait_dll_unlock 217 bx lr 218 configure_sdrc: 219 - ldr r11, omap3_sdrc_rfr_ctrl 220 - str r0, [r11] 221 - ldr r11, omap3_sdrc_actim_ctrla 222 - str r1, [r11] 223 - ldr r11, omap3_sdrc_actim_ctrlb 224 - str r2, [r11] 225 ldr r11, omap3_sdrc_mr_0 226 - str r6, [r11] 227 - ldr r6, [r11] @ posted-write barrier for SDRC 228 bx lr 229 230 omap3_sdrc_power: ··· 254 .word OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST) 255 omap3_cm_iclken1_core: 256 .word OMAP34XX_CM_REGADDR(CORE_MOD, CM_ICLKEN1) 257 - omap3_sdrc_rfr_ctrl: 258 .word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_0) 259 - omap3_sdrc_actim_ctrla: 260 .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_0) 261 - 
omap3_sdrc_actim_ctrlb: 262 .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_0) 263 omap3_sdrc_mr_0: 264 .word OMAP34XX_SDRC_REGADDR(SDRC_MR_0) 265 omap3_sdrc_dlla_status: 266 .word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS) 267 omap3_sdrc_dlla_ctrl: ··· 297 298 ENTRY(omap3_sram_configure_core_dpll_sz) 299 .word . - omap3_sram_configure_core_dpll
··· 36 37 .text 38 39 + /* r1 parameters */ 40 #define SDRC_NO_UNLOCK_DLL 0x0 41 #define SDRC_UNLOCK_DLL 0x1 42 ··· 58 59 /* SDRC_POWER bit settings */ 60 #define SRFRONIDLEREQ_MASK 0x40 61 62 /* CM_IDLEST1_CORE bit settings */ 63 #define ST_SDRC_MASK 0x2 ··· 71 72 /* 73 * omap3_sram_configure_core_dpll - change DPLL3 M2 divider 74 * 75 + * Params passed in registers: 76 + * r0 = new M2 divider setting (only 1 and 2 supported right now) 77 + * r1 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for 78 + * SDRC rates < 83MHz 79 + * r2 = number of MPU cycles to wait for SDRC to stabilize after 80 + * reprogramming the SDRC when switching to a slower MPU speed 81 + * r3 = increasing SDRC rate? (1 = yes, 0 = no) 82 + * 83 + * Params passed via the stack. The needed params will be copied in SRAM 84 + * before use by the code in SRAM (SDRAM is not accessible during SDRC 85 + * reconfiguration): 86 + * new SDRC_RFR_CTRL_0 register contents 87 + * new SDRC_ACTIM_CTRL_A_0 register contents 88 + * new SDRC_ACTIM_CTRL_B_0 register contents 89 + * new SDRC_MR_0 register value 90 + * new SDRC_RFR_CTRL_1 register contents 91 + * new SDRC_ACTIM_CTRL_A_1 register contents 92 + * new SDRC_ACTIM_CTRL_B_1 register contents 93 + * new SDRC_MR_1 register value 94 + * 95 + * If the param SDRC_RFR_CTRL_1 is 0, the parameters 96 + * are not programmed into the SDRC CS1 registers 97 */ 98 ENTRY(omap3_sram_configure_core_dpll) 99 stmfd sp!, {r1-r12, lr} @ store regs to stack 100 + 101 + @ pull the extra args off the stack 102 + @ and store them in SRAM 103 + ldr r4, [sp, #52] 104 + str r4, omap_sdrc_rfr_ctrl_0_val 105 + ldr r4, [sp, #56] 106 + str r4, omap_sdrc_actim_ctrl_a_0_val 107 + ldr r4, [sp, #60] 108 + str r4, omap_sdrc_actim_ctrl_b_0_val 109 + ldr r4, [sp, #64] 110 + str r4, omap_sdrc_mr_0_val 111 + ldr r4, [sp, #68] 112 + str r4, omap_sdrc_rfr_ctrl_1_val 113 + cmp r4, #0 @ if SDRC_RFR_CTRL_1 is 0, 114 + beq skip_cs1_params @ do not use cs1 params 115 + ldr r4, [sp, #72] 116 + str r4, omap_sdrc_actim_ctrl_a_1_val 117 + ldr r4, [sp, #76] 118 + str r4, omap_sdrc_actim_ctrl_b_1_val 119 + ldr r4, [sp, #80] 120 + str r4, omap_sdrc_mr_1_val 121 + skip_cs1_params: 122 dsb @ flush buffered writes to interconnect 123 + 124 + cmp r3, #1 @ if increasing SDRC clk rate, 125 bleq configure_sdrc @ program the SDRC regs early (for RFR) 126 + cmp r1, #SDRC_UNLOCK_DLL @ set the intended DLL state 127 bleq unlock_dll 128 blne lock_dll 129 bl sdram_in_selfrefresh @ put SDRAM in self refresh, idle SDRC 130 bl configure_core_dpll @ change the DPLL3 M2 divider 131 + mov r12, r2 132 + bl wait_clk_stable @ wait for SDRC to stabilize 133 bl enable_sdrc @ take SDRC out of idle 134 + cmp r1, #SDRC_UNLOCK_DLL @ wait for DLL status to change 135 bleq wait_dll_unlock 136 blne wait_dll_lock 137 + cmp r3, #1 @ if increasing SDRC clk rate, 138 beq return_to_sdram @ return to SDRAM code, otherwise, 139 bl configure_sdrc @ reprogram SDRC regs now 140 return_to_sdram: 141 isb @ prevent speculative exec past here 142 mov r0, #0 @ return value ··· 113 unlock_dll: 114 ldr r11, omap3_sdrc_dlla_ctrl 115 ldr r12, [r11] 116 + bic r12, r12, #FIXEDDELAY_MASK 117 orr r12, r12, #FIXEDDELAY_DEFAULT 118 orr r12, r12, #DLLIDLE_MASK 119 str r12, [r11] @ (no OCP barrier needed) ··· 129 ldr r12, [r11] @ read the contents of SDRC_POWER 130 mov r9, r12 @ keep a copy of SDRC_POWER bits 131 orr r12, r12, #SRFRONIDLEREQ_MASK @ enable self refresh on idle 132 str r12, [r11] @ write back to SDRC_POWER register 133 ldr r12, [r11] @ posted-write barrier for SDRC 
134 idle_sdrc: ··· 149 ldr r12, [r11] 150 ldr r10, core_m2_mask_val @ modify m2 for core dpll 151 and r12, r12, r10 152 + orr r12, r12, r0, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT 153 str r12, [r11] 154 ldr r12, [r11] @ posted-write barrier for CM 155 bx lr ··· 187 bne wait_dll_unlock 188 bx lr 189 configure_sdrc: 190 + ldr r12, omap_sdrc_rfr_ctrl_0_val @ fetch value from SRAM 191 + ldr r11, omap3_sdrc_rfr_ctrl_0 @ fetch addr from SRAM 192 + str r12, [r11] @ store 193 + ldr r12, omap_sdrc_actim_ctrl_a_0_val 194 + ldr r11, omap3_sdrc_actim_ctrl_a_0 195 + str r12, [r11] 196 + ldr r12, omap_sdrc_actim_ctrl_b_0_val 197 + ldr r11, omap3_sdrc_actim_ctrl_b_0 198 + str r12, [r11] 199 + ldr r12, omap_sdrc_mr_0_val 200 ldr r11, omap3_sdrc_mr_0 201 + str r12, [r11] 202 + ldr r12, omap_sdrc_rfr_ctrl_1_val 203 + cmp r12, #0 @ if SDRC_RFR_CTRL_1 is 0, 204 + beq skip_cs1_prog @ do not program cs1 params 205 + ldr r11, omap3_sdrc_rfr_ctrl_1 206 + str r12, [r11] 207 + ldr r12, omap_sdrc_actim_ctrl_a_1_val 208 + ldr r11, omap3_sdrc_actim_ctrl_a_1 209 + str r12, [r11] 210 + ldr r12, omap_sdrc_actim_ctrl_b_1_val 211 + ldr r11, omap3_sdrc_actim_ctrl_b_1 212 + str r12, [r11] 213 + ldr r12, omap_sdrc_mr_1_val 214 + ldr r11, omap3_sdrc_mr_1 215 + str r12, [r11] 216 + skip_cs1_prog: 217 + ldr r12, [r11] @ posted-write barrier for SDRC 218 bx lr 219 220 omap3_sdrc_power: ··· 206 .word OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST) 207 omap3_cm_iclken1_core: 208 .word OMAP34XX_CM_REGADDR(CORE_MOD, CM_ICLKEN1) 209 + 210 + omap3_sdrc_rfr_ctrl_0: 211 .word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_0) 212 + omap3_sdrc_rfr_ctrl_1: 213 + .word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_1) 214 + omap3_sdrc_actim_ctrl_a_0: 215 .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_0) 216 + omap3_sdrc_actim_ctrl_a_1: 217 + .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_1) 218 + omap3_sdrc_actim_ctrl_b_0: 219 .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_0) 220 + omap3_sdrc_actim_ctrl_b_1: 221 + .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_1) 222 omap3_sdrc_mr_0: 223 .word OMAP34XX_SDRC_REGADDR(SDRC_MR_0) 224 + omap3_sdrc_mr_1: 225 + .word OMAP34XX_SDRC_REGADDR(SDRC_MR_1) 226 + omap_sdrc_rfr_ctrl_0_val: 227 + .word 0xDEADBEEF 228 + omap_sdrc_rfr_ctrl_1_val: 229 + .word 0xDEADBEEF 230 + omap_sdrc_actim_ctrl_a_0_val: 231 + .word 0xDEADBEEF 232 + omap_sdrc_actim_ctrl_a_1_val: 233 + .word 0xDEADBEEF 234 + omap_sdrc_actim_ctrl_b_0_val: 235 + .word 0xDEADBEEF 236 + omap_sdrc_actim_ctrl_b_1_val: 237 + .word 0xDEADBEEF 238 + omap_sdrc_mr_0_val: 239 + .word 0xDEADBEEF 240 + omap_sdrc_mr_1_val: 241 + .word 0xDEADBEEF 242 + 243 omap3_sdrc_dlla_status: 244 .word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS) 245 omap3_sdrc_dlla_ctrl: ··· 223 224 ENTRY(omap3_sram_configure_core_dpll_sz) 225 .word . - omap3_sram_configure_core_dpll 226 +
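Because the SRAM stub now takes the divider/DLL/wait arguments in r0-r3 and the eight SDRC register values on the stack, C code goes through the reordered omap3_configure_core_dpll() wrapper (see the sram.h and sram.c hunks further down). A sketch of a call for a single-chip-select board; the timing values are placeholders and would normally come from omap2_sdrc_get_params(), and passing 0 for SDRC_RFR_CTRL_1 makes the stub skip the CS1 registers:

#include <mach/sram.h>

static void example_reprogram_core_dpll_m2(void)
{
	omap3_configure_core_dpll(
		2,		/* new CORE DPLL M2 divider */
		0,		/* keep the SDRC DLL locked (rate >= 83 MHz) */
		1000,		/* MPU cycles to wait for SDRC to stabilize */
		0,		/* SDRC rate is not increasing */
		0x0003de03,	/* SDRC_RFR_CTRL_0 (placeholder) */
		0xaa9db4c6,	/* SDRC_ACTIM_CTRL_A_0 (placeholder) */
		0x00011517,	/* SDRC_ACTIM_CTRL_B_0 (placeholder) */
		0x00000032,	/* SDRC_MR_0 (placeholder) */
		0, 0, 0, 0);	/* SDRC_RFR_CTRL_1 == 0: CS1 is skipped */
}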
+1 -1
arch/arm/mach-u300/core.c
··· 510 } 511 }; 512 513 - static void u300_init_check_chip(void) 514 { 515 516 u16 val;
··· 510 } 511 }; 512 513 + static void __init u300_init_check_chip(void) 514 { 515 516 u16 val;
+73 -45
arch/arm/mm/init.c
··· 120 printk("%d pages swap cached\n", cached); 121 } 122 123 /* 124 * FIXME: We really want to avoid allocating the bootmap bitmap 125 * over the top of the initrd. Hopefully, this is located towards ··· 236 #endif 237 } 238 239 - static unsigned long __init bootmem_init_node(int node, struct meminfo *mi) 240 { 241 - unsigned long start_pfn, end_pfn, boot_pfn; 242 unsigned int boot_pages; 243 pg_data_t *pgdat; 244 int i; 245 246 - start_pfn = -1UL; 247 - end_pfn = 0; 248 - 249 /* 250 - * Calculate the pfn range, and map the memory banks for this node. 251 */ 252 for_each_nodebank(i, mi, node) { 253 struct membank *bank = &mi->bank[i]; 254 - unsigned long start, end; 255 256 - start = bank_pfn_start(bank); 257 - end = bank_pfn_end(bank); 258 - 259 - if (start_pfn > start) 260 - start_pfn = start; 261 - if (end_pfn < end) 262 - end_pfn = end; 263 - 264 - map_memory_bank(bank); 265 } 266 - 267 - /* 268 - * If there is no memory in this node, ignore it. 269 - */ 270 - if (end_pfn == 0) 271 - return end_pfn; 272 273 /* 274 * Allocate the bootmem bitmap page. ··· 270 271 for_each_nodebank(i, mi, node) { 272 struct membank *bank = &mi->bank[i]; 273 - free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank)); 274 memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank)); 275 } 276 ··· 280 */ 281 reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT, 282 boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); 283 - 284 - return end_pfn; 285 } 286 287 static void __init bootmem_reserve_initrd(int node) ··· 306 static void __init bootmem_free_node(int node, struct meminfo *mi) 307 { 308 unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; 309 - unsigned long start_pfn, end_pfn; 310 - pg_data_t *pgdat = NODE_DATA(node); 311 int i; 312 313 - start_pfn = pgdat->bdata->node_min_pfn; 314 - end_pfn = pgdat->bdata->node_low_pfn; 315 316 /* 317 * initialise the zones within this node. 318 */ 319 memset(zone_size, 0, sizeof(zone_size)); 320 - memset(zhole_size, 0, sizeof(zhole_size)); 321 322 /* 323 * The size of this node has already been determined. If we need 324 * to do anything fancy with the allocation of this memory to the 325 * zones, now is the time to do it. 326 */ 327 - zone_size[0] = end_pfn - start_pfn; 328 329 /* 330 * For each bank in this node, calculate the size of the holes. 331 * holes = node_size - sum(bank_sizes_in_node) 332 */ 333 - zhole_size[0] = zone_size[0]; 334 - for_each_nodebank(i, mi, node) 335 - zhole_size[0] -= bank_pfn_size(&mi->bank[i]); 336 337 /* 338 * Adjust the sizes according to any special requirements for ··· 346 */ 347 arch_adjust_zones(node, zone_size, zhole_size); 348 349 - free_area_init_node(node, zone_size, start_pfn, zhole_size); 350 } 351 352 void __init bootmem_init(void) 353 { 354 struct meminfo *mi = &meminfo; 355 - unsigned long memend_pfn = 0; 356 int node, initrd_node; 357 358 /* ··· 360 */ 361 initrd_node = check_initrd(mi); 362 363 /* 364 * Run through each node initialising the bootmem allocator. 365 */ 366 for_each_node(node) { 367 - unsigned long end_pfn = bootmem_init_node(node, mi); 368 369 /* 370 * Reserve any special node zero regions. ··· 395 */ 396 if (node == initrd_node) 397 bootmem_reserve_initrd(node); 398 - 399 - /* 400 - * Remember the highest memory PFN. 
401 - */ 402 - if (end_pfn > memend_pfn) 403 - memend_pfn = end_pfn; 404 } 405 406 /* ··· 410 for_each_node(node) 411 bootmem_free_node(node, mi); 412 413 - high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1; 414 415 /* 416 * This doesn't seem to be used by the Linux memory manager any ··· 420 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in 421 * the system, not the maximum PFN. 422 */ 423 - max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; 424 } 425 426 static inline int free_area(unsigned long pfn, unsigned long end, char *s)
··· 120 printk("%d pages swap cached\n", cached); 121 } 122 123 + static void __init find_node_limits(int node, struct meminfo *mi, 124 + unsigned long *min, unsigned long *max_low, unsigned long *max_high) 125 + { 126 + int i; 127 + 128 + *min = -1UL; 129 + *max_low = *max_high = 0; 130 + 131 + for_each_nodebank(i, mi, node) { 132 + struct membank *bank = &mi->bank[i]; 133 + unsigned long start, end; 134 + 135 + start = bank_pfn_start(bank); 136 + end = bank_pfn_end(bank); 137 + 138 + if (*min > start) 139 + *min = start; 140 + if (*max_high < end) 141 + *max_high = end; 142 + if (bank->highmem) 143 + continue; 144 + if (*max_low < end) 145 + *max_low = end; 146 + } 147 + } 148 + 149 /* 150 * FIXME: We really want to avoid allocating the bootmap bitmap 151 * over the top of the initrd. Hopefully, this is located towards ··· 210 #endif 211 } 212 213 + static void __init bootmem_init_node(int node, struct meminfo *mi, 214 + unsigned long start_pfn, unsigned long end_pfn) 215 { 216 + unsigned long boot_pfn; 217 unsigned int boot_pages; 218 pg_data_t *pgdat; 219 int i; 220 221 /* 222 + * Map the memory banks for this node. 223 */ 224 for_each_nodebank(i, mi, node) { 225 struct membank *bank = &mi->bank[i]; 226 227 + if (!bank->highmem) 228 + map_memory_bank(bank); 229 } 230 231 /* 232 * Allocate the bootmem bitmap page. ··· 260 261 for_each_nodebank(i, mi, node) { 262 struct membank *bank = &mi->bank[i]; 263 + if (!bank->highmem) 264 + free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank)); 265 memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank)); 266 } 267 ··· 269 */ 270 reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT, 271 boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); 272 } 273 274 static void __init bootmem_reserve_initrd(int node) ··· 297 static void __init bootmem_free_node(int node, struct meminfo *mi) 298 { 299 unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; 300 + unsigned long min, max_low, max_high; 301 int i; 302 303 + find_node_limits(node, mi, &min, &max_low, &max_high); 304 305 /* 306 * initialise the zones within this node. 307 */ 308 memset(zone_size, 0, sizeof(zone_size)); 309 310 /* 311 * The size of this node has already been determined. If we need 312 * to do anything fancy with the allocation of this memory to the 313 * zones, now is the time to do it. 314 */ 315 + zone_size[0] = max_low - min; 316 + #ifdef CONFIG_HIGHMEM 317 + zone_size[ZONE_HIGHMEM] = max_high - max_low; 318 + #endif 319 320 /* 321 * For each bank in this node, calculate the size of the holes. 322 * holes = node_size - sum(bank_sizes_in_node) 323 */ 324 + memcpy(zhole_size, zone_size, sizeof(zhole_size)); 325 + for_each_nodebank(i, mi, node) { 326 + int idx = 0; 327 + #ifdef CONFIG_HIGHMEM 328 + if (mi->bank[i].highmem) 329 + idx = ZONE_HIGHMEM; 330 + #endif 331 + zhole_size[idx] -= bank_pfn_size(&mi->bank[i]); 332 + } 333 334 /* 335 * Adjust the sizes according to any special requirements for ··· 331 */ 332 arch_adjust_zones(node, zone_size, zhole_size); 333 334 + free_area_init_node(node, zone_size, min, zhole_size); 335 } 336 337 void __init bootmem_init(void) 338 { 339 struct meminfo *mi = &meminfo; 340 + unsigned long min, max_low, max_high; 341 int node, initrd_node; 342 343 /* ··· 345 */ 346 initrd_node = check_initrd(mi); 347 348 + max_low = max_high = 0; 349 + 350 /* 351 * Run through each node initialising the bootmem allocator. 
352 */ 353 for_each_node(node) { 354 + unsigned long node_low, node_high; 355 + 356 + find_node_limits(node, mi, &min, &node_low, &node_high); 357 + 358 + if (node_low > max_low) 359 + max_low = node_low; 360 + if (node_high > max_high) 361 + max_high = node_high; 362 + 363 + /* 364 + * If there is no memory in this node, ignore it. 365 + * (We can't have nodes which have no lowmem) 366 + */ 367 + if (node_low == 0) 368 + continue; 369 + 370 + bootmem_init_node(node, mi, min, node_low); 371 372 /* 373 * Reserve any special node zero regions. ··· 362 */ 363 if (node == initrd_node) 364 bootmem_reserve_initrd(node); 365 } 366 367 /* ··· 383 for_each_node(node) 384 bootmem_free_node(node, mi); 385 386 + high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1; 387 388 /* 389 * This doesn't seem to be used by the Linux memory manager any ··· 393 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in 394 * the system, not the maximum PFN. 395 */ 396 + max_low_pfn = max_low - PHYS_PFN_OFFSET; 397 + max_pfn = max_high - PHYS_PFN_OFFSET; 398 } 399 400 static inline int free_area(unsigned long pfn, unsigned long end, char *s)
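As a quick worked example of the new zone sizing (numbers purely illustrative): for a node whose only lowmem bank spans PFNs 0x60000-0x70000 and whose highmem bank spans PFNs 0x70000-0x80000, find_node_limits() yields min = 0x60000, max_low = 0x70000 and max_high = 0x80000, so zone_size[0] = 0x10000 pages of lowmem and zone_size[ZONE_HIGHMEM] = 0x10000 pages of highmem, with any per-bank holes then subtracted from the matching zhole_size entry.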
+8 -1
arch/arm/mm/mmu.c
··· 687 688 static void __init sanity_check_meminfo(void) 689 { 690 - int i, j; 691 692 for (i = 0, j = 0; i < meminfo.nr_banks; i++) { 693 struct membank *bank = &meminfo.bank[j]; 694 *bank = meminfo.bank[i]; 695 696 #ifdef CONFIG_HIGHMEM 697 /* 698 * Split those memory banks which are partially overlapping 699 * the vmalloc area greatly simplifying things later. ··· 720 i++; 721 bank[1].size -= VMALLOC_MIN - __va(bank->start); 722 bank[1].start = __pa(VMALLOC_MIN - 1) + 1; 723 j++; 724 } 725 bank->size = VMALLOC_MIN - __va(bank->start);
··· 687 688 static void __init sanity_check_meminfo(void) 689 { 690 + int i, j, highmem = 0; 691 692 for (i = 0, j = 0; i < meminfo.nr_banks; i++) { 693 struct membank *bank = &meminfo.bank[j]; 694 *bank = meminfo.bank[i]; 695 696 #ifdef CONFIG_HIGHMEM 697 + if (__va(bank->start) > VMALLOC_MIN || 698 + __va(bank->start) < (void *)PAGE_OFFSET) 699 + highmem = 1; 700 + 701 + bank->highmem = highmem; 702 + 703 /* 704 * Split those memory banks which are partially overlapping 705 * the vmalloc area greatly simplifying things later. ··· 714 i++; 715 bank[1].size -= VMALLOC_MIN - __va(bank->start); 716 bank[1].start = __pa(VMALLOC_MIN - 1) + 1; 717 + bank[1].highmem = highmem = 1; 718 j++; 719 } 720 bank->size = VMALLOC_MIN - __va(bank->start);
+4 -4
arch/arm/plat-omap/cpu-omap.c
··· 78 79 /* Ensure desired rate is within allowed range. Some governors 80 * (ondemand) will just pass target_freq=0 to get the minimum. */ 81 - if (target_freq < policy->cpuinfo.min_freq) 82 - target_freq = policy->cpuinfo.min_freq; 83 - if (target_freq > policy->cpuinfo.max_freq) 84 - target_freq = policy->cpuinfo.max_freq; 85 86 freqs.old = omap_getspeed(0); 87 freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
··· 78 79 /* Ensure desired rate is within allowed range. Some governors 80 * (ondemand) will just pass target_freq=0 to get the minimum. */ 81 + if (target_freq < policy->min) 82 + target_freq = policy->min; 83 + if (target_freq > policy->max) 84 + target_freq = policy->max; 85 86 freqs.old = omap_getspeed(0); 87 freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
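A minimal sketch of the distinction this fix relies on: cpuinfo.min_freq/max_freq are the hardware limits, while policy->min/max additionally reflect governor and user-space constraints (scaling_min_freq / scaling_max_freq), so they are the correct bounds to clamp against. Function name is illustrative:

#include <linux/cpufreq.h>

static unsigned int example_clamp_target(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	/* Honour the currently effective policy limits, not just the
	 * hardware range. */
	if (target_freq < policy->min)
		target_freq = policy->min;
	if (target_freq > policy->max)
		target_freq = policy->max;

	return target_freq;
}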
+3 -1
arch/arm/plat-omap/dma.c
··· 946 947 cur_lch = next_lch; 948 } while (next_lch != -1); 949 - } else if (cpu_class_is_omap2()) { 950 /* Errata: Need to write lch even if not using chaining */ 951 dma_write(lch, CLNK_CTRL(lch)); 952 }
··· 946 947 cur_lch = next_lch; 948 } while (next_lch != -1); 949 + } else if (cpu_is_omap242x() || 950 + (cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) { 951 + 952 /* Errata: Need to write lch even if not using chaining */ 953 dma_write(lch, CLNK_CTRL(lch)); 954 }
+95 -32
arch/arm/plat-omap/gpio.c
··· 476 __raw_writel(l, reg); 477 } 478 479 - static int __omap_get_gpio_datain(int gpio) 480 { 481 - struct gpio_bank *bank; 482 void __iomem *reg; 483 484 if (check_gpio(gpio) < 0) 485 return -EINVAL; 486 - bank = get_gpio_bank(gpio); 487 reg = bank->base; 488 switch (bank->method) { 489 #ifdef CONFIG_ARCH_OMAP1 ··· 520 } 521 return (__raw_readl(reg) 522 & (1 << get_gpio_index(gpio))) != 0; 523 } 524 525 #define MOD_REG_BIT(reg, bit_mask, set) \ ··· 1234 struct gpio_bank *bank = get_irq_chip_data(irq); 1235 1236 _set_gpio_irqenable(bank, gpio, 0); 1237 } 1238 1239 static void gpio_unmask_irq(unsigned int irq) ··· 1242 unsigned int gpio = irq - IH_GPIO_BASE; 1243 struct gpio_bank *bank = get_irq_chip_data(irq); 1244 unsigned int irq_mask = 1 << get_gpio_index(gpio); 1245 1246 /* For level-triggered GPIOs, the clearing must be done after 1247 * the HW source is cleared, thus after the handler has run */ ··· 1401 return 0; 1402 } 1403 1404 static int gpio_get(struct gpio_chip *chip, unsigned offset) 1405 { 1406 - return __omap_get_gpio_datain(chip->base + offset); 1407 } 1408 1409 static int gpio_output(struct gpio_chip *chip, unsigned offset, int value) ··· 1976 1977 #include <linux/debugfs.h> 1978 #include <linux/seq_file.h> 1979 - 1980 - static int gpio_is_input(struct gpio_bank *bank, int mask) 1981 - { 1982 - void __iomem *reg = bank->base; 1983 - 1984 - switch (bank->method) { 1985 - case METHOD_MPUIO: 1986 - reg += OMAP_MPUIO_IO_CNTL; 1987 - break; 1988 - case METHOD_GPIO_1510: 1989 - reg += OMAP1510_GPIO_DIR_CONTROL; 1990 - break; 1991 - case METHOD_GPIO_1610: 1992 - reg += OMAP1610_GPIO_DIRECTION; 1993 - break; 1994 - case METHOD_GPIO_730: 1995 - reg += OMAP730_GPIO_DIR_CONTROL; 1996 - break; 1997 - case METHOD_GPIO_850: 1998 - reg += OMAP850_GPIO_DIR_CONTROL; 1999 - break; 2000 - case METHOD_GPIO_24XX: 2001 - reg += OMAP24XX_GPIO_OE; 2002 - break; 2003 - } 2004 - return __raw_readl(reg) & mask; 2005 - } 2006 - 2007 2008 static int dbg_gpio_show(struct seq_file *s, void *unused) 2009 {
··· 476 __raw_writel(l, reg); 477 } 478 479 + static int _get_gpio_datain(struct gpio_bank *bank, int gpio) 480 { 481 void __iomem *reg; 482 483 if (check_gpio(gpio) < 0) 484 return -EINVAL; 485 reg = bank->base; 486 switch (bank->method) { 487 #ifdef CONFIG_ARCH_OMAP1 ··· 522 } 523 return (__raw_readl(reg) 524 & (1 << get_gpio_index(gpio))) != 0; 525 + } 526 + 527 + static int _get_gpio_dataout(struct gpio_bank *bank, int gpio) 528 + { 529 + void __iomem *reg; 530 + 531 + if (check_gpio(gpio) < 0) 532 + return -EINVAL; 533 + reg = bank->base; 534 + 535 + switch (bank->method) { 536 + #ifdef CONFIG_ARCH_OMAP1 537 + case METHOD_MPUIO: 538 + reg += OMAP_MPUIO_OUTPUT; 539 + break; 540 + #endif 541 + #ifdef CONFIG_ARCH_OMAP15XX 542 + case METHOD_GPIO_1510: 543 + reg += OMAP1510_GPIO_DATA_OUTPUT; 544 + break; 545 + #endif 546 + #ifdef CONFIG_ARCH_OMAP16XX 547 + case METHOD_GPIO_1610: 548 + reg += OMAP1610_GPIO_DATAOUT; 549 + break; 550 + #endif 551 + #ifdef CONFIG_ARCH_OMAP730 552 + case METHOD_GPIO_730: 553 + reg += OMAP730_GPIO_DATA_OUTPUT; 554 + break; 555 + #endif 556 + #ifdef CONFIG_ARCH_OMAP850 557 + case METHOD_GPIO_850: 558 + reg += OMAP850_GPIO_DATA_OUTPUT; 559 + break; 560 + #endif 561 + #if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \ 562 + defined(CONFIG_ARCH_OMAP4) 563 + case METHOD_GPIO_24XX: 564 + reg += OMAP24XX_GPIO_DATAOUT; 565 + break; 566 + #endif 567 + default: 568 + return -EINVAL; 569 + } 570 + 571 + return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0; 572 } 573 574 #define MOD_REG_BIT(reg, bit_mask, set) \ ··· 1189 struct gpio_bank *bank = get_irq_chip_data(irq); 1190 1191 _set_gpio_irqenable(bank, gpio, 0); 1192 + _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE); 1193 } 1194 1195 static void gpio_unmask_irq(unsigned int irq) ··· 1196 unsigned int gpio = irq - IH_GPIO_BASE; 1197 struct gpio_bank *bank = get_irq_chip_data(irq); 1198 unsigned int irq_mask = 1 << get_gpio_index(gpio); 1199 + struct irq_desc *desc = irq_to_desc(irq); 1200 + u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK; 1201 + 1202 + if (trigger) 1203 + _set_gpio_triggering(bank, get_gpio_index(gpio), trigger); 1204 1205 /* For level-triggered GPIOs, the clearing must be done after 1206 * the HW source is cleared, thus after the handler has run */ ··· 1350 return 0; 1351 } 1352 1353 + static int gpio_is_input(struct gpio_bank *bank, int mask) 1354 + { 1355 + void __iomem *reg = bank->base; 1356 + 1357 + switch (bank->method) { 1358 + case METHOD_MPUIO: 1359 + reg += OMAP_MPUIO_IO_CNTL; 1360 + break; 1361 + case METHOD_GPIO_1510: 1362 + reg += OMAP1510_GPIO_DIR_CONTROL; 1363 + break; 1364 + case METHOD_GPIO_1610: 1365 + reg += OMAP1610_GPIO_DIRECTION; 1366 + break; 1367 + case METHOD_GPIO_730: 1368 + reg += OMAP730_GPIO_DIR_CONTROL; 1369 + break; 1370 + case METHOD_GPIO_850: 1371 + reg += OMAP850_GPIO_DIR_CONTROL; 1372 + break; 1373 + case METHOD_GPIO_24XX: 1374 + reg += OMAP24XX_GPIO_OE; 1375 + break; 1376 + } 1377 + return __raw_readl(reg) & mask; 1378 + } 1379 + 1380 static int gpio_get(struct gpio_chip *chip, unsigned offset) 1381 { 1382 + struct gpio_bank *bank; 1383 + void __iomem *reg; 1384 + int gpio; 1385 + u32 mask; 1386 + 1387 + gpio = chip->base + offset; 1388 + bank = get_gpio_bank(gpio); 1389 + reg = bank->base; 1390 + mask = 1 << get_gpio_index(gpio); 1391 + 1392 + if (gpio_is_input(bank, mask)) 1393 + return _get_gpio_datain(bank, gpio); 1394 + else 1395 + return _get_gpio_dataout(bank, gpio); 1396 } 1397 1398 static int gpio_output(struct 
gpio_chip *chip, unsigned offset, int value) ··· 1885 1886 #include <linux/debugfs.h> 1887 #include <linux/seq_file.h> 1888 1889 static int dbg_gpio_show(struct seq_file *s, void *unused) 1890 {
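With gpio_get() now reading the data-out register for lines configured as outputs, gpiolib users can read back the value they last drove instead of sampling the (possibly undriven) input level. A small consumer-side sketch using the generic gpiolib calls; the GPIO number is arbitrary:

#include <linux/gpio.h>

static int example_output_readback(void)
{
	int gpio = 170;		/* arbitrary example line */
	int ret;

	ret = gpio_request(gpio, "example");
	if (ret)
		return ret;

	gpio_direction_output(gpio, 1);

	/* Now returns the driven value via the DATAOUT register rather
	 * than the input register. */
	ret = gpio_get_value(gpio);

	gpio_free(gpio);
	return ret;
}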
+2
arch/arm/plat-omap/include/mach/clock.h
··· 20 struct clkops { 21 int (*enable)(struct clk *); 22 void (*disable)(struct clk *); 23 }; 24 25 #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \
··· 20 struct clkops { 21 int (*enable)(struct clk *); 22 void (*disable)(struct clk *); 23 + void (*find_idlest)(struct clk *, void __iomem **, u8 *); 24 + void (*find_companion)(struct clk *, void __iomem **, u8 *); 25 }; 26 27 #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \
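The two new clkops hooks let a clock describe where its companion enable bit and its IDLEST readiness bit live, so the generic enable path can wait for the module (for example via omap2_cm_wait_idlest() above). A hypothetical clkops initializer, assuming stub callbacks; every name and register choice here is illustrative, not from this merge:

#include <mach/clock.h>

static void __iomem *example_cm_idlest_reg;	/* filled in by platform code */
static void __iomem *example_cm_iclken_reg;

static int example_enable(struct clk *clk) { return 0; }
static void example_disable(struct clk *clk) { }

static void example_find_idlest(struct clk *clk, void __iomem **reg, u8 *bit)
{
	*reg = example_cm_idlest_reg;	/* module's IDLEST register */
	*bit = 1;			/* readiness bit to poll */
}

static void example_find_companion(struct clk *clk, void __iomem **reg, u8 *bit)
{
	*reg = example_cm_iclken_reg;	/* companion ICLKEN register */
	*bit = 1;			/* companion enable bit */
}

static const struct clkops example_clkops = {
	.enable		= example_enable,
	.disable	= example_disable,
	.find_idlest	= example_find_idlest,
	.find_companion	= example_find_companion,
};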
-5
arch/arm/plat-omap/include/mach/cpu.h
··· 378 #define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \ 379 cpu_is_omap44xx()) 380 381 - #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \ 382 - defined(CONFIG_ARCH_OMAP4) 383 - 384 /* Various silicon revisions for omap2 */ 385 #define OMAP242X_CLASS 0x24200024 386 #define OMAP2420_REV_ES1_0 0x24200024 ··· 433 434 int omap_chip_is(struct omap_chip_id oci); 435 void omap2_check_revision(void); 436 - 437 - #endif /* defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) */
··· 378 #define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \ 379 cpu_is_omap44xx()) 380 381 /* Various silicon revisions for omap2 */ 382 #define OMAP242X_CLASS 0x24200024 383 #define OMAP2420_REV_ES1_0 0x24200024 ··· 436 437 int omap_chip_is(struct omap_chip_id oci); 438 void omap2_check_revision(void);
+2 -1
arch/arm/plat-omap/include/mach/io.h
··· 228 extern void omap1_init_common_hw(void); 229 230 extern void omap2_map_common_io(void); 231 - extern void omap2_init_common_hw(struct omap_sdrc_params *sp); 232 233 #define __arch_ioremap(p,s,t) omap_ioremap(p,s,t) 234 #define __arch_iounmap(v) omap_iounmap(v)
··· 228 extern void omap1_init_common_hw(void); 229 230 extern void omap2_map_common_io(void); 231 + extern void omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0, 232 + struct omap_sdrc_params *sdrc_cs1); 233 234 #define __arch_ioremap(p,s,t) omap_ioremap(p,s,t) 235 #define __arch_iounmap(v) omap_iounmap(v)
+4
arch/arm/plat-omap/include/mach/mux.h
··· 853 AE5_34XX_GPIO143, 854 H19_34XX_GPIO164_OUT, 855 J25_34XX_GPIO170, 856 }; 857 858 struct omap_mux_cfg {
··· 853 AE5_34XX_GPIO143, 854 H19_34XX_GPIO164_OUT, 855 J25_34XX_GPIO170, 856 + 857 + /* OMAP3 SDRC CKE signals to SDR/DDR ram chips */ 858 + H16_34XX_SDRC_CKE0, 859 + H17_34XX_SDRC_CKE1, 860 }; 861 862 struct omap_mux_cfg {
+1
arch/arm/plat-omap/include/mach/prcm.h
··· 25 26 u32 omap_prcm_get_reset_sources(void); 27 void omap_prcm_arch_reset(char mode); 28 29 #endif 30
··· 25 26 u32 omap_prcm_get_reset_sources(void); 27 void omap_prcm_arch_reset(char mode); 28 + int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, const char *name); 29 30 #endif 31
+9 -2
arch/arm/plat-omap/include/mach/sdrc.h
··· 30 #define SDRC_ACTIM_CTRL_A_0 0x09c 31 #define SDRC_ACTIM_CTRL_B_0 0x0a0 32 #define SDRC_RFR_CTRL_0 0x0a4 33 34 /* 35 * These values represent the number of memory clock cycles between ··· 106 u32 mr; 107 }; 108 109 - void __init omap2_sdrc_init(struct omap_sdrc_params *sp); 110 - struct omap_sdrc_params *omap2_sdrc_get_params(unsigned long r); 111 112 #ifdef CONFIG_ARCH_OMAP2 113
··· 30 #define SDRC_ACTIM_CTRL_A_0 0x09c 31 #define SDRC_ACTIM_CTRL_B_0 0x0a0 32 #define SDRC_RFR_CTRL_0 0x0a4 33 + #define SDRC_MR_1 0x0B4 34 + #define SDRC_ACTIM_CTRL_A_1 0x0C4 35 + #define SDRC_ACTIM_CTRL_B_1 0x0C8 36 + #define SDRC_RFR_CTRL_1 0x0D4 37 38 /* 39 * These values represent the number of memory clock cycles between ··· 102 u32 mr; 103 }; 104 105 + void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0, 106 + struct omap_sdrc_params *sdrc_cs1); 107 + int omap2_sdrc_get_params(unsigned long r, 108 + struct omap_sdrc_params **sdrc_cs0, 109 + struct omap_sdrc_params **sdrc_cs1); 110 111 #ifdef CONFIG_ARCH_OMAP2 112
+1
arch/arm/plat-omap/include/mach/serial.h
··· 59 extern void omap_uart_prepare_suspend(void); 60 extern void omap_uart_prepare_idle(int num); 61 extern void omap_uart_resume_idle(int num); 62 #endif 63 64 #endif
··· 59 extern void omap_uart_prepare_suspend(void); 60 extern void omap_uart_prepare_idle(int num); 61 extern void omap_uart_resume_idle(int num); 62 + extern void omap_uart_enable_irqs(int enable); 63 #endif 64 65 #endif
+12 -11
arch/arm/plat-omap/include/mach/sram.h
··· 21 u32 mem_type); 22 extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass); 23 24 - extern u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, 25 - u32 sdrc_actim_ctrla, 26 - u32 sdrc_actim_ctrlb, u32 m2, 27 - u32 unlock_dll, u32 f, u32 sdrc_mr, 28 - u32 inc); 29 30 /* Do not use these */ 31 extern void omap1_sram_reprogram_clock(u32 ckctl, u32 dpllctl); ··· 60 u32 mem_type); 61 extern unsigned long omap243x_sram_reprogram_sdrc_sz; 62 63 - 64 - extern u32 omap3_sram_configure_core_dpll(u32 sdrc_rfr_ctrl, 65 - u32 sdrc_actim_ctrla, 66 - u32 sdrc_actim_ctrlb, u32 m2, 67 - u32 unlock_dll, u32 f, u32 sdrc_mr, 68 - u32 inc); 69 extern unsigned long omap3_sram_configure_core_dpll_sz; 70 71 #endif
··· 21 u32 mem_type); 22 extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass); 23 24 + extern u32 omap3_configure_core_dpll( 25 + u32 m2, u32 unlock_dll, u32 f, u32 inc, 26 + u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0, 27 + u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0, 28 + u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1, 29 + u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1); 30 31 /* Do not use these */ 32 extern void omap1_sram_reprogram_clock(u32 ckctl, u32 dpllctl); ··· 59 u32 mem_type); 60 extern unsigned long omap243x_sram_reprogram_sdrc_sz; 61 62 + extern u32 omap3_sram_configure_core_dpll( 63 + u32 m2, u32 unlock_dll, u32 f, u32 inc, 64 + u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0, 65 + u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0, 66 + u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1, 67 + u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1); 68 extern unsigned long omap3_sram_configure_core_dpll_sz; 69 70 #endif
+20 -14
arch/arm/plat-omap/sram.c
··· 44 #define OMAP2_SRAM_VA 0xe3000000 45 #define OMAP2_SRAM_PUB_VA (OMAP2_SRAM_VA + 0x800) 46 #define OMAP3_SRAM_PA 0x40200000 47 - #define OMAP3_SRAM_VA 0xd7000000 48 #define OMAP3_SRAM_PUB_PA 0x40208000 49 - #define OMAP3_SRAM_PUB_VA 0xd7008000 50 #define OMAP4_SRAM_PA 0x40200000 /*0x402f0000*/ 51 #define OMAP4_SRAM_VA 0xd7000000 /*0xd70f0000*/ 52 ··· 373 374 #ifdef CONFIG_ARCH_OMAP3 375 376 - static u32 (*_omap3_sram_configure_core_dpll)(u32 sdrc_rfr_ctrl, 377 - u32 sdrc_actim_ctrla, 378 - u32 sdrc_actim_ctrlb, 379 - u32 m2, u32 unlock_dll, 380 - u32 f, u32 sdrc_mr, u32 inc); 381 - u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, u32 sdrc_actim_ctrla, 382 - u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll, 383 - u32 f, u32 sdrc_mr, u32 inc) 384 { 385 BUG_ON(!_omap3_sram_configure_core_dpll); 386 - return _omap3_sram_configure_core_dpll(sdrc_rfr_ctrl, 387 - sdrc_actim_ctrla, 388 - sdrc_actim_ctrlb, m2, 389 - unlock_dll, f, sdrc_mr, inc); 390 } 391 392 /* REVISIT: Should this be same as omap34xx_sram_init() after off-idle? */
··· 44 #define OMAP2_SRAM_VA 0xe3000000 45 #define OMAP2_SRAM_PUB_VA (OMAP2_SRAM_VA + 0x800) 46 #define OMAP3_SRAM_PA 0x40200000 47 + #define OMAP3_SRAM_VA 0xe3000000 48 #define OMAP3_SRAM_PUB_PA 0x40208000 49 + #define OMAP3_SRAM_PUB_VA (OMAP3_SRAM_VA + 0x8000) 50 #define OMAP4_SRAM_PA 0x40200000 /*0x402f0000*/ 51 #define OMAP4_SRAM_VA 0xd7000000 /*0xd70f0000*/ 52 ··· 373 374 #ifdef CONFIG_ARCH_OMAP3 375 376 + static u32 (*_omap3_sram_configure_core_dpll)( 377 + u32 m2, u32 unlock_dll, u32 f, u32 inc, 378 + u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0, 379 + u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0, 380 + u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1, 381 + u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1); 382 + 383 + u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc, 384 + u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0, 385 + u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0, 386 + u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1, 387 + u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1) 388 { 389 BUG_ON(!_omap3_sram_configure_core_dpll); 390 + return _omap3_sram_configure_core_dpll( 391 + m2, unlock_dll, f, inc, 392 + sdrc_rfr_ctrl_0, sdrc_actim_ctrl_a_0, 393 + sdrc_actim_ctrl_b_0, sdrc_mr_0, 394 + sdrc_rfr_ctrl_1, sdrc_actim_ctrl_a_1, 395 + sdrc_actim_ctrl_b_1, sdrc_mr_1); 396 } 397 398 /* REVISIT: Should this be same as omap34xx_sram_init() after off-idle? */
+1 -1
arch/arm/plat-s3c24xx/clock-dclk.c
··· 129 130 /* calculate the MISCCR setting for the clock */ 131 132 - if (parent == &clk_xtal) 133 source = S3C2410_MISCCR_CLK0_MPLL; 134 else if (parent == &clk_upll) 135 source = S3C2410_MISCCR_CLK0_UPLL;
··· 129 130 /* calculate the MISCCR setting for the clock */ 131 132 + if (parent == &clk_mpll) 133 source = S3C2410_MISCCR_CLK0_MPLL; 134 else if (parent == &clk_upll) 135 source = S3C2410_MISCCR_CLK0_UPLL;
+1 -1
drivers/serial/Kconfig
··· 527 528 config SERIAL_S3C6400 529 tristate "Samsung S3C6400/S3C6410 Serial port support" 530 - depends on SERIAL_SAMSUNG && (CPU_S3C600 || CPU_S3C6410) 531 default y 532 help 533 Serial port support for the Samsung S3C6400 and S3C6410
··· 527 528 config SERIAL_S3C6400 529 tristate "Samsung S3C6400/S3C6410 Serial port support" 530 + depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410) 531 default y 532 help 533 Serial port support for the Samsung S3C6400 and S3C6410