Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pm' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

power management changes for omap and imx

A significant part of the changes for these two platforms went into
power management, so they are split out into a separate branch.

* tag 'pm' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (65 commits)
ARM: imx6: remove __CPUINIT annotation from v7_invalidate_l1
ARM: imx6: fix v7_invalidate_l1 by adding I-Cache invalidation
ARM: imx6q: resume PL310 only when CACHE_L2X0 defined
ARM: imx6q: build pm code only when CONFIG_PM selected
ARM: mx5: use generic irq chip pm interface for pm functions on
ARM: omap: pass minimal SoC/board data for UART from dt
arm/dts: Add minimal device tree support for omap2420 and omap2430
omap-serial: Add minimal device tree support
omap-serial: Use default clock speed (48Mhz) if not specified
omap-serial: Get rid of all pdev->id usage
ARM: OMAP2+: hwmod: Add a new flag to handle hwmods left enabled at init
ARM: OMAP4: PRM: use PRCM interrupt handler
ARM: OMAP3: pm: use prcm chain handler
ARM: OMAP: hwmod: add support for selecting mpu_irq for each wakeup pad
ARM: OMAP2+: mux: add support for PAD wakeup interrupts
ARM: OMAP: PRCM: add suspend prepare / finish support
ARM: OMAP: PRCM: add support for chain interrupt handler
ARM: OMAP3/4: PRM: add functions to read pending IRQs, PRM barrier
ARM: OMAP2+: hwmod: Add API to enable IO ring wakeup
ARM: OMAP2+: mux: add wakeup-capable hwmod mux entries to dynamic list
...

+3876 -1185
+10
Documentation/devicetree/bindings/serial/omap_serial.txt
··· 1 + OMAP UART controller 2 + 3 + Required properties: 4 + - compatible : should be "ti,omap2-uart" for OMAP2 controllers 5 + - compatible : should be "ti,omap3-uart" for OMAP3 controllers 6 + - compatible : should be "ti,omap4-uart" for OMAP4 controllers 7 + - ti,hwmods : Must be "uart<n>", n being the instance number (1-based) 8 + 9 + Optional properties: 10 + - clock-frequency : frequency of the clock input to the UART
+67
arch/arm/boot/dts/omap2.dtsi
··· 1 + /* 2 + * Device Tree Source for OMAP2 SoC 3 + * 4 + * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ 5 + * 6 + * This file is licensed under the terms of the GNU General Public License 7 + * version 2. This program is licensed "as is" without any warranty of any 8 + * kind, whether express or implied. 9 + */ 10 + 11 + /include/ "skeleton.dtsi" 12 + 13 + / { 14 + compatible = "ti,omap2430", "ti,omap2420", "ti,omap2"; 15 + 16 + aliases { 17 + serial0 = &uart1; 18 + serial1 = &uart2; 19 + serial2 = &uart3; 20 + }; 21 + 22 + cpus { 23 + cpu@0 { 24 + compatible = "arm,arm1136jf-s"; 25 + }; 26 + }; 27 + 28 + soc { 29 + compatible = "ti,omap-infra"; 30 + mpu { 31 + compatible = "ti,omap2-mpu"; 32 + ti,hwmods = "mpu"; 33 + }; 34 + }; 35 + 36 + ocp { 37 + compatible = "simple-bus"; 38 + #address-cells = <1>; 39 + #size-cells = <1>; 40 + ranges; 41 + ti,hwmods = "l3_main"; 42 + 43 + intc: interrupt-controller@1 { 44 + compatible = "ti,omap2-intc"; 45 + interrupt-controller; 46 + #interrupt-cells = <1>; 47 + }; 48 + 49 + uart1: serial@4806a000 { 50 + compatible = "ti,omap2-uart"; 51 + ti,hwmods = "uart1"; 52 + clock-frequency = <48000000>; 53 + }; 54 + 55 + uart2: serial@4806c000 { 56 + compatible = "ti,omap2-uart"; 57 + ti,hwmods = "uart2"; 58 + clock-frequency = <48000000>; 59 + }; 60 + 61 + uart3: serial@4806e000 { 62 + compatible = "ti,omap2-uart"; 63 + ti,hwmods = "uart3"; 64 + clock-frequency = <48000000>; 65 + }; 66 + }; 67 + };
+31
arch/arm/boot/dts/omap3.dtsi
··· 13 13 / { 14 14 compatible = "ti,omap3430", "ti,omap3"; 15 15 16 + aliases { 17 + serial0 = &uart1; 18 + serial1 = &uart2; 19 + serial2 = &uart3; 20 + serial3 = &uart4; 21 + }; 22 + 16 23 cpus { 17 24 cpu@0 { 18 25 compatible = "arm,cortex-a8"; ··· 65 58 compatible = "ti,omap3-intc"; 66 59 interrupt-controller; 67 60 #interrupt-cells = <1>; 61 + }; 62 + 63 + uart1: serial@0x4806a000 { 64 + compatible = "ti,omap3-uart"; 65 + ti,hwmods = "uart1"; 66 + clock-frequency = <48000000>; 67 + }; 68 + 69 + uart2: serial@0x4806c000 { 70 + compatible = "ti,omap3-uart"; 71 + ti,hwmods = "uart2"; 72 + clock-frequency = <48000000>; 73 + }; 74 + 75 + uart3: serial@0x49020000 { 76 + compatible = "ti,omap3-uart"; 77 + ti,hwmods = "uart3"; 78 + clock-frequency = <48000000>; 79 + }; 80 + 81 + uart4: serial@0x49042000 { 82 + compatible = "ti,omap3-uart"; 83 + ti,hwmods = "uart4"; 84 + clock-frequency = <48000000>; 68 85 }; 69 86 }; 70 87 };
+28
arch/arm/boot/dts/omap4.dtsi
··· 21 21 interrupt-parent = <&gic>; 22 22 23 23 aliases { 24 + serial0 = &uart1; 25 + serial1 = &uart2; 26 + serial2 = &uart3; 27 + serial3 = &uart4; 24 28 }; 25 29 26 30 cpus { ··· 102 98 #interrupt-cells = <1>; 103 99 reg = <0x48241000 0x1000>, 104 100 <0x48240100 0x0100>; 101 + }; 102 + 103 + uart1: serial@0x4806a000 { 104 + compatible = "ti,omap4-uart"; 105 + ti,hwmods = "uart1"; 106 + clock-frequency = <48000000>; 107 + }; 108 + 109 + uart2: serial@0x4806c000 { 110 + compatible = "ti,omap4-uart"; 111 + ti,hwmods = "uart2"; 112 + clock-frequency = <48000000>; 113 + }; 114 + 115 + uart3: serial@0x48020000 { 116 + compatible = "ti,omap4-uart"; 117 + ti,hwmods = "uart3"; 118 + clock-frequency = <48000000>; 119 + }; 120 + 121 + uart4: serial@0x4806e000 { 122 + compatible = "ti,omap4-uart"; 123 + ti,hwmods = "uart4"; 124 + clock-frequency = <48000000>; 105 125 }; 106 126 }; 107 127 };
+1
arch/arm/mach-imx/Kconfig
··· 596 596 597 597 config SOC_IMX6Q 598 598 bool "i.MX6 Quad support" 599 + select ARM_CPU_SUSPEND if PM 599 600 select ARM_GIC 600 601 select CPU_V7 601 602 select HAVE_ARM_SCU
+5 -1
arch/arm/mach-imx/Makefile
··· 70 70 obj-$(CONFIG_SMP) += platsmp.o 71 71 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 72 72 obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o 73 - obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o pm-imx6q.o 73 + obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o 74 + 75 + ifeq ($(CONFIG_PM),y) 76 + obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o 77 + endif
+12 -5
arch/arm/mach-imx/head-v7.S
··· 16 16 #include <asm/hardware/cache-l2x0.h> 17 17 18 18 .section ".text.head", "ax" 19 - __CPUINIT 20 19 21 20 /* 22 21 * The secondary kernel init calls v7_flush_dcache_all before it enables ··· 32 33 */ 33 34 ENTRY(v7_invalidate_l1) 34 35 mov r0, #0 36 + mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 35 37 mcr p15, 2, r0, c0, c0, 0 36 38 mrc p15, 1, r0, c0, c0, 0 37 39 ··· 71 71 ENDPROC(v7_secondary_startup) 72 72 #endif 73 73 74 + #ifdef CONFIG_PM 74 75 /* 75 76 * The following code is located into the .data section. This is to 76 77 * allow phys_l2x0_saved_regs to be accessed with a relative load ··· 80 79 .data 81 80 .align 82 81 82 + #ifdef CONFIG_CACHE_L2X0 83 83 .macro pl310_resume 84 84 ldr r2, phys_l2x0_saved_regs 85 85 ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0 ··· 90 88 str r1, [r0, #L2X0_CTRL] @ re-enable L2 91 89 .endm 92 90 91 + .globl phys_l2x0_saved_regs 92 + phys_l2x0_saved_regs: 93 + .long 0 94 + #else 95 + .macro pl310_resume 96 + .endm 97 + #endif 98 + 93 99 ENTRY(v7_cpu_resume) 94 100 bl v7_invalidate_l1 95 101 pl310_resume 96 102 b cpu_resume 97 103 ENDPROC(v7_cpu_resume) 98 - 99 - .globl phys_l2x0_saved_regs 100 - phys_l2x0_saved_regs: 101 - .long 0 104 + #endif
+2
arch/arm/mach-imx/pm-imx6q.c
··· 64 64 * address of the data structure used by l2x0 core to save registers, 65 65 * and later restore the necessary ones in imx6q resume entry. 66 66 */ 67 + #ifdef CONFIG_CACHE_L2X0 67 68 phys_l2x0_saved_regs = __pa(&l2x0_saved_regs); 69 + #endif 68 70 69 71 suspend_set_ops(&imx6q_pm_ops); 70 72 }
+18 -1
arch/arm/mach-mx5/mm.c
··· 13 13 14 14 #include <linux/mm.h> 15 15 #include <linux/init.h> 16 + #include <linux/clk.h> 16 17 17 18 #include <asm/mach/map.h> 18 19 ··· 22 21 #include <mach/devices-common.h> 23 22 #include <mach/iomux-v3.h> 24 23 24 + static struct clk *gpc_dvfs_clk; 25 + 25 26 static void imx5_idle(void) 26 27 { 27 - if (!need_resched()) 28 + if (!need_resched()) { 29 + /* gpc clock is needed for SRPG */ 30 + if (gpc_dvfs_clk == NULL) { 31 + gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs"); 32 + if (IS_ERR(gpc_dvfs_clk)) 33 + goto err0; 34 + } 35 + clk_enable(gpc_dvfs_clk); 28 36 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); 37 + if (tzic_enable_wake()) 38 + goto err1; 39 + cpu_do_idle(); 40 + err1: 41 + clk_disable(gpc_dvfs_clk); 42 + } 43 + err0: 29 44 local_irq_enable(); 30 45 } 31 46
-3
arch/arm/mach-mx5/system.c
··· 55 55 stop_mode = 1; 56 56 } 57 57 arm_srpgcr |= MXC_SRPGCR_PCR; 58 - 59 - if (tzic_enable_wake(1) != 0) 60 - return; 61 58 break; 62 59 case STOP_POWER_ON: 63 60 ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
+21
arch/arm/mach-omap2/Kconfig
··· 365 365 wish to say no. Selecting yes without understanding what is 366 366 going on could result in system crashes; 367 367 368 + config OMAP4_ERRATA_I688 369 + bool "OMAP4 errata: Async Bridge Corruption" 370 + depends on ARCH_OMAP4 371 + select ARCH_HAS_BARRIERS 372 + help 373 + If a data is stalled inside asynchronous bridge because of back 374 + pressure, it may be accepted multiple times, creating pointer 375 + misalignment that will corrupt next transfers on that data path 376 + until next reset of the system (No recovery procedure once the 377 + issue is hit, the path remains consistently broken). Async bridge 378 + can be found on path between MPU to EMIF and MPU to L3 interconnect. 379 + This situation can happen only when the idle is initiated by a 380 + Master Request Disconnection (which is trigged by software when 381 + executing WFI on CPU). 382 + The work-around for this errata needs all the initiators connected 383 + through async bridge must ensure that data path is properly drained 384 + before issuing WFI. This condition will be met if one Strongly ordered 385 + access is performed to the target right before executing the WFI. 386 + In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained. 387 + IO barrier ensure that there is no synchronisation loss on initiators 388 + operating on both interconnect port simultaneously. 368 389 endmenu 369 390 370 391 endif
+12 -7
arch/arm/mach-omap2/Makefile
··· 11 11 omap_hwmod_common_data.o 12 12 clock-common = clock.o clock_common_data.o \ 13 13 clkt_dpll.o clkt_clksel.o 14 + secure-common = omap-smc.o omap-secure.o 14 15 15 - obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) 16 - obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) 17 - obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) 16 + obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common) 17 + obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) 18 + obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common) 18 19 19 20 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o 20 21 ··· 25 24 obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o 26 25 obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o 27 26 obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o 28 - obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o 27 + obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o \ 28 + sleep44xx.o 29 29 30 30 plus_sec := $(call as-instr,.arch_extension sec,+sec) 31 31 AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec) 32 - AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a$(plus_sec) 32 + AFLAGS_omap-smc.o :=-Wa,-march=armv7-a$(plus_sec) 33 + AFLAGS_sleep44xx.o :=-Wa,-march=armv7-a$(plus_sec) 33 34 34 35 # Functions loaded to SRAM 35 36 obj-$(CONFIG_SOC_OMAP2420) += sram242x.o ··· 65 62 obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o 66 63 obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ 67 64 cpuidle34xx.o 68 - obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o 65 + obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o \ 66 + cpuidle44xx.o 69 67 obj-$(CONFIG_PM_DEBUG) += pm-debug.o 70 68 obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o 71 69 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o ··· 81 77 endif 82 78 83 79 # PRCM 80 + obj-y += prm_common.o 84 81 obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o 85 82 obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \ 86 83 
vc3xxx_data.o vp3xxx_data.o ··· 91 86 obj-$(CONFIG_ARCH_OMAP4) += prcm.o cm2xxx_3xxx.o cminst44xx.o \ 92 87 cm44xx.o prcm_mpu44xx.o \ 93 88 prminst44xx.o vc44xx_data.o \ 94 - vp44xx_data.o 89 + vp44xx_data.o prm44xx.o 95 90 96 91 # OMAP voltage domains 97 92 voltagedomain-common := voltage.o vc.o vp.o
+1 -99
arch/arm/mach-omap2/board-3430sdp.c
··· 475 475 static struct omap_board_mux board_mux[] __initdata = { 476 476 { .reg_offset = OMAP_MUX_TERMINATOR }, 477 477 }; 478 - 479 - static struct omap_device_pad serial1_pads[] __initdata = { 480 - /* 481 - * Note that off output enable is an active low 482 - * signal. So setting this means pin is a 483 - * input enabled in off mode 484 - */ 485 - OMAP_MUX_STATIC("uart1_cts.uart1_cts", 486 - OMAP_PIN_INPUT | 487 - OMAP_PIN_OFF_INPUT_PULLDOWN | 488 - OMAP_OFFOUT_EN | 489 - OMAP_MUX_MODE0), 490 - OMAP_MUX_STATIC("uart1_rts.uart1_rts", 491 - OMAP_PIN_OUTPUT | 492 - OMAP_OFF_EN | 493 - OMAP_MUX_MODE0), 494 - OMAP_MUX_STATIC("uart1_rx.uart1_rx", 495 - OMAP_PIN_INPUT | 496 - OMAP_PIN_OFF_INPUT_PULLDOWN | 497 - OMAP_OFFOUT_EN | 498 - OMAP_MUX_MODE0), 499 - OMAP_MUX_STATIC("uart1_tx.uart1_tx", 500 - OMAP_PIN_OUTPUT | 501 - OMAP_OFF_EN | 502 - OMAP_MUX_MODE0), 503 - }; 504 - 505 - static struct omap_device_pad serial2_pads[] __initdata = { 506 - OMAP_MUX_STATIC("uart2_cts.uart2_cts", 507 - OMAP_PIN_INPUT_PULLUP | 508 - OMAP_PIN_OFF_INPUT_PULLDOWN | 509 - OMAP_OFFOUT_EN | 510 - OMAP_MUX_MODE0), 511 - OMAP_MUX_STATIC("uart2_rts.uart2_rts", 512 - OMAP_PIN_OUTPUT | 513 - OMAP_OFF_EN | 514 - OMAP_MUX_MODE0), 515 - OMAP_MUX_STATIC("uart2_rx.uart2_rx", 516 - OMAP_PIN_INPUT | 517 - OMAP_PIN_OFF_INPUT_PULLDOWN | 518 - OMAP_OFFOUT_EN | 519 - OMAP_MUX_MODE0), 520 - OMAP_MUX_STATIC("uart2_tx.uart2_tx", 521 - OMAP_PIN_OUTPUT | 522 - OMAP_OFF_EN | 523 - OMAP_MUX_MODE0), 524 - }; 525 - 526 - static struct omap_device_pad serial3_pads[] __initdata = { 527 - OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx", 528 - OMAP_PIN_INPUT_PULLDOWN | 529 - OMAP_PIN_OFF_INPUT_PULLDOWN | 530 - OMAP_OFFOUT_EN | 531 - OMAP_MUX_MODE0), 532 - OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd", 533 - OMAP_PIN_OUTPUT | 534 - OMAP_OFF_EN | 535 - OMAP_MUX_MODE0), 536 - OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx", 537 - OMAP_PIN_INPUT | 538 - OMAP_PIN_OFF_INPUT_PULLDOWN | 539 - OMAP_OFFOUT_EN | 540 - 
OMAP_MUX_MODE0), 541 - OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx", 542 - OMAP_PIN_OUTPUT | 543 - OMAP_OFF_EN | 544 - OMAP_MUX_MODE0), 545 - }; 546 - 547 - static struct omap_board_data serial1_data __initdata = { 548 - .id = 0, 549 - .pads = serial1_pads, 550 - .pads_cnt = ARRAY_SIZE(serial1_pads), 551 - }; 552 - 553 - static struct omap_board_data serial2_data __initdata = { 554 - .id = 1, 555 - .pads = serial2_pads, 556 - .pads_cnt = ARRAY_SIZE(serial2_pads), 557 - }; 558 - 559 - static struct omap_board_data serial3_data __initdata = { 560 - .id = 2, 561 - .pads = serial3_pads, 562 - .pads_cnt = ARRAY_SIZE(serial3_pads), 563 - }; 564 - 565 - static inline void board_serial_init(void) 566 - { 567 - omap_serial_init_port(&serial1_data); 568 - omap_serial_init_port(&serial2_data); 569 - omap_serial_init_port(&serial3_data); 570 - } 571 478 #else 572 479 #define board_mux NULL 573 - 574 - static inline void board_serial_init(void) 575 - { 576 - omap_serial_init(); 577 - } 578 480 #endif 579 481 580 482 /* ··· 613 711 else 614 712 gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1; 615 713 omap_ads7846_init(1, gpio_pendown, 310, NULL); 616 - board_serial_init(); 714 + omap_serial_init(); 617 715 omap_sdrc_init(hyb18m512160af6_sdrc_params, NULL); 618 716 usb_musb_init(NULL); 619 717 board_smc91x_init();
+1 -67
arch/arm/mach-omap2/board-4430sdp.c
··· 844 844 { .reg_offset = OMAP_MUX_TERMINATOR }, 845 845 }; 846 846 847 - static struct omap_device_pad serial2_pads[] __initdata = { 848 - OMAP_MUX_STATIC("uart2_cts.uart2_cts", 849 - OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0), 850 - OMAP_MUX_STATIC("uart2_rts.uart2_rts", 851 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 852 - OMAP_MUX_STATIC("uart2_rx.uart2_rx", 853 - OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0), 854 - OMAP_MUX_STATIC("uart2_tx.uart2_tx", 855 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 856 - }; 857 - 858 - static struct omap_device_pad serial3_pads[] __initdata = { 859 - OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx", 860 - OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0), 861 - OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd", 862 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 863 - OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx", 864 - OMAP_PIN_INPUT | OMAP_MUX_MODE0), 865 - OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx", 866 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 867 - }; 868 - 869 - static struct omap_device_pad serial4_pads[] __initdata = { 870 - OMAP_MUX_STATIC("uart4_rx.uart4_rx", 871 - OMAP_PIN_INPUT | OMAP_MUX_MODE0), 872 - OMAP_MUX_STATIC("uart4_tx.uart4_tx", 873 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 874 - }; 875 - 876 - static struct omap_board_data serial2_data __initdata = { 877 - .id = 1, 878 - .pads = serial2_pads, 879 - .pads_cnt = ARRAY_SIZE(serial2_pads), 880 - }; 881 - 882 - static struct omap_board_data serial3_data __initdata = { 883 - .id = 2, 884 - .pads = serial3_pads, 885 - .pads_cnt = ARRAY_SIZE(serial3_pads), 886 - }; 887 - 888 - static struct omap_board_data serial4_data __initdata = { 889 - .id = 3, 890 - .pads = serial4_pads, 891 - .pads_cnt = ARRAY_SIZE(serial4_pads), 892 - }; 893 - 894 - static inline void board_serial_init(void) 895 - { 896 - struct omap_board_data bdata; 897 - bdata.flags = 0; 898 - bdata.pads = NULL; 899 - bdata.pads_cnt = 0; 900 - bdata.id = 0; 901 - /* pass dummy data for UART1 */ 902 - omap_serial_init_port(&bdata); 903 - 904 - 
omap_serial_init_port(&serial2_data); 905 - omap_serial_init_port(&serial3_data); 906 - omap_serial_init_port(&serial4_data); 907 - } 908 847 #else 909 848 #define board_mux NULL 910 - 911 - static inline void board_serial_init(void) 912 - { 913 - omap_serial_init(); 914 - } 915 849 #endif 916 850 917 851 static void omap4_sdp4430_wifi_mux_init(void) ··· 895 961 omap4_i2c_init(); 896 962 omap_sfh7741prox_init(); 897 963 platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices)); 898 - board_serial_init(); 964 + omap_serial_init(); 899 965 omap_sdrc_init(NULL, NULL); 900 966 omap4_sdp4430_wifi_init(); 901 967 omap4_twl6030_hsmmc_init(mmc);
-1
arch/arm/mach-omap2/board-generic.c
··· 69 69 if (node) 70 70 irq_domain_add_simple(node, 0); 71 71 72 - omap_serial_init(); 73 72 omap_sdrc_init(NULL, NULL); 74 73 75 74 of_platform_populate(NULL, omap_dt_match_table, NULL, NULL);
+3 -3
arch/arm/mach-omap2/board-n8x0.c
··· 644 644 bdata.pads_cnt = 0; 645 645 646 646 bdata.id = 0; 647 - omap_serial_init_port(&bdata); 647 + omap_serial_init_port(&bdata, NULL); 648 648 649 649 bdata.id = 1; 650 - omap_serial_init_port(&bdata); 650 + omap_serial_init_port(&bdata, NULL); 651 651 652 652 bdata.id = 2; 653 653 bdata.pads = serial2_pads; 654 654 bdata.pads_cnt = ARRAY_SIZE(serial2_pads); 655 - omap_serial_init_port(&bdata); 655 + omap_serial_init_port(&bdata, NULL); 656 656 } 657 657 658 658 #else
+1 -67
arch/arm/mach-omap2/board-omap4panda.c
··· 364 364 { .reg_offset = OMAP_MUX_TERMINATOR }, 365 365 }; 366 366 367 - static struct omap_device_pad serial2_pads[] __initdata = { 368 - OMAP_MUX_STATIC("uart2_cts.uart2_cts", 369 - OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0), 370 - OMAP_MUX_STATIC("uart2_rts.uart2_rts", 371 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 372 - OMAP_MUX_STATIC("uart2_rx.uart2_rx", 373 - OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0), 374 - OMAP_MUX_STATIC("uart2_tx.uart2_tx", 375 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 376 - }; 377 - 378 - static struct omap_device_pad serial3_pads[] __initdata = { 379 - OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx", 380 - OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0), 381 - OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd", 382 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 383 - OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx", 384 - OMAP_PIN_INPUT | OMAP_MUX_MODE0), 385 - OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx", 386 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 387 - }; 388 - 389 - static struct omap_device_pad serial4_pads[] __initdata = { 390 - OMAP_MUX_STATIC("uart4_rx.uart4_rx", 391 - OMAP_PIN_INPUT | OMAP_MUX_MODE0), 392 - OMAP_MUX_STATIC("uart4_tx.uart4_tx", 393 - OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 394 - }; 395 - 396 - static struct omap_board_data serial2_data __initdata = { 397 - .id = 1, 398 - .pads = serial2_pads, 399 - .pads_cnt = ARRAY_SIZE(serial2_pads), 400 - }; 401 - 402 - static struct omap_board_data serial3_data __initdata = { 403 - .id = 2, 404 - .pads = serial3_pads, 405 - .pads_cnt = ARRAY_SIZE(serial3_pads), 406 - }; 407 - 408 - static struct omap_board_data serial4_data __initdata = { 409 - .id = 3, 410 - .pads = serial4_pads, 411 - .pads_cnt = ARRAY_SIZE(serial4_pads), 412 - }; 413 - 414 - static inline void board_serial_init(void) 415 - { 416 - struct omap_board_data bdata; 417 - bdata.flags = 0; 418 - bdata.pads = NULL; 419 - bdata.pads_cnt = 0; 420 - bdata.id = 0; 421 - /* pass dummy data for UART1 */ 422 - omap_serial_init_port(&bdata); 423 - 424 - 
omap_serial_init_port(&serial2_data); 425 - omap_serial_init_port(&serial3_data); 426 - omap_serial_init_port(&serial4_data); 427 - } 428 367 #else 429 368 #define board_mux NULL 430 - 431 - static inline void board_serial_init(void) 432 - { 433 - omap_serial_init(); 434 - } 435 369 #endif 436 370 437 371 /* Display DVI */ ··· 496 562 omap4_panda_i2c_init(); 497 563 platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); 498 564 platform_device_register(&omap_vwlan_device); 499 - board_serial_init(); 565 + omap_serial_init(); 500 566 omap_sdrc_init(NULL, NULL); 501 567 omap4_twl6030_hsmmc_init(mmc); 502 568 omap4_ehci_init();
+53 -11
arch/arm/mach-omap2/common.h
··· 24 24 25 25 #ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H 26 26 #define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H 27 + #ifndef __ASSEMBLER__ 27 28 28 29 #include <linux/delay.h> 29 30 #include <plat/common.h> 31 + #include <asm/proc-fns.h> 30 32 31 33 #ifdef CONFIG_SOC_OMAP2420 32 34 extern void omap242x_map_common_io(void); ··· 170 168 void omap2_intc_handle_irq(struct pt_regs *regs); 171 169 void omap3_intc_handle_irq(struct pt_regs *regs); 172 170 173 - /* 174 - * wfi used in low power code. Directly opcode is used instead 175 - * of instruction to avoid mulit-omap build break 176 - */ 177 - #ifdef CONFIG_THUMB2_KERNEL 178 - #define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory") 179 - #else 180 - #define do_wfi() \ 181 - __asm__ __volatile__ (".word 0xe320f003" : : : "memory") 171 + #ifdef CONFIG_CACHE_L2X0 172 + extern void __iomem *omap4_get_l2cache_base(void); 182 173 #endif 183 174 184 - #ifdef CONFIG_CACHE_L2X0 185 - extern void __iomem *l2cache_base; 175 + #ifdef CONFIG_SMP 176 + extern void __iomem *omap4_get_scu_base(void); 177 + #else 178 + static inline void __iomem *omap4_get_scu_base(void) 179 + { 180 + return NULL; 181 + } 186 182 #endif 187 183 188 184 extern void __init gic_init_irq(void); 189 185 extern void omap_smc1(u32 fn, u32 arg); 186 + extern void __iomem *omap4_get_sar_ram_base(void); 187 + extern void omap_do_wfi(void); 190 188 191 189 #ifdef CONFIG_SMP 192 190 /* Needed for secondary core boot */ ··· 196 194 extern u32 omap_read_auxcoreboot0(void); 197 195 #endif 198 196 197 + #if defined(CONFIG_SMP) && defined(CONFIG_PM) 198 + extern int omap4_mpuss_init(void); 199 + extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); 200 + extern int omap4_finish_suspend(unsigned long cpu_state); 201 + extern void omap4_cpu_resume(void); 202 + extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); 203 + extern u32 omap4_mpuss_read_prev_context_state(void); 204 + #else 205 + static inline int 
omap4_enter_lowpower(unsigned int cpu, 206 + unsigned int power_state) 207 + { 208 + cpu_do_idle(); 209 + return 0; 210 + } 211 + 212 + static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) 213 + { 214 + cpu_do_idle(); 215 + return 0; 216 + } 217 + 218 + static inline int omap4_mpuss_init(void) 219 + { 220 + return 0; 221 + } 222 + 223 + static inline int omap4_finish_suspend(unsigned long cpu_state) 224 + { 225 + return 0; 226 + } 227 + 228 + static inline void omap4_cpu_resume(void) 229 + {} 230 + 231 + static inline u32 omap4_mpuss_read_prev_context_state(void) 232 + { 233 + return 0; 234 + } 235 + #endif 236 + #endif /* __ASSEMBLER__ */ 199 237 #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
+15 -6
arch/arm/mach-omap2/cpuidle34xx.c
··· 25 25 #include <linux/sched.h> 26 26 #include <linux/cpuidle.h> 27 27 #include <linux/export.h> 28 + #include <linux/cpu_pm.h> 28 29 29 30 #include <plat/prcm.h> 30 31 #include <plat/irqs.h> 31 32 #include "powerdomain.h" 32 33 #include "clockdomain.h" 33 - #include <plat/serial.h> 34 34 35 35 #include "pm.h" 36 36 #include "control.h" ··· 124 124 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); 125 125 } 126 126 127 + /* 128 + * Call idle CPU PM enter notifier chain so that 129 + * VFP context is saved. 130 + */ 131 + if (mpu_state == PWRDM_POWER_OFF) 132 + cpu_pm_enter(); 133 + 127 134 /* Execute ARM wfi */ 128 135 omap_sram_idle(); 136 + 137 + /* 138 + * Call idle CPU PM enter notifier chain to restore 139 + * VFP context. 140 + */ 141 + if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF) 142 + cpu_pm_exit(); 129 143 130 144 /* Re-allow idle for C1 */ 131 145 if (index == 0) { ··· 258 244 u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state; 259 245 struct omap3_idle_statedata *cx; 260 246 int ret; 261 - 262 - if (!omap3_can_sleep()) { 263 - new_state_idx = drv->safe_state_index; 264 - goto select_state; 265 - } 266 247 267 248 /* 268 249 * Prevent idle completely if CAM is active.
+245
arch/arm/mach-omap2/cpuidle44xx.c
··· 1 + /* 2 + * OMAP4 CPU idle Routines 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 + * Rajendra Nayak <rnayak@ti.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/sched.h> 14 + #include <linux/cpuidle.h> 15 + #include <linux/cpu_pm.h> 16 + #include <linux/export.h> 17 + #include <linux/clockchips.h> 18 + 19 + #include <asm/proc-fns.h> 20 + 21 + #include "common.h" 22 + #include "pm.h" 23 + #include "prm.h" 24 + 25 + #ifdef CONFIG_CPU_IDLE 26 + 27 + /* Machine specific information to be recorded in the C-state driver_data */ 28 + struct omap4_idle_statedata { 29 + u32 cpu_state; 30 + u32 mpu_logic_state; 31 + u32 mpu_state; 32 + u8 valid; 33 + }; 34 + 35 + static struct cpuidle_params cpuidle_params_table[] = { 36 + /* C1 - CPU0 ON + CPU1 ON + MPU ON */ 37 + {.exit_latency = 2 + 2 , .target_residency = 5, .valid = 1}, 38 + /* C2- CPU0 OFF + CPU1 OFF + MPU CSWR */ 39 + {.exit_latency = 328 + 440 , .target_residency = 960, .valid = 1}, 40 + /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ 41 + {.exit_latency = 460 + 518 , .target_residency = 1100, .valid = 1}, 42 + }; 43 + 44 + #define OMAP4_NUM_STATES ARRAY_SIZE(cpuidle_params_table) 45 + 46 + struct omap4_idle_statedata omap4_idle_data[OMAP4_NUM_STATES]; 47 + static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd; 48 + 49 + /** 50 + * omap4_enter_idle - Programs OMAP4 to enter the specified state 51 + * @dev: cpuidle device 52 + * @drv: cpuidle driver 53 + * @index: the index of state to be entered 54 + * 55 + * Called from the CPUidle framework to program the device to the 56 + * specified low power state selected by the governor. 57 + * Returns the amount of time spent in the low power state. 
58 + */ 59 + static int omap4_enter_idle(struct cpuidle_device *dev, 60 + struct cpuidle_driver *drv, 61 + int index) 62 + { 63 + struct omap4_idle_statedata *cx = 64 + cpuidle_get_statedata(&dev->states_usage[index]); 65 + struct timespec ts_preidle, ts_postidle, ts_idle; 66 + u32 cpu1_state; 67 + int idle_time; 68 + int new_state_idx; 69 + int cpu_id = smp_processor_id(); 70 + 71 + /* Used to keep track of the total time in idle */ 72 + getnstimeofday(&ts_preidle); 73 + 74 + local_irq_disable(); 75 + local_fiq_disable(); 76 + 77 + /* 78 + * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state. 79 + * This is necessary to honour hardware recommondation 80 + * of triggeing all the possible low power modes once CPU1 is 81 + * out of coherency and in OFF mode. 82 + * Update dev->last_state so that governor stats reflects right 83 + * data. 84 + */ 85 + cpu1_state = pwrdm_read_pwrst(cpu1_pd); 86 + if (cpu1_state != PWRDM_POWER_OFF) { 87 + new_state_idx = drv->safe_state_index; 88 + cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]); 89 + } 90 + 91 + if (index > 0) 92 + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); 93 + 94 + /* 95 + * Call idle CPU PM enter notifier chain so that 96 + * VFP and per CPU interrupt context is saved. 97 + */ 98 + if (cx->cpu_state == PWRDM_POWER_OFF) 99 + cpu_pm_enter(); 100 + 101 + pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); 102 + omap_set_pwrdm_state(mpu_pd, cx->mpu_state); 103 + 104 + /* 105 + * Call idle CPU cluster PM enter notifier chain 106 + * to save GIC and wakeupgen context. 107 + */ 108 + if ((cx->mpu_state == PWRDM_POWER_RET) && 109 + (cx->mpu_logic_state == PWRDM_POWER_OFF)) 110 + cpu_cluster_pm_enter(); 111 + 112 + omap4_enter_lowpower(dev->cpu, cx->cpu_state); 113 + 114 + /* 115 + * Call idle CPU PM exit notifier chain to restore 116 + * VFP and per CPU IRQ context. Only CPU0 state is 117 + * considered since CPU1 is managed by CPU hotplug. 
118 + */ 119 + if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF) 120 + cpu_pm_exit(); 121 + 122 + /* 123 + * Call idle CPU cluster PM exit notifier chain 124 + * to restore GIC and wakeupgen context. 125 + */ 126 + if (omap4_mpuss_read_prev_context_state()) 127 + cpu_cluster_pm_exit(); 128 + 129 + if (index > 0) 130 + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); 131 + 132 + getnstimeofday(&ts_postidle); 133 + ts_idle = timespec_sub(ts_postidle, ts_preidle); 134 + 135 + local_irq_enable(); 136 + local_fiq_enable(); 137 + 138 + idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \ 139 + USEC_PER_SEC; 140 + 141 + /* Update cpuidle counters */ 142 + dev->last_residency = idle_time; 143 + 144 + return index; 145 + } 146 + 147 + DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev); 148 + 149 + struct cpuidle_driver omap4_idle_driver = { 150 + .name = "omap4_idle", 151 + .owner = THIS_MODULE, 152 + }; 153 + 154 + static inline void _fill_cstate(struct cpuidle_driver *drv, 155 + int idx, const char *descr) 156 + { 157 + struct cpuidle_state *state = &drv->states[idx]; 158 + 159 + state->exit_latency = cpuidle_params_table[idx].exit_latency; 160 + state->target_residency = cpuidle_params_table[idx].target_residency; 161 + state->flags = CPUIDLE_FLAG_TIME_VALID; 162 + state->enter = omap4_enter_idle; 163 + sprintf(state->name, "C%d", idx + 1); 164 + strncpy(state->desc, descr, CPUIDLE_DESC_LEN); 165 + } 166 + 167 + static inline struct omap4_idle_statedata *_fill_cstate_usage( 168 + struct cpuidle_device *dev, 169 + int idx) 170 + { 171 + struct omap4_idle_statedata *cx = &omap4_idle_data[idx]; 172 + struct cpuidle_state_usage *state_usage = &dev->states_usage[idx]; 173 + 174 + cx->valid = cpuidle_params_table[idx].valid; 175 + cpuidle_set_statedata(state_usage, cx); 176 + 177 + return cx; 178 + } 179 + 180 + 181 + 182 + /** 183 + * omap4_idle_init - Init routine for OMAP4 idle 184 + * 185 + * Registers the OMAP4 specific cpuidle driver to 
the cpuidle 186 + * framework with the valid set of states. 187 + */ 188 + int __init omap4_idle_init(void) 189 + { 190 + struct omap4_idle_statedata *cx; 191 + struct cpuidle_device *dev; 192 + struct cpuidle_driver *drv = &omap4_idle_driver; 193 + unsigned int cpu_id = 0; 194 + 195 + mpu_pd = pwrdm_lookup("mpu_pwrdm"); 196 + cpu0_pd = pwrdm_lookup("cpu0_pwrdm"); 197 + cpu1_pd = pwrdm_lookup("cpu1_pwrdm"); 198 + if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd)) 199 + return -ENODEV; 200 + 201 + 202 + drv->safe_state_index = -1; 203 + dev = &per_cpu(omap4_idle_dev, cpu_id); 204 + dev->cpu = cpu_id; 205 + 206 + /* C1 - CPU0 ON + CPU1 ON + MPU ON */ 207 + _fill_cstate(drv, 0, "MPUSS ON"); 208 + drv->safe_state_index = 0; 209 + cx = _fill_cstate_usage(dev, 0); 210 + cx->valid = 1; /* C1 is always valid */ 211 + cx->cpu_state = PWRDM_POWER_ON; 212 + cx->mpu_state = PWRDM_POWER_ON; 213 + cx->mpu_logic_state = PWRDM_POWER_RET; 214 + 215 + /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ 216 + _fill_cstate(drv, 1, "MPUSS CSWR"); 217 + cx = _fill_cstate_usage(dev, 1); 218 + cx->cpu_state = PWRDM_POWER_OFF; 219 + cx->mpu_state = PWRDM_POWER_RET; 220 + cx->mpu_logic_state = PWRDM_POWER_RET; 221 + 222 + /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ 223 + _fill_cstate(drv, 2, "MPUSS OSWR"); 224 + cx = _fill_cstate_usage(dev, 2); 225 + cx->cpu_state = PWRDM_POWER_OFF; 226 + cx->mpu_state = PWRDM_POWER_RET; 227 + cx->mpu_logic_state = PWRDM_POWER_OFF; 228 + 229 + drv->state_count = OMAP4_NUM_STATES; 230 + cpuidle_register_driver(&omap4_idle_driver); 231 + 232 + dev->state_count = OMAP4_NUM_STATES; 233 + if (cpuidle_register_device(dev)) { 234 + pr_err("%s: CPUidle register device failed\n", __func__); 235 + return -EIO; 236 + } 237 + 238 + return 0; 239 + } 240 + #else 241 + int __init omap4_idle_init(void) 242 + { 243 + return 0; 244 + } 245 + #endif /* CONFIG_CPU_IDLE */
+31
arch/arm/mach-omap2/include/mach/barriers.h
··· 1 + /* 2 + * OMAP memory barrier header. 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 + * Richard Woodruff <r-woodruff2@ti.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the Free Software 19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 + */ 21 + 22 + #ifndef __MACH_BARRIERS_H 23 + #define __MACH_BARRIERS_H 24 + 25 + extern void omap_bus_sync(void); 26 + 27 + #define rmb() dsb() 28 + #define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0) 29 + #define mb() wmb() 30 + 31 + #endif /* __MACH_BARRIERS_H */
+57
arch/arm/mach-omap2/include/mach/omap-secure.h
··· 1 + /* 2 + * omap-secure.h: OMAP Secure infrastructure header. 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + #ifndef OMAP_ARCH_OMAP_SECURE_H 12 + #define OMAP_ARCH_OMAP_SECURE_H 13 + 14 + /* Monitor error code */ 15 + #define API_HAL_RET_VALUE_NS2S_CONVERSION_ERROR 0xFFFFFFFE 16 + #define API_HAL_RET_VALUE_SERVICE_UNKNWON 0xFFFFFFFF 17 + 18 + /* HAL API error codes */ 19 + #define API_HAL_RET_VALUE_OK 0x00 20 + #define API_HAL_RET_VALUE_FAIL 0x01 21 + 22 + /* Secure HAL API flags */ 23 + #define FLAG_START_CRITICAL 0x4 24 + #define FLAG_IRQFIQ_MASK 0x3 25 + #define FLAG_IRQ_ENABLE 0x2 26 + #define FLAG_FIQ_ENABLE 0x1 27 + #define NO_FLAG 0x0 28 + 29 + /* Maximum Secure memory storage size */ 30 + #define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K) 31 + 32 + /* Secure low power HAL API index */ 33 + #define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a 34 + #define OMAP4_HAL_SAVEHW_INDEX 0x1b 35 + #define OMAP4_HAL_SAVEALL_INDEX 0x1c 36 + #define OMAP4_HAL_SAVEGIC_INDEX 0x1d 37 + 38 + /* Secure Monitor mode APIs */ 39 + #define OMAP4_MON_SCU_PWR_INDEX 0x108 40 + #define OMAP4_MON_L2X0_DBG_CTRL_INDEX 0x100 41 + #define OMAP4_MON_L2X0_CTRL_INDEX 0x102 42 + #define OMAP4_MON_L2X0_AUXCTRL_INDEX 0x109 43 + #define OMAP4_MON_L2X0_PREFETCH_INDEX 0x113 44 + 45 + /* Secure PPA(Primary Protected Application) APIs */ 46 + #define OMAP4_PPA_L2_POR_INDEX 0x23 47 + #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25 48 + 49 + #ifndef __ASSEMBLER__ 50 + 51 + extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, 52 + u32 arg1, u32 arg2, u32 arg3, u32 arg4); 53 + extern u32 omap_smc2(u32 id, u32 falg, u32 pargs); 54 + extern phys_addr_t omap_secure_ram_mempool_base(void); 55 + 56 + #endif /* __ASSEMBLER__ */ 57 + #endif /* 
OMAP_ARCH_OMAP_SECURE_H */
+39
arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
··· 1 + /* 2 + * OMAP WakeupGen header file 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + #ifndef OMAP_ARCH_WAKEUPGEN_H 12 + #define OMAP_ARCH_WAKEUPGEN_H 13 + 14 + #define OMAP_WKG_CONTROL_0 0x00 15 + #define OMAP_WKG_ENB_A_0 0x10 16 + #define OMAP_WKG_ENB_B_0 0x14 17 + #define OMAP_WKG_ENB_C_0 0x18 18 + #define OMAP_WKG_ENB_D_0 0x1c 19 + #define OMAP_WKG_ENB_SECURE_A_0 0x20 20 + #define OMAP_WKG_ENB_SECURE_B_0 0x24 21 + #define OMAP_WKG_ENB_SECURE_C_0 0x28 22 + #define OMAP_WKG_ENB_SECURE_D_0 0x2c 23 + #define OMAP_WKG_ENB_A_1 0x410 24 + #define OMAP_WKG_ENB_B_1 0x414 25 + #define OMAP_WKG_ENB_C_1 0x418 26 + #define OMAP_WKG_ENB_D_1 0x41c 27 + #define OMAP_WKG_ENB_SECURE_A_1 0x420 28 + #define OMAP_WKG_ENB_SECURE_B_1 0x424 29 + #define OMAP_WKG_ENB_SECURE_C_1 0x428 30 + #define OMAP_WKG_ENB_SECURE_D_1 0x42c 31 + #define OMAP_AUX_CORE_BOOT_0 0x800 32 + #define OMAP_AUX_CORE_BOOT_1 0x804 33 + #define OMAP_PTMSYNCREQ_MASK 0xc00 34 + #define OMAP_PTMSYNCREQ_EN 0xc04 35 + #define OMAP_TIMESTAMPCYCLELO 0xc08 36 + #define OMAP_TIMESTAMPCYCLEHI 0xc0c 37 + 38 + extern int __init omap_wakeupgen_init(void); 39 + #endif
+9
arch/arm/mach-omap2/io.c
··· 254 254 .length = L4_EMU_44XX_SIZE, 255 255 .type = MT_DEVICE, 256 256 }, 257 + #ifdef CONFIG_OMAP4_ERRATA_I688 258 + { 259 + .virtual = OMAP4_SRAM_VA, 260 + .pfn = __phys_to_pfn(OMAP4_SRAM_PA), 261 + .length = PAGE_SIZE, 262 + .type = MT_MEMORY_SO, 263 + }, 264 + #endif 265 + 257 266 }; 258 267 #endif 259 268
+87 -2
arch/arm/mach-omap2/mux.c
··· 32 32 #include <linux/debugfs.h> 33 33 #include <linux/seq_file.h> 34 34 #include <linux/uaccess.h> 35 + #include <linux/irq.h> 36 + #include <linux/interrupt.h> 35 37 36 38 #include <asm/system.h> 37 39 ··· 41 39 42 40 #include "control.h" 43 41 #include "mux.h" 42 + #include "prm.h" 44 43 45 44 #define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */ 46 45 #define OMAP_MUX_BASE_SZ 0x5ca ··· 309 306 pad->idle = bpad->idle; 310 307 pad->off = bpad->off; 311 308 312 - if (pad->flags & OMAP_DEVICE_PAD_REMUX) 309 + if (pad->flags & 310 + (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP)) 313 311 nr_pads_dynamic++; 314 312 315 313 pr_debug("%s: Initialized %s\n", __func__, pad->name); ··· 335 331 for (i = 0; i < hmux->nr_pads; i++) { 336 332 struct omap_device_pad *pad = &hmux->pads[i]; 337 333 338 - if (pad->flags & OMAP_DEVICE_PAD_REMUX) { 334 + if (pad->flags & 335 + (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP)) { 339 336 pr_debug("%s: pad %s tagged dynamic\n", 340 337 __func__, pad->name); 341 338 hmux->pads_dynamic[nr_pads_dynamic] = pad; ··· 354 349 pr_err("%s: Could not allocate device mux entry\n", __func__); 355 350 356 351 return NULL; 352 + } 353 + 354 + /** 355 + * omap_hwmod_mux_scan_wakeups - omap hwmod scan wakeup pads 356 + * @hmux: Pads for a hwmod 357 + * @mpu_irqs: MPU irq array for a hwmod 358 + * 359 + * Scans the wakeup status of pads for a single hwmod. If an irq 360 + * array is defined for this mux, the parser will call the registered 361 + * ISRs for corresponding pads, otherwise the parser will stop at the 362 + * first wakeup active pad and return. Returns true if there is a 363 + * pending and non-served wakeup event for the mux, otherwise false. 
364 + */ 365 + static bool omap_hwmod_mux_scan_wakeups(struct omap_hwmod_mux_info *hmux, 366 + struct omap_hwmod_irq_info *mpu_irqs) 367 + { 368 + int i, irq; 369 + unsigned int val; 370 + u32 handled_irqs = 0; 371 + 372 + for (i = 0; i < hmux->nr_pads_dynamic; i++) { 373 + struct omap_device_pad *pad = hmux->pads_dynamic[i]; 374 + 375 + if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP) || 376 + !(pad->idle & OMAP_WAKEUP_EN)) 377 + continue; 378 + 379 + val = omap_mux_read(pad->partition, pad->mux->reg_offset); 380 + if (!(val & OMAP_WAKEUP_EVENT)) 381 + continue; 382 + 383 + if (!hmux->irqs) 384 + return true; 385 + 386 + irq = hmux->irqs[i]; 387 + /* make sure we only handle each irq once */ 388 + if (handled_irqs & 1 << irq) 389 + continue; 390 + 391 + handled_irqs |= 1 << irq; 392 + 393 + generic_handle_irq(mpu_irqs[irq].irq); 394 + } 395 + 396 + return false; 397 + } 398 + 399 + /** 400 + * _omap_hwmod_mux_handle_irq - Process wakeup events for a single hwmod 401 + * 402 + * Checks a single hwmod for every wakeup capable pad to see if there is an 403 + * active wakeup event. If this is the case, call the corresponding ISR. 404 + */ 405 + static int _omap_hwmod_mux_handle_irq(struct omap_hwmod *oh, void *data) 406 + { 407 + if (!oh->mux || !oh->mux->enabled) 408 + return 0; 409 + if (omap_hwmod_mux_scan_wakeups(oh->mux, oh->mpu_irqs)) 410 + generic_handle_irq(oh->mpu_irqs[0].irq); 411 + return 0; 412 + } 413 + 414 + /** 415 + * omap_hwmod_mux_handle_irq - Process pad wakeup irqs. 416 + * 417 + * Calls a function for each registered omap_hwmod to check 418 + * pad wakeup statuses. 
419 + */ 420 + static irqreturn_t omap_hwmod_mux_handle_irq(int irq, void *unused) 421 + { 422 + omap_hwmod_for_each(_omap_hwmod_mux_handle_irq, NULL); 423 + return IRQ_HANDLED; 357 424 } 358 425 359 426 /* Assumes the calling function takes care of locking */ ··· 792 715 static int __init omap_mux_late_init(void) 793 716 { 794 717 struct omap_mux_partition *partition; 718 + int ret; 795 719 796 720 list_for_each_entry(partition, &mux_partitions, node) { 797 721 struct omap_mux_entry *e, *tmp; ··· 812 734 #endif 813 735 } 814 736 } 737 + 738 + ret = request_irq(omap_prcm_event_to_irq("io"), 739 + omap_hwmod_mux_handle_irq, IRQF_SHARED | IRQF_NO_SUSPEND, 740 + "hwmod_io", omap_mux_late_init); 741 + 742 + if (ret) 743 + pr_warning("mux: Failed to setup hwmod io irq %d\n", ret); 815 744 816 745 omap_mux_dbg_init(); 817 746
-5
arch/arm/mach-omap2/omap-headsmp.S
··· 18 18 #include <linux/linkage.h> 19 19 #include <linux/init.h> 20 20 21 - /* Physical address needed since MMU not enabled yet on secondary core */ 22 - #define OMAP4_AUX_CORE_BOOT1_PA 0x48281804 23 - 24 - __INIT 25 - 26 21 /* 27 22 * OMAP4 specific entry point for secondary CPU to jump from ROM 28 23 * code. This routine also provides a holding flag into which
+9 -5
arch/arm/mach-omap2/omap-hotplug.c
··· 22 22 23 23 #include "common.h" 24 24 25 + #include "powerdomain.h" 26 + 25 27 int platform_cpu_kill(unsigned int cpu) 26 28 { 27 29 return 1; ··· 35 33 */ 36 34 void platform_cpu_die(unsigned int cpu) 37 35 { 36 + unsigned int this_cpu; 37 + 38 38 flush_cache_all(); 39 39 dsb(); 40 40 ··· 44 40 * we're ready for shutdown now, so do it 45 41 */ 46 42 if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0) 47 - printk(KERN_CRIT "Secure clear status failed\n"); 43 + pr_err("Secure clear status failed\n"); 48 44 49 45 for (;;) { 50 46 /* 51 - * Execute WFI 47 + * Enter into low power state 52 48 */ 53 - do_wfi(); 54 - 55 - if (omap_read_auxcoreboot0() == cpu) { 49 + omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF); 50 + this_cpu = smp_processor_id(); 51 + if (omap_read_auxcoreboot0() == this_cpu) { 56 52 /* 57 53 * OK, proper wakeup, we're done 58 54 */
+398
arch/arm/mach-omap2/omap-mpuss-lowpower.c
··· 1 + /* 2 + * OMAP MPUSS low power code 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 + * 7 + * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU 8 + * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller, 9 + * CPU0 and CPU1 LPRM modules. 10 + * CPU0, CPU1 and MPUSS each have there own power domain and 11 + * hence multiple low power combinations of MPUSS are possible. 12 + * 13 + * The CPU0 and CPU1 can't support Closed switch Retention (CSWR) 14 + * because the mode is not supported by hw constraints of dormant 15 + * mode. While waking up from the dormant mode, a reset signal 16 + * to the Cortex-A9 processor must be asserted by the external 17 + * power controller. 18 + * 19 + * With architectural inputs and hardware recommendations, only 20 + * below modes are supported from power gain vs latency point of view. 21 + * 22 + * CPU0 CPU1 MPUSS 23 + * ---------------------------------------------- 24 + * ON ON ON 25 + * ON(Inactive) OFF ON(Inactive) 26 + * OFF OFF CSWR 27 + * OFF OFF OSWR 28 + * OFF OFF OFF(Device OFF *TBD) 29 + * ---------------------------------------------- 30 + * 31 + * Note: CPU0 is the master core and it is the last CPU to go down 32 + * and first to wake-up when MPUSS low power states are excercised 33 + * 34 + * 35 + * This program is free software; you can redistribute it and/or modify 36 + * it under the terms of the GNU General Public License version 2 as 37 + * published by the Free Software Foundation. 
38 + */ 39 + 40 + #include <linux/kernel.h> 41 + #include <linux/io.h> 42 + #include <linux/errno.h> 43 + #include <linux/linkage.h> 44 + #include <linux/smp.h> 45 + 46 + #include <asm/cacheflush.h> 47 + #include <asm/tlbflush.h> 48 + #include <asm/smp_scu.h> 49 + #include <asm/system.h> 50 + #include <asm/pgalloc.h> 51 + #include <asm/suspend.h> 52 + #include <asm/hardware/cache-l2x0.h> 53 + 54 + #include <plat/omap44xx.h> 55 + 56 + #include "common.h" 57 + #include "omap4-sar-layout.h" 58 + #include "pm.h" 59 + #include "prcm_mpu44xx.h" 60 + #include "prminst44xx.h" 61 + #include "prcm44xx.h" 62 + #include "prm44xx.h" 63 + #include "prm-regbits-44xx.h" 64 + 65 + #ifdef CONFIG_SMP 66 + 67 + struct omap4_cpu_pm_info { 68 + struct powerdomain *pwrdm; 69 + void __iomem *scu_sar_addr; 70 + void __iomem *wkup_sar_addr; 71 + void __iomem *l2x0_sar_addr; 72 + }; 73 + 74 + static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); 75 + static struct powerdomain *mpuss_pd; 76 + static void __iomem *sar_base; 77 + 78 + /* 79 + * Program the wakeup routine address for the CPU0 and CPU1 80 + * used for OFF or DORMANT wakeup. 
81 + */ 82 + static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr) 83 + { 84 + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 85 + 86 + __raw_writel(addr, pm_info->wkup_sar_addr); 87 + } 88 + 89 + /* 90 + * Set the CPUx powerdomain's previous power state 91 + */ 92 + static inline void set_cpu_next_pwrst(unsigned int cpu_id, 93 + unsigned int power_state) 94 + { 95 + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 96 + 97 + pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); 98 + } 99 + 100 + /* 101 + * Read CPU's previous power state 102 + */ 103 + static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id) 104 + { 105 + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 106 + 107 + return pwrdm_read_prev_pwrst(pm_info->pwrdm); 108 + } 109 + 110 + /* 111 + * Clear the CPUx powerdomain's previous power state 112 + */ 113 + static inline void clear_cpu_prev_pwrst(unsigned int cpu_id) 114 + { 115 + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 116 + 117 + pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); 118 + } 119 + 120 + /* 121 + * Store the SCU power status value to scratchpad memory 122 + */ 123 + static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) 124 + { 125 + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 126 + u32 scu_pwr_st; 127 + 128 + switch (cpu_state) { 129 + case PWRDM_POWER_RET: 130 + scu_pwr_st = SCU_PM_DORMANT; 131 + break; 132 + case PWRDM_POWER_OFF: 133 + scu_pwr_st = SCU_PM_POWEROFF; 134 + break; 135 + case PWRDM_POWER_ON: 136 + case PWRDM_POWER_INACTIVE: 137 + default: 138 + scu_pwr_st = SCU_PM_NORMAL; 139 + break; 140 + } 141 + 142 + __raw_writel(scu_pwr_st, pm_info->scu_sar_addr); 143 + } 144 + 145 + /* Helper functions for MPUSS OSWR */ 146 + static inline void mpuss_clear_prev_logic_pwrst(void) 147 + { 148 + u32 reg; 149 + 150 + reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, 151 + 
OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); 152 + omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION, 153 + OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); 154 + } 155 + 156 + static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id) 157 + { 158 + u32 reg; 159 + 160 + if (cpu_id) { 161 + reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST, 162 + OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET); 163 + omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST, 164 + OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET); 165 + } else { 166 + reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST, 167 + OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET); 168 + omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST, 169 + OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET); 170 + } 171 + } 172 + 173 + /** 174 + * omap4_mpuss_read_prev_context_state: 175 + * Function returns the MPUSS previous context state 176 + */ 177 + u32 omap4_mpuss_read_prev_context_state(void) 178 + { 179 + u32 reg; 180 + 181 + reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, 182 + OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); 183 + reg &= OMAP4430_LOSTCONTEXT_DFF_MASK; 184 + return reg; 185 + } 186 + 187 + /* 188 + * Store the CPU cluster state for L2X0 low power operations. 189 + */ 190 + static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state) 191 + { 192 + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); 193 + 194 + __raw_writel(save_state, pm_info->l2x0_sar_addr); 195 + } 196 + 197 + /* 198 + * Save the L2X0 AUXCTRL and POR value to SAR memory. Its used to 199 + * in every restore MPUSS OFF path. 
200 + */ 201 + #ifdef CONFIG_CACHE_L2X0 202 + static void save_l2x0_context(void) 203 + { 204 + u32 val; 205 + void __iomem *l2x0_base = omap4_get_l2cache_base(); 206 + 207 + val = __raw_readl(l2x0_base + L2X0_AUX_CTRL); 208 + __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET); 209 + val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL); 210 + __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET); 211 + } 212 + #else 213 + static void save_l2x0_context(void) 214 + {} 215 + #endif 216 + 217 + /** 218 + * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function 219 + * The purpose of this function is to manage low power programming 220 + * of OMAP4 MPUSS subsystem 221 + * @cpu : CPU ID 222 + * @power_state: Low power state. 223 + * 224 + * MPUSS states for the context save: 225 + * save_state = 226 + * 0 - Nothing lost and no need to save: MPUSS INACTIVE 227 + * 1 - CPUx L1 and logic lost: MPUSS CSWR 228 + * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR 229 + * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF 230 + */ 231 + int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) 232 + { 233 + unsigned int save_state = 0; 234 + unsigned int wakeup_cpu; 235 + 236 + if (omap_rev() == OMAP4430_REV_ES1_0) 237 + return -ENXIO; 238 + 239 + switch (power_state) { 240 + case PWRDM_POWER_ON: 241 + case PWRDM_POWER_INACTIVE: 242 + save_state = 0; 243 + break; 244 + case PWRDM_POWER_OFF: 245 + save_state = 1; 246 + break; 247 + case PWRDM_POWER_RET: 248 + default: 249 + /* 250 + * CPUx CSWR is invalid hardware state. Also CPUx OSWR 251 + * doesn't make much scense, since logic is lost and $L1 252 + * needs to be cleaned because of coherency. This makes 253 + * CPUx OSWR equivalent to CPUX OFF and hence not supported 254 + */ 255 + WARN_ON(1); 256 + return -ENXIO; 257 + } 258 + 259 + pwrdm_pre_transition(); 260 + 261 + /* 262 + * Check MPUSS next state and save interrupt controller if needed. 
263 + * In MPUSS OSWR or device OFF, interrupt controller contest is lost. 264 + */ 265 + mpuss_clear_prev_logic_pwrst(); 266 + pwrdm_clear_all_prev_pwrst(mpuss_pd); 267 + if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) && 268 + (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF)) 269 + save_state = 2; 270 + 271 + clear_cpu_prev_pwrst(cpu); 272 + cpu_clear_prev_logic_pwrst(cpu); 273 + set_cpu_next_pwrst(cpu, power_state); 274 + set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume)); 275 + scu_pwrst_prepare(cpu, power_state); 276 + l2x0_pwrst_prepare(cpu, save_state); 277 + 278 + /* 279 + * Call low level function with targeted low power state. 280 + */ 281 + cpu_suspend(save_state, omap4_finish_suspend); 282 + 283 + /* 284 + * Restore the CPUx power state to ON otherwise CPUx 285 + * power domain can transitions to programmed low power 286 + * state while doing WFI outside the low powe code. On 287 + * secure devices, CPUx does WFI which can result in 288 + * domain transition 289 + */ 290 + wakeup_cpu = smp_processor_id(); 291 + set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON); 292 + 293 + pwrdm_post_transition(); 294 + 295 + return 0; 296 + } 297 + 298 + /** 299 + * omap4_hotplug_cpu: OMAP4 CPU hotplug entry 300 + * @cpu : CPU ID 301 + * @power_state: CPU low power state. 302 + */ 303 + int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) 304 + { 305 + unsigned int cpu_state = 0; 306 + 307 + if (omap_rev() == OMAP4430_REV_ES1_0) 308 + return -ENXIO; 309 + 310 + if (power_state == PWRDM_POWER_OFF) 311 + cpu_state = 1; 312 + 313 + clear_cpu_prev_pwrst(cpu); 314 + set_cpu_next_pwrst(cpu, power_state); 315 + set_cpu_wakeup_addr(cpu, virt_to_phys(omap_secondary_startup)); 316 + scu_pwrst_prepare(cpu, power_state); 317 + 318 + /* 319 + * CPU never retuns back if targetted power state is OFF mode. 320 + * CPU ONLINE follows normal CPU ONLINE ptah via 321 + * omap_secondary_startup(). 
322 + */ 323 + omap4_finish_suspend(cpu_state); 324 + 325 + set_cpu_next_pwrst(cpu, PWRDM_POWER_ON); 326 + return 0; 327 + } 328 + 329 + 330 + /* 331 + * Initialise OMAP4 MPUSS 332 + */ 333 + int __init omap4_mpuss_init(void) 334 + { 335 + struct omap4_cpu_pm_info *pm_info; 336 + 337 + if (omap_rev() == OMAP4430_REV_ES1_0) { 338 + WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); 339 + return -ENODEV; 340 + } 341 + 342 + sar_base = omap4_get_sar_ram_base(); 343 + 344 + /* Initilaise per CPU PM information */ 345 + pm_info = &per_cpu(omap4_pm_info, 0x0); 346 + pm_info->scu_sar_addr = sar_base + SCU_OFFSET0; 347 + pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET; 348 + pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0; 349 + pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm"); 350 + if (!pm_info->pwrdm) { 351 + pr_err("Lookup failed for CPU0 pwrdm\n"); 352 + return -ENODEV; 353 + } 354 + 355 + /* Clear CPU previous power domain state */ 356 + pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); 357 + cpu_clear_prev_logic_pwrst(0); 358 + 359 + /* Initialise CPU0 power domain state to ON */ 360 + pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); 361 + 362 + pm_info = &per_cpu(omap4_pm_info, 0x1); 363 + pm_info->scu_sar_addr = sar_base + SCU_OFFSET1; 364 + pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET; 365 + pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1; 366 + pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm"); 367 + if (!pm_info->pwrdm) { 368 + pr_err("Lookup failed for CPU1 pwrdm\n"); 369 + return -ENODEV; 370 + } 371 + 372 + /* Clear CPU previous power domain state */ 373 + pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); 374 + cpu_clear_prev_logic_pwrst(1); 375 + 376 + /* Initialise CPU1 power domain state to ON */ 377 + pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); 378 + 379 + mpuss_pd = pwrdm_lookup("mpu_pwrdm"); 380 + if (!mpuss_pd) { 381 + pr_err("Failed to lookup MPUSS power domain\n"); 382 + return -ENODEV; 383 + } 
384 + pwrdm_clear_all_prev_pwrst(mpuss_pd); 385 + mpuss_clear_prev_logic_pwrst(); 386 + 387 + /* Save device type on scratchpad for low level code to use */ 388 + if (omap_type() != OMAP2_DEVICE_TYPE_GP) 389 + __raw_writel(1, sar_base + OMAP_TYPE_OFFSET); 390 + else 391 + __raw_writel(0, sar_base + OMAP_TYPE_OFFSET); 392 + 393 + save_l2x0_context(); 394 + 395 + return 0; 396 + } 397 + 398 + #endif
+81
arch/arm/mach-omap2/omap-secure.c
··· 1 + /* 2 + * OMAP Secure API infrastructure. 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 + * 7 + * 8 + * This program is free software,you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/kernel.h> 14 + #include <linux/init.h> 15 + #include <linux/io.h> 16 + #include <linux/memblock.h> 17 + 18 + #include <asm/cacheflush.h> 19 + 20 + #include <mach/omap-secure.h> 21 + 22 + static phys_addr_t omap_secure_memblock_base; 23 + 24 + /** 25 + * omap_sec_dispatcher: Routine to dispatch low power secure 26 + * service routines 27 + * @idx: The HAL API index 28 + * @flag: The flag indicating criticality of operation 29 + * @nargs: Number of valid arguments out of four. 30 + * @arg1, arg2, arg3 args4: Parameters passed to secure API 31 + * 32 + * Return the non-zero error value on failure. 33 + */ 34 + u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2, 35 + u32 arg3, u32 arg4) 36 + { 37 + u32 ret; 38 + u32 param[5]; 39 + 40 + param[0] = nargs; 41 + param[1] = arg1; 42 + param[2] = arg2; 43 + param[3] = arg3; 44 + param[4] = arg4; 45 + 46 + /* 47 + * Secure API needs physical address 48 + * pointer for the parameters 49 + */ 50 + flush_cache_all(); 51 + outer_clean_range(__pa(param), __pa(param + 5)); 52 + ret = omap_smc2(idx, flag, __pa(param)); 53 + 54 + return ret; 55 + } 56 + 57 + /* Allocate the memory to save secure ram */ 58 + int __init omap_secure_ram_reserve_memblock(void) 59 + { 60 + phys_addr_t paddr; 61 + u32 size = OMAP_SECURE_RAM_STORAGE; 62 + 63 + size = ALIGN(size, SZ_1M); 64 + paddr = memblock_alloc(size, SZ_1M); 65 + if (!paddr) { 66 + pr_err("%s: failed to reserve %x bytes\n", 67 + __func__, size); 68 + return -ENOMEM; 69 + } 70 + memblock_free(paddr, size); 71 + memblock_remove(paddr, size); 72 + 73 + 
omap_secure_memblock_base = paddr; 74 + 75 + return 0; 76 + } 77 + 78 + phys_addr_t omap_secure_ram_mempool_base(void) 79 + { 80 + return omap_secure_memblock_base; 81 + }
+45
arch/arm/mach-omap2/omap-smp.c
··· 24 24 #include <asm/hardware/gic.h> 25 25 #include <asm/smp_scu.h> 26 26 #include <mach/hardware.h> 27 + #include <mach/omap-secure.h> 27 28 28 29 #include "common.h" 30 + 31 + #include "clockdomain.h" 29 32 30 33 /* SCU base address */ 31 34 static void __iomem *scu_base; 32 35 33 36 static DEFINE_SPINLOCK(boot_lock); 34 37 38 + void __iomem *omap4_get_scu_base(void) 39 + { 40 + return scu_base; 41 + } 42 + 35 43 void __cpuinit platform_secondary_init(unsigned int cpu) 36 44 { 45 + /* 46 + * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device. 47 + * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA 48 + * init and for CPU1, a secure PPA API provided. CPU0 must be ON 49 + * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+. 50 + * OMAP443X GP devices- SMP bit isn't accessible. 51 + * OMAP446X GP devices - SMP bit access is enabled on both CPUs. 52 + */ 53 + if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP)) 54 + omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX, 55 + 4, 0, 0, 0, 0, 0); 56 + 37 57 /* 38 58 * If any interrupts are already enabled for the primary 39 59 * core (e.g. timer irq), then they will not have been enabled ··· 70 50 71 51 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) 72 52 { 53 + static struct clockdomain *cpu1_clkdm; 54 + static bool booted; 73 55 /* 74 56 * Set synchronisation state between this boot processor 75 57 * and the secondary one ··· 87 65 omap_modify_auxcoreboot0(0x200, 0xfffffdff); 88 66 flush_cache_all(); 89 67 smp_wmb(); 68 + 69 + if (!cpu1_clkdm) 70 + cpu1_clkdm = clkdm_lookup("mpu1_clkdm"); 71 + 72 + /* 73 + * The SGI(Software Generated Interrupts) are not wakeup capable 74 + * from low power states. This is known limitation on OMAP4 and 75 + * needs to be worked around by using software forced clockdomain 76 + * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to 77 + * software force wakeup. 
The clockdomain is then put back to 78 + * hardware supervised mode. 79 + * More details can be found in OMAP4430 TRM - Version J 80 + * Section : 81 + * 4.3.4.2 Power States of CPU0 and CPU1 82 + */ 83 + if (booted) { 84 + clkdm_wakeup(cpu1_clkdm); 85 + clkdm_allow_idle(cpu1_clkdm); 86 + } else { 87 + dsb_sev(); 88 + booted = true; 89 + } 90 + 90 91 gic_raise_softirq(cpumask_of(cpu), 1); 91 92 92 93 /*
+389
arch/arm/mach-omap2/omap-wakeupgen.c
··· 1 + /* 2 + * OMAP WakeupGen Source file 3 + * 4 + * OMAP WakeupGen is the interrupt controller extension used along 5 + * with ARM GIC to wake the CPU out from low power states on 6 + * external interrupts. It is responsible for generating wakeup 7 + * event from the incoming interrupts and enable bits. It is 8 + * implemented in MPU always ON power domain. During normal operation, 9 + * WakeupGen delivers external interrupts directly to the GIC. 10 + * 11 + * Copyright (C) 2011 Texas Instruments, Inc. 12 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 13 + * 14 + * This program is free software; you can redistribute it and/or modify 15 + * it under the terms of the GNU General Public License version 2 as 16 + * published by the Free Software Foundation. 17 + */ 18 + 19 + #include <linux/kernel.h> 20 + #include <linux/init.h> 21 + #include <linux/io.h> 22 + #include <linux/irq.h> 23 + #include <linux/platform_device.h> 24 + #include <linux/cpu.h> 25 + #include <linux/notifier.h> 26 + #include <linux/cpu_pm.h> 27 + 28 + #include <asm/hardware/gic.h> 29 + 30 + #include <mach/omap-wakeupgen.h> 31 + #include <mach/omap-secure.h> 32 + 33 + #include "omap4-sar-layout.h" 34 + #include "common.h" 35 + 36 + #define NR_REG_BANKS 4 37 + #define MAX_IRQS 128 38 + #define WKG_MASK_ALL 0x00000000 39 + #define WKG_UNMASK_ALL 0xffffffff 40 + #define CPU_ENA_OFFSET 0x400 41 + #define CPU0_ID 0x0 42 + #define CPU1_ID 0x1 43 + 44 + static void __iomem *wakeupgen_base; 45 + static void __iomem *sar_base; 46 + static DEFINE_PER_CPU(u32 [NR_REG_BANKS], irqmasks); 47 + static DEFINE_SPINLOCK(wakeupgen_lock); 48 + static unsigned int irq_target_cpu[NR_IRQS]; 49 + 50 + /* 51 + * Static helper functions. 
52 + */ 53 + static inline u32 wakeupgen_readl(u8 idx, u32 cpu) 54 + { 55 + return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 + 56 + (cpu * CPU_ENA_OFFSET) + (idx * 4)); 57 + } 58 + 59 + static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu) 60 + { 61 + __raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 + 62 + (cpu * CPU_ENA_OFFSET) + (idx * 4)); 63 + } 64 + 65 + static inline void sar_writel(u32 val, u32 offset, u8 idx) 66 + { 67 + __raw_writel(val, sar_base + offset + (idx * 4)); 68 + } 69 + 70 + static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg) 71 + { 72 + u8 i; 73 + 74 + for (i = 0; i < NR_REG_BANKS; i++) 75 + wakeupgen_writel(reg, i, cpu); 76 + } 77 + 78 + static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index) 79 + { 80 + unsigned int spi_irq; 81 + 82 + /* 83 + * PPIs and SGIs are not supported. 84 + */ 85 + if (irq < OMAP44XX_IRQ_GIC_START) 86 + return -EINVAL; 87 + 88 + /* 89 + * Subtract the GIC offset. 90 + */ 91 + spi_irq = irq - OMAP44XX_IRQ_GIC_START; 92 + if (spi_irq > MAX_IRQS) { 93 + pr_err("omap wakeupGen: Invalid IRQ%d\n", irq); 94 + return -EINVAL; 95 + } 96 + 97 + /* 98 + * Each WakeupGen register controls 32 interrupt. 99 + * i.e. 
1 bit per SPI IRQ 100 + */ 101 + *reg_index = spi_irq >> 5; 102 + *bit_posn = spi_irq %= 32; 103 + 104 + return 0; 105 + } 106 + 107 + static void _wakeupgen_clear(unsigned int irq, unsigned int cpu) 108 + { 109 + u32 val, bit_number; 110 + u8 i; 111 + 112 + if (_wakeupgen_get_irq_info(irq, &bit_number, &i)) 113 + return; 114 + 115 + val = wakeupgen_readl(i, cpu); 116 + val &= ~BIT(bit_number); 117 + wakeupgen_writel(val, i, cpu); 118 + } 119 + 120 + static void _wakeupgen_set(unsigned int irq, unsigned int cpu) 121 + { 122 + u32 val, bit_number; 123 + u8 i; 124 + 125 + if (_wakeupgen_get_irq_info(irq, &bit_number, &i)) 126 + return; 127 + 128 + val = wakeupgen_readl(i, cpu); 129 + val |= BIT(bit_number); 130 + wakeupgen_writel(val, i, cpu); 131 + } 132 + 133 + static void _wakeupgen_save_masks(unsigned int cpu) 134 + { 135 + u8 i; 136 + 137 + for (i = 0; i < NR_REG_BANKS; i++) 138 + per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu); 139 + } 140 + 141 + static void _wakeupgen_restore_masks(unsigned int cpu) 142 + { 143 + u8 i; 144 + 145 + for (i = 0; i < NR_REG_BANKS; i++) 146 + wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu); 147 + } 148 + 149 + /* 150 + * Architecture specific Mask extension 151 + */ 152 + static void wakeupgen_mask(struct irq_data *d) 153 + { 154 + unsigned long flags; 155 + 156 + spin_lock_irqsave(&wakeupgen_lock, flags); 157 + _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]); 158 + spin_unlock_irqrestore(&wakeupgen_lock, flags); 159 + } 160 + 161 + /* 162 + * Architecture specific Unmask extension 163 + */ 164 + static void wakeupgen_unmask(struct irq_data *d) 165 + { 166 + unsigned long flags; 167 + 168 + spin_lock_irqsave(&wakeupgen_lock, flags); 169 + _wakeupgen_set(d->irq, irq_target_cpu[d->irq]); 170 + spin_unlock_irqrestore(&wakeupgen_lock, flags); 171 + } 172 + 173 + /* 174 + * Mask or unmask all interrupts on given CPU. 
175 + * 0 = Mask all interrupts on the 'cpu' 176 + * 1 = Unmask all interrupts on the 'cpu' 177 + * Ensure that the initial mask is maintained. This is faster than 178 + * iterating through GIC registers to arrive at the correct masks. 179 + */ 180 + static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set) 181 + { 182 + unsigned long flags; 183 + 184 + spin_lock_irqsave(&wakeupgen_lock, flags); 185 + if (set) { 186 + _wakeupgen_save_masks(cpu); 187 + _wakeupgen_set_all(cpu, WKG_MASK_ALL); 188 + } else { 189 + _wakeupgen_set_all(cpu, WKG_UNMASK_ALL); 190 + _wakeupgen_restore_masks(cpu); 191 + } 192 + spin_unlock_irqrestore(&wakeupgen_lock, flags); 193 + } 194 + 195 + #ifdef CONFIG_CPU_PM 196 + /* 197 + * Save WakeupGen interrupt context in SAR BANK3. Restore is done by 198 + * ROM code. WakeupGen IP is integrated along with GIC to manage the 199 + * interrupt wakeups from CPU low power states. It manages 200 + * masking/unmasking of Shared peripheral interrupts(SPI). So the 201 + * interrupt enable/disable control should be in sync and consistent 202 + * at WakeupGen and GIC so that interrupts are not lost. 203 + */ 204 + static void irq_save_context(void) 205 + { 206 + u32 i, val; 207 + 208 + if (omap_rev() == OMAP4430_REV_ES1_0) 209 + return; 210 + 211 + if (!sar_base) 212 + sar_base = omap4_get_sar_ram_base(); 213 + 214 + for (i = 0; i < NR_REG_BANKS; i++) { 215 + /* Save the CPUx interrupt mask for IRQ 0 to 127 */ 216 + val = wakeupgen_readl(i, 0); 217 + sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i); 218 + val = wakeupgen_readl(i, 1); 219 + sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i); 220 + 221 + /* 222 + * Disable the secure interrupts for CPUx. The restore 223 + * code blindly restores secure and non-secure interrupt 224 + * masks from SAR RAM. Secure interrupts are not suppose 225 + * to be enabled from HLOS. So overwrite the SAR location 226 + * so that the secure interrupt remains disabled. 
227 + */ 228 + sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i); 229 + sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i); 230 + } 231 + 232 + /* Save AuxBoot* registers */ 233 + val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); 234 + __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET); 235 + val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); 236 + __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET); 237 + 238 + /* Save SyncReq generation logic */ 239 + val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); 240 + __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET); 241 + val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); 242 + __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET); 243 + 244 + /* Save SyncReq generation logic */ 245 + val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK); 246 + __raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET); 247 + val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN); 248 + __raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET); 249 + 250 + /* Set the Backup Bit Mask status */ 251 + val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET); 252 + val |= SAR_BACKUP_STATUS_WAKEUPGEN; 253 + __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET); 254 + } 255 + 256 + /* 257 + * Clear WakeupGen SAR backup status. 258 + */ 259 + void irq_sar_clear(void) 260 + { 261 + u32 val; 262 + val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET); 263 + val &= ~SAR_BACKUP_STATUS_WAKEUPGEN; 264 + __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET); 265 + } 266 + 267 + /* 268 + * Save GIC and Wakeupgen interrupt context using secure API 269 + * for HS/EMU devices. 
270 + */ 271 + static void irq_save_secure_context(void) 272 + { 273 + u32 ret; 274 + ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX, 275 + FLAG_START_CRITICAL, 276 + 0, 0, 0, 0, 0); 277 + if (ret != API_HAL_RET_VALUE_OK) 278 + pr_err("GIC and Wakeupgen context save failed\n"); 279 + } 280 + #endif 281 + 282 + #ifdef CONFIG_HOTPLUG_CPU 283 + static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self, 284 + unsigned long action, void *hcpu) 285 + { 286 + unsigned int cpu = (unsigned int)hcpu; 287 + 288 + switch (action) { 289 + case CPU_ONLINE: 290 + wakeupgen_irqmask_all(cpu, 0); 291 + break; 292 + case CPU_DEAD: 293 + wakeupgen_irqmask_all(cpu, 1); 294 + break; 295 + } 296 + return NOTIFY_OK; 297 + } 298 + 299 + static struct notifier_block __refdata irq_hotplug_notifier = { 300 + .notifier_call = irq_cpu_hotplug_notify, 301 + }; 302 + 303 + static void __init irq_hotplug_init(void) 304 + { 305 + register_hotcpu_notifier(&irq_hotplug_notifier); 306 + } 307 + #else 308 + static void __init irq_hotplug_init(void) 309 + {} 310 + #endif 311 + 312 + #ifdef CONFIG_CPU_PM 313 + static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v) 314 + { 315 + switch (cmd) { 316 + case CPU_CLUSTER_PM_ENTER: 317 + if (omap_type() == OMAP2_DEVICE_TYPE_GP) 318 + irq_save_context(); 319 + else 320 + irq_save_secure_context(); 321 + break; 322 + case CPU_CLUSTER_PM_EXIT: 323 + if (omap_type() == OMAP2_DEVICE_TYPE_GP) 324 + irq_sar_clear(); 325 + break; 326 + } 327 + return NOTIFY_OK; 328 + } 329 + 330 + static struct notifier_block irq_notifier_block = { 331 + .notifier_call = irq_notifier, 332 + }; 333 + 334 + static void __init irq_pm_init(void) 335 + { 336 + cpu_pm_register_notifier(&irq_notifier_block); 337 + } 338 + #else 339 + static void __init irq_pm_init(void) 340 + {} 341 + #endif 342 + 343 + /* 344 + * Initialise the wakeupgen module. 
345 + */ 346 + int __init omap_wakeupgen_init(void) 347 + { 348 + int i; 349 + unsigned int boot_cpu = smp_processor_id(); 350 + 351 + /* Not supported on OMAP4 ES1.0 silicon */ 352 + if (omap_rev() == OMAP4430_REV_ES1_0) { 353 + WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n"); 354 + return -EPERM; 355 + } 356 + 357 + /* Static mapping, never released */ 358 + wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K); 359 + if (WARN_ON(!wakeupgen_base)) 360 + return -ENOMEM; 361 + 362 + /* Clear all IRQ bitmasks at wakeupGen level */ 363 + for (i = 0; i < NR_REG_BANKS; i++) { 364 + wakeupgen_writel(0, i, CPU0_ID); 365 + wakeupgen_writel(0, i, CPU1_ID); 366 + } 367 + 368 + /* 369 + * Override GIC architecture specific functions to add 370 + * OMAP WakeupGen interrupt controller along with GIC 371 + */ 372 + gic_arch_extn.irq_mask = wakeupgen_mask; 373 + gic_arch_extn.irq_unmask = wakeupgen_unmask; 374 + gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; 375 + 376 + /* 377 + * FIXME: Add support to set_smp_affinity() once the core 378 + * GIC code has necessary hooks in place. 379 + */ 380 + 381 + /* Associate all the IRQs to boot CPU like GIC init does. */ 382 + for (i = 0; i < NR_IRQS; i++) 383 + irq_target_cpu[i] = boot_cpu; 384 + 385 + irq_hotplug_init(); 386 + irq_pm_init(); 387 + 388 + return 0; 389 + }
+92 -2
arch/arm/mach-omap2/omap4-common.c
··· 15 15 #include <linux/init.h> 16 16 #include <linux/io.h> 17 17 #include <linux/platform_device.h> 18 + #include <linux/memblock.h> 18 19 19 20 #include <asm/hardware/gic.h> 20 21 #include <asm/hardware/cache-l2x0.h> 22 + #include <asm/mach/map.h> 21 23 22 24 #include <plat/irqs.h> 25 + #include <plat/sram.h> 23 26 24 27 #include <mach/hardware.h> 28 + #include <mach/omap-wakeupgen.h> 25 29 26 30 #include "common.h" 31 + #include "omap4-sar-layout.h" 27 32 28 33 #ifdef CONFIG_CACHE_L2X0 29 - void __iomem *l2cache_base; 34 + static void __iomem *l2cache_base; 35 + #endif 36 + 37 + static void __iomem *sar_ram_base; 38 + 39 + #ifdef CONFIG_OMAP4_ERRATA_I688 40 + /* Used to implement memory barrier on DRAM path */ 41 + #define OMAP4_DRAM_BARRIER_VA 0xfe600000 42 + 43 + void __iomem *dram_sync, *sram_sync; 44 + 45 + void omap_bus_sync(void) 46 + { 47 + if (dram_sync && sram_sync) { 48 + writel_relaxed(readl_relaxed(dram_sync), dram_sync); 49 + writel_relaxed(readl_relaxed(sram_sync), sram_sync); 50 + isb(); 51 + } 52 + } 53 + 54 + static int __init omap_barriers_init(void) 55 + { 56 + struct map_desc dram_io_desc[1]; 57 + phys_addr_t paddr; 58 + u32 size; 59 + 60 + if (!cpu_is_omap44xx()) 61 + return -ENODEV; 62 + 63 + size = ALIGN(PAGE_SIZE, SZ_1M); 64 + paddr = memblock_alloc(size, SZ_1M); 65 + if (!paddr) { 66 + pr_err("%s: failed to reserve 4 Kbytes\n", __func__); 67 + return -ENOMEM; 68 + } 69 + memblock_free(paddr, size); 70 + memblock_remove(paddr, size); 71 + dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA; 72 + dram_io_desc[0].pfn = __phys_to_pfn(paddr); 73 + dram_io_desc[0].length = size; 74 + dram_io_desc[0].type = MT_MEMORY_SO; 75 + iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc)); 76 + dram_sync = (void __iomem *) dram_io_desc[0].virtual; 77 + sram_sync = (void __iomem *) OMAP4_SRAM_VA; 78 + 79 + pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n", 80 + (long long) paddr, dram_io_desc[0].virtual); 81 + 82 + return 0; 83 + } 84 + 
core_initcall(omap_barriers_init); 30 85 #endif 31 86 32 87 void __init gic_init_irq(void) ··· 97 42 omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512); 98 43 BUG_ON(!omap_irq_base); 99 44 45 + omap_wakeupgen_init(); 46 + 100 47 gic_init(0, 29, gic_dist_base_addr, omap_irq_base); 101 48 } 102 49 103 50 #ifdef CONFIG_CACHE_L2X0 51 + 52 + void __iomem *omap4_get_l2cache_base(void) 53 + { 54 + return l2cache_base; 55 + } 104 56 105 57 static void omap4_l2x0_disable(void) 106 58 { ··· 134 72 135 73 /* Static mapping, never released */ 136 74 l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); 137 - BUG_ON(!l2cache_base); 75 + if (WARN_ON(!l2cache_base)) 76 + return -ENOMEM; 138 77 139 78 /* 140 79 * 16-way associativity, parity disabled ··· 175 112 } 176 113 early_initcall(omap_l2_cache_init); 177 114 #endif 115 + 116 + void __iomem *omap4_get_sar_ram_base(void) 117 + { 118 + return sar_ram_base; 119 + } 120 + 121 + /* 122 + * SAR RAM used to save and restore the HW 123 + * context in low power modes 124 + */ 125 + static int __init omap4_sar_ram_init(void) 126 + { 127 + /* 128 + * To avoid code running on other OMAPs in 129 + * multi-omap builds 130 + */ 131 + if (!cpu_is_omap44xx()) 132 + return -ENOMEM; 133 + 134 + /* Static mapping, never released */ 135 + sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K); 136 + if (WARN_ON(!sar_ram_base)) 137 + return -ENOMEM; 138 + 139 + return 0; 140 + } 141 + early_initcall(omap4_sar_ram_init);
+50
arch/arm/mach-omap2/omap4-sar-layout.h
··· 1 + /* 2 + * omap4-sar-layout.h: OMAP4 SAR RAM layout header file 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + #ifndef OMAP_ARCH_OMAP4_SAR_LAYOUT_H 12 + #define OMAP_ARCH_OMAP4_SAR_LAYOUT_H 13 + 14 + /* 15 + * SAR BANK offsets from base address OMAP44XX_SAR_RAM_BASE 16 + */ 17 + #define SAR_BANK1_OFFSET 0x0000 18 + #define SAR_BANK2_OFFSET 0x1000 19 + #define SAR_BANK3_OFFSET 0x2000 20 + #define SAR_BANK4_OFFSET 0x3000 21 + 22 + /* Scratch pad memory offsets from SAR_BANK1 */ 23 + #define SCU_OFFSET0 0xd00 24 + #define SCU_OFFSET1 0xd04 25 + #define OMAP_TYPE_OFFSET 0xd10 26 + #define L2X0_SAVE_OFFSET0 0xd14 27 + #define L2X0_SAVE_OFFSET1 0xd18 28 + #define L2X0_AUXCTRL_OFFSET 0xd1c 29 + #define L2X0_PREFETCH_CTRL_OFFSET 0xd20 30 + 31 + /* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */ 32 + #define CPU0_WAKEUP_NS_PA_ADDR_OFFSET 0xa04 33 + #define CPU1_WAKEUP_NS_PA_ADDR_OFFSET 0xa08 34 + 35 + #define SAR_BACKUP_STATUS_OFFSET (SAR_BANK3_OFFSET + 0x500) 36 + #define SAR_SECURE_RAM_SIZE_OFFSET (SAR_BANK3_OFFSET + 0x504) 37 + #define SAR_SECRAM_SAVED_AT_OFFSET (SAR_BANK3_OFFSET + 0x508) 38 + 39 + /* WakeUpGen save restore offset from OMAP44XX_SAR_RAM_BASE */ 40 + #define WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x684) 41 + #define WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x694) 42 + #define WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6a4) 43 + #define WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6b4) 44 + #define AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0x6c4) 45 + #define AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x6c8) 46 + #define PTMSYNCREQ_MASK_OFFSET (SAR_BANK3_OFFSET + 0x6cc) 47 + #define PTMSYNCREQ_EN_OFFSET (SAR_BANK3_OFFSET + 0x6d0) 48 + #define 
SAR_BACKUP_STATUS_WAKEUPGEN 0x10 49 + 50 + #endif
+23
arch/arm/mach-omap2/omap44xx-smc.S arch/arm/mach-omap2/omap-smc.S
··· 31 31 ldmfd sp!, {r2-r12, pc} 32 32 ENDPROC(omap_smc1) 33 33 34 + /** 35 + * u32 omap_smc2(u32 id, u32 flag, u32 pargs) 36 + * Low level common routine for secure HAL and PPA APIs. 37 + * @id: Application ID of HAL APIs 38 + * @flag: Flag to indicate the criticality of operation 39 + * @pargs: Physical address of parameter list starting 40 + * with number of parameters 41 + */ 42 + ENTRY(omap_smc2) 43 + stmfd sp!, {r4-r12, lr} 44 + mov r3, r2 45 + mov r2, r1 46 + mov r1, #0x0 @ Process ID 47 + mov r6, #0xff 48 + mov r12, #0x00 @ Secure Service ID 49 + mov r7, #0 50 + mcr p15, 0, r7, c7, c5, 6 51 + dsb 52 + dmb 53 + smc #0 54 + ldmfd sp!, {r4-r12, pc} 55 + ENDPROC(omap_smc2) 56 + 34 57 ENTRY(omap_modify_auxcoreboot0) 35 58 stmfd sp!, {r1-r12, lr} 36 59 ldr r12, =0x104
+124 -1
arch/arm/mach-omap2/omap_hwmod.c
··· 136 136 #include <linux/list.h> 137 137 #include <linux/mutex.h> 138 138 #include <linux/spinlock.h> 139 + #include <linux/slab.h> 139 140 140 141 #include "common.h" 141 142 #include <plat/cpu.h> ··· 379 378 *v |= autoidle << autoidle_shift; 380 379 381 380 return 0; 381 + } 382 + 383 + /** 384 + * _set_idle_ioring_wakeup - enable/disable IO pad wakeup on hwmod idle for mux 385 + * @oh: struct omap_hwmod * 386 + * @set_wake: bool value indicating to set (true) or clear (false) wakeup enable 387 + * 388 + * Set or clear the I/O pad wakeup flag in the mux entries for the 389 + * hwmod @oh. This function changes the @oh->mux->pads_dynamic array 390 + * in memory. If the hwmod is currently idled, and the new idle 391 + * values don't match the previous ones, this function will also 392 + * update the SCM PADCTRL registers. Otherwise, if the hwmod is not 393 + * currently idled, this function won't touch the hardware: the new 394 + * mux settings are written to the SCM PADCTRL registers when the 395 + * hwmod is idled. No return value. 
396 + */ 397 + static void _set_idle_ioring_wakeup(struct omap_hwmod *oh, bool set_wake) 398 + { 399 + struct omap_device_pad *pad; 400 + bool change = false; 401 + u16 prev_idle; 402 + int j; 403 + 404 + if (!oh->mux || !oh->mux->enabled) 405 + return; 406 + 407 + for (j = 0; j < oh->mux->nr_pads_dynamic; j++) { 408 + pad = oh->mux->pads_dynamic[j]; 409 + 410 + if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP)) 411 + continue; 412 + 413 + prev_idle = pad->idle; 414 + 415 + if (set_wake) 416 + pad->idle |= OMAP_WAKEUP_EN; 417 + else 418 + pad->idle &= ~OMAP_WAKEUP_EN; 419 + 420 + if (prev_idle != pad->idle) 421 + change = true; 422 + } 423 + 424 + if (change && oh->_state == _HWMOD_STATE_IDLE) 425 + omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE); 382 426 } 383 427 384 428 /** ··· 1495 1449 1496 1450 pr_debug("omap_hwmod: %s: enabling\n", oh->name); 1497 1451 1452 + /* 1453 + * hwmods with HWMOD_INIT_NO_IDLE flag set are left 1454 + * in enabled state at init. 1455 + * Now that someone is really trying to enable them, 1456 + * just ensure that the hwmod mux is set. 1457 + */ 1458 + if (oh->_int_flags & _HWMOD_SKIP_ENABLE) { 1459 + /* 1460 + * If the caller has mux data populated, do the mux'ing 1461 + * which wouldn't have been done as part of the _enable() 1462 + * done during setup. 
1463 + */ 1464 + if (oh->mux) 1465 + omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED); 1466 + 1467 + oh->_int_flags &= ~_HWMOD_SKIP_ENABLE; 1468 + return 0; 1469 + } 1470 + 1498 1471 if (oh->_state != _HWMOD_STATE_INITIALIZED && 1499 1472 oh->_state != _HWMOD_STATE_IDLE && 1500 1473 oh->_state != _HWMOD_STATE_DISABLED) { ··· 1809 1744 * it should be set by the core code as a runtime flag during startup 1810 1745 */ 1811 1746 if ((oh->flags & HWMOD_INIT_NO_IDLE) && 1812 - (postsetup_state == _HWMOD_STATE_IDLE)) 1747 + (postsetup_state == _HWMOD_STATE_IDLE)) { 1748 + oh->_int_flags |= _HWMOD_SKIP_ENABLE; 1813 1749 postsetup_state = _HWMOD_STATE_ENABLED; 1750 + } 1814 1751 1815 1752 if (postsetup_state == _HWMOD_STATE_IDLE) 1816 1753 _idle(oh); ··· 2483 2416 v = oh->_sysc_cache; 2484 2417 _enable_wakeup(oh, &v); 2485 2418 _write_sysconfig(v, oh); 2419 + _set_idle_ioring_wakeup(oh, true); 2486 2420 spin_unlock_irqrestore(&oh->_lock, flags); 2487 2421 2488 2422 return 0; ··· 2514 2446 v = oh->_sysc_cache; 2515 2447 _disable_wakeup(oh, &v); 2516 2448 _write_sysconfig(v, oh); 2449 + _set_idle_ioring_wakeup(oh, false); 2517 2450 spin_unlock_irqrestore(&oh->_lock, flags); 2518 2451 2519 2452 return 0; ··· 2728 2659 } 2729 2660 2730 2661 oh->flags |= HWMOD_INIT_NO_RESET; 2662 + 2663 + return 0; 2664 + } 2665 + 2666 + /** 2667 + * omap_hwmod_pad_route_irq - route an I/O pad wakeup to a particular MPU IRQ 2668 + * @oh: struct omap_hwmod * containing hwmod mux entries 2669 + * @pad_idx: array index in oh->mux of the hwmod mux entry to route wakeup 2670 + * @irq_idx: the hwmod mpu_irqs array index of the IRQ to trigger on wakeup 2671 + * 2672 + * When an I/O pad wakeup arrives for the dynamic or wakeup hwmod mux 2673 + * entry number @pad_idx for the hwmod @oh, trigger the interrupt 2674 + * service routine for the hwmod's mpu_irqs array index @irq_idx. 
If 2675 + * this function is not called for a given pad_idx, then the ISR 2676 + * associated with @oh's first MPU IRQ will be triggered when an I/O 2677 + * pad wakeup occurs on that pad. Note that @pad_idx is the index of 2678 + * the _dynamic or wakeup_ entry: if there are other entries not 2679 + * marked with OMAP_DEVICE_PAD_WAKEUP or OMAP_DEVICE_PAD_REMUX, these 2680 + * entries are NOT COUNTED in the dynamic pad index. This function 2681 + * must be called separately for each pad that requires its interrupt 2682 + * to be re-routed this way. Returns -EINVAL if there is an argument 2683 + * problem or if @oh does not have hwmod mux entries or MPU IRQs; 2684 + * returns -ENOMEM if memory cannot be allocated; or 0 upon success. 2685 + * 2686 + * XXX This function interface is fragile. Rather than using array 2687 + * indexes, which are subject to unpredictable change, it should be 2688 + * using hwmod IRQ names, and some other stable key for the hwmod mux 2689 + * pad records. 2690 + */ 2691 + int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx) 2692 + { 2693 + int nr_irqs; 2694 + 2695 + might_sleep(); 2696 + 2697 + if (!oh || !oh->mux || !oh->mpu_irqs || pad_idx < 0 || 2698 + pad_idx >= oh->mux->nr_pads_dynamic) 2699 + return -EINVAL; 2700 + 2701 + /* Check the number of available mpu_irqs */ 2702 + for (nr_irqs = 0; oh->mpu_irqs[nr_irqs].irq >= 0; nr_irqs++) 2703 + ; 2704 + 2705 + if (irq_idx >= nr_irqs) 2706 + return -EINVAL; 2707 + 2708 + if (!oh->mux->irqs) { 2709 + /* XXX What frees this? */ 2710 + oh->mux->irqs = kzalloc(sizeof(int) * oh->mux->nr_pads_dynamic, 2711 + GFP_KERNEL); 2712 + if (!oh->mux->irqs) 2713 + return -ENOMEM; 2714 + } 2715 + oh->mux->irqs[pad_idx] = irq_idx; 2731 2716 2732 2717 return 0; 2733 2718 }
+1
arch/arm/mach-omap2/pm.h
··· 21 21 extern int omap3_can_sleep(void); 22 22 extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state); 23 23 extern int omap3_idle_init(void); 24 + extern int omap4_idle_init(void); 24 25 25 26 #if defined(CONFIG_PM_OPP) 26 27 extern int omap3_opp_init(void);
-20
arch/arm/mach-omap2/pm24xx.c
··· 30 30 #include <linux/irq.h> 31 31 #include <linux/time.h> 32 32 #include <linux/gpio.h> 33 - #include <linux/console.h> 34 33 35 34 #include <asm/mach/time.h> 36 35 #include <asm/mach/irq.h> ··· 126 127 if (omap_irq_pending()) 127 128 goto no_sleep; 128 129 129 - /* Block console output in case it is on one of the OMAP UARTs */ 130 - if (!is_suspending()) 131 - if (!console_trylock()) 132 - goto no_sleep; 133 - 134 - omap_uart_prepare_idle(0); 135 - omap_uart_prepare_idle(1); 136 - omap_uart_prepare_idle(2); 137 - 138 130 /* Jump to SRAM suspend code */ 139 131 omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL), 140 132 OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL), 141 133 OMAP_SDRC_REGADDR(SDRC_POWER)); 142 - 143 - omap_uart_resume_idle(2); 144 - omap_uart_resume_idle(1); 145 - omap_uart_resume_idle(0); 146 - 147 - if (!is_suspending()) 148 - console_unlock(); 149 134 150 135 no_sleep: 151 136 omap2_gpio_resume_after_idle(); ··· 222 239 { 223 240 if (omap2_fclks_active()) 224 241 return 0; 225 - if (!omap_uart_can_sleep()) 226 - return 0; 227 242 if (osc_ck->usecount > 1) 228 243 return 0; 229 244 if (omap_dma_running()) ··· 272 291 mir1 = omap_readl(0x480fe0a4); 273 292 omap_writel(1 << 5, 0x480fe0ac); 274 293 275 - omap_uart_prepare_suspend(); 276 294 omap2_enter_full_retention(); 277 295 278 296 omap_writel(mir1, 0x480fe0a4);
+44 -114
arch/arm/mach-omap2/pm34xx.c
··· 28 28 #include <linux/clk.h> 29 29 #include <linux/delay.h> 30 30 #include <linux/slab.h> 31 - #include <linux/console.h> 32 31 #include <trace/events/power.h> 33 32 34 33 #include <asm/suspend.h> ··· 35 36 #include <plat/sram.h> 36 37 #include "clockdomain.h" 37 38 #include "powerdomain.h" 38 - #include <plat/serial.h> 39 39 #include <plat/sdrc.h> 40 40 #include <plat/prcm.h> 41 41 #include <plat/gpmc.h> ··· 52 54 53 55 #ifdef CONFIG_SUSPEND 54 56 static suspend_state_t suspend_state = PM_SUSPEND_ON; 55 - static inline bool is_suspending(void) 56 - { 57 - return (suspend_state != PM_SUSPEND_ON) && console_suspend_enabled; 58 - } 59 - #else 60 - static inline bool is_suspending(void) 61 - { 62 - return false; 63 - } 64 57 #endif 65 58 66 59 /* pm34xx errata defined in pm.h */ ··· 184 195 * that any peripheral wake-up events occurring while attempting to 185 196 * clear the PM_WKST_x are detected and cleared. 186 197 */ 187 - static int prcm_clear_mod_irqs(s16 module, u8 regs) 198 + static int prcm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits) 188 199 { 189 200 u32 wkst, fclk, iclk, clken; 190 201 u16 wkst_off = (regs == 3) ? 
OMAP3430ES2_PM_WKST3 : PM_WKST1; ··· 196 207 197 208 wkst = omap2_prm_read_mod_reg(module, wkst_off); 198 209 wkst &= omap2_prm_read_mod_reg(module, grpsel_off); 210 + wkst &= ~ignore_bits; 199 211 if (wkst) { 200 212 iclk = omap2_cm_read_mod_reg(module, iclk_off); 201 213 fclk = omap2_cm_read_mod_reg(module, fclk_off); ··· 212 222 omap2_cm_set_mod_reg_bits(clken, module, fclk_off); 213 223 omap2_prm_write_mod_reg(wkst, module, wkst_off); 214 224 wkst = omap2_prm_read_mod_reg(module, wkst_off); 225 + wkst &= ~ignore_bits; 215 226 c++; 216 227 } 217 228 omap2_cm_write_mod_reg(iclk, module, iclk_off); ··· 222 231 return c; 223 232 } 224 233 225 - static int _prcm_int_handle_wakeup(void) 234 + static irqreturn_t _prcm_int_handle_io(int irq, void *unused) 226 235 { 227 236 int c; 228 237 229 - c = prcm_clear_mod_irqs(WKUP_MOD, 1); 230 - c += prcm_clear_mod_irqs(CORE_MOD, 1); 231 - c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1); 232 - if (omap_rev() > OMAP3430_REV_ES1_0) { 233 - c += prcm_clear_mod_irqs(CORE_MOD, 3); 234 - c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1); 235 - } 238 + c = prcm_clear_mod_irqs(WKUP_MOD, 1, 239 + ~(OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK)); 236 240 237 - return c; 241 + return c ? IRQ_HANDLED : IRQ_NONE; 238 242 } 239 243 240 - /* 241 - * PRCM Interrupt Handler 242 - * 243 - * The PRM_IRQSTATUS_MPU register indicates if there are any pending 244 - * interrupts from the PRCM for the MPU. These bits must be cleared in 245 - * order to clear the PRCM interrupt. The PRCM interrupt handler is 246 - * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear 247 - * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU 248 - * register indicates that a wake-up event is pending for the MPU and 249 - * this bit can only be cleared if the all the wake-up events latched 250 - * in the various PM_WKST_x registers have been cleared. 
The interrupt 251 - * handler is implemented using a do-while loop so that if a wake-up 252 - * event occurred during the processing of the prcm interrupt handler 253 - * (setting a bit in the corresponding PM_WKST_x register and thus 254 - * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register) 255 - * this would be handled. 256 - */ 257 - static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) 244 + static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused) 258 245 { 259 - u32 irqenable_mpu, irqstatus_mpu; 260 - int c = 0; 246 + int c; 261 247 262 - irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD, 263 - OMAP3_PRM_IRQENABLE_MPU_OFFSET); 264 - irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD, 265 - OMAP3_PRM_IRQSTATUS_MPU_OFFSET); 266 - irqstatus_mpu &= irqenable_mpu; 248 + /* 249 + * Clear all except ST_IO and ST_IO_CHAIN for wkup module, 250 + * these are handled in a separate handler to avoid acking 251 + * IO events before parsing in mux code 252 + */ 253 + c = prcm_clear_mod_irqs(WKUP_MOD, 1, 254 + OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK); 255 + c += prcm_clear_mod_irqs(CORE_MOD, 1, 0); 256 + c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1, 0); 257 + if (omap_rev() > OMAP3430_REV_ES1_0) { 258 + c += prcm_clear_mod_irqs(CORE_MOD, 3, 0); 259 + c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, 0); 260 + } 267 261 268 - do { 269 - if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK | 270 - OMAP3430_IO_ST_MASK)) { 271 - c = _prcm_int_handle_wakeup(); 272 - 273 - /* 274 - * Is the MPU PRCM interrupt handler racing with the 275 - * IVA2 PRCM interrupt handler ? 
276 - */ 277 - WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup " 278 - "but no wakeup sources are marked\n"); 279 - } else { 280 - /* XXX we need to expand our PRCM interrupt handler */ 281 - WARN(1, "prcm: WARNING: PRCM interrupt received, but " 282 - "no code to handle it (%08x)\n", irqstatus_mpu); 283 - } 284 - 285 - omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD, 286 - OMAP3_PRM_IRQSTATUS_MPU_OFFSET); 287 - 288 - irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD, 289 - OMAP3_PRM_IRQSTATUS_MPU_OFFSET); 290 - irqstatus_mpu &= irqenable_mpu; 291 - 292 - } while (irqstatus_mpu); 293 - 294 - return IRQ_HANDLED; 262 + return c ? IRQ_HANDLED : IRQ_NONE; 295 263 } 296 264 297 265 static void omap34xx_save_context(u32 *save) ··· 326 376 omap3_enable_io_chain(); 327 377 } 328 378 329 - /* Block console output in case it is on one of the OMAP UARTs */ 330 - if (!is_suspending()) 331 - if (per_next_state < PWRDM_POWER_ON || 332 - core_next_state < PWRDM_POWER_ON) 333 - if (!console_trylock()) 334 - goto console_still_active; 335 - 336 379 pwrdm_pre_transition(); 337 380 338 381 /* PER */ 339 382 if (per_next_state < PWRDM_POWER_ON) { 340 383 per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 
1 : 0; 341 - omap_uart_prepare_idle(2); 342 - omap_uart_prepare_idle(3); 343 384 omap2_gpio_prepare_for_idle(per_going_off); 344 385 if (per_next_state == PWRDM_POWER_OFF) 345 386 omap3_per_save_context(); ··· 338 397 339 398 /* CORE */ 340 399 if (core_next_state < PWRDM_POWER_ON) { 341 - omap_uart_prepare_idle(0); 342 - omap_uart_prepare_idle(1); 343 400 if (core_next_state == PWRDM_POWER_OFF) { 344 401 omap3_core_save_context(); 345 402 omap3_cm_save_context(); ··· 386 447 omap3_sram_restore_context(); 387 448 omap2_sms_restore_context(); 388 449 } 389 - omap_uart_resume_idle(0); 390 - omap_uart_resume_idle(1); 391 450 if (core_next_state == PWRDM_POWER_OFF) 392 451 omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK, 393 452 OMAP3430_GR_MOD, ··· 401 464 omap2_gpio_resume_after_idle(); 402 465 if (per_prev_state == PWRDM_POWER_OFF) 403 466 omap3_per_restore_context(); 404 - omap_uart_resume_idle(2); 405 - omap_uart_resume_idle(3); 406 467 } 407 468 408 - if (!is_suspending()) 409 - console_unlock(); 410 - 411 - console_still_active: 412 469 /* Disable IO-PAD and IO-CHAIN wakeup */ 413 470 if (omap3_has_io_wakeup() && 414 471 (per_next_state < PWRDM_POWER_ON || ··· 416 485 clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]); 417 486 } 418 487 419 - int omap3_can_sleep(void) 420 - { 421 - if (!omap_uart_can_sleep()) 422 - return 0; 423 - return 1; 424 - } 425 - 426 488 static void omap3_pm_idle(void) 427 489 { 428 490 local_irq_disable(); 429 491 local_fiq_disable(); 430 - 431 - if (!omap3_can_sleep()) 432 - goto out; 433 492 434 493 if (omap_irq_pending() || need_resched()) 435 494 goto out; ··· 454 533 goto restore; 455 534 } 456 535 457 - omap_uart_prepare_suspend(); 458 536 omap3_intc_suspend(); 459 537 460 538 omap_sram_idle(); ··· 500 580 { 501 581 disable_hlt(); 502 582 suspend_state = state; 503 - omap_uart_enable_irqs(0); 583 + omap_prcm_irq_prepare(); 504 584 return 0; 505 585 } 506 586 507 587 static void omap3_pm_end(void) 508 588 { 509 589 suspend_state = 
PM_SUSPEND_ON; 510 - omap_uart_enable_irqs(1); 511 590 enable_hlt(); 512 591 return; 592 + } 593 + 594 + static void omap3_pm_finish(void) 595 + { 596 + omap_prcm_irq_complete(); 513 597 } 514 598 515 599 static const struct platform_suspend_ops omap_pm_ops = { 516 600 .begin = omap3_pm_begin, 517 601 .end = omap3_pm_end, 518 602 .enter = omap3_pm_enter, 603 + .finish = omap3_pm_finish, 519 604 .valid = suspend_valid_only_mem, 520 605 }; 521 606 #endif /* CONFIG_SUSPEND */ ··· 626 701 OMAP3430_GRPSEL_GPT1_MASK | 627 702 OMAP3430_GRPSEL_GPT12_MASK, 628 703 WKUP_MOD, OMAP3430_PM_MPUGRPSEL); 629 - /* For some reason IO doesn't generate wakeup event even if 630 - * it is selected to mpu wakeup goup */ 631 - omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK, 632 - OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); 633 704 634 705 /* Enable PM_WKEN to support DSS LPR */ 635 706 omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK, ··· 802 881 * supervised mode for powerdomains */ 803 882 prcm_setup_regs(); 804 883 805 - ret = request_irq(INT_34XX_PRCM_MPU_IRQ, 806 - (irq_handler_t)prcm_interrupt_handler, 807 - IRQF_DISABLED, "prcm", NULL); 884 + ret = request_irq(omap_prcm_event_to_irq("wkup"), 885 + _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL); 886 + 808 887 if (ret) { 809 - printk(KERN_ERR "request_irq failed to register for 0x%x\n", 810 - INT_34XX_PRCM_MPU_IRQ); 888 + pr_err("pm: Failed to request pm_wkup irq\n"); 889 + goto err1; 890 + } 891 + 892 + /* IO interrupt is shared with mux code */ 893 + ret = request_irq(omap_prcm_event_to_irq("io"), 894 + _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io", 895 + omap3_pm_init); 896 + 897 + if (ret) { 898 + pr_err("pm: Failed to request pm_io irq\n"); 811 899 goto err1; 812 900 } 813 901
+149 -4
arch/arm/mach-omap2/pm44xx.c
··· 1 1 /* 2 2 * OMAP4 Power Management Routines 3 3 * 4 - * Copyright (C) 2010 Texas Instruments, Inc. 4 + * Copyright (C) 2010-2011 Texas Instruments, Inc. 5 5 * Rajendra Nayak <rnayak@ti.com> 6 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 7 * 7 8 * This program is free software; you can redistribute it and/or modify 8 9 * it under the terms of the GNU General Public License version 2 as ··· 18 17 #include <linux/slab.h> 19 18 20 19 #include "common.h" 20 + #include "clockdomain.h" 21 21 #include "powerdomain.h" 22 + #include "pm.h" 22 23 23 24 struct power_state { 24 25 struct powerdomain *pwrdm; 25 26 u32 next_state; 26 27 #ifdef CONFIG_SUSPEND 27 28 u32 saved_state; 29 + u32 saved_logic_state; 28 30 #endif 29 31 struct list_head node; 30 32 }; ··· 37 33 #ifdef CONFIG_SUSPEND 38 34 static int omap4_pm_suspend(void) 39 35 { 40 - do_wfi(); 36 + struct power_state *pwrst; 37 + int state, ret = 0; 38 + u32 cpu_id = smp_processor_id(); 39 + 40 + /* Save current powerdomain state */ 41 + list_for_each_entry(pwrst, &pwrst_list, node) { 42 + pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm); 43 + pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm); 44 + } 45 + 46 + /* Set targeted power domain states by suspend */ 47 + list_for_each_entry(pwrst, &pwrst_list, node) { 48 + omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); 49 + pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF); 50 + } 51 + 52 + /* 53 + * For MPUSS to hit power domain retention(CSWR or OSWR), 54 + * CPU0 and CPU1 power domains need to be in OFF or DORMANT state, 55 + * since CPU power domain CSWR is not supported by hardware 56 + * Only master CPU follows suspend path. All other CPUs follow 57 + * CPU hotplug path in system wide suspend. On OMAP4, CPU power 58 + * domain CSWR is not supported by hardware. 59 + * More details can be found in OMAP4430 TRM section 4.3.4.2. 
60 + */ 61 + omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF); 62 + 63 + /* Restore next powerdomain state */ 64 + list_for_each_entry(pwrst, &pwrst_list, node) { 65 + state = pwrdm_read_prev_pwrst(pwrst->pwrdm); 66 + if (state > pwrst->next_state) { 67 + pr_info("Powerdomain (%s) didn't enter " 68 + "target state %d\n", 69 + pwrst->pwrdm->name, pwrst->next_state); 70 + ret = -1; 71 + } 72 + omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state); 73 + pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state); 74 + } 75 + if (ret) 76 + pr_crit("Could not enter target state in pm_suspend\n"); 77 + else 78 + pr_info("Successfully put all powerdomains to target state\n"); 79 + 41 80 return 0; 42 81 } 43 82 ··· 120 73 }; 121 74 #endif /* CONFIG_SUSPEND */ 122 75 76 + /* 77 + * Enable hardware supervised mode for all clockdomains if it's 78 + * supported. Initiate sleep transition for other clockdomains, if 79 + * they are not used 80 + */ 81 + static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) 82 + { 83 + if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO) 84 + clkdm_allow_idle(clkdm); 85 + else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP && 86 + atomic_read(&clkdm->usecount) == 0) 87 + clkdm_sleep(clkdm); 88 + return 0; 89 + } 90 + 91 + 123 92 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) 124 93 { 125 94 struct power_state *pwrst; ··· 143 80 if (!pwrdm->pwrsts) 144 81 return 0; 145 82 83 + /* 84 + * Skip CPU0 and CPU1 power domains. CPU1 is programmed 85 + * through hotplug path and CPU0 explicitly programmed 86 + * further down in the code path 87 + */ 88 + if (!strncmp(pwrdm->name, "cpu", 3)) 89 + return 0; 90 + 91 + /* 92 + * FIXME: Remove this check when core retention is supported 93 + * Only MPUSS power domain is added in the list. 
94 + */ 95 + if (strcmp(pwrdm->name, "mpu_pwrdm")) 96 + return 0; 97 + 146 98 pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC); 147 99 if (!pwrst) 148 100 return -ENOMEM; 101 + 149 102 pwrst->pwrdm = pwrdm; 150 - pwrst->next_state = PWRDM_POWER_ON; 103 + pwrst->next_state = PWRDM_POWER_RET; 151 104 list_add(&pwrst->node, &pwrst_list); 152 105 153 - return pwrdm_set_next_pwrst(pwrst->pwrdm, pwrst->next_state); 106 + return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); 107 + } 108 + 109 + /** 110 + * omap_default_idle - OMAP4 default ilde routine.' 111 + * 112 + * Implements OMAP4 memory, IO ordering requirements which can't be addressed 113 + * with default arch_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and 114 + * by secondary CPU with CONFIG_CPUIDLE. 115 + */ 116 + static void omap_default_idle(void) 117 + { 118 + local_irq_disable(); 119 + local_fiq_disable(); 120 + 121 + omap_do_wfi(); 122 + 123 + local_fiq_enable(); 124 + local_irq_enable(); 154 125 } 155 126 156 127 /** ··· 196 99 static int __init omap4_pm_init(void) 197 100 { 198 101 int ret; 102 + struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm; 103 + struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm; 199 104 200 105 if (!cpu_is_omap44xx()) 201 106 return -ENODEV; 107 + 108 + if (omap_rev() == OMAP4430_REV_ES1_0) { 109 + WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); 110 + return -ENODEV; 111 + } 202 112 203 113 pr_err("Power Management for TI OMAP4.\n"); 204 114 ··· 215 111 goto err2; 216 112 } 217 113 114 + /* 115 + * The dynamic dependency between MPUSS -> MEMIF and 116 + * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as 117 + * expected. The hardware recommendation is to enable static 118 + * dependencies for these to avoid system lock ups or random crashes. 
119 + */ 120 + mpuss_clkdm = clkdm_lookup("mpuss_clkdm"); 121 + emif_clkdm = clkdm_lookup("l3_emif_clkdm"); 122 + l3_1_clkdm = clkdm_lookup("l3_1_clkdm"); 123 + l3_2_clkdm = clkdm_lookup("l3_2_clkdm"); 124 + l4_per_clkdm = clkdm_lookup("l4_per_clkdm"); 125 + ducati_clkdm = clkdm_lookup("ducati_clkdm"); 126 + if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) || 127 + (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm)) 128 + goto err2; 129 + 130 + ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm); 131 + ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm); 132 + ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm); 133 + ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm); 134 + ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm); 135 + ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm); 136 + if (ret) { 137 + pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 " 138 + "wakeup dependency\n"); 139 + goto err2; 140 + } 141 + 142 + ret = omap4_mpuss_init(); 143 + if (ret) { 144 + pr_err("Failed to initialise OMAP4 MPUSS\n"); 145 + goto err2; 146 + } 147 + 148 + (void) clkdm_for_each(clkdms_setup, NULL); 149 + 218 150 #ifdef CONFIG_SUSPEND 219 151 suspend_set_ops(&omap_pm_ops); 220 152 #endif /* CONFIG_SUSPEND */ 153 + 154 + /* Overwrite the default arch_idle() */ 155 + pm_idle = omap_default_idle; 156 + 157 + omap4_idle_init(); 221 158 222 159 err2: 223 160 return ret;
+74 -1
arch/arm/mach-omap2/prcm-common.h
··· 4 4 /* 5 5 * OMAP2/3 PRCM base and module definitions 6 6 * 7 - * Copyright (C) 2007-2009 Texas Instruments, Inc. 7 + * Copyright (C) 2007-2009, 2011 Texas Instruments, Inc. 8 8 * Copyright (C) 2007-2009 Nokia Corporation 9 9 * 10 10 * Written by Paul Walmsley ··· 410 410 extern void __iomem *prm_base; 411 411 extern void __iomem *cm_base; 412 412 extern void __iomem *cm2_base; 413 + 414 + /** 415 + * struct omap_prcm_irq - describes a PRCM interrupt bit 416 + * @name: a short name describing the interrupt type, e.g. "wkup" or "io" 417 + * @offset: the bit shift of the interrupt inside the IRQ{ENABLE,STATUS} regs 418 + * @priority: should this interrupt be handled before @priority=false IRQs? 419 + * 420 + * Describes interrupt bits inside the PRM_IRQ{ENABLE,STATUS}_MPU* registers. 421 + * On systems with multiple PRM MPU IRQ registers, the bitfields read from 422 + * the registers are concatenated, so @offset could be > 31 on these systems - 423 + * see omap_prm_irq_handler() for more details. I/O ring interrupts should 424 + * have @priority set to true. 
425 + */ 426 + struct omap_prcm_irq { 427 + const char *name; 428 + unsigned int offset; 429 + bool priority; 430 + }; 431 + 432 + /** 433 + * struct omap_prcm_irq_setup - PRCM interrupt controller details 434 + * @ack: PRM register offset for the first PRM_IRQSTATUS_MPU register 435 + * @mask: PRM register offset for the first PRM_IRQENABLE_MPU register 436 + * @nr_regs: number of PRM_IRQ{STATUS,ENABLE}_MPU* registers 437 + * @nr_irqs: number of entries in the @irqs array 438 + * @irqs: ptr to an array of PRCM interrupt bits (see @nr_irqs) 439 + * @irq: MPU IRQ asserted when a PRCM interrupt arrives 440 + * @read_pending_irqs: fn ptr to determine if any PRCM IRQs are pending 441 + * @ocp_barrier: fn ptr to force buffered PRM writes to complete 442 + * @save_and_clear_irqen: fn ptr to save and clear IRQENABLE regs 443 + * @restore_irqen: fn ptr to save and clear IRQENABLE regs 444 + * @saved_mask: IRQENABLE regs are saved here during suspend 445 + * @priority_mask: 1 bit per IRQ, set to 1 if omap_prcm_irq.priority = true 446 + * @base_irq: base dynamic IRQ number, returned from irq_alloc_descs() in init 447 + * @suspended: set to true after Linux suspend code has called our ->prepare() 448 + * @suspend_save_flag: set to true after IRQ masks have been saved and disabled 449 + * 450 + * @saved_mask, @priority_mask, @base_irq, @suspended, and 451 + * @suspend_save_flag are populated dynamically, and are not to be 452 + * specified in static initializers. 
453 + */ 454 + struct omap_prcm_irq_setup { 455 + u16 ack; 456 + u16 mask; 457 + u8 nr_regs; 458 + u8 nr_irqs; 459 + const struct omap_prcm_irq *irqs; 460 + int irq; 461 + void (*read_pending_irqs)(unsigned long *events); 462 + void (*ocp_barrier)(void); 463 + void (*save_and_clear_irqen)(u32 *saved_mask); 464 + void (*restore_irqen)(u32 *saved_mask); 465 + u32 *saved_mask; 466 + u32 *priority_mask; 467 + int base_irq; 468 + bool suspended; 469 + bool suspend_save_flag; 470 + }; 471 + 472 + /* OMAP_PRCM_IRQ: convenience macro for creating struct omap_prcm_irq records */ 473 + #define OMAP_PRCM_IRQ(_name, _offset, _priority) { \ 474 + .name = _name, \ 475 + .offset = _offset, \ 476 + .priority = _priority \ 477 + } 478 + 479 + extern void omap_prcm_irq_cleanup(void); 480 + extern int omap_prcm_register_chain_handler( 481 + struct omap_prcm_irq_setup *irq_setup); 482 + extern int omap_prcm_event_to_irq(const char *event); 483 + extern void omap_prcm_irq_prepare(void); 484 + extern void omap_prcm_irq_complete(void); 485 + 413 486 # endif 414 487 415 488 #endif
+96 -1
arch/arm/mach-omap2/prm2xxx_3xxx.c
··· 1 1 /* 2 2 * OMAP2/3 PRM module functions 3 3 * 4 - * Copyright (C) 2010 Texas Instruments, Inc. 4 + * Copyright (C) 2010-2011 Texas Instruments, Inc. 5 5 * Copyright (C) 2010 Nokia Corporation 6 6 * Benoît Cousson 7 7 * Paul Walmsley ··· 26 26 #include "cm2xxx_3xxx.h" 27 27 #include "prm-regbits-24xx.h" 28 28 #include "prm-regbits-34xx.h" 29 + 30 + static const struct omap_prcm_irq omap3_prcm_irqs[] = { 31 + OMAP_PRCM_IRQ("wkup", 0, 0), 32 + OMAP_PRCM_IRQ("io", 9, 1), 33 + }; 34 + 35 + static struct omap_prcm_irq_setup omap3_prcm_irq_setup = { 36 + .ack = OMAP3_PRM_IRQSTATUS_MPU_OFFSET, 37 + .mask = OMAP3_PRM_IRQENABLE_MPU_OFFSET, 38 + .nr_regs = 1, 39 + .irqs = omap3_prcm_irqs, 40 + .nr_irqs = ARRAY_SIZE(omap3_prcm_irqs), 41 + .irq = INT_34XX_PRCM_MPU_IRQ, 42 + .read_pending_irqs = &omap3xxx_prm_read_pending_irqs, 43 + .ocp_barrier = &omap3xxx_prm_ocp_barrier, 44 + .save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen, 45 + .restore_irqen = &omap3xxx_prm_restore_irqen, 46 + }; 29 47 30 48 u32 omap2_prm_read_mod_reg(s16 module, u16 idx) 31 49 { ··· 230 212 { 231 213 return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset); 232 214 } 215 + 216 + /** 217 + * omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events 218 + * @events: ptr to a u32, preallocated by caller 219 + * 220 + * Read PRM_IRQSTATUS_MPU bits, AND'ed with the currently-enabled PRM 221 + * MPU IRQs, and store the result into the u32 pointed to by @events. 222 + * No return value. 223 + */ 224 + void omap3xxx_prm_read_pending_irqs(unsigned long *events) 225 + { 226 + u32 mask, st; 227 + 228 + /* XXX Can the mask read be avoided (e.g., can it come from RAM?) 
*/ 229 + mask = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); 230 + st = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); 231 + 232 + events[0] = mask & st; 233 + } 234 + 235 + /** 236 + * omap3xxx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete 237 + * 238 + * Force any buffered writes to the PRM IP block to complete. Needed 239 + * by the PRM IRQ handler, which reads and writes directly to the IP 240 + * block, to avoid race conditions after acknowledging or clearing IRQ 241 + * bits. No return value. 242 + */ 243 + void omap3xxx_prm_ocp_barrier(void) 244 + { 245 + omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET); 246 + } 247 + 248 + /** 249 + * omap3xxx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU reg 250 + * @saved_mask: ptr to a u32 array to save IRQENABLE bits 251 + * 252 + * Save the PRM_IRQENABLE_MPU register to @saved_mask. @saved_mask 253 + * must be allocated by the caller. Intended to be used in the PRM 254 + * interrupt handler suspend callback. The OCP barrier is needed to 255 + * ensure the write to disable PRM interrupts reaches the PRM before 256 + * returning; otherwise, spurious interrupts might occur. No return 257 + * value. 258 + */ 259 + void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask) 260 + { 261 + saved_mask[0] = omap2_prm_read_mod_reg(OCP_MOD, 262 + OMAP3_PRM_IRQENABLE_MPU_OFFSET); 263 + omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); 264 + 265 + /* OCP barrier */ 266 + omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET); 267 + } 268 + 269 + /** 270 + * omap3xxx_prm_restore_irqen - set PRM_IRQENABLE_MPU register from args 271 + * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously 272 + * 273 + * Restore the PRM_IRQENABLE_MPU register from @saved_mask. Intended 274 + * to be used in the PRM interrupt handler resume callback to restore 275 + * values saved by omap3xxx_prm_save_and_clear_irqen(). 
No OCP 276 + * barrier should be needed here; any pending PRM interrupts will fire 277 + * once the writes reach the PRM. No return value. 278 + */ 279 + void omap3xxx_prm_restore_irqen(u32 *saved_mask) 280 + { 281 + omap2_prm_write_mod_reg(saved_mask[0], OCP_MOD, 282 + OMAP3_PRM_IRQENABLE_MPU_OFFSET); 283 + } 284 + 285 + static int __init omap3xxx_prcm_init(void) 286 + { 287 + if (cpu_is_omap34xx()) 288 + return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); 289 + return 0; 290 + } 291 + subsys_initcall(omap3xxx_prcm_init);
+8 -1
arch/arm/mach-omap2/prm2xxx_3xxx.h
··· 1 1 /* 2 2 * OMAP2/3 Power/Reset Management (PRM) register definitions 3 3 * 4 - * Copyright (C) 2007-2009 Texas Instruments, Inc. 4 + * Copyright (C) 2007-2009, 2011 Texas Instruments, Inc. 5 5 * Copyright (C) 2008-2010 Nokia Corporation 6 6 * Paul Walmsley 7 7 * ··· 314 314 extern u32 omap3_prm_vcvp_read(u8 offset); 315 315 extern void omap3_prm_vcvp_write(u32 val, u8 offset); 316 316 extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset); 317 + 318 + /* PRM interrupt-related functions */ 319 + extern void omap3xxx_prm_read_pending_irqs(unsigned long *events); 320 + extern void omap3xxx_prm_ocp_barrier(void); 321 + extern void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask); 322 + extern void omap3xxx_prm_restore_irqen(u32 *saved_mask); 323 + 317 324 #endif /* CONFIG_ARCH_OMAP4 */ 318 325 319 326 #endif
+116
arch/arm/mach-omap2/prm44xx.c
··· 27 27 #include "prcm44xx.h" 28 28 #include "prminst44xx.h" 29 29 30 + static const struct omap_prcm_irq omap4_prcm_irqs[] = { 31 + OMAP_PRCM_IRQ("wkup", 0, 0), 32 + OMAP_PRCM_IRQ("io", 9, 1), 33 + }; 34 + 35 + static struct omap_prcm_irq_setup omap4_prcm_irq_setup = { 36 + .ack = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, 37 + .mask = OMAP4_PRM_IRQENABLE_MPU_OFFSET, 38 + .nr_regs = 2, 39 + .irqs = omap4_prcm_irqs, 40 + .nr_irqs = ARRAY_SIZE(omap4_prcm_irqs), 41 + .irq = OMAP44XX_IRQ_PRCM, 42 + .read_pending_irqs = &omap44xx_prm_read_pending_irqs, 43 + .ocp_barrier = &omap44xx_prm_ocp_barrier, 44 + .save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen, 45 + .restore_irqen = &omap44xx_prm_restore_irqen, 46 + }; 47 + 30 48 /* PRM low-level functions */ 31 49 32 50 /* Read a register in a CM/PRM instance in the PRM module */ ··· 139 121 OMAP4430_PRM_DEVICE_INST, 140 122 offset); 141 123 } 124 + 125 + static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs) 126 + { 127 + u32 mask, st; 128 + 129 + /* XXX read mask from RAM? */ 130 + mask = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqen_offs); 131 + st = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqst_offs); 132 + 133 + return mask & st; 134 + } 135 + 136 + /** 137 + * omap44xx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events 138 + * @events: ptr to two consecutive u32s, preallocated by caller 139 + * 140 + * Read PRM_IRQSTATUS_MPU* bits, AND'ed with the currently-enabled PRM 141 + * MPU IRQs, and store the result into the two u32s pointed to by @events. 142 + * No return value. 
143 + */ 144 + void omap44xx_prm_read_pending_irqs(unsigned long *events) 145 + { 146 + events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET, 147 + OMAP4_PRM_IRQSTATUS_MPU_OFFSET); 148 + 149 + events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET, 150 + OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); 151 + } 152 + 153 + /** 154 + * omap44xx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete 155 + * 156 + * Force any buffered writes to the PRM IP block to complete. Needed 157 + * by the PRM IRQ handler, which reads and writes directly to the IP 158 + * block, to avoid race conditions after acknowledging or clearing IRQ 159 + * bits. No return value. 160 + */ 161 + void omap44xx_prm_ocp_barrier(void) 162 + { 163 + omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, 164 + OMAP4_REVISION_PRM_OFFSET); 165 + } 166 + 167 + /** 168 + * omap44xx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU* regs 169 + * @saved_mask: ptr to a u32 array to save IRQENABLE bits 170 + * 171 + * Save the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers to 172 + * @saved_mask. @saved_mask must be allocated by the caller. 173 + * Intended to be used in the PRM interrupt handler suspend callback. 174 + * The OCP barrier is needed to ensure the write to disable PRM 175 + * interrupts reaches the PRM before returning; otherwise, spurious 176 + * interrupts might occur. No return value. 
177 + */ 178 + void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) 179 + { 180 + saved_mask[0] = 181 + omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, 182 + OMAP4_PRM_IRQSTATUS_MPU_OFFSET); 183 + saved_mask[1] = 184 + omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, 185 + OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); 186 + 187 + omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST, 188 + OMAP4_PRM_IRQENABLE_MPU_OFFSET); 189 + omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST, 190 + OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); 191 + 192 + /* OCP barrier */ 193 + omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, 194 + OMAP4_REVISION_PRM_OFFSET); 195 + } 196 + 197 + /** 198 + * omap44xx_prm_restore_irqen - set PRM_IRQENABLE_MPU* registers from args 199 + * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously 200 + * 201 + * Restore the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers from 202 + * @saved_mask. Intended to be used in the PRM interrupt handler resume 203 + * callback to restore values saved by omap44xx_prm_save_and_clear_irqen(). 204 + * No OCP barrier should be needed here; any pending PRM interrupts will fire 205 + * once the writes reach the PRM. No return value. 206 + */ 207 + void omap44xx_prm_restore_irqen(u32 *saved_mask) 208 + { 209 + omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_DEVICE_INST, 210 + OMAP4_PRM_IRQENABLE_MPU_OFFSET); 211 + omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_DEVICE_INST, 212 + OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); 213 + } 214 + 215 + static int __init omap4xxx_prcm_init(void) 216 + { 217 + if (cpu_is_omap44xx()) 218 + return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup); 219 + return 0; 220 + } 221 + subsys_initcall(omap4xxx_prcm_init);
+7 -1
arch/arm/mach-omap2/prm44xx.h
··· 1 1 /* 2 2 * OMAP44xx PRM instance offset macros 3 3 * 4 - * Copyright (C) 2009-2010 Texas Instruments, Inc. 4 + * Copyright (C) 2009-2011 Texas Instruments, Inc. 5 5 * Copyright (C) 2009-2010 Nokia Corporation 6 6 * 7 7 * Paul Walmsley (paul@pwsan.com) ··· 762 762 extern u32 omap4_prm_vcvp_read(u8 offset); 763 763 extern void omap4_prm_vcvp_write(u32 val, u8 offset); 764 764 extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset); 765 + 766 + /* PRM interrupt-related functions */ 767 + extern void omap44xx_prm_read_pending_irqs(unsigned long *events); 768 + extern void omap44xx_prm_ocp_barrier(void); 769 + extern void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask); 770 + extern void omap44xx_prm_restore_irqen(u32 *saved_mask); 765 771 766 772 # endif 767 773
+320
arch/arm/mach-omap2/prm_common.c
··· 1 + /* 2 + * OMAP2+ common Power & Reset Management (PRM) IP block functions 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Tero Kristo <t-kristo@ti.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * 12 + * For historical purposes, the API used to configure the PRM 13 + * interrupt handler refers to it as the "PRCM interrupt." The 14 + * underlying registers are located in the PRM on OMAP3/4. 15 + * 16 + * XXX This code should eventually be moved to a PRM driver. 17 + */ 18 + 19 + #include <linux/kernel.h> 20 + #include <linux/module.h> 21 + #include <linux/init.h> 22 + #include <linux/io.h> 23 + #include <linux/irq.h> 24 + #include <linux/interrupt.h> 25 + #include <linux/slab.h> 26 + 27 + #include <mach/system.h> 28 + #include <plat/common.h> 29 + #include <plat/prcm.h> 30 + #include <plat/irqs.h> 31 + 32 + #include "prm2xxx_3xxx.h" 33 + #include "prm44xx.h" 34 + 35 + /* 36 + * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs 37 + * XXX this is technically not needed, since 38 + * omap_prcm_register_chain_handler() could allocate this based on the 39 + * actual amount of memory needed for the SoC 40 + */ 41 + #define OMAP_PRCM_MAX_NR_PENDING_REG 2 42 + 43 + /* 44 + * prcm_irq_chips: an array of all of the "generic IRQ chips" in use 45 + * by the PRCM interrupt handler code. There will be one 'chip' per 46 + * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have 47 + * one "chip" and OMAP4 will have two.) 48 + */ 49 + static struct irq_chip_generic **prcm_irq_chips; 50 + 51 + /* 52 + * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code 53 + * is currently running on. Defined and passed by initialization code 54 + * that calls omap_prcm_register_chain_handler(). 
55 + */ 56 + static struct omap_prcm_irq_setup *prcm_irq_setup; 57 + 58 + /* Private functions */ 59 + 60 + /* 61 + * Move priority events from events to priority_events array 62 + */ 63 + static void omap_prcm_events_filter_priority(unsigned long *events, 64 + unsigned long *priority_events) 65 + { 66 + int i; 67 + 68 + for (i = 0; i < prcm_irq_setup->nr_regs; i++) { 69 + priority_events[i] = 70 + events[i] & prcm_irq_setup->priority_mask[i]; 71 + events[i] ^= priority_events[i]; 72 + } 73 + } 74 + 75 + /* 76 + * PRCM Interrupt Handler 77 + * 78 + * This is a common handler for the OMAP PRCM interrupts. Pending 79 + * interrupts are detected by a call to prcm_pending_events and 80 + * dispatched accordingly. Clearing of the wakeup events should be 81 + * done by the SoC specific individual handlers. 82 + */ 83 + static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc) 84 + { 85 + unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG]; 86 + unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG]; 87 + struct irq_chip *chip = irq_desc_get_chip(desc); 88 + unsigned int virtirq; 89 + int nr_irqs = prcm_irq_setup->nr_regs * 32; 90 + 91 + /* 92 + * If we are suspended, mask all interrupts from PRCM level, 93 + * this does not ack them, and they will be pending until we 94 + * re-enable the interrupts, at which point the 95 + * omap_prcm_irq_handler will be executed again. The 96 + * _save_and_clear_irqen() function must ensure that the PRM 97 + * write to disable all IRQs has reached the PRM before 98 + * returning, or spurious PRCM interrupts may occur during 99 + * suspend. 
100 + */ 101 + if (prcm_irq_setup->suspended) { 102 + prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask); 103 + prcm_irq_setup->suspend_save_flag = true; 104 + } 105 + 106 + /* 107 + * Loop until all pending irqs are handled, since 108 + * generic_handle_irq() can cause new irqs to come 109 + */ 110 + while (!prcm_irq_setup->suspended) { 111 + prcm_irq_setup->read_pending_irqs(pending); 112 + 113 + /* No bit set, then all IRQs are handled */ 114 + if (find_first_bit(pending, nr_irqs) >= nr_irqs) 115 + break; 116 + 117 + omap_prcm_events_filter_priority(pending, priority_pending); 118 + 119 + /* 120 + * Loop on all currently pending irqs so that new irqs 121 + * cannot starve previously pending irqs 122 + */ 123 + 124 + /* Serve priority events first */ 125 + for_each_set_bit(virtirq, priority_pending, nr_irqs) 126 + generic_handle_irq(prcm_irq_setup->base_irq + virtirq); 127 + 128 + /* Serve normal events next */ 129 + for_each_set_bit(virtirq, pending, nr_irqs) 130 + generic_handle_irq(prcm_irq_setup->base_irq + virtirq); 131 + } 132 + if (chip->irq_ack) 133 + chip->irq_ack(&desc->irq_data); 134 + if (chip->irq_eoi) 135 + chip->irq_eoi(&desc->irq_data); 136 + chip->irq_unmask(&desc->irq_data); 137 + 138 + prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */ 139 + } 140 + 141 + /* Public functions */ 142 + 143 + /** 144 + * omap_prcm_event_to_irq - given a PRCM event name, returns the 145 + * corresponding IRQ on which the handler should be registered 146 + * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq 147 + * 148 + * Returns the Linux internal IRQ ID corresponding to @name upon success, 149 + * or -ENOENT upon failure. 
150 + */ 151 + int omap_prcm_event_to_irq(const char *name) 152 + { 153 + int i; 154 + 155 + if (!prcm_irq_setup || !name) 156 + return -ENOENT; 157 + 158 + for (i = 0; i < prcm_irq_setup->nr_irqs; i++) 159 + if (!strcmp(prcm_irq_setup->irqs[i].name, name)) 160 + return prcm_irq_setup->base_irq + 161 + prcm_irq_setup->irqs[i].offset; 162 + 163 + return -ENOENT; 164 + } 165 + 166 + /** 167 + * omap_prcm_irq_cleanup - reverses memory allocated and other steps 168 + * done by omap_prcm_register_chain_handler() 169 + * 170 + * No return value. 171 + */ 172 + void omap_prcm_irq_cleanup(void) 173 + { 174 + int i; 175 + 176 + if (!prcm_irq_setup) { 177 + pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n"); 178 + return; 179 + } 180 + 181 + if (prcm_irq_chips) { 182 + for (i = 0; i < prcm_irq_setup->nr_regs; i++) { 183 + if (prcm_irq_chips[i]) 184 + irq_remove_generic_chip(prcm_irq_chips[i], 185 + 0xffffffff, 0, 0); 186 + prcm_irq_chips[i] = NULL; 187 + } 188 + kfree(prcm_irq_chips); 189 + prcm_irq_chips = NULL; 190 + } 191 + 192 + kfree(prcm_irq_setup->saved_mask); 193 + prcm_irq_setup->saved_mask = NULL; 194 + 195 + kfree(prcm_irq_setup->priority_mask); 196 + prcm_irq_setup->priority_mask = NULL; 197 + 198 + irq_set_chained_handler(prcm_irq_setup->irq, NULL); 199 + 200 + if (prcm_irq_setup->base_irq > 0) 201 + irq_free_descs(prcm_irq_setup->base_irq, 202 + prcm_irq_setup->nr_regs * 32); 203 + prcm_irq_setup->base_irq = 0; 204 + } 205 + 206 + void omap_prcm_irq_prepare(void) 207 + { 208 + prcm_irq_setup->suspended = true; 209 + } 210 + 211 + void omap_prcm_irq_complete(void) 212 + { 213 + prcm_irq_setup->suspended = false; 214 + 215 + /* If we have not saved the masks, do not attempt to restore */ 216 + if (!prcm_irq_setup->suspend_save_flag) 217 + return; 218 + 219 + prcm_irq_setup->suspend_save_flag = false; 220 + 221 + /* 222 + * Re-enable all masked PRCM irq sources, this causes the PRCM 223 + * interrupt to fire immediately if the events were masked 224 + * 
previously in the chain handler 225 + */ 226 + prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask); 227 + } 228 + 229 + /** 230 + * omap_prcm_register_chain_handler - initializes the prcm chained interrupt 231 + * handler based on provided parameters 232 + * @irq_setup: hardware data about the underlying PRM/PRCM 233 + * 234 + * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up 235 + * one generic IRQ chip per PRM interrupt status/enable register pair. 236 + * Returns 0 upon success, -EINVAL if called twice or if invalid 237 + * arguments are passed, or -ENOMEM on any other error. 238 + */ 239 + int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup) 240 + { 241 + int nr_regs = irq_setup->nr_regs; 242 + u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG]; 243 + int offset, i; 244 + struct irq_chip_generic *gc; 245 + struct irq_chip_type *ct; 246 + 247 + if (!irq_setup) 248 + return -EINVAL; 249 + 250 + if (prcm_irq_setup) { 251 + pr_err("PRCM: already initialized; won't reinitialize\n"); 252 + return -EINVAL; 253 + } 254 + 255 + if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) { 256 + pr_err("PRCM: nr_regs too large\n"); 257 + return -EINVAL; 258 + } 259 + 260 + prcm_irq_setup = irq_setup; 261 + 262 + prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL); 263 + prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL); 264 + prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs, 265 + GFP_KERNEL); 266 + 267 + if (!prcm_irq_chips || !prcm_irq_setup->saved_mask || 268 + !prcm_irq_setup->priority_mask) { 269 + pr_err("PRCM: kzalloc failed\n"); 270 + goto err; 271 + } 272 + 273 + memset(mask, 0, sizeof(mask)); 274 + 275 + for (i = 0; i < irq_setup->nr_irqs; i++) { 276 + offset = irq_setup->irqs[i].offset; 277 + mask[offset >> 5] |= 1 << (offset & 0x1f); 278 + if (irq_setup->irqs[i].priority) 279 + irq_setup->priority_mask[offset >> 5] |= 280 + 1 << (offset & 0x1f); 281 + } 282 + 283 + 
irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler); 284 + 285 + irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32, 286 + 0); 287 + 288 + if (irq_setup->base_irq < 0) { 289 + pr_err("PRCM: failed to allocate irq descs: %d\n", 290 + irq_setup->base_irq); 291 + goto err; 292 + } 293 + 294 + for (i = 0; i <= irq_setup->nr_regs; i++) { 295 + gc = irq_alloc_generic_chip("PRCM", 1, 296 + irq_setup->base_irq + i * 32, prm_base, 297 + handle_level_irq); 298 + 299 + if (!gc) { 300 + pr_err("PRCM: failed to allocate generic chip\n"); 301 + goto err; 302 + } 303 + ct = gc->chip_types; 304 + ct->chip.irq_ack = irq_gc_ack_set_bit; 305 + ct->chip.irq_mask = irq_gc_mask_clr_bit; 306 + ct->chip.irq_unmask = irq_gc_mask_set_bit; 307 + 308 + ct->regs.ack = irq_setup->ack + i * 4; 309 + ct->regs.mask = irq_setup->mask + i * 4; 310 + 311 + irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0); 312 + prcm_irq_chips[i] = gc; 313 + } 314 + 315 + return 0; 316 + 317 + err: 318 + omap_prcm_irq_cleanup(); 319 + return -ENOMEM; 320 + }
+247 -670
arch/arm/mach-omap2/serial.c
··· 19 19 */ 20 20 #include <linux/kernel.h> 21 21 #include <linux/init.h> 22 - #include <linux/serial_reg.h> 23 22 #include <linux/clk.h> 24 23 #include <linux/io.h> 25 24 #include <linux/delay.h> 26 25 #include <linux/platform_device.h> 27 26 #include <linux/slab.h> 28 - #include <linux/serial_8250.h> 29 27 #include <linux/pm_runtime.h> 30 28 #include <linux/console.h> 31 29 32 - #ifdef CONFIG_SERIAL_OMAP 33 30 #include <plat/omap-serial.h> 34 - #endif 35 - 36 31 #include "common.h" 37 32 #include <plat/board.h> 38 - #include <plat/clock.h> 39 33 #include <plat/dma.h> 40 34 #include <plat/omap_hwmod.h> 41 35 #include <plat/omap_device.h> 36 + #include <plat/omap-pm.h> 42 37 43 38 #include "prm2xxx_3xxx.h" 44 39 #include "pm.h" ··· 42 47 #include "control.h" 43 48 #include "mux.h" 44 49 45 - #define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52 46 - #define UART_OMAP_WER 0x17 /* Wake-up enable register */ 47 - 48 - #define UART_ERRATA_FIFO_FULL_ABORT (0x1 << 0) 49 - #define UART_ERRATA_i202_MDR1_ACCESS (0x1 << 1) 50 - 51 50 /* 52 - * NOTE: By default the serial timeout is disabled as it causes lost characters 53 - * over the serial ports. This means that the UART clocks will stay on until 54 - * disabled via sysfs. This also causes that any deeper omap sleep states are 55 - * blocked. 51 + * NOTE: By default the serial auto_suspend timeout is disabled as it causes 52 + * lost characters over the serial ports. This means that the UART clocks will 53 + * stay on until power/autosuspend_delay is set for the uart from sysfs. 54 + * This also causes that any deeper omap sleep states are blocked. 
56 55 */ 57 - #define DEFAULT_TIMEOUT 0 56 + #define DEFAULT_AUTOSUSPEND_DELAY -1 58 57 59 58 #define MAX_UART_HWMOD_NAME_LEN 16 60 59 61 60 struct omap_uart_state { 62 61 int num; 63 62 int can_sleep; 64 - struct timer_list timer; 65 - u32 timeout; 66 - 67 - void __iomem *wk_st; 68 - void __iomem *wk_en; 69 - u32 wk_mask; 70 - u32 padconf; 71 - u32 dma_enabled; 72 - 73 - struct clk *ick; 74 - struct clk *fck; 75 - int clocked; 76 - 77 - int irq; 78 - int regshift; 79 - int irqflags; 80 - void __iomem *membase; 81 - resource_size_t mapbase; 82 63 83 64 struct list_head node; 84 65 struct omap_hwmod *oh; 85 66 struct platform_device *pdev; 86 - 87 - u32 errata; 88 - #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) 89 - int context_valid; 90 - 91 - /* Registers to be saved/restored for OFF-mode */ 92 - u16 dll; 93 - u16 dlh; 94 - u16 ier; 95 - u16 sysc; 96 - u16 scr; 97 - u16 wer; 98 - u16 mcr; 99 - #endif 100 67 }; 101 68 102 69 static LIST_HEAD(uart_list); 103 70 static u8 num_uarts; 71 + static u8 console_uart_id = -1; 72 + static u8 no_console_suspend; 73 + static u8 uart_debug; 104 74 105 - static inline unsigned int __serial_read_reg(struct uart_port *up, 106 - int offset) 107 - { 108 - offset <<= up->regshift; 109 - return (unsigned int)__raw_readb(up->membase + offset); 110 - } 75 + #define DEFAULT_RXDMA_POLLRATE 1 /* RX DMA polling rate (us) */ 76 + #define DEFAULT_RXDMA_BUFSIZE 4096 /* RX DMA buffer size */ 77 + #define DEFAULT_RXDMA_TIMEOUT (3 * HZ)/* RX DMA timeout (jiffies) */ 111 78 112 - static inline unsigned int serial_read_reg(struct omap_uart_state *uart, 113 - int offset) 114 - { 115 - offset <<= uart->regshift; 116 - return (unsigned int)__raw_readb(uart->membase + offset); 117 - } 118 - 119 - static inline void __serial_write_reg(struct uart_port *up, int offset, 120 - int value) 121 - { 122 - offset <<= up->regshift; 123 - __raw_writeb(value, up->membase + offset); 124 - } 125 - 126 - static inline void serial_write_reg(struct 
omap_uart_state *uart, int offset, 127 - int value) 128 - { 129 - offset <<= uart->regshift; 130 - __raw_writeb(value, uart->membase + offset); 131 - } 132 - 133 - /* 134 - * Internal UARTs need to be initialized for the 8250 autoconfig to work 135 - * properly. Note that the TX watermark initialization may not be needed 136 - * once the 8250.c watermark handling code is merged. 137 - */ 138 - 139 - static inline void __init omap_uart_reset(struct omap_uart_state *uart) 140 - { 141 - serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE); 142 - serial_write_reg(uart, UART_OMAP_SCR, 0x08); 143 - serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE); 144 - } 145 - 146 - #if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3) 147 - 148 - /* 149 - * Work Around for Errata i202 (3430 - 1.12, 3630 - 1.6) 150 - * The access to uart register after MDR1 Access 151 - * causes UART to corrupt data. 152 - * 153 - * Need a delay = 154 - * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS) 155 - * give 10 times as much 156 - */ 157 - static void omap_uart_mdr1_errataset(struct omap_uart_state *uart, u8 mdr1_val, 158 - u8 fcr_val) 159 - { 160 - u8 timeout = 255; 161 - 162 - serial_write_reg(uart, UART_OMAP_MDR1, mdr1_val); 163 - udelay(2); 164 - serial_write_reg(uart, UART_FCR, fcr_val | UART_FCR_CLEAR_XMIT | 165 - UART_FCR_CLEAR_RCVR); 166 - /* 167 - * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and 168 - * TX_FIFO_E bit is 1. 169 - */ 170 - while (UART_LSR_THRE != (serial_read_reg(uart, UART_LSR) & 171 - (UART_LSR_THRE | UART_LSR_DR))) { 172 - timeout--; 173 - if (!timeout) { 174 - /* Should *never* happen. 
we warn and carry on */ 175 - dev_crit(&uart->pdev->dev, "Errata i202: timedout %x\n", 176 - serial_read_reg(uart, UART_LSR)); 177 - break; 178 - } 179 - udelay(1); 180 - } 181 - } 182 - 183 - static void omap_uart_save_context(struct omap_uart_state *uart) 184 - { 185 - u16 lcr = 0; 186 - 187 - if (!enable_off_mode) 188 - return; 189 - 190 - lcr = serial_read_reg(uart, UART_LCR); 191 - serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B); 192 - uart->dll = serial_read_reg(uart, UART_DLL); 193 - uart->dlh = serial_read_reg(uart, UART_DLM); 194 - serial_write_reg(uart, UART_LCR, lcr); 195 - uart->ier = serial_read_reg(uart, UART_IER); 196 - uart->sysc = serial_read_reg(uart, UART_OMAP_SYSC); 197 - uart->scr = serial_read_reg(uart, UART_OMAP_SCR); 198 - uart->wer = serial_read_reg(uart, UART_OMAP_WER); 199 - serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A); 200 - uart->mcr = serial_read_reg(uart, UART_MCR); 201 - serial_write_reg(uart, UART_LCR, lcr); 202 - 203 - uart->context_valid = 1; 204 - } 205 - 206 - static void omap_uart_restore_context(struct omap_uart_state *uart) 207 - { 208 - u16 efr = 0; 209 - 210 - if (!enable_off_mode) 211 - return; 212 - 213 - if (!uart->context_valid) 214 - return; 215 - 216 - uart->context_valid = 0; 217 - 218 - if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS) 219 - omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_DISABLE, 0xA0); 220 - else 221 - serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE); 222 - 223 - serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B); 224 - efr = serial_read_reg(uart, UART_EFR); 225 - serial_write_reg(uart, UART_EFR, UART_EFR_ECB); 226 - serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */ 227 - serial_write_reg(uart, UART_IER, 0x0); 228 - serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B); 229 - serial_write_reg(uart, UART_DLL, uart->dll); 230 - serial_write_reg(uart, UART_DLM, uart->dlh); 231 - serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */ 232 - 
serial_write_reg(uart, UART_IER, uart->ier); 233 - serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A); 234 - serial_write_reg(uart, UART_MCR, uart->mcr); 235 - serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B); 236 - serial_write_reg(uart, UART_EFR, efr); 237 - serial_write_reg(uart, UART_LCR, UART_LCR_WLEN8); 238 - serial_write_reg(uart, UART_OMAP_SCR, uart->scr); 239 - serial_write_reg(uart, UART_OMAP_WER, uart->wer); 240 - serial_write_reg(uart, UART_OMAP_SYSC, uart->sysc); 241 - 242 - if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS) 243 - omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_16X_MODE, 0xA1); 244 - else 245 - /* UART 16x mode */ 246 - serial_write_reg(uart, UART_OMAP_MDR1, 247 - UART_OMAP_MDR1_16X_MODE); 248 - } 249 - #else 250 - static inline void omap_uart_save_context(struct omap_uart_state *uart) {} 251 - static inline void omap_uart_restore_context(struct omap_uart_state *uart) {} 252 - #endif /* CONFIG_PM && CONFIG_ARCH_OMAP3 */ 253 - 254 - static inline void omap_uart_enable_clocks(struct omap_uart_state *uart) 255 - { 256 - if (uart->clocked) 257 - return; 258 - 259 - omap_device_enable(uart->pdev); 260 - uart->clocked = 1; 261 - omap_uart_restore_context(uart); 262 - } 79 + static struct omap_uart_port_info omap_serial_default_info[] __initdata = { 80 + { 81 + .dma_enabled = false, 82 + .dma_rx_buf_size = DEFAULT_RXDMA_BUFSIZE, 83 + .dma_rx_poll_rate = DEFAULT_RXDMA_POLLRATE, 84 + .dma_rx_timeout = DEFAULT_RXDMA_TIMEOUT, 85 + .autosuspend_timeout = DEFAULT_AUTOSUSPEND_DELAY, 86 + }, 87 + }; 263 88 264 89 #ifdef CONFIG_PM 265 - 266 - static inline void omap_uart_disable_clocks(struct omap_uart_state *uart) 90 + static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable) 267 91 { 268 - if (!uart->clocked) 92 + struct omap_device *od = to_omap_device(pdev); 93 + 94 + if (!od) 269 95 return; 270 96 271 - omap_uart_save_context(uart); 272 - uart->clocked = 0; 273 - omap_device_idle(uart->pdev); 274 - } 275 - 276 - static 
void omap_uart_enable_wakeup(struct omap_uart_state *uart) 277 - { 278 - /* Set wake-enable bit */ 279 - if (uart->wk_en && uart->wk_mask) { 280 - u32 v = __raw_readl(uart->wk_en); 281 - v |= uart->wk_mask; 282 - __raw_writel(v, uart->wk_en); 283 - } 284 - 285 - /* Ensure IOPAD wake-enables are set */ 286 - if (cpu_is_omap34xx() && uart->padconf) { 287 - u16 v = omap_ctrl_readw(uart->padconf); 288 - v |= OMAP3_PADCONF_WAKEUPENABLE0; 289 - omap_ctrl_writew(v, uart->padconf); 290 - } 291 - } 292 - 293 - static void omap_uart_disable_wakeup(struct omap_uart_state *uart) 294 - { 295 - /* Clear wake-enable bit */ 296 - if (uart->wk_en && uart->wk_mask) { 297 - u32 v = __raw_readl(uart->wk_en); 298 - v &= ~uart->wk_mask; 299 - __raw_writel(v, uart->wk_en); 300 - } 301 - 302 - /* Ensure IOPAD wake-enables are cleared */ 303 - if (cpu_is_omap34xx() && uart->padconf) { 304 - u16 v = omap_ctrl_readw(uart->padconf); 305 - v &= ~OMAP3_PADCONF_WAKEUPENABLE0; 306 - omap_ctrl_writew(v, uart->padconf); 307 - } 308 - } 309 - 310 - static void omap_uart_smart_idle_enable(struct omap_uart_state *uart, 311 - int enable) 312 - { 313 - u8 idlemode; 314 - 315 - if (enable) { 316 - /** 317 - * Errata 2.15: [UART]:Cannot Acknowledge Idle Requests 318 - * in Smartidle Mode When Configured for DMA Operations. 
319 - */ 320 - if (uart->dma_enabled) 321 - idlemode = HWMOD_IDLEMODE_FORCE; 322 - else 323 - idlemode = HWMOD_IDLEMODE_SMART; 324 - } else { 325 - idlemode = HWMOD_IDLEMODE_NO; 326 - } 327 - 328 - omap_hwmod_set_slave_idlemode(uart->oh, idlemode); 329 - } 330 - 331 - static void omap_uart_block_sleep(struct omap_uart_state *uart) 332 - { 333 - omap_uart_enable_clocks(uart); 334 - 335 - omap_uart_smart_idle_enable(uart, 0); 336 - uart->can_sleep = 0; 337 - if (uart->timeout) 338 - mod_timer(&uart->timer, jiffies + uart->timeout); 97 + if (enable) 98 + omap_hwmod_enable_wakeup(od->hwmods[0]); 339 99 else 340 - del_timer(&uart->timer); 100 + omap_hwmod_disable_wakeup(od->hwmods[0]); 341 101 } 342 102 343 - static void omap_uart_allow_sleep(struct omap_uart_state *uart) 103 + /* 104 + * Errata i291: [UART]:Cannot Acknowledge Idle Requests 105 + * in Smartidle Mode When Configured for DMA Operations. 106 + * WA: configure uart in force idle mode. 107 + */ 108 + static void omap_uart_set_noidle(struct platform_device *pdev) 344 109 { 345 - if (device_may_wakeup(&uart->pdev->dev)) 346 - omap_uart_enable_wakeup(uart); 347 - else 348 - omap_uart_disable_wakeup(uart); 110 + struct omap_device *od = to_omap_device(pdev); 349 111 350 - if (!uart->clocked) 351 - return; 352 - 353 - omap_uart_smart_idle_enable(uart, 1); 354 - uart->can_sleep = 1; 355 - del_timer(&uart->timer); 112 + omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO); 356 113 } 357 114 358 - static void omap_uart_idle_timer(unsigned long data) 115 + static void omap_uart_set_forceidle(struct platform_device *pdev) 359 116 { 360 - struct omap_uart_state *uart = (struct omap_uart_state *)data; 117 + struct omap_device *od = to_omap_device(pdev); 361 118 362 - omap_uart_allow_sleep(uart); 119 + omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE); 363 120 } 364 121 365 - void omap_uart_prepare_idle(int num) 366 - { 367 - struct omap_uart_state *uart; 368 - 369 - list_for_each_entry(uart, 
&uart_list, node) { 370 - if (num == uart->num && uart->can_sleep) { 371 - omap_uart_disable_clocks(uart); 372 - return; 373 - } 374 - } 375 - } 376 - 377 - void omap_uart_resume_idle(int num) 378 - { 379 - struct omap_uart_state *uart; 380 - 381 - list_for_each_entry(uart, &uart_list, node) { 382 - if (num == uart->num && uart->can_sleep) { 383 - omap_uart_enable_clocks(uart); 384 - 385 - /* Check for IO pad wakeup */ 386 - if (cpu_is_omap34xx() && uart->padconf) { 387 - u16 p = omap_ctrl_readw(uart->padconf); 388 - 389 - if (p & OMAP3_PADCONF_WAKEUPEVENT0) 390 - omap_uart_block_sleep(uart); 391 - } 392 - 393 - /* Check for normal UART wakeup */ 394 - if (__raw_readl(uart->wk_st) & uart->wk_mask) 395 - omap_uart_block_sleep(uart); 396 - return; 397 - } 398 - } 399 - } 400 - 401 - void omap_uart_prepare_suspend(void) 402 - { 403 - struct omap_uart_state *uart; 404 - 405 - list_for_each_entry(uart, &uart_list, node) { 406 - omap_uart_allow_sleep(uart); 407 - } 408 - } 409 - 410 - int omap_uart_can_sleep(void) 411 - { 412 - struct omap_uart_state *uart; 413 - int can_sleep = 1; 414 - 415 - list_for_each_entry(uart, &uart_list, node) { 416 - if (!uart->clocked) 417 - continue; 418 - 419 - if (!uart->can_sleep) { 420 - can_sleep = 0; 421 - continue; 422 - } 423 - 424 - /* This UART can now safely sleep. */ 425 - omap_uart_allow_sleep(uart); 426 - } 427 - 428 - return can_sleep; 429 - } 430 - 431 - /** 432 - * omap_uart_interrupt() 433 - * 434 - * This handler is used only to detect that *any* UART interrupt has 435 - * occurred. It does _nothing_ to handle the interrupt. Rather, 436 - * any UART interrupt will trigger the inactivity timer so the 437 - * UART will not idle or sleep for its timeout period. 
438 - * 439 - **/ 440 - /* static int first_interrupt; */ 441 - static irqreturn_t omap_uart_interrupt(int irq, void *dev_id) 442 - { 443 - struct omap_uart_state *uart = dev_id; 444 - 445 - omap_uart_block_sleep(uart); 446 - 447 - return IRQ_NONE; 448 - } 449 - 450 - static void omap_uart_idle_init(struct omap_uart_state *uart) 451 - { 452 - int ret; 453 - 454 - uart->can_sleep = 0; 455 - uart->timeout = DEFAULT_TIMEOUT; 456 - setup_timer(&uart->timer, omap_uart_idle_timer, 457 - (unsigned long) uart); 458 - if (uart->timeout) 459 - mod_timer(&uart->timer, jiffies + uart->timeout); 460 - omap_uart_smart_idle_enable(uart, 0); 461 - 462 - if (cpu_is_omap34xx() && !(cpu_is_ti81xx() || cpu_is_am33xx())) { 463 - u32 mod = (uart->num > 1) ? OMAP3430_PER_MOD : CORE_MOD; 464 - u32 wk_mask = 0; 465 - u32 padconf = 0; 466 - 467 - /* XXX These PRM accesses do not belong here */ 468 - uart->wk_en = OMAP34XX_PRM_REGADDR(mod, PM_WKEN1); 469 - uart->wk_st = OMAP34XX_PRM_REGADDR(mod, PM_WKST1); 470 - switch (uart->num) { 471 - case 0: 472 - wk_mask = OMAP3430_ST_UART1_MASK; 473 - padconf = 0x182; 474 - break; 475 - case 1: 476 - wk_mask = OMAP3430_ST_UART2_MASK; 477 - padconf = 0x17a; 478 - break; 479 - case 2: 480 - wk_mask = OMAP3430_ST_UART3_MASK; 481 - padconf = 0x19e; 482 - break; 483 - case 3: 484 - wk_mask = OMAP3630_ST_UART4_MASK; 485 - padconf = 0x0d2; 486 - break; 487 - } 488 - uart->wk_mask = wk_mask; 489 - uart->padconf = padconf; 490 - } else if (cpu_is_omap24xx()) { 491 - u32 wk_mask = 0; 492 - u32 wk_en = PM_WKEN1, wk_st = PM_WKST1; 493 - 494 - switch (uart->num) { 495 - case 0: 496 - wk_mask = OMAP24XX_ST_UART1_MASK; 497 - break; 498 - case 1: 499 - wk_mask = OMAP24XX_ST_UART2_MASK; 500 - break; 501 - case 2: 502 - wk_en = OMAP24XX_PM_WKEN2; 503 - wk_st = OMAP24XX_PM_WKST2; 504 - wk_mask = OMAP24XX_ST_UART3_MASK; 505 - break; 506 - } 507 - uart->wk_mask = wk_mask; 508 - if (cpu_is_omap2430()) { 509 - uart->wk_en = OMAP2430_PRM_REGADDR(CORE_MOD, wk_en); 510 - 
uart->wk_st = OMAP2430_PRM_REGADDR(CORE_MOD, wk_st); 511 - } else if (cpu_is_omap2420()) { 512 - uart->wk_en = OMAP2420_PRM_REGADDR(CORE_MOD, wk_en); 513 - uart->wk_st = OMAP2420_PRM_REGADDR(CORE_MOD, wk_st); 514 - } 515 - } else { 516 - uart->wk_en = NULL; 517 - uart->wk_st = NULL; 518 - uart->wk_mask = 0; 519 - uart->padconf = 0; 520 - } 521 - 522 - uart->irqflags |= IRQF_SHARED; 523 - ret = request_threaded_irq(uart->irq, NULL, omap_uart_interrupt, 524 - IRQF_SHARED, "serial idle", (void *)uart); 525 - WARN_ON(ret); 526 - } 527 - 528 - void omap_uart_enable_irqs(int enable) 529 - { 530 - int ret; 531 - struct omap_uart_state *uart; 532 - 533 - list_for_each_entry(uart, &uart_list, node) { 534 - if (enable) { 535 - pm_runtime_put_sync(&uart->pdev->dev); 536 - ret = request_threaded_irq(uart->irq, NULL, 537 - omap_uart_interrupt, 538 - IRQF_SHARED, 539 - "serial idle", 540 - (void *)uart); 541 - } else { 542 - pm_runtime_get_noresume(&uart->pdev->dev); 543 - free_irq(uart->irq, (void *)uart); 544 - } 545 - } 546 - } 547 - 548 - static ssize_t sleep_timeout_show(struct device *dev, 549 - struct device_attribute *attr, 550 - char *buf) 551 - { 552 - struct platform_device *pdev = to_platform_device(dev); 553 - struct omap_device *odev = to_omap_device(pdev); 554 - struct omap_uart_state *uart = odev->hwmods[0]->dev_attr; 555 - 556 - return sprintf(buf, "%u\n", uart->timeout / HZ); 557 - } 558 - 559 - static ssize_t sleep_timeout_store(struct device *dev, 560 - struct device_attribute *attr, 561 - const char *buf, size_t n) 562 - { 563 - struct platform_device *pdev = to_platform_device(dev); 564 - struct omap_device *odev = to_omap_device(pdev); 565 - struct omap_uart_state *uart = odev->hwmods[0]->dev_attr; 566 - unsigned int value; 567 - 568 - if (sscanf(buf, "%u", &value) != 1) { 569 - dev_err(dev, "sleep_timeout_store: Invalid value\n"); 570 - return -EINVAL; 571 - } 572 - 573 - uart->timeout = value * HZ; 574 - if (uart->timeout) 575 - mod_timer(&uart->timer, 
jiffies + uart->timeout); 576 - else 577 - /* A zero value means disable timeout feature */ 578 - omap_uart_block_sleep(uart); 579 - 580 - return n; 581 - } 582 - 583 - static DEVICE_ATTR(sleep_timeout, 0644, sleep_timeout_show, 584 - sleep_timeout_store); 585 - #define DEV_CREATE_FILE(dev, attr) WARN_ON(device_create_file(dev, attr)) 586 122 #else 587 - static inline void omap_uart_idle_init(struct omap_uart_state *uart) {} 588 - static void omap_uart_block_sleep(struct omap_uart_state *uart) 589 - { 590 - /* Needed to enable UART clocks when built without CONFIG_PM */ 591 - omap_uart_enable_clocks(uart); 592 - } 593 - #define DEV_CREATE_FILE(dev, attr) 123 + static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable) 124 + {} 125 + static void omap_uart_set_noidle(struct platform_device *pdev) {} 126 + static void omap_uart_set_forceidle(struct platform_device *pdev) {} 594 127 #endif /* CONFIG_PM */ 595 128 596 - #ifndef CONFIG_SERIAL_OMAP 597 - /* 598 - * Override the default 8250 read handler: mem_serial_in() 599 - * Empty RX fifo read causes an abort on omap3630 and omap4 600 - * This function makes sure that an empty rx fifo is not read on these silicons 601 - * (OMAP1/2/3430 are not affected) 602 - */ 603 - static unsigned int serial_in_override(struct uart_port *up, int offset) 129 + #ifdef CONFIG_OMAP_MUX 130 + static struct omap_device_pad default_uart1_pads[] __initdata = { 131 + { 132 + .name = "uart1_cts.uart1_cts", 133 + .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 134 + }, 135 + { 136 + .name = "uart1_rts.uart1_rts", 137 + .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 138 + }, 139 + { 140 + .name = "uart1_tx.uart1_tx", 141 + .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 142 + }, 143 + { 144 + .name = "uart1_rx.uart1_rx", 145 + .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 146 + .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 147 + .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 148 + }, 149 + }; 150 + 151 + static 
struct omap_device_pad default_uart2_pads[] __initdata = { 152 + { 153 + .name = "uart2_cts.uart2_cts", 154 + .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 155 + }, 156 + { 157 + .name = "uart2_rts.uart2_rts", 158 + .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 159 + }, 160 + { 161 + .name = "uart2_tx.uart2_tx", 162 + .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 163 + }, 164 + { 165 + .name = "uart2_rx.uart2_rx", 166 + .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 167 + .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 168 + .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 169 + }, 170 + }; 171 + 172 + static struct omap_device_pad default_uart3_pads[] __initdata = { 173 + { 174 + .name = "uart3_cts_rctx.uart3_cts_rctx", 175 + .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, 176 + }, 177 + { 178 + .name = "uart3_rts_sd.uart3_rts_sd", 179 + .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 180 + }, 181 + { 182 + .name = "uart3_tx_irtx.uart3_tx_irtx", 183 + .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 184 + }, 185 + { 186 + .name = "uart3_rx_irrx.uart3_rx_irrx", 187 + .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 188 + .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0, 189 + .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0, 190 + }, 191 + }; 192 + 193 + static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = { 194 + { 195 + .name = "gpmc_wait2.uart4_tx", 196 + .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 197 + }, 198 + { 199 + .name = "gpmc_wait3.uart4_rx", 200 + .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 201 + .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE2, 202 + .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE2, 203 + }, 204 + }; 205 + 206 + static struct omap_device_pad default_omap4_uart4_pads[] __initdata = { 207 + { 208 + .name = "uart4_tx.uart4_tx", 209 + .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, 210 + }, 211 + { 212 + .name = "uart4_rx.uart4_rx", 213 + .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, 214 + .enable = OMAP_PIN_INPUT | 
OMAP_MUX_MODE0, 215 + .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0, 216 + }, 217 + }; 218 + 219 + static void omap_serial_fill_default_pads(struct omap_board_data *bdata) 604 220 { 605 - if (UART_RX == offset) { 606 - unsigned int lsr; 607 - lsr = __serial_read_reg(up, UART_LSR); 608 - if (!(lsr & UART_LSR_DR)) 609 - return -EPERM; 221 + switch (bdata->id) { 222 + case 0: 223 + bdata->pads = default_uart1_pads; 224 + bdata->pads_cnt = ARRAY_SIZE(default_uart1_pads); 225 + break; 226 + case 1: 227 + bdata->pads = default_uart2_pads; 228 + bdata->pads_cnt = ARRAY_SIZE(default_uart2_pads); 229 + break; 230 + case 2: 231 + bdata->pads = default_uart3_pads; 232 + bdata->pads_cnt = ARRAY_SIZE(default_uart3_pads); 233 + break; 234 + case 3: 235 + if (cpu_is_omap44xx()) { 236 + bdata->pads = default_omap4_uart4_pads; 237 + bdata->pads_cnt = 238 + ARRAY_SIZE(default_omap4_uart4_pads); 239 + } else if (cpu_is_omap3630()) { 240 + bdata->pads = default_omap36xx_uart4_pads; 241 + bdata->pads_cnt = 242 + ARRAY_SIZE(default_omap36xx_uart4_pads); 243 + } 244 + break; 245 + default: 246 + break; 610 247 } 611 - 612 - return __serial_read_reg(up, offset); 613 248 } 614 - 615 - static void serial_out_override(struct uart_port *up, int offset, int value) 616 - { 617 - unsigned int status, tmout = 10000; 618 - 619 - status = __serial_read_reg(up, UART_LSR); 620 - while (!(status & UART_LSR_THRE)) { 621 - /* Wait up to 10ms for the character(s) to be sent. 
*/ 622 - if (--tmout == 0) 623 - break; 624 - udelay(1); 625 - status = __serial_read_reg(up, UART_LSR); 626 - } 627 - __serial_write_reg(up, offset, value); 628 - } 249 + #else 250 + static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {} 629 251 #endif 252 + 253 + char *cmdline_find_option(char *str) 254 + { 255 + extern char *saved_command_line; 256 + 257 + return strstr(saved_command_line, str); 258 + } 630 259 631 260 static int __init omap_serial_early_init(void) 632 261 { 633 - int i = 0; 634 - 635 262 do { 636 263 char oh_name[MAX_UART_HWMOD_NAME_LEN]; 637 264 struct omap_hwmod *oh; 638 265 struct omap_uart_state *uart; 266 + char uart_name[MAX_UART_HWMOD_NAME_LEN]; 639 267 640 268 snprintf(oh_name, MAX_UART_HWMOD_NAME_LEN, 641 - "uart%d", i + 1); 269 + "uart%d", num_uarts + 1); 642 270 oh = omap_hwmod_lookup(oh_name); 643 271 if (!oh) 644 272 break; ··· 271 653 return -ENODEV; 272 654 273 655 uart->oh = oh; 274 - uart->num = i++; 656 + uart->num = num_uarts++; 275 657 list_add_tail(&uart->node, &uart_list); 276 - num_uarts++; 658 + snprintf(uart_name, MAX_UART_HWMOD_NAME_LEN, 659 + "%s%d", OMAP_SERIAL_NAME, uart->num); 277 660 278 - /* 279 - * NOTE: omap_hwmod_setup*() has not yet been called, 280 - * so no hwmod functions will work yet. 281 - */ 661 + if (cmdline_find_option(uart_name)) { 662 + console_uart_id = uart->num; 282 663 283 - /* 284 - * During UART early init, device need to be probed 285 - * to determine SoC specific init before omap_device 286 - * is ready. 
Therefore, don't allow idle here 287 - */ 288 - uart->oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET; 664 + if (console_loglevel >= 10) { 665 + uart_debug = true; 666 + pr_info("%s used as console in debug mode" 667 + " uart%d clocks will not be" 668 + " gated", uart_name, uart->num); 669 + } 670 + 671 + if (cmdline_find_option("no_console_suspend")) 672 + no_console_suspend = true; 673 + 674 + /* 675 + * omap-uart can be used for earlyprintk logs 676 + * So if omap-uart is used as console then prevent 677 + * uart reset and idle to get logs from omap-uart 678 + * until uart console driver is available to take 679 + * care for console messages. 680 + * Idling or resetting omap-uart while printing logs 681 + * early boot logs can stall the boot-up. 682 + */ 683 + oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET; 684 + } 289 685 } while (1); 290 686 291 687 return 0; ··· 309 677 /** 310 678 * omap_serial_init_port() - initialize single serial port 311 679 * @bdata: port specific board data pointer 680 + * @info: platform specific data pointer 312 681 * 313 682 * This function initialies serial driver for given port only. 314 683 * Platforms can call this function instead of omap_serial_init() ··· 318 685 * Don't mix calls to omap_serial_init_port() and omap_serial_init(), 319 686 * use only one of the two. 
320 687 */ 321 - void __init omap_serial_init_port(struct omap_board_data *bdata) 688 + void __init omap_serial_init_port(struct omap_board_data *bdata, 689 + struct omap_uart_port_info *info) 322 690 { 323 691 struct omap_uart_state *uart; 324 692 struct omap_hwmod *oh; ··· 327 693 void *pdata = NULL; 328 694 u32 pdata_size = 0; 329 695 char *name; 330 - #ifndef CONFIG_SERIAL_OMAP 331 - struct plat_serial8250_port ports[2] = { 332 - {}, 333 - {.flags = 0}, 334 - }; 335 - struct plat_serial8250_port *p = &ports[0]; 336 - #else 337 696 struct omap_uart_port_info omap_up; 338 - #endif 339 697 340 698 if (WARN_ON(!bdata)) 341 699 return; ··· 339 713 list_for_each_entry(uart, &uart_list, node) 340 714 if (bdata->id == uart->num) 341 715 break; 716 + if (!info) 717 + info = omap_serial_default_info; 342 718 343 719 oh = uart->oh; 344 - uart->dma_enabled = 0; 345 - #ifndef CONFIG_SERIAL_OMAP 346 - name = "serial8250"; 347 - 348 - /* 349 - * !! 8250 driver does not use standard IORESOURCE* It 350 - * has it's own custom pdata that can be taken from 351 - * the hwmod resource data. But, this needs to be 352 - * done after the build. 353 - * 354 - * ?? does it have to be done before the register ?? 355 - * YES, because platform_device_data_add() copies 356 - * pdata, it does not use a pointer. 
357 - */ 358 - p->flags = UPF_BOOT_AUTOCONF; 359 - p->iotype = UPIO_MEM; 360 - p->regshift = 2; 361 - p->uartclk = OMAP24XX_BASE_BAUD * 16; 362 - p->irq = oh->mpu_irqs[0].irq; 363 - p->mapbase = oh->slaves[0]->addr->pa_start; 364 - p->membase = omap_hwmod_get_mpu_rt_va(oh); 365 - p->irqflags = IRQF_SHARED; 366 - p->private_data = uart; 367 - 368 - /* 369 - * omap44xx, ti816x: Never read empty UART fifo 370 - * omap3xxx: Never read empty UART fifo on UARTs 371 - * with IP rev >=0x52 372 - */ 373 - uart->regshift = p->regshift; 374 - uart->membase = p->membase; 375 - if (cpu_is_omap44xx() || cpu_is_ti81xx()) 376 - uart->errata |= UART_ERRATA_FIFO_FULL_ABORT; 377 - else if ((serial_read_reg(uart, UART_OMAP_MVER) & 0xFF) 378 - >= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV) 379 - uart->errata |= UART_ERRATA_FIFO_FULL_ABORT; 380 - 381 - if (uart->errata & UART_ERRATA_FIFO_FULL_ABORT) { 382 - p->serial_in = serial_in_override; 383 - p->serial_out = serial_out_override; 384 - } 385 - 386 - pdata = &ports[0]; 387 - pdata_size = 2 * sizeof(struct plat_serial8250_port); 388 - #else 389 - 390 720 name = DRIVER_NAME; 391 721 392 - omap_up.dma_enabled = uart->dma_enabled; 722 + omap_up.dma_enabled = info->dma_enabled; 393 723 omap_up.uartclk = OMAP24XX_BASE_BAUD * 16; 394 - omap_up.mapbase = oh->slaves[0]->addr->pa_start; 395 - omap_up.membase = omap_hwmod_get_mpu_rt_va(oh); 396 - omap_up.irqflags = IRQF_SHARED; 397 - omap_up.flags = UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ; 724 + omap_up.flags = UPF_BOOT_AUTOCONF; 725 + omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count; 726 + omap_up.set_forceidle = omap_uart_set_forceidle; 727 + omap_up.set_noidle = omap_uart_set_noidle; 728 + omap_up.enable_wakeup = omap_uart_enable_wakeup; 729 + omap_up.dma_rx_buf_size = info->dma_rx_buf_size; 730 + omap_up.dma_rx_timeout = info->dma_rx_timeout; 731 + omap_up.dma_rx_poll_rate = info->dma_rx_poll_rate; 732 + omap_up.autosuspend_timeout = info->autosuspend_timeout; 733 + 734 + /* Enable 
the MDR1 Errata i202 for OMAP2430/3xxx/44xx */ 735 + if (!cpu_is_omap2420() && !cpu_is_ti816x()) 736 + omap_up.errata |= UART_ERRATA_i202_MDR1_ACCESS; 737 + 738 + /* Enable DMA Mode Force Idle Errata i291 for omap34xx/3630 */ 739 + if (cpu_is_omap34xx() || cpu_is_omap3630()) 740 + omap_up.errata |= UART_ERRATA_i291_DMA_FORCEIDLE; 398 741 399 742 pdata = &omap_up; 400 743 pdata_size = sizeof(struct omap_uart_port_info); 401 - #endif 402 744 403 745 if (WARN_ON(!oh)) 404 746 return; ··· 376 782 WARN(IS_ERR(pdev), "Could not build omap_device for %s: %s.\n", 377 783 name, oh->name); 378 784 379 - omap_device_disable_idle_on_suspend(pdev); 785 + if ((console_uart_id == bdata->id) && no_console_suspend) 786 + omap_device_disable_idle_on_suspend(pdev); 787 + 380 788 oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt); 381 789 382 - uart->irq = oh->mpu_irqs[0].irq; 383 - uart->regshift = 2; 384 - uart->mapbase = oh->slaves[0]->addr->pa_start; 385 - uart->membase = omap_hwmod_get_mpu_rt_va(oh); 386 790 uart->pdev = pdev; 387 791 388 792 oh->dev_attr = uart; 389 793 390 - console_lock(); /* in case the earlycon is on the UART */ 391 - 392 - /* 393 - * Because of early UART probing, UART did not get idled 394 - * on init. Now that omap_device is ready, ensure full idle 395 - * before doing omap_device_enable(). 396 - */ 397 - omap_hwmod_idle(uart->oh); 398 - 399 - omap_device_enable(uart->pdev); 400 - omap_uart_idle_init(uart); 401 - omap_uart_reset(uart); 402 - omap_hwmod_enable_wakeup(uart->oh); 403 - omap_device_idle(uart->pdev); 404 - 405 - /* 406 - * Need to block sleep long enough for interrupt driven 407 - * driver to start. Console driver is in polling mode 408 - * so device needs to be kept enabled while polling driver 409 - * is in use. 
410 - */ 411 - if (uart->timeout) 412 - uart->timeout = (30 * HZ); 413 - omap_uart_block_sleep(uart); 414 - uart->timeout = DEFAULT_TIMEOUT; 415 - 416 - console_unlock(); 417 - 418 - if ((cpu_is_omap34xx() && uart->padconf) || 419 - (uart->wk_en && uart->wk_mask)) { 794 + if (((cpu_is_omap34xx() || cpu_is_omap44xx()) && bdata->pads) 795 + && !uart_debug) 420 796 device_init_wakeup(&pdev->dev, true); 421 - DEV_CREATE_FILE(&pdev->dev, &dev_attr_sleep_timeout); 422 - } 423 - 424 - /* Enable the MDR1 errata for OMAP3 */ 425 - if (cpu_is_omap34xx() && !(cpu_is_ti81xx() || cpu_is_am33xx())) 426 - uart->errata |= UART_ERRATA_i202_MDR1_ACCESS; 427 797 } 428 798 429 799 /** 430 - * omap_serial_init() - initialize all supported serial ports 800 + * omap_serial_board_init() - initialize all supported serial ports 801 + * @info: platform specific data pointer 431 802 * 432 803 * Initializes all available UARTs as serial ports. Platforms 433 804 * can call this function when they want to have default behaviour 434 805 * for serial ports (e.g initialize them all as serial ports). 435 806 */ 436 - void __init omap_serial_init(void) 807 + void __init omap_serial_board_init(struct omap_uart_port_info *info) 437 808 { 438 809 struct omap_uart_state *uart; 439 810 struct omap_board_data bdata; ··· 408 849 bdata.flags = 0; 409 850 bdata.pads = NULL; 410 851 bdata.pads_cnt = 0; 411 - omap_serial_init_port(&bdata); 412 852 853 + if (cpu_is_omap44xx() || cpu_is_omap34xx()) 854 + omap_serial_fill_default_pads(&bdata); 855 + 856 + if (!info) 857 + omap_serial_init_port(&bdata, NULL); 858 + else 859 + omap_serial_init_port(&bdata, &info[uart->num]); 413 860 } 861 + } 862 + 863 + /** 864 + * omap_serial_init() - initialize all supported serial ports 865 + * 866 + * Initializes all available UARTs. 867 + * Platforms can call this function when they want to have default behaviour 868 + * for serial ports (e.g initialize them all as serial ports). 
869 + */ 870 + void __init omap_serial_init(void) 871 + { 872 + omap_serial_board_init(NULL); 414 873 }
+379
arch/arm/mach-omap2/sleep44xx.S
··· 1 + /* 2 + * OMAP44xx sleep code. 3 + * 4 + * Copyright (C) 2011 Texas Instruments, Inc. 5 + * Santosh Shilimkar <santosh.shilimkar@ti.com> 6 + * 7 + * This program is free software,you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <linux/linkage.h> 13 + #include <asm/system.h> 14 + #include <asm/smp_scu.h> 15 + #include <asm/memory.h> 16 + #include <asm/hardware/cache-l2x0.h> 17 + 18 + #include <plat/omap44xx.h> 19 + #include <mach/omap-secure.h> 20 + 21 + #include "common.h" 22 + #include "omap4-sar-layout.h" 23 + 24 + #if defined(CONFIG_SMP) && defined(CONFIG_PM) 25 + 26 + .macro DO_SMC 27 + dsb 28 + smc #0 29 + dsb 30 + .endm 31 + 32 + ppa_zero_params: 33 + .word 0x0 34 + 35 + ppa_por_params: 36 + .word 1, 0 37 + 38 + /* 39 + * ============================= 40 + * == CPU suspend finisher == 41 + * ============================= 42 + * 43 + * void omap4_finish_suspend(unsigned long cpu_state) 44 + * 45 + * This function code saves the CPU context and performs the CPU 46 + * power down sequence. Calling WFI effectively changes the CPU 47 + * power domains states to the desired target power state. 48 + * 49 + * @cpu_state : contains context save state (r0) 50 + * 0 - No context lost 51 + * 1 - CPUx L1 and logic lost: MPUSS CSWR 52 + * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR 53 + * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF 54 + * @return: This function never returns for CPU OFF and DORMANT power states. 55 + * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up 56 + * from this follows a full CPU reset path via ROM code to CPU restore code. 57 + * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET. 58 + * It returns to the caller for CPU INACTIVE and ON power states or in case 59 + * CPU failed to transition to targeted OFF/DORMANT state. 
60 + */ 61 + ENTRY(omap4_finish_suspend) 62 + stmfd sp!, {lr} 63 + cmp r0, #0x0 64 + beq do_WFI @ No lowpower state, jump to WFI 65 + 66 + /* 67 + * Flush all data from the L1 data cache before disabling 68 + * SCTLR.C bit. 69 + */ 70 + bl omap4_get_sar_ram_base 71 + ldr r9, [r0, #OMAP_TYPE_OFFSET] 72 + cmp r9, #0x1 @ Check for HS device 73 + bne skip_secure_l1_clean 74 + mov r0, #SCU_PM_NORMAL 75 + mov r1, #0xFF @ clean seucre L1 76 + stmfd r13!, {r4-r12, r14} 77 + ldr r12, =OMAP4_MON_SCU_PWR_INDEX 78 + DO_SMC 79 + ldmfd r13!, {r4-r12, r14} 80 + skip_secure_l1_clean: 81 + bl v7_flush_dcache_all 82 + 83 + /* 84 + * Clear the SCTLR.C bit to prevent further data cache 85 + * allocation. Clearing SCTLR.C would make all the data accesses 86 + * strongly ordered and would not hit the cache. 87 + */ 88 + mrc p15, 0, r0, c1, c0, 0 89 + bic r0, r0, #(1 << 2) @ Disable the C bit 90 + mcr p15, 0, r0, c1, c0, 0 91 + isb 92 + 93 + /* 94 + * Invalidate L1 data cache. Even though only invalidate is 95 + * necessary exported flush API is used here. Doing clean 96 + * on already clean cache would be almost NOP. 97 + */ 98 + bl v7_flush_dcache_all 99 + 100 + /* 101 + * Switch the CPU from Symmetric Multiprocessing (SMP) mode 102 + * to AsymmetricMultiprocessing (AMP) mode by programming 103 + * the SCU power status to DORMANT or OFF mode. 104 + * This enables the CPU to be taken out of coherency by 105 + * preventing the CPU from receiving cache, TLB, or BTB 106 + * maintenance operations broadcast by other CPUs in the cluster. 
107 + */ 108 + bl omap4_get_sar_ram_base 109 + mov r8, r0 110 + ldr r9, [r8, #OMAP_TYPE_OFFSET] 111 + cmp r9, #0x1 @ Check for HS device 112 + bne scu_gp_set 113 + mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR 114 + ands r0, r0, #0x0f 115 + ldreq r0, [r8, #SCU_OFFSET0] 116 + ldrne r0, [r8, #SCU_OFFSET1] 117 + mov r1, #0x00 118 + stmfd r13!, {r4-r12, r14} 119 + ldr r12, =OMAP4_MON_SCU_PWR_INDEX 120 + DO_SMC 121 + ldmfd r13!, {r4-r12, r14} 122 + b skip_scu_gp_set 123 + scu_gp_set: 124 + mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR 125 + ands r0, r0, #0x0f 126 + ldreq r1, [r8, #SCU_OFFSET0] 127 + ldrne r1, [r8, #SCU_OFFSET1] 128 + bl omap4_get_scu_base 129 + bl scu_power_mode 130 + skip_scu_gp_set: 131 + mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data 132 + tst r0, #(1 << 18) 133 + mrcne p15, 0, r0, c1, c0, 1 134 + bicne r0, r0, #(1 << 6) @ Disable SMP bit 135 + mcrne p15, 0, r0, c1, c0, 1 136 + isb 137 + dsb 138 + #ifdef CONFIG_CACHE_L2X0 139 + /* 140 + * Clean and invalidate the L2 cache. 141 + * Common cache-l2x0.c functions can't be used here since it 142 + * uses spinlocks. We are out of coherency here with data cache 143 + * disabled. The spinlock implementation uses exclusive load/store 144 + * instruction which can fail without data cache being enabled. 145 + * OMAP4 hardware doesn't support exclusive monitor which can 146 + * overcome exclusive access issue. Because of this, CPU can 147 + * lead to deadlock. 148 + */ 149 + bl omap4_get_sar_ram_base 150 + mov r8, r0 151 + mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR 152 + ands r5, r5, #0x0f 153 + ldreq r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR 154 + ldrne r0, [r8, #L2X0_SAVE_OFFSET1] @ memory. 
155 + cmp r0, #3 156 + bne do_WFI 157 + #ifdef CONFIG_PL310_ERRATA_727915 158 + mov r0, #0x03 159 + mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX 160 + DO_SMC 161 + #endif 162 + bl omap4_get_l2cache_base 163 + mov r2, r0 164 + ldr r0, =0xffff 165 + str r0, [r2, #L2X0_CLEAN_INV_WAY] 166 + wait: 167 + ldr r0, [r2, #L2X0_CLEAN_INV_WAY] 168 + ldr r1, =0xffff 169 + ands r0, r0, r1 170 + bne wait 171 + #ifdef CONFIG_PL310_ERRATA_727915 172 + mov r0, #0x00 173 + mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX 174 + DO_SMC 175 + #endif 176 + l2x_sync: 177 + bl omap4_get_l2cache_base 178 + mov r2, r0 179 + mov r0, #0x0 180 + str r0, [r2, #L2X0_CACHE_SYNC] 181 + sync: 182 + ldr r0, [r2, #L2X0_CACHE_SYNC] 183 + ands r0, r0, #0x1 184 + bne sync 185 + #endif 186 + 187 + do_WFI: 188 + bl omap_do_wfi 189 + 190 + /* 191 + * CPU is here when it failed to enter OFF/DORMANT or 192 + * no low power state was attempted. 193 + */ 194 + mrc p15, 0, r0, c1, c0, 0 195 + tst r0, #(1 << 2) @ Check C bit enabled? 196 + orreq r0, r0, #(1 << 2) @ Enable the C bit 197 + mcreq p15, 0, r0, c1, c0, 0 198 + isb 199 + 200 + /* 201 + * Ensure the CPU power state is set to NORMAL in 202 + * SCU power state so that CPU is back in coherency. 203 + * In non-coherent mode CPU can lock-up and lead to 204 + * system deadlock. 205 + */ 206 + mrc p15, 0, r0, c1, c0, 1 207 + tst r0, #(1 << 6) @ Check SMP bit enabled? 
208 + orreq r0, r0, #(1 << 6) 209 + mcreq p15, 0, r0, c1, c0, 1 210 + isb 211 + bl omap4_get_sar_ram_base 212 + mov r8, r0 213 + ldr r9, [r8, #OMAP_TYPE_OFFSET] 214 + cmp r9, #0x1 @ Check for HS device 215 + bne scu_gp_clear 216 + mov r0, #SCU_PM_NORMAL 217 + mov r1, #0x00 218 + stmfd r13!, {r4-r12, r14} 219 + ldr r12, =OMAP4_MON_SCU_PWR_INDEX 220 + DO_SMC 221 + ldmfd r13!, {r4-r12, r14} 222 + b skip_scu_gp_clear 223 + scu_gp_clear: 224 + bl omap4_get_scu_base 225 + mov r1, #SCU_PM_NORMAL 226 + bl scu_power_mode 227 + skip_scu_gp_clear: 228 + isb 229 + dsb 230 + ldmfd sp!, {pc} 231 + ENDPROC(omap4_finish_suspend) 232 + 233 + /* 234 + * ============================ 235 + * == CPU resume entry point == 236 + * ============================ 237 + * 238 + * void omap4_cpu_resume(void) 239 + * 240 + * ROM code jumps to this function while waking up from CPU 241 + * OFF or DORMANT state. Physical address of the function is 242 + * stored in the SAR RAM while entering to OFF or DORMANT mode. 243 + * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET. 244 + */ 245 + ENTRY(omap4_cpu_resume) 246 + /* 247 + * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device. 248 + * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA 249 + * init and for CPU1, a secure PPA API provided. CPU0 must be ON 250 + * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+. 251 + * OMAP443X GP devices- SMP bit isn't accessible. 252 + * OMAP446X GP devices - SMP bit access is enabled on both CPUs. 
253 + */ 254 + ldr r8, =OMAP44XX_SAR_RAM_BASE 255 + ldr r9, [r8, #OMAP_TYPE_OFFSET] 256 + cmp r9, #0x1 @ Skip if GP device 257 + bne skip_ns_smp_enable 258 + mrc p15, 0, r0, c0, c0, 5 259 + ands r0, r0, #0x0f 260 + beq skip_ns_smp_enable 261 + ppa_actrl_retry: 262 + mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX 263 + adr r3, ppa_zero_params @ Pointer to parameters 264 + mov r1, #0x0 @ Process ID 265 + mov r2, #0x4 @ Flag 266 + mov r6, #0xff 267 + mov r12, #0x00 @ Secure Service ID 268 + DO_SMC 269 + cmp r0, #0x0 @ API returns 0 on success. 270 + beq enable_smp_bit 271 + b ppa_actrl_retry 272 + enable_smp_bit: 273 + mrc p15, 0, r0, c1, c0, 1 274 + tst r0, #(1 << 6) @ Check SMP bit enabled? 275 + orreq r0, r0, #(1 << 6) 276 + mcreq p15, 0, r0, c1, c0, 1 277 + isb 278 + skip_ns_smp_enable: 279 + #ifdef CONFIG_CACHE_L2X0 280 + /* 281 + * Restore the L2 AUXCTRL and enable the L2 cache. 282 + * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL 283 + * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL 284 + * register r0 contains value to be programmed. 285 + * L2 cache is already invalidate by ROM code as part 286 + * of MPUSS OFF wakeup path. 
287 + */ 288 + ldr r2, =OMAP44XX_L2CACHE_BASE 289 + ldr r0, [r2, #L2X0_CTRL] 290 + and r0, #0x0f 291 + cmp r0, #1 292 + beq skip_l2en @ Skip if already enabled 293 + ldr r3, =OMAP44XX_SAR_RAM_BASE 294 + ldr r1, [r3, #OMAP_TYPE_OFFSET] 295 + cmp r1, #0x1 @ Check for HS device 296 + bne set_gp_por 297 + ldr r0, =OMAP4_PPA_L2_POR_INDEX 298 + ldr r1, =OMAP44XX_SAR_RAM_BASE 299 + ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET] 300 + adr r3, ppa_por_params 301 + str r4, [r3, #0x04] 302 + mov r1, #0x0 @ Process ID 303 + mov r2, #0x4 @ Flag 304 + mov r6, #0xff 305 + mov r12, #0x00 @ Secure Service ID 306 + DO_SMC 307 + b set_aux_ctrl 308 + set_gp_por: 309 + ldr r1, =OMAP44XX_SAR_RAM_BASE 310 + ldr r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET] 311 + ldr r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH 312 + DO_SMC 313 + set_aux_ctrl: 314 + ldr r1, =OMAP44XX_SAR_RAM_BASE 315 + ldr r0, [r1, #L2X0_AUXCTRL_OFFSET] 316 + ldr r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX @ Setup L2 AUXCTRL 317 + DO_SMC 318 + mov r0, #0x1 319 + ldr r12, =OMAP4_MON_L2X0_CTRL_INDEX @ Enable L2 cache 320 + DO_SMC 321 + skip_l2en: 322 + #endif 323 + 324 + b cpu_resume @ Jump to generic resume 325 + ENDPROC(omap4_cpu_resume) 326 + #endif 327 + 328 + #ifndef CONFIG_OMAP4_ERRATA_I688 329 + ENTRY(omap_bus_sync) 330 + mov pc, lr 331 + ENDPROC(omap_bus_sync) 332 + #endif 333 + 334 + ENTRY(omap_do_wfi) 335 + stmfd sp!, {lr} 336 + /* Drain interconnect write buffers. */ 337 + bl omap_bus_sync 338 + 339 + /* 340 + * Execute an ISB instruction to ensure that all of the 341 + * CP15 register changes have been committed. 342 + */ 343 + isb 344 + 345 + /* 346 + * Execute a barrier instruction to ensure that all cache, 347 + * TLB and branch predictor maintenance operations issued 348 + * by any CPU in the cluster have completed. 349 + */ 350 + dsb 351 + dmb 352 + 353 + /* 354 + * Execute a WFI instruction and wait until the 355 + * STANDBYWFI output is asserted to indicate that the 356 + * CPU is in idle and low power state. 
CPU can specualatively 357 + * prefetch the instructions so add NOPs after WFI. Sixteen 358 + * NOPs as per Cortex-A9 pipeline. 359 + */ 360 + wfi @ Wait For Interrupt 361 + nop 362 + nop 363 + nop 364 + nop 365 + nop 366 + nop 367 + nop 368 + nop 369 + nop 370 + nop 371 + nop 372 + nop 373 + nop 374 + nop 375 + nop 376 + nop 377 + 378 + ldmfd sp!, {pc} 379 + ENDPROC(omap_do_wfi)
+7 -1
arch/arm/plat-mxc/include/mach/common.h
··· 131 131 extern void imx53_qsb_common_init(void); 132 132 extern void imx53_smd_common_init(void); 133 133 extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode); 134 - extern void imx6q_pm_init(void); 135 134 extern void imx6q_clock_map_io(void); 135 + 136 + #ifdef CONFIG_PM 137 + extern void imx6q_pm_init(void); 138 + #else 139 + static inline void imx6q_pm_init(void) {} 140 + #endif 141 + 136 142 #endif
+1 -1
arch/arm/plat-mxc/include/mach/mxc.h
··· 168 168 u32 cpu_rate; 169 169 }; 170 170 171 - int tzic_enable_wake(int is_idle); 171 + int tzic_enable_wake(void); 172 172 173 173 extern struct cpu_op *(*get_cpu_op)(int *op); 174 174 #endif
+29 -11
arch/arm/plat-mxc/tzic.c
··· 73 73 #define tzic_set_irq_fiq NULL 74 74 #endif 75 75 76 - static unsigned int *wakeup_intr[4]; 76 + #ifdef CONFIG_PM 77 + static void tzic_irq_suspend(struct irq_data *d) 78 + { 79 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 80 + int idx = gc->irq_base >> 5; 81 + 82 + __raw_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx)); 83 + } 84 + 85 + static void tzic_irq_resume(struct irq_data *d) 86 + { 87 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 88 + int idx = gc->irq_base >> 5; 89 + 90 + __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(idx)), 91 + tzic_base + TZIC_WAKEUP0(idx)); 92 + } 93 + 94 + #else 95 + #define tzic_irq_suspend NULL 96 + #define tzic_irq_resume NULL 97 + #endif 77 98 78 99 static struct mxc_extra_irq tzic_extra_irq = { 79 100 #ifdef CONFIG_FIQ ··· 112 91 handle_level_irq); 113 92 gc->private = &tzic_extra_irq; 114 93 gc->wake_enabled = IRQ_MSK(32); 115 - wakeup_intr[idx] = &gc->wake_active; 116 94 117 95 ct = gc->chip_types; 118 96 ct->chip.irq_mask = irq_gc_mask_disable_reg; 119 97 ct->chip.irq_unmask = irq_gc_unmask_enable_reg; 120 98 ct->chip.irq_set_wake = irq_gc_set_wake; 99 + ct->chip.irq_suspend = tzic_irq_suspend; 100 + ct->chip.irq_resume = tzic_irq_resume; 121 101 ct->regs.disable = TZIC_ENCLEAR0(idx); 122 102 ct->regs.enable = TZIC_ENSET0(idx); 123 103 ··· 189 167 /** 190 168 * tzic_enable_wake() - enable wakeup interrupt 191 169 * 192 - * @param is_idle 1 if called in idle loop (ENSET0 register); 193 - * 0 to be used when called from low power entry 194 170 * @return 0 if successful; non-zero otherwise 195 171 */ 196 - int tzic_enable_wake(int is_idle) 172 + int tzic_enable_wake(void) 197 173 { 198 - unsigned int i, v; 174 + unsigned int i; 199 175 200 176 __raw_writel(1, tzic_base + TZIC_DSMINT); 201 177 if (unlikely(__raw_readl(tzic_base + TZIC_DSMINT) == 0)) 202 178 return -EAGAIN; 203 179 204 - for (i = 0; i < 4; i++) { 205 - v = is_idle ? 
__raw_readl(tzic_base + TZIC_ENSET0(i)) : 206 - *wakeup_intr[i]; 207 - __raw_writel(v, tzic_base + TZIC_WAKEUP0(i)); 208 - } 180 + for (i = 0; i < 4; i++) 181 + __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(i)), 182 + tzic_base + TZIC_WAKEUP0(i)); 209 183 210 184 return 0; 211 185 }
+3
arch/arm/plat-omap/common.c
··· 22 22 #include <plat/vram.h> 23 23 #include <plat/dsp.h> 24 24 25 + #include <plat/omap-secure.h> 26 + 25 27 26 28 #define NO_LENGTH_CHECK 0xffffffff 27 29 ··· 68 66 omapfb_reserve_sdram_memblock(); 69 67 omap_vram_reserve_sdram_memblock(); 70 68 omap_dsp_reserve_sdram_memblock(); 69 + omap_secure_ram_reserve_memblock(); 71 70 } 72 71 73 72 void __init omap_init_consistent_dma_size(void)
+13
arch/arm/plat-omap/include/plat/omap-secure.h
··· 1 + #ifndef __OMAP_SECURE_H__ 2 + #define __OMAP_SECURE_H__ 3 + 4 + #include <linux/types.h> 5 + 6 + #ifdef CONFIG_ARCH_OMAP2PLUS 7 + extern int omap_secure_ram_reserve_memblock(void); 8 + #else 9 + static inline void omap_secure_ram_reserve_memblock(void) 10 + { } 11 + #endif 12 + 13 + #endif /* __OMAP_SECURE_H__ */
+31 -6
arch/arm/plat-omap/include/plat/omap-serial.h
··· 19 19 20 20 #include <linux/serial_core.h> 21 21 #include <linux/platform_device.h> 22 + #include <linux/pm_qos.h> 22 23 23 24 #include <plat/mux.h> 24 25 ··· 33 32 #define OMAP_SERIAL_NAME "ttyO" 34 33 35 34 #define OMAP_MODE13X_SPEED 230400 35 + 36 + #define OMAP_UART_SCR_TX_EMPTY 0x08 36 37 37 38 /* WER = 0x7F 38 39 * Enable module level wakeup in WER reg ··· 54 51 55 52 #define OMAP_UART_DMA_CH_FREE -1 56 53 57 - #define RX_TIMEOUT (3 * HZ) 58 54 #define OMAP_MAX_HSUART_PORTS 4 59 55 60 56 #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA 61 57 58 + #define UART_ERRATA_i202_MDR1_ACCESS BIT(0) 59 + #define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1) 60 + 62 61 struct omap_uart_port_info { 63 62 bool dma_enabled; /* To specify DMA Mode */ 64 63 unsigned int uartclk; /* UART clock rate */ 65 - void __iomem *membase; /* ioremap cookie or NULL */ 66 - resource_size_t mapbase; /* resource base */ 67 - unsigned long irqflags; /* request_irq flags */ 68 64 upf_t flags; /* UPF_* flags */ 65 + u32 errata; 66 + unsigned int dma_rx_buf_size; 67 + unsigned int dma_rx_timeout; 68 + unsigned int autosuspend_timeout; 69 + unsigned int dma_rx_poll_rate; 70 + 71 + int (*get_context_loss_count)(struct device *); 72 + void (*set_forceidle)(struct platform_device *); 73 + void (*set_noidle)(struct platform_device *); 74 + void (*enable_wakeup)(struct platform_device *, bool); 69 75 }; 70 76 71 77 struct uart_omap_dma { ··· 98 86 spinlock_t rx_lock; 99 87 /* timer to poll activity on rx dma */ 100 88 struct timer_list rx_timer; 101 - int rx_buf_size; 102 - int rx_timeout; 89 + unsigned int rx_buf_size; 90 + unsigned int rx_poll_rate; 91 + unsigned int rx_timeout; 103 92 }; 104 93 105 94 struct uart_omap_port { ··· 113 100 unsigned char mcr; 114 101 unsigned char fcr; 115 102 unsigned char efr; 103 + unsigned char dll; 104 + unsigned char dlh; 105 + unsigned char mdr1; 106 + unsigned char scr; 116 107 117 108 int use_dma; 118 109 /* ··· 128 111 unsigned char msr_saved_flags; 129 112 char 
name[20]; 130 113 unsigned long port_activity; 114 + u32 context_loss_cnt; 115 + u32 errata; 116 + u8 wakeups_enabled; 117 + 118 + struct pm_qos_request pm_qos_request; 119 + u32 latency; 120 + u32 calc_latency; 121 + struct work_struct qos_work; 131 122 }; 132 123 133 124 #endif /* __OMAP_SERIAL_H__ */
+1
arch/arm/plat-omap/include/plat/omap44xx.h
··· 45 45 #define OMAP44XX_WKUPGEN_BASE 0x48281000 46 46 #define OMAP44XX_MCPDM_BASE 0x40132000 47 47 #define OMAP44XX_MCPDM_L3_BASE 0x49032000 48 + #define OMAP44XX_SAR_RAM_BASE 0x4a326000 48 49 49 50 #define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000) 50 51 #define OMAP44XX_HSUSB_OTG_BASE (L4_44XX_BASE + 0xAB000)
+6
arch/arm/plat-omap/include/plat/omap_hwmod.h
··· 97 97 struct omap_device_pad *pads; 98 98 int nr_pads_dynamic; 99 99 struct omap_device_pad **pads_dynamic; 100 + int *irqs; 100 101 bool enabled; 101 102 }; 102 103 ··· 417 416 * _HWMOD_NO_MPU_PORT: no path exists for the MPU to write to this module 418 417 * _HWMOD_WAKEUP_ENABLED: set when the omap_hwmod code has enabled ENAWAKEUP 419 418 * _HWMOD_SYSCONFIG_LOADED: set when the OCP_SYSCONFIG value has been cached 419 + * _HWMOD_SKIP_ENABLE: set if hwmod enabled during init (HWMOD_INIT_NO_IDLE) - 420 + * causes the first call to _enable() to only update the pinmux 420 421 */ 421 422 #define _HWMOD_NO_MPU_PORT (1 << 0) 422 423 #define _HWMOD_WAKEUP_ENABLED (1 << 1) 423 424 #define _HWMOD_SYSCONFIG_LOADED (1 << 2) 425 + #define _HWMOD_SKIP_ENABLE (1 << 3) 424 426 425 427 /* 426 428 * omap_hwmod._state definitions ··· 607 603 int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh); 608 604 609 605 int omap_hwmod_no_setup_reset(struct omap_hwmod *oh); 606 + 607 + int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx); 610 608 611 609 /* 612 610 * Chip variant-specific hwmod init routines - XXX should be converted
+4 -6
arch/arm/plat-omap/include/plat/serial.h
··· 107 107 #ifndef __ASSEMBLER__ 108 108 109 109 struct omap_board_data; 110 + struct omap_uart_port_info; 110 111 111 112 extern void omap_serial_init(void); 112 - extern void omap_serial_init_port(struct omap_board_data *bdata); 113 113 extern int omap_uart_can_sleep(void); 114 - extern void omap_uart_check_wakeup(void); 115 - extern void omap_uart_prepare_suspend(void); 116 - extern void omap_uart_prepare_idle(int num); 117 - extern void omap_uart_resume_idle(int num); 118 - extern void omap_uart_enable_irqs(int enable); 114 + extern void omap_serial_board_init(struct omap_uart_port_info *platform_data); 115 + extern void omap_serial_init_port(struct omap_board_data *bdata, 116 + struct omap_uart_port_info *platform_data); 119 117 #endif 120 118 121 119 #endif
+5 -1
arch/arm/plat-omap/include/plat/sram.h
··· 95 95 */ 96 96 #define OMAP2_SRAM_PA 0x40200000 97 97 #define OMAP3_SRAM_PA 0x40200000 98 + #ifdef CONFIG_OMAP4_ERRATA_I688 99 + #define OMAP4_SRAM_PA 0x40304000 100 + #define OMAP4_SRAM_VA 0xfe404000 101 + #else 98 102 #define OMAP4_SRAM_PA 0x40300000 99 - 103 + #endif 100 104 #endif
+8
arch/arm/plat-omap/sram.c
··· 40 40 #define OMAP1_SRAM_PA 0x20000000 41 41 #define OMAP2_SRAM_PUB_PA (OMAP2_SRAM_PA + 0xf800) 42 42 #define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000) 43 + #ifdef CONFIG_OMAP4_ERRATA_I688 44 + #define OMAP4_SRAM_PUB_PA OMAP4_SRAM_PA 45 + #else 43 46 #define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000) 47 + #endif 44 48 45 49 #if defined(CONFIG_ARCH_OMAP2PLUS) 46 50 #define SRAM_BOOTLOADER_SZ 0x00 ··· 165 161 if (omap_sram_size == 0) 166 162 return; 167 163 164 + #ifdef CONFIG_OMAP4_ERRATA_I688 165 + omap_sram_start += PAGE_SIZE; 166 + omap_sram_size -= SZ_16K; 167 + #endif 168 168 if (cpu_is_omap34xx()) { 169 169 /* 170 170 * SRAM must be marked as non-cached on OMAP3 since the
+367 -61
drivers/tty/serial/omap-serial.c
··· 37 37 #include <linux/clk.h> 38 38 #include <linux/serial_core.h> 39 39 #include <linux/irq.h> 40 + #include <linux/pm_runtime.h> 41 + #include <linux/of.h> 40 42 41 43 #include <plat/dma.h> 42 44 #include <plat/dmtimer.h> 43 45 #include <plat/omap-serial.h> 44 46 47 + #define DEFAULT_CLK_SPEED 48000000 /* 48Mhz*/ 48 + 45 49 static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS]; 46 50 47 51 /* Forward declaration of functions */ 48 52 static void uart_tx_dma_callback(int lch, u16 ch_status, void *data); 49 - static void serial_omap_rx_timeout(unsigned long uart_no); 53 + static void serial_omap_rxdma_poll(unsigned long uart_no); 50 54 static int serial_omap_start_rxdma(struct uart_omap_port *up); 55 + static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1); 56 + 57 + static struct workqueue_struct *serial_omap_uart_wq; 51 58 52 59 static inline unsigned int serial_in(struct uart_omap_port *up, int offset) 53 60 { ··· 109 102 omap_free_dma(up->uart_dma.rx_dma_channel); 110 103 up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE; 111 104 up->uart_dma.rx_dma_used = false; 105 + pm_runtime_mark_last_busy(&up->pdev->dev); 106 + pm_runtime_put_autosuspend(&up->pdev->dev); 112 107 } 113 108 } 114 109 ··· 118 109 { 119 110 struct uart_omap_port *up = (struct uart_omap_port *)port; 120 111 121 - dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id); 112 + dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line); 113 + 114 + pm_runtime_get_sync(&up->pdev->dev); 122 115 up->ier |= UART_IER_MSI; 123 116 serial_out(up, UART_IER, up->ier); 117 + pm_runtime_put(&up->pdev->dev); 124 118 } 125 119 126 120 static void serial_omap_stop_tx(struct uart_port *port) ··· 141 129 omap_stop_dma(up->uart_dma.tx_dma_channel); 142 130 omap_free_dma(up->uart_dma.tx_dma_channel); 143 131 up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE; 132 + pm_runtime_mark_last_busy(&up->pdev->dev); 133 + pm_runtime_put_autosuspend(&up->pdev->dev); 144 134 } 
145 135 136 + pm_runtime_get_sync(&up->pdev->dev); 146 137 if (up->ier & UART_IER_THRI) { 147 138 up->ier &= ~UART_IER_THRI; 148 139 serial_out(up, UART_IER, up->ier); 149 140 } 141 + 142 + pm_runtime_mark_last_busy(&up->pdev->dev); 143 + pm_runtime_put_autosuspend(&up->pdev->dev); 150 144 } 151 145 152 146 static void serial_omap_stop_rx(struct uart_port *port) 153 147 { 154 148 struct uart_omap_port *up = (struct uart_omap_port *)port; 155 149 150 + pm_runtime_get_sync(&up->pdev->dev); 156 151 if (up->use_dma) 157 152 serial_omap_stop_rxdma(up); 158 153 up->ier &= ~UART_IER_RLSI; 159 154 up->port.read_status_mask &= ~UART_LSR_DR; 160 155 serial_out(up, UART_IER, up->ier); 156 + pm_runtime_mark_last_busy(&up->pdev->dev); 157 + pm_runtime_put_autosuspend(&up->pdev->dev); 161 158 } 162 159 163 - static inline void receive_chars(struct uart_omap_port *up, int *status) 160 + static inline void receive_chars(struct uart_omap_port *up, 161 + unsigned int *status) 164 162 { 165 163 struct tty_struct *tty = up->port.state->port.tty; 166 - unsigned int flag; 167 - unsigned char ch, lsr = *status; 164 + unsigned int flag, lsr = *status; 165 + unsigned char ch = 0; 168 166 int max_count = 256; 169 167 170 168 do { ··· 284 262 int ret = 0; 285 263 286 264 if (!up->use_dma) { 265 + pm_runtime_get_sync(&up->pdev->dev); 287 266 serial_omap_enable_ier_thri(up); 267 + pm_runtime_mark_last_busy(&up->pdev->dev); 268 + pm_runtime_put_autosuspend(&up->pdev->dev); 288 269 return; 289 270 } 290 271 ··· 297 272 xmit = &up->port.state->xmit; 298 273 299 274 if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) { 275 + pm_runtime_get_sync(&up->pdev->dev); 300 276 ret = omap_request_dma(up->uart_dma.uart_dma_tx, 301 277 "UART Tx DMA", 302 278 (void *)uart_tx_dma_callback, up, ··· 380 354 unsigned int iir, lsr; 381 355 unsigned long flags; 382 356 357 + pm_runtime_get_sync(&up->pdev->dev); 383 358 iir = serial_in(up, UART_IIR); 384 - if (iir & UART_IIR_NO_INT) 359 + if (iir & 
UART_IIR_NO_INT) { 360 + pm_runtime_mark_last_busy(&up->pdev->dev); 361 + pm_runtime_put_autosuspend(&up->pdev->dev); 385 362 return IRQ_NONE; 363 + } 386 364 387 365 spin_lock_irqsave(&up->port.lock, flags); 388 366 lsr = serial_in(up, UART_LSR); ··· 408 378 transmit_chars(up); 409 379 410 380 spin_unlock_irqrestore(&up->port.lock, flags); 381 + pm_runtime_mark_last_busy(&up->pdev->dev); 382 + pm_runtime_put_autosuspend(&up->pdev->dev); 383 + 411 384 up->port_activity = jiffies; 412 385 return IRQ_HANDLED; 413 386 } ··· 421 388 unsigned long flags = 0; 422 389 unsigned int ret = 0; 423 390 424 - dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id); 391 + pm_runtime_get_sync(&up->pdev->dev); 392 + dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line); 425 393 spin_lock_irqsave(&up->port.lock, flags); 426 394 ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0; 427 395 spin_unlock_irqrestore(&up->port.lock, flags); 428 - 396 + pm_runtime_put(&up->pdev->dev); 429 397 return ret; 430 398 } 431 399 ··· 436 402 unsigned int status; 437 403 unsigned int ret = 0; 438 404 405 + pm_runtime_get_sync(&up->pdev->dev); 439 406 status = check_modem_status(up); 440 - dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id); 407 + pm_runtime_put(&up->pdev->dev); 408 + 409 + dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line); 441 410 442 411 if (status & UART_MSR_DCD) 443 412 ret |= TIOCM_CAR; ··· 458 421 struct uart_omap_port *up = (struct uart_omap_port *)port; 459 422 unsigned char mcr = 0; 460 423 461 - dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id); 424 + dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line); 462 425 if (mctrl & TIOCM_RTS) 463 426 mcr |= UART_MCR_RTS; 464 427 if (mctrl & TIOCM_DTR) ··· 470 433 if (mctrl & TIOCM_LOOP) 471 434 mcr |= UART_MCR_LOOP; 472 435 473 - mcr |= up->mcr; 474 - serial_out(up, UART_MCR, mcr); 436 + pm_runtime_get_sync(&up->pdev->dev); 437 + up->mcr 
= serial_in(up, UART_MCR); 438 + up->mcr |= mcr; 439 + serial_out(up, UART_MCR, up->mcr); 440 + pm_runtime_put(&up->pdev->dev); 475 441 } 476 442 477 443 static void serial_omap_break_ctl(struct uart_port *port, int break_state) ··· 482 442 struct uart_omap_port *up = (struct uart_omap_port *)port; 483 443 unsigned long flags = 0; 484 444 485 - dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id); 445 + dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line); 446 + pm_runtime_get_sync(&up->pdev->dev); 486 447 spin_lock_irqsave(&up->port.lock, flags); 487 448 if (break_state == -1) 488 449 up->lcr |= UART_LCR_SBC; ··· 491 450 up->lcr &= ~UART_LCR_SBC; 492 451 serial_out(up, UART_LCR, up->lcr); 493 452 spin_unlock_irqrestore(&up->port.lock, flags); 453 + pm_runtime_put(&up->pdev->dev); 494 454 } 495 455 496 456 static int serial_omap_startup(struct uart_port *port) ··· 508 466 if (retval) 509 467 return retval; 510 468 511 - dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id); 469 + dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); 512 470 471 + pm_runtime_get_sync(&up->pdev->dev); 513 472 /* 514 473 * Clear the FIFO buffers and disable them. 515 474 * (they will be reenabled in set_termios()) ··· 548 505 (dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys), 549 506 0); 550 507 init_timer(&(up->uart_dma.rx_timer)); 551 - up->uart_dma.rx_timer.function = serial_omap_rx_timeout; 552 - up->uart_dma.rx_timer.data = up->pdev->id; 508 + up->uart_dma.rx_timer.function = serial_omap_rxdma_poll; 509 + up->uart_dma.rx_timer.data = up->port.line; 553 510 /* Currently the buffer size is 4KB. 
Can increase it */ 554 511 up->uart_dma.rx_buf = dma_alloc_coherent(NULL, 555 512 up->uart_dma.rx_buf_size, ··· 566 523 /* Enable module level wake up */ 567 524 serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP); 568 525 526 + pm_runtime_mark_last_busy(&up->pdev->dev); 527 + pm_runtime_put_autosuspend(&up->pdev->dev); 569 528 up->port_activity = jiffies; 570 529 return 0; 571 530 } ··· 577 532 struct uart_omap_port *up = (struct uart_omap_port *)port; 578 533 unsigned long flags = 0; 579 534 580 - dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id); 535 + dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line); 536 + 537 + pm_runtime_get_sync(&up->pdev->dev); 581 538 /* 582 539 * Disable interrupts from this port 583 540 */ ··· 613 566 up->uart_dma.rx_buf_dma_phys); 614 567 up->uart_dma.rx_buf = NULL; 615 568 } 569 + 570 + pm_runtime_put(&up->pdev->dev); 616 571 free_irq(up->port.irq, up); 617 572 } 618 573 ··· 622 573 serial_omap_configure_xonxoff 623 574 (struct uart_omap_port *up, struct ktermios *termios) 624 575 { 625 - unsigned char efr = 0; 626 - 627 576 up->lcr = serial_in(up, UART_LCR); 628 577 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); 629 578 up->efr = serial_in(up, UART_EFR); ··· 631 584 serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]); 632 585 633 586 /* clear SW control mode bits */ 634 - efr = up->efr; 635 - efr &= OMAP_UART_SW_CLR; 587 + up->efr &= OMAP_UART_SW_CLR; 636 588 637 589 /* 638 590 * IXON Flag: ··· 639 593 * Transmit XON1, XOFF1 640 594 */ 641 595 if (termios->c_iflag & IXON) 642 - efr |= OMAP_UART_SW_TX; 596 + up->efr |= OMAP_UART_SW_TX; 643 597 644 598 /* 645 599 * IXOFF Flag: ··· 647 601 * Receiver compares XON1, XOFF1. 
648 602 */ 649 603 if (termios->c_iflag & IXOFF) 650 - efr |= OMAP_UART_SW_RX; 604 + up->efr |= OMAP_UART_SW_RX; 651 605 652 606 serial_out(up, UART_EFR, up->efr | UART_EFR_ECB); 653 607 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); ··· 670 624 * load the new software flow control mode IXON or IXOFF 671 625 * and restore the UARTi.EFR_REG[4] ENHANCED_EN value. 672 626 */ 673 - serial_out(up, UART_EFR, efr | UART_EFR_SCD); 627 + serial_out(up, UART_EFR, up->efr | UART_EFR_SCD); 674 628 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); 675 629 676 630 serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR); 677 631 serial_out(up, UART_LCR, up->lcr); 632 + } 633 + 634 + static void serial_omap_uart_qos_work(struct work_struct *work) 635 + { 636 + struct uart_omap_port *up = container_of(work, struct uart_omap_port, 637 + qos_work); 638 + 639 + pm_qos_update_request(&up->pm_qos_request, up->latency); 678 640 } 679 641 680 642 static void ··· 725 671 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13); 726 672 quot = serial_omap_get_divisor(port, baud); 727 673 674 + /* calculate wakeup latency constraint */ 675 + up->calc_latency = (1000000 * up->port.fifosize) / 676 + (1000 * baud / 8); 677 + up->latency = up->calc_latency; 678 + schedule_work(&up->qos_work); 679 + 680 + up->dll = quot & 0xff; 681 + up->dlh = quot >> 8; 682 + up->mdr1 = UART_OMAP_MDR1_DISABLE; 683 + 728 684 up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 | 729 685 UART_FCR_ENABLE_FIFO; 730 686 if (up->use_dma) ··· 744 680 * Ok, we're now changing the port state. Do it with 745 681 * interrupts disabled. 
746 682 */ 683 + pm_runtime_get_sync(&up->pdev->dev); 747 684 spin_lock_irqsave(&up->port.lock, flags); 748 685 749 686 /* ··· 788 723 up->ier |= UART_IER_MSI; 789 724 serial_out(up, UART_IER, up->ier); 790 725 serial_out(up, UART_LCR, cval); /* reset DLAB */ 726 + up->lcr = cval; 727 + up->scr = OMAP_UART_SCR_TX_EMPTY; 791 728 792 729 /* FIFOs and DMA Settings */ 793 730 ··· 816 749 817 750 if (up->use_dma) { 818 751 serial_out(up, UART_TI752_TLR, 0); 819 - serial_out(up, UART_OMAP_SCR, 820 - (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8)); 752 + up->scr |= (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8); 821 753 } 754 + 755 + serial_out(up, UART_OMAP_SCR, up->scr); 822 756 823 757 serial_out(up, UART_EFR, up->efr); 824 758 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); ··· 827 759 828 760 /* Protocol, Baud Rate, and Interrupt Settings */ 829 761 830 - serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE); 762 + if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) 763 + serial_omap_mdr1_errataset(up, up->mdr1); 764 + else 765 + serial_out(up, UART_OMAP_MDR1, up->mdr1); 766 + 831 767 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); 832 768 833 769 up->efr = serial_in(up, UART_EFR); ··· 841 769 serial_out(up, UART_IER, 0); 842 770 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); 843 771 844 - serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */ 845 - serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */ 772 + serial_out(up, UART_DLL, up->dll); /* LS of divisor */ 773 + serial_out(up, UART_DLM, up->dlh); /* MS of divisor */ 846 774 847 775 serial_out(up, UART_LCR, 0); 848 776 serial_out(up, UART_IER, up->ier); ··· 852 780 serial_out(up, UART_LCR, cval); 853 781 854 782 if (baud > 230400 && baud != 3000000) 855 - serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE); 783 + up->mdr1 = UART_OMAP_MDR1_13X_MODE; 856 784 else 857 - serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE); 785 + up->mdr1 = UART_OMAP_MDR1_16X_MODE; 786 + 787 + if (up->errata & 
UART_ERRATA_i202_MDR1_ACCESS) 788 + serial_omap_mdr1_errataset(up, up->mdr1); 789 + else 790 + serial_out(up, UART_OMAP_MDR1, up->mdr1); 858 791 859 792 /* Hardware Flow Control Configuration */ 860 793 ··· 886 809 serial_omap_configure_xonxoff(up, termios); 887 810 888 811 spin_unlock_irqrestore(&up->port.lock, flags); 889 - dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id); 812 + pm_runtime_put(&up->pdev->dev); 813 + dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line); 890 814 } 891 815 892 816 static void ··· 897 819 struct uart_omap_port *up = (struct uart_omap_port *)port; 898 820 unsigned char efr; 899 821 900 - dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id); 822 + dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line); 823 + 824 + pm_runtime_get_sync(&up->pdev->dev); 901 825 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); 902 826 efr = serial_in(up, UART_EFR); 903 827 serial_out(up, UART_EFR, efr | UART_EFR_ECB); ··· 909 829 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); 910 830 serial_out(up, UART_EFR, efr); 911 831 serial_out(up, UART_LCR, 0); 832 + 833 + if (!device_may_wakeup(&up->pdev->dev)) { 834 + if (!state) 835 + pm_runtime_forbid(&up->pdev->dev); 836 + else 837 + pm_runtime_allow(&up->pdev->dev); 838 + } 839 + 840 + pm_runtime_put(&up->pdev->dev); 912 841 } 913 842 914 843 static void serial_omap_release_port(struct uart_port *port) ··· 936 847 struct uart_omap_port *up = (struct uart_omap_port *)port; 937 848 938 849 dev_dbg(up->port.dev, "serial_omap_config_port+%d\n", 939 - up->pdev->id); 850 + up->port.line); 940 851 up->port.type = PORT_OMAP; 941 852 } 942 853 ··· 953 864 { 954 865 struct uart_omap_port *up = (struct uart_omap_port *)port; 955 866 956 - dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id); 867 + dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line); 957 868 return up->name; 958 869 } 959 870 ··· 995 906 static void serial_omap_poll_put_char(struct uart_port 
*port, unsigned char ch) 996 907 { 997 908 struct uart_omap_port *up = (struct uart_omap_port *)port; 909 + 910 + pm_runtime_get_sync(&up->pdev->dev); 998 911 wait_for_xmitr(up); 999 912 serial_out(up, UART_TX, ch); 913 + pm_runtime_put(&up->pdev->dev); 1000 914 } 1001 915 1002 916 static int serial_omap_poll_get_char(struct uart_port *port) 1003 917 { 1004 918 struct uart_omap_port *up = (struct uart_omap_port *)port; 1005 - unsigned int status = serial_in(up, UART_LSR); 919 + unsigned int status; 1006 920 921 + pm_runtime_get_sync(&up->pdev->dev); 922 + status = serial_in(up, UART_LSR); 1007 923 if (!(status & UART_LSR_DR)) 1008 924 return NO_POLL_CHAR; 1009 925 1010 - return serial_in(up, UART_RX); 926 + status = serial_in(up, UART_RX); 927 + pm_runtime_put(&up->pdev->dev); 928 + return status; 1011 929 } 1012 930 1013 931 #endif /* CONFIG_CONSOLE_POLL */ ··· 1041 945 unsigned long flags; 1042 946 unsigned int ier; 1043 947 int locked = 1; 948 + 949 + pm_runtime_get_sync(&up->pdev->dev); 1044 950 1045 951 local_irq_save(flags); 1046 952 if (up->port.sysrq) ··· 1076 978 if (up->msr_saved_flags) 1077 979 check_modem_status(up); 1078 980 981 + pm_runtime_mark_last_busy(&up->pdev->dev); 982 + pm_runtime_put_autosuspend(&up->pdev->dev); 1079 983 if (locked) 1080 984 spin_unlock(&up->port.lock); 1081 985 local_irq_restore(flags); ··· 1114 1014 1115 1015 static void serial_omap_add_console_port(struct uart_omap_port *up) 1116 1016 { 1117 - serial_omap_console_ports[up->pdev->id] = up; 1017 + serial_omap_console_ports[up->port.line] = up; 1118 1018 } 1119 1019 1120 1020 #define OMAP_CONSOLE (&serial_omap_console) ··· 1160 1060 .cons = OMAP_CONSOLE, 1161 1061 }; 1162 1062 1163 - static int 1164 - serial_omap_suspend(struct platform_device *pdev, pm_message_t state) 1063 + #ifdef CONFIG_SUSPEND 1064 + static int serial_omap_suspend(struct device *dev) 1165 1065 { 1166 - struct uart_omap_port *up = platform_get_drvdata(pdev); 1066 + struct uart_omap_port *up = 
dev_get_drvdata(dev); 1167 1067 1168 - if (up) 1068 + if (up) { 1169 1069 uart_suspend_port(&serial_omap_reg, &up->port); 1070 + flush_work_sync(&up->qos_work); 1071 + } 1072 + 1170 1073 return 0; 1171 1074 } 1172 1075 1173 - static int serial_omap_resume(struct platform_device *dev) 1076 + static int serial_omap_resume(struct device *dev) 1174 1077 { 1175 - struct uart_omap_port *up = platform_get_drvdata(dev); 1078 + struct uart_omap_port *up = dev_get_drvdata(dev); 1176 1079 1177 1080 if (up) 1178 1081 uart_resume_port(&serial_omap_reg, &up->port); 1179 1082 return 0; 1180 1083 } 1084 + #endif 1181 1085 1182 - static void serial_omap_rx_timeout(unsigned long uart_no) 1086 + static void serial_omap_rxdma_poll(unsigned long uart_no) 1183 1087 { 1184 1088 struct uart_omap_port *up = ui[uart_no]; 1185 1089 unsigned int curr_dma_pos, curr_transmitted_size; ··· 1193 1089 if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) || 1194 1090 (curr_dma_pos == 0)) { 1195 1091 if (jiffies_to_msecs(jiffies - up->port_activity) < 1196 - RX_TIMEOUT) { 1092 + up->uart_dma.rx_timeout) { 1197 1093 mod_timer(&up->uart_dma.rx_timer, jiffies + 1198 - usecs_to_jiffies(up->uart_dma.rx_timeout)); 1094 + usecs_to_jiffies(up->uart_dma.rx_poll_rate)); 1199 1095 } else { 1200 1096 serial_omap_stop_rxdma(up); 1201 1097 up->ier |= (UART_IER_RDI | UART_IER_RLSI); ··· 1224 1120 } 1225 1121 } else { 1226 1122 mod_timer(&up->uart_dma.rx_timer, jiffies + 1227 - usecs_to_jiffies(up->uart_dma.rx_timeout)); 1123 + usecs_to_jiffies(up->uart_dma.rx_poll_rate)); 1228 1124 } 1229 1125 up->port_activity = jiffies; 1230 1126 } ··· 1239 1135 int ret = 0; 1240 1136 1241 1137 if (up->uart_dma.rx_dma_channel == -1) { 1138 + pm_runtime_get_sync(&up->pdev->dev); 1242 1139 ret = omap_request_dma(up->uart_dma.uart_dma_rx, 1243 1140 "UART Rx DMA", 1244 1141 (void *)uart_rx_dma_callback, up, ··· 1263 1158 /* FIXME: Cache maintenance needed here? 
*/ 1264 1159 omap_start_dma(up->uart_dma.rx_dma_channel); 1265 1160 mod_timer(&up->uart_dma.rx_timer, jiffies + 1266 - usecs_to_jiffies(up->uart_dma.rx_timeout)); 1161 + usecs_to_jiffies(up->uart_dma.rx_poll_rate)); 1267 1162 up->uart_dma.rx_dma_used = true; 1268 1163 return ret; 1269 1164 } ··· 1326 1221 return; 1327 1222 } 1328 1223 1224 + static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev) 1225 + { 1226 + struct omap_uart_port_info *omap_up_info; 1227 + 1228 + omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL); 1229 + if (!omap_up_info) 1230 + return NULL; /* out of memory */ 1231 + 1232 + of_property_read_u32(dev->of_node, "clock-frequency", 1233 + &omap_up_info->uartclk); 1234 + return omap_up_info; 1235 + } 1236 + 1329 1237 static int serial_omap_probe(struct platform_device *pdev) 1330 1238 { 1331 1239 struct uart_omap_port *up; 1332 1240 struct resource *mem, *irq, *dma_tx, *dma_rx; 1333 1241 struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data; 1334 1242 int ret = -ENOSPC; 1243 + 1244 + if (pdev->dev.of_node) 1245 + omap_up_info = of_get_uart_port_info(&pdev->dev); 1335 1246 1336 1247 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1337 1248 if (!mem) { ··· 1384 1263 ret = -ENOMEM; 1385 1264 goto do_release_region; 1386 1265 } 1387 - sprintf(up->name, "OMAP UART%d", pdev->id); 1388 1266 up->pdev = pdev; 1389 1267 up->port.dev = &pdev->dev; 1390 1268 up->port.type = PORT_OMAP; ··· 1393 1273 up->port.regshift = 2; 1394 1274 up->port.fifosize = 64; 1395 1275 up->port.ops = &serial_omap_pops; 1396 - up->port.line = pdev->id; 1397 1276 1398 - up->port.membase = omap_up_info->membase; 1399 - up->port.mapbase = omap_up_info->mapbase; 1277 + if (pdev->dev.of_node) 1278 + up->port.line = of_alias_get_id(pdev->dev.of_node, "serial"); 1279 + else 1280 + up->port.line = pdev->id; 1281 + 1282 + if (up->port.line < 0) { 1283 + dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", 1284 + 
up->port.line); 1285 + ret = -ENODEV; 1286 + goto err; 1287 + } 1288 + 1289 + sprintf(up->name, "OMAP UART%d", up->port.line); 1290 + up->port.mapbase = mem->start; 1291 + up->port.membase = ioremap(mem->start, resource_size(mem)); 1292 + if (!up->port.membase) { 1293 + dev_err(&pdev->dev, "can't ioremap UART\n"); 1294 + ret = -ENOMEM; 1295 + goto err; 1296 + } 1297 + 1400 1298 up->port.flags = omap_up_info->flags; 1401 - up->port.irqflags = omap_up_info->irqflags; 1402 1299 up->port.uartclk = omap_up_info->uartclk; 1300 + if (!up->port.uartclk) { 1301 + up->port.uartclk = DEFAULT_CLK_SPEED; 1302 + dev_warn(&pdev->dev, "No clock speed specified: using default:" 1303 + "%d\n", DEFAULT_CLK_SPEED); 1304 + } 1403 1305 up->uart_dma.uart_base = mem->start; 1306 + up->errata = omap_up_info->errata; 1404 1307 1405 1308 if (omap_up_info->dma_enabled) { 1406 1309 up->uart_dma.uart_dma_tx = dma_tx->start; 1407 1310 up->uart_dma.uart_dma_rx = dma_rx->start; 1408 1311 up->use_dma = 1; 1409 - up->uart_dma.rx_buf_size = 4096; 1410 - up->uart_dma.rx_timeout = 2; 1312 + up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size; 1313 + up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout; 1314 + up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate; 1411 1315 spin_lock_init(&(up->uart_dma.tx_lock)); 1412 1316 spin_lock_init(&(up->uart_dma.rx_lock)); 1413 1317 up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE; 1414 1318 up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE; 1415 1319 } 1416 1320 1417 - ui[pdev->id] = up; 1321 + up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; 1322 + up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; 1323 + pm_qos_add_request(&up->pm_qos_request, 1324 + PM_QOS_CPU_DMA_LATENCY, up->latency); 1325 + serial_omap_uart_wq = create_singlethread_workqueue(up->name); 1326 + INIT_WORK(&up->qos_work, serial_omap_uart_qos_work); 1327 + 1328 + pm_runtime_use_autosuspend(&pdev->dev); 1329 + pm_runtime_set_autosuspend_delay(&pdev->dev, 1330 + 
omap_up_info->autosuspend_timeout); 1331 + 1332 + pm_runtime_irq_safe(&pdev->dev); 1333 + pm_runtime_enable(&pdev->dev); 1334 + pm_runtime_get_sync(&pdev->dev); 1335 + 1336 + ui[up->port.line] = up; 1418 1337 serial_omap_add_console_port(up); 1419 1338 1420 1339 ret = uart_add_one_port(&serial_omap_reg, &up->port); 1421 1340 if (ret != 0) 1422 1341 goto do_release_region; 1423 1342 1343 + pm_runtime_put(&pdev->dev); 1424 1344 platform_set_drvdata(pdev, up); 1425 1345 return 0; 1426 1346 err: ··· 1475 1315 { 1476 1316 struct uart_omap_port *up = platform_get_drvdata(dev); 1477 1317 1478 - platform_set_drvdata(dev, NULL); 1479 1318 if (up) { 1319 + pm_runtime_disable(&up->pdev->dev); 1480 1320 uart_remove_one_port(&serial_omap_reg, &up->port); 1321 + pm_qos_remove_request(&up->pm_qos_request); 1322 + 1481 1323 kfree(up); 1482 1324 } 1325 + 1326 + platform_set_drvdata(dev, NULL); 1483 1327 return 0; 1484 1328 } 1329 + 1330 + /* 1331 + * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460) 1332 + * The access to uart register after MDR1 Access 1333 + * causes UART to corrupt data. 1334 + * 1335 + * Need a delay = 1336 + * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS) 1337 + * give 10 times as much 1338 + */ 1339 + static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1) 1340 + { 1341 + u8 timeout = 255; 1342 + 1343 + serial_out(up, UART_OMAP_MDR1, mdr1); 1344 + udelay(2); 1345 + serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT | 1346 + UART_FCR_CLEAR_RCVR); 1347 + /* 1348 + * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and 1349 + * TX_FIFO_E bit is 1. 1350 + */ 1351 + while (UART_LSR_THRE != (serial_in(up, UART_LSR) & 1352 + (UART_LSR_THRE | UART_LSR_DR))) { 1353 + timeout--; 1354 + if (!timeout) { 1355 + /* Should *never* happen. 
we warn and carry on */ 1356 + dev_crit(&up->pdev->dev, "Errata i202: timedout %x\n", 1357 + serial_in(up, UART_LSR)); 1358 + break; 1359 + } 1360 + udelay(1); 1361 + } 1362 + } 1363 + 1364 + static void serial_omap_restore_context(struct uart_omap_port *up) 1365 + { 1366 + if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) 1367 + serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE); 1368 + else 1369 + serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE); 1370 + 1371 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */ 1372 + serial_out(up, UART_EFR, UART_EFR_ECB); 1373 + serial_out(up, UART_LCR, 0x0); /* Operational mode */ 1374 + serial_out(up, UART_IER, 0x0); 1375 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */ 1376 + serial_out(up, UART_DLL, up->dll); 1377 + serial_out(up, UART_DLM, up->dlh); 1378 + serial_out(up, UART_LCR, 0x0); /* Operational mode */ 1379 + serial_out(up, UART_IER, up->ier); 1380 + serial_out(up, UART_FCR, up->fcr); 1381 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); 1382 + serial_out(up, UART_MCR, up->mcr); 1383 + serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */ 1384 + serial_out(up, UART_OMAP_SCR, up->scr); 1385 + serial_out(up, UART_EFR, up->efr); 1386 + serial_out(up, UART_LCR, up->lcr); 1387 + if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) 1388 + serial_omap_mdr1_errataset(up, up->mdr1); 1389 + else 1390 + serial_out(up, UART_OMAP_MDR1, up->mdr1); 1391 + } 1392 + 1393 + #ifdef CONFIG_PM_RUNTIME 1394 + static int serial_omap_runtime_suspend(struct device *dev) 1395 + { 1396 + struct uart_omap_port *up = dev_get_drvdata(dev); 1397 + struct omap_uart_port_info *pdata = dev->platform_data; 1398 + 1399 + if (!up) 1400 + return -EINVAL; 1401 + 1402 + if (!pdata || !pdata->enable_wakeup) 1403 + return 0; 1404 + 1405 + if (pdata->get_context_loss_count) 1406 + up->context_loss_cnt = pdata->get_context_loss_count(dev); 1407 + 1408 + if (device_may_wakeup(dev)) { 1409 + if 
(!up->wakeups_enabled) { 1410 + pdata->enable_wakeup(up->pdev, true); 1411 + up->wakeups_enabled = true; 1412 + } 1413 + } else { 1414 + if (up->wakeups_enabled) { 1415 + pdata->enable_wakeup(up->pdev, false); 1416 + up->wakeups_enabled = false; 1417 + } 1418 + } 1419 + 1420 + /* Errata i291 */ 1421 + if (up->use_dma && pdata->set_forceidle && 1422 + (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE)) 1423 + pdata->set_forceidle(up->pdev); 1424 + 1425 + up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; 1426 + schedule_work(&up->qos_work); 1427 + 1428 + return 0; 1429 + } 1430 + 1431 + static int serial_omap_runtime_resume(struct device *dev) 1432 + { 1433 + struct uart_omap_port *up = dev_get_drvdata(dev); 1434 + struct omap_uart_port_info *pdata = dev->platform_data; 1435 + 1436 + if (up) { 1437 + if (pdata->get_context_loss_count) { 1438 + u32 loss_cnt = pdata->get_context_loss_count(dev); 1439 + 1440 + if (up->context_loss_cnt != loss_cnt) 1441 + serial_omap_restore_context(up); 1442 + } 1443 + 1444 + /* Errata i291 */ 1445 + if (up->use_dma && pdata->set_noidle && 1446 + (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE)) 1447 + pdata->set_noidle(up->pdev); 1448 + 1449 + up->latency = up->calc_latency; 1450 + schedule_work(&up->qos_work); 1451 + } 1452 + 1453 + return 0; 1454 + } 1455 + #endif 1456 + 1457 + static const struct dev_pm_ops serial_omap_dev_pm_ops = { 1458 + SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume) 1459 + SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend, 1460 + serial_omap_runtime_resume, NULL) 1461 + }; 1462 + 1463 + #if defined(CONFIG_OF) 1464 + static const struct of_device_id omap_serial_of_match[] = { 1465 + { .compatible = "ti,omap2-uart" }, 1466 + { .compatible = "ti,omap3-uart" }, 1467 + { .compatible = "ti,omap4-uart" }, 1468 + {}, 1469 + }; 1470 + MODULE_DEVICE_TABLE(of, omap_serial_of_match); 1471 + #endif 1485 1472 1486 1473 static struct platform_driver serial_omap_driver = { 1487 1474 .probe = serial_omap_probe, 1488 1475 
.remove = serial_omap_remove, 1489 - 1490 - .suspend = serial_omap_suspend, 1491 - .resume = serial_omap_resume, 1492 1476 .driver = { 1493 1477 .name = DRIVER_NAME, 1478 + .pm = &serial_omap_dev_pm_ops, 1479 + .of_match_table = of_match_ptr(omap_serial_of_match), 1494 1480 }, 1495 1481 }; 1496 1482