Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'pm-domains' into pm-for-linus

* pm-domains:
PM / Domains: Split device PM domain data into base and need_restore
ARM: mach-shmobile: sh7372 sleep warning fixes
ARM: mach-shmobile: sh7372 A3SM support
ARM: mach-shmobile: sh7372 generic suspend/resume support
PM / Domains: Preliminary support for devices with power.irq_safe set
PM: Move clock-related definitions and headers to separate file
PM / Domains: Use power.subsys_data to reduce overhead
PM: Reference counting of power.subsys_data
PM: Introduce struct pm_subsys_data
ARM / shmobile: Make A3RV be a subdomain of A4LC on SH7372
PM / Domains: Rename argument of pm_genpd_add_subdomain()
PM / Domains: Rename GPD_STATE_WAIT_PARENT to GPD_STATE_WAIT_MASTER
PM / Domains: Allow generic PM domains to have multiple masters
PM / Domains: Add "wait for parent" status for generic PM domains
PM / Domains: Make pm_genpd_poweron() always survive parent removal
PM / Domains: Do not take parent locks to modify subdomain counters
PM / Domains: Implement subdomain counters as atomic fields

+736 -534
+1
arch/arm/mach-omap1/pm_bus.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/io.h> 15 15 #include <linux/pm_runtime.h> 16 + #include <linux/pm_clock.h> 16 17 #include <linux/platform_device.h> 17 18 #include <linux/mutex.h> 18 19 #include <linux/clk.h>
+1
arch/arm/mach-shmobile/board-ap4evb.c
··· 42 42 #include <linux/leds.h> 43 43 #include <linux/input/sh_keysc.h> 44 44 #include <linux/usb/r8a66597.h> 45 + #include <linux/pm_clock.h> 45 46 46 47 #include <media/sh_mobile_ceu.h> 47 48 #include <media/sh_mobile_csi2.h>
+1 -1
arch/arm/mach-shmobile/board-mackerel.c
··· 39 39 #include <linux/mtd/mtd.h> 40 40 #include <linux/mtd/partitions.h> 41 41 #include <linux/mtd/physmap.h> 42 - #include <linux/pm_runtime.h> 42 + #include <linux/pm_clock.h> 43 43 #include <linux/smsc911x.h> 44 44 #include <linux/sh_intc.h> 45 45 #include <linux/tca6416_keypad.h>
+2 -2
arch/arm/mach-shmobile/include/mach/common.h
··· 35 35 extern void sh7372_clock_init(void); 36 36 extern void sh7372_pinmux_init(void); 37 37 extern void sh7372_pm_init(void); 38 - extern void sh7372_cpu_suspend(void); 39 - extern void sh7372_cpu_resume(void); 38 + extern void sh7372_resume_core_standby_a3sm(void); 39 + extern int sh7372_do_idle_a3sm(unsigned long unused); 40 40 extern struct clk sh7372_extal1_clk; 41 41 extern struct clk sh7372_extal2_clk; 42 42
+3
arch/arm/mach-shmobile/include/mach/sh7372.h
··· 498 498 extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); 499 499 extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, 500 500 struct platform_device *pdev); 501 + extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd, 502 + struct sh7372_pm_domain *sh7372_sd); 501 503 #else 502 504 #define sh7372_init_pm_domain(pd) do { } while(0) 503 505 #define sh7372_add_device_to_domain(pd, pdev) do { } while(0) 506 + #define sh7372_pm_add_subdomain(pd, sd) do { } while(0) 504 507 #endif /* CONFIG_PM */ 505 508 506 509 #endif /* __ASM_SH7372_H__ */
+235 -76
arch/arm/mach-shmobile/pm-sh7372.c
··· 15 15 #include <linux/list.h> 16 16 #include <linux/err.h> 17 17 #include <linux/slab.h> 18 - #include <linux/pm_runtime.h> 18 + #include <linux/pm_clock.h> 19 19 #include <linux/platform_device.h> 20 20 #include <linux/delay.h> 21 + #include <linux/irq.h> 22 + #include <linux/bitrev.h> 21 23 #include <asm/system.h> 22 24 #include <asm/io.h> 23 25 #include <asm/tlbflush.h> 26 + #include <asm/suspend.h> 24 27 #include <mach/common.h> 25 28 #include <mach/sh7372.h> 26 29 27 - #define SMFRAM 0xe6a70000 28 - #define SYSTBCR 0xe6150024 29 - #define SBAR 0xe6180020 30 - #define APARMBAREA 0xe6f10020 30 + /* DBG */ 31 + #define DBGREG1 0xe6100020 32 + #define DBGREG9 0xe6100040 31 33 34 + /* CPGA */ 35 + #define SYSTBCR 0xe6150024 36 + #define MSTPSR0 0xe6150030 37 + #define MSTPSR1 0xe6150038 38 + #define MSTPSR2 0xe6150040 39 + #define MSTPSR3 0xe6150048 40 + #define MSTPSR4 0xe615004c 41 + #define PLLC01STPCR 0xe61500c8 42 + 43 + /* SYSC */ 32 44 #define SPDCR 0xe6180008 33 45 #define SWUCR 0xe6180014 46 + #define SBAR 0xe6180020 47 + #define WUPSMSK 0xe618002c 48 + #define WUPSMSK2 0xe6180048 34 49 #define PSTR 0xe6180080 50 + #define WUPSFAC 0xe6180098 51 + #define IRQCR 0xe618022c 52 + #define IRQCR2 0xe6180238 53 + #define IRQCR3 0xe6180244 54 + #define IRQCR4 0xe6180248 55 + #define PDNSEL 0xe6180254 56 + 57 + /* INTC */ 58 + #define ICR1A 0xe6900000 59 + #define ICR2A 0xe6900004 60 + #define ICR3A 0xe6900008 61 + #define ICR4A 0xe690000c 62 + #define INTMSK00A 0xe6900040 63 + #define INTMSK10A 0xe6900044 64 + #define INTMSK20A 0xe6900048 65 + #define INTMSK30A 0xe690004c 66 + 67 + /* MFIS */ 68 + #define SMFRAM 0xe6a70000 69 + 70 + /* AP-System Core */ 71 + #define APARMBAREA 0xe6f10020 35 72 36 73 #define PSTR_RETRIES 100 37 74 #define PSTR_DELAY_US 10 ··· 128 91 return ret; 129 92 } 130 93 131 - static int pd_power_up_a3rv(struct generic_pm_domain *genpd) 132 - { 133 - int ret = pd_power_up(genpd); 134 - 135 - /* force A4LC on after A3RV has been requested 
on */ 136 - pm_genpd_poweron(&sh7372_a4lc.genpd); 137 - 138 - return ret; 139 - } 140 - 141 - static int pd_power_down_a3rv(struct generic_pm_domain *genpd) 142 - { 143 - int ret = pd_power_down(genpd); 144 - 145 - /* try to power down A4LC after A3RV is requested off */ 146 - genpd_queue_power_off_work(&sh7372_a4lc.genpd); 147 - 148 - return ret; 149 - } 150 - 151 - static int pd_power_down_a4lc(struct generic_pm_domain *genpd) 152 - { 153 - /* only power down A4LC if A3RV is off */ 154 - if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift))) 155 - return pd_power_down(genpd); 156 - 157 - return -EBUSY; 158 - } 159 - 160 94 static bool pd_active_wakeup(struct device *dev) 161 95 { 162 96 return true; ··· 140 132 pm_genpd_init(genpd, NULL, false); 141 133 genpd->stop_device = pm_clk_suspend; 142 134 genpd->start_device = pm_clk_resume; 135 + genpd->dev_irq_safe = true; 143 136 genpd->active_wakeup = pd_active_wakeup; 144 - 145 - if (sh7372_pd == &sh7372_a4lc) { 146 - genpd->power_off = pd_power_down_a4lc; 147 - genpd->power_on = pd_power_up; 148 - } else if (sh7372_pd == &sh7372_a3rv) { 149 - genpd->power_off = pd_power_down_a3rv; 150 - genpd->power_on = pd_power_up_a3rv; 151 - } else { 152 - genpd->power_off = pd_power_down; 153 - genpd->power_on = pd_power_up; 154 - } 137 + genpd->power_off = pd_power_down; 138 + genpd->power_on = pd_power_up; 155 139 genpd->power_on(&sh7372_pd->genpd); 156 140 } 157 141 ··· 152 152 { 153 153 struct device *dev = &pdev->dev; 154 154 155 - if (!dev->power.subsys_data) { 156 - pm_clk_init(dev); 157 - pm_clk_add(dev, NULL); 158 - } 159 155 pm_genpd_add_device(&sh7372_pd->genpd, dev); 156 + if (pm_clk_no_clocks(dev)) 157 + pm_clk_add(dev, NULL); 158 + } 159 + 160 + void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd, 161 + struct sh7372_pm_domain *sh7372_sd) 162 + { 163 + pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd); 160 164 } 161 165 162 166 struct sh7372_pm_domain sh7372_a4lc = { ··· 189 185 190 186 
#endif /* CONFIG_PM */ 191 187 192 - static void sh7372_enter_core_standby(void) 188 + #if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) 189 + static int sh7372_do_idle_core_standby(unsigned long unused) 193 190 { 194 - void __iomem *smfram = (void __iomem *)SMFRAM; 195 - 196 - __raw_writel(0, APARMBAREA); /* translate 4k */ 197 - __raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */ 198 - __raw_writel(0x10, SYSTBCR); /* enable core standby */ 199 - 200 - __raw_writel(0, smfram + 0x3c); /* clear page table address */ 201 - 202 - sh7372_cpu_suspend(); 203 - cpu_init(); 204 - 205 - /* if page table address is non-NULL then we have been powered down */ 206 - if (__raw_readl(smfram + 0x3c)) { 207 - __raw_writel(__raw_readl(smfram + 0x40), 208 - __va(__raw_readl(smfram + 0x3c))); 209 - 210 - flush_tlb_all(); 211 - set_cr(__raw_readl(smfram + 0x38)); 212 - } 213 - 214 - __raw_writel(0, SYSTBCR); /* disable core standby */ 215 - __raw_writel(0, SBAR); /* disable reset vector translation */ 191 + cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */ 192 + return 0; 216 193 } 217 194 195 + static void sh7372_enter_core_standby(void) 196 + { 197 + /* set reset vector, translate 4k */ 198 + __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR); 199 + __raw_writel(0, APARMBAREA); 200 + 201 + /* enter sleep mode with SYSTBCR to 0x10 */ 202 + __raw_writel(0x10, SYSTBCR); 203 + cpu_suspend(0, sh7372_do_idle_core_standby); 204 + __raw_writel(0, SYSTBCR); 205 + 206 + /* disable reset vector translation */ 207 + __raw_writel(0, SBAR); 208 + } 209 + #endif 210 + 211 + #ifdef CONFIG_SUSPEND 212 + static void sh7372_enter_a3sm_common(int pllc0_on) 213 + { 214 + /* set reset vector, translate 4k */ 215 + __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR); 216 + __raw_writel(0, APARMBAREA); 217 + 218 + if (pllc0_on) 219 + __raw_writel(0, PLLC01STPCR); 220 + else 221 + __raw_writel(1 << 28, PLLC01STPCR); 222 + 223 + __raw_writel(0, PDNSEL); /* 
power-down A3SM only, not A4S */ 224 + __raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */ 225 + cpu_suspend(0, sh7372_do_idle_a3sm); 226 + __raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */ 227 + 228 + /* disable reset vector translation */ 229 + __raw_writel(0, SBAR); 230 + } 231 + 232 + static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p) 233 + { 234 + unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4; 235 + unsigned long msk, msk2; 236 + 237 + /* check active clocks to determine potential wakeup sources */ 238 + 239 + mstpsr0 = __raw_readl(MSTPSR0); 240 + if ((mstpsr0 & 0x00000003) != 0x00000003) { 241 + pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0); 242 + return 0; 243 + } 244 + 245 + mstpsr1 = __raw_readl(MSTPSR1); 246 + if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) { 247 + pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1); 248 + return 0; 249 + } 250 + 251 + mstpsr2 = __raw_readl(MSTPSR2); 252 + if ((mstpsr2 & 0x000741ff) != 0x000741ff) { 253 + pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2); 254 + return 0; 255 + } 256 + 257 + mstpsr3 = __raw_readl(MSTPSR3); 258 + if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) { 259 + pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3); 260 + return 0; 261 + } 262 + 263 + mstpsr4 = __raw_readl(MSTPSR4); 264 + if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) { 265 + pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4); 266 + return 0; 267 + } 268 + 269 + msk = 0; 270 + msk2 = 0; 271 + 272 + /* make bitmaps of limited number of wakeup sources */ 273 + 274 + if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */ 275 + msk |= 1 << 31; 276 + 277 + if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */ 278 + msk |= 1 << 21; 279 + 280 + if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */ 281 + msk |= 1 << 2; 282 + 283 + if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */ 284 + msk |= 1 << 1; 285 + 286 + if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */ 287 + msk |= 1 << 1; 288 + 289 + if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */ 290 + msk |= 1 << 1; 291 + 292 
+ if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */ 293 + msk2 |= 1 << 17; 294 + 295 + *mskp = msk; 296 + *msk2p = msk2; 297 + 298 + return 1; 299 + } 300 + 301 + static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p) 302 + { 303 + u16 tmp, irqcr1, irqcr2; 304 + int k; 305 + 306 + irqcr1 = 0; 307 + irqcr2 = 0; 308 + 309 + /* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */ 310 + for (k = 0; k <= 7; k++) { 311 + tmp = (icr >> ((7 - k) * 4)) & 0xf; 312 + irqcr1 |= (tmp & 0x03) << (k * 2); 313 + irqcr2 |= (tmp >> 2) << (k * 2); 314 + } 315 + 316 + *irqcr1p = irqcr1; 317 + *irqcr2p = irqcr2; 318 + } 319 + 320 + static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2) 321 + { 322 + u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high; 323 + unsigned long tmp; 324 + 325 + /* read IRQ0A -> IRQ15A mask */ 326 + tmp = bitrev8(__raw_readb(INTMSK00A)); 327 + tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8; 328 + 329 + /* setup WUPSMSK from clocks and external IRQ mask */ 330 + msk = (~msk & 0xc030000f) | (tmp << 4); 331 + __raw_writel(msk, WUPSMSK); 332 + 333 + /* propage level/edge trigger for external IRQ 0->15 */ 334 + sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low); 335 + sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high); 336 + __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR); 337 + __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2); 338 + 339 + /* read IRQ16A -> IRQ31A mask */ 340 + tmp = bitrev8(__raw_readb(INTMSK20A)); 341 + tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8; 342 + 343 + /* setup WUPSMSK2 from clocks and external IRQ mask */ 344 + msk2 = (~msk2 & 0x00030000) | tmp; 345 + __raw_writel(msk2, WUPSMSK2); 346 + 347 + /* propage level/edge trigger for external IRQ 16->31 */ 348 + sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low); 349 + sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high); 350 + __raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3); 351 
+ __raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4); 352 + } 353 + #endif 354 + 218 355 #ifdef CONFIG_CPU_IDLE 356 + 219 357 static void sh7372_cpuidle_setup(struct cpuidle_device *dev) 220 358 { 221 359 struct cpuidle_state *state; ··· 385 239 #endif 386 240 387 241 #ifdef CONFIG_SUSPEND 242 + 388 243 static int sh7372_enter_suspend(suspend_state_t suspend_state) 389 244 { 390 - sh7372_enter_core_standby(); 245 + unsigned long msk, msk2; 246 + 247 + /* check active clocks to determine potential wakeup sources */ 248 + if (sh7372_a3sm_valid(&msk, &msk2)) { 249 + 250 + /* convert INTC mask and sense to SYSC mask and sense */ 251 + sh7372_setup_a3sm(msk, msk2); 252 + 253 + /* enter A3SM sleep with PLLC0 off */ 254 + pr_debug("entering A3SM\n"); 255 + sh7372_enter_a3sm_common(0); 256 + } else { 257 + /* default to Core Standby that supports all wakeup sources */ 258 + pr_debug("entering Core Standby\n"); 259 + sh7372_enter_core_standby(); 260 + } 391 261 return 0; 392 262 } 393 263 ··· 414 252 #else 415 253 static void sh7372_suspend_init(void) {} 416 254 #endif 417 - 418 - #define DBGREG1 0xe6100020 419 - #define DBGREG9 0xe6100040 420 255 421 256 void __init sh7372_pm_init(void) 422 257 {
+1
arch/arm/mach-shmobile/pm_runtime.c
··· 15 15 #include <linux/io.h> 16 16 #include <linux/pm_runtime.h> 17 17 #include <linux/pm_domain.h> 18 + #include <linux/pm_clock.h> 18 19 #include <linux/platform_device.h> 19 20 #include <linux/clk.h> 20 21 #include <linux/sh_clk.h>
+3
arch/arm/mach-shmobile/setup-sh7372.c
··· 30 30 #include <linux/sh_dma.h> 31 31 #include <linux/sh_intc.h> 32 32 #include <linux/sh_timer.h> 33 + #include <linux/pm_domain.h> 33 34 #include <mach/hardware.h> 34 35 #include <mach/sh7372.h> 35 36 #include <asm/mach-types.h> ··· 994 993 sh7372_init_pm_domain(&sh7372_a3rv); 995 994 sh7372_init_pm_domain(&sh7372_a3ri); 996 995 sh7372_init_pm_domain(&sh7372_a3sg); 996 + 997 + sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv); 997 998 998 999 platform_add_devices(sh7372_early_devices, 999 1000 ARRAY_SIZE(sh7372_early_devices));
+28 -193
arch/arm/mach-shmobile/sleep-sh7372.S
··· 30 30 */ 31 31 32 32 #include <linux/linkage.h> 33 + #include <linux/init.h> 34 + #include <asm/memory.h> 33 35 #include <asm/assembler.h> 34 36 35 - #define SMFRAM 0xe6a70000 37 + #if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) 38 + .align 12 39 + .text 40 + .global sh7372_resume_core_standby_a3sm 41 + sh7372_resume_core_standby_a3sm: 42 + ldr pc, 1f 43 + 1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET 36 44 37 - .align 38 - kernel_flush: 39 - .word v7_flush_dcache_all 40 - 41 - .align 3 42 - ENTRY(sh7372_cpu_suspend) 43 - stmfd sp!, {r0-r12, lr} @ save registers on stack 44 - 45 - ldr r8, =SMFRAM 46 - 47 - mov r4, sp @ Store sp 48 - mrs r5, spsr @ Store spsr 49 - mov r6, lr @ Store lr 50 - stmia r8!, {r4-r6} 51 - 52 - mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register 53 - mrc p15, 0, r5, c2, c0, 0 @ TTBR0 54 - mrc p15, 0, r6, c2, c0, 1 @ TTBR1 55 - mrc p15, 0, r7, c2, c0, 2 @ TTBCR 56 - stmia r8!, {r4-r7} 57 - 58 - mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register 59 - mrc p15, 0, r5, c10, c2, 0 @ PRRR 60 - mrc p15, 0, r6, c10, c2, 1 @ NMRR 61 - stmia r8!,{r4-r6} 62 - 63 - mrc p15, 0, r4, c13, c0, 1 @ Context ID 64 - mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID 65 - mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address 66 - mrs r7, cpsr @ Store current cpsr 67 - stmia r8!, {r4-r7} 68 - 69 - mrc p15, 0, r4, c1, c0, 0 @ save control register 70 - stmia r8!, {r4} 71 - 72 - /* 73 - * jump out to kernel flush routine 74 - * - reuse that code is better 75 - * - it executes in a cached space so is faster than refetch per-block 76 - * - should be faster and will change with kernel 77 - * - 'might' have to copy address, load and jump to it 78 - * Flush all data from the L1 data cache before disabling 79 - * SCTLR.C bit. 
80 - */ 81 - ldr r1, kernel_flush 82 - mov lr, pc 83 - bx r1 84 - 45 + .global sh7372_do_idle_a3sm 46 + sh7372_do_idle_a3sm: 85 47 /* 86 48 * Clear the SCTLR.C bit to prevent further data cache 87 49 * allocation. Clearing SCTLR.C would make all the data accesses ··· 54 92 mcr p15, 0, r0, c1, c0, 0 55 93 isb 56 94 95 + /* disable L2 cache in the aux control register */ 96 + mrc p15, 0, r10, c1, c0, 1 97 + bic r10, r10, #2 98 + mcr p15, 0, r10, c1, c0, 1 99 + 57 100 /* 58 - * Invalidate L1 data cache. Even though only invalidate is 59 - * necessary exported flush API is used here. Doing clean 60 - * on already clean cache would be almost NOP. 101 + * Invalidate data cache again. 61 102 */ 62 103 ldr r1, kernel_flush 63 104 blx r1 ··· 80 115 dsb 81 116 dmb 82 117 83 - /* 84 - * =================================== 85 - * == WFI instruction => Enter idle == 86 - * =================================== 87 - */ 88 - wfi @ wait for interrupt 118 + #define SPDCR 0xe6180008 119 + #define A3SM (1 << 12) 89 120 90 - /* 91 - * =================================== 92 - * == Resume path for non-OFF modes == 93 - * =================================== 94 - */ 95 - mrc p15, 0, r0, c1, c0, 0 96 - tst r0, #(1 << 2) @ Check C bit enabled? 
97 - orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared 98 - mcreq p15, 0, r0, c1, c0, 0 99 - isb 121 + /* A3SM power down */ 122 + ldr r0, =SPDCR 123 + ldr r1, =A3SM 124 + str r1, [r0] 125 + 1: 126 + b 1b 100 127 101 - /* 102 - * =================================== 103 - * == Exit point from non-OFF modes == 104 - * =================================== 105 - */ 106 - ldmfd sp!, {r0-r12, pc} @ restore regs and return 107 - 108 - .pool 109 - 110 - .align 12 111 - .text 112 - .global sh7372_cpu_resume 113 - sh7372_cpu_resume: 114 - 115 - mov r1, #0 116 - /* 117 - * Invalidate all instruction caches to PoU 118 - * and flush branch target cache 119 - */ 120 - mcr p15, 0, r1, c7, c5, 0 121 - 122 - ldr r3, =SMFRAM 123 - 124 - ldmia r3!, {r4-r6} 125 - mov sp, r4 @ Restore sp 126 - msr spsr_cxsf, r5 @ Restore spsr 127 - mov lr, r6 @ Restore lr 128 - 129 - ldmia r3!, {r4-r7} 130 - mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register 131 - mcr p15, 0, r5, c2, c0, 0 @ TTBR0 132 - mcr p15, 0, r6, c2, c0, 1 @ TTBR1 133 - mcr p15, 0, r7, c2, c0, 2 @ TTBCR 134 - 135 - ldmia r3!,{r4-r6} 136 - mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register 137 - mcr p15, 0, r5, c10, c2, 0 @ PRRR 138 - mcr p15, 0, r6, c10, c2, 1 @ NMRR 139 - 140 - ldmia r3!,{r4-r7} 141 - mcr p15, 0, r4, c13, c0, 1 @ Context ID 142 - mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID 143 - mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address 144 - msr cpsr, r7 @ store cpsr 145 - 146 - /* Starting to enable MMU here */ 147 - mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl 148 - /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */ 149 - and r7, #0x7 150 - cmp r7, #0x0 151 - beq usettbr0 152 - ttbr_error: 153 - /* 154 - * More work needs to be done to support N[0:2] value other than 0 155 - * So looping here so that the error can be detected 156 - */ 157 - b ttbr_error 158 - 159 - .align 160 - cache_pred_disable_mask: 161 - .word 0xFFFFE7FB 162 - ttbrbit_mask: 163 - 
.word 0xFFFFC000 164 - table_index_mask: 165 - .word 0xFFF00000 166 - table_entry: 167 - .word 0x00000C02 168 - usettbr0: 169 - 170 - mrc p15, 0, r2, c2, c0, 0 171 - ldr r5, ttbrbit_mask 172 - and r2, r5 173 - mov r4, pc 174 - ldr r5, table_index_mask 175 - and r4, r5 @ r4 = 31 to 20 bits of pc 176 - /* Extract the value to be written to table entry */ 177 - ldr r6, table_entry 178 - /* r6 has the value to be written to table entry */ 179 - add r6, r6, r4 180 - /* Getting the address of table entry to modify */ 181 - lsr r4, #18 182 - /* r2 has the location which needs to be modified */ 183 - add r2, r4 184 - ldr r4, [r2] 185 - str r6, [r2] /* modify the table entry */ 186 - 187 - mov r7, r6 188 - mov r5, r2 189 - mov r6, r4 190 - /* r5 = original page table address */ 191 - /* r6 = original page table data */ 192 - 193 - mov r0, #0 194 - mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer 195 - mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array 196 - mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB 197 - mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB 198 - 199 - /* 200 - * Restore control register. This enables the MMU. 201 - * The caches and prediction are not enabled here, they 202 - * will be enabled after restoring the MMU table entry. 203 - */ 204 - ldmia r3!, {r4} 205 - stmia r3!, {r5} /* save original page table address */ 206 - stmia r3!, {r6} /* save original page table data */ 207 - stmia r3!, {r7} /* save modified page table data */ 208 - 209 - ldr r2, cache_pred_disable_mask 210 - and r4, r2 211 - mcr p15, 0, r4, c1, c0, 0 212 - dsb 213 - isb 214 - 215 - ldr r0, =restoremmu_on 216 - bx r0 217 - 218 - /* 219 - * ============================== 220 - * == Exit point from OFF mode == 221 - * ============================== 222 - */ 223 - restoremmu_on: 224 - 225 - ldmfd sp!, {r0-r12, pc} @ restore regs and return 128 + kernel_flush: 129 + .word v7_flush_dcache_all 130 + #endif
+1 -1
drivers/base/power/Makefile
··· 1 - obj-$(CONFIG_PM) += sysfs.o generic_ops.o 1 + obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o 2 2 obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o 3 3 obj-$(CONFIG_PM_RUNTIME) += runtime.o 4 4 obj-$(CONFIG_PM_TRACE_RTC) += trace.o
+58 -65
drivers/base/power/clock_ops.c
··· 10 10 #include <linux/kernel.h> 11 11 #include <linux/io.h> 12 12 #include <linux/pm.h> 13 - #include <linux/pm_runtime.h> 13 + #include <linux/pm_clock.h> 14 14 #include <linux/clk.h> 15 15 #include <linux/slab.h> 16 16 #include <linux/err.h> 17 17 18 18 #ifdef CONFIG_PM 19 - 20 - struct pm_clk_data { 21 - struct list_head clock_list; 22 - spinlock_t lock; 23 - }; 24 19 25 20 enum pce_status { 26 21 PCE_STATUS_NONE = 0, ··· 30 35 struct clk *clk; 31 36 enum pce_status status; 32 37 }; 33 - 34 - static struct pm_clk_data *__to_pcd(struct device *dev) 35 - { 36 - return dev ? dev->power.subsys_data : NULL; 37 - } 38 38 39 39 /** 40 40 * pm_clk_acquire - Acquire a device clock. ··· 57 67 */ 58 68 int pm_clk_add(struct device *dev, const char *con_id) 59 69 { 60 - struct pm_clk_data *pcd = __to_pcd(dev); 70 + struct pm_subsys_data *psd = dev_to_psd(dev); 61 71 struct pm_clock_entry *ce; 62 72 63 - if (!pcd) 73 + if (!psd) 64 74 return -EINVAL; 65 75 66 76 ce = kzalloc(sizeof(*ce), GFP_KERNEL); ··· 81 91 82 92 pm_clk_acquire(dev, ce); 83 93 84 - spin_lock_irq(&pcd->lock); 85 - list_add_tail(&ce->node, &pcd->clock_list); 86 - spin_unlock_irq(&pcd->lock); 94 + spin_lock_irq(&psd->lock); 95 + list_add_tail(&ce->node, &psd->clock_list); 96 + spin_unlock_irq(&psd->lock); 87 97 return 0; 88 98 } 89 99 ··· 120 130 */ 121 131 void pm_clk_remove(struct device *dev, const char *con_id) 122 132 { 123 - struct pm_clk_data *pcd = __to_pcd(dev); 133 + struct pm_subsys_data *psd = dev_to_psd(dev); 124 134 struct pm_clock_entry *ce; 125 135 126 - if (!pcd) 136 + if (!psd) 127 137 return; 128 138 129 - spin_lock_irq(&pcd->lock); 139 + spin_lock_irq(&psd->lock); 130 140 131 - list_for_each_entry(ce, &pcd->clock_list, node) { 141 + list_for_each_entry(ce, &psd->clock_list, node) { 132 142 if (!con_id && !ce->con_id) 133 143 goto remove; 134 144 else if (!con_id || !ce->con_id) ··· 137 147 goto remove; 138 148 } 139 149 140 - spin_unlock_irq(&pcd->lock); 150 + 
spin_unlock_irq(&psd->lock); 141 151 return; 142 152 143 153 remove: 144 154 list_del(&ce->node); 145 - spin_unlock_irq(&pcd->lock); 155 + spin_unlock_irq(&psd->lock); 146 156 147 157 __pm_clk_remove(ce); 148 158 } ··· 151 161 * pm_clk_init - Initialize a device's list of power management clocks. 152 162 * @dev: Device to initialize the list of PM clocks for. 153 163 * 154 - * Allocate a struct pm_clk_data object, initialize its lock member and 155 - * make the @dev's power.subsys_data field point to it. 164 + * Initialize the lock and clock_list members of the device's pm_subsys_data 165 + * object. 156 166 */ 157 - int pm_clk_init(struct device *dev) 167 + void pm_clk_init(struct device *dev) 158 168 { 159 - struct pm_clk_data *pcd; 169 + struct pm_subsys_data *psd = dev_to_psd(dev); 170 + if (psd) 171 + INIT_LIST_HEAD(&psd->clock_list); 172 + } 160 173 161 - pcd = kzalloc(sizeof(*pcd), GFP_KERNEL); 162 - if (!pcd) { 163 - dev_err(dev, "Not enough memory for PM clock data.\n"); 164 - return -ENOMEM; 165 - } 166 - 167 - INIT_LIST_HEAD(&pcd->clock_list); 168 - spin_lock_init(&pcd->lock); 169 - dev->power.subsys_data = pcd; 170 - return 0; 174 + /** 175 + * pm_clk_create - Create and initialize a device's list of PM clocks. 176 + * @dev: Device to create and initialize the list of PM clocks for. 177 + * 178 + * Allocate a struct pm_subsys_data object, initialize its lock and clock_list 179 + * members and make the @dev's power.subsys_data field point to it. 180 + */ 181 + int pm_clk_create(struct device *dev) 182 + { 183 + int ret = dev_pm_get_subsys_data(dev); 184 + return ret < 0 ? ret : 0; 171 185 } 172 186 173 187 /** ··· 179 185 * @dev: Device to destroy the list of PM clocks for. 180 186 * 181 187 * Clear the @dev's power.subsys_data field, remove the list of clock entries 182 - * from the struct pm_clk_data object pointed to by it before and free 188 + * from the struct pm_subsys_data object pointed to by it before and free 183 189 * that object. 
184 190 */ 185 191 void pm_clk_destroy(struct device *dev) 186 192 { 187 - struct pm_clk_data *pcd = __to_pcd(dev); 193 + struct pm_subsys_data *psd = dev_to_psd(dev); 188 194 struct pm_clock_entry *ce, *c; 189 195 struct list_head list; 190 196 191 - if (!pcd) 197 + if (!psd) 192 198 return; 193 199 194 - dev->power.subsys_data = NULL; 195 200 INIT_LIST_HEAD(&list); 196 201 197 - spin_lock_irq(&pcd->lock); 202 + spin_lock_irq(&psd->lock); 198 203 199 - list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) 204 + list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) 200 205 list_move(&ce->node, &list); 201 206 202 - spin_unlock_irq(&pcd->lock); 207 + spin_unlock_irq(&psd->lock); 203 208 204 - kfree(pcd); 209 + dev_pm_put_subsys_data(dev); 205 210 206 211 list_for_each_entry_safe_reverse(ce, c, &list, node) { 207 212 list_del(&ce->node); ··· 218 225 */ 219 226 int pm_clk_suspend(struct device *dev) 220 227 { 221 - struct pm_clk_data *pcd = __to_pcd(dev); 228 + struct pm_subsys_data *psd = dev_to_psd(dev); 222 229 struct pm_clock_entry *ce; 223 230 unsigned long flags; 224 231 225 232 dev_dbg(dev, "%s()\n", __func__); 226 233 227 - if (!pcd) 234 + if (!psd) 228 235 return 0; 229 236 230 - spin_lock_irqsave(&pcd->lock, flags); 237 + spin_lock_irqsave(&psd->lock, flags); 231 238 232 - list_for_each_entry_reverse(ce, &pcd->clock_list, node) { 239 + list_for_each_entry_reverse(ce, &psd->clock_list, node) { 233 240 if (ce->status < PCE_STATUS_ERROR) { 234 241 clk_disable(ce->clk); 235 242 ce->status = PCE_STATUS_ACQUIRED; 236 243 } 237 244 } 238 245 239 - spin_unlock_irqrestore(&pcd->lock, flags); 246 + spin_unlock_irqrestore(&psd->lock, flags); 240 247 241 248 return 0; 242 249 } ··· 247 254 */ 248 255 int pm_clk_resume(struct device *dev) 249 256 { 250 - struct pm_clk_data *pcd = __to_pcd(dev); 257 + struct pm_subsys_data *psd = dev_to_psd(dev); 251 258 struct pm_clock_entry *ce; 252 259 unsigned long flags; 253 260 254 261 dev_dbg(dev, "%s()\n", 
__func__); 255 262 256 - if (!pcd) 263 + if (!psd) 257 264 return 0; 258 265 259 - spin_lock_irqsave(&pcd->lock, flags); 266 + spin_lock_irqsave(&psd->lock, flags); 260 267 261 - list_for_each_entry(ce, &pcd->clock_list, node) { 268 + list_for_each_entry(ce, &psd->clock_list, node) { 262 269 if (ce->status < PCE_STATUS_ERROR) { 263 270 clk_enable(ce->clk); 264 271 ce->status = PCE_STATUS_ENABLED; 265 272 } 266 273 } 267 274 268 - spin_unlock_irqrestore(&pcd->lock, flags); 275 + spin_unlock_irqrestore(&psd->lock, flags); 269 276 270 277 return 0; 271 278 } ··· 303 310 if (dev->pm_domain) 304 311 break; 305 312 306 - error = pm_clk_init(dev); 313 + error = pm_clk_create(dev); 307 314 if (error) 308 315 break; 309 316 ··· 338 345 */ 339 346 int pm_clk_suspend(struct device *dev) 340 347 { 341 - struct pm_clk_data *pcd = __to_pcd(dev); 348 + struct pm_subsys_data *psd = dev_to_psd(dev); 342 349 struct pm_clock_entry *ce; 343 350 unsigned long flags; 344 351 345 352 dev_dbg(dev, "%s()\n", __func__); 346 353 347 354 /* If there is no driver, the clocks are already disabled. */ 348 - if (!pcd || !dev->driver) 355 + if (!psd || !dev->driver) 349 356 return 0; 350 357 351 - spin_lock_irqsave(&pcd->lock, flags); 358 + spin_lock_irqsave(&psd->lock, flags); 352 359 353 - list_for_each_entry_reverse(ce, &pcd->clock_list, node) 360 + list_for_each_entry_reverse(ce, &psd->clock_list, node) 354 361 clk_disable(ce->clk); 355 362 356 - spin_unlock_irqrestore(&pcd->lock, flags); 363 + spin_unlock_irqrestore(&psd->lock, flags); 357 364 358 365 return 0; 359 366 } ··· 364 371 */ 365 372 int pm_clk_resume(struct device *dev) 366 373 { 367 - struct pm_clk_data *pcd = __to_pcd(dev); 374 + struct pm_subsys_data *psd = dev_to_psd(dev); 368 375 struct pm_clock_entry *ce; 369 376 unsigned long flags; 370 377 371 378 dev_dbg(dev, "%s()\n", __func__); 372 379 373 380 /* If there is no driver, the clocks should remain disabled. 
*/ 374 - if (!pcd || !dev->driver) 381 + if (!psd || !dev->driver) 375 382 return 0; 376 383 377 - spin_lock_irqsave(&pcd->lock, flags); 384 + spin_lock_irqsave(&psd->lock, flags); 378 385 379 - list_for_each_entry(ce, &pcd->clock_list, node) 386 + list_for_each_entry(ce, &psd->clock_list, node) 380 387 clk_enable(ce->clk); 381 388 382 - spin_unlock_irqrestore(&pcd->lock, flags); 389 + spin_unlock_irqrestore(&psd->lock, flags); 383 390 384 391 return 0; 385 392 }
+86
drivers/base/power/common.c
··· 1 + /* 2 + * drivers/base/power/common.c - Common device power management code. 3 + * 4 + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 5 + * 6 + * This file is released under the GPLv2. 7 + */ 8 + 9 + #include <linux/init.h> 10 + #include <linux/kernel.h> 11 + #include <linux/module.h> 12 + #include <linux/slab.h> 13 + #include <linux/pm_clock.h> 14 + 15 + /** 16 + * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. 17 + * @dev: Device to handle. 18 + * 19 + * If power.subsys_data is NULL, point it to a new object, otherwise increment 20 + * its reference counter. Return 1 if a new object has been created, otherwise 21 + * return 0 or error code. 22 + */ 23 + int dev_pm_get_subsys_data(struct device *dev) 24 + { 25 + struct pm_subsys_data *psd; 26 + int ret = 0; 27 + 28 + psd = kzalloc(sizeof(*psd), GFP_KERNEL); 29 + if (!psd) 30 + return -ENOMEM; 31 + 32 + spin_lock_irq(&dev->power.lock); 33 + 34 + if (dev->power.subsys_data) { 35 + dev->power.subsys_data->refcount++; 36 + } else { 37 + spin_lock_init(&psd->lock); 38 + psd->refcount = 1; 39 + dev->power.subsys_data = psd; 40 + pm_clk_init(dev); 41 + psd = NULL; 42 + ret = 1; 43 + } 44 + 45 + spin_unlock_irq(&dev->power.lock); 46 + 47 + /* kfree() verifies that its argument is nonzero. */ 48 + kfree(psd); 49 + 50 + return ret; 51 + } 52 + EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); 53 + 54 + /** 55 + * dev_pm_put_subsys_data - Drop reference to power.subsys_data. 56 + * @dev: Device to handle. 57 + * 58 + * If the reference counter of power.subsys_data is zero after dropping the 59 + * reference, power.subsys_data is removed. Return 1 if that happens or 0 60 + * otherwise. 
61 + */ 62 + int dev_pm_put_subsys_data(struct device *dev) 63 + { 64 + struct pm_subsys_data *psd; 65 + int ret = 0; 66 + 67 + spin_lock_irq(&dev->power.lock); 68 + 69 + psd = dev_to_psd(dev); 70 + if (!psd) { 71 + ret = -EINVAL; 72 + goto out; 73 + } 74 + 75 + if (--psd->refcount == 0) { 76 + dev->power.subsys_data = NULL; 77 + kfree(psd); 78 + ret = 1; 79 + } 80 + 81 + out: 82 + spin_unlock_irq(&dev->power.lock); 83 + 84 + return ret; 85 + } 86 + EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
+202 -146
drivers/base/power/domain.c
··· 29 29 return pd_to_genpd(dev->pm_domain); 30 30 } 31 31 32 - static void genpd_sd_counter_dec(struct generic_pm_domain *genpd) 32 + static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) 33 33 { 34 - if (!WARN_ON(genpd->sd_count == 0)) 35 - genpd->sd_count--; 34 + bool ret = false; 35 + 36 + if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) 37 + ret = !!atomic_dec_and_test(&genpd->sd_count); 38 + 39 + return ret; 40 + } 41 + 42 + static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) 43 + { 44 + atomic_inc(&genpd->sd_count); 45 + smp_mb__after_atomic_inc(); 36 46 } 37 47 38 48 static void genpd_acquire_lock(struct generic_pm_domain *genpd) ··· 81 71 } 82 72 83 73 /** 84 - * pm_genpd_poweron - Restore power to a given PM domain and its parents. 74 + * __pm_genpd_poweron - Restore power to a given PM domain and its masters. 85 75 * @genpd: PM domain to power up. 86 76 * 87 - * Restore power to @genpd and all of its parents so that it is possible to 77 + * Restore power to @genpd and all of its masters so that it is possible to 88 78 * resume a device belonging to it. 89 79 */ 90 - int pm_genpd_poweron(struct generic_pm_domain *genpd) 80 + int __pm_genpd_poweron(struct generic_pm_domain *genpd) 81 + __releases(&genpd->lock) __acquires(&genpd->lock) 91 82 { 92 - struct generic_pm_domain *parent = genpd->parent; 83 + struct gpd_link *link; 84 + DEFINE_WAIT(wait); 93 85 int ret = 0; 94 86 95 - start: 96 - if (parent) { 97 - genpd_acquire_lock(parent); 98 - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); 99 - } else { 87 + /* If the domain's master is being waited for, we have to wait too. 
*/ 88 + for (;;) { 89 + prepare_to_wait(&genpd->status_wait_queue, &wait, 90 + TASK_UNINTERRUPTIBLE); 91 + if (genpd->status != GPD_STATE_WAIT_MASTER) 92 + break; 93 + mutex_unlock(&genpd->lock); 94 + 95 + schedule(); 96 + 100 97 mutex_lock(&genpd->lock); 101 98 } 99 + finish_wait(&genpd->status_wait_queue, &wait); 102 100 103 101 if (genpd->status == GPD_STATE_ACTIVE 104 102 || (genpd->prepared_count > 0 && genpd->suspend_power_off)) 105 - goto out; 103 + return 0; 106 104 107 105 if (genpd->status != GPD_STATE_POWER_OFF) { 108 106 genpd_set_active(genpd); 109 - goto out; 107 + return 0; 110 108 } 111 109 112 - if (parent && parent->status != GPD_STATE_ACTIVE) { 110 + /* 111 + * The list is guaranteed not to change while the loop below is being 112 + * executed, unless one of the masters' .power_on() callbacks fiddles 113 + * with it. 114 + */ 115 + list_for_each_entry(link, &genpd->slave_links, slave_node) { 116 + genpd_sd_counter_inc(link->master); 117 + genpd->status = GPD_STATE_WAIT_MASTER; 118 + 113 119 mutex_unlock(&genpd->lock); 114 - genpd_release_lock(parent); 115 120 116 - ret = pm_genpd_poweron(parent); 117 - if (ret) 118 - return ret; 121 + ret = pm_genpd_poweron(link->master); 119 122 120 - goto start; 123 + mutex_lock(&genpd->lock); 124 + 125 + /* 126 + * The "wait for parent" status is guaranteed not to change 127 + * while the master is powering on. 
128 + */ 129 + genpd->status = GPD_STATE_POWER_OFF; 130 + wake_up_all(&genpd->status_wait_queue); 131 + if (ret) { 132 + genpd_sd_counter_dec(link->master); 133 + goto err; 134 + } 121 135 } 122 136 123 137 if (genpd->power_on) { 124 138 ret = genpd->power_on(genpd); 125 139 if (ret) 126 - goto out; 140 + goto err; 127 141 } 128 142 129 143 genpd_set_active(genpd); 130 - if (parent) 131 - parent->sd_count++; 132 144 133 - out: 145 + return 0; 146 + 147 + err: 148 + list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) 149 + genpd_sd_counter_dec(link->master); 150 + 151 + return ret; 152 + } 153 + 154 + /** 155 + * pm_genpd_poweron - Restore power to a given PM domain and its masters. 156 + * @genpd: PM domain to power up. 157 + */ 158 + int pm_genpd_poweron(struct generic_pm_domain *genpd) 159 + { 160 + int ret; 161 + 162 + mutex_lock(&genpd->lock); 163 + ret = __pm_genpd_poweron(genpd); 134 164 mutex_unlock(&genpd->lock); 135 - if (parent) 136 - genpd_release_lock(parent); 137 - 138 165 return ret; 139 166 } 140 167 ··· 181 134 182 135 /** 183 136 * __pm_genpd_save_device - Save the pre-suspend state of a device. 184 - * @dle: Device list entry of the device to save the state of. 137 + * @pdd: Domain data of the device to save the state of. 185 138 * @genpd: PM domain the device belongs to. 
186 139 */ 187 - static int __pm_genpd_save_device(struct dev_list_entry *dle, 140 + static int __pm_genpd_save_device(struct pm_domain_data *pdd, 188 141 struct generic_pm_domain *genpd) 189 142 __releases(&genpd->lock) __acquires(&genpd->lock) 190 143 { 191 - struct device *dev = dle->dev; 144 + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 145 + struct device *dev = pdd->dev; 192 146 struct device_driver *drv = dev->driver; 193 147 int ret = 0; 194 148 195 - if (dle->need_restore) 149 + if (gpd_data->need_restore) 196 150 return 0; 197 151 198 152 mutex_unlock(&genpd->lock); ··· 211 163 mutex_lock(&genpd->lock); 212 164 213 165 if (!ret) 214 - dle->need_restore = true; 166 + gpd_data->need_restore = true; 215 167 216 168 return ret; 217 169 } 218 170 219 171 /** 220 172 * __pm_genpd_restore_device - Restore the pre-suspend state of a device. 221 - * @dle: Device list entry of the device to restore the state of. 173 + * @pdd: Domain data of the device to restore the state of. 222 174 * @genpd: PM domain the device belongs to. 
223 175 */ 224 - static void __pm_genpd_restore_device(struct dev_list_entry *dle, 176 + static void __pm_genpd_restore_device(struct pm_domain_data *pdd, 225 177 struct generic_pm_domain *genpd) 226 178 __releases(&genpd->lock) __acquires(&genpd->lock) 227 179 { 228 - struct device *dev = dle->dev; 180 + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 181 + struct device *dev = pdd->dev; 229 182 struct device_driver *drv = dev->driver; 230 183 231 - if (!dle->need_restore) 184 + if (!gpd_data->need_restore) 232 185 return; 233 186 234 187 mutex_unlock(&genpd->lock); ··· 246 197 247 198 mutex_lock(&genpd->lock); 248 199 249 - dle->need_restore = false; 200 + gpd_data->need_restore = false; 250 201 } 251 202 252 203 /** ··· 260 211 */ 261 212 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) 262 213 { 263 - return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; 214 + return genpd->status == GPD_STATE_WAIT_MASTER 215 + || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; 264 216 } 265 217 266 218 /** ··· 288 238 static int pm_genpd_poweroff(struct generic_pm_domain *genpd) 289 239 __releases(&genpd->lock) __acquires(&genpd->lock) 290 240 { 291 - struct generic_pm_domain *parent; 292 - struct dev_list_entry *dle; 241 + struct pm_domain_data *pdd; 242 + struct gpd_link *link; 293 243 unsigned int not_suspended; 294 244 int ret = 0; 295 245 ··· 297 247 /* 298 248 * Do not try to power off the domain in the following situations: 299 249 * (1) The domain is already in the "power off" state. 300 - * (2) System suspend is in progress. 250 + * (2) The domain is waiting for its master to power up. 301 251 * (3) One of the domain's devices is being resumed right now. 252 + * (4) System suspend is in progress. 
302 253 */ 303 - if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0 304 - || genpd->resume_count > 0) 254 + if (genpd->status == GPD_STATE_POWER_OFF 255 + || genpd->status == GPD_STATE_WAIT_MASTER 256 + || genpd->resume_count > 0 || genpd->prepared_count > 0) 305 257 return 0; 306 258 307 - if (genpd->sd_count > 0) 259 + if (atomic_read(&genpd->sd_count) > 0) 308 260 return -EBUSY; 309 261 310 262 not_suspended = 0; 311 - list_for_each_entry(dle, &genpd->dev_list, node) 312 - if (dle->dev->driver && !pm_runtime_suspended(dle->dev)) 263 + list_for_each_entry(pdd, &genpd->dev_list, list_node) 264 + if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) 265 + || pdd->dev->power.irq_safe)) 313 266 not_suspended++; 314 267 315 268 if (not_suspended > genpd->in_progress) ··· 335 282 genpd->status = GPD_STATE_BUSY; 336 283 genpd->poweroff_task = current; 337 284 338 - list_for_each_entry_reverse(dle, &genpd->dev_list, node) { 339 - ret = __pm_genpd_save_device(dle, genpd); 285 + list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { 286 + ret = atomic_read(&genpd->sd_count) == 0 ? 
287 + __pm_genpd_save_device(pdd, genpd) : -EBUSY; 288 + 289 + if (genpd_abort_poweroff(genpd)) 290 + goto out; 291 + 340 292 if (ret) { 341 293 genpd_set_active(genpd); 342 294 goto out; 343 295 } 344 - 345 - if (genpd_abort_poweroff(genpd)) 346 - goto out; 347 296 348 297 if (genpd->status == GPD_STATE_REPEAT) { 349 298 genpd->poweroff_task = NULL; ··· 353 298 } 354 299 } 355 300 356 - parent = genpd->parent; 357 - if (parent) { 358 - mutex_unlock(&genpd->lock); 359 - 360 - genpd_acquire_lock(parent); 361 - mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); 362 - 363 - if (genpd_abort_poweroff(genpd)) { 364 - genpd_release_lock(parent); 301 + if (genpd->power_off) { 302 + if (atomic_read(&genpd->sd_count) > 0) { 303 + ret = -EBUSY; 365 304 goto out; 366 305 } 367 - } 368 306 369 - if (genpd->power_off) { 307 + /* 308 + * If sd_count > 0 at this point, one of the subdomains hasn't 309 + * managed to call pm_genpd_poweron() for the master yet after 310 + * incrementing it. In that case pm_genpd_poweron() will wait 311 + * for us to drop the lock, so we can call .power_off() and let 312 + * the pm_genpd_poweron() restore power for us (this shouldn't 313 + * happen very often). 
314 + */ 370 315 ret = genpd->power_off(genpd); 371 316 if (ret == -EBUSY) { 372 317 genpd_set_active(genpd); 373 - if (parent) 374 - genpd_release_lock(parent); 375 - 376 318 goto out; 377 319 } 378 320 } 379 321 380 322 genpd->status = GPD_STATE_POWER_OFF; 381 323 382 - if (parent) { 383 - genpd_sd_counter_dec(parent); 384 - if (parent->sd_count == 0) 385 - genpd_queue_power_off_work(parent); 386 - 387 - genpd_release_lock(parent); 324 + list_for_each_entry(link, &genpd->slave_links, slave_node) { 325 + genpd_sd_counter_dec(link->master); 326 + genpd_queue_power_off_work(link->master); 388 327 } 389 328 390 329 out: ··· 420 371 if (IS_ERR(genpd)) 421 372 return -EINVAL; 422 373 374 + might_sleep_if(!genpd->dev_irq_safe); 375 + 423 376 if (genpd->stop_device) { 424 377 int ret = genpd->stop_device(dev); 425 378 if (ret) 426 379 return ret; 427 380 } 381 + 382 + /* 383 + * If power.irq_safe is set, this routine will be run with interrupts 384 + * off, so it can't use mutexes. 385 + */ 386 + if (dev->power.irq_safe) 387 + return 0; 428 388 429 389 mutex_lock(&genpd->lock); 430 390 genpd->in_progress++; ··· 442 384 mutex_unlock(&genpd->lock); 443 385 444 386 return 0; 445 - } 446 - 447 - /** 448 - * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. 449 - * @dev: Device to resume. 450 - * @genpd: PM domain the device belongs to. 451 - */ 452 - static void __pm_genpd_runtime_resume(struct device *dev, 453 - struct generic_pm_domain *genpd) 454 - { 455 - struct dev_list_entry *dle; 456 - 457 - list_for_each_entry(dle, &genpd->dev_list, node) { 458 - if (dle->dev == dev) { 459 - __pm_genpd_restore_device(dle, genpd); 460 - break; 461 - } 462 - } 463 387 } 464 388 465 389 /** ··· 464 424 if (IS_ERR(genpd)) 465 425 return -EINVAL; 466 426 467 - ret = pm_genpd_poweron(genpd); 468 - if (ret) 469 - return ret; 427 + might_sleep_if(!genpd->dev_irq_safe); 428 + 429 + /* If power.irq_safe, the PM domain is never powered off. 
*/ 430 + if (dev->power.irq_safe) 431 + goto out; 470 432 471 433 mutex_lock(&genpd->lock); 434 + ret = __pm_genpd_poweron(genpd); 435 + if (ret) { 436 + mutex_unlock(&genpd->lock); 437 + return ret; 438 + } 472 439 genpd->status = GPD_STATE_BUSY; 473 440 genpd->resume_count++; 474 441 for (;;) { ··· 495 448 mutex_lock(&genpd->lock); 496 449 } 497 450 finish_wait(&genpd->status_wait_queue, &wait); 498 - __pm_genpd_runtime_resume(dev, genpd); 451 + __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); 499 452 genpd->resume_count--; 500 453 genpd_set_active(genpd); 501 454 wake_up_all(&genpd->status_wait_queue); 502 455 mutex_unlock(&genpd->lock); 503 456 457 + out: 504 458 if (genpd->start_device) 505 459 genpd->start_device(dev); 506 460 ··· 526 478 #else 527 479 528 480 static inline void genpd_power_off_work_fn(struct work_struct *work) {} 529 - static inline void __pm_genpd_runtime_resume(struct device *dev, 530 - struct generic_pm_domain *genpd) {} 531 481 532 482 #define pm_genpd_runtime_suspend NULL 533 483 #define pm_genpd_runtime_resume NULL ··· 535 489 #ifdef CONFIG_PM_SLEEP 536 490 537 491 /** 538 - * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents. 492 + * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. 539 493 * @genpd: PM domain to power off, if possible. 540 494 * 541 495 * Check if the given PM domain can be powered off (during system suspend or 542 - * hibernation) and do that if so. Also, in that case propagate to its parent. 496 + * hibernation) and do that if so. Also, in that case propagate to its masters. 
543 497 * 544 498 * This function is only called in "noirq" stages of system power transitions, 545 499 * so it need not acquire locks (all of the "noirq" callbacks are executed ··· 547 501 */ 548 502 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) 549 503 { 550 - struct generic_pm_domain *parent = genpd->parent; 504 + struct gpd_link *link; 551 505 552 506 if (genpd->status == GPD_STATE_POWER_OFF) 553 507 return; 554 508 555 - if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0) 509 + if (genpd->suspended_count != genpd->device_count 510 + || atomic_read(&genpd->sd_count) > 0) 556 511 return; 557 512 558 513 if (genpd->power_off) 559 514 genpd->power_off(genpd); 560 515 561 516 genpd->status = GPD_STATE_POWER_OFF; 562 - if (parent) { 563 - genpd_sd_counter_dec(parent); 564 - pm_genpd_sync_poweroff(parent); 517 + 518 + list_for_each_entry(link, &genpd->slave_links, slave_node) { 519 + genpd_sd_counter_dec(link->master); 520 + pm_genpd_sync_poweroff(link->master); 565 521 } 566 522 } 567 523 ··· 1082 1034 */ 1083 1035 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) 1084 1036 { 1085 - struct dev_list_entry *dle; 1037 + struct generic_pm_domain_data *gpd_data; 1038 + struct pm_domain_data *pdd; 1086 1039 int ret = 0; 1087 1040 1088 1041 dev_dbg(dev, "%s()\n", __func__); ··· 1103 1054 goto out; 1104 1055 } 1105 1056 1106 - list_for_each_entry(dle, &genpd->dev_list, node) 1107 - if (dle->dev == dev) { 1057 + list_for_each_entry(pdd, &genpd->dev_list, list_node) 1058 + if (pdd->dev == dev) { 1108 1059 ret = -EINVAL; 1109 1060 goto out; 1110 1061 } 1111 1062 1112 - dle = kzalloc(sizeof(*dle), GFP_KERNEL); 1113 - if (!dle) { 1063 + gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1064 + if (!gpd_data) { 1114 1065 ret = -ENOMEM; 1115 1066 goto out; 1116 1067 } 1117 1068 1118 - dle->dev = dev; 1119 - dle->need_restore = false; 1120 - list_add_tail(&dle->node, &genpd->dev_list); 1121 1069 
genpd->device_count++; 1122 1070 1123 - spin_lock_irq(&dev->power.lock); 1124 1071 dev->pm_domain = &genpd->domain; 1125 - spin_unlock_irq(&dev->power.lock); 1072 + dev_pm_get_subsys_data(dev); 1073 + dev->power.subsys_data->domain_data = &gpd_data->base; 1074 + gpd_data->base.dev = dev; 1075 + gpd_data->need_restore = false; 1076 + list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1126 1077 1127 1078 out: 1128 1079 genpd_release_lock(genpd); ··· 1138 1089 int pm_genpd_remove_device(struct generic_pm_domain *genpd, 1139 1090 struct device *dev) 1140 1091 { 1141 - struct dev_list_entry *dle; 1092 + struct pm_domain_data *pdd; 1142 1093 int ret = -EINVAL; 1143 1094 1144 1095 dev_dbg(dev, "%s()\n", __func__); ··· 1153 1104 goto out; 1154 1105 } 1155 1106 1156 - list_for_each_entry(dle, &genpd->dev_list, node) { 1157 - if (dle->dev != dev) 1107 + list_for_each_entry(pdd, &genpd->dev_list, list_node) { 1108 + if (pdd->dev != dev) 1158 1109 continue; 1159 1110 1160 - spin_lock_irq(&dev->power.lock); 1111 + list_del_init(&pdd->list_node); 1112 + pdd->dev = NULL; 1113 + dev_pm_put_subsys_data(dev); 1161 1114 dev->pm_domain = NULL; 1162 - spin_unlock_irq(&dev->power.lock); 1115 + kfree(to_gpd_data(pdd)); 1163 1116 1164 1117 genpd->device_count--; 1165 - list_del(&dle->node); 1166 - kfree(dle); 1167 1118 1168 1119 ret = 0; 1169 1120 break; ··· 1178 1129 /** 1179 1130 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1180 1131 * @genpd: Master PM domain to add the subdomain to. 1181 - * @new_subdomain: Subdomain to be added. 1132 + * @subdomain: Subdomain to be added. 
1182 1133 */ 1183 1134 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 1184 - struct generic_pm_domain *new_subdomain) 1135 + struct generic_pm_domain *subdomain) 1185 1136 { 1186 - struct generic_pm_domain *subdomain; 1137 + struct gpd_link *link; 1187 1138 int ret = 0; 1188 1139 1189 - if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain)) 1140 + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1190 1141 return -EINVAL; 1191 1142 1192 1143 start: 1193 1144 genpd_acquire_lock(genpd); 1194 - mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); 1145 + mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); 1195 1146 1196 - if (new_subdomain->status != GPD_STATE_POWER_OFF 1197 - && new_subdomain->status != GPD_STATE_ACTIVE) { 1198 - mutex_unlock(&new_subdomain->lock); 1147 + if (subdomain->status != GPD_STATE_POWER_OFF 1148 + && subdomain->status != GPD_STATE_ACTIVE) { 1149 + mutex_unlock(&subdomain->lock); 1199 1150 genpd_release_lock(genpd); 1200 1151 goto start; 1201 1152 } 1202 1153 1203 1154 if (genpd->status == GPD_STATE_POWER_OFF 1204 - && new_subdomain->status != GPD_STATE_POWER_OFF) { 1155 + && subdomain->status != GPD_STATE_POWER_OFF) { 1205 1156 ret = -EINVAL; 1206 1157 goto out; 1207 1158 } 1208 1159 1209 - list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { 1210 - if (subdomain == new_subdomain) { 1160 + list_for_each_entry(link, &genpd->slave_links, slave_node) { 1161 + if (link->slave == subdomain && link->master == genpd) { 1211 1162 ret = -EINVAL; 1212 1163 goto out; 1213 1164 } 1214 1165 } 1215 1166 1216 - list_add_tail(&new_subdomain->sd_node, &genpd->sd_list); 1217 - new_subdomain->parent = genpd; 1167 + link = kzalloc(sizeof(*link), GFP_KERNEL); 1168 + if (!link) { 1169 + ret = -ENOMEM; 1170 + goto out; 1171 + } 1172 + link->master = genpd; 1173 + list_add_tail(&link->master_node, &genpd->master_links); 1174 + link->slave = subdomain; 1175 + list_add_tail(&link->slave_node, &subdomain->slave_links); 
1218 1176 if (subdomain->status != GPD_STATE_POWER_OFF) 1219 - genpd->sd_count++; 1177 + genpd_sd_counter_inc(genpd); 1220 1178 1221 1179 out: 1222 - mutex_unlock(&new_subdomain->lock); 1180 + mutex_unlock(&subdomain->lock); 1223 1181 genpd_release_lock(genpd); 1224 1182 1225 1183 return ret; ··· 1235 1179 /** 1236 1180 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 1237 1181 * @genpd: Master PM domain to remove the subdomain from. 1238 - * @target: Subdomain to be removed. 1182 + * @subdomain: Subdomain to be removed. 1239 1183 */ 1240 1184 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 1241 - struct generic_pm_domain *target) 1185 + struct generic_pm_domain *subdomain) 1242 1186 { 1243 - struct generic_pm_domain *subdomain; 1187 + struct gpd_link *link; 1244 1188 int ret = -EINVAL; 1245 1189 1246 - if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target)) 1190 + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1247 1191 return -EINVAL; 1248 1192 1249 1193 start: 1250 1194 genpd_acquire_lock(genpd); 1251 1195 1252 - list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { 1253 - if (subdomain != target) 1196 + list_for_each_entry(link, &genpd->master_links, master_node) { 1197 + if (link->slave != subdomain) 1254 1198 continue; 1255 1199 1256 1200 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); ··· 1262 1206 goto start; 1263 1207 } 1264 1208 1265 - list_del(&subdomain->sd_node); 1266 - subdomain->parent = NULL; 1209 + list_del(&link->master_node); 1210 + list_del(&link->slave_node); 1211 + kfree(link); 1267 1212 if (subdomain->status != GPD_STATE_POWER_OFF) 1268 1213 genpd_sd_counter_dec(genpd); 1269 1214 ··· 1291 1234 if (IS_ERR_OR_NULL(genpd)) 1292 1235 return; 1293 1236 1294 - INIT_LIST_HEAD(&genpd->sd_node); 1295 - genpd->parent = NULL; 1237 + INIT_LIST_HEAD(&genpd->master_links); 1238 + INIT_LIST_HEAD(&genpd->slave_links); 1296 1239 INIT_LIST_HEAD(&genpd->dev_list); 1297 - 
INIT_LIST_HEAD(&genpd->sd_list); 1298 1240 mutex_init(&genpd->lock); 1299 1241 genpd->gov = gov; 1300 1242 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 1301 1243 genpd->in_progress = 0; 1302 - genpd->sd_count = 0; 1244 + atomic_set(&genpd->sd_count, 0); 1303 1245 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; 1304 1246 init_waitqueue_head(&genpd->status_wait_queue); 1305 1247 genpd->poweroff_task = NULL;
+5
include/linux/device.h
··· 636 636 } 637 637 #endif 638 638 639 + static inline struct pm_subsys_data *dev_to_psd(struct device *dev) 640 + { 641 + return dev ? dev->power.subsys_data : NULL; 642 + } 643 + 639 644 static inline unsigned int dev_get_uevent_suppress(const struct device *dev) 640 645 { 641 646 return dev->kobj.uevent_suppress;
+19 -1
include/linux/pm.h
··· 423 423 424 424 struct wakeup_source; 425 425 426 + struct pm_domain_data { 427 + struct list_head list_node; 428 + struct device *dev; 429 + }; 430 + 431 + struct pm_subsys_data { 432 + spinlock_t lock; 433 + unsigned int refcount; 434 + #ifdef CONFIG_PM_CLK 435 + struct list_head clock_list; 436 + #endif 437 + #ifdef CONFIG_PM_GENERIC_DOMAINS 438 + struct pm_domain_data *domain_data; 439 + #endif 440 + }; 441 + 426 442 struct dev_pm_info { 427 443 pm_message_t power_state; 428 444 unsigned int can_wakeup:1; ··· 480 464 unsigned long suspended_jiffies; 481 465 unsigned long accounting_timestamp; 482 466 #endif 483 - void *subsys_data; /* Owned by the subsystem. */ 467 + struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ 484 468 }; 485 469 486 470 extern void update_pm_runtime_accounting(struct device *dev); 471 + extern int dev_pm_get_subsys_data(struct device *dev); 472 + extern int dev_pm_put_subsys_data(struct device *dev); 487 473 488 474 /* 489 475 * Power domains provide callbacks that are executed during system suspend,
+71
include/linux/pm_clock.h
··· 1 + /* 2 + * pm_clock.h - Definitions and headers related to device clocks. 3 + * 4 + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 5 + * 6 + * This file is released under the GPLv2. 7 + */ 8 + 9 + #ifndef _LINUX_PM_CLOCK_H 10 + #define _LINUX_PM_CLOCK_H 11 + 12 + #include <linux/device.h> 13 + #include <linux/notifier.h> 14 + 15 + struct pm_clk_notifier_block { 16 + struct notifier_block nb; 17 + struct dev_pm_domain *pm_domain; 18 + char *con_ids[]; 19 + }; 20 + 21 + #ifdef CONFIG_PM_CLK 22 + static inline bool pm_clk_no_clocks(struct device *dev) 23 + { 24 + return dev && dev->power.subsys_data 25 + && list_empty(&dev->power.subsys_data->clock_list); 26 + } 27 + 28 + extern void pm_clk_init(struct device *dev); 29 + extern int pm_clk_create(struct device *dev); 30 + extern void pm_clk_destroy(struct device *dev); 31 + extern int pm_clk_add(struct device *dev, const char *con_id); 32 + extern void pm_clk_remove(struct device *dev, const char *con_id); 33 + extern int pm_clk_suspend(struct device *dev); 34 + extern int pm_clk_resume(struct device *dev); 35 + #else 36 + static inline bool pm_clk_no_clocks(struct device *dev) 37 + { 38 + return true; 39 + } 40 + static inline void pm_clk_init(struct device *dev) 41 + { 42 + } 43 + static inline int pm_clk_create(struct device *dev) 44 + { 45 + return -EINVAL; 46 + } 47 + static inline void pm_clk_destroy(struct device *dev) 48 + { 49 + } 50 + static inline int pm_clk_add(struct device *dev, const char *con_id) 51 + { 52 + return -EINVAL; 53 + } 54 + static inline void pm_clk_remove(struct device *dev, const char *con_id) 55 + { 56 + } 57 + #define pm_clk_suspend NULL 58 + #define pm_clk_resume NULL 59 + #endif 60 + 61 + #ifdef CONFIG_HAVE_CLK 62 + extern void pm_clk_add_notifier(struct bus_type *bus, 63 + struct pm_clk_notifier_block *clknb); 64 + #else 65 + static inline void pm_clk_add_notifier(struct bus_type *bus, 66 + struct pm_clk_notifier_block *clknb) 67 + { 68 + } 69 + 
#endif 70 + 71 + #endif
+19 -7
include/linux/pm_domain.h
··· 13 13 14 14 enum gpd_status { 15 15 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 16 + GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ 16 17 GPD_STATE_BUSY, /* Something is happening to the PM domain */ 17 18 GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ 18 19 GPD_STATE_POWER_OFF, /* PM domain is off */ ··· 26 25 struct generic_pm_domain { 27 26 struct dev_pm_domain domain; /* PM domain operations */ 28 27 struct list_head gpd_list_node; /* Node in the global PM domains list */ 29 - struct list_head sd_node; /* Node in the parent's subdomain list */ 30 - struct generic_pm_domain *parent; /* Parent PM domain */ 31 - struct list_head sd_list; /* List of dubdomains */ 28 + struct list_head master_links; /* Links with PM domain as a master */ 29 + struct list_head slave_links; /* Links with PM domain as a slave */ 32 30 struct list_head dev_list; /* List of devices */ 33 31 struct mutex lock; 34 32 struct dev_power_governor *gov; 35 33 struct work_struct power_off_work; 36 34 unsigned int in_progress; /* Number of devices being suspended now */ 37 - unsigned int sd_count; /* Number of subdomains with power "on" */ 35 + atomic_t sd_count; /* Number of subdomains with power "on" */ 38 36 enum gpd_status status; /* Current state of the domain */ 39 37 wait_queue_head_t status_wait_queue; 40 38 struct task_struct *poweroff_task; /* Powering off task */ ··· 42 42 unsigned int suspended_count; /* System suspend device counter */ 43 43 unsigned int prepared_count; /* Suspend counter of prepared devices */ 44 44 bool suspend_power_off; /* Power status before system suspend */ 45 + bool dev_irq_safe; /* Device callbacks are IRQ-safe */ 45 46 int (*power_off)(struct generic_pm_domain *domain); 46 47 int (*power_on)(struct generic_pm_domain *domain); 47 48 int (*start_device)(struct device *dev); ··· 55 54 return container_of(pd, struct generic_pm_domain, domain); 56 55 } 57 56 58 - struct dev_list_entry { 59 - struct list_head node; 60 - 
struct device *dev; 57 + struct gpd_link { 58 + struct generic_pm_domain *master; 59 + struct list_head master_node; 60 + struct generic_pm_domain *slave; 61 + struct list_head slave_node; 62 + }; 63 + 64 + struct generic_pm_domain_data { 65 + struct pm_domain_data base; 61 66 bool need_restore; 62 67 }; 68 + 69 + static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd) 70 + { 71 + return container_of(pdd, struct generic_pm_domain_data, base); 72 + } 63 73 64 74 #ifdef CONFIG_PM_GENERIC_DOMAINS 65 75 extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
-42
include/linux/pm_runtime.h
··· 251 251 __pm_runtime_use_autosuspend(dev, false); 252 252 } 253 253 254 - struct pm_clk_notifier_block { 255 - struct notifier_block nb; 256 - struct dev_pm_domain *pm_domain; 257 - char *con_ids[]; 258 - }; 259 - 260 - #ifdef CONFIG_PM_CLK 261 - extern int pm_clk_init(struct device *dev); 262 - extern void pm_clk_destroy(struct device *dev); 263 - extern int pm_clk_add(struct device *dev, const char *con_id); 264 - extern void pm_clk_remove(struct device *dev, const char *con_id); 265 - extern int pm_clk_suspend(struct device *dev); 266 - extern int pm_clk_resume(struct device *dev); 267 - #else 268 - static inline int pm_clk_init(struct device *dev) 269 - { 270 - return -EINVAL; 271 - } 272 - static inline void pm_clk_destroy(struct device *dev) 273 - { 274 - } 275 - static inline int pm_clk_add(struct device *dev, const char *con_id) 276 - { 277 - return -EINVAL; 278 - } 279 - static inline void pm_clk_remove(struct device *dev, const char *con_id) 280 - { 281 - } 282 - #define pm_clk_suspend NULL 283 - #define pm_clk_resume NULL 284 - #endif 285 - 286 - #ifdef CONFIG_HAVE_CLK 287 - extern void pm_clk_add_notifier(struct bus_type *bus, 288 - struct pm_clk_notifier_block *clknb); 289 - #else 290 - static inline void pm_clk_add_notifier(struct bus_type *bus, 291 - struct pm_clk_notifier_block *clknb) 292 - { 293 - } 294 - #endif 295 - 296 254 #endif