Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ipa-autosuspend'

Alex Elder says:

====================
net: ipa: enable automatic suspend

At long last, the first patch in this series enables automatic
suspend managed by the power management core. The remaining two
just rename things to be "power" oriented rather than "clock"
oriented.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+217 -204
+1 -1
drivers/net/ipa/Makefile
··· 1 1 obj-$(CONFIG_QCOM_IPA) += ipa.o 2 2 3 - ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \ 3 + ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \ 4 4 ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \ 5 5 ipa_gsi.o ipa_smp2p.o ipa_uc.o \ 6 6 ipa_endpoint.o ipa_cmd.o ipa_modem.o \
+10 -10
drivers/net/ipa/ipa.h
··· 23 23 struct net_device; 24 24 struct platform_device; 25 25 26 - struct ipa_clock; 26 + struct ipa_power; 27 27 struct ipa_smp2p; 28 28 struct ipa_interrupt; 29 29 ··· 36 36 * @nb: Notifier block used for remoteproc SSR 37 37 * @notifier: Remoteproc SSR notifier 38 38 * @smp2p: SMP2P information 39 - * @clock: IPA clocking information 39 + * @power: IPA power information 40 40 * @table_addr: DMA address of filter/route table content 41 41 * @table_virt: Virtual address of filter/route table content 42 42 * @interrupt: IPA Interrupt information 43 - * @uc_clocked: true if clock is active by proxy for microcontroller 43 + * @uc_powered: true if power is active by proxy for microcontroller 44 44 * @uc_loaded: true after microcontroller has reported it's ready 45 45 * @reg_addr: DMA address used for IPA register access 46 46 * @reg_virt: Virtual address used for IPA register access ··· 78 78 struct notifier_block nb; 79 79 void *notifier; 80 80 struct ipa_smp2p *smp2p; 81 - struct ipa_clock *clock; 81 + struct ipa_power *power; 82 82 83 83 dma_addr_t table_addr; 84 84 __le64 *table_virt; 85 85 86 86 struct ipa_interrupt *interrupt; 87 - bool uc_clocked; 87 + bool uc_powered; 88 88 bool uc_loaded; 89 89 90 90 dma_addr_t reg_addr; ··· 134 134 * 135 135 * Activities performed at the init stage can be done without requiring 136 136 * any access to IPA hardware. Activities performed at the config stage 137 - * require the IPA clock to be running, because they involve access 138 - * to IPA registers. The setup stage is performed only after the GSI 139 - * hardware is ready (more on this below). The setup stage allows 140 - * the AP to perform more complex initialization by issuing "immediate 141 - * commands" using a special interface to the IPA. 137 + * require IPA power, because they involve access to IPA registers. 138 + * The setup stage is performed only after the GSI hardware is ready 139 + * (more on this below). 
The setup stage allows the AP to perform 140 + * more complex initialization by issuing "immediate commands" using 141 + * a special interface to the IPA. 142 142 * 143 143 * This function, @ipa_setup(), starts the setup stage. 144 144 *
+80 -83
drivers/net/ipa/ipa_clock.c drivers/net/ipa/ipa_power.c
··· 12 12 #include <linux/bitops.h> 13 13 14 14 #include "ipa.h" 15 - #include "ipa_clock.h" 15 + #include "ipa_power.h" 16 16 #include "ipa_endpoint.h" 17 17 #include "ipa_modem.h" 18 18 #include "ipa_data.h" 19 19 20 20 /** 21 - * DOC: IPA Clocking 21 + * DOC: IPA Power Management 22 22 * 23 - * The "IPA Clock" manages both the IPA core clock and the interconnects 24 - * (buses) the IPA depends on as a single logical entity. A reference count 25 - * is incremented by "get" operations and decremented by "put" operations. 26 - * Transitions of that count from 0 to 1 result in the clock and interconnects 27 - * being enabled, and transitions of the count from 1 to 0 cause them to be 28 - * disabled. We currently operate the core clock at a fixed clock rate, and 29 - * all buses at a fixed average and peak bandwidth. As more advanced IPA 30 - * features are enabled, we can make better use of clock and bus scaling. 23 + * The IPA hardware is enabled when the IPA core clock and all the 24 + * interconnects (buses) it depends on are enabled. Runtime power 25 + * management is used to determine whether the core clock and 26 + * interconnects are enabled, and if not in use to be suspended 27 + * automatically. 31 28 * 32 - * An IPA clock reference must be held for any access to IPA hardware. 29 + * The core clock currently runs at a fixed clock rate when enabled, 30 + * an all interconnects use a fixed average and peak bandwidth. 
33 31 */ 32 + 33 + #define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */ 34 34 35 35 /** 36 36 * struct ipa_interconnect - IPA interconnect information ··· 61 61 }; 62 62 63 63 /** 64 - * struct ipa_clock - IPA clocking information 64 + * struct ipa_power - IPA power management information 65 65 * @dev: IPA device pointer 66 66 * @core: IPA core clock 67 67 * @spinlock: Protects modem TX queue enable/disable ··· 69 69 * @interconnect_count: Number of elements in interconnect[] 70 70 * @interconnect: Interconnect array 71 71 */ 72 - struct ipa_clock { 72 + struct ipa_power { 73 73 struct device *dev; 74 74 struct clk *core; 75 75 spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ ··· 108 108 } 109 109 110 110 /* Initialize interconnects required for IPA operation */ 111 - static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev, 111 + static int ipa_interconnect_init(struct ipa_power *power, struct device *dev, 112 112 const struct ipa_interconnect_data *data) 113 113 { 114 114 struct ipa_interconnect *interconnect; 115 115 u32 count; 116 116 int ret; 117 117 118 - count = clock->interconnect_count; 118 + count = power->interconnect_count; 119 119 interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); 120 120 if (!interconnect) 121 121 return -ENOMEM; 122 - clock->interconnect = interconnect; 122 + power->interconnect = interconnect; 123 123 124 124 while (count--) { 125 125 ret = ipa_interconnect_init_one(dev, interconnect, data++); ··· 131 131 return 0; 132 132 133 133 out_unwind: 134 - while (interconnect-- > clock->interconnect) 134 + while (interconnect-- > power->interconnect) 135 135 ipa_interconnect_exit_one(interconnect); 136 - kfree(clock->interconnect); 137 - clock->interconnect = NULL; 136 + kfree(power->interconnect); 137 + power->interconnect = NULL; 138 138 139 139 return ret; 140 140 } 141 141 142 142 /* Inverse of ipa_interconnect_init() */ 143 - static void ipa_interconnect_exit(struct ipa_clock *clock) 
143 + static void ipa_interconnect_exit(struct ipa_power *power) 144 144 { 145 145 struct ipa_interconnect *interconnect; 146 146 147 - interconnect = clock->interconnect + clock->interconnect_count; 148 - while (interconnect-- > clock->interconnect) 147 + interconnect = power->interconnect + power->interconnect_count; 148 + while (interconnect-- > power->interconnect) 149 149 ipa_interconnect_exit_one(interconnect); 150 - kfree(clock->interconnect); 151 - clock->interconnect = NULL; 150 + kfree(power->interconnect); 151 + power->interconnect = NULL; 152 152 } 153 153 154 154 /* Currently we only use one bandwidth level, so just "enable" interconnects */ 155 155 static int ipa_interconnect_enable(struct ipa *ipa) 156 156 { 157 157 struct ipa_interconnect *interconnect; 158 - struct ipa_clock *clock = ipa->clock; 158 + struct ipa_power *power = ipa->power; 159 159 int ret; 160 160 u32 i; 161 161 162 - interconnect = clock->interconnect; 163 - for (i = 0; i < clock->interconnect_count; i++) { 162 + interconnect = power->interconnect; 163 + for (i = 0; i < power->interconnect_count; i++) { 164 164 ret = icc_set_bw(interconnect->path, 165 165 interconnect->average_bandwidth, 166 166 interconnect->peak_bandwidth); ··· 176 176 return 0; 177 177 178 178 out_unwind: 179 - while (interconnect-- > clock->interconnect) 179 + while (interconnect-- > power->interconnect) 180 180 (void)icc_set_bw(interconnect->path, 0, 0); 181 181 182 182 return ret; ··· 186 186 static int ipa_interconnect_disable(struct ipa *ipa) 187 187 { 188 188 struct ipa_interconnect *interconnect; 189 - struct ipa_clock *clock = ipa->clock; 189 + struct ipa_power *power = ipa->power; 190 190 struct device *dev = &ipa->pdev->dev; 191 191 int result = 0; 192 192 u32 count; 193 193 int ret; 194 194 195 - count = clock->interconnect_count; 196 - interconnect = clock->interconnect + count; 195 + count = power->interconnect_count; 196 + interconnect = power->interconnect + count; 197 197 while (count--) { 198 
198 interconnect--; 199 199 ret = icc_set_bw(interconnect->path, 0, 0); ··· 209 209 return result; 210 210 } 211 211 212 - /* Turn on IPA clocks, including interconnects */ 213 - static int ipa_clock_enable(struct ipa *ipa) 212 + /* Enable IPA power, enabling interconnects and the core clock */ 213 + static int ipa_power_enable(struct ipa *ipa) 214 214 { 215 215 int ret; 216 216 ··· 218 218 if (ret) 219 219 return ret; 220 220 221 - ret = clk_prepare_enable(ipa->clock->core); 221 + ret = clk_prepare_enable(ipa->power->core); 222 222 if (ret) { 223 223 dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret); 224 224 (void)ipa_interconnect_disable(ipa); ··· 227 227 return ret; 228 228 } 229 229 230 - /* Inverse of ipa_clock_enable() */ 231 - static int ipa_clock_disable(struct ipa *ipa) 230 + /* Inverse of ipa_power_enable() */ 231 + static int ipa_power_disable(struct ipa *ipa) 232 232 { 233 - clk_disable_unprepare(ipa->clock->core); 233 + clk_disable_unprepare(ipa->power->core); 234 234 235 235 return ipa_interconnect_disable(ipa); 236 236 } ··· 241 241 242 242 /* Endpoints aren't usable until setup is complete */ 243 243 if (ipa->setup_complete) { 244 - __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags); 244 + __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags); 245 245 ipa_endpoint_suspend(ipa); 246 246 gsi_suspend(&ipa->gsi); 247 247 } 248 248 249 - return ipa_clock_disable(ipa); 249 + return ipa_power_disable(ipa); 250 250 } 251 251 252 252 static int ipa_runtime_resume(struct device *dev) ··· 254 254 struct ipa *ipa = dev_get_drvdata(dev); 255 255 int ret; 256 256 257 - ret = ipa_clock_enable(ipa); 257 + ret = ipa_power_enable(ipa); 258 258 if (WARN_ON(ret < 0)) 259 259 return ret; 260 260 ··· 267 267 return 0; 268 268 } 269 269 270 - static int ipa_runtime_idle(struct device *dev) 271 - { 272 - return -EAGAIN; 273 - } 274 - 275 270 static int ipa_suspend(struct device *dev) 276 271 { 277 272 struct ipa *ipa = dev_get_drvdata(dev); 278 273 279 - 
__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->clock->flags); 274 + __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); 280 275 281 276 return pm_runtime_force_suspend(dev); 282 277 } ··· 283 288 284 289 ret = pm_runtime_force_resume(dev); 285 290 286 - __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->clock->flags); 291 + __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); 287 292 288 293 return ret; 289 294 } 290 295 291 296 /* Return the current IPA core clock rate */ 292 - u32 ipa_clock_rate(struct ipa *ipa) 297 + u32 ipa_core_clock_rate(struct ipa *ipa) 293 298 { 294 - return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0; 299 + return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0; 295 300 } 296 301 297 302 /** ··· 310 315 * just to handle the interrupt, so we're done. If we are in a 311 316 * system suspend, trigger a system resume. 312 317 */ 313 - if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags)) 314 - if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->clock->flags)) 318 + if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags)) 319 + if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags)) 315 320 pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); 316 321 317 322 /* Acknowledge/clear the suspend interrupt on all endpoints */ ··· 343 348 */ 344 349 void ipa_power_modem_queue_stop(struct ipa *ipa) 345 350 { 346 - struct ipa_clock *clock = ipa->clock; 351 + struct ipa_power *power = ipa->power; 347 352 unsigned long flags; 348 353 349 - spin_lock_irqsave(&clock->spinlock, flags); 354 + spin_lock_irqsave(&power->spinlock, flags); 350 355 351 - if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, clock->flags)) { 356 + if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) { 352 357 netif_stop_queue(ipa->modem_netdev); 353 - __set_bit(IPA_POWER_FLAG_STOPPED, clock->flags); 358 + __set_bit(IPA_POWER_FLAG_STOPPED, power->flags); 354 359 } 355 360 356 - spin_unlock_irqrestore(&clock->spinlock, flags); 361 + 
spin_unlock_irqrestore(&power->spinlock, flags); 357 362 } 358 363 359 364 /* This function starts the modem netdev transmit queue, but only if the ··· 363 368 */ 364 369 void ipa_power_modem_queue_wake(struct ipa *ipa) 365 370 { 366 - struct ipa_clock *clock = ipa->clock; 371 + struct ipa_power *power = ipa->power; 367 372 unsigned long flags; 368 373 369 - spin_lock_irqsave(&clock->spinlock, flags); 374 + spin_lock_irqsave(&power->spinlock, flags); 370 375 371 - if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, clock->flags)) { 372 - __set_bit(IPA_POWER_FLAG_STARTED, clock->flags); 376 + if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) { 377 + __set_bit(IPA_POWER_FLAG_STARTED, power->flags); 373 378 netif_wake_queue(ipa->modem_netdev); 374 379 } 375 380 376 - spin_unlock_irqrestore(&clock->spinlock, flags); 381 + spin_unlock_irqrestore(&power->spinlock, flags); 377 382 } 378 383 379 384 /* This function clears the STARTED flag once the TX queue is operating */ 380 385 void ipa_power_modem_queue_active(struct ipa *ipa) 381 386 { 382 - clear_bit(IPA_POWER_FLAG_STARTED, ipa->clock->flags); 387 + clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags); 383 388 } 384 389 385 390 int ipa_power_setup(struct ipa *ipa) ··· 402 407 ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); 403 408 } 404 409 405 - /* Initialize IPA clocking */ 406 - struct ipa_clock * 407 - ipa_clock_init(struct device *dev, const struct ipa_clock_data *data) 410 + /* Initialize IPA power management */ 411 + struct ipa_power * 412 + ipa_power_init(struct device *dev, const struct ipa_power_data *data) 408 413 { 409 - struct ipa_clock *clock; 414 + struct ipa_power *power; 410 415 struct clk *clk; 411 416 int ret; 412 417 ··· 424 429 goto err_clk_put; 425 430 } 426 431 427 - clock = kzalloc(sizeof(*clock), GFP_KERNEL); 428 - if (!clock) { 432 + power = kzalloc(sizeof(*power), GFP_KERNEL); 433 + if (!power) { 429 434 ret = -ENOMEM; 430 435 goto err_clk_put; 431 436 } 432 - 
clock->dev = dev; 433 - clock->core = clk; 434 - spin_lock_init(&clock->spinlock); 435 - clock->interconnect_count = data->interconnect_count; 437 + power->dev = dev; 438 + power->core = clk; 439 + spin_lock_init(&power->spinlock); 440 + power->interconnect_count = data->interconnect_count; 436 441 437 - ret = ipa_interconnect_init(clock, dev, data->interconnect_data); 442 + ret = ipa_interconnect_init(power, dev, data->interconnect_data); 438 443 if (ret) 439 444 goto err_kfree; 440 445 441 - pm_runtime_dont_use_autosuspend(dev); 446 + pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY); 447 + pm_runtime_use_autosuspend(dev); 442 448 pm_runtime_enable(dev); 443 449 444 - return clock; 450 + return power; 445 451 446 452 err_kfree: 447 - kfree(clock); 453 + kfree(power); 448 454 err_clk_put: 449 455 clk_put(clk); 450 456 451 457 return ERR_PTR(ret); 452 458 } 453 459 454 - /* Inverse of ipa_clock_init() */ 455 - void ipa_clock_exit(struct ipa_clock *clock) 460 + /* Inverse of ipa_power_init() */ 461 + void ipa_power_exit(struct ipa_power *power) 456 462 { 457 - struct clk *clk = clock->core; 463 + struct device *dev = power->dev; 464 + struct clk *clk = power->core; 458 465 459 - pm_runtime_disable(clock->dev); 460 - ipa_interconnect_exit(clock); 461 - kfree(clock); 466 + pm_runtime_disable(dev); 467 + pm_runtime_dont_use_autosuspend(dev); 468 + ipa_interconnect_exit(power); 469 + kfree(power); 462 470 clk_put(clk); 463 471 } 464 472 ··· 470 472 .resume = ipa_resume, 471 473 .runtime_suspend = ipa_runtime_suspend, 472 474 .runtime_resume = ipa_runtime_resume, 473 - .runtime_idle = ipa_runtime_idle, 474 475 };
+13 -13
drivers/net/ipa/ipa_clock.h drivers/net/ipa/ipa_power.h
··· 3 3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 4 4 * Copyright (C) 2018-2020 Linaro Ltd. 5 5 */ 6 - #ifndef _IPA_CLOCK_H_ 7 - #define _IPA_CLOCK_H_ 6 + #ifndef _IPA_POWER_H_ 7 + #define _IPA_POWER_H_ 8 8 9 9 struct device; 10 10 11 11 struct ipa; 12 - struct ipa_clock_data; 12 + struct ipa_power_data; 13 13 14 14 /* IPA device power management function block */ 15 15 extern const struct dev_pm_ops ipa_pm_ops; 16 16 17 17 /** 18 - * ipa_clock_rate() - Return the current IPA core clock rate 18 + * ipa_core_clock_rate() - Return the current IPA core clock rate 19 19 * @ipa: IPA structure 20 20 * 21 21 * Return: The current clock rate (in Hz), or 0. 22 22 */ 23 - u32 ipa_clock_rate(struct ipa *ipa); 23 + u32 ipa_core_clock_rate(struct ipa *ipa); 24 24 25 25 /** 26 26 * ipa_power_modem_queue_stop() - Possibly stop the modem netdev TX queue ··· 55 55 void ipa_power_teardown(struct ipa *ipa); 56 56 57 57 /** 58 - * ipa_clock_init() - Initialize IPA clocking 58 + * ipa_power_init() - Initialize IPA power management 59 59 * @dev: IPA device 60 60 * @data: Clock configuration data 61 61 * 62 - * Return: A pointer to an ipa_clock structure, or a pointer-coded error 62 + * Return: A pointer to an ipa_power structure, or a pointer-coded error 63 63 */ 64 - struct ipa_clock *ipa_clock_init(struct device *dev, 65 - const struct ipa_clock_data *data); 64 + struct ipa_power *ipa_power_init(struct device *dev, 65 + const struct ipa_power_data *data); 66 66 67 67 /** 68 - * ipa_clock_exit() - Inverse of ipa_clock_init() 69 - * @clock: IPA clock pointer 68 + * ipa_power_exit() - Inverse of ipa_power_init() 69 + * @power: IPA power pointer 70 70 */ 71 - void ipa_clock_exit(struct ipa_clock *clock); 71 + void ipa_power_exit(struct ipa_power *power); 72 72 73 - #endif /* _IPA_CLOCK_H_ */ 73 + #endif /* _IPA_POWER_H_ */
+2 -2
drivers/net/ipa/ipa_data-v3.1.c
··· 513 513 }; 514 514 515 515 /* Clock and interconnect configuration data for an SoC having IPA v3.1 */ 516 - static const struct ipa_clock_data ipa_clock_data = { 516 + static const struct ipa_power_data ipa_power_data = { 517 517 .core_clock_rate = 16 * 1000 * 1000, /* Hz */ 518 518 .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), 519 519 .interconnect_data = ipa_interconnect_data, ··· 529 529 .endpoint_data = ipa_gsi_endpoint_data, 530 530 .resource_data = &ipa_resource_data, 531 531 .mem_data = &ipa_mem_data, 532 - .clock_data = &ipa_clock_data, 532 + .power_data = &ipa_power_data, 533 533 };
+2 -2
drivers/net/ipa/ipa_data-v3.5.1.c
··· 394 394 }; 395 395 396 396 /* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */ 397 - static const struct ipa_clock_data ipa_clock_data = { 397 + static const struct ipa_power_data ipa_power_data = { 398 398 .core_clock_rate = 75 * 1000 * 1000, /* Hz */ 399 399 .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), 400 400 .interconnect_data = ipa_interconnect_data, ··· 414 414 .endpoint_data = ipa_gsi_endpoint_data, 415 415 .resource_data = &ipa_resource_data, 416 416 .mem_data = &ipa_mem_data, 417 - .clock_data = &ipa_clock_data, 417 + .power_data = &ipa_power_data, 418 418 };
+2 -2
drivers/net/ipa/ipa_data-v4.11.c
··· 382 382 }; 383 383 384 384 /* Clock and interconnect configuration data for an SoC having IPA v4.11 */ 385 - static const struct ipa_clock_data ipa_clock_data = { 385 + static const struct ipa_power_data ipa_power_data = { 386 386 .core_clock_rate = 60 * 1000 * 1000, /* Hz */ 387 387 .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), 388 388 .interconnect_data = ipa_interconnect_data, ··· 397 397 .endpoint_data = ipa_gsi_endpoint_data, 398 398 .resource_data = &ipa_resource_data, 399 399 .mem_data = &ipa_mem_data, 400 - .clock_data = &ipa_clock_data, 400 + .power_data = &ipa_power_data, 401 401 };
+2 -2
drivers/net/ipa/ipa_data-v4.2.c
··· 360 360 }; 361 361 362 362 /* Clock and interconnect configuration data for an SoC having IPA v4.2 */ 363 - static const struct ipa_clock_data ipa_clock_data = { 363 + static const struct ipa_power_data ipa_power_data = { 364 364 .core_clock_rate = 100 * 1000 * 1000, /* Hz */ 365 365 .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), 366 366 .interconnect_data = ipa_interconnect_data, ··· 376 376 .endpoint_data = ipa_gsi_endpoint_data, 377 377 .resource_data = &ipa_resource_data, 378 378 .mem_data = &ipa_mem_data, 379 - .clock_data = &ipa_clock_data, 379 + .power_data = &ipa_power_data, 380 380 };
+2 -2
drivers/net/ipa/ipa_data-v4.5.c
··· 443 443 }; 444 444 445 445 /* Clock and interconnect configuration data for an SoC having IPA v4.5 */ 446 - static const struct ipa_clock_data ipa_clock_data = { 446 + static const struct ipa_power_data ipa_power_data = { 447 447 .core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) */ 448 448 .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), 449 449 .interconnect_data = ipa_interconnect_data, ··· 458 458 .endpoint_data = ipa_gsi_endpoint_data, 459 459 .resource_data = &ipa_resource_data, 460 460 .mem_data = &ipa_mem_data, 461 - .clock_data = &ipa_clock_data, 461 + .power_data = &ipa_power_data, 462 462 };
+2 -2
drivers/net/ipa/ipa_data-v4.9.c
··· 432 432 }; 433 433 434 434 /* Clock and interconnect configuration data for an SoC having IPA v4.9 */ 435 - static const struct ipa_clock_data ipa_clock_data = { 435 + static const struct ipa_power_data ipa_power_data = { 436 436 .core_clock_rate = 60 * 1000 * 1000, /* Hz */ 437 437 .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), 438 438 .interconnect_data = ipa_interconnect_data, ··· 447 447 .endpoint_data = ipa_gsi_endpoint_data, 448 448 .resource_data = &ipa_resource_data, 449 449 .mem_data = &ipa_mem_data, 450 - .clock_data = &ipa_clock_data, 450 + .power_data = &ipa_power_data, 451 451 };
+5 -5
drivers/net/ipa/ipa_data.h
··· 19 19 * IPA and GSI resources to use for a given platform. This data is supplied 20 20 * via the Device Tree match table, associated with a particular compatible 21 21 * string. The data defines information about how resources, endpoints and 22 - * channels, memory, clocking and so on are allocated and used for the 22 + * channels, memory, power and so on are allocated and used for the 23 23 * platform. 24 24 * 25 25 * Resources are data structures used internally by the IPA hardware. The ··· 265 265 }; 266 266 267 267 /** 268 - * struct ipa_clock_data - description of IPA clock and interconnect rates 268 + * struct ipa_power_data - description of IPA power configuration data 269 269 * @core_clock_rate: Core clock rate (Hz) 270 270 * @interconnect_count: Number of entries in the interconnect_data array 271 271 * @interconnect_data: IPA interconnect configuration data 272 272 */ 273 - struct ipa_clock_data { 273 + struct ipa_power_data { 274 274 u32 core_clock_rate; 275 275 u32 interconnect_count; /* # entries in interconnect_data[] */ 276 276 const struct ipa_interconnect_data *interconnect_data; ··· 286 286 * @endpoint_data: IPA endpoint/GSI channel data 287 287 * @resource_data: IPA resource configuration data 288 288 * @mem_data: IPA memory region data 289 - * @clock_data: IPA clock and interconnect data 289 + * @power_data: IPA power data 290 290 */ 291 291 struct ipa_data { 292 292 enum ipa_version version; ··· 297 297 const struct ipa_gsi_endpoint_data *endpoint_data; 298 298 const struct ipa_resource_data *resource_data; 299 299 const struct ipa_mem_data *mem_data; 300 - const struct ipa_clock_data *clock_data; 300 + const struct ipa_power_data *power_data; 301 301 }; 302 302 303 303 extern const struct ipa_data ipa_data_v3_1;
+2 -2
drivers/net/ipa/ipa_endpoint.c
··· 21 21 #include "ipa_modem.h" 22 22 #include "ipa_table.h" 23 23 #include "ipa_gsi.h" 24 - #include "ipa_clock.h" 24 + #include "ipa_power.h" 25 25 26 26 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 27 27 ··· 810 810 return hol_block_timer_qtime_val(ipa, microseconds); 811 811 812 812 /* Use 64 bit arithmetic to avoid overflow... */ 813 - rate = ipa_clock_rate(ipa); 813 + rate = ipa_core_clock_rate(ipa); 814 814 ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); 815 815 /* ...but we still need to fit into a 32-bit register */ 816 816 WARN_ON(ticks > U32_MAX);
+2 -1
drivers/net/ipa/ipa_interrupt.c
··· 116 116 iowrite32(pending, ipa->reg_virt + offset); 117 117 } 118 118 out_power_put: 119 - (void)pm_runtime_put(dev); 119 + pm_runtime_mark_last_busy(dev); 120 + (void)pm_runtime_put_autosuspend(dev); 120 121 121 122 return IRQ_HANDLED; 122 123 }
+23 -22
drivers/net/ipa/ipa_main.c
··· 20 20 #include <linux/soc/qcom/mdt_loader.h> 21 21 22 22 #include "ipa.h" 23 - #include "ipa_clock.h" 23 + #include "ipa_power.h" 24 24 #include "ipa_data.h" 25 25 #include "ipa_endpoint.h" 26 26 #include "ipa_resource.h" ··· 326 326 * @ipa: IPA pointer 327 327 * 328 328 * Configures when the IPA signals it is idle to the global clock 329 - * controller, which can respond by scalling down the clock to 330 - * save power. 329 + * controller, which can respond by scaling down the clock to save 330 + * power. 331 331 */ 332 332 static void ipa_hardware_dcd_config(struct ipa *ipa) 333 333 { ··· 417 417 * @ipa: IPA pointer 418 418 * @data: IPA configuration data 419 419 * 420 - * Perform initialization requiring IPA clock to be enabled. 420 + * Perform initialization requiring IPA power to be enabled. 421 421 */ 422 422 static int ipa_config(struct ipa *ipa, const struct ipa_data *data) 423 423 { ··· 647 647 * in several stages: 648 648 * - The "init" stage involves activities that can be initialized without 649 649 * access to the IPA hardware. 650 - * - The "config" stage requires the IPA clock to be active so IPA registers 650 + * - The "config" stage requires IPA power to be active so IPA registers 651 651 * can be accessed, but does not require the use of IPA immediate commands. 652 652 * - The "setup" stage uses IPA immediate commands, and so requires the GSI 653 653 * layer to be initialized. 
··· 663 663 { 664 664 struct device *dev = &pdev->dev; 665 665 const struct ipa_data *data; 666 - struct ipa_clock *clock; 666 + struct ipa_power *power; 667 667 bool modem_init; 668 668 struct ipa *ipa; 669 669 int ret; 670 670 671 671 ipa_validate_build(); 672 672 673 - /* Get configuration data early; needed for clock initialization */ 673 + /* Get configuration data early; needed for power initialization */ 674 674 data = of_device_get_match_data(dev); 675 675 if (!data) { 676 676 dev_err(dev, "matched hardware not supported\n"); ··· 691 691 /* The clock and interconnects might not be ready when we're 692 692 * probed, so might return -EPROBE_DEFER. 693 693 */ 694 - clock = ipa_clock_init(dev, data->clock_data); 695 - if (IS_ERR(clock)) 696 - return PTR_ERR(clock); 694 + power = ipa_power_init(dev, data->power_data); 695 + if (IS_ERR(power)) 696 + return PTR_ERR(power); 697 697 698 698 /* No more EPROBE_DEFER. Allocate and initialize the IPA structure */ 699 699 ipa = kzalloc(sizeof(*ipa), GFP_KERNEL); 700 700 if (!ipa) { 701 701 ret = -ENOMEM; 702 - goto err_clock_exit; 702 + goto err_power_exit; 703 703 } 704 704 705 705 ipa->pdev = pdev; 706 706 dev_set_drvdata(dev, ipa); 707 - ipa->clock = clock; 707 + ipa->power = power; 708 708 ipa->version = data->version; 709 709 init_completion(&ipa->completion); 710 710 ··· 737 737 if (ret) 738 738 goto err_table_exit; 739 739 740 - /* The clock needs to be active for config and setup */ 740 + /* Power needs to be active for config and setup */ 741 741 ret = pm_runtime_get_sync(dev); 742 742 if (WARN_ON(ret < 0)) 743 743 goto err_power_put; ··· 766 766 if (ret) 767 767 goto err_deconfig; 768 768 done: 769 - (void)pm_runtime_put(dev); 769 + pm_runtime_mark_last_busy(dev); 770 + (void)pm_runtime_put_autosuspend(dev); 770 771 771 772 return 0; 772 773 773 774 err_deconfig: 774 775 ipa_deconfig(ipa); 775 776 err_power_put: 776 - (void)pm_runtime_put(dev); 777 + pm_runtime_put_noidle(dev); 777 778 ipa_modem_exit(ipa); 778 
779 err_table_exit: 779 780 ipa_table_exit(ipa); ··· 788 787 ipa_reg_exit(ipa); 789 788 err_kfree_ipa: 790 789 kfree(ipa); 791 - err_clock_exit: 792 - ipa_clock_exit(clock); 790 + err_power_exit: 791 + ipa_power_exit(power); 793 792 794 793 return ret; 795 794 } ··· 797 796 static int ipa_remove(struct platform_device *pdev) 798 797 { 799 798 struct ipa *ipa = dev_get_drvdata(&pdev->dev); 800 - struct ipa_clock *clock = ipa->clock; 799 + struct ipa_power *power = ipa->power; 800 + struct device *dev = &pdev->dev; 801 801 int ret; 802 802 803 - ret = pm_runtime_get_sync(&pdev->dev); 803 + ret = pm_runtime_get_sync(dev); 804 804 if (WARN_ON(ret < 0)) 805 805 goto out_power_put; 806 806 ··· 820 818 821 819 ipa_deconfig(ipa); 822 820 out_power_put: 823 - (void)pm_runtime_put(&pdev->dev); 824 - 821 + pm_runtime_put_noidle(dev); 825 822 ipa_modem_exit(ipa); 826 823 ipa_table_exit(ipa); 827 824 ipa_endpoint_exit(ipa); ··· 828 827 ipa_mem_exit(ipa); 829 828 ipa_reg_exit(ipa); 830 829 kfree(ipa); 831 - ipa_clock_exit(clock); 830 + ipa_power_exit(power); 832 831 833 832 return 0; 834 833 }
+12 -8
drivers/net/ipa/ipa_modem.c
··· 21 21 #include "ipa_smp2p.h" 22 22 #include "ipa_qmi.h" 23 23 #include "ipa_uc.h" 24 - #include "ipa_clock.h" 24 + #include "ipa_power.h" 25 25 26 26 #define IPA_NETDEV_NAME "rmnet_ipa%d" 27 27 #define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */ ··· 67 67 68 68 netif_start_queue(netdev); 69 69 70 - (void)pm_runtime_put(dev); 70 + pm_runtime_mark_last_busy(dev); 71 + (void)pm_runtime_put_autosuspend(dev); 71 72 72 73 return 0; 73 74 74 75 err_disable_tx: 75 76 ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); 76 77 err_power_put: 77 - (void)pm_runtime_put(dev); 78 + pm_runtime_put_noidle(dev); 78 79 79 80 return ret; 80 81 } ··· 98 97 ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); 99 98 ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); 100 99 out_power_put: 101 - (void)pm_runtime_put(dev); 100 + pm_runtime_mark_last_busy(dev); 101 + (void)pm_runtime_put_autosuspend(dev); 102 102 103 103 return 0; 104 104 } ··· 147 145 */ 148 146 ipa_power_modem_queue_stop(ipa); 149 147 150 - (void)pm_runtime_put(dev); 148 + pm_runtime_put_noidle(dev); 151 149 152 150 return NETDEV_TX_BUSY; 153 151 } ··· 156 154 157 155 ret = ipa_endpoint_skb_tx(endpoint, skb); 158 156 159 - (void)pm_runtime_put(dev); 157 + pm_runtime_mark_last_busy(dev); 158 + (void)pm_runtime_put_autosuspend(dev); 160 159 161 160 if (ret) { 162 161 if (ret != -E2BIG) ··· 401 398 dev_err(dev, "error %d zeroing modem memory regions\n", ret); 402 399 403 400 out_power_put: 404 - (void)pm_runtime_put(dev); 401 + pm_runtime_mark_last_busy(dev); 402 + (void)pm_runtime_put_autosuspend(dev); 405 403 } 406 404 407 405 static int ipa_modem_notify(struct notifier_block *nb, unsigned long action, ··· 415 411 switch (action) { 416 412 case QCOM_SSR_BEFORE_POWERUP: 417 413 dev_info(dev, "received modem starting event\n"); 418 - ipa_uc_clock(ipa); 414 + ipa_uc_power(ipa); 419 415 ipa_smp2p_notify_reset(ipa); 420 416 break; 421 417
+35 -31
drivers/net/ipa/ipa_smp2p.c
··· 23 23 * SMP2P is a primitive communication mechanism available between the AP and 24 24 * the modem. The IPA driver uses this for two purposes: to enable the modem 25 25 * to state that the GSI hardware is ready to use; and to communicate the 26 - * state of the IPA clock in the event of a crash. 26 + * state of IPA power in the event of a crash. 27 27 * 28 28 * GSI needs to have early initialization completed before it can be used. 29 29 * This initialization is done either by Trust Zone or by the modem. In the 30 30 * latter case, the modem uses an SMP2P interrupt to tell the AP IPA driver 31 31 * when the GSI is ready to use. 32 32 * 33 - * The modem is also able to inquire about the current state of the IPA 34 - * clock by trigging another SMP2P interrupt to the AP. We communicate 35 - * whether the clock is enabled using two SMP2P state bits--one to 36 - * indicate the clock state (on or off), and a second to indicate the 37 - * clock state bit is valid. The modem will poll the valid bit until it 38 - * is set, and at that time records whether the AP has the IPA clock enabled. 33 + * The modem is also able to inquire about the current state of IPA 34 + * power by trigging another SMP2P interrupt to the AP. We communicate 35 + * whether power is enabled using two SMP2P state bits--one to indicate 36 + * the power state (on or off), and a second to indicate the power state 37 + * bit is valid. The modem will poll the valid bit until it is set, and 38 + * at that time records whether the AP has IPA power enabled. 39 39 * 40 40 * Finally, if the AP kernel panics, we update the SMP2P state bits even if 41 41 * we never receive an interrupt from the modem requesting this. 
··· 45 45 * struct ipa_smp2p - IPA SMP2P information 46 46 * @ipa: IPA pointer 47 47 * @valid_state: SMEM state indicating enabled state is valid 48 - * @enabled_state: SMEM state to indicate clock is enabled 48 + * @enabled_state: SMEM state to indicate power is enabled 49 49 * @valid_bit: Valid bit in 32-bit SMEM state mask 50 50 * @enabled_bit: Enabled bit in 32-bit SMEM state mask 51 51 * @enabled_bit: Enabled bit in 32-bit SMEM state mask 52 - * @clock_query_irq: IPA interrupt triggered by modem for clock query 52 + * @clock_query_irq: IPA interrupt triggered by modem for power query 53 53 * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready 54 - * @clock_on: Whether IPA clock is on 55 - * @notified: Whether modem has been notified of clock state 54 + * @power_on: Whether IPA power is on 55 + * @notified: Whether modem has been notified of power state 56 56 * @disabled: Whether setup ready interrupt handling is disabled 57 57 * @mutex: Mutex protecting ready-interrupt/shutdown interlock 58 58 * @panic_notifier: Panic notifier structure ··· 65 65 u32 enabled_bit; 66 66 u32 clock_query_irq; 67 67 u32 setup_ready_irq; 68 - bool clock_on; 68 + bool power_on; 69 69 bool notified; 70 70 bool disabled; 71 71 struct mutex mutex; ··· 73 73 }; 74 74 75 75 /** 76 - * ipa_smp2p_notify() - use SMP2P to tell modem about IPA clock state 76 + * ipa_smp2p_notify() - use SMP2P to tell modem about IPA power state 77 77 * @smp2p: SMP2P information 78 78 * 79 79 * This is called either when the modem has requested it (by triggering 80 - * the modem clock query IPA interrupt) or whenever the AP is shutting down 80 + * the modem power query IPA interrupt) or whenever the AP is shutting down 81 81 * (via a panic notifier). It sets the two SMP2P state bits--one saying 82 - * whether the IPA clock is running, and the other indicating the first bit 82 + * whether the IPA power is on, and the other indicating the first bit 83 83 * is valid. 
84 84 */ 85 85 static void ipa_smp2p_notify(struct ipa_smp2p *smp2p) ··· 92 92 return; 93 93 94 94 dev = &smp2p->ipa->pdev->dev; 95 - smp2p->clock_on = pm_runtime_get_if_active(dev, true) > 0; 95 + smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0; 96 96 97 - /* Signal whether the clock is enabled */ 97 + /* Signal whether the IPA power is enabled */ 98 98 mask = BIT(smp2p->enabled_bit); 99 - value = smp2p->clock_on ? mask : 0; 99 + value = smp2p->power_on ? mask : 0; 100 100 qcom_smem_state_update_bits(smp2p->enabled_state, mask, value); 101 101 102 102 /* Now indicate that the enabled flag is valid */ ··· 126 126 127 127 ipa_smp2p_notify(smp2p); 128 128 129 - if (smp2p->clock_on) 129 + if (smp2p->power_on) 130 130 ipa_uc_panic_notifier(smp2p->ipa); 131 131 132 132 return NOTIFY_DONE; ··· 174 174 WARN(ret != 0, "error %d from ipa_setup()\n", ret); 175 175 176 176 out_power_put: 177 - (void)pm_runtime_put(dev); 177 + pm_runtime_mark_last_busy(dev); 178 + (void)pm_runtime_put_autosuspend(dev); 178 179 out_mutex_unlock: 179 180 mutex_unlock(&smp2p->mutex); 180 181 ··· 209 208 free_irq(irq, smp2p); 210 209 } 211 210 212 - /* Drop the clock reference if it was taken in ipa_smp2p_notify() */ 213 - static void ipa_smp2p_clock_release(struct ipa *ipa) 211 + /* Drop the power reference if it was taken in ipa_smp2p_notify() */ 212 + static void ipa_smp2p_power_release(struct ipa *ipa) 214 213 { 215 - if (!ipa->smp2p->clock_on) 214 + struct device *dev = &ipa->pdev->dev; 215 + 216 + if (!ipa->smp2p->power_on) 216 217 return; 217 218 - (void)pm_runtime_put(&ipa->pdev->dev); 219 - ipa->smp2p->clock_on = false; 219 + pm_runtime_mark_last_busy(dev); 220 + (void)pm_runtime_put_autosuspend(dev); 221 + ipa->smp2p->power_on = false; 220 222 } 221 223 222 224 /* Initialize the IPA SMP2P subsystem */ ··· 253 249 254 250 smp2p->ipa = ipa; 255 251 256 - /* These fields are needed by the clock query interrupt 252 + /* These fields are needed by the power query interrupt
257 253 * handler, so initialize them now. 258 254 */ 259 255 mutex_init(&smp2p->mutex); ··· 306 302 ipa_smp2p_irq_exit(smp2p, smp2p->setup_ready_irq); 307 303 ipa_smp2p_panic_notifier_unregister(smp2p); 308 304 ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq); 309 - /* We won't get notified any more; drop clock reference (if any) */ 310 - ipa_smp2p_clock_release(ipa); 305 + /* We won't get notified any more; drop power reference (if any) */ 306 + ipa_smp2p_power_release(ipa); 311 307 ipa->smp2p = NULL; 312 308 mutex_destroy(&smp2p->mutex); 313 309 kfree(smp2p); ··· 336 332 if (!smp2p->notified) 337 333 return; 338 334 339 - ipa_smp2p_clock_release(ipa); 335 + ipa_smp2p_power_release(ipa); 340 336 341 - /* Reset the clock enabled valid flag */ 337 + /* Reset the power enabled valid flag */ 342 338 mask = BIT(smp2p->valid_bit); 343 339 qcom_smem_state_update_bits(smp2p->valid_state, mask, 0); 344 340 345 - /* Mark the clock disabled for good measure... */ 341 + /* Mark the power disabled for good measure... */ 346 342 mask = BIT(smp2p->enabled_bit); 347 343 qcom_smem_state_update_bits(smp2p->enabled_state, mask, 0); 348 344
+1 -1
drivers/net/ipa/ipa_smp2p.h
··· 39 39 * ipa_smp2p_notify_reset() - Reset modem notification state 40 40 * @ipa: IPA pointer 41 41 * 42 - * If the modem crashes it queries the IPA clock state. In cleaning 42 + * If the modem crashes it queries the IPA power state. In cleaning 43 43 * up after such a crash this is used to reset some state maintained 44 44 * for managing this notification. 45 45 */
+16 -10
drivers/net/ipa/ipa_uc.c
··· 147 147 * should only receive responses from the microcontroller when it has 148 148 * sent it a request message. 149 149 * 150 - * We can drop the clock reference taken in ipa_uc_clock() once we 150 + * We can drop the power reference taken in ipa_uc_power() once we 151 151 * know the microcontroller has finished its initialization. 152 152 */ 153 153 switch (shared->response) { 154 154 case IPA_UC_RESPONSE_INIT_COMPLETED: 155 - if (ipa->uc_clocked) { 155 + if (ipa->uc_powered) { 156 156 ipa->uc_loaded = true; 157 - (void)pm_runtime_put(dev); 158 - ipa->uc_clocked = false; 157 + pm_runtime_mark_last_busy(dev); 158 + (void)pm_runtime_put_autosuspend(dev); 159 + ipa->uc_powered = false; 159 160 } else { 160 161 dev_warn(dev, "unexpected init_completed response\n"); 161 162 } ··· 171 170 /* Configure the IPA microcontroller subsystem */ 172 171 void ipa_uc_config(struct ipa *ipa) 173 172 { 174 - ipa->uc_clocked = false; 173 + ipa->uc_powered = false; 175 174 ipa->uc_loaded = false; 176 175 ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler); 177 176 ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr); ··· 180 179 /* Inverse of ipa_uc_config() */ 181 180 void ipa_uc_deconfig(struct ipa *ipa) 182 181 { 182 + struct device *dev = &ipa->pdev->dev; 183 + 183 184 ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1); 184 185 ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0); 185 - if (ipa->uc_clocked) 186 - (void)pm_runtime_put(&ipa->pdev->dev); 186 + if (!ipa->uc_powered) 187 + return; 188 + 189 + pm_runtime_mark_last_busy(dev); 190 + (void)pm_runtime_put_autosuspend(dev); 187 191 } 188 192 189 - /* Take a proxy clock reference for the microcontroller */ 190 - void ipa_uc_clock(struct ipa *ipa) 193 + /* Take a proxy power reference for the microcontroller */ 194 + void ipa_uc_power(struct ipa *ipa) 191 195 { 192 196 static bool already; 193 197 struct device *dev; ··· 209 203 pm_runtime_put_noidle(dev);
210 204 dev_err(dev, "error %d getting proxy power\n", ret); 211 205 } else { 212 206 ipa->uc_powered = true; 213 207 } 214 208 } 215 209
+5 -5
drivers/net/ipa/ipa_uc.h
··· 21 21 void ipa_uc_deconfig(struct ipa *ipa); 22 22 23 23 /** 24 - * ipa_uc_clock() - Take a proxy clock reference for the microcontroller 24 + * ipa_uc_power() - Take a proxy power reference for the microcontroller 25 25 * @ipa: IPA pointer 26 26 * 27 27 * The first time the modem boots, it loads firmware for and starts the 28 28 * IPA-resident microcontroller. The microcontroller signals that it 29 29 * has completed its initialization by sending an INIT_COMPLETED response 30 - * message to the AP. The AP must ensure the IPA core clock is operating 31 - * until it receives this message, and to do so we take a "proxy" clock 30 + * message to the AP. The AP must ensure the IPA is powered until 31 + * it receives this message, and to do so we take a "proxy" clock 32 32 * reference on its behalf here. Once we receive the INIT_COMPLETED 33 - * message (in ipa_uc_response_hdlr()) we drop this clock reference. 33 + * message (in ipa_uc_response_hdlr()) we drop this power reference. 34 34 */ 35 - void ipa_uc_clock(struct ipa *ipa); 35 + void ipa_uc_power(struct ipa *ipa); 36 36 37 37 /** 38 38 * ipa_uc_panic_notifier()