Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-ipa-wake-up-system-on-RX-available'

Alex Elder says:

====================
net: ipa: wake up system on RX available

This series arranges for the IPA driver to wake up a suspended
system if the IPA hardware has a packet to deliver to the AP.

Version 2 replaced the first patch from version 1 with three
patches, in response to David Miller's feedback. And based on
Bjorn Andersson's feedback on version 2, this version reworks
the tracking of IPA clock references. As a result, we no
longer need a flag to determine whether a "don't suspend" clock
reference is held (though a bit in a bitmask is still used for
a different purpose).

In summary:
- A refcount_t is used to track IPA clock references where an
atomic_t was previously used. (This may go away soon as well,
with upcoming work to implement runtime PM.)
- We no longer track whether a special reference has been taken
to avoid suspending IPA.
- A bit in a bitmask is used to ensure we only trigger a system
resume once per system suspend.
And from the original series:
- Suspending endpoints only occurs when suspending the driver,
not when dropping the last clock reference. Resuming
endpoints is also disconnected from starting the clock.
- The IPA SUSPEND interrupt is now a wakeup interrupt. If it
fires, it schedules a system resume operation.
- The GSI interrupt is no longer a wakeup interrupt.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+74 -66
+4 -13
drivers/net/ipa/gsi.c
··· 1987 1987 } 1988 1988 gsi->irq = irq; 1989 1989 1990 - ret = enable_irq_wake(gsi->irq); 1991 - if (ret) 1992 - dev_warn(dev, "error %d enabling gsi wake irq\n", ret); 1993 - gsi->irq_wake_enabled = !ret; 1994 - 1995 1990 /* Get GSI memory range and map it */ 1996 1991 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi"); 1997 1992 if (!res) { 1998 1993 dev_err(dev, "DT error getting \"gsi\" memory property\n"); 1999 1994 ret = -ENODEV; 2000 - goto err_disable_irq_wake; 1995 + goto err_free_irq; 2001 1996 } 2002 1997 2003 1998 size = resource_size(res); 2004 1999 if (res->start > U32_MAX || size > U32_MAX - res->start) { 2005 2000 dev_err(dev, "DT memory resource \"gsi\" out of range\n"); 2006 2001 ret = -EINVAL; 2007 - goto err_disable_irq_wake; 2002 + goto err_free_irq; 2008 2003 } 2009 2004 2010 2005 gsi->virt = ioremap(res->start, size); 2011 2006 if (!gsi->virt) { 2012 2007 dev_err(dev, "unable to remap \"gsi\" memory\n"); 2013 2008 ret = -ENOMEM; 2014 - goto err_disable_irq_wake; 2009 + goto err_free_irq; 2015 2010 } 2016 2011 2017 2012 ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc); ··· 2020 2025 2021 2026 err_iounmap: 2022 2027 iounmap(gsi->virt); 2023 - err_disable_irq_wake: 2024 - if (gsi->irq_wake_enabled) 2025 - (void)disable_irq_wake(gsi->irq); 2028 + err_free_irq: 2026 2029 free_irq(gsi->irq, gsi); 2027 2030 2028 2031 return ret; ··· 2031 2038 { 2032 2039 mutex_destroy(&gsi->mutex); 2033 2040 gsi_channel_exit(gsi); 2034 - if (gsi->irq_wake_enabled) 2035 - (void)disable_irq_wake(gsi->irq); 2036 2041 free_irq(gsi->irq, gsi); 2037 2042 iounmap(gsi->virt); 2038 2043 }
-1
drivers/net/ipa/gsi.h
··· 150 150 struct net_device dummy_dev; /* needed for NAPI */ 151 151 void __iomem *virt; 152 152 u32 irq; 153 - bool irq_wake_enabled; 154 153 u32 channel_count; 155 154 u32 evt_ring_count; 156 155 struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+12 -4
drivers/net/ipa/ipa.h
··· 28 28 struct ipa_interrupt; 29 29 30 30 /** 31 + * enum ipa_flag - IPA state flags 32 + * @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled 33 + * @IPA_FLAG_COUNT: Number of defined IPA flags 34 + */ 35 + enum ipa_flag { 36 + IPA_FLAG_RESUMED, 37 + IPA_FLAG_COUNT, /* Last; not a flag */ 38 + }; 39 + 40 + /** 31 41 * struct ipa - IPA information 32 42 * @gsi: Embedded GSI structure 43 + * @flags: Boolean state flags 33 44 * @version: IPA hardware version 34 45 * @pdev: Platform device 35 46 * @modem_rproc: Remoteproc handle for modem subsystem 36 47 * @smp2p: SMP2P information 37 48 * @clock: IPA clocking information 38 - * @suspend_ref: Whether clock reference preventing suspend taken 39 49 * @table_addr: DMA address of filter/route table content 40 50 * @table_virt: Virtual address of filter/route table content 41 51 * @interrupt: IPA Interrupt information ··· 80 70 */ 81 71 struct ipa { 82 72 struct gsi gsi; 73 + DECLARE_BITMAP(flags, IPA_FLAG_COUNT); 83 74 enum ipa_version version; 84 75 struct platform_device *pdev; 85 76 struct rproc *modem_rproc; ··· 88 77 void *notifier; 89 78 struct ipa_smp2p *smp2p; 90 79 struct ipa_clock *clock; 91 - atomic_t suspend_ref; 92 80 93 81 dma_addr_t table_addr; 94 82 __le64 *table_virt; ··· 113 103 dma_addr_t zero_addr; 114 104 void *zero_virt; 115 105 size_t zero_size; 116 - 117 - struct wakeup_source *wakeup_source; 118 106 119 107 /* Bit masks indicating endpoint state */ 120 108 u32 available; /* supported by hardware */
+11 -17
drivers/net/ipa/ipa_clock.c
··· 4 4 * Copyright (C) 2018-2020 Linaro Ltd. 5 5 */ 6 6 7 - #include <linux/atomic.h> 7 + #include <linux/refcount.h> 8 8 #include <linux/mutex.h> 9 9 #include <linux/clk.h> 10 10 #include <linux/device.h> ··· 51 51 * @config_path: Configuration space interconnect 52 52 */ 53 53 struct ipa_clock { 54 - atomic_t count; 54 + refcount_t count; 55 55 struct mutex mutex; /* protects clock enable/disable */ 56 56 struct clk *core; 57 57 struct icc_path *memory_path; ··· 195 195 */ 196 196 bool ipa_clock_get_additional(struct ipa *ipa) 197 197 { 198 - return !!atomic_inc_not_zero(&ipa->clock->count); 198 + return refcount_inc_not_zero(&ipa->clock->count); 199 199 } 200 200 201 201 /* Get an IPA clock reference. If the reference count is non-zero, it is 202 202 * incremented and return is immediate. Otherwise it is checked again 203 - * under protection of the mutex, and if appropriate the clock (and 204 - * interconnects) are enabled suspended endpoints (if any) are resumed 205 - * before returning. 203 + * under protection of the mutex, and if appropriate the IPA clock 204 + * is enabled. 206 205 * 207 206 * Incrementing the reference count is intentionally deferred until 208 207 * after the clock is running and endpoints are resumed. ··· 228 229 goto out_mutex_unlock; 229 230 } 230 231 231 - ipa_endpoint_resume(ipa); 232 - 233 - atomic_inc(&clock->count); 232 + refcount_set(&clock->count, 1); 234 233 235 234 out_mutex_unlock: 236 235 mutex_unlock(&clock->mutex); 237 236 } 238 237 239 - /* Attempt to remove an IPA clock reference. If this represents the last 240 - * reference, suspend endpoints and disable the clock (and interconnects) 241 - * under protection of a mutex. 238 + /* Attempt to remove an IPA clock reference. If this represents the 239 + * last reference, disable the IPA clock under protection of the mutex. 
242 240 */ 243 241 void ipa_clock_put(struct ipa *ipa) 244 242 { 245 243 struct ipa_clock *clock = ipa->clock; 246 244 247 245 /* If this is not the last reference there's nothing more to do */ 248 - if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex)) 246 + if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex)) 249 247 return; 250 - 251 - ipa_endpoint_suspend(ipa); 252 248 253 249 ipa_clock_disable(ipa); 254 250 ··· 288 294 goto err_kfree; 289 295 290 296 mutex_init(&clock->mutex); 291 - atomic_set(&clock->count, 0); 297 + refcount_set(&clock->count, 0); 292 298 293 299 return clock; 294 300 ··· 305 311 { 306 312 struct clk *clk = clock->core; 307 313 308 - WARN_ON(atomic_read(&clock->count) != 0); 314 + WARN_ON(refcount_read(&clock->count) != 0); 309 315 mutex_destroy(&clock->mutex); 310 316 ipa_interconnect_exit(clock); 311 317 kfree(clock);
+14
drivers/net/ipa/ipa_interrupt.c
··· 237 237 goto err_kfree; 238 238 } 239 239 240 + ret = enable_irq_wake(irq); 241 + if (ret) { 242 + dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret); 243 + goto err_free_irq; 244 + } 245 + 240 246 return interrupt; 241 247 248 + err_free_irq: 249 + free_irq(interrupt->irq, interrupt); 242 250 err_kfree: 243 251 kfree(interrupt); 244 252 ··· 256 248 /* Tear down the IPA interrupt framework */ 257 249 void ipa_interrupt_teardown(struct ipa_interrupt *interrupt) 258 250 { 251 + struct device *dev = &interrupt->ipa->pdev->dev; 252 + int ret; 253 + 254 + ret = disable_irq_wake(interrupt->irq); 255 + if (ret) 256 + dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret); 259 257 free_irq(interrupt->irq, interrupt); 260 258 kfree(interrupt); 261 259 }
+33 -31
drivers/net/ipa/ipa_main.c
··· 75 75 * @ipa: IPA pointer 76 76 * @irq_id: IPA interrupt type (unused) 77 77 * 78 - * When in suspended state, the IPA can trigger a resume by sending a SUSPEND 79 - * IPA interrupt. 78 + * If an RX endpoint is in suspend state, and the IPA has a packet 79 + * destined for that endpoint, the IPA generates a SUSPEND interrupt 80 + * to inform the AP that it should resume the endpoint. If we get 81 + * one of these interrupts we just resume everything. 80 82 */ 81 83 static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) 82 84 { 83 - /* Take a a single clock reference to prevent suspend. All 84 - * endpoints will be resumed as a result. This reference will 85 - * be dropped when we get a power management suspend request. 85 + /* Just report the event, and let system resume handle the rest. 86 + * More than one endpoint could signal this; if so, ignore 87 + * all but the first. 86 88 */ 87 - if (!atomic_xchg(&ipa->suspend_ref, 1)) 88 - ipa_clock_get(ipa); 89 + if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags)) 90 + pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); 89 91 90 92 /* Acknowledge/clear the suspend interrupt on all endpoints */ 91 93 ipa_interrupt_suspend_clear_all(ipa->interrupt); ··· 108 106 { 109 107 struct ipa_endpoint *exception_endpoint; 110 108 struct ipa_endpoint *command_endpoint; 109 + struct device *dev = &ipa->pdev->dev; 111 110 int ret; 112 111 113 112 /* Setup for IPA v3.5.1 has some slight differences */ ··· 125 122 ipa_suspend_handler); 126 123 127 124 ipa_uc_setup(ipa); 125 + 126 + ret = device_init_wakeup(dev, true); 127 + if (ret) 128 + goto err_uc_teardown; 128 129 129 130 ipa_endpoint_setup(ipa); 130 131 ··· 165 158 166 159 ipa->setup_complete = true; 167 160 168 - dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n"); 161 + dev_info(dev, "IPA driver setup completed successfully\n"); 169 162 170 163 return 0; 171 164 ··· 180 173 ipa_endpoint_disable_one(command_endpoint); 181 174 
err_endpoint_teardown: 182 175 ipa_endpoint_teardown(ipa); 176 + (void)device_init_wakeup(dev, false); 177 + err_uc_teardown: 183 178 ipa_uc_teardown(ipa); 184 179 ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); 185 180 ipa_interrupt_teardown(ipa->interrupt); ··· 209 200 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; 210 201 ipa_endpoint_disable_one(command_endpoint); 211 202 ipa_endpoint_teardown(ipa); 203 + (void)device_init_wakeup(&ipa->pdev->dev, false); 212 204 ipa_uc_teardown(ipa); 213 205 ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); 214 206 ipa_interrupt_teardown(ipa->interrupt); ··· 518 508 * is held after initialization completes, and won't get dropped 519 509 * unless/until a system suspend request arrives. 520 510 */ 521 - atomic_set(&ipa->suspend_ref, 1); 522 511 ipa_clock_get(ipa); 523 512 524 513 ipa_hardware_config(ipa); ··· 553 544 err_hardware_deconfig: 554 545 ipa_hardware_deconfig(ipa); 555 546 ipa_clock_put(ipa); 556 - atomic_set(&ipa->suspend_ref, 0); 557 547 558 548 return ret; 559 549 } ··· 570 562 ipa_endpoint_deconfig(ipa); 571 563 ipa_hardware_deconfig(ipa); 572 564 ipa_clock_put(ipa); 573 - atomic_set(&ipa->suspend_ref, 0); 574 565 } 575 566 576 567 static int ipa_firmware_load(struct device *dev) ··· 716 709 */ 717 710 static int ipa_probe(struct platform_device *pdev) 718 711 { 719 - struct wakeup_source *wakeup_source; 720 712 struct device *dev = &pdev->dev; 721 713 const struct ipa_data *data; 722 714 struct ipa_clock *clock; ··· 764 758 goto err_clock_exit; 765 759 } 766 760 767 - /* Create a wakeup source. 
*/ 768 - wakeup_source = wakeup_source_register(dev, "ipa"); 769 - if (!wakeup_source) { 770 - /* The most likely reason for failure is memory exhaustion */ 771 - ret = -ENOMEM; 772 - goto err_clock_exit; 773 - } 774 - 775 761 /* Allocate and initialize the IPA structure */ 776 762 ipa = kzalloc(sizeof(*ipa), GFP_KERNEL); 777 763 if (!ipa) { 778 764 ret = -ENOMEM; 779 - goto err_wakeup_source_unregister; 765 + goto err_clock_exit; 780 766 } 781 767 782 768 ipa->pdev = pdev; 783 769 dev_set_drvdata(dev, ipa); 784 770 ipa->modem_rproc = rproc; 785 771 ipa->clock = clock; 786 - atomic_set(&ipa->suspend_ref, 0); 787 - ipa->wakeup_source = wakeup_source; 788 772 ipa->version = data->version; 789 773 790 774 ret = ipa_reg_init(ipa); ··· 853 857 ipa_reg_exit(ipa); 854 858 err_kfree_ipa: 855 859 kfree(ipa); 856 - err_wakeup_source_unregister: 857 - wakeup_source_unregister(wakeup_source); 858 860 err_clock_exit: 859 861 ipa_clock_exit(clock); 860 862 err_rproc_put: ··· 866 872 struct ipa *ipa = dev_get_drvdata(&pdev->dev); 867 873 struct rproc *rproc = ipa->modem_rproc; 868 874 struct ipa_clock *clock = ipa->clock; 869 - struct wakeup_source *wakeup_source; 870 875 int ret; 871 - 872 - wakeup_source = ipa->wakeup_source; 873 876 874 877 if (ipa->setup_complete) { 875 878 ret = ipa_modem_stop(ipa); ··· 884 893 ipa_mem_exit(ipa); 885 894 ipa_reg_exit(ipa); 886 895 kfree(ipa); 887 - wakeup_source_unregister(wakeup_source); 888 896 ipa_clock_exit(clock); 889 897 rproc_put(rproc); 890 898 ··· 897 907 * Return: Always returns zero 898 908 * 899 909 * Called by the PM framework when a system suspend operation is invoked. 910 + * Suspends endpoints and releases the clock reference held to keep 911 + * the IPA clock running until this point. 900 912 */ 901 913 static int ipa_suspend(struct device *dev) 902 914 { 903 915 struct ipa *ipa = dev_get_drvdata(dev); 904 916 917 + /* When a suspended RX endpoint has a packet ready to receive, we 918 + * get an IPA SUSPEND interrupt. 
We trigger a system resume in 919 + * that case, but only on the first such interrupt since suspend. 920 + */ 921 + __clear_bit(IPA_FLAG_RESUMED, ipa->flags); 922 + 923 + ipa_endpoint_suspend(ipa); 924 + 905 925 ipa_clock_put(ipa); 906 - atomic_set(&ipa->suspend_ref, 0); 907 926 908 927 return 0; 909 928 } ··· 924 925 * Return: Always returns 0 925 926 * 926 927 * Called by the PM framework when a system resume operation is invoked. 928 + * Takes an IPA clock reference to keep the clock running until suspend, 929 + * and resumes endpoints. 927 930 */ 928 931 static int ipa_resume(struct device *dev) 929 932 { ··· 934 933 /* This clock reference will keep the IPA out of suspend 935 934 * until we get a power management suspend request. 936 935 */ 937 - atomic_set(&ipa->suspend_ref, 1); 938 936 ipa_clock_get(ipa); 937 + 938 + ipa_endpoint_resume(ipa); 939 939 940 940 return 0; 941 941 }