/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct work_struct work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
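
/*
 * Illustrative sketch (not part of the original header): a user of this API
 * supplies get/put callbacks and initialises the wakeref once at setup time.
 * The "my_gt" structure and the "__my_gt_unpark"/"__my_gt_park" helpers are
 * hypothetical names used only for this example.
 *
 *	static int __my_gt_get(struct intel_wakeref *wf)
 *	{
 *		return __my_gt_unpark(container_of(wf, struct my_gt, wakeref));
 *	}
 *
 *	static int __my_gt_put(struct intel_wakeref *wf)
 *	{
 *		return __my_gt_park(container_of(wf, struct my_gt, wakeref));
 *	}
 *
 *	static const struct intel_wakeref_ops my_gt_wakeref_ops = {
 *		.get = __my_gt_get,
 *		.put = __my_gt_put,
 *	};
 *
 *	void my_gt_init(struct my_gt *gt, struct intel_runtime_pm *rpm)
 *	{
 *		intel_wakeref_init(&gt->wakeref, rpm, &my_gt_wakeref_ops);
 *	}
 */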

int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the ops->get() callback underneath
 * the wakeref mutex.
 *
 * Note that ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released, the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}
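
/*
 * Example usage (a sketch, not from the original source): callers bracket
 * hardware access with a get/put pair so the device stays awake for the
 * duration. "engine" and "do_hw_access" are hypothetical.
 *
 *	err = intel_wakeref_get(&engine->wakeref);
 *	if (err)
 *		return err;
 *
 *	do_hw_access(engine);
 *
 *	intel_wakeref_put(&engine->wakeref);
 */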

/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter; only valid if the wakeref is already held
 * by the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref only if already active
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
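
/*
 * Example usage (a sketch, not from the original source): an opportunistic
 * path that only wants to do work while the device is already awake, rather
 * than waking it up itself. "flush_pending_work" is a hypothetical helper.
 *
 *	if (intel_wakeref_get_if_active(&engine->wakeref)) {
 *		flush_pending_work(engine);
 *		intel_wakeref_put(&engine->wakeref);
 *	}
 */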

#define INTEL_WAKEREF_PUT_ASYNC BIT(0)

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags (e.g. INTEL_WAKEREF_PUT_ASYNC)
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the ops->put() callback is
 * called underneath the wakeref mutex.
 *
 * Note that ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
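
/*
 * Note (illustrative, not from the original source): intel_wakeref_put() may
 * sleep, as its might_sleep() annotation indicates, whereas
 * intel_wakeref_put_async() requests that the final release be completed
 * asynchronously (via @wf->work), making it the variant to use where
 * blocking on @wf->mutex is not acceptable, e.g. under a spinlock.
 * "engine->lock" is a hypothetical lock used only for the example.
 *
 *	spin_lock_irqsave(&engine->lock, irqflags);
 *	...
 *	intel_wakeref_put_async(&engine->wakeref);
 *	spin_unlock_irqrestore(&engine->lock, irqflags);
 */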

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running under @wf->mutex, possibly on
 * another CPU) has completed, including any release deferred to @wf->work.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_work(&wf->work);
}
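
/*
 * Example usage (a sketch, not from the original source): before tearing
 * down state that the get/put callbacks touch, make sure no callback is
 * still in flight. "free_engine_state" is a hypothetical helper.
 *
 *	intel_wakeref_unlock_wait(&engine->wakeref);
 *	free_engine_state(engine);
 */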

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
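
/*
 * Example usage (a sketch, not from the original source): during suspend or
 * teardown, drop any remaining reference asynchronously and then wait for
 * the wakeref to become idle before proceeding.
 *
 *	intel_wakeref_put_async(&engine->wakeref);
 *	err = intel_wakeref_wait_for_idle(&engine->wakeref);
 *	if (err)
 *		return err;	// interrupted before the wakeref went idle
 */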

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
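
/*
 * Example usage (a sketch, not from the original source): hold off
 * autosuspend for a device-specific interval after a userspace access,
 * expressing the timeout with msecs_to_jiffies(). "i915->userfault_wakeref"
 * is a hypothetical intel_wakeref_auto instance used only for illustration.
 *
 *	intel_wakeref_auto(&i915->userfault_wakeref, msecs_to_jiffies(250));
 *
 *	// later, cancel the delay and allow the suspend immediately:
 *	intel_wakeref_auto(&i915->userfault_wakeref, 0);
 */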

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */