/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * (kernel / os / linux)
 */
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include <sound/sdca_function.h>
#include "internal.h"

/*
 * Per-regmap context for a SoundWire MBQ bus: ties the owning device and
 * SoundWire slave to the user-supplied MBQ configuration.
 */
struct regmap_mbq_context {
	struct device *dev;
	struct sdw_slave *sdw;

	/* Copied from regmap_config; used to check whether the Function
	 * Status register can be polled when a transaction is deferred. */
	bool (*readable_reg)(struct device *dev, unsigned int reg);

	/* User MBQ configuration (per-register size, deferrable, timeouts) */
	struct regmap_sdw_mbq_cfg cfg;

	/* Maximum value size in bytes, from regmap_config->val_bits */
	int val_size;
};
26
27static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
28{
29 int size = ctx->val_size;
30
31 if (ctx->cfg.mbq_size) {
32 size = ctx->cfg.mbq_size(ctx->dev, reg);
33 if (!size || size > ctx->val_size)
34 return -EINVAL;
35 }
36
37 return size;
38}
39
40static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
41{
42 if (ctx->cfg.deferrable)
43 return ctx->cfg.deferrable(ctx->dev, reg);
44
45 return false;
46}
47
48static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
49 struct regmap_mbq_context *ctx)
50{
51 struct device *dev = ctx->dev;
52 int val, ret = 0;
53
54 dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
55
56 reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
57 SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
58
59 if (ctx->readable_reg(dev, reg)) {
60 ret = read_poll_timeout(sdw_read_no_pm, val,
61 val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
62 ctx->cfg.timeout_us, ctx->cfg.retry_us,
63 false, slave, reg);
64 if (val < 0)
65 return val;
66 if (ret)
67 dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
68 } else {
69 fsleep(ctx->cfg.timeout_us);
70 }
71
72 return ret;
73}
74
75static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
76 unsigned int reg, unsigned int val,
77 int mbq_size, bool deferrable)
78{
79 int shift = mbq_size * BITS_PER_BYTE;
80 int ret;
81
82 while (--mbq_size > 0) {
83 shift -= BITS_PER_BYTE;
84
85 ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
86 (val >> shift) & 0xff);
87 if (ret < 0)
88 return ret;
89 }
90
91 ret = sdw_write_no_pm(slave, reg, val & 0xff);
92 if (deferrable && ret == -ENODATA)
93 return -EAGAIN;
94
95 return ret;
96}
97
98static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
99{
100 struct regmap_mbq_context *ctx = context;
101 struct sdw_slave *slave = ctx->sdw;
102 bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
103 int mbq_size = regmap_sdw_mbq_size(ctx, reg);
104 int ret;
105
106 if (mbq_size < 0)
107 return mbq_size;
108
109 /*
110 * Technically the spec does allow a device to set itself to busy for
111 * internal reasons, but since it doesn't provide any information on
112 * how to handle timeouts in that case, for now the code will only
113 * process a single wait/timeout on function busy and a single retry
114 * of the transaction.
115 */
116 ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
117 if (ret == -EAGAIN) {
118 ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
119 if (ret)
120 return ret;
121
122 ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
123 }
124
125 return ret;
126}
127
128static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
129 unsigned int reg, unsigned int *val,
130 int mbq_size, bool deferrable)
131{
132 int shift = BITS_PER_BYTE;
133 int read;
134
135 read = sdw_read_no_pm(slave, reg);
136 if (read < 0) {
137 if (deferrable && read == -ENODATA)
138 return -EAGAIN;
139
140 return read;
141 }
142
143 *val = read;
144
145 while (--mbq_size > 0) {
146 read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
147 if (read < 0)
148 return read;
149
150 *val |= read << shift;
151 shift += BITS_PER_BYTE;
152 }
153
154 return 0;
155}
156
157static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
158{
159 struct regmap_mbq_context *ctx = context;
160 struct sdw_slave *slave = ctx->sdw;
161 bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
162 int mbq_size = regmap_sdw_mbq_size(ctx, reg);
163 int ret;
164
165 if (mbq_size < 0)
166 return mbq_size;
167
168 /*
169 * Technically the spec does allow a device to set itself to busy for
170 * internal reasons, but since it doesn't provide any information on
171 * how to handle timeouts in that case, for now the code will only
172 * process a single wait/timeout on function busy and a single retry
173 * of the transaction.
174 */
175 ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
176 if (ret == -EAGAIN) {
177 ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
178 if (ret)
179 return ret;
180
181 ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
182 }
183
184 return ret;
185}
186
/* regmap bus ops routing register I/O through the SoundWire MBQ helpers */
static const struct regmap_bus regmap_sdw_mbq = {
	.reg_read = regmap_sdw_mbq_read,
	.reg_write = regmap_sdw_mbq_write,
	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
193
194static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
195{
196 if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
197 return -ENOTSUPP;
198
199 /* Registers are 32 bits wide */
200 if (config->reg_bits != 32)
201 return -ENOTSUPP;
202
203 if (config->pad_bits != 0)
204 return -ENOTSUPP;
205
206 return 0;
207}
208
209static struct regmap_mbq_context *
210regmap_sdw_mbq_gen_context(struct device *dev,
211 struct sdw_slave *sdw,
212 const struct regmap_config *config,
213 const struct regmap_sdw_mbq_cfg *mbq_config)
214{
215 struct regmap_mbq_context *ctx;
216
217 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
218 if (!ctx)
219 return ERR_PTR(-ENOMEM);
220
221 ctx->dev = dev;
222 ctx->sdw = sdw;
223
224 if (mbq_config)
225 ctx->cfg = *mbq_config;
226
227 ctx->val_size = config->val_bits / BITS_PER_BYTE;
228 ctx->readable_reg = config->readable_reg;
229
230 return ctx;
231}
232
233struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
234 const struct regmap_config *config,
235 const struct regmap_sdw_mbq_cfg *mbq_config,
236 struct lock_class_key *lock_key,
237 const char *lock_name)
238{
239 struct regmap_mbq_context *ctx;
240 int ret;
241
242 ret = regmap_sdw_mbq_config_check(config);
243 if (ret)
244 return ERR_PTR(ret);
245
246 ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
247 if (IS_ERR(ctx))
248 return ERR_CAST(ctx);
249
250 return __regmap_init(dev, ®map_sdw_mbq, ctx,
251 config, lock_key, lock_name);
252}
253EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
254
255struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
256 const struct regmap_config *config,
257 const struct regmap_sdw_mbq_cfg *mbq_config,
258 struct lock_class_key *lock_key,
259 const char *lock_name)
260{
261 struct regmap_mbq_context *ctx;
262 int ret;
263
264 ret = regmap_sdw_mbq_config_check(config);
265 if (ret)
266 return ERR_PTR(ret);
267
268 ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
269 if (IS_ERR(ctx))
270 return ERR_CAST(ctx);
271
272 return __devm_regmap_init(dev, ®map_sdw_mbq, ctx,
273 config, lock_key, lock_name);
274}
275EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
276
MODULE_DESCRIPTION("regmap SoundWire MBQ Module");
MODULE_LICENSE("GPL");