// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

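/*
 * Each maple tree entry covers one contiguous block of cached
 * registers: a kmalloc()ed array of unsigned long values, one per
 * register, spanning the range [mas.index, mas.last]. Readers walk
 * the tree under the RCU read lock.
 */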
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

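/*
 * Cache a write: update the value in place if the register is already
 * cached, otherwise allocate a new block and merge it with any
 * adjacent blocks so entries stay over maximal contiguous ranges.
 */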
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

	entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}

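/*
 * Drop registers [min, max] from the cache. Blocks that straddle the
 * boundaries are split: the portions below min and above max are
 * copied out, the old block is erased and freed, and the saved
 * portions are re-inserted as new entries.
 */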
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	/* initialized to work around false-positive -Wuninitialized warning */
	unsigned long lower_index = 0, lower_last = 0;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup_array(entry,
					      min - mas.index, sizeof(*lower),
					      map->alloc_flags);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup_array(&entry[max - mas.index + 1],
					      mas.last - max, sizeof(*upper),
					      map->alloc_flags);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}

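/*
 * Write one run of registers [min, max) from a cached block back to
 * the device, using a single raw write when the bus supports it to cut
 * per-register transaction overhead. Called with the RCU read lock
 * held; the maple state is paused and the lock dropped around the I/O.
 */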
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
				     struct ma_state *mas,
				     unsigned int min, unsigned int max)
{
	void *buf;
	unsigned long r;
	size_t val_bytes = map->format.val_bytes;
	int ret = 0;

	mas_pause(mas);
	rcu_read_unlock();

	/*
	 * Use a raw write if writing more than one register to a
	 * device that supports raw writes to reduce transaction
	 * overheads.
	 */
	if (max - min > 1 && regmap_can_raw_write(map)) {
		buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* Render the data for a raw write */
		for (r = min; r < max; r++) {
			regcache_set_val(map, buf, r - min,
					 entry[r - mas->index]);
		}

		ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
					false);

		kfree(buf);
	} else {
		for (r = min; r < max; r++) {
			ret = _regmap_write(map, r,
					    entry[r - mas->index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_lock();

	return ret;
}

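/*
 * Sync registers [min, max] to the device: scan each cached block for
 * runs of registers that need syncing and flush each run with a single
 * call to regcache_maple_sync_block(). The cache is bypassed for the
 * duration so the writes go straight to the hardware.
 */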
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r, v, sync_start;
	int ret = 0;
	bool sync_needed = false;

	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			v = entry[r - mas.index];

			if (regcache_reg_needs_sync(map, r, v)) {
				if (!sync_needed) {
					sync_start = r;
					sync_needed = true;
				}
				continue;
			}

			if (!sync_needed)
				continue;

			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}

		if (sync_needed) {
			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}

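/* Allocate and initialise the maple tree backing this regmap's cache. */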
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;

	mt = kmalloc(sizeof(*mt), map->alloc_flags);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	if (!mt_external_lock(mt) && map->lock_key)
		lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);

	return 0;
}

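/* Free every cached block, then destroy and free the tree itself. */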
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

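/*
 * Store the defaults for the contiguous run of registers described by
 * reg_defaults[first..last] as a single tree entry.
 */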
static int regcache_maple_insert_block(struct regmap *map, int first,
				       int last)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, first, last);
	unsigned long *entry;
	int i, ret;

	entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < last - first + 1; i++)
		entry[i] = map->reg_defaults[first + i].def;

	mas_lock(&mas);

	mas_set_range(&mas, map->reg_defaults[first].reg,
		      map->reg_defaults[last].reg);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret)
		kfree(entry);

	return ret;
}

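/*
 * Seed the cache from the register defaults, inserting one block per
 * contiguous run of register addresses.
 */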
static int regcache_maple_populate(struct regmap *map)
{
	int i;
	int ret;
	int range_start;

	range_start = 0;

	/* Scan for ranges of contiguous registers */
	for (i = 1; i < map->num_reg_defaults; i++) {
		if (map->reg_defaults[i].reg !=
		    map->reg_defaults[i - 1].reg + 1) {
			ret = regcache_maple_insert_block(map, range_start,
							  i - 1);
			if (ret != 0)
				return ret;

			range_start = i;
		}
	}

	/* Add the last block */
	return regcache_maple_insert_block(map, range_start, map->num_reg_defaults - 1);
}

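/*
 * Drivers select this cache by setting .cache_type = REGCACHE_MAPLE in
 * their struct regmap_config. A minimal sketch (the name and the
 * fields other than cache_type are illustrative, not taken from this
 * file):
 *
 *	static const struct regmap_config example_regmap_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 8,
 *		.max_register	= 0xff,
 *		.cache_type	= REGCACHE_MAPLE,
 *	};
 */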
struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.populate = regcache_maple_populate,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};