// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

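/*
 * These tests are built behind CONFIG_REGMAP_KUNIT. A typical way to
 * run them is via the KUnit wrapper, e.g. (illustrative invocation
 * only; the exact flags depend on the tree and .kunitconfig in use):
 *
 *   ./tools/testing/kunit/kunit.py run --alltests 'regmap*'
 */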
#include <kunit/device.h>
#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"

#define BLOCK_TEST_SIZE 12

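/*
 * Define a KUnit action wrapper around regmap_exit() so that maps
 * created by the helpers below are torn down automatically when a test
 * finishes (see the kunit_add_action_or_reset() calls).
 */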
KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);

struct regmap_test_priv {
	struct device *dev;
};

struct regmap_test_param {
	enum regcache_type cache;
	enum regmap_endian val_endian;

	unsigned int from_reg;
};

/* Fill @new with random bytes that differ from @orig at every offset */
static void get_changed_bytes(void *orig, void *new, size_t size)
{
	char *o = orig;
	char *n = new;
	int i;

	get_random_bytes(new, size);

	/*
	 * This could be nicer and more efficient but we shouldn't
	 * care too much.
	 */
	for (i = 0; i < size; i++)
		while (n[i] == o[i])
			get_random_bytes(&n[i], 1);
}

static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
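/*
 * Note that .val_bits is sized to match unsigned int (typically 32
 * bits), so random values generated with get_random_bytes() round-trip
 * through the regmap API without truncation.
 */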

static const char *regcache_type_name(enum regcache_type type)
{
	switch (type) {
	case REGCACHE_NONE:
		return "none";
	case REGCACHE_FLAT:
		return "flat";
	case REGCACHE_RBTREE:
		return "rbtree";
	case REGCACHE_MAPLE:
		return "maple";
	default:
		return NULL;
	}
}

static const char *regmap_endian_name(enum regmap_endian endian)
{
	switch (endian) {
	case REGMAP_ENDIAN_BIG:
		return "big";
	case REGMAP_ENDIAN_LITTLE:
		return "little";
	case REGMAP_ENDIAN_DEFAULT:
		return "default";
	case REGMAP_ENDIAN_NATIVE:
		return "native";
	default:
		return NULL;
	}
}

static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
		 regcache_type_name(param->cache),
		 regmap_endian_name(param->val_endian),
		 param->from_reg);
}
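/*
 * For example, a parameter of { .cache = REGCACHE_MAPLE, .val_endian =
 * REGMAP_ENDIAN_BIG, .from_reg = 0x2001 } is described as
 * "maple-big @0x2001" in the KUnit output.
 */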

static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
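/*
 * Each KUNIT_ARRAY_PARAM() invocation generates a <name>_gen_params()
 * helper (here regcache_types_gen_params) which a suite can pass to
 * KUNIT_CASE_PARAM() so that a test case runs once per list entry.
 */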

static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);

static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);

static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
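/*
 * The nonzero from_reg values above start the test register block at
 * offsets around 0x2000, presumably to exercise cache data structures
 * whose first register is neither 0 nor nicely aligned.
 */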

static struct regmap *gen_regmap(struct kunit *test,
				 struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap_test_priv *priv = test->priv;
	unsigned int *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	size_t size;
	int i, error;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
				  config->cache_type == REGCACHE_MAPLE;

	if (config->max_register == 0) {
		config->max_register = param->from_reg;
		if (config->num_reg_defaults)
			config->max_register += (config->num_reg_defaults - 1) *
						config->reg_stride;
		else
			config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
	}

	size = array_size(config->max_register + 1, sizeof(*buf));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		defaults = kunit_kcalloc(test,
					 config->num_reg_defaults,
					 sizeof(struct reg_default),
					 GFP_KERNEL);
		if (!defaults)
			goto out_free;

		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = param->from_reg + (i * config->reg_stride);
			defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
		}
	}

	ret = regmap_init_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/* This calls regmap_exit() on failure, which frees buf and *data */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}

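/*
 * Access predicate used as the writeable/readable/volatile callback in
 * the tests below: true for every register except from_reg + 5.
 */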
static bool reg_5_false(struct device *dev, unsigned int reg)
{
	struct kunit *test = dev_get_drvdata(dev);
	const struct regmap_test_param *param = test->param_value;

	return reg != (param->from_reg + 5);
}

static void basic_read_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache, the cache should have satisfied the read */
	KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}

static void bulk_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache, the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void bulk_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache, the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void multi_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence sequence[BLOCK_TEST_SIZE];
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the multi API can be read back with single
	 * reads.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		sequence[i].reg = i;
		sequence[i].def = val[i];
		sequence[i].delay_us = 0;
	}
	KUNIT_EXPECT_EQ(test, 0,
			regmap_multi_reg_write(map, sequence, BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache, the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void multi_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int regs[BLOCK_TEST_SIZE];
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the multi API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		regs[i] = i;
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	}
	KUNIT_EXPECT_EQ(test, 0,
			regmap_multi_reg_read(map, regs, rval, BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache, the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void read_bypassed(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only mode, regmap_read_bypassed() should return
	 * the register value and leave the map in cache-only mode.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values, not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_NE(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void read_bypassed_volatile(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;
	/* All registers except #5 are volatile */
	config.volatile_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only mode, regmap_read_bypassed() should return
	 * the register value and leave the map in cache-only mode.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Register #5 is non-volatile so should read from cache */
		KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
				regmap_read(map, param->from_reg + i, &rval));

		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values, not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		if (i == 5)
			continue;

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void write_readonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Try to change all registers; the write to the read-only one should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}

static void read_writeonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.readable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers; the write-only one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (config.cache_type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);
}

static void reg_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void reg_defaults_read_dev(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}

static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	/*
	 * Allow one extra register so that the read/written arrays
	 * are sized big enough to include an entry for the odd
	 * address past the final reg_default register.
	 */
	config.max_register = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even addresses can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}

static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
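/*
 * Worked example of the mapping above: an access to virtual register
 * 25 is (25 - range_min) = 5 registers into the range, so page
 * 5 / window_len = 0 is written to selector_reg and the access hits
 * physical register window_start + (5 % window_len) = 9. The last
 * register, range_max = 40, lands on page (40 - 20) / 10 = 2 at
 * offset 0, which is what cache_range_window_reg below checks for.
 */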

static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}

static bool test_range_all_volatile(struct device *dev, unsigned int reg)
{
	if (test_range_window_volatile(dev, reg))
		return true;

	if (reg >= test_range.range_min && reg <= test_range.range_max)
		return true;

	return false;
}

static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	config = test_regmap_config;
	config.max_register = 300;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	buf_sz = array_size(sizeof(*vals), config.max_register);
	vals = kunit_kmalloc(test, buf_sz, GFP_KERNEL);
	KUNIT_ASSERT_FALSE(test, vals == NULL);

	get_random_bytes(vals, buf_sz);

	/* Write data into the map/cache in ever-decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
	}
}

static void cache_bypass(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

static void cache_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}

static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;

	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}

static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at default value
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}

static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync, should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}

static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Try to change all registers; the write to the read-only one should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}

static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}

static void cache_drop(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
		data->read[param->from_reg + i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
						      param->from_reg + 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}

static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[4][BLOCK_TEST_SIZE];
	unsigned int reg;
	const int num_ranges = ARRAY_SIZE(val) * 2;
	int rangeidx, i;

	static_assert(ARRAY_SIZE(val) == 4);

	config = test_regmap_config;
	config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Create non-contiguous cache blocks by writing every other range */
	get_random_bytes(&val, sizeof(val));
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
							   &val[rangeidx / 2],
							   BLOCK_TEST_SIZE));
		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Drop range 2 */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));

	/* Drop part of range 4 */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));

	/* Mark dirty and reset mock registers to 0 */
	regcache_mark_dirty(map);
	for (i = 0; i < config.max_register + 1; i++) {
		data->vals[i] = 0;
		data->written[i] = false;
	}

	/* The registers that were dropped from range 4 should now remain at 0 */
	val[4 / 2][3] = 0;
	val[4 / 2][4] = 0;
	val[4 / 2][5] = 0;

	/* Sync and check that the expected register ranges were written */
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Check that even ranges (except 2 and 4) were written */
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		if ((rangeidx == 2) || (rangeidx == 4))
			continue;

		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_TRUE(test, data->written[reg + i]);

		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that range 2 wasn't written */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[reg + i]);

	/* Check that range 4 was partially written */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);

	KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));

	/* Nothing before param->from_reg should have been written */
	for (i = 0; i < param->from_reg; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/* Mark dirty and cache sync should not write anything. */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_present(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}

/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}

static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);

static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
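/*
 * With reg_bits = val_bits = 16, each register occupies two bytes of
 * raw backing store: BITS_TO_BYTES(16) = 2, which is what
 * gen_raw_regmap() below uses to size the buffer.
 */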

static struct regmap *gen_raw_regmap(struct kunit *test,
				     struct regmap_config *config,
				     struct regmap_ram_data **data)
{
	struct regmap_test_priv *priv = test->priv;
	const struct regmap_test_param *param = test->param_value;
	u16 *buf;
	struct regmap *ret = ERR_PTR(-ENOMEM);
	int i, error;
	struct reg_default *defaults;
	size_t size;

	config->cache_type = param->cache;
	config->val_format_endian = param->val_endian;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
				  config->cache_type == REGCACHE_MAPLE;

	size = array_size(config->max_register + 1, BITS_TO_BYTES(config->reg_bits));
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		goto out_free;
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kunit_kcalloc(test,
				 config->num_reg_defaults,
				 sizeof(struct reg_default),
				 GFP_KERNEL);
	if (!defaults)
		goto out_free;
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (param->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			ret = ERR_PTR(-EINVAL);
			goto out_free;
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(priv->dev, config, *data);
	if (IS_ERR(ret))
		goto out_free;

	/* This calls regmap_exit() on failure, which frees buf and *data */
	error = kunit_add_action_or_reset(test, regmap_exit_action, ret);
	if (error)
		ret = ERR_PTR(error);

	return ret;

out_free:
	kfree(buf);
	kfree(*data);

	return ret;
}

static void raw_read_defaults_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}
}

static void raw_read_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = array_size(sizeof(*rval), config.max_register + 1);
	rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
		}
	}
}

static void raw_write_read_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

1730static void raw_write(struct kunit *test)
1731{
1732 struct regmap *map;
1733 struct regmap_config config;
1734 struct regmap_ram_data *data;
1735 u16 *hw_buf;
1736 u16 val[2];
1737 unsigned int rval;
1738 int i;
1739
1740 config = raw_regmap_config;
1741
1742 map = gen_raw_regmap(test, &config, &data);
1743 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1744 if (IS_ERR(map))
1745 return;
1746
1747 hw_buf = (u16 *)data->vals;
1748
1749 get_random_bytes(&val, sizeof(val));
1750
1751 /* Do a raw write */
1752 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1753
1754 /* We should read back the new values, and defaults for the rest */
1755 for (i = 0; i < config.max_register + 1; i++) {
1756 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1757
1758 switch (i) {
1759 case 2:
1760 case 3:
1761 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1762 KUNIT_EXPECT_EQ(test, rval,
1763 be16_to_cpu((__force __be16)val[i % 2]));
1764 } else {
1765 KUNIT_EXPECT_EQ(test, rval,
1766 le16_to_cpu((__force __le16)val[i % 2]));
1767 }
1768 break;
1769 default:
1770 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1771 break;
1772 }
1773 }
1774
1775 /* The values should appear in the "hardware" */
1776 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1777}
1778
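/*
 * Predicates flagging register 0 as the non-incrementing register:
 * reg_zero() is used for the regmap_config callbacks, ram_reg_zero()
 * for the RAM backend that models the hardware.
 */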
1779static bool reg_zero(struct device *dev, unsigned int reg)
1780{
1781 return reg == 0;
1782}
1783
1784static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
1785{
1786 return reg == 0;
1787}
1788
1789static void raw_noinc_write(struct kunit *test)
1790{
1791 struct regmap *map;
1792 struct regmap_config config;
1793 struct regmap_ram_data *data;
1794 unsigned int val;
1795 u16 val_test, val_last;
1796 u16 val_array[BLOCK_TEST_SIZE];
1797
1798 config = raw_regmap_config;
1799 config.volatile_reg = reg_zero;
1800 config.writeable_noinc_reg = reg_zero;
1801 config.readable_noinc_reg = reg_zero;
1802
1803 map = gen_raw_regmap(test, &config, &data);
1804 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1805 if (IS_ERR(map))
1806 return;
1807
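	/* Have the RAM backend treat register 0 as non-incrementing too */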
1808 data->noinc_reg = ram_reg_zero;
1809
1810 get_random_bytes(&val_array, sizeof(val_array));
1811
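	/*
	 * val_array holds wire-order data; convert the reference values
	 * to CPU order for comparison with regmap_read(). Adding 100
	 * makes val_test differ from val_array[1], so any spill of the
	 * noinc stream into register 1 would be caught below.
	 */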
1812	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1813		val_test = be16_to_cpu((__force __be16)val_array[1]) + 100;
1814		val_last = be16_to_cpu((__force __be16)val_array[BLOCK_TEST_SIZE - 1]);
1815	} else {
1816		val_test = le16_to_cpu((__force __le16)val_array[1]) + 100;
1817		val_last = le16_to_cpu((__force __le16)val_array[BLOCK_TEST_SIZE - 1]);
1818	}
1819
1820 /* Put some data into the register following the noinc register */
1821 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));
1822
1823 /* Write some data to the noinc register */
1824 KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
1825 sizeof(val_array)));
1826
1827 /* We should read back the last value written */
1828 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
1829 KUNIT_ASSERT_EQ(test, val_last, val);
1830
1831 /* Make sure we didn't touch the register after the noinc register */
1832 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
1833 KUNIT_ASSERT_EQ(test, val_test, val);
1834}
1835
1836static void raw_sync(struct kunit *test)
1837{
1838 struct regmap *map;
1839 struct regmap_config config;
1840 struct regmap_ram_data *data;
1841 u16 val[3];
1842 u16 *hw_buf;
1843 unsigned int rval;
1844 int i;
1845
1846 config = raw_regmap_config;
1847
1848 map = gen_raw_regmap(test, &config, &data);
1849 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1850 if (IS_ERR(map))
1851 return;
1852
1853 hw_buf = (u16 *)data->vals;
1854
1855 get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
1856
1857	/* Do a raw write and a regular write in cache-only mode */
1858 regcache_cache_only(map, true);
1859 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
1860 sizeof(u16) * 2));
1861 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
1862
1863 /* We should read back the new values, and defaults for the rest */
1864 for (i = 0; i < config.max_register + 1; i++) {
1865 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1866
1867 switch (i) {
1868 case 2:
1869 case 3:
1870 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1871 KUNIT_EXPECT_EQ(test, rval,
1872 be16_to_cpu((__force __be16)val[i - 2]));
1873 } else {
1874 KUNIT_EXPECT_EQ(test, rval,
1875 le16_to_cpu((__force __le16)val[i - 2]));
1876 }
1877 break;
1878 case 4:
1879 KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
1880 break;
1881 default:
1882 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1883 break;
1884 }
1885 }
1886
1887	/*
1888	 * The value written via _write() was translated by the core;
1889	 * translate the original copy the same way for comparison purposes.
1890	 */
1891	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
1892		val[2] = (__force u16)cpu_to_be16(val[2]);
1893	else
1894		val[2] = (__force u16)cpu_to_le16(val[2]);
1895
1896 /* The values should not appear in the "hardware" */
1897 KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
1898
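	/* Clear the write tracking so the sync's writes can be observed */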
1899 for (i = 0; i < config.max_register + 1; i++)
1900 data->written[i] = false;
1901
1902 /* Do the sync */
1903 regcache_cache_only(map, false);
1904 regcache_mark_dirty(map);
1905 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
1906
1907 /* The values should now appear in the "hardware" */
1908 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
1909}
1910
1911static void raw_ranges(struct kunit *test)
1912{
1913 struct regmap *map;
1914 struct regmap_config config;
1915 struct regmap_ram_data *data;
1916 unsigned int val;
1917 int i;
1918
1919 config = raw_regmap_config;
1920 config.volatile_reg = test_range_all_volatile;
1921 config.ranges = &test_range;
1922 config.num_ranges = 1;
1923 config.max_register = test_range.range_max;
1924
1925 map = gen_raw_regmap(test, &config, &data);
1926 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1927 if (IS_ERR(map))
1928 return;
1929
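	/*
	 * Accesses to the virtual range are paged: the core writes the
	 * page number to selector_reg and then accesses the physical
	 * window at window_start. The tracking flags below check that
	 * both steps happen for writes and reads alike.
	 */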
1930 /* Reset the page to a non-zero value to trigger a change */
1931 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
1932 test_range.range_max));
1933
1934 /* Check we set the page and use the window for writes */
1935 data->written[test_range.selector_reg] = false;
1936 data->written[test_range.window_start] = false;
1937 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
1938 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1939 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
1940
1941 data->written[test_range.selector_reg] = false;
1942 data->written[test_range.window_start] = false;
1943 KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
1944 test_range.range_min +
1945 test_range.window_len,
1946 0));
1947 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1948 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
1949
1950 /* Same for reads */
1951 data->written[test_range.selector_reg] = false;
1952 data->read[test_range.window_start] = false;
1953 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
1954 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1955 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
1956
1957 data->written[test_range.selector_reg] = false;
1958 data->read[test_range.window_start] = false;
1959 KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
1960 test_range.range_min +
1961 test_range.window_len,
1962 &val));
1963 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
1964 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
1965
1966	/* No physical access should have been triggered in the virtual range */
1967 for (i = test_range.range_min; i < test_range.range_max; i++) {
1968 KUNIT_EXPECT_FALSE(test, data->read[i]);
1969 KUNIT_EXPECT_FALSE(test, data->written[i]);
1970 }
1971}
1972
1973static struct kunit_case regmap_test_cases[] = {
1974 KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
1975 KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
1976 KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
1977 KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
1978 KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
1979 KUNIT_CASE_PARAM(multi_write, regcache_types_gen_params),
1980 KUNIT_CASE_PARAM(multi_read, regcache_types_gen_params),
1981 KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
1982 KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
1983 KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
1984 KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
1985 KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
1986 KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
1987 KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
1988 KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
1989 KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
1990 KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
1991 KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
1992 KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
1993 KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
1994 KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
1995 KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
1996 KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
1997 KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
1998 KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
1999 KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
2000 KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
2001 KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
2002 KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
2003
2004 KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
2005 KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
2006 KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
2007 KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
2008 KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
2009 KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
2010 KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
2011 {}
2012};
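/*
 * New cases are wired up by adding a KUNIT_CASE_PARAM() entry before the
 * terminating empty entry above, e.g. (illustrative only, the test name
 * here is hypothetical):
 *
 *	KUNIT_CASE_PARAM(raw_multi_write, raw_test_types_gen_params),
 */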
2013
2014static int regmap_test_init(struct kunit *test)
2015{
2016 struct regmap_test_priv *priv;
2017 struct device *dev;
2018
2019 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
2020 if (!priv)
2021 return -ENOMEM;
2022
2023 test->priv = priv;
2024
2025 dev = kunit_device_register(test, "regmap_test");
2026 if (IS_ERR(dev))
2027 return PTR_ERR(dev);
2028
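	/*
	 * Take an extra reference, dropped again in regmap_test_exit(),
	 * and stash the test pointer on the device so code that only has
	 * the struct device can get back to the running test.
	 */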
2029 priv->dev = get_device(dev);
2030 dev_set_drvdata(dev, test);
2031
2032 return 0;
2033}
2034
2035static void regmap_test_exit(struct kunit *test)
2036{
2037 struct regmap_test_priv *priv = test->priv;
2038
2039	/* Drop the reference to the dummy struct device taken in init */
2040 if (priv && priv->dev)
2041 put_device(priv->dev);
2042}
2043
2044static struct kunit_suite regmap_test_suite = {
2045 .name = "regmap",
2046 .init = regmap_test_init,
2047 .exit = regmap_test_exit,
2048 .test_cases = regmap_test_cases,
2049};
2050kunit_test_suite(regmap_test_suite);
2051
2052MODULE_DESCRIPTION("Regmap KUnit tests");
2053MODULE_LICENSE("GPL v2");