Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'regmap-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap

Pull regmap updates from Mark Brown:
"Other than a few cleanups the changes here are all in the KUnit tests,
Richard Fitzgerald sent some bug fixes during the v6.9 cycle and while
adding test coverage for the issues fixed did some fairly substantial
improvements, both cleaning up the framework and building out the
coverage"

* tag 'regmap-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap:
regmap: Reorder fields in 'struct regmap_config' to save some memory
regmap: kunit: Fix a NULL vs IS_ERR() check
regmap: spi: Add missing MODULE_DESCRIPTION()
regmap: Drop capitalisation in MODULE_DESCRIPTION()
regmap: kunit: Add test cases for regmap_read_bypassed()
regmap: kunit: Add cache-drop test with multiple cache blocks
regmap: kunit: Replace a kmalloc/kfree() pair with KUnit-managed alloc
regmap: kunit: Use a KUnit action to call regmap_exit()
regmap: kunit: Add more cache-sync tests
regmap: kunit: Add more cache-drop tests
regmap: kunit: Run non-sparse cache tests at non-zero register addresses
regmap: kunit: Run sparse cache tests at non-zero register addresses
regmap: kunit: Introduce struct for test case parameters
regmap: kunit: Create a struct device for the regmap
regmap: kunit: Fix warnings of implicit casts to __le16 and __be16
regmap: maple: Remove second semicolon

+802 -289
+8 -6
drivers/base/regmap/internal.h
··· 326 326 * Create a test register map with data stored in RAM, not intended 327 327 * for practical use. 328 328 */ 329 - struct regmap *__regmap_init_ram(const struct regmap_config *config, 329 + struct regmap *__regmap_init_ram(struct device *dev, 330 + const struct regmap_config *config, 330 331 struct regmap_ram_data *data, 331 332 struct lock_class_key *lock_key, 332 333 const char *lock_name); 333 334 334 - #define regmap_init_ram(config, data) \ 335 - __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data) 335 + #define regmap_init_ram(dev, config, data) \ 336 + __regmap_lockdep_wrapper(__regmap_init_ram, #dev, dev, config, data) 336 337 337 - struct regmap *__regmap_init_raw_ram(const struct regmap_config *config, 338 + struct regmap *__regmap_init_raw_ram(struct device *dev, 339 + const struct regmap_config *config, 338 340 struct regmap_ram_data *data, 339 341 struct lock_class_key *lock_key, 340 342 const char *lock_name); 341 343 342 - #define regmap_init_raw_ram(config, data) \ 343 - __regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data) 344 + #define regmap_init_raw_ram(dev, config, data) \ 345 + __regmap_lockdep_wrapper(__regmap_init_raw_ram, #dev, dev, config, data) 344 346 345 347 #endif
+1 -1
drivers/base/regmap/regcache-maple.c
··· 294 294 { 295 295 struct maple_tree *mt = map->cache; 296 296 MA_STATE(mas, mt, 0, UINT_MAX); 297 - unsigned int *entry;; 297 + unsigned int *entry; 298 298 299 299 /* if we've already been called then just return */ 300 300 if (!mt)
+1 -1
drivers/base/regmap/regmap-i3c.c
··· 56 56 EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c); 57 57 58 58 MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>"); 59 - MODULE_DESCRIPTION("Regmap I3C Module"); 59 + MODULE_DESCRIPTION("regmap I3C Module"); 60 60 MODULE_LICENSE("GPL v2");
+751 -243
drivers/base/regmap/regmap-kunit.c
··· 4 4 // 5 5 // Copyright 2023 Arm Ltd 6 6 7 + #include <kunit/device.h> 8 + #include <kunit/resource.h> 7 9 #include <kunit/test.h> 8 10 #include "internal.h" 9 11 10 12 #define BLOCK_TEST_SIZE 12 13 + 14 + KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *); 15 + 16 + struct regmap_test_priv { 17 + struct device *dev; 18 + }; 19 + 20 + struct regmap_test_param { 21 + enum regcache_type cache; 22 + enum regmap_endian val_endian; 23 + 24 + unsigned int from_reg; 25 + }; 11 26 12 27 static void get_changed_bytes(void *orig, void *new, size_t size) 13 28 { ··· 42 27 } 43 28 44 29 static const struct regmap_config test_regmap_config = { 45 - .max_register = BLOCK_TEST_SIZE, 46 30 .reg_stride = 1, 47 31 .val_bits = sizeof(unsigned int) * 8, 48 32 }; 49 33 50 - struct regcache_types { 51 - enum regcache_type type; 52 - const char *name; 53 - }; 54 - 55 - static void case_to_desc(const struct regcache_types *t, char *desc) 34 + static const char *regcache_type_name(enum regcache_type type) 56 35 { 57 - strcpy(desc, t->name); 36 + switch (type) { 37 + case REGCACHE_NONE: 38 + return "none"; 39 + case REGCACHE_FLAT: 40 + return "flat"; 41 + case REGCACHE_RBTREE: 42 + return "rbtree"; 43 + case REGCACHE_MAPLE: 44 + return "maple"; 45 + default: 46 + return NULL; 47 + } 58 48 } 59 49 60 - static const struct regcache_types regcache_types_list[] = { 61 - { REGCACHE_NONE, "none" }, 62 - { REGCACHE_FLAT, "flat" }, 63 - { REGCACHE_RBTREE, "rbtree" }, 64 - { REGCACHE_MAPLE, "maple" }, 50 + static const char *regmap_endian_name(enum regmap_endian endian) 51 + { 52 + switch (endian) { 53 + case REGMAP_ENDIAN_BIG: 54 + return "big"; 55 + case REGMAP_ENDIAN_LITTLE: 56 + return "little"; 57 + case REGMAP_ENDIAN_DEFAULT: 58 + return "default"; 59 + case REGMAP_ENDIAN_NATIVE: 60 + return "native"; 61 + default: 62 + return NULL; 63 + } 64 + } 65 + 66 + static void param_to_desc(const struct regmap_test_param *param, char *desc) 67 + { 68 + snprintf(desc, 
KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x", 69 + regcache_type_name(param->cache), 70 + regmap_endian_name(param->val_endian), 71 + param->from_reg); 72 + } 73 + 74 + static const struct regmap_test_param regcache_types_list[] = { 75 + { .cache = REGCACHE_NONE }, 76 + { .cache = REGCACHE_FLAT }, 77 + { .cache = REGCACHE_RBTREE }, 78 + { .cache = REGCACHE_MAPLE }, 65 79 }; 66 80 67 - KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc); 81 + KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc); 68 82 69 - static const struct regcache_types real_cache_types_list[] = { 70 - { REGCACHE_FLAT, "flat" }, 71 - { REGCACHE_RBTREE, "rbtree" }, 72 - { REGCACHE_MAPLE, "maple" }, 83 + static const struct regmap_test_param real_cache_types_only_list[] = { 84 + { .cache = REGCACHE_FLAT }, 85 + { .cache = REGCACHE_RBTREE }, 86 + { .cache = REGCACHE_MAPLE }, 73 87 }; 74 88 75 - KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc); 89 + KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc); 76 90 77 - static const struct regcache_types sparse_cache_types_list[] = { 78 - { REGCACHE_RBTREE, "rbtree" }, 79 - { REGCACHE_MAPLE, "maple" }, 91 + static const struct regmap_test_param real_cache_types_list[] = { 92 + { .cache = REGCACHE_FLAT, .from_reg = 0 }, 93 + { .cache = REGCACHE_FLAT, .from_reg = 0x2001 }, 94 + { .cache = REGCACHE_FLAT, .from_reg = 0x2002 }, 95 + { .cache = REGCACHE_FLAT, .from_reg = 0x2003 }, 96 + { .cache = REGCACHE_FLAT, .from_reg = 0x2004 }, 97 + { .cache = REGCACHE_RBTREE, .from_reg = 0 }, 98 + { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 }, 99 + { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 }, 100 + { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 }, 101 + { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 }, 102 + { .cache = REGCACHE_MAPLE, .from_reg = 0 }, 103 + { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 }, 104 + { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 }, 105 + { .cache = 
REGCACHE_MAPLE, .from_reg = 0x2003 }, 106 + { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 }, 80 107 }; 81 108 82 - KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc); 109 + KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc); 83 110 84 - static struct regmap *gen_regmap(struct regmap_config *config, 111 + static const struct regmap_test_param sparse_cache_types_list[] = { 112 + { .cache = REGCACHE_RBTREE, .from_reg = 0 }, 113 + { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 }, 114 + { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 }, 115 + { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 }, 116 + { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 }, 117 + { .cache = REGCACHE_MAPLE, .from_reg = 0 }, 118 + { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 }, 119 + { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 }, 120 + { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 }, 121 + { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 }, 122 + }; 123 + 124 + KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc); 125 + 126 + static struct regmap *gen_regmap(struct kunit *test, 127 + struct regmap_config *config, 85 128 struct regmap_ram_data **data) 86 129 { 130 + const struct regmap_test_param *param = test->param_value; 131 + struct regmap_test_priv *priv = test->priv; 87 132 unsigned int *buf; 88 133 struct regmap *ret; 89 - size_t size = (config->max_register + 1) * sizeof(unsigned int); 134 + size_t size; 90 135 int i; 91 136 struct reg_default *defaults; 92 137 138 + config->cache_type = param->cache; 93 139 config->disable_locking = config->cache_type == REGCACHE_RBTREE || 94 140 config->cache_type == REGCACHE_MAPLE; 95 141 142 + if (config->max_register == 0) { 143 + config->max_register = param->from_reg; 144 + if (config->num_reg_defaults) 145 + config->max_register += (config->num_reg_defaults - 1) * 146 + config->reg_stride; 147 + else 148 + config->max_register += (BLOCK_TEST_SIZE * config->reg_stride); 149 + } 
150 + 151 + size = (config->max_register + 1) * sizeof(unsigned int); 96 152 buf = kmalloc(size, GFP_KERNEL); 97 153 if (!buf) 98 154 return ERR_PTR(-ENOMEM); ··· 184 98 config->reg_defaults = defaults; 185 99 186 100 for (i = 0; i < config->num_reg_defaults; i++) { 187 - defaults[i].reg = i * config->reg_stride; 188 - defaults[i].def = buf[i * config->reg_stride]; 101 + defaults[i].reg = param->from_reg + (i * config->reg_stride); 102 + defaults[i].def = buf[param->from_reg + (i * config->reg_stride)]; 189 103 } 190 104 } 191 105 192 - ret = regmap_init_ram(config, *data); 106 + ret = regmap_init_ram(priv->dev, config, *data); 193 107 if (IS_ERR(ret)) { 194 108 kfree(buf); 195 109 kfree(*data); 110 + } else { 111 + kunit_add_action(test, regmap_exit_action, ret); 196 112 } 197 113 198 114 return ret; 199 115 } 200 116 201 - static bool reg_5_false(struct device *context, unsigned int reg) 117 + static bool reg_5_false(struct device *dev, unsigned int reg) 202 118 { 203 - return reg != 5; 119 + struct kunit *test = dev_get_drvdata(dev); 120 + const struct regmap_test_param *param = test->param_value; 121 + 122 + return reg != (param->from_reg + 5); 204 123 } 205 124 206 125 static void basic_read_write(struct kunit *test) 207 126 { 208 - struct regcache_types *t = (struct regcache_types *)test->param_value; 209 127 struct regmap *map; 210 128 struct regmap_config config; 211 129 struct regmap_ram_data *data; 212 130 unsigned int val, rval; 213 131 214 132 config = test_regmap_config; 215 - config.cache_type = t->type; 216 133 217 - map = gen_regmap(&config, &data); 134 + map = gen_regmap(test, &config, &data); 218 135 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 219 136 if (IS_ERR(map)) 220 137 return; ··· 230 141 KUNIT_EXPECT_EQ(test, val, rval); 231 142 232 143 /* If using a cache the cache satisfied the read */ 233 - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]); 234 - 235 - regmap_exit(map); 144 + KUNIT_EXPECT_EQ(test, config.cache_type == 
REGCACHE_NONE, data->read[0]); 236 145 } 237 146 238 147 static void bulk_write(struct kunit *test) 239 148 { 240 - struct regcache_types *t = (struct regcache_types *)test->param_value; 241 149 struct regmap *map; 242 150 struct regmap_config config; 243 151 struct regmap_ram_data *data; ··· 242 156 int i; 243 157 244 158 config = test_regmap_config; 245 - config.cache_type = t->type; 246 159 247 - map = gen_regmap(&config, &data); 160 + map = gen_regmap(test, &config, &data); 248 161 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 249 162 if (IS_ERR(map)) 250 163 return; ··· 263 178 264 179 /* If using a cache the cache satisfied the read */ 265 180 for (i = 0; i < BLOCK_TEST_SIZE; i++) 266 - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); 267 - 268 - regmap_exit(map); 181 + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 269 182 } 270 183 271 184 static void bulk_read(struct kunit *test) 272 185 { 273 - struct regcache_types *t = (struct regcache_types *)test->param_value; 274 186 struct regmap *map; 275 187 struct regmap_config config; 276 188 struct regmap_ram_data *data; ··· 275 193 int i; 276 194 277 195 config = test_regmap_config; 278 - config.cache_type = t->type; 279 196 280 - map = gen_regmap(&config, &data); 197 + map = gen_regmap(test, &config, &data); 281 198 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 282 199 if (IS_ERR(map)) 283 200 return; ··· 292 211 293 212 /* If using a cache the cache satisfied the read */ 294 213 for (i = 0; i < BLOCK_TEST_SIZE; i++) 295 - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); 214 + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 215 + } 296 216 297 - regmap_exit(map); 217 + static void read_bypassed(struct kunit *test) 218 + { 219 + const struct regmap_test_param *param = test->param_value; 220 + struct regmap *map; 221 + struct regmap_config config; 222 + struct regmap_ram_data *data; 223 + unsigned int val[BLOCK_TEST_SIZE], rval; 224 + int i; 
225 + 226 + config = test_regmap_config; 227 + 228 + map = gen_regmap(test, &config, &data); 229 + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 230 + if (IS_ERR(map)) 231 + return; 232 + 233 + KUNIT_EXPECT_FALSE(test, map->cache_bypass); 234 + 235 + get_random_bytes(&val, sizeof(val)); 236 + 237 + /* Write some test values */ 238 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val))); 239 + 240 + regcache_cache_only(map, true); 241 + 242 + /* 243 + * While in cache-only regmap_read_bypassed() should return the register 244 + * value and leave the map in cache-only. 245 + */ 246 + for (i = 0; i < ARRAY_SIZE(val); i++) { 247 + /* Put inverted bits in rval to prove we really read the value */ 248 + rval = ~val[i]; 249 + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval)); 250 + KUNIT_EXPECT_EQ(test, val[i], rval); 251 + 252 + rval = ~val[i]; 253 + KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); 254 + KUNIT_EXPECT_EQ(test, val[i], rval); 255 + KUNIT_EXPECT_TRUE(test, map->cache_only); 256 + KUNIT_EXPECT_FALSE(test, map->cache_bypass); 257 + } 258 + 259 + /* 260 + * Change the underlying register values to prove it is returning 261 + * real values not cached values. 
262 + */ 263 + for (i = 0; i < ARRAY_SIZE(val); i++) { 264 + val[i] = ~val[i]; 265 + data->vals[param->from_reg + i] = val[i]; 266 + } 267 + 268 + for (i = 0; i < ARRAY_SIZE(val); i++) { 269 + rval = ~val[i]; 270 + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval)); 271 + KUNIT_EXPECT_NE(test, val[i], rval); 272 + 273 + rval = ~val[i]; 274 + KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); 275 + KUNIT_EXPECT_EQ(test, val[i], rval); 276 + KUNIT_EXPECT_TRUE(test, map->cache_only); 277 + KUNIT_EXPECT_FALSE(test, map->cache_bypass); 278 + } 279 + } 280 + 281 + static void read_bypassed_volatile(struct kunit *test) 282 + { 283 + const struct regmap_test_param *param = test->param_value; 284 + struct regmap *map; 285 + struct regmap_config config; 286 + struct regmap_ram_data *data; 287 + unsigned int val[BLOCK_TEST_SIZE], rval; 288 + int i; 289 + 290 + config = test_regmap_config; 291 + /* All registers except #5 volatile */ 292 + config.volatile_reg = reg_5_false; 293 + 294 + map = gen_regmap(test, &config, &data); 295 + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 296 + if (IS_ERR(map)) 297 + return; 298 + 299 + KUNIT_EXPECT_FALSE(test, map->cache_bypass); 300 + 301 + get_random_bytes(&val, sizeof(val)); 302 + 303 + /* Write some test values */ 304 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val))); 305 + 306 + regcache_cache_only(map, true); 307 + 308 + /* 309 + * While in cache-only regmap_read_bypassed() should return the register 310 + * value and leave the map in cache-only. 311 + */ 312 + for (i = 0; i < ARRAY_SIZE(val); i++) { 313 + /* Register #5 is non-volatile so should read from cache */ 314 + KUNIT_EXPECT_EQ(test, (i == 5) ? 
0 : -EBUSY, 315 + regmap_read(map, param->from_reg + i, &rval)); 316 + 317 + /* Put inverted bits in rval to prove we really read the value */ 318 + rval = ~val[i]; 319 + KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); 320 + KUNIT_EXPECT_EQ(test, val[i], rval); 321 + KUNIT_EXPECT_TRUE(test, map->cache_only); 322 + KUNIT_EXPECT_FALSE(test, map->cache_bypass); 323 + } 324 + 325 + /* 326 + * Change the underlying register values to prove it is returning 327 + * real values not cached values. 328 + */ 329 + for (i = 0; i < ARRAY_SIZE(val); i++) { 330 + val[i] = ~val[i]; 331 + data->vals[param->from_reg + i] = val[i]; 332 + } 333 + 334 + for (i = 0; i < ARRAY_SIZE(val); i++) { 335 + if (i == 5) 336 + continue; 337 + 338 + rval = ~val[i]; 339 + KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval)); 340 + KUNIT_EXPECT_EQ(test, val[i], rval); 341 + KUNIT_EXPECT_TRUE(test, map->cache_only); 342 + KUNIT_EXPECT_FALSE(test, map->cache_bypass); 343 + } 298 344 } 299 345 300 346 static void write_readonly(struct kunit *test) 301 347 { 302 - struct regcache_types *t = (struct regcache_types *)test->param_value; 303 348 struct regmap *map; 304 349 struct regmap_config config; 305 350 struct regmap_ram_data *data; ··· 433 226 int i; 434 227 435 228 config = test_regmap_config; 436 - config.cache_type = t->type; 437 229 config.num_reg_defaults = BLOCK_TEST_SIZE; 438 230 config.writeable_reg = reg_5_false; 439 231 440 - map = gen_regmap(&config, &data); 232 + map = gen_regmap(test, &config, &data); 441 233 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 442 234 if (IS_ERR(map)) 443 235 return; ··· 453 247 /* Did that match what we see on the device? 
*/ 454 248 for (i = 0; i < BLOCK_TEST_SIZE; i++) 455 249 KUNIT_EXPECT_EQ(test, i != 5, data->written[i]); 456 - 457 - regmap_exit(map); 458 250 } 459 251 460 252 static void read_writeonly(struct kunit *test) 461 253 { 462 - struct regcache_types *t = (struct regcache_types *)test->param_value; 463 254 struct regmap *map; 464 255 struct regmap_config config; 465 256 struct regmap_ram_data *data; ··· 464 261 int i; 465 262 466 263 config = test_regmap_config; 467 - config.cache_type = t->type; 468 264 config.readable_reg = reg_5_false; 469 265 470 - map = gen_regmap(&config, &data); 266 + map = gen_regmap(test, &config, &data); 471 267 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 472 268 if (IS_ERR(map)) 473 269 return; ··· 479 277 * fail if we aren't using the flat cache. 480 278 */ 481 279 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 482 - if (t->type != REGCACHE_FLAT) { 280 + if (config.cache_type != REGCACHE_FLAT) { 483 281 KUNIT_EXPECT_EQ(test, i != 5, 484 282 regmap_read(map, i, &val) == 0); 485 283 } else { ··· 489 287 490 288 /* Did we trigger a hardware access? 
*/ 491 289 KUNIT_EXPECT_FALSE(test, data->read[5]); 492 - 493 - regmap_exit(map); 494 290 } 495 291 496 292 static void reg_defaults(struct kunit *test) 497 293 { 498 - struct regcache_types *t = (struct regcache_types *)test->param_value; 499 294 struct regmap *map; 500 295 struct regmap_config config; 501 296 struct regmap_ram_data *data; ··· 500 301 int i; 501 302 502 303 config = test_regmap_config; 503 - config.cache_type = t->type; 504 304 config.num_reg_defaults = BLOCK_TEST_SIZE; 505 305 506 - map = gen_regmap(&config, &data); 306 + map = gen_regmap(test, &config, &data); 507 307 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 508 308 if (IS_ERR(map)) 509 309 return; ··· 514 316 515 317 /* The data should have been read from cache if there was one */ 516 318 for (i = 0; i < BLOCK_TEST_SIZE; i++) 517 - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); 319 + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 518 320 } 519 321 520 322 static void reg_defaults_read_dev(struct kunit *test) 521 323 { 522 - struct regcache_types *t = (struct regcache_types *)test->param_value; 523 324 struct regmap *map; 524 325 struct regmap_config config; 525 326 struct regmap_ram_data *data; ··· 526 329 int i; 527 330 528 331 config = test_regmap_config; 529 - config.cache_type = t->type; 530 332 config.num_reg_defaults_raw = BLOCK_TEST_SIZE; 531 333 532 - map = gen_regmap(&config, &data); 334 + map = gen_regmap(test, &config, &data); 533 335 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 534 336 if (IS_ERR(map)) 535 337 return; 536 338 537 339 /* We should have read the cache defaults back from the map */ 538 340 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 539 - KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]); 341 + KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]); 540 342 data->read[i] = false; 541 343 } 542 344 ··· 546 350 547 351 /* The data should have been read from cache if there was one */ 548 352 for (i = 0; i < 
BLOCK_TEST_SIZE; i++) 549 - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); 353 + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 550 354 } 551 355 552 356 static void register_patch(struct kunit *test) 553 357 { 554 - struct regcache_types *t = (struct regcache_types *)test->param_value; 555 358 struct regmap *map; 556 359 struct regmap_config config; 557 360 struct regmap_ram_data *data; ··· 560 365 561 366 /* We need defaults so readback works */ 562 367 config = test_regmap_config; 563 - config.cache_type = t->type; 564 368 config.num_reg_defaults = BLOCK_TEST_SIZE; 565 369 566 - map = gen_regmap(&config, &data); 370 + map = gen_regmap(test, &config, &data); 567 371 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 568 372 if (IS_ERR(map)) 569 373 return; ··· 595 401 break; 596 402 } 597 403 } 598 - 599 - regmap_exit(map); 600 404 } 601 405 602 406 static void stride(struct kunit *test) 603 407 { 604 - struct regcache_types *t = (struct regcache_types *)test->param_value; 605 408 struct regmap *map; 606 409 struct regmap_config config; 607 410 struct regmap_ram_data *data; ··· 606 415 int i; 607 416 608 417 config = test_regmap_config; 609 - config.cache_type = t->type; 610 418 config.reg_stride = 2; 611 419 config.num_reg_defaults = BLOCK_TEST_SIZE / 2; 612 420 613 - map = gen_regmap(&config, &data); 421 + map = gen_regmap(test, &config, &data); 614 422 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 615 423 if (IS_ERR(map)) 616 424 return; ··· 627 437 } else { 628 438 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); 629 439 KUNIT_EXPECT_EQ(test, data->vals[i], rval); 630 - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, 440 + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, 631 441 data->read[i]); 632 442 633 443 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval)); 634 444 KUNIT_EXPECT_TRUE(test, data->written[i]); 635 445 } 636 446 } 637 - 638 - regmap_exit(map); 639 447 } 640 448 641 449 static struct regmap_range_cfg 
test_range = { ··· 669 481 670 482 static void basic_ranges(struct kunit *test) 671 483 { 672 - struct regcache_types *t = (struct regcache_types *)test->param_value; 673 484 struct regmap *map; 674 485 struct regmap_config config; 675 486 struct regmap_ram_data *data; ··· 676 489 int i; 677 490 678 491 config = test_regmap_config; 679 - config.cache_type = t->type; 680 492 config.volatile_reg = test_range_all_volatile; 681 493 config.ranges = &test_range; 682 494 config.num_ranges = 1; 683 495 config.max_register = test_range.range_max; 684 496 685 - map = gen_regmap(&config, &data); 497 + map = gen_regmap(test, &config, &data); 686 498 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 687 499 if (IS_ERR(map)) 688 500 return; ··· 732 546 KUNIT_EXPECT_FALSE(test, data->read[i]); 733 547 KUNIT_EXPECT_FALSE(test, data->written[i]); 734 548 } 735 - 736 - regmap_exit(map); 737 549 } 738 550 739 551 /* Try to stress dynamic creation of cache data structures */ 740 552 static void stress_insert(struct kunit *test) 741 553 { 742 - struct regcache_types *t = (struct regcache_types *)test->param_value; 743 554 struct regmap *map; 744 555 struct regmap_config config; 745 556 struct regmap_ram_data *data; ··· 745 562 int i; 746 563 747 564 config = test_regmap_config; 748 - config.cache_type = t->type; 749 565 config.max_register = 300; 750 566 751 - map = gen_regmap(&config, &data); 567 + map = gen_regmap(test, &config, &data); 752 568 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 753 569 if (IS_ERR(map)) 754 570 return; ··· 781 599 for (i = 0; i < config.max_register; i ++) { 782 600 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); 783 601 KUNIT_EXPECT_EQ(test, rval, vals[i]); 784 - KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]); 602 + KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]); 785 603 } 786 - 787 - regmap_exit(map); 788 604 } 789 605 790 606 static void cache_bypass(struct kunit *test) 791 607 { 792 - struct regcache_types *t = (struct 
regcache_types *)test->param_value; 608 + const struct regmap_test_param *param = test->param_value; 793 609 struct regmap *map; 794 610 struct regmap_config config; 795 611 struct regmap_ram_data *data; 796 612 unsigned int val, rval; 797 613 798 614 config = test_regmap_config; 799 - config.cache_type = t->type; 800 615 801 - map = gen_regmap(&config, &data); 616 + map = gen_regmap(test, &config, &data); 802 617 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 803 618 if (IS_ERR(map)) 804 619 return; ··· 803 624 get_random_bytes(&val, sizeof(val)); 804 625 805 626 /* Ensure the cache has a value in it */ 806 - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val)); 627 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val)); 807 628 808 629 /* Bypass then write a different value */ 809 630 regcache_cache_bypass(map, true); 810 - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1)); 631 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1)); 811 632 812 633 /* Read the bypassed value */ 813 - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); 634 + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval)); 814 635 KUNIT_EXPECT_EQ(test, val + 1, rval); 815 - KUNIT_EXPECT_EQ(test, data->vals[0], rval); 636 + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval); 816 637 817 638 /* Disable bypass, the cache should still return the original value */ 818 639 regcache_cache_bypass(map, false); 819 - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); 640 + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval)); 820 641 KUNIT_EXPECT_EQ(test, val, rval); 821 - 822 - regmap_exit(map); 823 642 } 824 643 825 - static void cache_sync(struct kunit *test) 644 + static void cache_sync_marked_dirty(struct kunit *test) 826 645 { 827 - struct regcache_types *t = (struct regcache_types *)test->param_value; 646 + const struct regmap_test_param *param = test->param_value; 828 647 struct regmap *map; 829 648 struct regmap_config config; 
830 649 struct regmap_ram_data *data; ··· 830 653 int i; 831 654 832 655 config = test_regmap_config; 833 - config.cache_type = t->type; 834 656 835 - map = gen_regmap(&config, &data); 657 + map = gen_regmap(test, &config, &data); 836 658 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 837 659 if (IS_ERR(map)) 838 660 return; ··· 839 663 get_random_bytes(&val, sizeof(val)); 840 664 841 665 /* Put some data into the cache */ 842 - KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val, 666 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, 843 667 BLOCK_TEST_SIZE)); 844 668 for (i = 0; i < BLOCK_TEST_SIZE; i++) 845 - data->written[i] = false; 669 + data->written[param->from_reg + i] = false; 846 670 847 671 /* Trash the data on the device itself then resync */ 848 672 regcache_mark_dirty(map); ··· 850 674 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 851 675 852 676 /* Did we just write the correct data out? */ 853 - KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val)); 677 + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val)); 854 678 for (i = 0; i < BLOCK_TEST_SIZE; i++) 855 - KUNIT_EXPECT_EQ(test, true, data->written[i]); 856 - 857 - regmap_exit(map); 679 + KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]); 858 680 } 859 681 860 - static void cache_sync_defaults(struct kunit *test) 682 + static void cache_sync_after_cache_only(struct kunit *test) 861 683 { 862 - struct regcache_types *t = (struct regcache_types *)test->param_value; 684 + const struct regmap_test_param *param = test->param_value; 685 + struct regmap *map; 686 + struct regmap_config config; 687 + struct regmap_ram_data *data; 688 + unsigned int val[BLOCK_TEST_SIZE]; 689 + unsigned int val_mask; 690 + int i; 691 + 692 + config = test_regmap_config; 693 + 694 + map = gen_regmap(test, &config, &data); 695 + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 696 + if (IS_ERR(map)) 697 + return; 698 + 699 + val_mask = GENMASK(config.val_bits - 1, 0); 700 + 
get_random_bytes(&val, sizeof(val)); 701 + 702 + /* Put some data into the cache */ 703 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, 704 + BLOCK_TEST_SIZE)); 705 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 706 + data->written[param->from_reg + i] = false; 707 + 708 + /* Set cache-only and change the values */ 709 + regcache_cache_only(map, true); 710 + for (i = 0; i < ARRAY_SIZE(val); ++i) 711 + val[i] = ~val[i] & val_mask; 712 + 713 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, 714 + BLOCK_TEST_SIZE)); 715 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 716 + KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]); 717 + 718 + KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val)); 719 + 720 + /* Exit cache-only and sync the cache without marking hardware registers dirty */ 721 + regcache_cache_only(map, false); 722 + 723 + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 724 + 725 + /* Did we just write the correct data out? 
*/ 726 + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val)); 727 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 728 + KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]); 729 + } 730 + 731 + static void cache_sync_defaults_marked_dirty(struct kunit *test) 732 + { 733 + const struct regmap_test_param *param = test->param_value; 863 734 struct regmap *map; 864 735 struct regmap_config config; 865 736 struct regmap_ram_data *data; ··· 914 691 int i; 915 692 916 693 config = test_regmap_config; 917 - config.cache_type = t->type; 918 694 config.num_reg_defaults = BLOCK_TEST_SIZE; 919 695 920 - map = gen_regmap(&config, &data); 696 + map = gen_regmap(test, &config, &data); 921 697 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 922 698 if (IS_ERR(map)) 923 699 return; ··· 924 702 get_random_bytes(&val, sizeof(val)); 925 703 926 704 /* Change the value of one register */ 927 - KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val)); 705 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val)); 928 706 929 707 /* Resync */ 930 708 regcache_mark_dirty(map); 931 709 for (i = 0; i < BLOCK_TEST_SIZE; i++) 932 - data->written[i] = false; 710 + data->written[param->from_reg + i] = false; 933 711 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 934 712 935 713 /* Did we just sync the one register we touched? 
*/ 936 714 for (i = 0; i < BLOCK_TEST_SIZE; i++) 937 - KUNIT_EXPECT_EQ(test, i == 2, data->written[i]); 715 + KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]); 938 716 939 - regmap_exit(map); 717 + /* Rewrite registers back to their defaults */ 718 + for (i = 0; i < config.num_reg_defaults; ++i) 719 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg, 720 + config.reg_defaults[i].def)); 721 + 722 + /* 723 + * Resync after regcache_mark_dirty() should not write out registers 724 + * that are at default value 725 + */ 726 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 727 + data->written[param->from_reg + i] = false; 728 + regcache_mark_dirty(map); 729 + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 730 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 731 + KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]); 732 + } 733 + 734 + static void cache_sync_default_after_cache_only(struct kunit *test) 735 + { 736 + const struct regmap_test_param *param = test->param_value; 737 + struct regmap *map; 738 + struct regmap_config config; 739 + struct regmap_ram_data *data; 740 + unsigned int orig_val; 741 + int i; 742 + 743 + config = test_regmap_config; 744 + config.num_reg_defaults = BLOCK_TEST_SIZE; 745 + 746 + map = gen_regmap(test, &config, &data); 747 + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 748 + if (IS_ERR(map)) 749 + return; 750 + 751 + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val)); 752 + 753 + /* Enter cache-only and change the value of one register */ 754 + regcache_cache_only(map, true); 755 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1)); 756 + 757 + /* Exit cache-only and resync, should write out the changed register */ 758 + regcache_cache_only(map, false); 759 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 760 + data->written[param->from_reg + i] = false; 761 + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 762 + 763 + /* Was the register written out? 
*/ 764 + KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]); 765 + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1); 766 + 767 + /* Enter cache-only and write register back to its default value */ 768 + regcache_cache_only(map, true); 769 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val)); 770 + 771 + /* Resync should write out the new value */ 772 + regcache_cache_only(map, false); 773 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 774 + data->written[param->from_reg + i] = false; 775 + 776 + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 777 + KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]); 778 + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val); 940 779 } 941 780 942 781 static void cache_sync_readonly(struct kunit *test) 943 782 { 944 - struct regcache_types *t = (struct regcache_types *)test->param_value; 783 + const struct regmap_test_param *param = test->param_value; 945 784 struct regmap *map; 946 785 struct regmap_config config; 947 786 struct regmap_ram_data *data; ··· 1010 727 int i; 1011 728 1012 729 config = test_regmap_config; 1013 - config.cache_type = t->type; 1014 730 config.writeable_reg = reg_5_false; 1015 731 1016 - map = gen_regmap(&config, &data); 732 + map = gen_regmap(test, &config, &data); 1017 733 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1018 734 if (IS_ERR(map)) 1019 735 return; 1020 736 1021 737 /* Read all registers to fill the cache */ 1022 738 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1023 - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val)); 739 + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); 1024 740 1025 741 /* Change the value of all registers, readonly should fail */ 1026 742 get_random_bytes(&val, sizeof(val)); 1027 743 regcache_cache_only(map, true); 1028 744 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1029 - KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0); 745 + KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg 
+ i, val) == 0); 1030 746 regcache_cache_only(map, false); 1031 747 1032 748 /* Resync */ 1033 749 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1034 - data->written[i] = false; 750 + data->written[param->from_reg + i] = false; 1035 751 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1036 752 1037 753 /* Did that match what we see on the device? */ 1038 754 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1039 - KUNIT_EXPECT_EQ(test, i != 5, data->written[i]); 1040 - 1041 - regmap_exit(map); 755 + KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]); 1042 756 } 1043 757 1044 758 static void cache_sync_patch(struct kunit *test) 1045 759 { 1046 - struct regcache_types *t = (struct regcache_types *)test->param_value; 760 + const struct regmap_test_param *param = test->param_value; 1047 761 struct regmap *map; 1048 762 struct regmap_config config; 1049 763 struct regmap_ram_data *data; ··· 1050 770 1051 771 /* We need defaults so readback works */ 1052 772 config = test_regmap_config; 1053 - config.cache_type = t->type; 1054 773 config.num_reg_defaults = BLOCK_TEST_SIZE; 1055 774 1056 - map = gen_regmap(&config, &data); 775 + map = gen_regmap(test, &config, &data); 1057 776 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1058 777 if (IS_ERR(map)) 1059 778 return; 1060 779 1061 780 /* Stash the original values */ 1062 - KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, 781 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1063 782 BLOCK_TEST_SIZE)); 1064 783 1065 784 /* Patch a couple of values */ 1066 - patch[0].reg = 2; 785 + patch[0].reg = param->from_reg + 2; 1067 786 patch[0].def = rval[2] + 1; 1068 787 patch[0].delay_us = 0; 1069 - patch[1].reg = 5; 788 + patch[1].reg = param->from_reg + 5; 1070 789 patch[1].def = rval[5] + 1; 1071 790 patch[1].delay_us = 0; 1072 791 KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch, ··· 1074 795 /* Sync the cache */ 1075 796 regcache_mark_dirty(map); 1076 797 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1077 - 
data->written[i] = false; 798 + data->written[param->from_reg + i] = false; 1078 799 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1079 800 1080 801 /* The patch should be on the device but not in the cache */ 1081 802 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 1082 - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val)); 803 + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); 1083 804 KUNIT_EXPECT_EQ(test, val, rval[i]); 1084 805 1085 806 switch (i) { 1086 807 case 2: 1087 808 case 5: 1088 - KUNIT_EXPECT_EQ(test, true, data->written[i]); 1089 - KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1); 809 + KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]); 810 + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1); 1090 811 break; 1091 812 default: 1092 - KUNIT_EXPECT_EQ(test, false, data->written[i]); 1093 - KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]); 813 + KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]); 814 + KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]); 1094 815 break; 1095 816 } 1096 817 } 1097 - 1098 - regmap_exit(map); 1099 818 } 1100 819 1101 820 static void cache_drop(struct kunit *test) 1102 821 { 1103 - struct regcache_types *t = (struct regcache_types *)test->param_value; 822 + const struct regmap_test_param *param = test->param_value; 1104 823 struct regmap *map; 1105 824 struct regmap_config config; 1106 825 struct regmap_ram_data *data; ··· 1106 829 int i; 1107 830 1108 831 config = test_regmap_config; 1109 - config.cache_type = t->type; 1110 832 config.num_reg_defaults = BLOCK_TEST_SIZE; 1111 833 1112 - map = gen_regmap(&config, &data); 834 + map = gen_regmap(test, &config, &data); 1113 835 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1114 836 if (IS_ERR(map)) 1115 837 return; 1116 838 1117 839 /* Ensure the data is read from the cache */ 1118 840 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1119 - data->read[i] = false; 1120 - KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, 
841 + data->read[param->from_reg + i] = false; 842 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1121 843 BLOCK_TEST_SIZE)); 1122 844 for (i = 0; i < BLOCK_TEST_SIZE; i++) { 1123 - KUNIT_EXPECT_FALSE(test, data->read[i]); 1124 - data->read[i] = false; 845 + KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]); 846 + data->read[param->from_reg + i] = false; 1125 847 } 1126 - KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); 848 + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 1127 849 1128 850 /* Drop some registers */ 1129 - KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5)); 851 + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3, 852 + param->from_reg + 5)); 1130 853 1131 854 /* Reread and check only the dropped registers hit the device. */ 1132 - KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval, 855 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1133 856 BLOCK_TEST_SIZE)); 1134 857 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1135 - KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5); 1136 - KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval)); 858 + KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5); 859 + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 860 + } 1137 861 1138 - regmap_exit(map); 862 + static void cache_drop_with_non_contiguous_ranges(struct kunit *test) 863 + { 864 + const struct regmap_test_param *param = test->param_value; 865 + struct regmap *map; 866 + struct regmap_config config; 867 + struct regmap_ram_data *data; 868 + unsigned int val[4][BLOCK_TEST_SIZE]; 869 + unsigned int reg; 870 + const int num_ranges = ARRAY_SIZE(val) * 2; 871 + int rangeidx, i; 872 + 873 + static_assert(ARRAY_SIZE(val) == 4); 874 + 875 + config = test_regmap_config; 876 + config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE); 877 + 878 + map = gen_regmap(test, &config, 
&data); 879 + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 880 + if (IS_ERR(map)) 881 + return; 882 + 883 + for (i = 0; i < config.max_register + 1; i++) 884 + data->written[i] = false; 885 + 886 + /* Create non-contiguous cache blocks by writing every other range */ 887 + get_random_bytes(&val, sizeof(val)); 888 + for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) { 889 + reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); 890 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg, 891 + &val[rangeidx / 2], 892 + BLOCK_TEST_SIZE)); 893 + KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], 894 + &val[rangeidx / 2], sizeof(val[rangeidx / 2])); 895 + } 896 + 897 + /* Check that odd ranges weren't written */ 898 + for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) { 899 + reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); 900 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 901 + KUNIT_EXPECT_FALSE(test, data->written[reg + i]); 902 + } 903 + 904 + /* Drop range 2 */ 905 + reg = param->from_reg + (2 * BLOCK_TEST_SIZE); 906 + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1)); 907 + 908 + /* Drop part of range 4 */ 909 + reg = param->from_reg + (4 * BLOCK_TEST_SIZE); 910 + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5)); 911 + 912 + /* Mark dirty and reset mock registers to 0 */ 913 + regcache_mark_dirty(map); 914 + for (i = 0; i < config.max_register + 1; i++) { 915 + data->vals[i] = 0; 916 + data->written[i] = false; 917 + } 918 + 919 + /* The registers that were dropped from range 4 should now remain at 0 */ 920 + val[4 / 2][3] = 0; 921 + val[4 / 2][4] = 0; 922 + val[4 / 2][5] = 0; 923 + 924 + /* Sync and check that the expected register ranges were written */ 925 + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 926 + 927 + /* Check that odd ranges weren't written */ 928 + for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) { 929 + reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); 930 + for (i = 0; i < 
BLOCK_TEST_SIZE; i++) 931 + KUNIT_EXPECT_FALSE(test, data->written[reg + i]); 932 + } 933 + 934 + /* Check that even ranges (except 2 and 4) were written */ 935 + for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) { 936 + if ((rangeidx == 2) || (rangeidx == 4)) 937 + continue; 938 + 939 + reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE); 940 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 941 + KUNIT_EXPECT_TRUE(test, data->written[reg + i]); 942 + 943 + KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], 944 + &val[rangeidx / 2], sizeof(val[rangeidx / 2])); 945 + } 946 + 947 + /* Check that range 2 wasn't written */ 948 + reg = param->from_reg + (2 * BLOCK_TEST_SIZE); 949 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 950 + KUNIT_EXPECT_FALSE(test, data->written[reg + i]); 951 + 952 + /* Check that range 4 was partially written */ 953 + reg = param->from_reg + (4 * BLOCK_TEST_SIZE); 954 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 955 + KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5); 956 + 957 + KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2])); 958 + 959 + /* Nothing before param->from_reg should have been written */ 960 + for (i = 0; i < param->from_reg; i++) 961 + KUNIT_EXPECT_FALSE(test, data->written[i]); 962 + } 963 + 964 + static void cache_drop_all_and_sync_marked_dirty(struct kunit *test) 965 + { 966 + const struct regmap_test_param *param = test->param_value; 967 + struct regmap *map; 968 + struct regmap_config config; 969 + struct regmap_ram_data *data; 970 + unsigned int rval[BLOCK_TEST_SIZE]; 971 + int i; 972 + 973 + config = test_regmap_config; 974 + config.num_reg_defaults = BLOCK_TEST_SIZE; 975 + 976 + map = gen_regmap(test, &config, &data); 977 + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 978 + if (IS_ERR(map)) 979 + return; 980 + 981 + /* Ensure the data is read from the cache */ 982 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 983 + data->read[param->from_reg + i] = false; 984 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 
param->from_reg, rval, 985 + BLOCK_TEST_SIZE)); 986 + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 987 + 988 + /* Change all values in cache from defaults */ 989 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 990 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); 991 + 992 + /* Drop all registers */ 993 + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); 994 + 995 + /* Mark dirty and cache sync should not write anything. */ 996 + regcache_mark_dirty(map); 997 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 998 + data->written[param->from_reg + i] = false; 999 + 1000 + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1001 + for (i = 0; i <= config.max_register; i++) 1002 + KUNIT_EXPECT_FALSE(test, data->written[i]); 1003 + } 1004 + 1005 + static void cache_drop_all_and_sync_no_defaults(struct kunit *test) 1006 + { 1007 + const struct regmap_test_param *param = test->param_value; 1008 + struct regmap *map; 1009 + struct regmap_config config; 1010 + struct regmap_ram_data *data; 1011 + unsigned int rval[BLOCK_TEST_SIZE]; 1012 + int i; 1013 + 1014 + config = test_regmap_config; 1015 + 1016 + map = gen_regmap(test, &config, &data); 1017 + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1018 + if (IS_ERR(map)) 1019 + return; 1020 + 1021 + /* Ensure the data is read from the cache */ 1022 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 1023 + data->read[param->from_reg + i] = false; 1024 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1025 + BLOCK_TEST_SIZE)); 1026 + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 1027 + 1028 + /* Change all values in cache */ 1029 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 1030 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); 1031 + 1032 + /* Drop all registers */ 1033 + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); 1034 + 1035 + /* 1036 + * Sync cache without marking it 
dirty. All registers were dropped 1037 + * so the cache should not have any entries to write out. 1038 + */ 1039 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 1040 + data->written[param->from_reg + i] = false; 1041 + 1042 + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1043 + for (i = 0; i <= config.max_register; i++) 1044 + KUNIT_EXPECT_FALSE(test, data->written[i]); 1045 + } 1046 + 1047 + static void cache_drop_all_and_sync_has_defaults(struct kunit *test) 1048 + { 1049 + const struct regmap_test_param *param = test->param_value; 1050 + struct regmap *map; 1051 + struct regmap_config config; 1052 + struct regmap_ram_data *data; 1053 + unsigned int rval[BLOCK_TEST_SIZE]; 1054 + int i; 1055 + 1056 + config = test_regmap_config; 1057 + config.num_reg_defaults = BLOCK_TEST_SIZE; 1058 + 1059 + map = gen_regmap(test, &config, &data); 1060 + KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1061 + if (IS_ERR(map)) 1062 + return; 1063 + 1064 + /* Ensure the data is read from the cache */ 1065 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 1066 + data->read[param->from_reg + i] = false; 1067 + KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval, 1068 + BLOCK_TEST_SIZE)); 1069 + KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval)); 1070 + 1071 + /* Change all values in cache from defaults */ 1072 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 1073 + KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1)); 1074 + 1075 + /* Drop all registers */ 1076 + KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register)); 1077 + 1078 + /* 1079 + * Sync cache without marking it dirty. All registers were dropped 1080 + * so the cache should not have any entries to write out. 
1081 + */ 1082 + for (i = 0; i < BLOCK_TEST_SIZE; i++) 1083 + data->written[param->from_reg + i] = false; 1084 + 1085 + KUNIT_EXPECT_EQ(test, 0, regcache_sync(map)); 1086 + for (i = 0; i <= config.max_register; i++) 1087 + KUNIT_EXPECT_FALSE(test, data->written[i]); 1139 1088 } 1140 1089 1141 1090 static void cache_present(struct kunit *test) 1142 1091 { 1143 - struct regcache_types *t = (struct regcache_types *)test->param_value; 1092 + const struct regmap_test_param *param = test->param_value; 1144 1093 struct regmap *map; 1145 1094 struct regmap_config config; 1146 1095 struct regmap_ram_data *data; ··· 1374 871 int i; 1375 872 1376 873 config = test_regmap_config; 1377 - config.cache_type = t->type; 1378 874 1379 - map = gen_regmap(&config, &data); 875 + map = gen_regmap(test, &config, &data); 1380 876 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1381 877 if (IS_ERR(map)) 1382 878 return; 1383 879 1384 880 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1385 - data->read[i] = false; 881 + data->read[param->from_reg + i] = false; 1386 882 1387 883 /* No defaults so no registers cached. 
*/ 1388 884 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1389 - KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i)); 885 + KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i)); 1390 886 1391 887 /* We didn't trigger any reads */ 1392 888 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1393 - KUNIT_ASSERT_FALSE(test, data->read[i]); 889 + KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]); 1394 890 1395 891 /* Fill the cache */ 1396 892 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1397 - KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val)); 893 + KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val)); 1398 894 1399 895 /* Now everything should be cached */ 1400 896 for (i = 0; i < BLOCK_TEST_SIZE; i++) 1401 - KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i)); 1402 - 1403 - regmap_exit(map); 897 + KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i)); 1404 898 } 1405 899 1406 900 /* Check that caching the window register works with sync */ 1407 901 static void cache_range_window_reg(struct kunit *test) 1408 902 { 1409 - struct regcache_types *t = (struct regcache_types *)test->param_value; 1410 903 struct regmap *map; 1411 904 struct regmap_config config; 1412 905 struct regmap_ram_data *data; ··· 1410 911 int i; 1411 912 1412 913 config = test_regmap_config; 1413 - config.cache_type = t->type; 1414 914 config.volatile_reg = test_range_window_volatile; 1415 915 config.ranges = &test_range; 1416 916 config.num_ranges = 1; 1417 917 config.max_register = test_range.range_max; 1418 918 1419 - map = gen_regmap(&config, &data); 919 + map = gen_regmap(test, &config, &data); 1420 920 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1421 921 if (IS_ERR(map)) 1422 922 return; ··· 1451 953 KUNIT_ASSERT_EQ(test, val, 2); 1452 954 } 1453 955 1454 - struct raw_test_types { 1455 - const char *name; 1456 - 1457 - enum regcache_type cache_type; 1458 - enum regmap_endian val_endian; 956 + static const struct regmap_test_param raw_types_list[] = { 957 + { 
.cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE }, 958 + { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG }, 959 + { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE }, 960 + { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG }, 961 + { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE }, 962 + { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG }, 963 + { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE }, 964 + { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG }, 1459 965 }; 1460 966 1461 - static void raw_to_desc(const struct raw_test_types *t, char *desc) 1462 - { 1463 - strcpy(desc, t->name); 1464 - } 967 + KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc); 1465 968 1466 - static const struct raw_test_types raw_types_list[] = { 1467 - { "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE }, 1468 - { "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG }, 1469 - { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE }, 1470 - { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG }, 1471 - { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE }, 1472 - { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG }, 1473 - { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE }, 1474 - { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG }, 969 + static const struct regmap_test_param raw_cache_types_list[] = { 970 + { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE }, 971 + { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG }, 972 + { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE }, 973 + { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG }, 974 + { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE }, 975 + { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG }, 1475 976 }; 1476 977 1477 - KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc); 1478 - 1479 - static const struct raw_test_types raw_cache_types_list[] = { 1480 - { 
"flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE }, 1481 - { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG }, 1482 - { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE }, 1483 - { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG }, 1484 - { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE }, 1485 - { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG }, 1486 - }; 1487 - 1488 - KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc); 978 + KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc); 1489 979 1490 980 static const struct regmap_config raw_regmap_config = { 1491 981 .max_register = BLOCK_TEST_SIZE, ··· 1483 997 .val_bits = 16, 1484 998 }; 1485 999 1486 - static struct regmap *gen_raw_regmap(struct regmap_config *config, 1487 - struct raw_test_types *test_type, 1000 + static struct regmap *gen_raw_regmap(struct kunit *test, 1001 + struct regmap_config *config, 1488 1002 struct regmap_ram_data **data) 1489 1003 { 1004 + struct regmap_test_priv *priv = test->priv; 1005 + const struct regmap_test_param *param = test->param_value; 1490 1006 u16 *buf; 1491 1007 struct regmap *ret; 1492 1008 size_t size = (config->max_register + 1) * config->reg_bits / 8; 1493 1009 int i; 1494 1010 struct reg_default *defaults; 1495 1011 1496 - config->cache_type = test_type->cache_type; 1497 - config->val_format_endian = test_type->val_endian; 1012 + config->cache_type = param->cache; 1013 + config->val_format_endian = param->val_endian; 1498 1014 config->disable_locking = config->cache_type == REGCACHE_RBTREE || 1499 1015 config->cache_type == REGCACHE_MAPLE; 1500 1016 ··· 1521 1033 1522 1034 for (i = 0; i < config->num_reg_defaults; i++) { 1523 1035 defaults[i].reg = i; 1524 - switch (test_type->val_endian) { 1036 + switch (param->val_endian) { 1525 1037 case REGMAP_ENDIAN_LITTLE: 1526 1038 defaults[i].def = le16_to_cpu(buf[i]); 1527 1039 break; ··· 1540 1052 if (config->cache_type == REGCACHE_NONE) 1541 1053 
config->num_reg_defaults = 0; 1542 1054 1543 - ret = regmap_init_raw_ram(config, *data); 1055 + ret = regmap_init_raw_ram(priv->dev, config, *data); 1544 1056 if (IS_ERR(ret)) { 1545 1057 kfree(buf); 1546 1058 kfree(*data); 1059 + } else { 1060 + kunit_add_action(test, regmap_exit_action, ret); 1547 1061 } 1548 1062 1549 1063 return ret; ··· 1553 1063 1554 1064 static void raw_read_defaults_single(struct kunit *test) 1555 1065 { 1556 - struct raw_test_types *t = (struct raw_test_types *)test->param_value; 1557 1066 struct regmap *map; 1558 1067 struct regmap_config config; 1559 1068 struct regmap_ram_data *data; ··· 1561 1072 1562 1073 config = raw_regmap_config; 1563 1074 1564 - map = gen_raw_regmap(&config, t, &data); 1075 + map = gen_raw_regmap(test, &config, &data); 1565 1076 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1566 1077 if (IS_ERR(map)) 1567 1078 return; ··· 1571 1082 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval)); 1572 1083 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval); 1573 1084 } 1574 - 1575 - regmap_exit(map); 1576 1085 } 1577 1086 1578 1087 static void raw_read_defaults(struct kunit *test) 1579 1088 { 1580 - struct raw_test_types *t = (struct raw_test_types *)test->param_value; 1581 1089 struct regmap *map; 1582 1090 struct regmap_config config; 1583 1091 struct regmap_ram_data *data; ··· 1585 1099 1586 1100 config = raw_regmap_config; 1587 1101 1588 - map = gen_raw_regmap(&config, t, &data); 1102 + map = gen_raw_regmap(test, &config, &data); 1589 1103 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1590 1104 if (IS_ERR(map)) 1591 1105 return; 1592 1106 1593 1107 val_len = sizeof(*rval) * (config.max_register + 1); 1594 - rval = kmalloc(val_len, GFP_KERNEL); 1108 + rval = kunit_kmalloc(test, val_len, GFP_KERNEL); 1595 1109 KUNIT_ASSERT_TRUE(test, rval != NULL); 1596 1110 if (!rval) 1597 1111 return; 1598 - 1112 + 1599 1113 /* Check that we can read the defaults via the API */ 1600 1114 KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, 
val_len)); 1601 1115 for (i = 0; i < config.max_register + 1; i++) { 1602 1116 def = config.reg_defaults[i].def; 1603 1117 if (config.val_format_endian == REGMAP_ENDIAN_BIG) { 1604 - KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i])); 1118 + KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i])); 1605 1119 } else { 1606 - KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i])); 1120 + KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i])); 1607 1121 } 1608 1122 } 1609 - 1610 - kfree(rval); 1611 - regmap_exit(map); 1612 1123 } 1613 1124 1614 1125 static void raw_write_read_single(struct kunit *test) 1615 1126 { 1616 - struct raw_test_types *t = (struct raw_test_types *)test->param_value; 1617 1127 struct regmap *map; 1618 1128 struct regmap_config config; 1619 1129 struct regmap_ram_data *data; ··· 1618 1136 1619 1137 config = raw_regmap_config; 1620 1138 1621 - map = gen_raw_regmap(&config, t, &data); 1139 + map = gen_raw_regmap(test, &config, &data); 1622 1140 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1623 1141 if (IS_ERR(map)) 1624 1142 return; ··· 1629 1147 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val)); 1630 1148 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval)); 1631 1149 KUNIT_EXPECT_EQ(test, val, rval); 1632 - 1633 - regmap_exit(map); 1634 1150 } 1635 1151 1636 1152 static void raw_write(struct kunit *test) 1637 1153 { 1638 - struct raw_test_types *t = (struct raw_test_types *)test->param_value; 1639 1154 struct regmap *map; 1640 1155 struct regmap_config config; 1641 1156 struct regmap_ram_data *data; ··· 1643 1164 1644 1165 config = raw_regmap_config; 1645 1166 1646 - map = gen_raw_regmap(&config, t, &data); 1167 + map = gen_raw_regmap(test, &config, &data); 1647 1168 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1648 1169 if (IS_ERR(map)) 1649 1170 return; ··· 1664 1185 case 3: 1665 1186 if (config.val_format_endian == REGMAP_ENDIAN_BIG) { 1666 1187 KUNIT_EXPECT_EQ(test, rval, 1667 - be16_to_cpu(val[i % 2])); 1188 + be16_to_cpu((__force 
__be16)val[i % 2])); 1668 1189 } else { 1669 1190 KUNIT_EXPECT_EQ(test, rval, 1670 - le16_to_cpu(val[i % 2])); 1191 + le16_to_cpu((__force __le16)val[i % 2])); 1671 1192 } 1672 1193 break; 1673 1194 default: ··· 1678 1199 1679 1200 /* The values should appear in the "hardware" */ 1680 1201 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val)); 1681 - 1682 - regmap_exit(map); 1683 1202 } 1684 1203 1685 1204 static bool reg_zero(struct device *dev, unsigned int reg) ··· 1692 1215 1693 1216 static void raw_noinc_write(struct kunit *test) 1694 1217 { 1695 - struct raw_test_types *t = (struct raw_test_types *)test->param_value; 1696 1218 struct regmap *map; 1697 1219 struct regmap_config config; 1698 1220 struct regmap_ram_data *data; ··· 1704 1228 config.writeable_noinc_reg = reg_zero; 1705 1229 config.readable_noinc_reg = reg_zero; 1706 1230 1707 - map = gen_raw_regmap(&config, t, &data); 1231 + map = gen_raw_regmap(test, &config, &data); 1708 1232 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1709 1233 if (IS_ERR(map)) 1710 1234 return; ··· 1735 1259 /* Make sure we didn't touch the register after the noinc register */ 1736 1260 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val)); 1737 1261 KUNIT_ASSERT_EQ(test, val_test, val); 1738 - 1739 - regmap_exit(map); 1740 1262 } 1741 1263 1742 1264 static void raw_sync(struct kunit *test) 1743 1265 { 1744 - struct raw_test_types *t = (struct raw_test_types *)test->param_value; 1745 1266 struct regmap *map; 1746 1267 struct regmap_config config; 1747 1268 struct regmap_ram_data *data; ··· 1749 1276 1750 1277 config = raw_regmap_config; 1751 1278 1752 - map = gen_raw_regmap(&config, t, &data); 1279 + map = gen_raw_regmap(test, &config, &data); 1753 1280 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1754 1281 if (IS_ERR(map)) 1755 1282 return; ··· 1773 1300 case 3: 1774 1301 if (config.val_format_endian == REGMAP_ENDIAN_BIG) { 1775 1302 KUNIT_EXPECT_EQ(test, rval, 1776 - be16_to_cpu(val[i - 2])); 1303 + be16_to_cpu((__force __be16)val[i - 
2])); 1777 1304 } else { 1778 1305 KUNIT_EXPECT_EQ(test, rval, 1779 - le16_to_cpu(val[i - 2])); 1306 + le16_to_cpu((__force __le16)val[i - 2])); 1780 1307 } 1781 1308 break; 1782 1309 case 4: ··· 1796 1323 val[2] = cpu_to_be16(val[2]); 1797 1324 else 1798 1325 val[2] = cpu_to_le16(val[2]); 1799 - 1326 + 1800 1327 /* The values should not appear in the "hardware" */ 1801 1328 KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val)); 1802 1329 ··· 1810 1337 1811 1338 /* The values should now appear in the "hardware" */ 1812 1339 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val)); 1813 - 1814 - regmap_exit(map); 1815 1340 } 1816 1341 1817 1342 static void raw_ranges(struct kunit *test) 1818 1343 { 1819 - struct raw_test_types *t = (struct raw_test_types *)test->param_value; 1820 1344 struct regmap *map; 1821 1345 struct regmap_config config; 1822 1346 struct regmap_ram_data *data; ··· 1826 1356 config.num_ranges = 1; 1827 1357 config.max_register = test_range.range_max; 1828 1358 1829 - map = gen_raw_regmap(&config, t, &data); 1359 + map = gen_raw_regmap(test, &config, &data); 1830 1360 KUNIT_ASSERT_FALSE(test, IS_ERR(map)); 1831 1361 if (IS_ERR(map)) 1832 1362 return; ··· 1872 1402 KUNIT_EXPECT_FALSE(test, data->read[i]); 1873 1403 KUNIT_EXPECT_FALSE(test, data->written[i]); 1874 1404 } 1875 - 1876 - regmap_exit(map); 1877 1405 } 1878 1406 1879 1407 static struct kunit_case regmap_test_cases[] = { 1880 1408 KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params), 1409 + KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params), 1410 + KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params), 1881 1411 KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params), 1882 1412 KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params), 1883 1413 KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params), ··· 1889 1419 KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params), 1890 1420 KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params), 
1891 1421 KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params), 1892 - KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params), 1893 - KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params), 1422 + KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params), 1423 + KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params), 1424 + KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params), 1425 + KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params), 1894 1426 KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params), 1895 1427 KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params), 1896 1428 KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params), 1429 + KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params), 1430 + KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params), 1431 + KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params), 1432 + KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params), 1897 1433 KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params), 1898 - KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params), 1434 + KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params), 1899 1435 1900 1436 KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params), 1901 1437 KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params), ··· 1913 1437 {} 1914 1438 }; 1915 1439 1440 + static int regmap_test_init(struct kunit *test) 1441 + { 1442 + struct regmap_test_priv *priv; 1443 + struct device *dev; 1444 + 1445 + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); 1446 + if (!priv) 1447 + return -ENOMEM; 1448 + 1449 + test->priv = priv; 1450 + 1451 + dev = kunit_device_register(test, "regmap_test"); 1452 + if (IS_ERR(dev)) 1453 + return PTR_ERR(dev); 1454 + 1455 + 
priv->dev = get_device(dev); 1456 + dev_set_drvdata(dev, test); 1457 + 1458 + return 0; 1459 + } 1460 + 1461 + static void regmap_test_exit(struct kunit *test) 1462 + { 1463 + struct regmap_test_priv *priv = test->priv; 1464 + 1465 + /* Destroy the dummy struct device */ 1466 + if (priv && priv->dev) 1467 + put_device(priv->dev); 1468 + } 1469 + 1916 1470 static struct kunit_suite regmap_test_suite = { 1917 1471 .name = "regmap", 1472 + .init = regmap_test_init, 1473 + .exit = regmap_test_exit, 1918 1474 .test_cases = regmap_test_cases, 1919 1475 }; 1920 1476 kunit_test_suite(regmap_test_suite);
+1 -1
drivers/base/regmap/regmap-mdio.c
··· 117 117 EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio); 118 118 119 119 MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>"); 120 - MODULE_DESCRIPTION("Regmap MDIO Module"); 120 + MODULE_DESCRIPTION("regmap MDIO Module"); 121 121 MODULE_LICENSE("GPL v2");
+3 -2
drivers/base/regmap/regmap-ram.c
··· 53 53 .free_context = regmap_ram_free_context, 54 54 }; 55 55 56 - struct regmap *__regmap_init_ram(const struct regmap_config *config, 56 + struct regmap *__regmap_init_ram(struct device *dev, 57 + const struct regmap_config *config, 57 58 struct regmap_ram_data *data, 58 59 struct lock_class_key *lock_key, 59 60 const char *lock_name) ··· 76 75 if (!data->written) 77 76 return ERR_PTR(-ENOMEM); 78 77 79 - map = __regmap_init(NULL, &regmap_ram, data, config, 78 + map = __regmap_init(dev, &regmap_ram, data, config, 80 79 lock_key, lock_name); 81 80 82 81 return map;
+3 -2
drivers/base/regmap/regmap-raw-ram.c
··· 107 107 .free_context = regmap_raw_ram_free_context, 108 108 }; 109 109 110 - struct regmap *__regmap_init_raw_ram(const struct regmap_config *config, 110 + struct regmap *__regmap_init_raw_ram(struct device *dev, 111 + const struct regmap_config *config, 111 112 struct regmap_ram_data *data, 112 113 struct lock_class_key *lock_key, 113 114 const char *lock_name) ··· 135 134 136 135 data->reg_endian = config->reg_format_endian; 137 136 138 - map = __regmap_init(NULL, &regmap_raw_ram, data, config, 137 + map = __regmap_init(dev, &regmap_raw_ram, data, config, 139 138 lock_key, lock_name); 140 139 141 140 return map;
+1 -1
drivers/base/regmap/regmap-sdw-mbq.c
··· 97 97 } 98 98 EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq); 99 99 100 - MODULE_DESCRIPTION("Regmap SoundWire MBQ Module"); 100 + MODULE_DESCRIPTION("regmap SoundWire MBQ Module"); 101 101 MODULE_LICENSE("GPL");
+1 -1
drivers/base/regmap/regmap-sdw.c
··· 98 98 } 99 99 EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw); 100 100 101 - MODULE_DESCRIPTION("Regmap SoundWire Module"); 101 + MODULE_DESCRIPTION("regmap SoundWire Module"); 102 102 MODULE_LICENSE("GPL v2");
+1
drivers/base/regmap/regmap-spi.c
··· 165 165 } 166 166 EXPORT_SYMBOL_GPL(__devm_regmap_init_spi); 167 167 168 + MODULE_DESCRIPTION("regmap SPI Module"); 168 169 MODULE_LICENSE("GPL");
+31 -31
include/linux/regmap.h
··· 297 297 * performed on such table (a register is no increment 298 298 * readable if it belongs to one of the ranges specified 299 299 * by rd_noinc_table). 300 - * @disable_locking: This regmap is either protected by external means or 301 - * is guaranteed not to be accessed from multiple threads. 302 - * Don't use any locking mechanisms. 303 - * @lock: Optional lock callback (overrides regmap's default lock 304 - * function, based on spinlock or mutex). 305 - * @unlock: As above for unlocking. 306 - * @lock_arg: this field is passed as the only argument of lock/unlock 307 - * functions (ignored in case regular lock/unlock functions 308 - * are not overridden). 309 300 * @reg_read: Optional callback that if filled will be used to perform 310 301 * all the reads from the registers. Should only be provided for 311 302 * devices whose read operation cannot be represented as a simple ··· 314 323 * @write: Same as above for writing. 315 324 * @max_raw_read: Max raw read size that can be used on the device. 316 325 * @max_raw_write: Max raw write size that can be used on the device. 326 + * @can_sleep: Optional, specifies whether regmap operations can sleep. 317 327 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex 318 328 * to perform locking. This field is ignored if custom lock/unlock 319 329 * functions are used (see fields lock/unlock of struct regmap_config). ··· 323 331 * Use it only for "no-bus" cases. 324 332 * @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port 325 333 * access can be distinguished. 334 + * @disable_locking: This regmap is either protected by external means or 335 + * is guaranteed not to be accessed from multiple threads. 336 + * Don't use any locking mechanisms. 337 + * @lock: Optional lock callback (overrides regmap's default lock 338 + * function, based on spinlock or mutex). 339 + * @unlock: As above for unlocking. 
340 + * @lock_arg: This field is passed as the only argument of lock/unlock 341 + * functions (ignored in case regular lock/unlock functions 342 + * are not overridden). 326 343 * @max_register: Optional, specifies the maximum valid register address. 327 344 * @max_register_is_0: Optional, specifies that zero value in @max_register 328 345 * should be taken into account. This is a workaround to ··· 374 373 * @reg_defaults_raw: Power on reset values for registers (for use with 375 374 * register cache support). 376 375 * @num_reg_defaults_raw: Number of elements in reg_defaults_raw. 377 - * @reg_format_endian: Endianness for formatted register addresses. If this is 378 - * DEFAULT, the @reg_format_endian_default value from the 379 - * regmap bus is used. 380 - * @val_format_endian: Endianness for formatted register values. If this is 381 - * DEFAULT, the @reg_format_endian_default value from the 382 - * regmap bus is used. 383 - * 384 - * @ranges: Array of configuration entries for virtual address ranges. 385 - * @num_ranges: Number of range configuration entries. 386 376 * @use_hwlock: Indicate if a hardware spinlock should be used. 387 377 * @use_raw_spinlock: Indicate if a raw spinlock should be used. 388 378 * @hwlock_id: Specify the hardware spinlock id. 389 379 * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, 390 380 * HWLOCK_IRQ or 0. 391 - * @can_sleep: Optional, specifies whether regmap operations can sleep. 381 + * @reg_format_endian: Endianness for formatted register addresses. If this is 382 + * DEFAULT, the @reg_format_endian_default value from the 383 + * regmap bus is used. 384 + * @val_format_endian: Endianness for formatted register values. If this is 385 + * DEFAULT, the @reg_format_endian_default value from the 386 + * regmap bus is used. 387 + * 388 + * @ranges: Array of configuration entries for virtual address ranges. 389 + * @num_ranges: Number of range configuration entries. 
392 390 */ 393 391 struct regmap_config { 394 392 const char *name; ··· 406 406 bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg); 407 407 bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); 408 408 409 - bool disable_locking; 410 - regmap_lock lock; 411 - regmap_unlock unlock; 412 - void *lock_arg; 413 - 414 409 int (*reg_read)(void *context, unsigned int reg, unsigned int *val); 415 410 int (*reg_write)(void *context, unsigned int reg, unsigned int val); 416 411 int (*reg_update_bits)(void *context, unsigned int reg, ··· 417 422 size_t max_raw_read; 418 423 size_t max_raw_write; 419 424 425 + bool can_sleep; 426 + 420 427 bool fast_io; 421 428 bool io_port; 429 + 430 + bool disable_locking; 431 + regmap_lock lock; 432 + regmap_unlock unlock; 433 + void *lock_arg; 422 434 423 435 unsigned int max_register; 424 436 bool max_register_is_0; ··· 450 448 bool use_relaxed_mmio; 451 449 bool can_multi_write; 452 450 453 - enum regmap_endian reg_format_endian; 454 - enum regmap_endian val_format_endian; 455 - 456 - const struct regmap_range_cfg *ranges; 457 - unsigned int num_ranges; 458 - 459 451 bool use_hwlock; 460 452 bool use_raw_spinlock; 461 453 unsigned int hwlock_id; 462 454 unsigned int hwlock_mode; 463 455 464 - bool can_sleep; 456 + enum regmap_endian reg_format_endian; 457 + enum regmap_endian val_format_endian; 458 + 459 + const struct regmap_range_cfg *ranges; 460 + unsigned int num_ranges; 465 461 }; 466 462 467 463 /**