xref: /linux/drivers/base/regmap/regmap-kunit.c (revision 021bc4b9)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // regmap KUnit tests
4 //
5 // Copyright 2023 Arm Ltd
6 
7 #include <kunit/test.h>
8 #include "internal.h"
9 
10 #define BLOCK_TEST_SIZE 12
11 
/*
 * Baseline configuration shared by the non-raw tests; each test copies
 * this and then tweaks the copy (cache type, defaults, ranges, ...).
 */
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
17 
/* Parameterisation of a test case by cache implementation */
struct regcache_types {
	enum regcache_type type;	/* cache backend under test */
	const char *name;		/* human readable case name */
};
22 
23 static void case_to_desc(const struct regcache_types *t, char *desc)
24 {
25 	strcpy(desc, t->name);
26 }
27 
/* Every cache type, including running with no cache at all */
static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
36 
/* Only the cache types that actually cache data (no REGCACHE_NONE) */
static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
44 
/* Cache types with sparse storage (flat is excluded here) */
static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
51 
52 static struct regmap *gen_regmap(struct regmap_config *config,
53 				 struct regmap_ram_data **data)
54 {
55 	unsigned int *buf;
56 	struct regmap *ret;
57 	size_t size = (config->max_register + 1) * sizeof(unsigned int);
58 	int i;
59 	struct reg_default *defaults;
60 
61 	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
62 					config->cache_type == REGCACHE_MAPLE;
63 
64 	buf = kmalloc(size, GFP_KERNEL);
65 	if (!buf)
66 		return ERR_PTR(-ENOMEM);
67 
68 	get_random_bytes(buf, size);
69 
70 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
71 	if (!(*data))
72 		return ERR_PTR(-ENOMEM);
73 	(*data)->vals = buf;
74 
75 	if (config->num_reg_defaults) {
76 		defaults = kcalloc(config->num_reg_defaults,
77 				   sizeof(struct reg_default),
78 				   GFP_KERNEL);
79 		if (!defaults)
80 			return ERR_PTR(-ENOMEM);
81 		config->reg_defaults = defaults;
82 
83 		for (i = 0; i < config->num_reg_defaults; i++) {
84 			defaults[i].reg = i * config->reg_stride;
85 			defaults[i].def = buf[i * config->reg_stride];
86 		}
87 	}
88 
89 	ret = regmap_init_ram(config, *data);
90 	if (IS_ERR(ret)) {
91 		kfree(buf);
92 		kfree(*data);
93 	}
94 
95 	return ret;
96 }
97 
/* Access predicate: every register is allowed except register 5 */
static bool reg_5_false(struct device *context, unsigned int reg)
{
	if (reg == 5)
		return false;

	return true;
}
102 
/* Single-register write followed by read back, for every cache type */
static void basic_read_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache satisfied the read */
	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);

	regmap_exit(map);
}
131 
/* Bulk write then per-register read back, for every cache type */
static void bulk_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
168 
/* Per-register writes then bulk read back, for every cache type */
static void bulk_read(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
201 
/* Writes to a read-only register must fail and never reach the device */
static void write_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	/* Register 5 is not writeable */
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
236 
/*
 * Reads of a write-only register must fail, except with a flat cache
 * which can satisfy any read from its backing array.
 */
static void read_writeonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	/* Register 5 is not readable */
	config.readable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (t->type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);

	regmap_exit(map);
}
276 
277 static void reg_defaults(struct kunit *test)
278 {
279 	struct regcache_types *t = (struct regcache_types *)test->param_value;
280 	struct regmap *map;
281 	struct regmap_config config;
282 	struct regmap_ram_data *data;
283 	unsigned int rval[BLOCK_TEST_SIZE];
284 	int i;
285 
286 	config = test_regmap_config;
287 	config.cache_type = t->type;
288 	config.num_reg_defaults = BLOCK_TEST_SIZE;
289 
290 	map = gen_regmap(&config, &data);
291 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
292 	if (IS_ERR(map))
293 		return;
294 
295 	/* Read back the expected default data */
296 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
297 						  BLOCK_TEST_SIZE));
298 	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
299 
300 	/* The data should have been read from cache if there was one */
301 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
302 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
303 }
304 
305 static void reg_defaults_read_dev(struct kunit *test)
306 {
307 	struct regcache_types *t = (struct regcache_types *)test->param_value;
308 	struct regmap *map;
309 	struct regmap_config config;
310 	struct regmap_ram_data *data;
311 	unsigned int rval[BLOCK_TEST_SIZE];
312 	int i;
313 
314 	config = test_regmap_config;
315 	config.cache_type = t->type;
316 	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
317 
318 	map = gen_regmap(&config, &data);
319 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
320 	if (IS_ERR(map))
321 		return;
322 
323 	/* We should have read the cache defaults back from the map */
324 	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
325 		KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
326 		data->read[i] = false;
327 	}
328 
329 	/* Read back the expected default data */
330 	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
331 						  BLOCK_TEST_SIZE));
332 	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
333 
334 	/* The data should have been read from cache if there was one */
335 	for (i = 0; i < BLOCK_TEST_SIZE; i++)
336 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
337 }
338 
/* A register patch writes exactly the patched registers to the device */
static void register_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
390 
/* With reg_stride = 2 only even registers are accessible at all */
static void stride(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.reg_stride = 2;
	/* Half the registers exist, so half the defaults */
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			/* Odd registers: both operations rejected */
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}

	regmap_exit(map);
}
433 
/*
 * Paged/indirect access setup: virtual registers range_min..range_max
 * are reached through a window at window_start, with the page chosen
 * via selector_reg.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
444 
445 static bool test_range_window_volatile(struct device *dev, unsigned int reg)
446 {
447 	if (reg >= test_range.window_start &&
448 	    reg <= test_range.window_start + test_range.window_len)
449 		return true;
450 
451 	return false;
452 }
453 
454 static bool test_range_all_volatile(struct device *dev, unsigned int reg)
455 {
456 	if (test_range_window_volatile(dev, reg))
457 		return true;
458 
459 	if (reg >= test_range.range_min && reg <= test_range.range_max)
460 		return true;
461 
462 	return false;
463 }
464 
/*
 * Accesses within the virtual range go through the selector register
 * and window rather than hitting the virtual addresses directly.
 */
static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* NOTE(review): loop stops short of range_max — confirm intended */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Second page: selector must be rewritten, window reused */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
535 
536 /* Try to stress dynamic creation of cache data structures */
537 static void stress_insert(struct kunit *test)
538 {
539 	struct regcache_types *t = (struct regcache_types *)test->param_value;
540 	struct regmap *map;
541 	struct regmap_config config;
542 	struct regmap_ram_data *data;
543 	unsigned int rval, *vals;
544 	size_t buf_sz;
545 	int i;
546 
547 	config = test_regmap_config;
548 	config.cache_type = t->type;
549 	config.max_register = 300;
550 
551 	map = gen_regmap(&config, &data);
552 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
553 	if (IS_ERR(map))
554 		return;
555 
556 	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
557 			     GFP_KERNEL);
558 	KUNIT_ASSERT_FALSE(test, vals == NULL);
559 	buf_sz = sizeof(unsigned long) * config.max_register;
560 
561 	get_random_bytes(vals, buf_sz);
562 
563 	/* Write data into the map/cache in ever decreasing strides */
564 	for (i = 0; i < config.max_register; i += 100)
565 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
566 	for (i = 0; i < config.max_register; i += 50)
567 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
568 	for (i = 0; i < config.max_register; i += 25)
569 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
570 	for (i = 0; i < config.max_register; i += 10)
571 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
572 	for (i = 0; i < config.max_register; i += 5)
573 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
574 	for (i = 0; i < config.max_register; i += 3)
575 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
576 	for (i = 0; i < config.max_register; i += 2)
577 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
578 	for (i = 0; i < config.max_register; i++)
579 		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
580 
581 	/* Do reads from the cache (if there is one) match? */
582 	for (i = 0; i < config.max_register; i ++) {
583 		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
584 		KUNIT_EXPECT_EQ(test, rval, vals[i]);
585 		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
586 	}
587 
588 	regmap_exit(map);
589 }
590 
/* Bypassed writes reach the device but leave the cache contents alone */
static void cache_bypass(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[0], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	regmap_exit(map);
}
628 
/* regcache_sync() restores cached values after the device is trashed */
static void cache_sync(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[i]);

	regmap_exit(map);
}
666 
/* A sync only writes out registers that differ from their defaults */
static void cache_sync_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);

	regmap_exit(map);
}
702 
/* A sync must skip read-only registers even if their cache was dirtied */
static void cache_sync_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	/* Register 5 is read-only */
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
743 
/* A register patch is reapplied on sync but not stored in the cache */
static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
804 
/* Dropped cache regions are refetched from the device on the next read */
static void cache_drop(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		data->read[i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	regmap_exit(map);
}
846 
/* regcache_reg_cached() reflects whether a register has been populated */
static void cache_present(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));

	regmap_exit(map);
}
885 
886 /* Check that caching the window register works with sync */
887 static void cache_range_window_reg(struct kunit *test)
888 {
889 	struct regcache_types *t = (struct regcache_types *)test->param_value;
890 	struct regmap *map;
891 	struct regmap_config config;
892 	struct regmap_ram_data *data;
893 	unsigned int val;
894 	int i;
895 
896 	config = test_regmap_config;
897 	config.cache_type = t->type;
898 	config.volatile_reg = test_range_window_volatile;
899 	config.ranges = &test_range;
900 	config.num_ranges = 1;
901 	config.max_register = test_range.range_max;
902 
903 	map = gen_regmap(&config, &data);
904 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
905 	if (IS_ERR(map))
906 		return;
907 
908 	/* Write new values to the entire range */
909 	for (i = test_range.range_min; i <= test_range.range_max; i++)
910 		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));
911 
912 	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
913 	KUNIT_ASSERT_EQ(test, val, 2);
914 
915 	/* Write to the first register in the range to reset the page */
916 	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
917 	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
918 	KUNIT_ASSERT_EQ(test, val, 0);
919 
920 	/* Trigger a cache sync */
921 	regcache_mark_dirty(map);
922 	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
923 
924 	/* Write to the first register again, the page should be reset */
925 	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
926 	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
927 	KUNIT_ASSERT_EQ(test, val, 0);
928 
929 	/* Trigger another cache sync */
930 	regcache_mark_dirty(map);
931 	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
932 
933 	/* Write to the last register again, the page should be reset */
934 	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
935 	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
936 	KUNIT_ASSERT_EQ(test, val, 2);
937 }
938 
/* Parameterisation of a raw-format test case: cache type x endianness */
struct raw_test_types {
	const char *name;		/* human readable case name */

	enum regcache_type cache_type;	/* cache backend under test */
	enum regmap_endian val_endian;	/* wire format of register values */
};
945 
946 static void raw_to_desc(const struct raw_test_types *t, char *desc)
947 {
948 	strcpy(desc, t->name);
949 }
950 
/* All cache type / endianness combinations, including no cache */
static const struct raw_test_types raw_types_list[] = {
	{ "none-little",   REGCACHE_NONE,   REGMAP_ENDIAN_LITTLE },
	{ "none-big",      REGCACHE_NONE,   REGMAP_ENDIAN_BIG },
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
963 
/* As above but only combinations that actually have a cache */
static const struct raw_test_types raw_cache_types_list[] = {
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
974 
/*
 * Baseline configuration for the raw-format tests: 16 bit registers
 * and values; value endianness is filled in per test case.
 */
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
982 
983 static struct regmap *gen_raw_regmap(struct regmap_config *config,
984 				     struct raw_test_types *test_type,
985 				     struct regmap_ram_data **data)
986 {
987 	u16 *buf;
988 	struct regmap *ret;
989 	size_t size = (config->max_register + 1) * config->reg_bits / 8;
990 	int i;
991 	struct reg_default *defaults;
992 
993 	config->cache_type = test_type->cache_type;
994 	config->val_format_endian = test_type->val_endian;
995 	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
996 					config->cache_type == REGCACHE_MAPLE;
997 
998 	buf = kmalloc(size, GFP_KERNEL);
999 	if (!buf)
1000 		return ERR_PTR(-ENOMEM);
1001 
1002 	get_random_bytes(buf, size);
1003 
1004 	*data = kzalloc(sizeof(**data), GFP_KERNEL);
1005 	if (!(*data))
1006 		return ERR_PTR(-ENOMEM);
1007 	(*data)->vals = (void *)buf;
1008 
1009 	config->num_reg_defaults = config->max_register + 1;
1010 	defaults = kcalloc(config->num_reg_defaults,
1011 			   sizeof(struct reg_default),
1012 			   GFP_KERNEL);
1013 	if (!defaults)
1014 		return ERR_PTR(-ENOMEM);
1015 	config->reg_defaults = defaults;
1016 
1017 	for (i = 0; i < config->num_reg_defaults; i++) {
1018 		defaults[i].reg = i;
1019 		switch (test_type->val_endian) {
1020 		case REGMAP_ENDIAN_LITTLE:
1021 			defaults[i].def = le16_to_cpu(buf[i]);
1022 			break;
1023 		case REGMAP_ENDIAN_BIG:
1024 			defaults[i].def = be16_to_cpu(buf[i]);
1025 			break;
1026 		default:
1027 			return ERR_PTR(-EINVAL);
1028 		}
1029 	}
1030 
1031 	/*
1032 	 * We use the defaults in the tests but they don't make sense
1033 	 * to the core if there's no cache.
1034 	 */
1035 	if (config->cache_type == REGCACHE_NONE)
1036 		config->num_reg_defaults = 0;
1037 
1038 	ret = regmap_init_raw_ram(config, *data);
1039 	if (IS_ERR(ret)) {
1040 		kfree(buf);
1041 		kfree(*data);
1042 	}
1043 
1044 	return ret;
1045 }
1046 
1047 static void raw_read_defaults_single(struct kunit *test)
1048 {
1049 	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1050 	struct regmap *map;
1051 	struct regmap_config config;
1052 	struct regmap_ram_data *data;
1053 	unsigned int rval;
1054 	int i;
1055 
1056 	config = raw_regmap_config;
1057 
1058 	map = gen_raw_regmap(&config, t, &data);
1059 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1060 	if (IS_ERR(map))
1061 		return;
1062 
1063 	/* Check that we can read the defaults via the API */
1064 	for (i = 0; i < config.max_register + 1; i++) {
1065 		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1066 		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1067 	}
1068 
1069 	regmap_exit(map);
1070 }
1071 
1072 static void raw_read_defaults(struct kunit *test)
1073 {
1074 	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1075 	struct regmap *map;
1076 	struct regmap_config config;
1077 	struct regmap_ram_data *data;
1078 	u16 *rval;
1079 	u16 def;
1080 	size_t val_len;
1081 	int i;
1082 
1083 	config = raw_regmap_config;
1084 
1085 	map = gen_raw_regmap(&config, t, &data);
1086 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1087 	if (IS_ERR(map))
1088 		return;
1089 
1090 	val_len = sizeof(*rval) * (config.max_register + 1);
1091 	rval = kmalloc(val_len, GFP_KERNEL);
1092 	KUNIT_ASSERT_TRUE(test, rval != NULL);
1093 	if (!rval)
1094 		return;
1095 
1096 	/* Check that we can read the defaults via the API */
1097 	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
1098 	for (i = 0; i < config.max_register + 1; i++) {
1099 		def = config.reg_defaults[i].def;
1100 		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1101 			KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
1102 		} else {
1103 			KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
1104 		}
1105 	}
1106 
1107 	kfree(rval);
1108 	regmap_exit(map);
1109 }
1110 
1111 static void raw_write_read_single(struct kunit *test)
1112 {
1113 	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1114 	struct regmap *map;
1115 	struct regmap_config config;
1116 	struct regmap_ram_data *data;
1117 	u16 val;
1118 	unsigned int rval;
1119 
1120 	config = raw_regmap_config;
1121 
1122 	map = gen_raw_regmap(&config, t, &data);
1123 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1124 	if (IS_ERR(map))
1125 		return;
1126 
1127 	get_random_bytes(&val, sizeof(val));
1128 
1129 	/* If we write a value to a register we can read it back */
1130 	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1131 	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1132 	KUNIT_EXPECT_EQ(test, val, rval);
1133 
1134 	regmap_exit(map);
1135 }
1136 
1137 static void raw_write(struct kunit *test)
1138 {
1139 	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1140 	struct regmap *map;
1141 	struct regmap_config config;
1142 	struct regmap_ram_data *data;
1143 	u16 *hw_buf;
1144 	u16 val[2];
1145 	unsigned int rval;
1146 	int i;
1147 
1148 	config = raw_regmap_config;
1149 
1150 	map = gen_raw_regmap(&config, t, &data);
1151 	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1152 	if (IS_ERR(map))
1153 		return;
1154 
1155 	hw_buf = (u16 *)data->vals;
1156 
1157 	get_random_bytes(&val, sizeof(val));
1158 
1159 	/* Do a raw write */
1160 	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1161 
1162 	/* We should read back the new values, and defaults for the rest */
1163 	for (i = 0; i < config.max_register + 1; i++) {
1164 		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1165 
1166 		switch (i) {
1167 		case 2:
1168 		case 3:
1169 			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1170 				KUNIT_EXPECT_EQ(test, rval,
1171 						be16_to_cpu(val[i % 2]));
1172 			} else {
1173 				KUNIT_EXPECT_EQ(test, rval,
1174 						le16_to_cpu(val[i % 2]));
1175 			}
1176 			break;
1177 		default:
1178 			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1179 			break;
1180 		}
1181 	}
1182 
1183 	/* The values should appear in the "hardware" */
1184 	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1185 
1186 	regmap_exit(map);
1187 }
1188 
/*
 * regmap_config predicate (volatile/noinc callbacks in raw_noinc_write):
 * only register zero matches.
 */
static bool reg_zero(struct device *dev, unsigned int reg)
{
	return !reg;
}
1193 
/*
 * RAM backend counterpart of reg_zero(): the test "hardware" treats
 * only register zero as a noinc register.
 */
static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return (reg == 0);
}
1198 
/*
 * Stream a block of data to a non-incrementing register (register 0)
 * and verify that only the noinc register is affected: it ends up
 * holding the last value streamed, and the neighbouring register keeps
 * the value written beforehand.
 */
static void raw_noinc_write(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	/* Register 0 is volatile and a noinc register in both directions */
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Tell the RAM backend which register is noinc as well */
	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	/*
	 * The array holds bus-endian raw data; decode the guard value
	 * (+100 so it differs from the streamed data) and the final
	 * streamed value into CPU endianness for comparison.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);

	regmap_exit(map);
}
1247 
/*
 * Write while the cache is in cache-only mode, check the values are
 * visible through the cache but absent from the "hardware", then sync
 * and check they reached the hardware.  Only run for configurations
 * with a real cache (raw_test_cache_types params).
 */
static void raw_sync(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[2];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
	/*
	 * regmap_write() takes a CPU-endian value, so decode val[0]
	 * first; after sync, register 6's raw bytes should then equal
	 * val[0] again.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
						      be16_to_cpu(val[0])));
	else
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
						      le16_to_cpu(val[0])));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
		case 6:
			/* Registers 2, 3 and 6 carry the written data */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));

	/* Clear the backend's write tracking before the sync */
	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));

	regmap_exit(map);
}
1320 
/* All test cases, parameterised over the cache-type/endianness lists */
static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),

	/* Raw (bus/wire format) tests */
	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	{}
};
1350 
/* Register the suite; KUnit runs every case in regmap_test_cases */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");
1358