Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kunit test for clk rate management
4 */
5#include <linux/clk.h>
6#include <linux/clk-provider.h>
7
8/* Needed for clk_hw_get_clk() */
9#include "clk.h"
10
11#include <kunit/test.h>
12
13#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
14#define DUMMY_CLOCK_RATE_1 (142 * 1000 * 1000)
15#define DUMMY_CLOCK_RATE_2 (242 * 1000 * 1000)
16
/*
 * Context for a dummy test clock.
 * @hw: clk_hw handle registered with the clock framework
 * @rate: current rate, cached by the dummy .set_rate/.recalc_rate ops
 */
struct clk_dummy_context {
	struct clk_hw hw;
	unsigned long rate;
};
21
22static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
23 unsigned long parent_rate)
24{
25 struct clk_dummy_context *ctx =
26 container_of(hw, struct clk_dummy_context, hw);
27
28 return ctx->rate;
29}
30
/*
 * .determine_rate hook that accepts whatever rate was requested,
 * leaving the request untouched.
 */
static int clk_dummy_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	/* Just return the same rate without modifying it */
	return 0;
}
37
38static int clk_dummy_maximize_rate(struct clk_hw *hw,
39 struct clk_rate_request *req)
40{
41 /*
42 * If there's a maximum set, always run the clock at the maximum
43 * allowed.
44 */
45 if (req->max_rate < ULONG_MAX)
46 req->rate = req->max_rate;
47
48 return 0;
49}
50
51static int clk_dummy_minimize_rate(struct clk_hw *hw,
52 struct clk_rate_request *req)
53{
54 /*
55 * If there's a minimum set, always run the clock at the minimum
56 * allowed.
57 */
58 if (req->min_rate > 0)
59 req->rate = req->min_rate;
60
61 return 0;
62}
63
64static int clk_dummy_set_rate(struct clk_hw *hw,
65 unsigned long rate,
66 unsigned long parent_rate)
67{
68 struct clk_dummy_context *ctx =
69 container_of(hw, struct clk_dummy_context, hw);
70
71 ctx->rate = rate;
72 return 0;
73}
74
75static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
76{
77 if (index >= clk_hw_get_num_parents(hw))
78 return -EINVAL;
79
80 return 0;
81}
82
/* Single-parent mux: the only possible parent index is 0. */
static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
{
	return 0;
}
87
/* Rate ops that honour the requested rate as-is. */
static const struct clk_ops clk_dummy_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_determine_rate,
	.set_rate = clk_dummy_set_rate,
};

/* Rate ops that always pick the maximum allowed rate. */
static const struct clk_ops clk_dummy_maximize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_maximize_rate,
	.set_rate = clk_dummy_set_rate,
};

/* Rate ops that always pick the minimum allowed rate. */
static const struct clk_ops clk_dummy_minimize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_minimize_rate,
	.set_rate = clk_dummy_set_rate,
};

/* Mux-only ops: parent selection, no rate operations at all. */
static const struct clk_ops clk_dummy_single_parent_ops = {
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};
110
111static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
112{
113 struct clk_dummy_context *ctx;
114 struct clk_init_data init = { };
115 int ret;
116
117 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
118 if (!ctx)
119 return -ENOMEM;
120 ctx->rate = DUMMY_CLOCK_INIT_RATE;
121 test->priv = ctx;
122
123 init.name = "test_dummy_rate";
124 init.ops = ops;
125 ctx->hw.init = &init;
126
127 ret = clk_hw_register(NULL, &ctx->hw);
128 if (ret)
129 return ret;
130
131 return 0;
132}
133
/* Suite init: register a dummy clock with pass-through rate ops. */
static int clk_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
}

/* Suite init: register a dummy clock that maximizes its rate. */
static int clk_maximize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
}

/* Suite init: register a dummy clock that minimizes its rate. */
static int clk_minimize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
}
148
/* Suite exit: unregister the dummy clock set up by the suite init. */
static void clk_test_exit(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
}
155
156/*
157 * Test that the actual rate matches what is returned by clk_get_rate()
158 */
159static void clk_test_get_rate(struct kunit *test)
160{
161 struct clk_dummy_context *ctx = test->priv;
162 struct clk_hw *hw = &ctx->hw;
163 struct clk *clk = hw->clk;
164 unsigned long rate;
165
166 rate = clk_get_rate(clk);
167 KUNIT_ASSERT_GT(test, rate, 0);
168 KUNIT_EXPECT_EQ(test, rate, ctx->rate);
169}
170
171/*
172 * Test that, after a call to clk_set_rate(), the rate returned by
173 * clk_get_rate() matches.
174 *
175 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
176 * modify the requested rate, which is our case in clk_dummy_rate_ops.
177 */
178static void clk_test_set_get_rate(struct kunit *test)
179{
180 struct clk_dummy_context *ctx = test->priv;
181 struct clk_hw *hw = &ctx->hw;
182 struct clk *clk = hw->clk;
183 unsigned long rate;
184
185 KUNIT_ASSERT_EQ(test,
186 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
187 0);
188
189 rate = clk_get_rate(clk);
190 KUNIT_ASSERT_GT(test, rate, 0);
191 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
192}
193
194/*
195 * Test that, after several calls to clk_set_rate(), the rate returned
196 * by clk_get_rate() matches the last one.
197 *
198 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
199 * modify the requested rate, which is our case in clk_dummy_rate_ops.
200 */
201static void clk_test_set_set_get_rate(struct kunit *test)
202{
203 struct clk_dummy_context *ctx = test->priv;
204 struct clk_hw *hw = &ctx->hw;
205 struct clk *clk = hw->clk;
206 unsigned long rate;
207
208 KUNIT_ASSERT_EQ(test,
209 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
210 0);
211
212 KUNIT_ASSERT_EQ(test,
213 clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
214 0);
215
216 rate = clk_get_rate(clk);
217 KUNIT_ASSERT_GT(test, rate, 0);
218 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
219}
220
221/*
222 * Test that clk_round_rate and clk_set_rate are consitent and will
223 * return the same frequency.
224 */
225static void clk_test_round_set_get_rate(struct kunit *test)
226{
227 struct clk_dummy_context *ctx = test->priv;
228 struct clk_hw *hw = &ctx->hw;
229 struct clk *clk = hw->clk;
230 unsigned long rounded_rate, set_rate;
231
232 rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
233 KUNIT_ASSERT_GT(test, rounded_rate, 0);
234 KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
235
236 KUNIT_ASSERT_EQ(test,
237 clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
238 0);
239
240 set_rate = clk_get_rate(clk);
241 KUNIT_ASSERT_GT(test, set_rate, 0);
242 KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
243}
244
static struct kunit_case clk_test_cases[] = {
	KUNIT_CASE(clk_test_get_rate),
	KUNIT_CASE(clk_test_set_get_rate),
	KUNIT_CASE(clk_test_set_set_get_rate),
	KUNIT_CASE(clk_test_round_set_get_rate),
	{}
};

/* Basic get/set/round rate tests on a single dummy clock. */
static struct kunit_suite clk_test_suite = {
	.name = "clk-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_test_cases,
};
259
/*
 * Context for a single-parent mux test clock.
 * @parent_ctx: context of the (initially orphan) parent clock
 * @hw: clk_hw handle of the mux itself
 */
struct clk_single_parent_ctx {
	struct clk_dummy_context parent_ctx;
	struct clk_hw hw;
};
264
265static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
266{
267 struct clk_single_parent_ctx *ctx;
268 struct clk_init_data init = { };
269 const char * const parents[] = { "orphan_parent" };
270 int ret;
271
272 ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
273 if (!ctx)
274 return -ENOMEM;
275 test->priv = ctx;
276
277 init.name = "test_orphan_dummy_parent";
278 init.ops = &clk_dummy_single_parent_ops;
279 init.parent_names = parents;
280 init.num_parents = ARRAY_SIZE(parents);
281 init.flags = CLK_SET_RATE_PARENT;
282 ctx->hw.init = &init;
283
284 ret = clk_hw_register(NULL, &ctx->hw);
285 if (ret)
286 return ret;
287
288 memset(&init, 0, sizeof(init));
289 init.name = "orphan_parent";
290 init.ops = &clk_dummy_rate_ops;
291 ctx->parent_ctx.hw.init = &init;
292 ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
293
294 ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
295 if (ret)
296 return ret;
297
298 return 0;
299}
300
static void clk_orphan_transparent_single_parent_mux_test_exit(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;

	/* Unregister the child mux before the parent it points at. */
	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent_ctx.hw);
}
308
309/*
310 * Test that a mux-only clock, with an initial rate within a range,
311 * will still have the same rate after the range has been enforced.
312 */
313static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
314{
315 struct clk_single_parent_ctx *ctx = test->priv;
316 struct clk_hw *hw = &ctx->hw;
317 struct clk *clk = hw->clk;
318 unsigned long rate, new_rate;
319
320 rate = clk_get_rate(clk);
321 KUNIT_ASSERT_GT(test, rate, 0);
322
323 KUNIT_ASSERT_EQ(test,
324 clk_set_rate_range(clk,
325 ctx->parent_ctx.rate - 1000,
326 ctx->parent_ctx.rate + 1000),
327 0);
328
329 new_rate = clk_get_rate(clk);
330 KUNIT_ASSERT_GT(test, new_rate, 0);
331 KUNIT_EXPECT_EQ(test, rate, new_rate);
332}
333
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
	{}
};

/* Tests for a CLK_SET_RATE_PARENT mux registered before its parent. */
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
	.name = "clk-orphan-transparent-single-parent-test",
	.init = clk_orphan_transparent_single_parent_mux_test_init,
	.exit = clk_orphan_transparent_single_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
345
346/*
347 * Test that clk_set_rate_range won't return an error for a valid range
348 * and that it will make sure the rate of the clock is within the
349 * boundaries.
350 */
351static void clk_range_test_set_range(struct kunit *test)
352{
353 struct clk_dummy_context *ctx = test->priv;
354 struct clk_hw *hw = &ctx->hw;
355 struct clk *clk = hw->clk;
356 unsigned long rate;
357
358 KUNIT_ASSERT_EQ(test,
359 clk_set_rate_range(clk,
360 DUMMY_CLOCK_RATE_1,
361 DUMMY_CLOCK_RATE_2),
362 0);
363
364 rate = clk_get_rate(clk);
365 KUNIT_ASSERT_GT(test, rate, 0);
366 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
367 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
368}
369
370/*
371 * Test that calling clk_set_rate_range with a minimum rate higher than
372 * the maximum rate returns an error.
373 */
374static void clk_range_test_set_range_invalid(struct kunit *test)
375{
376 struct clk_dummy_context *ctx = test->priv;
377 struct clk_hw *hw = &ctx->hw;
378 struct clk *clk = hw->clk;
379
380 KUNIT_EXPECT_LT(test,
381 clk_set_rate_range(clk,
382 DUMMY_CLOCK_RATE_1 + 1000,
383 DUMMY_CLOCK_RATE_1),
384 0);
385}
386
387/*
388 * Test that users can't set multiple, disjoints, range that would be
389 * impossible to meet.
390 */
391static void clk_range_test_multiple_disjoints_range(struct kunit *test)
392{
393 struct clk_dummy_context *ctx = test->priv;
394 struct clk_hw *hw = &ctx->hw;
395 struct clk *user1, *user2;
396
397 user1 = clk_hw_get_clk(hw, NULL);
398 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
399
400 user2 = clk_hw_get_clk(hw, NULL);
401 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
402
403 KUNIT_ASSERT_EQ(test,
404 clk_set_rate_range(user1, 1000, 2000),
405 0);
406
407 KUNIT_EXPECT_LT(test,
408 clk_set_rate_range(user2, 3000, 4000),
409 0);
410
411 clk_put(user2);
412 clk_put(user1);
413}
414
415/*
416 * Test that if our clock has some boundaries and we try to round a rate
417 * lower than the minimum, the returned rate will be within range.
418 */
419static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
420{
421 struct clk_dummy_context *ctx = test->priv;
422 struct clk_hw *hw = &ctx->hw;
423 struct clk *clk = hw->clk;
424 long rate;
425
426 KUNIT_ASSERT_EQ(test,
427 clk_set_rate_range(clk,
428 DUMMY_CLOCK_RATE_1,
429 DUMMY_CLOCK_RATE_2),
430 0);
431
432 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
433 KUNIT_ASSERT_GT(test, rate, 0);
434 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
435 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
436}
437
/*
 * Test that if our clock has some boundaries and we try to set a rate
 * lower than the minimum, the new rate will be within range.
 */
static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}
464
465/*
466 * Test that if our clock has some boundaries and we try to round and
467 * set a rate lower than the minimum, the rate returned by
468 * clk_round_rate() will be consistent with the new rate set by
469 * clk_set_rate().
470 */
471static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
472{
473 struct clk_dummy_context *ctx = test->priv;
474 struct clk_hw *hw = &ctx->hw;
475 struct clk *clk = hw->clk;
476 long rounded;
477
478 KUNIT_ASSERT_EQ(test,
479 clk_set_rate_range(clk,
480 DUMMY_CLOCK_RATE_1,
481 DUMMY_CLOCK_RATE_2),
482 0);
483
484 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
485 KUNIT_ASSERT_GT(test, rounded, 0);
486
487 KUNIT_ASSERT_EQ(test,
488 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
489 0);
490
491 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
492}
493
494/*
495 * Test that if our clock has some boundaries and we try to round a rate
496 * higher than the maximum, the returned rate will be within range.
497 */
498static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
499{
500 struct clk_dummy_context *ctx = test->priv;
501 struct clk_hw *hw = &ctx->hw;
502 struct clk *clk = hw->clk;
503 long rate;
504
505 KUNIT_ASSERT_EQ(test,
506 clk_set_rate_range(clk,
507 DUMMY_CLOCK_RATE_1,
508 DUMMY_CLOCK_RATE_2),
509 0);
510
511 rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
512 KUNIT_ASSERT_GT(test, rate, 0);
513 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
514 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
515}
516
517/*
518 * Test that if our clock has some boundaries and we try to set a rate
519 * higher than the maximum, the new rate will be within range.
520 */
521static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
522{
523 struct clk_dummy_context *ctx = test->priv;
524 struct clk_hw *hw = &ctx->hw;
525 struct clk *clk = hw->clk;
526 unsigned long rate;
527
528 KUNIT_ASSERT_EQ(test,
529 clk_set_rate_range(clk,
530 DUMMY_CLOCK_RATE_1,
531 DUMMY_CLOCK_RATE_2),
532 0);
533
534 KUNIT_ASSERT_EQ(test,
535 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
536 0);
537
538 rate = clk_get_rate(clk);
539 KUNIT_ASSERT_GT(test, rate, 0);
540 KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
541 KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
542}
543
544/*
545 * Test that if our clock has some boundaries and we try to round and
546 * set a rate higher than the maximum, the rate returned by
547 * clk_round_rate() will be consistent with the new rate set by
548 * clk_set_rate().
549 */
550static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
551{
552 struct clk_dummy_context *ctx = test->priv;
553 struct clk_hw *hw = &ctx->hw;
554 struct clk *clk = hw->clk;
555 long rounded;
556
557 KUNIT_ASSERT_EQ(test,
558 clk_set_rate_range(clk,
559 DUMMY_CLOCK_RATE_1,
560 DUMMY_CLOCK_RATE_2),
561 0);
562
563 rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
564 KUNIT_ASSERT_GT(test, rounded, 0);
565
566 KUNIT_ASSERT_EQ(test,
567 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
568 0);
569
570 KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
571}
572
573/*
574 * Test that if our clock has a rate lower than the minimum set by a
575 * call to clk_set_rate_range(), the rate will be raised to match the
576 * new minimum.
577 *
578 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
579 * modify the requested rate, which is our case in clk_dummy_rate_ops.
580 */
581static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
582{
583 struct clk_dummy_context *ctx = test->priv;
584 struct clk_hw *hw = &ctx->hw;
585 struct clk *clk = hw->clk;
586 unsigned long rate;
587
588 KUNIT_ASSERT_EQ(test,
589 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
590 0);
591
592 KUNIT_ASSERT_EQ(test,
593 clk_set_rate_range(clk,
594 DUMMY_CLOCK_RATE_1,
595 DUMMY_CLOCK_RATE_2),
596 0);
597
598 rate = clk_get_rate(clk);
599 KUNIT_ASSERT_GT(test, rate, 0);
600 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
601}
602
603/*
604 * Test that if our clock has a rate higher than the maximum set by a
605 * call to clk_set_rate_range(), the rate will be lowered to match the
606 * new maximum.
607 *
608 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
609 * modify the requested rate, which is our case in clk_dummy_rate_ops.
610 */
611static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
612{
613 struct clk_dummy_context *ctx = test->priv;
614 struct clk_hw *hw = &ctx->hw;
615 struct clk *clk = hw->clk;
616 unsigned long rate;
617
618 KUNIT_ASSERT_EQ(test,
619 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
620 0);
621
622 KUNIT_ASSERT_EQ(test,
623 clk_set_rate_range(clk,
624 DUMMY_CLOCK_RATE_1,
625 DUMMY_CLOCK_RATE_2),
626 0);
627
628 rate = clk_get_rate(clk);
629 KUNIT_ASSERT_GT(test, rate, 0);
630 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
631}
632
static struct kunit_case clk_range_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range),
	KUNIT_CASE(clk_range_test_set_range_invalid),
	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
	{}
};

/* Rate-range tests on a clock with pass-through rate ops. */
static struct kunit_suite clk_range_test_suite = {
	.name = "clk-range-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_test_cases,
};
654
655/*
656 * Test that if we have several subsequent calls to
657 * clk_set_rate_range(), the core will reevaluate whether a new rate is
658 * needed each and every time.
659 *
660 * With clk_dummy_maximize_rate_ops, this means that the rate will
661 * trail along the maximum as it evolves.
662 */
663static void clk_range_test_set_range_rate_maximized(struct kunit *test)
664{
665 struct clk_dummy_context *ctx = test->priv;
666 struct clk_hw *hw = &ctx->hw;
667 struct clk *clk = hw->clk;
668 unsigned long rate;
669
670 KUNIT_ASSERT_EQ(test,
671 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
672 0);
673
674 KUNIT_ASSERT_EQ(test,
675 clk_set_rate_range(clk,
676 DUMMY_CLOCK_RATE_1,
677 DUMMY_CLOCK_RATE_2),
678 0);
679
680 rate = clk_get_rate(clk);
681 KUNIT_ASSERT_GT(test, rate, 0);
682 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
683
684 KUNIT_ASSERT_EQ(test,
685 clk_set_rate_range(clk,
686 DUMMY_CLOCK_RATE_1,
687 DUMMY_CLOCK_RATE_2 - 1000),
688 0);
689
690 rate = clk_get_rate(clk);
691 KUNIT_ASSERT_GT(test, rate, 0);
692 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
693
694 KUNIT_ASSERT_EQ(test,
695 clk_set_rate_range(clk,
696 DUMMY_CLOCK_RATE_1,
697 DUMMY_CLOCK_RATE_2),
698 0);
699
700 rate = clk_get_rate(clk);
701 KUNIT_ASSERT_GT(test, rate, 0);
702 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
703}
704
705/*
706 * Test that if we have several subsequent calls to
707 * clk_set_rate_range(), across multiple users, the core will reevaluate
708 * whether a new rate is needed each and every time.
709 *
710 * With clk_dummy_maximize_rate_ops, this means that the rate will
711 * trail along the maximum as it evolves.
712 */
713static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
714{
715 struct clk_dummy_context *ctx = test->priv;
716 struct clk_hw *hw = &ctx->hw;
717 struct clk *clk = hw->clk;
718 struct clk *user1, *user2;
719 unsigned long rate;
720
721 user1 = clk_hw_get_clk(hw, NULL);
722 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
723
724 user2 = clk_hw_get_clk(hw, NULL);
725 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
726
727 KUNIT_ASSERT_EQ(test,
728 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
729 0);
730
731 KUNIT_ASSERT_EQ(test,
732 clk_set_rate_range(user1,
733 0,
734 DUMMY_CLOCK_RATE_2),
735 0);
736
737 rate = clk_get_rate(clk);
738 KUNIT_ASSERT_GT(test, rate, 0);
739 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
740
741 KUNIT_ASSERT_EQ(test,
742 clk_set_rate_range(user2,
743 0,
744 DUMMY_CLOCK_RATE_1),
745 0);
746
747 rate = clk_get_rate(clk);
748 KUNIT_ASSERT_GT(test, rate, 0);
749 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
750
751 KUNIT_ASSERT_EQ(test,
752 clk_drop_range(user2),
753 0);
754
755 rate = clk_get_rate(clk);
756 KUNIT_ASSERT_GT(test, rate, 0);
757 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
758
759 clk_put(user2);
760 clk_put(user1);
761}
762
763/*
764 * Test that if we have several subsequent calls to
765 * clk_set_rate_range(), across multiple users, the core will reevaluate
766 * whether a new rate is needed, including when a user drop its clock.
767 *
768 * With clk_dummy_maximize_rate_ops, this means that the rate will
769 * trail along the maximum as it evolves.
770 */
771static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
772{
773 struct clk_dummy_context *ctx = test->priv;
774 struct clk_hw *hw = &ctx->hw;
775 struct clk *clk = hw->clk;
776 struct clk *user1, *user2;
777 unsigned long rate;
778
779 user1 = clk_hw_get_clk(hw, NULL);
780 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
781
782 user2 = clk_hw_get_clk(hw, NULL);
783 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
784
785 KUNIT_ASSERT_EQ(test,
786 clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
787 0);
788
789 KUNIT_ASSERT_EQ(test,
790 clk_set_rate_range(user1,
791 0,
792 DUMMY_CLOCK_RATE_2),
793 0);
794
795 rate = clk_get_rate(clk);
796 KUNIT_ASSERT_GT(test, rate, 0);
797 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
798
799 KUNIT_ASSERT_EQ(test,
800 clk_set_rate_range(user2,
801 0,
802 DUMMY_CLOCK_RATE_1),
803 0);
804
805 rate = clk_get_rate(clk);
806 KUNIT_ASSERT_GT(test, rate, 0);
807 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
808
809 clk_put(user2);
810
811 rate = clk_get_rate(clk);
812 KUNIT_ASSERT_GT(test, rate, 0);
813 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
814
815 clk_put(user1);
816}
817
static struct kunit_case clk_range_maximize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
	{}
};

/* Rate-range tests on a clock that always maximizes its rate. */
static struct kunit_suite clk_range_maximize_test_suite = {
	.name = "clk-range-maximize-test",
	.init = clk_maximize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_maximize_test_cases,
};
831
832/*
833 * Test that if we have several subsequent calls to
834 * clk_set_rate_range(), the core will reevaluate whether a new rate is
835 * needed each and every time.
836 *
837 * With clk_dummy_minimize_rate_ops, this means that the rate will
838 * trail along the minimum as it evolves.
839 */
840static void clk_range_test_set_range_rate_minimized(struct kunit *test)
841{
842 struct clk_dummy_context *ctx = test->priv;
843 struct clk_hw *hw = &ctx->hw;
844 struct clk *clk = hw->clk;
845 unsigned long rate;
846
847 KUNIT_ASSERT_EQ(test,
848 clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
849 0);
850
851 KUNIT_ASSERT_EQ(test,
852 clk_set_rate_range(clk,
853 DUMMY_CLOCK_RATE_1,
854 DUMMY_CLOCK_RATE_2),
855 0);
856
857 rate = clk_get_rate(clk);
858 KUNIT_ASSERT_GT(test, rate, 0);
859 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
860
861 KUNIT_ASSERT_EQ(test,
862 clk_set_rate_range(clk,
863 DUMMY_CLOCK_RATE_1 + 1000,
864 DUMMY_CLOCK_RATE_2),
865 0);
866
867 rate = clk_get_rate(clk);
868 KUNIT_ASSERT_GT(test, rate, 0);
869 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
870
871 KUNIT_ASSERT_EQ(test,
872 clk_set_rate_range(clk,
873 DUMMY_CLOCK_RATE_1,
874 DUMMY_CLOCK_RATE_2),
875 0);
876
877 rate = clk_get_rate(clk);
878 KUNIT_ASSERT_GT(test, rate, 0);
879 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
880}
881
882/*
883 * Test that if we have several subsequent calls to
884 * clk_set_rate_range(), across multiple users, the core will reevaluate
885 * whether a new rate is needed each and every time.
886 *
887 * With clk_dummy_minimize_rate_ops, this means that the rate will
888 * trail along the minimum as it evolves.
889 */
890static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
891{
892 struct clk_dummy_context *ctx = test->priv;
893 struct clk_hw *hw = &ctx->hw;
894 struct clk *clk = hw->clk;
895 struct clk *user1, *user2;
896 unsigned long rate;
897
898 user1 = clk_hw_get_clk(hw, NULL);
899 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
900
901 user2 = clk_hw_get_clk(hw, NULL);
902 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
903
904 KUNIT_ASSERT_EQ(test,
905 clk_set_rate_range(user1,
906 DUMMY_CLOCK_RATE_1,
907 ULONG_MAX),
908 0);
909
910 rate = clk_get_rate(clk);
911 KUNIT_ASSERT_GT(test, rate, 0);
912 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
913
914 KUNIT_ASSERT_EQ(test,
915 clk_set_rate_range(user2,
916 DUMMY_CLOCK_RATE_2,
917 ULONG_MAX),
918 0);
919
920 rate = clk_get_rate(clk);
921 KUNIT_ASSERT_GT(test, rate, 0);
922 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
923
924 KUNIT_ASSERT_EQ(test,
925 clk_drop_range(user2),
926 0);
927
928 rate = clk_get_rate(clk);
929 KUNIT_ASSERT_GT(test, rate, 0);
930 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
931
932 clk_put(user2);
933 clk_put(user1);
934}
935
936/*
937 * Test that if we have several subsequent calls to
938 * clk_set_rate_range(), across multiple users, the core will reevaluate
939 * whether a new rate is needed, including when a user drop its clock.
940 *
941 * With clk_dummy_minimize_rate_ops, this means that the rate will
942 * trail along the minimum as it evolves.
943 */
944static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
945{
946 struct clk_dummy_context *ctx = test->priv;
947 struct clk_hw *hw = &ctx->hw;
948 struct clk *clk = hw->clk;
949 struct clk *user1, *user2;
950 unsigned long rate;
951
952 user1 = clk_hw_get_clk(hw, NULL);
953 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
954
955 user2 = clk_hw_get_clk(hw, NULL);
956 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
957
958 KUNIT_ASSERT_EQ(test,
959 clk_set_rate_range(user1,
960 DUMMY_CLOCK_RATE_1,
961 ULONG_MAX),
962 0);
963
964 rate = clk_get_rate(clk);
965 KUNIT_ASSERT_GT(test, rate, 0);
966 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
967
968 KUNIT_ASSERT_EQ(test,
969 clk_set_rate_range(user2,
970 DUMMY_CLOCK_RATE_2,
971 ULONG_MAX),
972 0);
973
974 rate = clk_get_rate(clk);
975 KUNIT_ASSERT_GT(test, rate, 0);
976 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
977
978 clk_put(user2);
979
980 rate = clk_get_rate(clk);
981 KUNIT_ASSERT_GT(test, rate, 0);
982 KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
983
984 clk_put(user1);
985}
986
static struct kunit_case clk_range_minimize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
	{}
};

/* Rate-range tests on a clock that always minimizes its rate. */
static struct kunit_suite clk_range_minimize_test_suite = {
	.name = "clk-range-minimize-test",
	.init = clk_minimize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_minimize_test_cases,
};
1000
/* Register all clock rate-management test suites with KUnit. */
kunit_test_suites(
	&clk_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite
);
MODULE_LICENSE("GPL v2");