Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: selftest: convert drm_mm selftest to KUnit

Considering the current adoption of the KUnit framework, convert the
DRM mm selftest to the KUnit API.

Signed-off-by: Arthur Grillo <arthur.grillo@usp.br>
Tested-by: David Gow <davidgow@google.com>
Acked-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Signed-off-by: Maíra Canal <maira.canal@usp.br>
Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220708203052.236290-10-maira.canal@usp.br

Authored by Arthur Grillo and committed by Javier Martinez Canillas.
fc8d29e2 932da861

+509 -953
-11
Documentation/gpu/todo.rst
··· 617 617 618 618 Level: Intermediate 619 619 620 - Convert Kernel Selftests (kselftest) to KUnit tests when appropriate 621 - -------------------------------------------------------------------- 622 - 623 - Many of the `Kselftest <https://www.kernel.org/doc/html/latest/dev-tools/kselftest.html>`_ 624 - tests in DRM could be converted to Kunit tests instead, since that framework 625 - is more suitable for unit testing. 626 - 627 - Contact: Javier Martinez Canillas <javierm@redhat.com> 628 - 629 - Level: Starter 630 - 631 620 Enable trinity for DRM 632 621 ---------------------- 633 622
-20
drivers/gpu/drm/Kconfig
··· 50 50 51 51 If in doubt, say "N". 52 52 53 - config DRM_DEBUG_SELFTEST 54 - tristate "kselftests for DRM" 55 - depends on DRM 56 - depends on DEBUG_KERNEL 57 - select PRIME_NUMBERS 58 - select DRM_DISPLAY_DP_HELPER 59 - select DRM_DISPLAY_HELPER 60 - select DRM_LIB_RANDOM 61 - select DRM_KMS_HELPER 62 - select DRM_BUDDY 63 - select DRM_EXPORT_FOR_TESTS if m 64 - default n 65 - help 66 - This option provides kernel modules that can be used to run 67 - various selftests on parts of the DRM api. This option is not 68 - useful for distributions or general kernels, but only for kernel 69 - developers working on DRM and associated drivers. 70 - 71 - If in doubt, say "N". 72 - 73 53 config DRM_KUNIT_TEST 74 54 tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS 75 55 depends on DRM && KUNIT
-1
drivers/gpu/drm/Makefile
··· 75 75 # Drivers and the rest 76 76 # 77 77 78 - obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/ 79 78 obj-$(CONFIG_DRM_KUNIT_TEST) += tests/ 80 79 81 80 obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
-2
drivers/gpu/drm/selftests/Makefile
··· 1 - # SPDX-License-Identifier: GPL-2.0-only 2 - obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o
-28
drivers/gpu/drm/selftests/drm_mm_selftests.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* List each unit test as selftest(name, function) 3 - * 4 - * The name is used as both an enum and expanded as igt__name to create 5 - * a module parameter. It must be unique and legal for a C identifier. 6 - * 7 - * Tests are executed in order by igt/drm_mm 8 - */ 9 - selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */ 10 - selftest(init, igt_init) 11 - selftest(debug, igt_debug) 12 - selftest(reserve, igt_reserve) 13 - selftest(insert, igt_insert) 14 - selftest(replace, igt_replace) 15 - selftest(insert_range, igt_insert_range) 16 - selftest(align, igt_align) 17 - selftest(frag, igt_frag) 18 - selftest(align32, igt_align32) 19 - selftest(align64, igt_align64) 20 - selftest(evict, igt_evict) 21 - selftest(evict_range, igt_evict_range) 22 - selftest(bottomup, igt_bottomup) 23 - selftest(lowest, igt_lowest) 24 - selftest(topdown, igt_topdown) 25 - selftest(highest, igt_highest) 26 - selftest(color, igt_color) 27 - selftest(color_evict, igt_color_evict) 28 - selftest(color_evict_range, igt_color_evict_range)
-109
drivers/gpu/drm/selftests/drm_selftest.c
··· 1 - /* 2 - * Copyright © 2016 Intel Corporation 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 - * IN THE SOFTWARE. 
22 - */ 23 - 24 - #include <linux/compiler.h> 25 - 26 - #define selftest(name, func) __idx_##name, 27 - enum { 28 - #include TESTS 29 - }; 30 - #undef selftest 31 - 32 - #define selftest(n, f) [__idx_##n] = { .name = #n, .func = f }, 33 - static struct drm_selftest { 34 - bool enabled; 35 - const char *name; 36 - int (*func)(void *); 37 - } selftests[] = { 38 - #include TESTS 39 - }; 40 - #undef selftest 41 - 42 - /* Embed the line number into the parameter name so that we can order tests */ 43 - #define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n)) 44 - #define selftest_0(n, func, id) \ 45 - module_param_named(id, selftests[__idx_##n].enabled, bool, 0400); 46 - #define selftest(n, func) selftest_0(n, func, param(n)) 47 - #include TESTS 48 - #undef selftest 49 - 50 - static void set_default_test_all(struct drm_selftest *st, unsigned long count) 51 - { 52 - unsigned long i; 53 - 54 - for (i = 0; i < count; i++) 55 - if (st[i].enabled) 56 - return; 57 - 58 - for (i = 0; i < count; i++) 59 - st[i].enabled = true; 60 - } 61 - 62 - static int run_selftests(struct drm_selftest *st, 63 - unsigned long count, 64 - void *data) 65 - { 66 - int err = 0; 67 - 68 - set_default_test_all(st, count); 69 - 70 - /* Tests are listed in natural order in drm_*_selftests.h */ 71 - for (; count--; st++) { 72 - if (!st->enabled) 73 - continue; 74 - 75 - pr_debug("drm: Running %s\n", st->name); 76 - err = st->func(data); 77 - if (err) 78 - break; 79 - } 80 - 81 - if (WARN(err > 0 || err == -ENOTTY, 82 - "%s returned %d, conflicting with selftest's magic values!\n", 83 - st->name, err)) 84 - err = -1; 85 - 86 - rcu_barrier(); 87 - return err; 88 - } 89 - 90 - static int __maybe_unused 91 - __drm_subtests(const char *caller, 92 - const struct drm_subtest *st, 93 - int count, 94 - void *data) 95 - { 96 - int err; 97 - 98 - for (; count--; st++) { 99 - pr_debug("Running %s/%s\n", caller, st->name); 100 - err = st->func(data); 101 - if (err) { 102 - pr_err("%s: %s failed with error 
%d\n", 103 - caller, st->name, err); 104 - return err; 105 - } 106 - } 107 - 108 - return 0; 109 - }
-41
drivers/gpu/drm/selftests/drm_selftest.h
··· 1 - /* 2 - * Copyright © 2016 Intel Corporation 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 - * IN THE SOFTWARE. 22 - */ 23 - 24 - #ifndef __DRM_SELFTEST_H__ 25 - #define __DRM_SELFTEST_H__ 26 - 27 - struct drm_subtest { 28 - int (*func)(void *data); 29 - const char *name; 30 - }; 31 - 32 - static int __drm_subtests(const char *caller, 33 - const struct drm_subtest *st, 34 - int count, 35 - void *data); 36 - #define drm_subtests(T, data) \ 37 - __drm_subtests(__func__, T, ARRAY_SIZE(T), data) 38 - 39 - #define SUBTEST(x) { x, #x } 40 - 41 - #endif /* __DRM_SELFTEST_H__ */
+508 -740
drivers/gpu/drm/selftests/test-drm_mm.c drivers/gpu/drm/tests/drm_mm_test.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 3 * Test cases for the drm_mm range manager 4 + * 5 + * Copyright (c) 2022 Arthur Grillo <arthur.grillo@usp.br> 4 6 */ 5 7 6 - #define pr_fmt(fmt) "drm_mm: " fmt 8 + #include <kunit/test.h> 7 9 8 - #include <linux/module.h> 9 10 #include <linux/prime_numbers.h> 10 11 #include <linux/slab.h> 11 12 #include <linux/random.h> ··· 16 15 #include <drm/drm_mm.h> 17 16 18 17 #include "../lib/drm_random.h" 19 - 20 - #define TESTS "drm_mm_selftests.h" 21 - #include "drm_selftest.h" 22 18 23 19 static unsigned int random_seed; 24 20 static unsigned int max_iterations = 8192; ··· 43 45 {} 44 46 }; 45 47 46 - static int igt_sanitycheck(void *ignored) 47 - { 48 - pr_info("%s - ok!\n", __func__); 49 - return 0; 50 - } 51 - 52 - static bool assert_no_holes(const struct drm_mm *mm) 48 + static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm) 53 49 { 54 50 struct drm_mm_node *hole; 55 51 u64 hole_start, __always_unused hole_end; ··· 53 61 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) 54 62 count++; 55 63 if (count) { 56 - pr_err("Expected to find no holes (after reserve), found %lu instead\n", count); 64 + KUNIT_FAIL(test, 65 + "Expected to find no holes (after reserve), found %lu instead\n", count); 57 66 return false; 58 67 } 59 68 60 69 drm_mm_for_each_node(hole, mm) { 61 70 if (drm_mm_hole_follows(hole)) { 62 - pr_err("Hole follows node, expected none!\n"); 71 + KUNIT_FAIL(test, "Hole follows node, expected none!\n"); 63 72 return false; 64 73 } 65 74 } ··· 68 75 return true; 69 76 } 70 77 71 - static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end) 78 + static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end) 72 79 { 73 80 struct drm_mm_node *hole; 74 81 u64 hole_start, hole_end; ··· 82 89 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { 83 90 if (start != hole_start || end != hole_end) { 84 91 if (ok) 85 - pr_err("empty mm has incorrect 
hole, found (%llx, %llx), expect (%llx, %llx)\n", 86 - hole_start, hole_end, 87 - start, end); 92 + KUNIT_FAIL(test, 93 + "empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n", 94 + hole_start, hole_end, start, end); 88 95 ok = false; 89 96 } 90 97 count++; 91 98 } 92 99 if (count != 1) { 93 - pr_err("Expected to find one hole, found %lu instead\n", count); 100 + KUNIT_FAIL(test, "Expected to find one hole, found %lu instead\n", count); 94 101 ok = false; 95 102 } 96 103 97 104 return ok; 98 105 } 99 106 100 - static bool assert_continuous(const struct drm_mm *mm, u64 size) 107 + static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size) 101 108 { 102 109 struct drm_mm_node *node, *check, *found; 103 110 unsigned long n; 104 111 u64 addr; 105 112 106 - if (!assert_no_holes(mm)) 113 + if (!assert_no_holes(test, mm)) 107 114 return false; 108 115 109 116 n = 0; 110 117 addr = 0; 111 118 drm_mm_for_each_node(node, mm) { 112 119 if (node->start != addr) { 113 - pr_err("node[%ld] list out of order, expected %llx found %llx\n", 114 - n, addr, node->start); 120 + KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n", 121 + n, addr, node->start); 115 122 return false; 116 123 } 117 124 118 125 if (node->size != size) { 119 - pr_err("node[%ld].size incorrect, expected %llx, found %llx\n", 120 - n, size, node->size); 126 + KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n", 127 + n, size, node->size); 121 128 return false; 122 129 } 123 130 124 131 if (drm_mm_hole_follows(node)) { 125 - pr_err("node[%ld] is followed by a hole!\n", n); 132 + KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n); 126 133 return false; 127 134 } 128 135 129 136 found = NULL; 130 137 drm_mm_for_each_node_in_range(check, mm, addr, addr + size) { 131 138 if (node != check) { 132 - pr_err("lookup return wrong node, expected start %llx, found %llx\n", 133 - node->start, check->start); 139 + KUNIT_FAIL(test, 
140 + "lookup return wrong node, expected start %llx, found %llx\n", 141 + node->start, check->start); 134 142 return false; 135 143 } 136 144 found = check; 137 145 } 138 146 if (!found) { 139 - pr_err("lookup failed for node %llx + %llx\n", 140 - addr, size); 147 + KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size); 141 148 return false; 142 149 } 143 150 ··· 159 166 return rem; 160 167 } 161 168 162 - static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm, 169 + static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm, 163 170 u64 size, u64 alignment, unsigned long color) 164 171 { 165 172 bool ok = true; 166 173 167 174 if (!drm_mm_node_allocated(node) || node->mm != mm) { 168 - pr_err("node not allocated\n"); 175 + KUNIT_FAIL(test, "node not allocated\n"); 169 176 ok = false; 170 177 } 171 178 172 179 if (node->size != size) { 173 - pr_err("node has wrong size, found %llu, expected %llu\n", 174 - node->size, size); 180 + KUNIT_FAIL(test, "node has wrong size, found %llu, expected %llu\n", 181 + node->size, size); 175 182 ok = false; 176 183 } 177 184 178 185 if (misalignment(node, alignment)) { 179 - pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n", 180 - node->start, misalignment(node, alignment), alignment); 186 + KUNIT_FAIL(test, 187 + "node is misaligned, start %llx rem %llu, expected alignment %llu\n", 188 + node->start, misalignment(node, alignment), alignment); 181 189 ok = false; 182 190 } 183 191 184 192 if (node->color != color) { 185 - pr_err("node has wrong color, found %lu, expected %lu\n", 186 - node->color, color); 193 + KUNIT_FAIL(test, "node has wrong color, found %lu, expected %lu\n", 194 + node->color, color); 187 195 ok = false; 188 196 } 189 197 190 198 return ok; 191 199 } 192 200 193 - #define show_mm(mm) do { \ 194 - struct drm_printer __p = drm_debug_printer(__func__); \ 195 - drm_mm_print((mm), &__p); } while (0) 196 - 197 - static int 
igt_init(void *ignored) 201 + static void igt_mm_init(struct kunit *test) 198 202 { 199 203 const unsigned int size = 4096; 200 204 struct drm_mm mm; 201 205 struct drm_mm_node tmp; 202 - int ret = -EINVAL; 203 206 204 207 /* Start with some simple checks on initialising the struct drm_mm */ 205 208 memset(&mm, 0, sizeof(mm)); 206 - if (drm_mm_initialized(&mm)) { 207 - pr_err("zeroed mm claims to be initialized\n"); 208 - return ret; 209 - } 209 + KUNIT_ASSERT_FALSE_MSG(test, drm_mm_initialized(&mm), 210 + "zeroed mm claims to be initialized\n"); 210 211 211 212 memset(&mm, 0xff, sizeof(mm)); 212 213 drm_mm_init(&mm, 0, size); 213 214 if (!drm_mm_initialized(&mm)) { 214 - pr_err("mm claims not to be initialized\n"); 215 + KUNIT_FAIL(test, "mm claims not to be initialized\n"); 215 216 goto out; 216 217 } 217 218 218 219 if (!drm_mm_clean(&mm)) { 219 - pr_err("mm not empty on creation\n"); 220 + KUNIT_FAIL(test, "mm not empty on creation\n"); 220 221 goto out; 221 222 } 222 223 223 224 /* After creation, it should all be one massive hole */ 224 - if (!assert_one_hole(&mm, 0, size)) { 225 - ret = -EINVAL; 225 + if (!assert_one_hole(test, &mm, 0, size)) { 226 + KUNIT_FAIL(test, ""); 226 227 goto out; 227 228 } 228 229 229 230 memset(&tmp, 0, sizeof(tmp)); 230 231 tmp.start = 0; 231 232 tmp.size = size; 232 - ret = drm_mm_reserve_node(&mm, &tmp); 233 - if (ret) { 234 - pr_err("failed to reserve whole drm_mm\n"); 233 + if (drm_mm_reserve_node(&mm, &tmp)) { 234 + KUNIT_FAIL(test, "failed to reserve whole drm_mm\n"); 235 235 goto out; 236 236 } 237 237 238 238 /* After filling the range entirely, there should be no holes */ 239 - if (!assert_no_holes(&mm)) { 240 - ret = -EINVAL; 239 + if (!assert_no_holes(test, &mm)) { 240 + KUNIT_FAIL(test, ""); 241 241 goto out; 242 242 } 243 243 244 244 /* And then after emptying it again, the massive hole should be back */ 245 245 drm_mm_remove_node(&tmp); 246 - if (!assert_one_hole(&mm, 0, size)) { 247 - ret = -EINVAL; 246 + if 
(!assert_one_hole(test, &mm, 0, size)) { 247 + KUNIT_FAIL(test, ""); 248 248 goto out; 249 249 } 250 250 251 251 out: 252 - if (ret) 253 - show_mm(&mm); 254 252 drm_mm_takedown(&mm); 255 - return ret; 256 253 } 257 254 258 - static int igt_debug(void *ignored) 255 + static void igt_mm_debug(struct kunit *test) 259 256 { 260 257 struct drm_mm mm; 261 258 struct drm_mm_node nodes[2]; 262 - int ret; 263 259 264 260 /* Create a small drm_mm with a couple of nodes and a few holes, and 265 261 * check that the debug iterator doesn't explode over a trivial drm_mm. ··· 259 277 memset(nodes, 0, sizeof(nodes)); 260 278 nodes[0].start = 512; 261 279 nodes[0].size = 1024; 262 - ret = drm_mm_reserve_node(&mm, &nodes[0]); 263 - if (ret) { 264 - pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n", 265 - nodes[0].start, nodes[0].size); 266 - return ret; 267 - } 280 + KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[0]), 281 + "failed to reserve node[0] {start=%lld, size=%lld)\n", 282 + nodes[0].start, nodes[0].size); 268 283 269 284 nodes[1].size = 1024; 270 285 nodes[1].start = 4096 - 512 - nodes[1].size; 271 - ret = drm_mm_reserve_node(&mm, &nodes[1]); 272 - if (ret) { 273 - pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n", 274 - nodes[1].start, nodes[1].size); 275 - return ret; 276 - } 277 - 278 - show_mm(&mm); 279 - return 0; 286 + KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]), 287 + "failed to reserve node[0] {start=%lld, size=%lld)\n", 288 + nodes[0].start, nodes[0].size); 280 289 } 281 290 282 291 static struct drm_mm_node *set_node(struct drm_mm_node *node, ··· 278 305 return node; 279 306 } 280 307 281 - static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node) 308 + static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node) 282 309 { 283 310 int err; 284 311 ··· 287 314 return true; 288 315 289 316 if (!err) { 290 - pr_err("impossible reserve succeeded, node %llu 
+ %llu\n", 291 - node->start, node->size); 317 + KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n", 318 + node->start, node->size); 292 319 drm_mm_remove_node(node); 293 320 } else { 294 - pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n", 321 + KUNIT_FAIL(test, 322 + "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n", 295 323 err, -ENOSPC, node->start, node->size); 296 324 } 297 325 return false; 298 326 } 299 327 300 - static bool check_reserve_boundaries(struct drm_mm *mm, 328 + static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm, 301 329 unsigned int count, 302 330 u64 size) 303 331 { ··· 313 339 B(size * count, 0), 314 340 B(-size, size), 315 341 B(-size, -size), 316 - B(-size, 2*size), 342 + B(-size, 2 * size), 317 343 B(0, -size), 318 344 B(size, -size), 319 - B(count*size, size), 320 - B(count*size, -size), 321 - B(count*size, count*size), 322 - B(count*size, -count*size), 323 - B(count*size, -(count+1)*size), 324 - B((count+1)*size, size), 325 - B((count+1)*size, -size), 326 - B((count+1)*size, -2*size), 345 + B(count * size, size), 346 + B(count * size, -size), 347 + B(count * size, count * size), 348 + B(count * size, -count * size), 349 + B(count * size, -(count + 1) * size), 350 + B((count + 1) * size, size), 351 + B((count + 1) * size, -size), 352 + B((count + 1) * size, -2 * size), 327 353 #undef B 328 354 }; 329 355 struct drm_mm_node tmp = {}; 330 356 int n; 331 357 332 358 for (n = 0; n < ARRAY_SIZE(boundaries); n++) { 333 - if (!expect_reserve_fail(mm, 334 - set_node(&tmp, 335 - boundaries[n].start, 336 - boundaries[n].size))) { 337 - pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n", 338 - n, boundaries[n].name, count, size); 359 + if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start, 360 + boundaries[n].size))) { 361 + KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n", 362 + n, boundaries[n].name, 
count, size); 339 363 return false; 340 364 } 341 365 } ··· 341 369 return true; 342 370 } 343 371 344 - static int __igt_reserve(unsigned int count, u64 size) 372 + static int __igt_reserve(struct kunit *test, unsigned int count, u64 size) 345 373 { 346 374 DRM_RND_STATE(prng, random_seed); 347 375 struct drm_mm mm; ··· 349 377 unsigned int *order, n, m, o = 0; 350 378 int ret, err; 351 379 352 - /* For exercising drm_mm_reserve_node(), we want to check that 380 + /* For exercising drm_mm_reserve_node(struct kunit *test, ), we want to check that 353 381 * reservations outside of the drm_mm range are rejected, and to 354 382 * overlapping and otherwise already occupied ranges. Afterwards, 355 383 * the tree and nodes should be intact. ··· 364 392 goto err; 365 393 366 394 nodes = vzalloc(array_size(count, sizeof(*nodes))); 367 - if (!nodes) 368 - goto err_order; 395 + KUNIT_ASSERT_TRUE(test, nodes); 369 396 370 397 ret = -EINVAL; 371 398 drm_mm_init(&mm, 0, count * size); 372 399 373 - if (!check_reserve_boundaries(&mm, count, size)) 400 + if (!check_reserve_boundaries(test, &mm, count, size)) 374 401 goto out; 375 402 376 403 for (n = 0; n < count; n++) { ··· 378 407 379 408 err = drm_mm_reserve_node(&mm, &nodes[n]); 380 409 if (err) { 381 - pr_err("reserve failed, step %d, start %llu\n", 382 - n, nodes[n].start); 410 + KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n", 411 + n, nodes[n].start); 383 412 ret = err; 384 413 goto out; 385 414 } 386 415 387 416 if (!drm_mm_node_allocated(&nodes[n])) { 388 - pr_err("reserved node not allocated! step %d, start %llu\n", 389 - n, nodes[n].start); 417 + KUNIT_FAIL(test, "reserved node not allocated! 
step %d, start %llu\n", 418 + n, nodes[n].start); 390 419 goto out; 391 420 } 392 421 393 - if (!expect_reserve_fail(&mm, &nodes[n])) 422 + if (!expect_reserve_fail(test, &mm, &nodes[n])) 394 423 goto out; 395 424 } 396 425 397 426 /* After random insertion the nodes should be in order */ 398 - if (!assert_continuous(&mm, size)) 427 + if (!assert_continuous(test, &mm, size)) 399 428 goto out; 400 429 401 430 /* Repeated use should then fail */ 402 431 drm_random_reorder(order, count, &prng); 403 432 for (n = 0; n < count; n++) { 404 - if (!expect_reserve_fail(&mm, 405 - set_node(&tmp, order[n] * size, 1))) 433 + if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1))) 406 434 goto out; 407 435 408 436 /* Remove and reinsert should work */ 409 437 drm_mm_remove_node(&nodes[order[n]]); 410 438 err = drm_mm_reserve_node(&mm, &nodes[order[n]]); 411 439 if (err) { 412 - pr_err("reserve failed, step %d, start %llu\n", 413 - n, nodes[n].start); 440 + KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n", 441 + n, nodes[n].start); 414 442 ret = err; 415 443 goto out; 416 444 } 417 445 } 418 446 419 - if (!assert_continuous(&mm, size)) 447 + if (!assert_continuous(test, &mm, size)) 420 448 goto out; 421 449 422 450 /* Overlapping use should then fail */ 423 451 for (n = 0; n < count; n++) { 424 - if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count))) 452 + if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count))) 425 453 goto out; 426 454 } 427 455 for (n = 0; n < count; n++) { 428 - if (!expect_reserve_fail(&mm, 429 - set_node(&tmp, 430 - size * n, 431 - size * (count - n)))) 456 + if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n)))) 432 457 goto out; 433 458 } 434 459 ··· 439 472 node = &nodes[order[(o + m) % count]]; 440 473 err = drm_mm_reserve_node(&mm, node); 441 474 if (err) { 442 - pr_err("reserve failed, step %d/%d, start %llu\n", 443 - m, n, node->start); 475 + KUNIT_FAIL(test, "reserve failed, 
step %d/%d, start %llu\n", 476 + m, n, node->start); 444 477 ret = err; 445 478 goto out; 446 479 } ··· 448 481 449 482 o += n; 450 483 451 - if (!assert_continuous(&mm, size)) 484 + if (!assert_continuous(test, &mm, size)) 452 485 goto out; 453 486 } 454 487 ··· 458 491 drm_mm_remove_node(node); 459 492 drm_mm_takedown(&mm); 460 493 vfree(nodes); 461 - err_order: 462 494 kfree(order); 463 495 err: 464 496 return ret; 465 497 } 466 498 467 - static int igt_reserve(void *ignored) 499 + static void igt_mm_reserve(struct kunit *test) 468 500 { 469 501 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); 470 - int n, ret; 502 + int n; 471 503 472 504 for_each_prime_number_from(n, 1, 54) { 473 505 u64 size = BIT_ULL(n); 474 506 475 - ret = __igt_reserve(count, size - 1); 476 - if (ret) 477 - return ret; 478 - 479 - ret = __igt_reserve(count, size); 480 - if (ret) 481 - return ret; 482 - 483 - ret = __igt_reserve(count, size + 1); 484 - if (ret) 485 - return ret; 507 + KUNIT_ASSERT_FALSE(test, __igt_reserve(test, count, size - 1)); 508 + KUNIT_ASSERT_FALSE(test, __igt_reserve(test, count, size)); 509 + KUNIT_ASSERT_FALSE(test, __igt_reserve(test, count, size + 1)); 486 510 487 511 cond_resched(); 488 512 } 489 - 490 - return 0; 491 513 } 492 514 493 - static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node, 494 - u64 size, u64 alignment, unsigned long color, 495 - const struct insert_mode *mode) 515 + static bool expect_insert(struct kunit *test, struct drm_mm *mm, 516 + struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color, 517 + const struct insert_mode *mode) 496 518 { 497 519 int err; 498 520 ··· 489 533 size, alignment, color, 490 534 mode->mode); 491 535 if (err) { 492 - pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n", 493 - size, alignment, color, mode->name, err); 536 + KUNIT_FAIL(test, 537 + "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n", 538 + 
size, alignment, color, mode->name, err); 494 539 return false; 495 540 } 496 541 497 - if (!assert_node(node, mm, size, alignment, color)) { 542 + if (!assert_node(test, node, mm, size, alignment, color)) { 498 543 drm_mm_remove_node(node); 499 544 return false; 500 545 } ··· 503 546 return true; 504 547 } 505 548 506 - static bool expect_insert_fail(struct drm_mm *mm, u64 size) 549 + static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size) 507 550 { 508 551 struct drm_mm_node tmp = {}; 509 552 int err; ··· 513 556 return true; 514 557 515 558 if (!err) { 516 - pr_err("impossible insert succeeded, node %llu + %llu\n", 517 - tmp.start, tmp.size); 559 + KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n", 560 + tmp.start, tmp.size); 518 561 drm_mm_remove_node(&tmp); 519 562 } else { 520 - pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n", 521 - err, -ENOSPC, size); 563 + KUNIT_FAIL(test, 564 + "impossible insert failed with wrong error %d [expected %d], size %llu\n", 565 + err, -ENOSPC, size); 522 566 } 523 567 return false; 524 568 } 525 569 526 - static int __igt_insert(unsigned int count, u64 size, bool replace) 570 + static int __igt_insert(struct kunit *test, unsigned int count, u64 size, bool replace) 527 571 { 528 572 DRM_RND_STATE(prng, random_seed); 529 573 const struct insert_mode *mode; ··· 540 582 541 583 ret = -ENOMEM; 542 584 nodes = vmalloc(array_size(count, sizeof(*nodes))); 543 - if (!nodes) 544 - goto err; 585 + KUNIT_ASSERT_TRUE(test, nodes); 545 586 546 587 order = drm_random_order(count, &prng); 547 588 if (!order) ··· 555 598 556 599 node = replace ? 
&tmp : &nodes[n]; 557 600 memset(node, 0, sizeof(*node)); 558 - if (!expect_insert(&mm, node, size, 0, n, mode)) { 559 - pr_err("%s insert failed, size %llu step %d\n", 560 - mode->name, size, n); 601 + if (!expect_insert(test, &mm, node, size, 0, n, mode)) { 602 + KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n", 603 + mode->name, size, n); 561 604 goto out; 562 605 } 563 606 564 607 if (replace) { 565 608 drm_mm_replace_node(&tmp, &nodes[n]); 566 609 if (drm_mm_node_allocated(&tmp)) { 567 - pr_err("replaced old-node still allocated! step %d\n", 568 - n); 610 + KUNIT_FAIL(test, 611 + "replaced old-node still allocated! step %d\n", 612 + n); 569 613 goto out; 570 614 } 571 615 572 - if (!assert_node(&nodes[n], &mm, size, 0, n)) { 573 - pr_err("replaced node did not inherit parameters, size %llu step %d\n", 574 - size, n); 616 + if (!assert_node(test, &nodes[n], &mm, size, 0, n)) { 617 + KUNIT_FAIL(test, 618 + "replaced node did not inherit parameters, size %llu step %d\n", 619 + size, n); 575 620 goto out; 576 621 } 577 622 578 623 if (tmp.start != nodes[n].start) { 579 - pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n", 580 - tmp.start, size, 581 - nodes[n].start, nodes[n].size); 624 + KUNIT_FAIL(test, 625 + "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n", 626 + tmp.start, size, nodes[n].start, nodes[n].size); 582 627 goto out; 583 628 } 584 629 } 585 630 } 586 631 587 632 /* After random insertion the nodes should be in order */ 588 - if (!assert_continuous(&mm, size)) 633 + if (!assert_continuous(test, &mm, size)) 589 634 goto out; 590 635 591 636 /* Repeated use should then fail */ 592 - if (!expect_insert_fail(&mm, size)) 637 + if (!expect_insert_fail(test, &mm, size)) 593 638 goto out; 594 639 595 640 /* Remove one and reinsert, as the only hole it should refill itself */ ··· 599 640 u64 addr = nodes[n].start; 600 641 601 642 drm_mm_remove_node(&nodes[n]); 602 - if 
(!expect_insert(&mm, &nodes[n], size, 0, n, mode)) { 603 - pr_err("%s reinsert failed, size %llu step %d\n", 604 - mode->name, size, n); 643 + if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) { 644 + KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n", 645 + mode->name, size, n); 605 646 goto out; 606 647 } 607 648 608 649 if (nodes[n].start != addr) { 609 - pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n", 610 - mode->name, n, addr, nodes[n].start); 650 + KUNIT_FAIL(test, 651 + "%s reinsert node moved, step %d, expected %llx, found %llx\n", 652 + mode->name, n, addr, nodes[n].start); 611 653 goto out; 612 654 } 613 655 614 - if (!assert_continuous(&mm, size)) 656 + if (!assert_continuous(test, &mm, size)) 615 657 goto out; 616 658 } 617 659 ··· 625 665 626 666 for (m = 0; m < n; m++) { 627 667 node = &nodes[order[(o + m) % count]]; 628 - if (!expect_insert(&mm, node, size, 0, n, mode)) { 629 - pr_err("%s multiple reinsert failed, size %llu step %d\n", 630 - mode->name, size, n); 668 + if (!expect_insert(test, &mm, node, size, 0, n, mode)) { 669 + KUNIT_FAIL(test, 670 + "%s multiple reinsert failed, size %llu step %d\n", 671 + mode->name, size, n); 631 672 goto out; 632 673 } 633 674 } 634 675 635 676 o += n; 636 677 637 - if (!assert_continuous(&mm, size)) 678 + if (!assert_continuous(test, &mm, size)) 638 679 goto out; 639 680 640 - if (!expect_insert_fail(&mm, size)) 681 + if (!expect_insert_fail(test, &mm, size)) 641 682 goto out; 642 683 } 643 684 ··· 657 696 kfree(order); 658 697 err_nodes: 659 698 vfree(nodes); 660 - err: 661 699 return ret; 662 700 } 663 701 664 - static int igt_insert(void *ignored) 702 + static void igt_mm_insert(struct kunit *test) 665 703 { 666 704 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); 667 705 unsigned int n; 668 - int ret; 669 706 670 707 for_each_prime_number_from(n, 1, 54) { 671 708 u64 size = BIT_ULL(n); 672 709 673 - ret = __igt_insert(count, size - 1, 
false); 674 - if (ret) 675 - return ret; 676 - 677 - ret = __igt_insert(count, size, false); 678 - if (ret) 679 - return ret; 680 - 681 - ret = __igt_insert(count, size + 1, false); 682 - if (ret) 683 - return ret; 710 + KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size - 1, false)); 711 + KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size, false)); 712 + KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size + 1, false)); 684 713 685 714 cond_resched(); 686 715 } 687 - 688 - return 0; 689 716 } 690 717 691 - static int igt_replace(void *ignored) 718 + static void igt_mm_replace(struct kunit *test) 692 719 { 693 720 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); 694 721 unsigned int n; 695 - int ret; 696 722 697 723 /* Reuse igt_insert to exercise replacement by inserting a dummy node, 698 724 * then replacing it with the intended node. We want to check that ··· 690 742 for_each_prime_number_from(n, 1, 54) { 691 743 u64 size = BIT_ULL(n); 692 744 693 - ret = __igt_insert(count, size - 1, true); 694 - if (ret) 695 - return ret; 696 - 697 - ret = __igt_insert(count, size, true); 698 - if (ret) 699 - return ret; 700 - 701 - ret = __igt_insert(count, size + 1, true); 702 - if (ret) 703 - return ret; 745 + KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size - 1, true)); 746 + KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size, true)); 747 + KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size + 1, true)); 704 748 705 749 cond_resched(); 706 750 } 707 - 708 - return 0; 709 751 } 710 752 711 - static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node, 753 + static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node, 712 754 u64 size, u64 alignment, unsigned long color, 713 - u64 range_start, u64 range_end, 714 - const struct insert_mode *mode) 755 + u64 range_start, u64 range_end, const struct insert_mode *mode) 715 756 { 716 757 int err; 717 758 ··· 709 772 
range_start, range_end, 710 773 mode->mode); 711 774 if (err) { 712 - pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n", 713 - size, alignment, color, mode->name, 714 - range_start, range_end, err); 775 + KUNIT_FAIL(test, 776 + "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n", 777 + size, alignment, color, mode->name, 778 + range_start, range_end, err); 715 779 return false; 716 780 } 717 781 718 - if (!assert_node(node, mm, size, alignment, color)) { 782 + if (!assert_node(test, node, mm, size, alignment, color)) { 719 783 drm_mm_remove_node(node); 720 784 return false; 721 785 } ··· 724 786 return true; 725 787 } 726 788 727 - static bool expect_insert_in_range_fail(struct drm_mm *mm, 728 - u64 size, 729 - u64 range_start, 730 - u64 range_end) 789 + static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm, 790 + u64 size, u64 range_start, u64 range_end) 731 791 { 732 792 struct drm_mm_node tmp = {}; 733 793 int err; 734 794 735 - err = drm_mm_insert_node_in_range(mm, &tmp, 736 - size, 0, 0, 737 - range_start, range_end, 795 + err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end, 738 796 0); 739 797 if (likely(err == -ENOSPC)) 740 798 return true; 741 799 742 800 if (!err) { 743 - pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n", 744 - tmp.start, tmp.size, range_start, range_end); 801 + KUNIT_FAIL(test, 802 + "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n", 803 + tmp.start, tmp.size, range_start, range_end); 745 804 drm_mm_remove_node(&tmp); 746 805 } else { 747 - pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n", 748 - err, -ENOSPC, size, range_start, range_end); 806 + KUNIT_FAIL(test, 807 + "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n", 808 + err, -ENOSPC, size, 
range_start, range_end); 749 809 } 750 810 751 811 return false; 752 812 } 753 813 754 - static bool assert_contiguous_in_range(struct drm_mm *mm, 755 - u64 size, 756 - u64 start, 757 - u64 end) 814 + static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm, 815 + u64 size, u64 start, u64 end) 758 816 { 759 817 struct drm_mm_node *node; 760 818 unsigned int n; 761 819 762 - if (!expect_insert_in_range_fail(mm, size, start, end)) 820 + if (!expect_insert_in_range_fail(test, mm, size, start, end)) 763 821 return false; 764 822 765 823 n = div64_u64(start + size - 1, size); 766 824 drm_mm_for_each_node(node, mm) { 767 825 if (node->start < start || node->start + node->size > end) { 768 - pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n", 769 - n, node->start, node->start + node->size, start, end); 826 + KUNIT_FAIL(test, 827 + "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n", 828 + n, node->start, node->start + node->size, start, end); 770 829 return false; 771 830 } 772 831 773 832 if (node->start != n * size) { 774 - pr_err("node %d out of order, expected start %llx, found %llx\n", 775 - n, n * size, node->start); 833 + KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n", 834 + n, n * size, node->start); 776 835 return false; 777 836 } 778 837 779 838 if (node->size != size) { 780 - pr_err("node %d has wrong size, expected size %llx, found %llx\n", 781 - n, size, node->size); 839 + KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n", 840 + n, size, node->size); 782 841 return false; 783 842 } 784 843 785 - if (drm_mm_hole_follows(node) && 786 - drm_mm_hole_node_end(node) < end) { 787 - pr_err("node %d is followed by a hole!\n", n); 844 + if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) { 845 + KUNIT_FAIL(test, "node %d is followed by a hole!\n", n); 788 846 return false; 789 847 } 790 848 ··· 790 856 if (start > 0) { 791 857 node = 
__drm_mm_interval_first(mm, 0, start - 1); 792 858 if (drm_mm_node_allocated(node)) { 793 - pr_err("node before start: node=%llx+%llu, start=%llx\n", 794 - node->start, node->size, start); 859 + KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n", 860 + node->start, node->size, start); 795 861 return false; 796 862 } 797 863 } ··· 799 865 if (end < U64_MAX) { 800 866 node = __drm_mm_interval_first(mm, end, U64_MAX); 801 867 if (drm_mm_node_allocated(node)) { 802 - pr_err("node after end: node=%llx+%llu, end=%llx\n", 803 - node->start, node->size, end); 868 + KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n", 869 + node->start, node->size, end); 804 870 return false; 805 871 } 806 872 } ··· 808 874 return true; 809 875 } 810 876 811 - static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end) 877 + static int __igt_insert_range(struct kunit *test, unsigned int count, u64 size, u64 start, u64 end) 812 878 { 813 879 const struct insert_mode *mode; 814 880 struct drm_mm mm; ··· 820 886 DRM_MM_BUG_ON(!size); 821 887 DRM_MM_BUG_ON(end <= start); 822 888 823 - /* Very similar to __igt_insert(), but now instead of populating the 889 + /* Very similar to __igt_insert(struct kunit *test, ), but now instead of populating the 824 890 * full range of the drm_mm, we try to fill a small portion of it. 
825 891 */ 826 892 827 893 ret = -ENOMEM; 828 894 nodes = vzalloc(array_size(count, sizeof(*nodes))); 829 - if (!nodes) 830 - goto err; 895 + KUNIT_ASSERT_TRUE(test, nodes); 831 896 832 897 ret = -EINVAL; 833 898 drm_mm_init(&mm, 0, count * size); ··· 836 903 837 904 for (mode = insert_modes; mode->name; mode++) { 838 905 for (n = start_n; n <= end_n; n++) { 839 - if (!expect_insert_in_range(&mm, &nodes[n], 840 - size, size, n, 906 + if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n, 841 907 start, end, mode)) { 842 - pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n", 843 - mode->name, size, n, 844 - start_n, end_n, 845 - start, end); 908 + KUNIT_FAIL(test, 909 + "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n", 910 + mode->name, size, n, start_n, end_n, start, end); 846 911 goto out; 847 912 } 848 913 } 849 914 850 - if (!assert_contiguous_in_range(&mm, size, start, end)) { 851 - pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n", 852 - mode->name, start, end, size); 915 + if (!assert_contiguous_in_range(test, &mm, size, start, end)) { 916 + KUNIT_FAIL(test, 917 + "%s: range [%llx, %llx] not full after initialisation, size=%llu\n", 918 + mode->name, start, end, size); 853 919 goto out; 854 920 } 855 921 ··· 857 925 u64 addr = nodes[n].start; 858 926 859 927 drm_mm_remove_node(&nodes[n]); 860 - if (!expect_insert_in_range(&mm, &nodes[n], 861 - size, size, n, 928 + if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n, 862 929 start, end, mode)) { 863 - pr_err("%s reinsert failed, step %d\n", mode->name, n); 930 + KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n); 864 931 goto out; 865 932 } 866 933 867 934 if (nodes[n].start != addr) { 868 - pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n", 869 - mode->name, n, addr, nodes[n].start); 935 + KUNIT_FAIL(test, 936 + "%s reinsert node moved, step %d, expected %llx, found %llx\n", 937 + 
mode->name, n, addr, nodes[n].start); 870 938 goto out; 871 939 } 872 940 } 873 941 874 - if (!assert_contiguous_in_range(&mm, size, start, end)) { 875 - pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n", 876 - mode->name, start, end, size); 942 + if (!assert_contiguous_in_range(test, &mm, size, start, end)) { 943 + KUNIT_FAIL(test, 944 + "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n", 945 + mode->name, start, end, size); 877 946 goto out; 878 947 } 879 948 ··· 891 958 drm_mm_remove_node(node); 892 959 drm_mm_takedown(&mm); 893 960 vfree(nodes); 894 - err: 895 961 return ret; 896 962 } 897 963 898 - static int insert_outside_range(void) 964 + static int insert_outside_range(struct kunit *test) 899 965 { 900 966 struct drm_mm mm; 901 967 const unsigned int start = 1024; ··· 903 971 904 972 drm_mm_init(&mm, start, size); 905 973 906 - if (!expect_insert_in_range_fail(&mm, 1, 0, start)) 974 + if (!expect_insert_in_range_fail(test, &mm, 1, 0, start)) 907 975 return -EINVAL; 908 976 909 - if (!expect_insert_in_range_fail(&mm, size, 910 - start - size/2, start + (size+1)/2)) 977 + if (!expect_insert_in_range_fail(test, &mm, size, 978 + start - size / 2, start + (size + 1) / 2)) 911 979 return -EINVAL; 912 980 913 - if (!expect_insert_in_range_fail(&mm, size, 914 - end - (size+1)/2, end + size/2)) 981 + if (!expect_insert_in_range_fail(test, &mm, size, 982 + end - (size + 1) / 2, end + size / 2)) 915 983 return -EINVAL; 916 984 917 - if (!expect_insert_in_range_fail(&mm, 1, end, end + size)) 985 + if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size)) 918 986 return -EINVAL; 919 987 920 988 drm_mm_takedown(&mm); 921 989 return 0; 922 990 } 923 991 924 - static int igt_insert_range(void *ignored) 992 + static void igt_mm_insert_range(struct kunit *test) 925 993 { 926 994 const unsigned int count = min_t(unsigned int, BIT(13), max_iterations); 927 995 unsigned int n; 928 - int ret; 929 996 930 997 /* Check that requests 
outside the bounds of drm_mm are rejected. */ 931 - ret = insert_outside_range(); 932 - if (ret) 933 - return ret; 998 + KUNIT_ASSERT_FALSE(test, insert_outside_range(test)); 934 999 935 1000 for_each_prime_number_from(n, 1, 50) { 936 1001 const u64 size = BIT_ULL(n); 937 1002 const u64 max = count * size; 938 1003 939 - ret = __igt_insert_range(count, size, 0, max); 940 - if (ret) 941 - return ret; 942 - 943 - ret = __igt_insert_range(count, size, 1, max); 944 - if (ret) 945 - return ret; 946 - 947 - ret = __igt_insert_range(count, size, 0, max - 1); 948 - if (ret) 949 - return ret; 950 - 951 - ret = __igt_insert_range(count, size, 0, max/2); 952 - if (ret) 953 - return ret; 954 - 955 - ret = __igt_insert_range(count, size, max/2, max); 956 - if (ret) 957 - return ret; 958 - 959 - ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1); 960 - if (ret) 961 - return ret; 1004 + KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 0, max)); 1005 + KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 1, max)); 1006 + KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 0, max - 1)); 1007 + KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 0, max / 2)); 1008 + KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, max / 2, max / 2)); 1009 + KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 1010 + max / 4 + 1, 3 * max / 4 - 1)); 962 1011 963 1012 cond_resched(); 964 1013 } 965 - 966 - return 0; 967 1014 } 968 1015 969 - static int prepare_igt_frag(struct drm_mm *mm, 970 - struct drm_mm_node *nodes, 971 - unsigned int num_insert, 1016 + static int prepare_igt_frag(struct kunit *test, struct drm_mm *mm, 1017 + struct drm_mm_node *nodes, unsigned int num_insert, 972 1018 const struct insert_mode *mode) 973 1019 { 974 1020 unsigned int size = 4096; 975 1021 unsigned int i; 976 1022 977 1023 for (i = 0; i < num_insert; i++) { 978 - if (!expect_insert(mm, &nodes[i], size, 0, i, 979 - mode) != 0) { 980 - 
pr_err("%s insert failed\n", mode->name); 1024 + if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) { 1025 + KUNIT_FAIL(test, "%s insert failed\n", mode->name); 981 1026 return -EINVAL; 982 1027 } 983 1028 } ··· 966 1057 } 967 1058 968 1059 return 0; 969 - 970 1060 } 971 1061 972 - static u64 get_insert_time(struct drm_mm *mm, 973 - unsigned int num_insert, 974 - struct drm_mm_node *nodes, 1062 + static u64 get_insert_time(struct kunit *test, struct drm_mm *mm, 1063 + unsigned int num_insert, struct drm_mm_node *nodes, 975 1064 const struct insert_mode *mode) 976 1065 { 977 1066 unsigned int size = 8192; ··· 978 1071 979 1072 start = ktime_get(); 980 1073 for (i = 0; i < num_insert; i++) { 981 - if (!expect_insert(mm, &nodes[i], size, 0, i, mode) != 0) { 982 - pr_err("%s insert failed\n", mode->name); 1074 + if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) { 1075 + KUNIT_FAIL(test, "%s insert failed\n", mode->name); 983 1076 return 0; 984 1077 } 985 1078 } ··· 987 1080 return ktime_to_ns(ktime_sub(ktime_get(), start)); 988 1081 } 989 1082 990 - static int igt_frag(void *ignored) 1083 + static void igt_mm_frag(struct kunit *test) 991 1084 { 992 1085 struct drm_mm mm; 993 1086 const struct insert_mode *mode; 994 1087 struct drm_mm_node *nodes, *node, *next; 995 1088 unsigned int insert_size = 10000; 996 1089 unsigned int scale_factor = 4; 997 - int ret = -EINVAL; 998 1090 999 1091 /* We need 4 * insert_size nodes to hold intermediate allocated 1000 1092 * drm_mm nodes. 
1001 - * 1 times for prepare_igt_frag() 1002 - * 1 times for get_insert_time() 1003 - * 2 times for get_insert_time() 1093 + * 1 times for prepare_igt_frag(struct kunit *test, ) 1094 + * 1 times for get_insert_time(struct kunit *test, ) 1095 + * 2 times for get_insert_time(struct kunit *test, ) 1004 1096 */ 1005 1097 nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes))); 1006 - if (!nodes) 1007 - return -ENOMEM; 1098 + KUNIT_ASSERT_TRUE(test, nodes); 1008 1099 1009 1100 /* For BOTTOMUP and TOPDOWN, we first fragment the 1010 - * address space using prepare_igt_frag() and then try to verify 1011 - * that that insertions scale quadratically from 10k to 20k insertions 1101 + * address space using prepare_igt_frag(struct kunit *test, ) and then try to verify 1102 + * that insertions scale quadratically from 10k to 20k insertions 1012 1103 */ 1013 1104 drm_mm_init(&mm, 1, U64_MAX - 2); 1014 1105 for (mode = insert_modes; mode->name; mode++) { ··· 1016 1111 mode->mode != DRM_MM_INSERT_HIGH) 1017 1112 continue; 1018 1113 1019 - ret = prepare_igt_frag(&mm, nodes, insert_size, mode); 1020 - if (ret) 1114 + if (prepare_igt_frag(test, &mm, nodes, insert_size, mode)) 1021 1115 goto err; 1022 1116 1023 - insert_time1 = get_insert_time(&mm, insert_size, 1117 + insert_time1 = get_insert_time(test, &mm, insert_size, 1024 1118 nodes + insert_size, mode); 1025 1119 if (insert_time1 == 0) 1026 1120 goto err; 1027 1121 1028 - insert_time2 = get_insert_time(&mm, (insert_size * 2), 1122 + insert_time2 = get_insert_time(test, &mm, (insert_size * 2), 1029 1123 nodes + insert_size * 2, mode); 1030 1124 if (insert_time2 == 0) 1031 1125 goto err; 1032 1126 1033 - pr_info("%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n", 1034 - mode->name, insert_size, insert_size * 2, 1035 - insert_time1, insert_time2); 1127 + kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n", 1128 + mode->name, insert_size, insert_size * 2, 
insert_time1, insert_time2); 1036 1129 1037 1130 if (insert_time2 > (scale_factor * insert_time1)) { 1038 - pr_err("%s fragmented insert took %llu nsecs more\n", 1039 - mode->name, 1040 - insert_time2 - (scale_factor * insert_time1)); 1131 + KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n", 1132 + mode->name, insert_time2 - (scale_factor * insert_time1)); 1041 1133 goto err; 1042 1134 } 1043 1135 ··· 1042 1140 drm_mm_remove_node(node); 1043 1141 } 1044 1142 1045 - ret = 0; 1046 1143 err: 1047 1144 drm_mm_for_each_node_safe(node, next, &mm) 1048 1145 drm_mm_remove_node(node); 1049 1146 drm_mm_takedown(&mm); 1050 1147 vfree(nodes); 1051 - 1052 - return ret; 1053 1148 } 1054 1149 1055 - static int igt_align(void *ignored) 1150 + static void igt_mm_align(struct kunit *test) 1056 1151 { 1057 1152 const struct insert_mode *mode; 1058 1153 const unsigned int max_count = min(8192u, max_prime); 1059 1154 struct drm_mm mm; 1060 1155 struct drm_mm_node *nodes, *node, *next; 1061 1156 unsigned int prime; 1062 - int ret = -EINVAL; 1063 1157 1064 1158 /* For each of the possible insertion modes, we pick a few 1065 1159 * arbitrary alignments and check that the inserted node ··· 1063 1165 */ 1064 1166 1065 1167 nodes = vzalloc(array_size(max_count, sizeof(*nodes))); 1066 - if (!nodes) 1067 - goto err; 1168 + KUNIT_ASSERT_TRUE(test, nodes); 1068 1169 1069 1170 drm_mm_init(&mm, 1, U64_MAX - 2); 1070 1171 ··· 1073 1176 for_each_prime_number_from(prime, 1, max_count) { 1074 1177 u64 size = next_prime_number(prime); 1075 1178 1076 - if (!expect_insert(&mm, &nodes[i], 1077 - size, prime, i, 1078 - mode)) { 1079 - pr_err("%s insert failed with alignment=%d", 1080 - mode->name, prime); 1179 + if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) { 1180 + KUNIT_FAIL(test, "%s insert failed with alignment=%d", 1181 + mode->name, prime); 1081 1182 goto out; 1082 1183 } 1083 1184 ··· 1089 1194 cond_resched(); 1090 1195 } 1091 1196 1092 - ret = 0; 1093 1197 out: 1094 
1198 drm_mm_for_each_node_safe(node, next, &mm) 1095 1199 drm_mm_remove_node(node); 1096 1200 drm_mm_takedown(&mm); 1097 1201 vfree(nodes); 1098 - err: 1099 - return ret; 1100 1202 } 1101 1203 1102 - static int igt_align_pot(int max) 1204 + static void igt_align_pot(struct kunit *test, int max) 1103 1205 { 1104 1206 struct drm_mm mm; 1105 1207 struct drm_mm_node *node, *next; 1106 1208 int bit; 1107 - int ret = -EINVAL; 1108 1209 1109 1210 /* Check that we can align to the full u64 address space */ 1110 1211 ··· 1111 1220 1112 1221 node = kzalloc(sizeof(*node), GFP_KERNEL); 1113 1222 if (!node) { 1114 - ret = -ENOMEM; 1223 + KUNIT_FAIL(test, "failed to allocate node"); 1115 1224 goto out; 1116 1225 } 1117 1226 1118 1227 align = BIT_ULL(bit); 1119 - size = BIT_ULL(bit-1) + 1; 1120 - if (!expect_insert(&mm, node, 1121 - size, align, bit, 1122 - &insert_modes[0])) { 1123 - pr_err("insert failed with alignment=%llx [%d]", 1124 - align, bit); 1228 + size = BIT_ULL(bit - 1) + 1; 1229 + if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) { 1230 + KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit); 1125 1231 goto out; 1126 1232 } 1127 1233 1128 1234 cond_resched(); 1129 1235 } 1130 1236 1131 - ret = 0; 1132 1237 out: 1133 1238 drm_mm_for_each_node_safe(node, next, &mm) { 1134 1239 drm_mm_remove_node(node); 1135 1240 kfree(node); 1136 1241 } 1137 1242 drm_mm_takedown(&mm); 1138 - return ret; 1139 1243 } 1140 1244 1141 - static int igt_align32(void *ignored) 1245 + static void igt_mm_align32(struct kunit *test) 1142 1246 { 1143 - return igt_align_pot(32); 1247 + igt_align_pot(test, 32); 1144 1248 } 1145 1249 1146 - static int igt_align64(void *ignored) 1250 + static void igt_mm_align64(struct kunit *test) 1147 1251 { 1148 - return igt_align_pot(64); 1252 + igt_align_pot(test, 64); 1149 1253 } 1150 1254 1151 - static void show_scan(const struct drm_mm_scan *scan) 1255 + static void show_scan(struct kunit *test, const struct drm_mm_scan 
*scan) 1152 1256 { 1153 - pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n", 1154 - scan->hit_start, scan->hit_end, 1155 - scan->size, scan->alignment, scan->color); 1257 + kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n", 1258 + scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color); 1156 1259 } 1157 1260 1158 - static void show_holes(const struct drm_mm *mm, int count) 1261 + static void show_holes(struct kunit *test, const struct drm_mm *mm, int count) 1159 1262 { 1160 1263 u64 hole_start, hole_end; 1161 1264 struct drm_mm_node *hole; ··· 1159 1274 const char *node1 = NULL, *node2 = NULL; 1160 1275 1161 1276 if (drm_mm_node_allocated(hole)) 1162 - node1 = kasprintf(GFP_KERNEL, 1163 - "[%llx + %lld, color=%ld], ", 1277 + node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ", 1164 1278 hole->start, hole->size, hole->color); 1165 1279 1166 1280 if (drm_mm_node_allocated(next)) 1167 - node2 = kasprintf(GFP_KERNEL, 1168 - ", [%llx + %lld, color=%ld]", 1281 + node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]", 1169 1282 next->start, next->size, next->color); 1170 1283 1171 - pr_info("%sHole [%llx - %llx, size %lld]%s\n", 1172 - node1, 1173 - hole_start, hole_end, hole_end - hole_start, 1174 - node2); 1284 + kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1, 1285 + hole_start, hole_end, hole_end - hole_start, node2); 1175 1286 1176 1287 kfree(node2); 1177 1288 kfree(node1); ··· 1182 1301 struct list_head link; 1183 1302 }; 1184 1303 1185 - static bool evict_nodes(struct drm_mm_scan *scan, 1186 - struct evict_node *nodes, 1187 - unsigned int *order, 1188 - unsigned int count, 1189 - bool use_color, 1190 - struct list_head *evict_list) 1304 + static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan, 1305 + struct evict_node *nodes, unsigned int *order, unsigned int count, 1306 + bool use_color, struct list_head *evict_list) 1191 1307 { 1192 1308 struct evict_node *e, 
*en; 1193 1309 unsigned int i; ··· 1200 1322 list_del(&e->link); 1201 1323 } 1202 1324 if (list_empty(evict_list)) { 1203 - pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n", 1204 - scan->size, count, scan->alignment, scan->color); 1325 + KUNIT_FAIL(test, 1326 + "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n", 1327 + scan->size, count, scan->alignment, scan->color); 1205 1328 return false; 1206 1329 } 1207 1330 ··· 1219 1340 } 1220 1341 } else { 1221 1342 if (drm_mm_scan_color_evict(scan)) { 1222 - pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n"); 1343 + KUNIT_FAIL(test, 1344 + "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n"); 1223 1345 return false; 1224 1346 } 1225 1347 } ··· 1228 1348 return true; 1229 1349 } 1230 1350 1231 - static bool evict_nothing(struct drm_mm *mm, 1232 - unsigned int total_size, 1233 - struct evict_node *nodes) 1351 + static bool evict_nothing(struct kunit *test, struct drm_mm *mm, 1352 + unsigned int total_size, struct evict_node *nodes) 1234 1353 { 1235 1354 struct drm_mm_scan scan; 1236 1355 LIST_HEAD(evict_list); ··· 1250 1371 e = &nodes[n]; 1251 1372 1252 1373 if (!drm_mm_node_allocated(&e->node)) { 1253 - pr_err("node[%d] no longer allocated!\n", n); 1374 + KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n); 1254 1375 return false; 1255 1376 } 1256 1377 ··· 1266 1387 e = &nodes[n]; 1267 1388 1268 1389 if (!e->link.next) { 1269 - pr_err("node[%d] no longer connected!\n", n); 1390 + KUNIT_FAIL(test, "node[%d] no longer connected!\n", n); 1270 1391 return false; 1271 1392 } 1272 1393 } 1273 1394 1274 - return assert_continuous(mm, nodes[0].node.size); 1395 + return assert_continuous(test, mm, nodes[0].node.size); 1275 1396 } 1276 1397 1277 - static bool evict_everything(struct drm_mm *mm, 1278 - unsigned int total_size, 1279 - struct evict_node *nodes) 1398 + static bool evict_everything(struct kunit *test, struct drm_mm *mm, 
1399 + unsigned int total_size, struct evict_node *nodes) 1280 1400 { 1281 1401 struct drm_mm_scan scan; 1282 1402 LIST_HEAD(evict_list); ··· 1295 1417 list_for_each_entry(e, &evict_list, link) { 1296 1418 if (!drm_mm_scan_remove_block(&scan, &e->node)) { 1297 1419 if (!err) { 1298 - pr_err("Node %lld not marked for eviction!\n", 1299 - e->node.start); 1420 + KUNIT_FAIL(test, "Node %lld not marked for eviction!\n", 1421 + e->node.start); 1300 1422 err = -EINVAL; 1301 1423 } 1302 1424 } ··· 1307 1429 list_for_each_entry(e, &evict_list, link) 1308 1430 drm_mm_remove_node(&e->node); 1309 1431 1310 - if (!assert_one_hole(mm, 0, total_size)) 1432 + if (!assert_one_hole(test, mm, 0, total_size)) 1311 1433 return false; 1312 1434 1313 1435 list_for_each_entry(e, &evict_list, link) { 1314 1436 err = drm_mm_reserve_node(mm, &e->node); 1315 1437 if (err) { 1316 - pr_err("Failed to reinsert node after eviction: start=%llx\n", 1317 - e->node.start); 1438 + KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n", 1439 + e->node.start); 1318 1440 return false; 1319 1441 } 1320 1442 } 1321 1443 1322 - return assert_continuous(mm, nodes[0].node.size); 1444 + return assert_continuous(test, mm, nodes[0].node.size); 1323 1445 } 1324 1446 1325 - static int evict_something(struct drm_mm *mm, 1326 - u64 range_start, u64 range_end, 1327 - struct evict_node *nodes, 1328 - unsigned int *order, 1329 - unsigned int count, 1330 - unsigned int size, 1331 - unsigned int alignment, 1332 - const struct insert_mode *mode) 1447 + static int evict_something(struct kunit *test, struct drm_mm *mm, 1448 + u64 range_start, u64 range_end, struct evict_node *nodes, 1449 + unsigned int *order, unsigned int count, unsigned int size, 1450 + unsigned int alignment, const struct insert_mode *mode) 1333 1451 { 1334 1452 struct drm_mm_scan scan; 1335 1453 LIST_HEAD(evict_list); ··· 1333 1459 struct drm_mm_node tmp; 1334 1460 int err; 1335 1461 1336 - drm_mm_scan_init_with_range(&scan, mm, 1337 - 
size, alignment, 0, 1338 - range_start, range_end, 1339 - mode->mode); 1340 - if (!evict_nodes(&scan, 1341 - nodes, order, count, false, 1342 - &evict_list)) 1462 + drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start, 1463 + range_end, mode->mode); 1464 + if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list)) 1343 1465 return -EINVAL; 1344 1466 1345 1467 memset(&tmp, 0, sizeof(tmp)); 1346 1468 err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0, 1347 1469 DRM_MM_INSERT_EVICT); 1348 1470 if (err) { 1349 - pr_err("Failed to insert into eviction hole: size=%d, align=%d\n", 1350 - size, alignment); 1351 - show_scan(&scan); 1352 - show_holes(mm, 3); 1471 + KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n", 1472 + size, alignment); 1473 + show_scan(test, &scan); 1474 + show_holes(test, mm, 3); 1353 1475 return err; 1354 1476 } 1355 1477 1356 1478 if (tmp.start < range_start || tmp.start + tmp.size > range_end) { 1357 - pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n", 1358 - tmp.start, tmp.size, range_start, range_end); 1479 + KUNIT_FAIL(test, 1480 + "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n", 1481 + tmp.start, tmp.size, range_start, range_end); 1359 1482 err = -EINVAL; 1360 1483 } 1361 1484 1362 - if (!assert_node(&tmp, mm, size, alignment, 0) || 1485 + if (!assert_node(test, &tmp, mm, size, alignment, 0) || 1363 1486 drm_mm_hole_follows(&tmp)) { 1364 - pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n", 1365 - tmp.size, size, 1366 - alignment, misalignment(&tmp, alignment), 1367 - tmp.start, drm_mm_hole_follows(&tmp)); 1487 + KUNIT_FAIL(test, 1488 + "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n", 1489 + tmp.size, size, alignment, misalignment(&tmp, alignment), 1490 + tmp.start, 
drm_mm_hole_follows(&tmp)); 1368 1491 err = -EINVAL; 1369 1492 } 1370 1493 ··· 1372 1501 list_for_each_entry(e, &evict_list, link) { 1373 1502 err = drm_mm_reserve_node(mm, &e->node); 1374 1503 if (err) { 1375 - pr_err("Failed to reinsert node after eviction: start=%llx\n", 1376 - e->node.start); 1504 + KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n", 1505 + e->node.start); 1377 1506 return err; 1378 1507 } 1379 1508 } 1380 1509 1381 - if (!assert_continuous(mm, nodes[0].node.size)) { 1382 - pr_err("range is no longer continuous\n"); 1510 + if (!assert_continuous(test, mm, nodes[0].node.size)) { 1511 + KUNIT_FAIL(test, "range is no longer continuous\n"); 1383 1512 return -EINVAL; 1384 1513 } 1385 1514 1386 1515 return 0; 1387 1516 } 1388 1517 1389 - static int igt_evict(void *ignored) 1518 + static void igt_mm_evict(struct kunit *test) 1390 1519 { 1391 1520 DRM_RND_STATE(prng, random_seed); 1392 1521 const unsigned int size = 8192; ··· 1395 1524 struct evict_node *nodes; 1396 1525 struct drm_mm_node *node, *next; 1397 1526 unsigned int *order, n; 1398 - int ret, err; 1399 1527 1400 1528 /* Here we populate a full drm_mm and then try and insert a new node 1401 1529 * by evicting other nodes in a random order. The drm_mm_scan should ··· 1403 1533 * sizes to try and stress the hole finder. 
1404 1534 */ 1405 1535 1406 - ret = -ENOMEM; 1407 1536 nodes = vzalloc(array_size(size, sizeof(*nodes))); 1408 - if (!nodes) 1409 - goto err; 1537 + KUNIT_ASSERT_TRUE(test, nodes); 1410 1538 1411 1539 order = drm_random_order(size, &prng); 1412 1540 if (!order) 1413 1541 goto err_nodes; 1414 1542 1415 - ret = -EINVAL; 1416 1543 drm_mm_init(&mm, 0, size); 1417 1544 for (n = 0; n < size; n++) { 1418 - err = drm_mm_insert_node(&mm, &nodes[n].node, 1); 1419 - if (err) { 1420 - pr_err("insert failed, step %d\n", n); 1421 - ret = err; 1545 + if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) { 1546 + KUNIT_FAIL(test, "insert failed, step %d\n", n); 1422 1547 goto out; 1423 1548 } 1424 1549 } 1425 1550 1426 1551 /* First check that using the scanner doesn't break the mm */ 1427 - if (!evict_nothing(&mm, size, nodes)) { 1428 - pr_err("evict_nothing() failed\n"); 1552 + if (!evict_nothing(test, &mm, size, nodes)) { 1553 + KUNIT_FAIL(test, "evict_nothing() failed\n"); 1429 1554 goto out; 1430 1555 } 1431 - if (!evict_everything(&mm, size, nodes)) { 1432 - pr_err("evict_everything() failed\n"); 1556 + if (!evict_everything(test, &mm, size, nodes)) { 1557 + KUNIT_FAIL(test, "evict_everything() failed\n"); 1433 1558 goto out; 1434 1559 } 1435 1560 1436 1561 for (mode = evict_modes; mode->name; mode++) { 1437 1562 for (n = 1; n <= size; n <<= 1) { 1438 1563 drm_random_reorder(order, size, &prng); 1439 - err = evict_something(&mm, 0, U64_MAX, 1440 - nodes, order, size, 1441 - n, 1, 1442 - mode); 1443 - if (err) { 1444 - pr_err("%s evict_something(size=%u) failed\n", 1445 - mode->name, n); 1446 - ret = err; 1564 + if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1, 1565 + mode)) { 1566 + KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n", 1567 + mode->name, n); 1447 1568 goto out; 1448 1569 } 1449 1570 } 1450 1571 1451 1572 for (n = 1; n < size; n <<= 1) { 1452 1573 drm_random_reorder(order, size, &prng); 1453 - err = evict_something(&mm, 0, U64_MAX, 1454 - 
nodes, order, size, 1455 - size/2, n, 1456 - mode); 1457 - if (err) { 1458 - pr_err("%s evict_something(size=%u, alignment=%u) failed\n", 1459 - mode->name, size/2, n); 1460 - ret = err; 1574 + if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, 1575 + size / 2, n, mode)) { 1576 + KUNIT_FAIL(test, 1577 + "%s evict_something(size=%u, alignment=%u) failed\n", 1578 + mode->name, size / 2, n); 1461 1579 goto out; 1462 1580 } 1463 1581 } ··· 1456 1598 DRM_MM_BUG_ON(!nsize); 1457 1599 1458 1600 drm_random_reorder(order, size, &prng); 1459 - err = evict_something(&mm, 0, U64_MAX, 1460 - nodes, order, size, 1461 - nsize, n, 1462 - mode); 1463 - if (err) { 1464 - pr_err("%s evict_something(size=%u, alignment=%u) failed\n", 1465 - mode->name, nsize, n); 1466 - ret = err; 1601 + if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, 1602 + nsize, n, mode)) { 1603 + KUNIT_FAIL(test, 1604 + "%s evict_something(size=%u, alignment=%u) failed\n", 1605 + mode->name, nsize, n); 1467 1606 goto out; 1468 1607 } 1469 1608 } ··· 1468 1613 cond_resched(); 1469 1614 } 1470 1615 1471 - ret = 0; 1472 1616 out: 1473 1617 drm_mm_for_each_node_safe(node, next, &mm) 1474 1618 drm_mm_remove_node(node); ··· 1475 1621 kfree(order); 1476 1622 err_nodes: 1477 1623 vfree(nodes); 1478 - err: 1479 - return ret; 1480 1624 } 1481 1625 1482 - static int igt_evict_range(void *ignored) 1626 + static void igt_mm_evict_range(struct kunit *test) 1483 1627 { 1484 1628 DRM_RND_STATE(prng, random_seed); 1485 1629 const unsigned int size = 8192; ··· 1489 1637 struct evict_node *nodes; 1490 1638 struct drm_mm_node *node, *next; 1491 1639 unsigned int *order, n; 1492 - int ret, err; 1493 1640 1494 1641 /* Like igt_evict() but now we are limiting the search to a 1495 1642 * small portion of the full drm_mm. 
1496 1643 */ 1497 1644 1498 - ret = -ENOMEM; 1499 1645 nodes = vzalloc(array_size(size, sizeof(*nodes))); 1500 - if (!nodes) 1501 - goto err; 1646 + KUNIT_ASSERT_TRUE(test, nodes); 1502 1647 1503 1648 order = drm_random_order(size, &prng); 1504 1649 if (!order) 1505 1650 goto err_nodes; 1506 1651 1507 - ret = -EINVAL; 1508 1652 drm_mm_init(&mm, 0, size); 1509 1653 for (n = 0; n < size; n++) { 1510 - err = drm_mm_insert_node(&mm, &nodes[n].node, 1); 1511 - if (err) { 1512 - pr_err("insert failed, step %d\n", n); 1513 - ret = err; 1654 + if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) { 1655 + KUNIT_FAIL(test, "insert failed, step %d\n", n); 1514 1656 goto out; 1515 1657 } 1516 1658 } ··· 1512 1666 for (mode = evict_modes; mode->name; mode++) { 1513 1667 for (n = 1; n <= range_size; n <<= 1) { 1514 1668 drm_random_reorder(order, size, &prng); 1515 - err = evict_something(&mm, range_start, range_end, 1516 - nodes, order, size, 1517 - n, 1, 1518 - mode); 1519 - if (err) { 1520 - pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n", 1521 - mode->name, n, range_start, range_end); 1669 + if (evict_something(test, &mm, range_start, range_end, nodes, 1670 + order, size, n, 1, mode)) { 1671 + KUNIT_FAIL(test, 1672 + "%s evict_something(size=%u) failed with range [%u, %u]\n", 1673 + mode->name, n, range_start, range_end); 1522 1674 goto out; 1523 1675 } 1524 1676 } 1525 1677 1526 1678 for (n = 1; n <= range_size; n <<= 1) { 1527 1679 drm_random_reorder(order, size, &prng); 1528 - err = evict_something(&mm, range_start, range_end, 1529 - nodes, order, size, 1530 - range_size/2, n, 1531 - mode); 1532 - if (err) { 1533 - pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n", 1534 - mode->name, range_size/2, n, range_start, range_end); 1680 + if (evict_something(test, &mm, range_start, range_end, nodes, 1681 + order, size, range_size / 2, n, mode)) { 1682 + KUNIT_FAIL(test, 1683 + "%s evict_something(size=%u, alignment=%u) failed with range 
[%u, %u]\n", 1684 + mode->name, range_size / 2, n, range_start, range_end); 1535 1685 goto out; 1536 1686 } 1537 1687 } ··· 1538 1696 DRM_MM_BUG_ON(!nsize); 1539 1697 1540 1698 drm_random_reorder(order, size, &prng); 1541 - err = evict_something(&mm, range_start, range_end, 1542 - nodes, order, size, 1543 - nsize, n, 1544 - mode); 1545 - if (err) { 1546 - pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n", 1547 - mode->name, nsize, n, range_start, range_end); 1699 + if (evict_something(test, &mm, range_start, range_end, nodes, 1700 + order, size, nsize, n, mode)) { 1701 + KUNIT_FAIL(test, 1702 + "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n", 1703 + mode->name, nsize, n, range_start, range_end); 1548 1704 goto out; 1549 1705 } 1550 1706 } ··· 1550 1710 cond_resched(); 1551 1711 } 1552 1712 1553 - ret = 0; 1554 1713 out: 1555 1714 drm_mm_for_each_node_safe(node, next, &mm) 1556 1715 drm_mm_remove_node(node); ··· 1557 1718 kfree(order); 1558 1719 err_nodes: 1559 1720 vfree(nodes); 1560 - err: 1561 - return ret; 1562 1721 } 1563 1722 1564 1723 static unsigned int node_index(const struct drm_mm_node *node) ··· 1564 1727 return div64_u64(node->start, node->size); 1565 1728 } 1566 1729 1567 - static int igt_topdown(void *ignored) 1730 + static void igt_mm_topdown(struct kunit *test) 1568 1731 { 1569 1732 const struct insert_mode *topdown = &insert_modes[TOPDOWN]; 1733 + 1570 1734 DRM_RND_STATE(prng, random_seed); 1571 1735 const unsigned int count = 8192; 1572 1736 unsigned int size; ··· 1575 1737 struct drm_mm mm; 1576 1738 struct drm_mm_node *nodes, *node, *next; 1577 1739 unsigned int *order, n, m, o = 0; 1578 - int ret; 1579 1740 1580 1741 /* When allocating top-down, we expect to be returned a node 1581 1742 * from a suitable hole at the top of the drm_mm. We check that 1582 1743 * the returned node does match the highest available slot. 
1583 1744 */ 1584 1745 1585 - ret = -ENOMEM; 1586 1746 nodes = vzalloc(array_size(count, sizeof(*nodes))); 1587 - if (!nodes) 1588 - goto err; 1747 + KUNIT_ASSERT_TRUE(test, nodes); 1589 1748 1590 1749 bitmap = bitmap_zalloc(count, GFP_KERNEL); 1591 1750 if (!bitmap) ··· 1592 1757 if (!order) 1593 1758 goto err_bitmap; 1594 1759 1595 - ret = -EINVAL; 1596 1760 for (size = 1; size <= 64; size <<= 1) { 1597 - drm_mm_init(&mm, 0, size*count); 1761 + drm_mm_init(&mm, 0, size * count); 1598 1762 for (n = 0; n < count; n++) { 1599 - if (!expect_insert(&mm, &nodes[n], 1600 - size, 0, n, 1601 - topdown)) { 1602 - pr_err("insert failed, size %u step %d\n", size, n); 1763 + if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) { 1764 + KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n); 1603 1765 goto out; 1604 1766 } 1605 1767 1606 1768 if (drm_mm_hole_follows(&nodes[n])) { 1607 - pr_err("hole after topdown insert %d, start=%llx\n, size=%u", 1608 - n, nodes[n].start, size); 1769 + KUNIT_FAIL(test, 1770 + "hole after topdown insert %d, start=%llx\n, size=%u", 1771 + n, nodes[n].start, size); 1609 1772 goto out; 1610 1773 } 1611 1774 1612 - if (!assert_one_hole(&mm, 0, size*(count - n - 1))) 1775 + if (!assert_one_hole(test, &mm, 0, size * (count - n - 1))) 1613 1776 goto out; 1614 1777 } 1615 1778 1616 - if (!assert_continuous(&mm, size)) 1779 + if (!assert_continuous(test, &mm, size)) 1617 1780 goto out; 1618 1781 1619 1782 drm_random_reorder(order, count, &prng); ··· 1626 1793 unsigned int last; 1627 1794 1628 1795 node = &nodes[order[(o + m) % count]]; 1629 - if (!expect_insert(&mm, node, 1630 - size, 0, 0, 1631 - topdown)) { 1632 - pr_err("insert failed, step %d/%d\n", m, n); 1796 + if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) { 1797 + KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n); 1633 1798 goto out; 1634 1799 } 1635 1800 1636 1801 if (drm_mm_hole_follows(node)) { 1637 - pr_err("hole after topdown insert %d/%d, start=%llx\n", 
1638 - m, n, node->start); 1802 + KUNIT_FAIL(test, 1803 + "hole after topdown insert %d/%d, start=%llx\n", 1804 + m, n, node->start); 1639 1805 goto out; 1640 1806 } 1641 1807 1642 1808 last = find_last_bit(bitmap, count); 1643 1809 if (node_index(node) != last) { 1644 - pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n", 1645 - m, n, size, last, node_index(node)); 1810 + KUNIT_FAIL(test, 1811 + "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n", 1812 + m, n, size, last, node_index(node)); 1646 1813 goto out; 1647 1814 } 1648 1815 ··· 1660 1827 cond_resched(); 1661 1828 } 1662 1829 1663 - ret = 0; 1664 1830 out: 1665 1831 drm_mm_for_each_node_safe(node, next, &mm) 1666 1832 drm_mm_remove_node(node); ··· 1669 1837 bitmap_free(bitmap); 1670 1838 err_nodes: 1671 1839 vfree(nodes); 1672 - err: 1673 - return ret; 1674 1840 } 1675 1841 1676 - static int igt_bottomup(void *ignored) 1842 + static void igt_mm_bottomup(struct kunit *test) 1677 1843 { 1678 1844 const struct insert_mode *bottomup = &insert_modes[BOTTOMUP]; 1845 + 1679 1846 DRM_RND_STATE(prng, random_seed); 1680 1847 const unsigned int count = 8192; 1681 1848 unsigned int size; ··· 1682 1851 struct drm_mm mm; 1683 1852 struct drm_mm_node *nodes, *node, *next; 1684 1853 unsigned int *order, n, m, o = 0; 1685 - int ret; 1686 1854 1687 1855 /* Like igt_topdown, but instead of searching for the last hole, 1688 1856 * we search for the first. 
1689 1857 */ 1690 1858 1691 - ret = -ENOMEM; 1692 1859 nodes = vzalloc(array_size(count, sizeof(*nodes))); 1693 - if (!nodes) 1694 - goto err; 1860 + KUNIT_ASSERT_TRUE(test, nodes); 1695 1861 1696 1862 bitmap = bitmap_zalloc(count, GFP_KERNEL); 1697 1863 if (!bitmap) ··· 1698 1870 if (!order) 1699 1871 goto err_bitmap; 1700 1872 1701 - ret = -EINVAL; 1702 1873 for (size = 1; size <= 64; size <<= 1) { 1703 - drm_mm_init(&mm, 0, size*count); 1874 + drm_mm_init(&mm, 0, size * count); 1704 1875 for (n = 0; n < count; n++) { 1705 - if (!expect_insert(&mm, &nodes[n], 1706 - size, 0, n, 1707 - bottomup)) { 1708 - pr_err("bottomup insert failed, size %u step %d\n", size, n); 1876 + if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) { 1877 + KUNIT_FAIL(test, 1878 + "bottomup insert failed, size %u step %d\n", size, n); 1709 1879 goto out; 1710 1880 } 1711 1881 1712 - if (!assert_one_hole(&mm, size*(n + 1), size*count)) 1882 + if (!assert_one_hole(test, &mm, size * (n + 1), size * count)) 1713 1883 goto out; 1714 1884 } 1715 1885 1716 - if (!assert_continuous(&mm, size)) 1886 + if (!assert_continuous(test, &mm, size)) 1717 1887 goto out; 1718 1888 1719 1889 drm_random_reorder(order, count, &prng); ··· 1726 1900 unsigned int first; 1727 1901 1728 1902 node = &nodes[order[(o + m) % count]]; 1729 - if (!expect_insert(&mm, node, 1730 - size, 0, 0, 1731 - bottomup)) { 1732 - pr_err("insert failed, step %d/%d\n", m, n); 1903 + if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) { 1904 + KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n); 1733 1905 goto out; 1734 1906 } 1735 1907 1736 1908 first = find_first_bit(bitmap, count); 1737 1909 if (node_index(node) != first) { 1738 - pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n", 1739 - m, n, first, node_index(node)); 1910 + KUNIT_FAIL(test, 1911 + "node %d/%d not inserted into bottom hole, expected %d, found %d\n", 1912 + m, n, first, node_index(node)); 1740 1913 goto out; 1741 1914 } 
1742 1915 __clear_bit(first, bitmap); ··· 1752 1927 cond_resched(); 1753 1928 } 1754 1929 1755 - ret = 0; 1756 1930 out: 1757 1931 drm_mm_for_each_node_safe(node, next, &mm) 1758 1932 drm_mm_remove_node(node); ··· 1761 1937 bitmap_free(bitmap); 1762 1938 err_nodes: 1763 1939 vfree(nodes); 1764 - err: 1765 - return ret; 1766 1940 } 1767 1941 1768 - static int __igt_once(unsigned int mode) 1942 + static void __igt_once(struct kunit *test, unsigned int mode) 1769 1943 { 1770 1944 struct drm_mm mm; 1771 1945 struct drm_mm_node rsvd_lo, rsvd_hi, node; 1772 - int err; 1773 1946 1774 1947 drm_mm_init(&mm, 0, 7); 1775 1948 1776 1949 memset(&rsvd_lo, 0, sizeof(rsvd_lo)); 1777 1950 rsvd_lo.start = 1; 1778 1951 rsvd_lo.size = 1; 1779 - err = drm_mm_reserve_node(&mm, &rsvd_lo); 1780 - if (err) { 1781 - pr_err("Could not reserve low node\n"); 1952 + if (drm_mm_reserve_node(&mm, &rsvd_lo)) { 1953 + KUNIT_FAIL(test, "Could not reserve low node\n"); 1782 1954 goto err; 1783 1955 } 1784 1956 1785 1957 memset(&rsvd_hi, 0, sizeof(rsvd_hi)); 1786 1958 rsvd_hi.start = 5; 1787 1959 rsvd_hi.size = 1; 1788 - err = drm_mm_reserve_node(&mm, &rsvd_hi); 1789 - if (err) { 1790 - pr_err("Could not reserve low node\n"); 1960 + if (drm_mm_reserve_node(&mm, &rsvd_hi)) { 1961 + KUNIT_FAIL(test, "Could not reserve low node\n"); 1791 1962 goto err_lo; 1792 1963 } 1793 1964 1794 1965 if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) { 1795 - pr_err("Expected a hole after lo and high nodes!\n"); 1796 - err = -EINVAL; 1966 + KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n"); 1797 1967 goto err_hi; 1798 1968 } 1799 1969 1800 1970 memset(&node, 0, sizeof(node)); 1801 - err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode); 1802 - if (err) { 1803 - pr_err("Could not insert the node into the available hole!\n"); 1804 - err = -EINVAL; 1971 + if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) { 1972 + KUNIT_FAIL(test, "Could not insert the node into the available 
hole!\n"); 1805 1973 goto err_hi; 1806 1974 } 1807 1975 ··· 1804 1988 drm_mm_remove_node(&rsvd_lo); 1805 1989 err: 1806 1990 drm_mm_takedown(&mm); 1807 - return err; 1808 1991 } 1809 1992 1810 - static int igt_lowest(void *ignored) 1993 + static void igt_mm_lowest(struct kunit *test) 1811 1994 { 1812 - return __igt_once(DRM_MM_INSERT_LOW); 1995 + __igt_once(test, DRM_MM_INSERT_LOW); 1813 1996 } 1814 1997 1815 - static int igt_highest(void *ignored) 1998 + static void igt_mm_highest(struct kunit *test) 1816 1999 { 1817 - return __igt_once(DRM_MM_INSERT_HIGH); 2000 + __igt_once(test, DRM_MM_INSERT_HIGH); 1818 2001 } 1819 2002 1820 2003 static void separate_adjacent_colors(const struct drm_mm_node *node, 1821 - unsigned long color, 1822 - u64 *start, 1823 - u64 *end) 2004 + unsigned long color, u64 *start, u64 *end) 1824 2005 { 1825 2006 if (drm_mm_node_allocated(node) && node->color != color) 1826 2007 ++*start; ··· 1827 2014 --*end; 1828 2015 } 1829 2016 1830 - static bool colors_abutt(const struct drm_mm_node *node) 2017 + static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node) 1831 2018 { 1832 2019 if (!drm_mm_hole_follows(node) && 1833 2020 drm_mm_node_allocated(list_next_entry(node, node_list))) { 1834 - pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n", 1835 - node->color, node->start, node->size, 2021 + KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n", 2022 + node->color, node->start, node->size, 1836 2023 list_next_entry(node, node_list)->color, 1837 2024 list_next_entry(node, node_list)->start, 1838 2025 list_next_entry(node, node_list)->size); ··· 1842 2029 return false; 1843 2030 } 1844 2031 1845 - static int igt_color(void *ignored) 2032 + static void igt_mm_color(struct kunit *test) 1846 2033 { 1847 2034 const unsigned int count = min(4096u, max_iterations); 1848 2035 const struct insert_mode *mode; 1849 2036 struct drm_mm mm; 1850 2037 struct drm_mm_node *node, *nn; 1851 2038 
unsigned int n; 1852 - int ret = -EINVAL, err; 1853 2039 1854 2040 /* Color adjustment complicates everything. First we just check 1855 2041 * that when we insert a node we apply any color_adjustment callback. ··· 1861 2049 1862 2050 for (n = 1; n <= count; n++) { 1863 2051 node = kzalloc(sizeof(*node), GFP_KERNEL); 1864 - if (!node) { 1865 - ret = -ENOMEM; 2052 + if (!node) 1866 2053 goto out; 1867 - } 1868 2054 1869 - if (!expect_insert(&mm, node, 1870 - n, 0, n, 1871 - &insert_modes[0])) { 1872 - pr_err("insert failed, step %d\n", n); 2055 + if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) { 2056 + KUNIT_FAIL(test, "insert failed, step %d\n", n); 1873 2057 kfree(node); 1874 2058 goto out; 1875 2059 } ··· 1873 2065 1874 2066 drm_mm_for_each_node_safe(node, nn, &mm) { 1875 2067 if (node->color != node->size) { 1876 - pr_err("invalid color stored: expected %lld, found %ld\n", 1877 - node->size, node->color); 2068 + KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n", 2069 + node->size, node->color); 1878 2070 1879 2071 goto out; 1880 2072 } ··· 1889 2081 u64 last; 1890 2082 1891 2083 node = kzalloc(sizeof(*node), GFP_KERNEL); 1892 - if (!node) { 1893 - ret = -ENOMEM; 2084 + if (!node) 1894 2085 goto out; 1895 - } 1896 2086 1897 - node->size = 1 + 2*count; 2087 + node->size = 1 + 2 * count; 1898 2088 node->color = node->size; 1899 2089 1900 - err = drm_mm_reserve_node(&mm, node); 1901 - if (err) { 1902 - pr_err("initial reserve failed!\n"); 1903 - ret = err; 2090 + if (drm_mm_reserve_node(&mm, node)) { 2091 + KUNIT_FAIL(test, "initial reserve failed!\n"); 1904 2092 goto out; 1905 2093 } 1906 2094 ··· 1906 2102 int rem; 1907 2103 1908 2104 node = kzalloc(sizeof(*node), GFP_KERNEL); 1909 - if (!node) { 1910 - ret = -ENOMEM; 2105 + if (!node) 1911 2106 goto out; 1912 - } 1913 2107 1914 2108 node->start = last; 1915 2109 node->size = n + count; 1916 2110 node->color = node->size; 1917 2111 1918 - err = drm_mm_reserve_node(&mm, node); 1919 
- if (err != -ENOSPC) { 1920 - pr_err("reserve %d did not report color overlap! err=%d\n", 1921 - n, err); 2112 + if (drm_mm_reserve_node(&mm, node) != -ENOSPC) { 2113 + KUNIT_FAIL(test, "reserve %d did not report color overlap!", n); 1922 2114 goto out; 1923 2115 } 1924 2116 ··· 1922 2122 rem = misalignment(node, n + count); 1923 2123 node->start += n + count - rem; 1924 2124 1925 - err = drm_mm_reserve_node(&mm, node); 1926 - if (err) { 1927 - pr_err("reserve %d failed, err=%d\n", n, err); 1928 - ret = err; 2125 + if (drm_mm_reserve_node(&mm, node)) { 2126 + KUNIT_FAIL(test, "reserve %d failed", n); 1929 2127 goto out; 1930 2128 } 1931 2129 ··· 1932 2134 1933 2135 for (n = 1; n <= count; n++) { 1934 2136 node = kzalloc(sizeof(*node), GFP_KERNEL); 1935 - if (!node) { 1936 - ret = -ENOMEM; 2137 + if (!node) 1937 2138 goto out; 1938 - } 1939 2139 1940 - if (!expect_insert(&mm, node, 1941 - n, n, n, 1942 - mode)) { 1943 - pr_err("%s insert failed, step %d\n", 1944 - mode->name, n); 2140 + if (!expect_insert(test, &mm, node, n, n, n, mode)) { 2141 + KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n); 1945 2142 kfree(node); 1946 2143 goto out; 1947 2144 } ··· 1946 2153 u64 rem; 1947 2154 1948 2155 if (node->color != node->size) { 1949 - pr_err("%s invalid color stored: expected %lld, found %ld\n", 1950 - mode->name, node->size, node->color); 2156 + KUNIT_FAIL(test, 2157 + "%s invalid color stored: expected %lld, found %ld\n", 2158 + mode->name, node->size, node->color); 1951 2159 1952 2160 goto out; 1953 2161 } 1954 2162 1955 - if (colors_abutt(node)) 2163 + if (colors_abutt(test, node)) 1956 2164 goto out; 1957 2165 1958 2166 div64_u64_rem(node->start, node->size, &rem); 1959 2167 if (rem) { 1960 - pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n", 1961 - mode->name, node->start, node->size, rem); 2168 + KUNIT_FAIL(test, 2169 + "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n", 2170 + 
mode->name, node->start, node->size, rem); 1962 2171 goto out; 1963 2172 } 1964 2173 ··· 1971 2176 cond_resched(); 1972 2177 } 1973 2178 1974 - ret = 0; 1975 2179 out: 1976 2180 drm_mm_for_each_node_safe(node, nn, &mm) { 1977 2181 drm_mm_remove_node(node); 1978 2182 kfree(node); 1979 2183 } 1980 2184 drm_mm_takedown(&mm); 1981 - return ret; 1982 2185 } 1983 2186 1984 - static int evict_color(struct drm_mm *mm, 1985 - u64 range_start, u64 range_end, 1986 - struct evict_node *nodes, 1987 - unsigned int *order, 1988 - unsigned int count, 1989 - unsigned int size, 1990 - unsigned int alignment, 1991 - unsigned long color, 1992 - const struct insert_mode *mode) 2187 + static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start, 2188 + u64 range_end, struct evict_node *nodes, unsigned int *order, 2189 + unsigned int count, unsigned int size, unsigned int alignment, 2190 + unsigned long color, const struct insert_mode *mode) 1993 2191 { 1994 2192 struct drm_mm_scan scan; 1995 2193 LIST_HEAD(evict_list); ··· 1990 2202 struct drm_mm_node tmp; 1991 2203 int err; 1992 2204 1993 - drm_mm_scan_init_with_range(&scan, mm, 1994 - size, alignment, color, 1995 - range_start, range_end, 1996 - mode->mode); 1997 - if (!evict_nodes(&scan, 1998 - nodes, order, count, true, 1999 - &evict_list)) 2205 + drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start, 2206 + range_end, mode->mode); 2207 + if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list)) 2000 2208 return -EINVAL; 2001 2209 2002 2210 memset(&tmp, 0, sizeof(tmp)); 2003 2211 err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color, 2004 2212 DRM_MM_INSERT_EVICT); 2005 2213 if (err) { 2006 - pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n", 2007 - size, alignment, color, err); 2008 - show_scan(&scan); 2009 - show_holes(mm, 3); 2214 + KUNIT_FAIL(test, 2215 + "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n", 
2216 + size, alignment, color, err); 2217 + show_scan(test, &scan); 2218 + show_holes(test, mm, 3); 2010 2219 return err; 2011 2220 } 2012 2221 2013 2222 if (tmp.start < range_start || tmp.start + tmp.size > range_end) { 2014 - pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n", 2015 - tmp.start, tmp.size, range_start, range_end); 2223 + KUNIT_FAIL(test, 2224 + "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n", 2225 + tmp.start, tmp.size, range_start, range_end); 2016 2226 err = -EINVAL; 2017 2227 } 2018 2228 2019 - if (colors_abutt(&tmp)) 2229 + if (colors_abutt(test, &tmp)) 2020 2230 err = -EINVAL; 2021 2231 2022 - if (!assert_node(&tmp, mm, size, alignment, color)) { 2023 - pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n", 2024 - tmp.size, size, 2025 - alignment, misalignment(&tmp, alignment), tmp.start); 2232 + if (!assert_node(test, &tmp, mm, size, alignment, color)) { 2233 + KUNIT_FAIL(test, 2234 + "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n", 2235 + tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start); 2026 2236 err = -EINVAL; 2027 2237 } 2028 2238 ··· 2031 2245 list_for_each_entry(e, &evict_list, link) { 2032 2246 err = drm_mm_reserve_node(mm, &e->node); 2033 2247 if (err) { 2034 - pr_err("Failed to reinsert node after eviction: start=%llx\n", 2035 - e->node.start); 2248 + KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n", 2249 + e->node.start); 2036 2250 return err; 2037 2251 } 2038 2252 } ··· 2041 2255 return 0; 2042 2256 } 2043 2257 2044 - static int igt_color_evict(void *ignored) 2258 + static void igt_mm_color_evict(struct kunit *test) 2045 2259 { 2046 2260 DRM_RND_STATE(prng, random_seed); 2047 2261 const unsigned int total_size = min(8192u, max_iterations); ··· 2051 2265 struct evict_node *nodes; 2052 2266 struct drm_mm_node *node, *next; 2053 
2267 unsigned int *order, n; 2054 - int ret, err; 2055 2268 2056 2269 /* Check that the drm_mm_scan also honours color adjustment when 2057 2270 * choosing its victims to create a hole. Our color_adjust does not ··· 2058 2273 * enlarging the set of victims that must be evicted. 2059 2274 */ 2060 2275 2061 - ret = -ENOMEM; 2062 2276 nodes = vzalloc(array_size(total_size, sizeof(*nodes))); 2063 - if (!nodes) 2064 - goto err; 2277 + KUNIT_ASSERT_TRUE(test, nodes); 2065 2278 2066 2279 order = drm_random_order(total_size, &prng); 2067 2280 if (!order) 2068 2281 goto err_nodes; 2069 2282 2070 - ret = -EINVAL; 2071 - drm_mm_init(&mm, 0, 2*total_size - 1); 2283 + drm_mm_init(&mm, 0, 2 * total_size - 1); 2072 2284 mm.color_adjust = separate_adjacent_colors; 2073 2285 for (n = 0; n < total_size; n++) { 2074 - if (!expect_insert(&mm, &nodes[n].node, 2286 + if (!expect_insert(test, &mm, &nodes[n].node, 2075 2287 1, 0, color++, 2076 2288 &insert_modes[0])) { 2077 - pr_err("insert failed, step %d\n", n); 2289 + KUNIT_FAIL(test, "insert failed, step %d\n", n); 2078 2290 goto out; 2079 2291 } 2080 2292 } ··· 2079 2297 for (mode = evict_modes; mode->name; mode++) { 2080 2298 for (n = 1; n <= total_size; n <<= 1) { 2081 2299 drm_random_reorder(order, total_size, &prng); 2082 - err = evict_color(&mm, 0, U64_MAX, 2083 - nodes, order, total_size, 2084 - n, 1, color++, 2085 - mode); 2086 - if (err) { 2087 - pr_err("%s evict_color(size=%u) failed\n", 2088 - mode->name, n); 2300 + if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size, 2301 + n, 1, color++, mode)) { 2302 + KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n); 2089 2303 goto out; 2090 2304 } 2091 2305 } 2092 2306 2093 2307 for (n = 1; n < total_size; n <<= 1) { 2094 2308 drm_random_reorder(order, total_size, &prng); 2095 - err = evict_color(&mm, 0, U64_MAX, 2096 - nodes, order, total_size, 2097 - total_size/2, n, color++, 2098 - mode); 2099 - if (err) { 2100 - pr_err("%s evict_color(size=%u, 
alignment=%u) failed\n", 2101 - mode->name, total_size/2, n); 2309 + if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size, 2310 + total_size / 2, n, color++, mode)) { 2311 + KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n", 2312 + mode->name, total_size / 2, n); 2102 2313 goto out; 2103 2314 } 2104 2315 } ··· 2102 2327 DRM_MM_BUG_ON(!nsize); 2103 2328 2104 2329 drm_random_reorder(order, total_size, &prng); 2105 - err = evict_color(&mm, 0, U64_MAX, 2106 - nodes, order, total_size, 2107 - nsize, n, color++, 2108 - mode); 2109 - if (err) { 2110 - pr_err("%s evict_color(size=%u, alignment=%u) failed\n", 2111 - mode->name, nsize, n); 2330 + if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size, 2331 + nsize, n, color++, mode)) { 2332 + KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n", 2333 + mode->name, nsize, n); 2112 2334 goto out; 2113 2335 } 2114 2336 } ··· 2113 2341 cond_resched(); 2114 2342 } 2115 2343 2116 - ret = 0; 2117 2344 out: 2118 - if (ret) 2119 - show_mm(&mm); 2120 2345 drm_mm_for_each_node_safe(node, next, &mm) 2121 2346 drm_mm_remove_node(node); 2122 2347 drm_mm_takedown(&mm); 2123 2348 kfree(order); 2124 2349 err_nodes: 2125 2350 vfree(nodes); 2126 - err: 2127 - return ret; 2128 2351 } 2129 2352 2130 - static int igt_color_evict_range(void *ignored) 2353 + static void igt_mm_color_evict_range(struct kunit *test) 2131 2354 { 2132 2355 DRM_RND_STATE(prng, random_seed); 2133 2356 const unsigned int total_size = 8192; ··· 2135 2368 struct evict_node *nodes; 2136 2369 struct drm_mm_node *node, *next; 2137 2370 unsigned int *order, n; 2138 - int ret, err; 2139 2371 2140 2372 /* Like igt_color_evict(), but limited to small portion of the full 2141 2373 * drm_mm range. 
2142 2374 */ 2143 2375 2144 - ret = -ENOMEM; 2145 2376 nodes = vzalloc(array_size(total_size, sizeof(*nodes))); 2146 - if (!nodes) 2147 - goto err; 2377 + KUNIT_ASSERT_TRUE(test, nodes); 2148 2378 2149 2379 order = drm_random_order(total_size, &prng); 2150 2380 if (!order) 2151 2381 goto err_nodes; 2152 2382 2153 - ret = -EINVAL; 2154 - drm_mm_init(&mm, 0, 2*total_size - 1); 2383 + drm_mm_init(&mm, 0, 2 * total_size - 1); 2155 2384 mm.color_adjust = separate_adjacent_colors; 2156 2385 for (n = 0; n < total_size; n++) { 2157 - if (!expect_insert(&mm, &nodes[n].node, 2386 + if (!expect_insert(test, &mm, &nodes[n].node, 2158 2387 1, 0, color++, 2159 2388 &insert_modes[0])) { 2160 - pr_err("insert failed, step %d\n", n); 2389 + KUNIT_FAIL(test, "insert failed, step %d\n", n); 2161 2390 goto out; 2162 2391 } 2163 2392 } ··· 2161 2398 for (mode = evict_modes; mode->name; mode++) { 2162 2399 for (n = 1; n <= range_size; n <<= 1) { 2163 2400 drm_random_reorder(order, range_size, &prng); 2164 - err = evict_color(&mm, range_start, range_end, 2165 - nodes, order, total_size, 2166 - n, 1, color++, 2167 - mode); 2168 - if (err) { 2169 - pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n", 2170 - mode->name, n, range_start, range_end); 2401 + if (evict_color(test, &mm, range_start, range_end, nodes, order, 2402 + total_size, n, 1, color++, mode)) { 2403 + KUNIT_FAIL(test, 2404 + "%s evict_color(size=%u) failed for range [%x, %x]\n", 2405 + mode->name, n, range_start, range_end); 2171 2406 goto out; 2172 2407 } 2173 2408 } 2174 2409 2175 2410 for (n = 1; n < range_size; n <<= 1) { 2176 2411 drm_random_reorder(order, total_size, &prng); 2177 - err = evict_color(&mm, range_start, range_end, 2178 - nodes, order, total_size, 2179 - range_size/2, n, color++, 2180 - mode); 2181 - if (err) { 2182 - pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n", 2183 - mode->name, total_size/2, n, range_start, range_end); 2412 + if (evict_color(test, &mm, 
range_start, range_end, nodes, order, 2413 + total_size, range_size / 2, n, color++, mode)) { 2414 + KUNIT_FAIL(test, 2415 + "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n", 2416 + mode->name, total_size / 2, n, range_start, range_end); 2184 2417 goto out; 2185 2418 } 2186 2419 } ··· 2187 2428 DRM_MM_BUG_ON(!nsize); 2188 2429 2189 2430 drm_random_reorder(order, total_size, &prng); 2190 - err = evict_color(&mm, range_start, range_end, 2191 - nodes, order, total_size, 2192 - nsize, n, color++, 2193 - mode); 2194 - if (err) { 2195 - pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n", 2196 - mode->name, nsize, n, range_start, range_end); 2431 + if (evict_color(test, &mm, range_start, range_end, nodes, order, 2432 + total_size, nsize, n, color++, mode)) { 2433 + KUNIT_FAIL(test, 2434 + "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n", 2435 + mode->name, nsize, n, range_start, range_end); 2197 2436 goto out; 2198 2437 } 2199 2438 } ··· 2199 2442 cond_resched(); 2200 2443 } 2201 2444 2202 - ret = 0; 2203 2445 out: 2204 - if (ret) 2205 - show_mm(&mm); 2206 2446 drm_mm_for_each_node_safe(node, next, &mm) 2207 2447 drm_mm_remove_node(node); 2208 2448 drm_mm_takedown(&mm); 2209 2449 kfree(order); 2210 2450 err_nodes: 2211 2451 vfree(nodes); 2212 - err: 2213 - return ret; 2214 2452 } 2215 2453 2216 - #include "drm_selftest.c" 2217 - 2218 - static int __init test_drm_mm_init(void) 2454 + static int drm_mm_init_test(struct kunit *test) 2219 2455 { 2220 - int err; 2221 - 2222 2456 while (!random_seed) 2223 2457 random_seed = get_random_int(); 2224 2458 2225 - pr_info("Testing DRM range manager (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n", 2226 - random_seed, max_iterations, max_prime); 2227 - err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL); 2228 - 2229 - return err > 0 ? 
0 : err; 2459 + return 0; 2230 2460 } 2231 - 2232 - static void __exit test_drm_mm_exit(void) 2233 - { 2234 - } 2235 - 2236 - module_init(test_drm_mm_init); 2237 - module_exit(test_drm_mm_exit); 2238 2461 2239 2462 module_param(random_seed, uint, 0400); 2240 2463 module_param(max_iterations, uint, 0400); 2241 2464 module_param(max_prime, uint, 0400); 2465 + 2466 + static struct kunit_case drm_mm_tests[] = { 2467 + KUNIT_CASE(igt_mm_init), 2468 + KUNIT_CASE(igt_mm_debug), 2469 + KUNIT_CASE(igt_mm_reserve), 2470 + KUNIT_CASE(igt_mm_insert), 2471 + KUNIT_CASE(igt_mm_replace), 2472 + KUNIT_CASE(igt_mm_insert_range), 2473 + KUNIT_CASE(igt_mm_frag), 2474 + KUNIT_CASE(igt_mm_align), 2475 + KUNIT_CASE(igt_mm_align32), 2476 + KUNIT_CASE(igt_mm_align64), 2477 + KUNIT_CASE(igt_mm_evict), 2478 + KUNIT_CASE(igt_mm_evict_range), 2479 + KUNIT_CASE(igt_mm_topdown), 2480 + KUNIT_CASE(igt_mm_bottomup), 2481 + KUNIT_CASE(igt_mm_lowest), 2482 + KUNIT_CASE(igt_mm_highest), 2483 + KUNIT_CASE(igt_mm_color), 2484 + KUNIT_CASE(igt_mm_color_evict), 2485 + KUNIT_CASE(igt_mm_color_evict_range), 2486 + {} 2487 + }; 2488 + 2489 + static struct kunit_suite drm_mm_test_suite = { 2490 + .name = "drm_mm", 2491 + .init = drm_mm_init_test, 2492 + .test_cases = drm_mm_tests, 2493 + }; 2494 + 2495 + kunit_test_suite(drm_mm_test_suite); 2242 2496 2243 2497 MODULE_AUTHOR("Intel Corporation"); 2244 2498 MODULE_LICENSE("GPL");
+1 -1
drivers/gpu/drm/tests/Makefile
··· 2 2 3 3 obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o drm_damage_helper_test.o \ 4 4 drm_cmdline_parser_test.o drm_rect_test.o drm_format_test.o drm_plane_helper_test.o \ 5 - drm_dp_mst_helper_test.o drm_framebuffer_test.o drm_buddy_test.o 5 + drm_dp_mst_helper_test.o drm_framebuffer_test.o drm_buddy_test.o drm_mm_test.o