// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test of the ext4 multiblock allocator (mballoc).
 */

#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <linux/random.h>

#include "ext4.h"

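/*
 * The tests below run the mballoc code against an in-memory fake: a
 * super_block obtained from sget() that never touches a disk.  Each block
 * group is backed by a struct mbt_grp_ctx holding its block bitmap and a
 * placeholder group descriptor, and the ext4 helpers that would normally
 * read this metadata from disk are diverted to the stubs further down via
 * KUnit static stubs.
 */
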
struct mbt_grp_ctx {
	struct buffer_head bitmap_bh;
	/* desc and gd_bh are just placeholders for now */
	struct ext4_group_desc desc;
	struct buffer_head gd_bh;
};

struct mbt_ctx {
	struct mbt_grp_ctx *grp_ctx;
};

struct mbt_ext4_super_block {
	struct ext4_super_block es;
	struct ext4_sb_info sbi;
	struct mbt_ctx mbt_ctx;
};

#define MBT_SB(_sb)		(container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
#define MBT_CTX(_sb)		(&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group)	(&MBT_CTX(_sb)->grp_ctx[_group])

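/*
 * Minimal inode hooks: ext4_mb_init_backend() creates the buddy cache
 * inode with new_inode(sb), which calls back into these super_operations,
 * so they only initialize the ext4_inode_info fields mballoc touches.
 */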
static struct inode *mbt_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmalloc(sizeof(struct ext4_inode_info), GFP_KERNEL);
	if (!ei)
		return NULL;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);

	return &ei->vfs_inode;
}

static void mbt_free_inode(struct inode *inode)
{
	kfree(EXT4_I(inode));
}

static const struct super_operations mbt_sops = {
	.alloc_inode	= mbt_alloc_inode,
	.free_inode	= mbt_free_inode,
};

static void mbt_kill_sb(struct super_block *sb)
{
	generic_shutdown_super(sb);
}

static struct file_system_type mbt_fs_type = {
	.name		= "mballoc test",
	.kill_sb	= mbt_kill_sb,
};

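/*
 * Bring up just enough of the multiblock allocator for a fake super_block:
 * a zeroed block_device (ext4_mb_init() queries bdev_nonrot()), an empty
 * s_inodes list for the buddy cache inode, and the two percpu cluster
 * counters that the allocation paths update.
 */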
static int mbt_mb_init(struct super_block *sb)
{
	ext4_fsblk_t block;
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend->
	 * sbi->s_buddy_cache = new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	block = ext4_count_free_clusters(sb);
	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_mb_release;

	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_freeclusters;

	return 0;

err_freeclusters:
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
	ext4_mb_release(sb);
err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}

static void mbt_mb_release(struct super_block *sb)
{
	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
	ext4_mb_release(sb);
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
}

static int mbt_set(struct super_block *sb, void *data)
{
	return 0;
}

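/*
 * Build the fake super_block: sget() returns a real VFS superblock (so the
 * s_umount handling below matches what the VFS expects), while the
 * ext4-specific state lives in an mbt_ext4_super_block so the MBT_* macros
 * can recover the test context from sb->s_fs_info.
 */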
static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct mbt_ext4_super_block *fsb;
	struct super_block *sb;
	struct ext4_sb_info *sbi;

	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
	if (fsb == NULL)
		return NULL;

	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
	if (IS_ERR(sb))
		goto out;

	sbi = &fsb->sbi;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_deactivate;

	bgl_lock_init(sbi->s_blockgroup_lock);

	sbi->s_es = &fsb->es;
	sbi->s_sb = sb;
	sb->s_fs_info = sbi;

	up_write(&sb->s_umount);
	return sb;

out_deactivate:
	deactivate_locked_super(sb);
out:
	kfree(fsb);
	return NULL;
}

static void mbt_ext4_free_super_block(struct super_block *sb)
{
	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	kfree(sbi->s_blockgroup_lock);
	deactivate_super(sb);
	kfree(fsb);
}

struct mbt_ext4_block_layout {
	unsigned char blocksize_bits;
	unsigned int cluster_bits;
	uint32_t blocks_per_group;
	ext4_group_t group_count;
	uint16_t desc_size;
};

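/*
 * Translate a test layout into the superblock fields mballoc reads.  The
 * derived values follow the on-disk rules: s_clusters_per_group is
 * blocks_per_group scaled down by the cluster ratio, and
 * s_desc_per_block_bits is how many descriptors of desc_size fit in one
 * block, expressed as a shift (fls(x) - 1 is log2 for a power of two).
 */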
static void mbt_init_sb_layout(struct super_block *sb,
			       struct mbt_ext4_block_layout *layout)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sb->s_blocksize = 1UL << layout->blocksize_bits;
	sb->s_blocksize_bits = layout->blocksize_bits;

	sbi->s_groups_count = layout->group_count;
	sbi->s_blocks_per_group = layout->blocks_per_group;
	sbi->s_cluster_bits = layout->cluster_bits;
	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
	sbi->s_clusters_per_group = layout->blocks_per_group >>
				    layout->cluster_bits;
	sbi->s_desc_size = layout->desc_size;
	sbi->s_desc_per_block_bits =
		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

	es->s_first_data_block = cpu_to_le32(0);
	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
					    layout->group_count);
}

static int mbt_grp_ctx_init(struct super_block *sb,
			    struct mbt_grp_ctx *grp_ctx)
{
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
	if (grp_ctx->bitmap_bh.b_data == NULL)
		return -ENOMEM;
	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

	return 0;
}

static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
{
	kfree(grp_ctx->bitmap_bh.b_data);
	grp_ctx->bitmap_bh.b_data = NULL;
}

static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
			      unsigned int start, unsigned int len)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}

static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	return grp_ctx->bitmap_bh.b_data;
}

/* called after mbt_init_sb_layout */
static int mbt_ctx_init(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
			       GFP_KERNEL);
	if (ctx->grp_ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++)
		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
			goto out;

	/*
	 * The first data block (i.e. the first cluster in the first group)
	 * is used by metadata; mark it used so that no data block is
	 * allocated there, which would fail the ext4_sb_block_valid check.
	 */
	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

	return 0;
out:
	while (i-- > 0)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
	return -ENOMEM;
}

static void mbt_ctx_release(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
}

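/*
 * Static stubs: with CONFIG_KUNIT the real ext4 functions carry a
 * KUNIT_STATIC_STUB_REDIRECT hook, so once mbt_kunit_init() activates
 * these replacements, every bitmap read, bitmap wait, descriptor lookup
 * and on-disk bitmap update issued by mballoc lands in the in-memory
 * mbt_ctx instead of a block device.
 */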
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
				   bool ignore_locked)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
	get_bh(&grp_ctx->bitmap_bh);
	return &grp_ctx->bitmap_bh;
}

static int ext4_wait_block_bitmap_stub(struct super_block *sb,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	/*
	 * The real ext4_wait_block_bitmap sets these flags, and functions
	 * like ext4_mb_init_cache verify that they are set.
	 */
	set_buffer_uptodate(bh);
	set_bitmap_uptodate(bh);
	set_buffer_verified(bh);
	return 0;
}

static struct ext4_group_desc *
ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
			 struct buffer_head **bh)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	if (bh != NULL)
		*bh = &grp_ctx->gd_bh;

	return &grp_ctx->desc;
}

static int
ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
			  ext4_group_t group, ext4_grpblk_t blkoff,
			  ext4_grpblk_t len, int flags,
			  ext4_grpblk_t *ret_changed)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
	struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;

	if (state)
		mb_set_bits(bitmap_bh->b_data, blkoff, len);
	else
		mb_clear_bits(bitmap_bh->b_data, blkoff, len);

	return 0;
}

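/*
 * Per-test setup: build the fake superblock for the current layout
 * parameter, activate the stubs (KUnit deactivates them automatically on
 * test exit), and only then run ext4_mb_init(), which already depends on
 * the stubbed metadata accessors.
 */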
#define TEST_GOAL_GROUP 1
static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
	kunit_activate_static_stub(test,
				   ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test,
				   ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test,
				   ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test,
				   ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);

	/* stub functions will be called in mbt_mb_init->ext4_mb_init */
	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}

static void mbt_kunit_exit(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;

	mbt_mb_release(sb);
	mbt_ctx_release(sb);
	mbt_ext4_free_super_block(sb);
}

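/*
 * ext4_mb_new_blocks_simple() scans the bitmaps directly rather than the
 * buddy cache.  Exercise its search order: hit the goal block, then the
 * next free block in the goal group, then fall through to the following
 * group, wrap around to the groups before the goal, and finally fail once
 * every group is full.
 */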
static void test_new_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_request ar;
	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
	int err = 0;
	ext4_fsblk_t found;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;

	inode->i_sb = sb;
	ar.inode = inode;

	/* get block at goal */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
		"failed to alloc block at goal, expected %llu found %llu",
		ar.goal, found);

	/* get block after goal in goal group */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
		"failed to alloc block after goal in goal group, expected %llu found %llu",
		ar.goal + EXT4_C2B(sbi, 1), found);

	/* get block after goal group */
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, goal_group + 1), found,
		"failed to alloc block after goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, goal_group + 1), found);

	/* get block before goal group */
	for (i = goal_group; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
		"failed to alloc block before goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);

	/* no block available, fail to allocate block */
	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_NE_MSG(test, err, 0,
		"unexpectedly got a block when none is available");
}

#define TEST_RANGE_COUNT 8

struct test_range {
	ext4_grpblk_t start;
	ext4_grpblk_t len;
};

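/*
 * Carve the group into TEST_RANGE_COUNT equal windows and pick one random
 * extent inside each window, so the generated ranges never overlap and a
 * range may legitimately be empty (len == 0).
 */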
static void
mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
			 int count)
{
	ext4_grpblk_t start, len, max;
	int i;

	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
	for (i = 0; i < count; i++) {
		start = get_random_u32() % max;
		len = get_random_u32() % max;
		len = min(len, max - start);

		ranges[i].start = start + i * max;
		ranges[i].len = len;
	}
}

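/*
 * After freeing [start, start + len) in goal_group, every other group
 * must still be fully allocated and the goal group's bitmap must contain
 * exactly that one free extent.
 */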
static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
			    ext4_group_t goal_group, ext4_grpblk_t start,
			    ext4_grpblk_t len)
{
	void *bitmap;
	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;

	for (i = 0; i < ext4_get_groups_count(sb); i++) {
		if (i == goal_group)
			continue;

		bitmap = mbt_ctx_bitmap(sb, i);
		bit = mb_find_next_zero_bit(bitmap, max, 0);
		KUNIT_ASSERT_EQ_MSG(test, bit, max,
				    "free block on unexpected group %d", i);
	}

	bitmap = mbt_ctx_bitmap(sb, goal_group);
	bit = mb_find_next_zero_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, bit, start);

	bit = mb_find_next_bit(bitmap, max, bit + 1);
	KUNIT_ASSERT_EQ(test, bit, start + len);
}

static void
test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
			      ext4_grpblk_t start, ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode;
	ext4_fsblk_t block;

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	if (len == 0)
		return;

	block = ext4_group_first_block_no(sb, goal_group) +
		EXT4_C2B(sbi, start);
	ext4_free_blocks_simple(inode, block, len);
	validate_free_blocks_simple(test, sb, goal_group, start, len);
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
}

static void test_free_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;
	struct test_range ranges[TEST_RANGE_COUNT];

	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, max);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
					      ranges[i].start, ranges[i].len);
}

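/*
 * ext4_mb_mark_diskspace_used() commits the best-found extent in ac to the
 * (stubbed) on-disk bitmap.  Start from an all-free goal group and verify
 * that the bitmap afterwards holds exactly the bits [start, start + len).
 */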
static void
test_mark_diskspace_used_range(struct kunit *test,
			       struct ext4_allocation_context *ac,
			       ext4_grpblk_t start,
			       ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int ret;
	void *bitmap;
	ext4_grpblk_t i, max;

	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
	if (len == 0)
		return;

	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
	ac->ac_b_ex.fe_start = start;
	ac->ac_b_ex.fe_len = len;

	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
	memset(bitmap, 0, sb->s_blocksize);
	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	max = EXT4_CLUSTERS_PER_GROUP(sb);
	i = mb_find_next_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, i, start);
	i = mb_find_next_zero_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, i, start + len);
	i = mb_find_next_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, max, i);
}

static void test_mark_diskspace_used(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_context ac;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	ac.ac_status = AC_STATUS_FOUND;
	ac.ac_sb = sb;
	ac.ac_inode = inode;
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
					       ranges[i].len);
}

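/*
 * Reference implementation of buddy generation, kept deliberately separate
 * from ext4_mb_generate_buddy(): the first pass pairs free order-0
 * clusters into order-1 buddies, the following loop folds each order into
 * the next, and the final scan counts the free fragments in the bitmap.
 */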
static void mbt_generate_buddy(struct super_block *sb, void *buddy,
			       void *bitmap, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	uint32_t order, off;
	void *bb, *bb_h;
	int max;

	memset(buddy, 0xff, sb->s_blocksize);
	memset(grp, 0, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]));

	bb = bitmap;
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	bb_h = buddy + sbi->s_mb_offsets[1];

	off = mb_find_next_zero_bit(bb, max, 0);
	grp->bb_first_free = off;
	while (off < max) {
		grp->bb_counters[0]++;
		grp->bb_free++;

		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
			grp->bb_free++;
			grp->bb_counters[0]--;
			mb_clear_bit(off >> 1, bb_h);
			grp->bb_counters[1]++;
			grp->bb_largest_free_order = 1;
			off++;
		}

		off = mb_find_next_zero_bit(bb, max, off + 1);
	}

	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
		bb = buddy + sbi->s_mb_offsets[order];
		bb_h = buddy + sbi->s_mb_offsets[order + 1];
		max = max >> 1;
		off = mb_find_next_zero_bit(bb, max, 0);

		while (off < max) {
			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
				mb_set_bits(bb, off, 2);
				grp->bb_counters[order] -= 2;
				mb_clear_bit(off >> 1, bb_h);
				grp->bb_counters[order + 1]++;
				grp->bb_largest_free_order = order + 1;
				off++;
			}

			off = mb_find_next_zero_bit(bb, max, off + 1);
		}
	}

	max = EXT4_CLUSTERS_PER_GROUP(sb);
	off = mb_find_next_zero_bit(bitmap, max, 0);
	while (off < max) {
		grp->bb_fragments++;

		off = mb_find_next_bit(bitmap, max, off + 1);
		if (off + 1 >= max)
			break;

		off = mb_find_next_zero_bit(bitmap, max, off + 1);
	}
}

static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
			struct ext4_group_info *grp2)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
			grp2->bb_first_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
			grp2->bb_fragments);
	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
			grp2->bb_largest_free_order);

	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
				    grp2->bb_counters[i],
				    "bb_counters[%d] differs, expected %d, generated %d",
				    i, grp1->bb_counters[i],
				    grp2->bb_counters[i]);
	}
}

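/*
 * Run both generators against the same bitmap and require identical buddy
 * contents and group statistics.
 */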
static void
do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
		       void *mbt_buddy, struct ext4_group_info *mbt_grp,
		       void *ext4_buddy, struct ext4_group_info *ext4_grp)
{
	int i;

	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);

	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		ext4_grp->bb_counters[i] = 0;
	/* needed by validation in ext4_mb_generate_buddy */
	ext4_grp->bb_free = mbt_grp->bb_free;
	memset(ext4_buddy, 0xff, sb->s_blocksize);
	ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
			       ext4_grp);

	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, mbt_grp, ext4_grp);
}

static void test_mb_generate_buddy(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *expected_bb, *generate_bb;
	struct ext4_group_info *expected_grp, *generate_grp;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				     bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
	KUNIT_ASSERT_NOT_NULL(test, generate_grp);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++) {
		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
		do_test_generate_buddy(test, sb, bitmap, expected_bb,
				       expected_grp, generate_bb, generate_grp);
	}
}

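/*
 * Mark an extent in the buddy via mb_mark_used(), mirror the change in a
 * shadow bitmap, regenerate the expected buddy from that bitmap, and check
 * that it matches the buddy and statistics mb_mark_used() maintained
 * incrementally.
 */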
static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int i;

	/* mb_mark_used only accepts non-zero len */
	if (len == 0)
		return;

	ex.fe_start = start;
	ex.fe_len = len;
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	mb_set_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free -= len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}

static void test_mb_mark_used(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
			    bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
	grp->bb_largest_free_order = -1;
	grp->bb_avg_fragment_size_order = -1;
	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_mark_used_range(test, &e4b, ranges[i].start,
					ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}

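/*
 * The inverse of the mark_used check: free an extent with mb_free_blocks()
 * from a fully used group and compare against a buddy regenerated from the
 * shadow bitmap.
 */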
static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			  void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	/* mb_free_blocks will WARN if len is 0 */
	if (len == 0)
		return;

	ext4_lock_group(sb, e4b->bd_group);
	mb_free_blocks(NULL, e4b, start, len);
	ext4_unlock_group(sb, e4b->bd_group);

	mb_clear_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free += len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}

static void test_mb_free_blocks(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	struct ext4_free_extent ex;
	int ret;
	int i;
	struct test_range ranges[TEST_RANGE_COUNT];

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
			    bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_start = 0;
	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(&e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	grp->bb_free = 0;
	grp->bb_largest_free_order = -1;
	grp->bb_avg_fragment_size_order = -1;
	memset(bitmap, 0xff, sb->s_blocksize);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
					  ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}

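/*
 * A rough cost estimate rather than a functional check: repeatedly mark
 * and free random ranges and report the jiffies spent marking them used,
 * so a regression in the mb_mark_used() buddy walk shows up as a larger
 * number.  Resolution is limited by HZ, hence the large iteration count.
 */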
#define COUNT_FOR_ESTIMATE 100000
static void test_mb_mark_used_cost(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i, j;
	unsigned long start, end, all = 0;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_group = TEST_GOAL_GROUP;
	for (j = 0; j < COUNT_FOR_ESTIMATE; j++) {
		mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
		start = jiffies;
		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ex.fe_start = ranges[i].start;
			ex.fe_len = ranges[i].len;
			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_mark_used(&e4b, &ex);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
		end = jiffies;
		all += (end - start);

		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_free_blocks(NULL, &e4b, ranges[i].start,
				       ranges[i].len);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
	}

	kunit_info(test, "cost %lu jiffies\n", all);
	ext4_mb_unload_buddy(&e4b);
}

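/*
 * Layouts parameterize every test case: block sizes of 1KiB, 4KiB and
 * 64KiB, each with a bigalloc cluster of 8 blocks, 8192 blocks per group
 * and 64-byte group descriptors.
 */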
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
	{
		.blocksize_bits = 10,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 12,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 16,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
};

static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
			    char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
		 "blocks_per_group=%d group_count=%d desc_size=%d\n",
		 layout->blocksize_bits, layout->cluster_bits,
		 layout->blocks_per_group, layout->group_count,
		 layout->desc_size);
}
KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);

static struct kunit_case mbt_test_cases[] = {
	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM_ATTR(test_mb_mark_used_cost, mbt_layouts_gen_params,
			      { .speed = KUNIT_SPEED_SLOW }),
	{}
};

static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};

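/*
 * A sketch of how this suite is typically run (assuming a kunitconfig
 * that enables CONFIG_EXT4_KUNIT_TESTS=y, e.g. the one shipped in
 * fs/ext4/):
 *
 *   ./tools/testing/kunit/kunit.py run ext4_mballoc_test \
 *           --kunitconfig fs/ext4/.kunitconfig
 */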
kunit_test_suites(&mbt_test_suite);

MODULE_LICENSE("GPL");