Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-or-later
2#include "alloc_api.h"
3
4/*
5 * A simple test that tries to allocate a small memory region.
6 * Expect to allocate an aligned region near the end of the available memory.
7 */
8static int alloc_top_down_simple_check(void)
9{
10 struct memblock_region *rgn = &memblock.reserved.regions[0];
11 void *allocated_ptr = NULL;
12
13 PREFIX_PUSH();
14
15 phys_addr_t size = SZ_2;
16 phys_addr_t expected_start;
17
18 setup_memblock();
19
20 expected_start = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
21
22 allocated_ptr = memblock_alloc(size, SMP_CACHE_BYTES);
23
24 ASSERT_NE(allocated_ptr, NULL);
25 ASSERT_EQ(rgn->size, size);
26 ASSERT_EQ(rgn->base, expected_start);
27
28 ASSERT_EQ(memblock.reserved.cnt, 1);
29 ASSERT_EQ(memblock.reserved.total_size, size);
30
31 test_pass_pop();
32
33 return 0;
34}
35
36/*
37 * A test that tries to allocate memory next to a reserved region that starts at
38 * the misaligned address. Expect to create two separate entries, with the new
39 * entry aligned to the provided alignment:
40 *
41 * +
42 * | +--------+ +--------|
43 * | | rgn2 | | rgn1 |
44 * +------------+--------+---------+--------+
45 * ^
46 * |
47 * Aligned address boundary
48 *
49 * The allocation direction is top-down and region arrays are sorted from lower
50 * to higher addresses, so the new region will be the first entry in
51 * memory.reserved array. The previously reserved region does not get modified.
52 * Region counter and total size get updated.
53 */
54static int alloc_top_down_disjoint_check(void)
55{
56 /* After allocation, this will point to the "old" region */
57 struct memblock_region *rgn1 = &memblock.reserved.regions[1];
58 struct memblock_region *rgn2 = &memblock.reserved.regions[0];
59 struct region r1;
60 void *allocated_ptr = NULL;
61
62 PREFIX_PUSH();
63
64 phys_addr_t r2_size = SZ_16;
65 /* Use custom alignment */
66 phys_addr_t alignment = SMP_CACHE_BYTES * 2;
67 phys_addr_t total_size;
68 phys_addr_t expected_start;
69
70 setup_memblock();
71
72 r1.base = memblock_end_of_DRAM() - SZ_2;
73 r1.size = SZ_2;
74
75 total_size = r1.size + r2_size;
76 expected_start = memblock_end_of_DRAM() - alignment;
77
78 memblock_reserve(r1.base, r1.size);
79
80 allocated_ptr = memblock_alloc(r2_size, alignment);
81
82 ASSERT_NE(allocated_ptr, NULL);
83 ASSERT_EQ(rgn1->size, r1.size);
84 ASSERT_EQ(rgn1->base, r1.base);
85
86 ASSERT_EQ(rgn2->size, r2_size);
87 ASSERT_EQ(rgn2->base, expected_start);
88
89 ASSERT_EQ(memblock.reserved.cnt, 2);
90 ASSERT_EQ(memblock.reserved.total_size, total_size);
91
92 test_pass_pop();
93
94 return 0;
95}
96
97/*
98 * A test that tries to allocate memory when there is enough space at the end
99 * of the previously reserved block (i.e. first fit):
100 *
101 * | +--------+--------------|
102 * | | r1 | r2 |
103 * +--------------+--------+--------------+
104 *
105 * Expect a merge of both regions. Only the region size gets updated.
106 */
107static int alloc_top_down_before_check(void)
108{
109 struct memblock_region *rgn = &memblock.reserved.regions[0];
110 void *allocated_ptr = NULL;
111
112 PREFIX_PUSH();
113
114 /*
115 * The first region ends at the aligned address to test region merging
116 */
117 phys_addr_t r1_size = SMP_CACHE_BYTES;
118 phys_addr_t r2_size = SZ_512;
119 phys_addr_t total_size = r1_size + r2_size;
120
121 setup_memblock();
122
123 memblock_reserve(memblock_end_of_DRAM() - total_size, r1_size);
124
125 allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
126
127 ASSERT_NE(allocated_ptr, NULL);
128 ASSERT_EQ(rgn->size, total_size);
129 ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - total_size);
130
131 ASSERT_EQ(memblock.reserved.cnt, 1);
132 ASSERT_EQ(memblock.reserved.total_size, total_size);
133
134 test_pass_pop();
135
136 return 0;
137}
138
139/*
140 * A test that tries to allocate memory when there is not enough space at the
141 * end of the previously reserved block (i.e. second fit):
142 *
143 * | +-----------+------+ |
144 * | | r2 | r1 | |
145 * +------------+-----------+------+-----+
146 *
147 * Expect a merge of both regions. Both the base address and size of the region
148 * get updated.
149 */
150static int alloc_top_down_after_check(void)
151{
152 struct memblock_region *rgn = &memblock.reserved.regions[0];
153 struct region r1;
154 void *allocated_ptr = NULL;
155
156 PREFIX_PUSH();
157
158 phys_addr_t r2_size = SZ_512;
159 phys_addr_t total_size;
160
161 setup_memblock();
162
163 /*
164 * The first region starts at the aligned address to test region merging
165 */
166 r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
167 r1.size = SZ_8;
168
169 total_size = r1.size + r2_size;
170
171 memblock_reserve(r1.base, r1.size);
172
173 allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
174
175 ASSERT_NE(allocated_ptr, NULL);
176 ASSERT_EQ(rgn->size, total_size);
177 ASSERT_EQ(rgn->base, r1.base - r2_size);
178
179 ASSERT_EQ(memblock.reserved.cnt, 1);
180 ASSERT_EQ(memblock.reserved.total_size, total_size);
181
182 test_pass_pop();
183
184 return 0;
185}
186
187/*
188 * A test that tries to allocate memory when there are two reserved regions with
189 * a gap too small to fit the new region:
190 *
191 * | +--------+----------+ +------|
192 * | | r3 | r2 | | r1 |
193 * +-------+--------+----------+---+------+
194 *
195 * Expect to allocate a region before the one that starts at the lower address,
196 * and merge them into one. The region counter and total size fields get
197 * updated.
198 */
199static int alloc_top_down_second_fit_check(void)
200{
201 struct memblock_region *rgn = &memblock.reserved.regions[0];
202 struct region r1, r2;
203 void *allocated_ptr = NULL;
204
205 PREFIX_PUSH();
206
207 phys_addr_t r3_size = SZ_1K;
208 phys_addr_t total_size;
209
210 setup_memblock();
211
212 r1.base = memblock_end_of_DRAM() - SZ_512;
213 r1.size = SZ_512;
214
215 r2.base = r1.base - SZ_512;
216 r2.size = SZ_256;
217
218 total_size = r1.size + r2.size + r3_size;
219
220 memblock_reserve(r1.base, r1.size);
221 memblock_reserve(r2.base, r2.size);
222
223 allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
224
225 ASSERT_NE(allocated_ptr, NULL);
226 ASSERT_EQ(rgn->size, r2.size + r3_size);
227 ASSERT_EQ(rgn->base, r2.base - r3_size);
228
229 ASSERT_EQ(memblock.reserved.cnt, 2);
230 ASSERT_EQ(memblock.reserved.total_size, total_size);
231
232 test_pass_pop();
233
234 return 0;
235}
236
237/*
238 * A test that tries to allocate memory when there are two reserved regions with
239 * a gap big enough to accommodate the new region:
240 *
241 * | +--------+--------+--------+ |
242 * | | r2 | r3 | r1 | |
243 * +-----+--------+--------+--------+-----+
244 *
245 * Expect to merge all of them, creating one big entry in memblock.reserved
246 * array. The region counter and total size fields get updated.
247 */
248static int alloc_in_between_generic_check(void)
249{
250 struct memblock_region *rgn = &memblock.reserved.regions[0];
251 struct region r1, r2;
252 void *allocated_ptr = NULL;
253
254 PREFIX_PUSH();
255
256 phys_addr_t gap_size = SMP_CACHE_BYTES;
257 phys_addr_t r3_size = SZ_64;
258 /*
259 * Calculate regions size so there's just enough space for the new entry
260 */
261 phys_addr_t rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2;
262 phys_addr_t total_size;
263
264 setup_memblock();
265
266 r1.size = rgn_size;
267 r1.base = memblock_end_of_DRAM() - (gap_size + rgn_size);
268
269 r2.size = rgn_size;
270 r2.base = memblock_start_of_DRAM() + gap_size;
271
272 total_size = r1.size + r2.size + r3_size;
273
274 memblock_reserve(r1.base, r1.size);
275 memblock_reserve(r2.base, r2.size);
276
277 allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
278
279 ASSERT_NE(allocated_ptr, NULL);
280 ASSERT_EQ(rgn->size, total_size);
281 ASSERT_EQ(rgn->base, r1.base - r2.size - r3_size);
282
283 ASSERT_EQ(memblock.reserved.cnt, 1);
284 ASSERT_EQ(memblock.reserved.total_size, total_size);
285
286 test_pass_pop();
287
288 return 0;
289}
290
291/*
292 * A test that tries to allocate memory when the memory is filled with reserved
293 * regions with memory gaps too small to fit the new region:
294 *
295 * +-------+
296 * | new |
297 * +--+----+
298 * | +-----+ +-----+ +-----+ |
299 * | | res | | res | | res | |
300 * +----+-----+----+-----+----+-----+----+
301 *
302 * Expect no allocation to happen.
303 */
304static int alloc_small_gaps_generic_check(void)
305{
306 void *allocated_ptr = NULL;
307
308 PREFIX_PUSH();
309
310 phys_addr_t region_size = SZ_1K;
311 phys_addr_t gap_size = SZ_256;
312 phys_addr_t region_end;
313
314 setup_memblock();
315
316 region_end = memblock_start_of_DRAM();
317
318 while (region_end < memblock_end_of_DRAM()) {
319 memblock_reserve(region_end + gap_size, region_size);
320 region_end += gap_size + region_size;
321 }
322
323 allocated_ptr = memblock_alloc(region_size, SMP_CACHE_BYTES);
324
325 ASSERT_EQ(allocated_ptr, NULL);
326
327 test_pass_pop();
328
329 return 0;
330}
331
/*
 * A test that tries to allocate memory when all memory is reserved.
 * Expect no allocation to happen.
 */
static int alloc_all_reserved_generic_check(void)
{
	void *allocated_ptr = NULL;

	PREFIX_PUSH();

	setup_memblock();

	/* Simulate full memory */
	memblock_reserve(memblock_start_of_DRAM(), MEM_SIZE);

	/* With no free memory left, even a small request must fail */
	allocated_ptr = memblock_alloc(SZ_256, SMP_CACHE_BYTES);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}
355
356/*
357 * A test that tries to allocate memory when the memory is almost full,
358 * with not enough space left for the new region:
359 *
360 * +-------+
361 * | new |
362 * +-------+
363 * |-----------------------------+ |
364 * | reserved | |
365 * +-----------------------------+---+
366 *
367 * Expect no allocation to happen.
368 */
369static int alloc_no_space_generic_check(void)
370{
371 void *allocated_ptr = NULL;
372
373 PREFIX_PUSH();
374
375 setup_memblock();
376
377 phys_addr_t available_size = SZ_256;
378 phys_addr_t reserved_size = MEM_SIZE - available_size;
379
380 /* Simulate almost-full memory */
381 memblock_reserve(memblock_start_of_DRAM(), reserved_size);
382
383 allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
384
385 ASSERT_EQ(allocated_ptr, NULL);
386
387 test_pass_pop();
388
389 return 0;
390}
391
392/*
393 * A test that tries to allocate memory when the memory is almost full,
394 * but there is just enough space left:
395 *
396 * |---------------------------+---------|
397 * | reserved | new |
398 * +---------------------------+---------+
399 *
400 * Expect to allocate memory and merge all the regions. The total size field
401 * gets updated.
402 */
403static int alloc_limited_space_generic_check(void)
404{
405 struct memblock_region *rgn = &memblock.reserved.regions[0];
406 void *allocated_ptr = NULL;
407
408 PREFIX_PUSH();
409
410 phys_addr_t available_size = SZ_256;
411 phys_addr_t reserved_size = MEM_SIZE - available_size;
412
413 setup_memblock();
414
415 /* Simulate almost-full memory */
416 memblock_reserve(memblock_start_of_DRAM(), reserved_size);
417
418 allocated_ptr = memblock_alloc(available_size, SMP_CACHE_BYTES);
419
420 ASSERT_NE(allocated_ptr, NULL);
421 ASSERT_EQ(rgn->size, MEM_SIZE);
422 ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
423
424 ASSERT_EQ(memblock.reserved.cnt, 1);
425 ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
426
427 test_pass_pop();
428
429 return 0;
430}
431
/*
 * A test that tries to allocate memory when there is no available memory
 * registered (i.e. memblock.memory has only a dummy entry).
 * Expect no allocation to happen.
 */
static int alloc_no_memory_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	PREFIX_PUSH();

	/* NOTE(review): presumably clears all memblock region arrays — confirm */
	reset_memblock_regions();

	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);

	/* With nothing to allocate from, the reserved array must stay empty */
	ASSERT_EQ(allocated_ptr, NULL);
	ASSERT_EQ(rgn->size, 0);
	ASSERT_EQ(rgn->base, 0);
	ASSERT_EQ(memblock.reserved.total_size, 0);

	test_pass_pop();

	return 0;
}
457
458/*
459 * A simple test that tries to allocate a small memory region.
460 * Expect to allocate an aligned region at the beginning of the available
461 * memory.
462 */
463static int alloc_bottom_up_simple_check(void)
464{
465 struct memblock_region *rgn = &memblock.reserved.regions[0];
466 void *allocated_ptr = NULL;
467
468 PREFIX_PUSH();
469
470 setup_memblock();
471
472 allocated_ptr = memblock_alloc(SZ_2, SMP_CACHE_BYTES);
473
474 ASSERT_NE(allocated_ptr, NULL);
475 ASSERT_EQ(rgn->size, SZ_2);
476 ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
477
478 ASSERT_EQ(memblock.reserved.cnt, 1);
479 ASSERT_EQ(memblock.reserved.total_size, SZ_2);
480
481 test_pass_pop();
482
483 return 0;
484}
485
486/*
487 * A test that tries to allocate memory next to a reserved region that starts at
488 * the misaligned address. Expect to create two separate entries, with the new
489 * entry aligned to the provided alignment:
490 *
491 * +
492 * | +----------+ +----------+ |
493 * | | rgn1 | | rgn2 | |
494 * +----+----------+---+----------+-----+
495 * ^
496 * |
497 * Aligned address boundary
498 *
499 * The allocation direction is bottom-up, so the new region will be the second
500 * entry in memory.reserved array. The previously reserved region does not get
501 * modified. Region counter and total size get updated.
502 */
503static int alloc_bottom_up_disjoint_check(void)
504{
505 struct memblock_region *rgn1 = &memblock.reserved.regions[0];
506 struct memblock_region *rgn2 = &memblock.reserved.regions[1];
507 struct region r1;
508 void *allocated_ptr = NULL;
509
510 PREFIX_PUSH();
511
512 phys_addr_t r2_size = SZ_16;
513 /* Use custom alignment */
514 phys_addr_t alignment = SMP_CACHE_BYTES * 2;
515 phys_addr_t total_size;
516 phys_addr_t expected_start;
517
518 setup_memblock();
519
520 r1.base = memblock_start_of_DRAM() + SZ_2;
521 r1.size = SZ_2;
522
523 total_size = r1.size + r2_size;
524 expected_start = memblock_start_of_DRAM() + alignment;
525
526 memblock_reserve(r1.base, r1.size);
527
528 allocated_ptr = memblock_alloc(r2_size, alignment);
529
530 ASSERT_NE(allocated_ptr, NULL);
531
532 ASSERT_EQ(rgn1->size, r1.size);
533 ASSERT_EQ(rgn1->base, r1.base);
534
535 ASSERT_EQ(rgn2->size, r2_size);
536 ASSERT_EQ(rgn2->base, expected_start);
537
538 ASSERT_EQ(memblock.reserved.cnt, 2);
539 ASSERT_EQ(memblock.reserved.total_size, total_size);
540
541 test_pass_pop();
542
543 return 0;
544}
545
546/*
547 * A test that tries to allocate memory when there is enough space at
548 * the beginning of the previously reserved block (i.e. first fit):
549 *
550 * |------------------+--------+ |
551 * | r1 | r2 | |
552 * +------------------+--------+---------+
553 *
554 * Expect a merge of both regions. Only the region size gets updated.
555 */
556static int alloc_bottom_up_before_check(void)
557{
558 struct memblock_region *rgn = &memblock.reserved.regions[0];
559 void *allocated_ptr = NULL;
560
561 PREFIX_PUSH();
562
563 phys_addr_t r1_size = SZ_512;
564 phys_addr_t r2_size = SZ_128;
565 phys_addr_t total_size = r1_size + r2_size;
566
567 setup_memblock();
568
569 memblock_reserve(memblock_start_of_DRAM() + r1_size, r2_size);
570
571 allocated_ptr = memblock_alloc(r1_size, SMP_CACHE_BYTES);
572
573 ASSERT_NE(allocated_ptr, NULL);
574 ASSERT_EQ(rgn->size, total_size);
575 ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
576
577 ASSERT_EQ(memblock.reserved.cnt, 1);
578 ASSERT_EQ(memblock.reserved.total_size, total_size);
579
580 test_pass_pop();
581
582 return 0;
583}
584
585/*
586 * A test that tries to allocate memory when there is not enough space at
587 * the beginning of the previously reserved block (i.e. second fit):
588 *
589 * | +--------+--------------+ |
590 * | | r1 | r2 | |
591 * +----+--------+--------------+---------+
592 *
593 * Expect a merge of both regions. Only the region size gets updated.
594 */
595static int alloc_bottom_up_after_check(void)
596{
597 struct memblock_region *rgn = &memblock.reserved.regions[0];
598 struct region r1;
599 void *allocated_ptr = NULL;
600
601 PREFIX_PUSH();
602
603 phys_addr_t r2_size = SZ_512;
604 phys_addr_t total_size;
605
606 setup_memblock();
607
608 /*
609 * The first region starts at the aligned address to test region merging
610 */
611 r1.base = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
612 r1.size = SZ_64;
613
614 total_size = r1.size + r2_size;
615
616 memblock_reserve(r1.base, r1.size);
617
618 allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
619
620 ASSERT_NE(allocated_ptr, NULL);
621 ASSERT_EQ(rgn->size, total_size);
622 ASSERT_EQ(rgn->base, r1.base);
623
624 ASSERT_EQ(memblock.reserved.cnt, 1);
625 ASSERT_EQ(memblock.reserved.total_size, total_size);
626
627 test_pass_pop();
628
629 return 0;
630}
631
632/*
633 * A test that tries to allocate memory when there are two reserved regions, the
634 * first one starting at the beginning of the available memory, with a gap too
635 * small to fit the new region:
636 *
637 * |------------+ +--------+--------+ |
638 * | r1 | | r2 | r3 | |
639 * +------------+-----+--------+--------+--+
640 *
641 * Expect to allocate after the second region, which starts at the higher
642 * address, and merge them into one. The region counter and total size fields
643 * get updated.
644 */
645static int alloc_bottom_up_second_fit_check(void)
646{
647 struct memblock_region *rgn = &memblock.reserved.regions[1];
648 struct region r1, r2;
649 void *allocated_ptr = NULL;
650
651 PREFIX_PUSH();
652
653 phys_addr_t r3_size = SZ_1K;
654 phys_addr_t total_size;
655
656 setup_memblock();
657
658 r1.base = memblock_start_of_DRAM();
659 r1.size = SZ_512;
660
661 r2.base = r1.base + r1.size + SZ_512;
662 r2.size = SZ_256;
663
664 total_size = r1.size + r2.size + r3_size;
665
666 memblock_reserve(r1.base, r1.size);
667 memblock_reserve(r2.base, r2.size);
668
669 allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
670
671 ASSERT_NE(allocated_ptr, NULL);
672 ASSERT_EQ(rgn->size, r2.size + r3_size);
673 ASSERT_EQ(rgn->base, r2.base);
674
675 ASSERT_EQ(memblock.reserved.cnt, 2);
676 ASSERT_EQ(memblock.reserved.total_size, total_size);
677
678 test_pass_pop();
679
680 return 0;
681}
682
/* Test case wrappers */
static int alloc_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* Run the same scenario in both allocation directions */
	memblock_set_bottom_up(false);
	alloc_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_simple_check();

	return 0;
}
694
static int alloc_disjoint_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* Run the same scenario in both allocation directions */
	memblock_set_bottom_up(false);
	alloc_top_down_disjoint_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_disjoint_check();

	return 0;
}
705
static int alloc_before_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* Run the same scenario in both allocation directions */
	memblock_set_bottom_up(false);
	alloc_top_down_before_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_before_check();

	return 0;
}
716
static int alloc_after_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* Run the same scenario in both allocation directions */
	memblock_set_bottom_up(false);
	alloc_top_down_after_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_after_check();

	return 0;
}
727
static int alloc_in_between_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* The generic check is direction-agnostic; run it both ways */
	memblock_set_bottom_up(false);
	alloc_in_between_generic_check();
	memblock_set_bottom_up(true);
	alloc_in_between_generic_check();

	return 0;
}
738
static int alloc_second_fit_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* Run the same scenario in both allocation directions */
	memblock_set_bottom_up(false);
	alloc_top_down_second_fit_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_second_fit_check();

	return 0;
}
749
static int alloc_small_gaps_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* The generic check is direction-agnostic; run it both ways */
	memblock_set_bottom_up(false);
	alloc_small_gaps_generic_check();
	memblock_set_bottom_up(true);
	alloc_small_gaps_generic_check();

	return 0;
}
760
static int alloc_all_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* The generic check is direction-agnostic; run it both ways */
	memblock_set_bottom_up(false);
	alloc_all_reserved_generic_check();
	memblock_set_bottom_up(true);
	alloc_all_reserved_generic_check();

	return 0;
}
771
static int alloc_no_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* The generic check is direction-agnostic; run it both ways */
	memblock_set_bottom_up(false);
	alloc_no_space_generic_check();
	memblock_set_bottom_up(true);
	alloc_no_space_generic_check();

	return 0;
}
782
static int alloc_limited_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* The generic check is direction-agnostic; run it both ways */
	memblock_set_bottom_up(false);
	alloc_limited_space_generic_check();
	memblock_set_bottom_up(true);
	alloc_limited_space_generic_check();

	return 0;
}
793
static int alloc_no_memory_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	/* The generic check is direction-agnostic; run it both ways */
	memblock_set_bottom_up(false);
	alloc_no_memory_generic_check();
	memblock_set_bottom_up(true);
	alloc_no_memory_generic_check();

	return 0;
}
804
/* Entry point: run all memblock_alloc() test cases */
int memblock_alloc_checks(void)
{
	const char *func_testing = "memblock_alloc";

	prefix_reset();
	prefix_push(func_testing);
	test_print("Running %s tests...\n", func_testing);

	/* Set up the simulated physical memory before any allocation test */
	reset_memblock_attributes();
	dummy_physical_memory_init();

	alloc_simple_check();
	alloc_disjoint_check();
	alloc_before_check();
	alloc_after_check();
	alloc_second_fit_check();
	alloc_small_gaps_check();
	alloc_in_between_check();
	alloc_all_reserved_check();
	alloc_no_space_check();
	alloc_limited_space_check();
	alloc_no_memory_check();

	/* Release the simulated physical memory */
	dummy_physical_memory_cleanup();

	prefix_pop();

	return 0;
}