// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <kunit/test-bug.h>

#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>
#include <drm/drm_print.h>

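/*
 * Free blocks are tracked in per-order rbtrees, kept separately for cleared
 * and dirty blocks, so allocations that care about the clear state can search
 * the matching tree first.
 */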
enum drm_buddy_free_tree {
	DRM_BUDDY_CLEAR_TREE = 0,
	DRM_BUDDY_DIRTY_TREE,
	DRM_BUDDY_MAX_FREE_TREES,
};

static struct kmem_cache *slab_blocks;

#define for_each_free_tree(tree) \
	for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++)

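/*
 * The block header packs the block's offset together with its order and the
 * state/clear bits (see the DRM_BUDDY_HEADER_* masks in drm_buddy.h), so a
 * freshly allocated block only needs the offset and order filled in here.
 */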
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	RB_CLEAR_NODE(&block->rb);

	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}

static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}

static enum drm_buddy_free_tree
get_block_tree(struct drm_buddy_block *block)
{
	return drm_buddy_block_is_clear(block) ?
	       DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
}

static struct drm_buddy_block *
rbtree_get_free_block(const struct rb_node *node)
{
	return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
}

static struct drm_buddy_block *
rbtree_last_free_block(struct rb_root *root)
{
	return rbtree_get_free_block(rb_last(root));
}

static bool rbtree_is_empty(struct rb_root *root)
{
	return RB_EMPTY_ROOT(root);
}

static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
					const struct drm_buddy_block *node)
{
	return drm_buddy_block_offset(block) < drm_buddy_block_offset(node);
}

static bool rbtree_block_offset_less(struct rb_node *block,
				     const struct rb_node *node)
{
	return drm_buddy_block_offset_less(rbtree_get_free_block(block),
					   rbtree_get_free_block(node));
}

static void rbtree_insert(struct drm_buddy *mm,
			  struct drm_buddy_block *block,
			  enum drm_buddy_free_tree tree)
{
	rb_add(&block->rb,
	       &mm->free_trees[tree][drm_buddy_block_order(block)],
	       rbtree_block_offset_less);
}

static void rbtree_remove(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	unsigned int order = drm_buddy_block_order(block);
	enum drm_buddy_free_tree tree;
	struct rb_root *root;

	tree = get_block_tree(block);
	root = &mm->free_trees[tree][order];

	rb_erase(&block->rb, root);
	RB_CLEAR_NODE(&block->rb);
}

static void clear_reset(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_CLEAR;
}

static void mark_cleared(struct drm_buddy_block *block)
{
	block->header |= DRM_BUDDY_HEADER_CLEAR;
}

static void mark_allocated(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	rbtree_remove(mm, block);
}

static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	enum drm_buddy_free_tree tree;

	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	tree = get_block_tree(block);
	rbtree_insert(mm, block, tree);
}

static void mark_split(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	rbtree_remove(mm, block);
}

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

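/*
 * Free a block and merge it with its buddy as far up the tree as possible.
 * Without @force_merge, merging stops when the buddy is not free or when the
 * clear states of the two halves differ; with @force_merge the clear state is
 * ignored and clear_avail is adjusted for any cleared buddy that gets
 * swallowed. Returns the order of the block that is finally marked free.
 */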
static unsigned int __drm_buddy_free(struct drm_buddy *mm,
				     struct drm_buddy_block *block,
				     bool force_merge)
{
	struct drm_buddy_block *parent;
	unsigned int order;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		if (!drm_buddy_block_is_free(buddy))
			break;

		if (!force_merge) {
			/*
			 * Check the clear state of the block and its buddy,
			 * and stop merging if the two states differ.
			 */
			if (drm_buddy_block_is_clear(block) !=
			    drm_buddy_block_is_clear(buddy))
				break;

			if (drm_buddy_block_is_clear(block))
				mark_cleared(parent);
		}

		rbtree_remove(mm, buddy);
		if (force_merge && drm_buddy_block_is_clear(buddy))
			mm->clear_avail -= drm_buddy_block_size(mm, buddy);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	order = drm_buddy_block_order(block);
	mark_free(mm, block);

	return order;
}

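/*
 * Scan the free trees from high to low order and force-merge free buddy pairs
 * (even when their clear states differ) that fall entirely within
 * [@start, @end], until a free block of at least @min_order is produced.
 * Returns 0 on success, -ENOMEM if no such block could be assembled.
 */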
static int __force_merge(struct drm_buddy *mm,
			 u64 start,
			 u64 end,
			 unsigned int min_order)
{
	unsigned int tree, order;
	int i;

	if (!min_order)
		return -ENOMEM;

	if (min_order > mm->max_order)
		return -EINVAL;

	for_each_free_tree(tree) {
		for (i = min_order - 1; i >= 0; i--) {
			struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);

			while (iter) {
				struct drm_buddy_block *block, *buddy;
				u64 block_start, block_end;

				block = rbtree_get_free_block(iter);
				iter = rb_prev(iter);

				if (!block || !block->parent)
					continue;

				block_start = drm_buddy_block_offset(block);
				block_end = block_start + drm_buddy_block_size(mm, block) - 1;

				if (!contains(start, end, block_start, block_end))
					continue;

				buddy = __get_buddy(block);
				if (!drm_buddy_block_is_free(buddy))
					continue;

				WARN_ON(drm_buddy_block_is_clear(block) ==
					drm_buddy_block_is_clear(buddy));

				/*
				 * Advance to the next node when the current node is the buddy,
				 * as freeing the block will also remove its buddy from the tree.
				 */
				if (iter == &buddy->rb)
					iter = rb_prev(iter);

				rbtree_remove(mm, block);
				if (drm_buddy_block_is_clear(block))
					mm->clear_avail -= drm_buddy_block_size(mm, block);

				order = __drm_buddy_free(mm, block, true);
				if (order >= min_order)
					return 0;
			}
		}
	}

	return -ENOMEM;
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i, j, root_count = 0;
	u64 offset = 0;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < SZ_4K)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->clear_avail = 0;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES,
				       sizeof(*mm->free_trees),
				       GFP_KERNEL);
	if (!mm->free_trees)
		return -ENOMEM;

	for_each_free_tree(i) {
		mm->free_trees[i] = kmalloc_array(mm->max_order + 1,
						  sizeof(struct rb_root),
						  GFP_KERNEL);
		if (!mm->free_trees[i])
			goto out_free_tree;

		for (j = 0; j <= mm->max_order; ++j)
			mm->free_trees[i][j] = RB_ROOT;
	}

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_tree;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(root_count > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[root_count] = root;

		offset += root_size;
		size -= root_size;
		root_count++;
	} while (size);

	return 0;

out_free_roots:
	while (root_count--)
		drm_block_free(mm, mm->roots[root_count]);
	kfree(mm->roots);
out_free_tree:
	while (i--)
		kfree(mm->free_trees[i]);
	kfree(mm->free_trees);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
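
/*
 * Example usage (illustrative sketch only, not part of the original source):
 * a driver managing a 1 GiB region with a 4 KiB minimum page size could set
 * the allocator up roughly as follows; error handling is elided and "vram_mm"
 * is a hypothetical name.
 *
 *	struct drm_buddy vram_mm;
 *
 *	err = drm_buddy_init(&vram_mm, SZ_1G, SZ_4K);
 *	if (err)
 *		return err;
 *	...
 *	drm_buddy_fini(&vram_mm);
 */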

/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleans up the memory manager resources and the free trees.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	u64 root_size, size, start;
	unsigned int order;
	int i;

	size = mm->size;

	for (i = 0; i < mm->n_roots; ++i) {
		order = ilog2(size) - ilog2(mm->chunk_size);
		start = drm_buddy_block_offset(mm->roots[i]);
		__force_merge(mm, start, start + size, order);

		if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i])))
			kunit_fail_current_test("buddy_fini() root");

		drm_block_free(mm, mm->roots[i]);

		root_size = mm->chunk_size << order;
		size -= root_size;
	}

	WARN_ON(mm->avail != mm->size);

	for_each_free_tree(i)
		kfree(mm->free_trees[i]);
	kfree(mm->free_trees);
	kfree(mm->roots);
}
EXPORT_SYMBOL(drm_buddy_fini);

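/*
 * Split a free block into two half-sized children, propagating the clear
 * state from the parent, and insert both halves into the free trees.
 */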
static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_split(mm, block);

	if (drm_buddy_block_is_clear(block)) {
		mark_cleared(block->left);
		mark_cleared(block->right);
		clear_reset(block);
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	return 0;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

/**
 * drm_buddy_reset_clear - reset blocks clear state
 *
 * @mm: DRM buddy manager
 * @is_clear: blocks clear state
 *
 * Reset the clear state based on @is_clear value for each block
 * in the freetree.
 */
void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
{
	enum drm_buddy_free_tree src_tree, dst_tree;
	u64 root_size, size, start;
	unsigned int order;
	int i;

	size = mm->size;
	for (i = 0; i < mm->n_roots; ++i) {
		order = ilog2(size) - ilog2(mm->chunk_size);
		start = drm_buddy_block_offset(mm->roots[i]);
		__force_merge(mm, start, start + size, order);

		root_size = mm->chunk_size << order;
		size -= root_size;
	}

	src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
	dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;

	for (i = 0; i <= mm->max_order; ++i) {
		struct rb_root *root = &mm->free_trees[src_tree][i];
		struct drm_buddy_block *block, *tmp;

		rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
			rbtree_remove(mm, block);
			if (is_clear) {
				mark_cleared(block);
				mm->clear_avail += drm_buddy_block_size(mm, block);
			} else {
				clear_reset(block);
				mm->clear_avail -= drm_buddy_block_size(mm, block);
			}

			rbtree_insert(mm, block, dst_tree);
		}
	}
}
EXPORT_SYMBOL(drm_buddy_reset_clear);

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	__drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);

static void __drm_buddy_free_list(struct drm_buddy *mm,
				  struct list_head *objects,
				  bool mark_clear,
				  bool mark_dirty)
{
	struct drm_buddy_block *block, *on;

	WARN_ON(mark_dirty && mark_clear);

	list_for_each_entry_safe(block, on, objects, link) {
		if (mark_clear)
			mark_cleared(block);
		else if (mark_dirty)
			clear_reset(block);
		drm_buddy_free_block(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}

static void drm_buddy_free_list_internal(struct drm_buddy *mm,
					 struct list_head *objects)
{
	/*
	 * Don't touch the clear/dirty bit, since allocation is still internal
	 * at this point. For example we might have just failed part of the
	 * allocation.
	 */
	__drm_buddy_free_list(mm, objects, false, false);
}

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 * @flags: optional flags like DRM_BUDDY_CLEARED
 */
void drm_buddy_free_list(struct drm_buddy *mm,
			 struct list_head *objects,
			 unsigned int flags)
{
	bool mark_clear = flags & DRM_BUDDY_CLEARED;

	__drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
}
EXPORT_SYMBOL(drm_buddy_free_list);

static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
{
	bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;

	return needs_clear != drm_buddy_block_is_clear(block);
}

static struct drm_buddy_block *
__alloc_range_bias(struct drm_buddy *mm,
		   u64 start, u64 end,
		   unsigned int order,
		   unsigned long flags,
		   bool fallback)
{
	u64 req_size = mm->chunk_size << order;
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (block_start < start || block_end > end) {
			u64 adjusted_start = max(block_start, start);
			u64 adjusted_end = min(block_end, end);

			if (round_down(adjusted_end + 1, req_size) <=
			    round_up(adjusted_start, req_size))
				continue;
		}

		if (!fallback && block_incompatible(block, flags))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}

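/*
 * Range-biased allocation: first try blocks whose clear state matches the
 * request (see block_incompatible()), then fall back to any suitable free
 * block within the range.
 */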
static struct drm_buddy_block *
__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
			     u64 start, u64 end,
			     unsigned int order,
			     unsigned long flags)
{
	struct drm_buddy_block *block;
	bool fallback = false;

	block = __alloc_range_bias(mm, start, end, order,
				   flags, fallback);
	if (IS_ERR(block))
		return __alloc_range_bias(mm, start, end, order,
					  flags, !fallback);

	return block;
}

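/*
 * For top-down allocation: return the free block with the highest offset
 * among all orders >= @order in the given tree.
 */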
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm,
	     unsigned int order,
	     enum drm_buddy_free_tree tree)
{
	struct drm_buddy_block *max_block = NULL, *block = NULL;
	struct rb_root *root;
	unsigned int i;

	for (i = order; i <= mm->max_order; ++i) {
		root = &mm->free_trees[tree][i];
		block = rbtree_last_free_block(root);
		if (!block)
			continue;

		if (!max_block) {
			max_block = block;
			continue;
		}

		if (drm_buddy_block_offset(block) >
		    drm_buddy_block_offset(max_block)) {
			max_block = block;
		}
	}

	return max_block;
}

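/*
 * Allocate a block of @order from the free trees, preferring the tree that
 * matches the requested clear state and falling back to the other tree.
 * Top-down allocations pick the highest-offset candidate; the chosen block is
 * then split down to @order as needed.
 */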
740static struct drm_buddy_block *
741alloc_from_freetree(struct drm_buddy *mm,
742 unsigned int order,
743 unsigned long flags)
744{
745 struct drm_buddy_block *block = NULL;
746 struct rb_root *root;
747 enum drm_buddy_free_tree tree;
748 unsigned int tmp;
749 int err;
750
751 tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ?
752 DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
753
754 if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
755 block = get_maxblock(mm, order, tree);
756 if (block)
757 /* Store the obtained block order */
758 tmp = drm_buddy_block_order(block);
759 } else {
760 for (tmp = order; tmp <= mm->max_order; ++tmp) {
761 /* Get RB tree root for this order and tree */
762 root = &mm->free_trees[tree][tmp];
763 block = rbtree_last_free_block(root);
764 if (block)
765 break;
766 }
767 }
768
769 if (!block) {
770 /* Try allocating from the other tree */
771 tree = (tree == DRM_BUDDY_CLEAR_TREE) ?
772 DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
773
774 for (tmp = order; tmp <= mm->max_order; ++tmp) {
775 root = &mm->free_trees[tree][tmp];
776 block = rbtree_last_free_block(root);
777 if (block)
778 break;
779 }
780
781 if (!block)
782 return ERR_PTR(-ENOSPC);
783 }
784
785 BUG_ON(!drm_buddy_block_is_free(block));
786
787 while (tmp != order) {
788 err = split_block(mm, block);
789 if (unlikely(err))
790 goto err_undo;
791
792 block = block->right;
793 tmp--;
794 }
795 return block;
796
797err_undo:
798 if (tmp != order)
799 __drm_buddy_free(mm, block, false);
800 return ERR_PTR(err);
801}
802
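/*
 * Walk the blocks on @dfs depth-first and allocate every free block that lies
 * completely inside [@start, @start + @size), splitting partially overlapping
 * blocks along the way. On -ENOSPC the caller can optionally receive the
 * partially allocated blocks via @total_allocated_on_err.
 */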
static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks,
			 u64 *total_allocated_on_err)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	u64 total_allocated = 0;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (drm_buddy_block_is_free(block)) {
				mark_allocated(mm, block);
				total_allocated += drm_buddy_block_size(mm, block);
				mm->avail -= drm_buddy_block_size(mm, block);
				if (drm_buddy_block_is_clear(block))
					mm->clear_avail -= drm_buddy_block_size(mm, block);
				list_add_tail(&block->link, &allocated);
				continue;
			} else if (!mm->clear_avail) {
				err = -ENOSPC;
				goto err_free;
			}
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	if (total_allocated < size) {
		err = -ENOSPC;
		goto err_free;
	}

	list_splice_tail(&allocated, blocks);

	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);

err_free:
	if (err == -ENOSPC && total_allocated_on_err) {
		list_splice_tail(&allocated, blocks);
		*total_allocated_on_err = total_allocated;
	} else {
		drm_buddy_free_list_internal(mm, &allocated);
	}

	return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   u64 *total_allocated_on_err,
				   struct list_head *blocks)
{
	LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size,
			     blocks, total_allocated_on_err);
}

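/*
 * Contiguous "try harder" path: for each free block of the largest
 * power-of-two order not exceeding @size, try to allocate @size bytes
 * starting at that block (RHS) and, if that comes up short, fill the
 * remainder immediately before it (LHS) so the two pieces stay contiguous.
 */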
static int __alloc_contig_try_harder(struct drm_buddy *mm,
				     u64 size,
				     u64 min_block_size,
				     struct list_head *blocks)
{
	u64 rhs_offset, lhs_offset, lhs_size, filled;
	struct drm_buddy_block *block;
	unsigned int tree, order;
	LIST_HEAD(blocks_lhs);
	unsigned long pages;
	u64 modify_size;
	int err;

	modify_size = rounddown_pow_of_two(size);
	pages = modify_size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	if (order == 0)
		return -ENOSPC;

	for_each_free_tree(tree) {
		struct rb_root *root;
		struct rb_node *iter;

		root = &mm->free_trees[tree][order];
		if (rbtree_is_empty(root))
			continue;

		iter = rb_last(root);
		while (iter) {
			block = rbtree_get_free_block(iter);

			/* Allocate blocks traversing RHS */
			rhs_offset = drm_buddy_block_offset(block);
			err = __drm_buddy_alloc_range(mm, rhs_offset, size,
						      &filled, blocks);
			if (!err || err != -ENOSPC)
				return err;

			lhs_size = max((size - filled), min_block_size);
			if (!IS_ALIGNED(lhs_size, min_block_size))
				lhs_size = round_up(lhs_size, min_block_size);

			/* Allocate blocks traversing LHS */
			lhs_offset = drm_buddy_block_offset(block) - lhs_size;
			err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
						      NULL, &blocks_lhs);
			if (!err) {
				list_splice(&blocks_lhs, blocks);
				return 0;
			} else if (err != -ENOSPC) {
				drm_buddy_free_list_internal(mm, blocks);
				return err;
			}
			/* Free blocks for the next iteration */
			drm_buddy_free_list_internal(mm, blocks);

			iter = rb_prev(iter);
		}
	}

	return -ENOSPC;
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @start: start address to begin the trimming.
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success it will contain the newly allocated blocks
 * making up @new_size. Blocks always appear in
 * ascending order.
 *
 * For contiguous allocation, we round up the size to the nearest
 * power-of-two value; drivers consume the *actual* size, so the remaining
 * portions are unused and can optionally be freed with this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 *start,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	u64 block_start, block_end;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	block_start = drm_buddy_block_offset(block);
	block_end = block_start + drm_buddy_block_size(mm, block);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	new_start = block_start;
	if (start) {
		new_start = *start;

		if (new_start < block_start)
			return -EINVAL;

		if (!IS_ALIGNED(new_start, mm->chunk_size))
			return -EINVAL;

		if (range_overflows(new_start, new_size, block_end))
			return -EINVAL;
	}

	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
	if (err) {
		mark_allocated(mm, block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

static struct drm_buddy_block *
__drm_buddy_alloc_blocks(struct drm_buddy *mm,
			 u64 start, u64 end,
			 unsigned int order,
			 unsigned long flags)
{
	if (flags & DRM_BUDDY_RANGE_ALLOCATION)
		/* Allocate traversing within the range */
		return __drm_buddy_alloc_range_bias(mm, start, end,
						    order, flags);
	else
		/* Allocate from freetree */
		return alloc_from_freetree(mm, order, flags);
}

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation in bytes
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when range limitations are enforced;
 * it traverses the tree and returns the desired block.
 *
 * alloc_from_freetree() is called when *no* range restrictions
 * are enforced; it picks the block from the freetree.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_block_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	u64 original_size, original_min_size;
	unsigned int min_order, order;
	LIST_HEAD(allocated);
	unsigned long pages;
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_block_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_block_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end) {
		if (!IS_ALIGNED(start | end, min_block_size))
			return -EINVAL;

		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
	}

	original_size = size;
	original_min_size = min_block_size;

	/* Roundup the size to power of 2 */
	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
		size = roundup_pow_of_two(size);
		min_block_size = size;
	/* Align size value to min_block_size */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);
	}

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

	if (order > mm->max_order || size > mm->size) {
		if ((flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) &&
		    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
			return __alloc_contig_try_harder(mm, original_size,
							 original_min_size, blocks);

		return -EINVAL;
	}

	do {
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			block = __drm_buddy_alloc_blocks(mm, start,
							 end,
							 order,
							 flags);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				/* Try allocation through force merge method */
				if (mm->clear_avail &&
				    !__force_merge(mm, start, end, min_order)) {
					block = __drm_buddy_alloc_blocks(mm, start,
									 end,
									 min_order,
									 flags);
					if (!IS_ERR(block)) {
						order = min_order;
						break;
					}
				}

				/*
				 * Try contiguous block allocation through
				 * try harder method.
				 */
				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
					return __alloc_contig_try_harder(mm,
									 original_size,
									 original_min_size,
									 blocks);
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(mm, block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	/* Trim the allocated block to the required size */
	if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
	    original_size != size) {
		struct list_head *trim_list;
		LIST_HEAD(temp);
		u64 trim_size;

		trim_list = &allocated;
		trim_size = original_size;

		if (!list_is_singular(&allocated)) {
			block = list_last_entry(&allocated, typeof(*block), link);
			list_move(&block->link, &temp);
			trim_list = &temp;
			trim_size = drm_buddy_block_size(mm, block) -
				    (size - original_size);
		}

		drm_buddy_block_trim(mm,
				     NULL,
				     trim_size,
				     trim_list);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &allocated);
	}

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list_internal(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
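
/*
 * Example usage (illustrative sketch only, not part of the original source):
 * a hypothetical caller allocating 8 MiB of cleared, physically contiguous
 * space anywhere in the managed range, and later releasing it again as
 * cleared memory, might look roughly like this (locking and error handling
 * elided):
 *
 *	LIST_HEAD(blocks);
 *
 *	err = drm_buddy_alloc_blocks(&mm, 0, mm.size, SZ_8M, SZ_4K,
 *				     &blocks,
 *				     DRM_BUDDY_CLEAR_ALLOCATION |
 *				     DRM_BUDDY_CONTIGUOUS_ALLOCATION);
 *	...
 *	drm_buddy_free_list(&mm, &blocks, DRM_BUDDY_CLEARED);
 */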

/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
			   struct drm_buddy_block *block,
			   struct drm_printer *p)
{
	u64 start = drm_buddy_block_offset(block);
	u64 size = drm_buddy_block_size(mm, block);

	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);

	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block, *tmp;
		struct rb_root *root;
		u64 count = 0, free;
		unsigned int tree;

		for_each_free_tree(tree) {
			root = &mm->free_trees[tree][order];

			rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
				BUG_ON(!drm_buddy_block_is_free(block));
				count++;
			}
		}

		drm_printf(p, "order-%2d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %8llu KiB", free >> 10);
		else
			drm_printf(p, "free: %8llu MiB", free >> 20);

		drm_printf(p, ", blocks: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);

static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");