/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>
#include <drm/drm_print.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway this is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore, every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
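
/*
 * A minimal usage sketch (illustrative only: the 16 MiB managed range, the
 * 4 KiB allocation size and the missing locking are assumptions made for
 * brevity, not recommendations):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, 16 * 1024 * 1024);
 *
 *	err = drm_mm_insert_node_in_range(&mm, &node, 4096, 0, 0,
 *					  0, 16 * 1024 * 1024,
 *					  DRM_MM_INSERT_BEST);
 *	if (!err) {
 *		use(node.start);
 *		drm_mm_remove_node(&node);
 *	}
 *
 *	drm_mm_takedown(&mm);
 *
 * use() above stands in for whatever the driver does with the allocated
 * offset; -ENOSPC means no hole was large enough.
 */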

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		stack_depot_snprint(node->stack, buf, BUFSZ, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline __maybe_unused, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
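
/*
 * __drm_mm_interval_first() is the helper behind drm_mm_for_each_node_in_range()
 * from <drm/drm_mm.h>. A short sketch of walking every node overlapping a
 * window (the window bounds below are arbitrary, illustrative values):
 *
 *	struct drm_mm_node *entry;
 *
 *	drm_mm_for_each_node_in_range(entry, &mm, 0x0, 0x100000)
 *		pr_info("node [%llx + %llx]\n", entry->start, entry->size);
 */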

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct drm_mm_node, rb_hole_addr,
			 u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
	struct drm_mm_node *parent;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
		if (start < HOLE_ADDR(parent))
			link = &parent->rb_hole_addr.rb_left;
		else
			link = &parent->rb_hole_addr.rb_right;
	}

	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
			   &augment_callbacks);
	node->hole_size = 0;
	node->subtree_max_hole = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		if (!usable_hole_addr(rb, size))
			break;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole_addr(mm, start, size);

	case DRM_MM_INSERT_HIGH:
		return find_hole_addr(mm, end, size);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right).
 * @last: last rb member to traverse (either rb_right or rb_left).
 *
 * This macro declares a function to return the next hole of the addr rb tree.
 * While traversing the tree we take the searched size into account and only
 * visit branches with potentially big enough holes.
 */

#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
{									\
	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
									\
	if (!entry || RB_EMPTY_NODE(node))				\
		return NULL;						\
									\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
		return rb_hole_addr_to_node(node);			\
	}								\
									\
	while ((parent = rb_parent(node)) && node == parent->first)	\
		node = parent;						\
									\
	return rb_hole_addr_to_node(parent);				\
}

DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  u64 size,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return next_hole_low_addr(node, size);

	case DRM_MM_INSERT_HIGH:
		return next_hole_high_addr(node, size);

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole_addr(mm, node->start, 0);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
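
/*
 * Sketch of taking over a firmware-programmed scanout buffer before the
 * allocator can hand out that range. The fw_fb_base/fw_fb_size variables and
 * the surrounding error handling are hypothetical driver code, not part of
 * the drm_mm API:
 *
 *	struct drm_mm_node fw_fb = {};
 *	int err;
 *
 *	fw_fb.start = fw_fb_base;
 *	fw_fb.size = fw_fb_size;
 *
 *	err = drm_mm_reserve_node(&mm, &fw_fb);
 *	if (err)
 *		return err;
 *
 * -ENOSPC here means the requested range is already (partially) occupied.
 */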

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
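
/*
 * Sketch of a range- and alignment-restricted, top-down allocation: 64 KiB,
 * aligned to 4 KiB, placed as high as possible within the first 256 MiB of
 * the managed range (all numbers are made up for illustration):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = drm_mm_insert_node_in_range(&mm, &node, 64 * 1024, 4096, 0,
 *					  0, 256 * 1024 * 1024,
 *					  DRM_MM_INSERT_HIGH);
 *	if (err)
 *		...	no hole satisfies the size/alignment/range constraints
 */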

static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select all objects from the tail of an LRU until there's
 * a suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before
 * a scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of the flow is given below.
 */
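
/*
 * A hedged sketch of the scan flow described above. The LRU list, the object
 * struct and its lru/scan_link/mm_node members are driver-specific
 * assumptions, not part of the drm_mm API:
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj;
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init(&scan, &mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *	list_for_each_entry(obj, &lru, lru) {
 *		list_add(&obj->scan_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->mm_node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 * The driver then walks evict_list (which list_add() left in reverse scan
 * order) and calls drm_mm_scan_remove_block() on every node; nodes for which
 * it returns true must be evicted before retrying the allocation.
 */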

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() using
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;

#ifdef CONFIG_DRM_DEBUG_MM
	stack_depot_init();
#endif
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
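
/*
 * Sketch of dumping the allocator state from a debugfs show callback. The
 * seq_file plumbing and the my_dev->mm member are assumptions about the
 * surrounding driver code, shown only for illustration:
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		drm_mm_print(&my_dev->mm, &p);
 *		return 0;
 *	}
 */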