1// SPDX-License-Identifier: GPL-2.0-only OR MIT
2/*
3 * Copyright (c) 2022 Red Hat.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Danilo Krummrich <dakr@redhat.com>
25 *
26 */
27
28#include <drm/drm_gpuvm.h>
29#include <drm/drm_print.h>
30
31#include <linux/export.h>
32#include <linux/interval_tree_generic.h>
33#include <linux/mm.h>
34
35/**
36 * DOC: Overview
37 *
 * The DRM GPU VA Manager, represented by struct drm_gpuvm, keeps track of a
39 * GPU's virtual address (VA) space and manages the corresponding virtual
40 * mappings represented by &drm_gpuva objects. It also keeps track of the
41 * mapping's backing &drm_gem_object buffers.
42 *
43 * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
44 * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
45 *
46 * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
47 * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
48 *
49 * The GPU VA manager internally uses a rb-tree to manage the
50 * &drm_gpuva mappings within a GPU's virtual address space.
51 *
52 * The &drm_gpuvm structure contains a special &drm_gpuva representing the
53 * portion of VA space reserved by the kernel. This node is initialized together
54 * with the GPU VA manager instance and removed when the GPU VA manager is
55 * destroyed.
56 *
 * In a typical application, drivers would embed struct drm_gpuvm and
 * struct drm_gpuva within their own driver specific structures; hence, the GPU
 * VA manager neither performs memory allocations of its own nor allocates
 * &drm_gpuva entries.
61 *
62 * The data structures needed to store &drm_gpuvas within the &drm_gpuvm are
63 * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
64 * entries from within dma-fence signalling critical sections it is enough to
65 * pre-allocate the &drm_gpuva structures.
66 *
67 * &drm_gem_objects which are private to a single VM can share a common
68 * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec).
69 * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(), in
70 * the following called 'resv object', which serves as the container of the
71 * GPUVM's shared &dma_resv. This resv object can be a driver specific
72 * &drm_gem_object, such as the &drm_gem_object containing the root page table,
73 * but it can also be a 'dummy' object, which can be allocated with
74 * drm_gpuvm_resv_object_alloc().
75 *
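 * As a minimal initialization sketch, assuming a hypothetical driver structure
 * driver_gpuvm embedding the &drm_gpuvm as well as hypothetical
 * &drm_gpuvm_ops and VA space bounds::
 *
 *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *
 *	if (!r_obj)
 *		return -ENOMEM;
 *
 *	drm_gpuvm_init(&driver_gpuvm->base, "driver-vm", 0, drm, r_obj,
 *		       0, 1ull << 48,		// managed VA space
 *		       0, SZ_4K,		// kernel reserved node
 *		       &driver_gpuvm_ops);
 *
 *	// drm_gpuvm_init() takes its own reference on the resv object.
 *	drm_gem_object_put(r_obj);
 *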
76 * In order to connect a struct drm_gpuva to its backing &drm_gem_object each
77 * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
78 * &drm_gpuvm_bo contains a list of &drm_gpuva structures.
79 *
80 * A &drm_gpuvm_bo is an abstraction that represents a combination of a
81 * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique.
82 * This is ensured by the API through drm_gpuvm_bo_obtain() and
83 * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
84 * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
85 * particular combination. If not present, a new instance is created and linked
86 * to the &drm_gem_object.
87 *
 * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
 * as entries for the &drm_gpuvm's lists of external and evicted objects. Those
 * lists are maintained in order to accelerate locking of dma-resv locks and
 * validation of evicted objects bound in a &drm_gpuvm. For instance, the
 * &dma_resv locks of all &drm_gem_objects mapped in a given &drm_gpuvm can be
 * locked by calling drm_gpuvm_exec_lock(). Once locked, drivers can call
 * drm_gpuvm_validate() in order to validate all evicted &drm_gem_objects. It is
 * also possible to lock additional &drm_gem_objects by providing the
 * corresponding parameters to drm_gpuvm_exec_lock(), as well as to open-code
 * the &drm_exec loop while making use of helper functions such as
 * drm_gpuvm_prepare_range() or drm_gpuvm_prepare_objects().
99 *
 * Every bound &drm_gem_object is treated as an external object when its
 * &dma_resv structure differs from the &drm_gpuvm's common &dma_resv structure.
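 *
 * As a rough sketch of a driver's job submission path, assuming a hypothetical
 * driver_submit_job() helper returning the job's &dma_fence::
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_validate(gpuvm, &vm_exec.exec);
 *	if (ret)
 *		goto out_unlock;
 *
 *	fence = driver_submit_job();
 *	drm_gpuvm_resv_add_fence(gpuvm, &vm_exec.exec, fence,
 *				 DMA_RESV_USAGE_BOOKKEEP,
 *				 DMA_RESV_USAGE_BOOKKEEP);
 *
 * out_unlock:
 *	drm_exec_fini(&vm_exec.exec);
 *	return ret;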
102 */
103
104/**
105 * DOC: Split and Merge
106 *
107 * Besides its capability to manage and represent a GPU VA space, the
108 * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
109 * sequence of operations to satisfy a given map or unmap request.
110 *
111 * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
112 * and merging of existing GPU VA mappings with the ones that are requested to
113 * be mapped or unmapped. This feature is required by the Vulkan API to
 * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
115 * as VM BIND.
116 *
117 * Drivers can call drm_gpuvm_sm_map() to receive a sequence of callbacks
118 * containing map, unmap and remap operations for a given newly requested
119 * mapping. The sequence of callbacks represents the set of operations to
120 * execute in order to integrate the new mapping cleanly into the current state
121 * of the GPU VA space.
122 *
 * Depending on how the new GPU VA mapping intersects with the existing mappings
 * of the GPU VA space, the &drm_gpuvm_ops callbacks contain an arbitrary number
 * of unmap operations, a maximum of two remap operations and a single map
126 * operation. The caller might receive no callback at all if no operation is
127 * required, e.g. if the requested mapping already exists in the exact same way.
128 *
129 * The single map operation represents the original map operation requested by
130 * the caller.
131 *
 * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
 * &drm_gpuva to unmap is physically contiguous with the original mapping
 * request. Optionally, if 'keep' is set, drivers may keep the actual page table
 * entries for this &drm_gpuva, add only the missing page table entries and
 * update the &drm_gpuvm's view of things accordingly.
137 *
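 * For instance, within the driver's unmap step this may look like the
 * following sketch, where driver_unmap_ptes() is a hypothetical helper::
 *
 *	if (!op->unmap.keep)
 *		driver_unmap_ptes(op->unmap.va->va.addr,
 *				  op->unmap.va->va.range);
 *
 *	drm_gpuva_unmap(&op->unmap);
 *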
138 * Drivers may do the same optimization, namely delta page table updates, also
139 * for remap operations. This is possible since &drm_gpuva_op_remap consists of
140 * one unmap operation and one or two map operations, such that drivers can
141 * derive the page table update delta accordingly.
142 *
143 * Note that there can't be more than two existing mappings to split up, one at
144 * the beginning and one at the end of the new mapping, hence there is a
145 * maximum of two remap operations.
146 *
 * Analogous to drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
 * call back into the driver in order to unmap a range of GPU VA space. The
 * logic behind this function is much simpler though: For all existing mappings
 * enclosed by the given range, unmap operations are created. For mappings which
 * are only partially located within the given range, remap operations are
 * created such that those mappings are split up and re-mapped partially.
153 *
154 * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
155 * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
156 * to directly obtain an instance of struct drm_gpuva_ops containing a list of
157 * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
158 * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
159 * calling drm_gpuvm_sm_map() or drm_gpuvm_sm_unmap(). While this way requires
160 * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
161 * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
162 * allocations are possible (e.g. to allocate GPU page tables) and once in the
163 * dma-fence signalling critical path.
164 *
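 * For instance, a minimal sketch of the unmap path based on pre-allocated
 * operations, with error unwinding omitted and gpuvm, addr and range taken
 * from the driver's request::
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_sm_unmap_ops_create(gpuvm, addr, range);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		// Only unmap and remap operations can show up here; handle
 *		// them as shown in the examples below.
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 *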
165 * To update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
166 * drm_gpuva_remove() may be used. These functions can safely be used from
167 * &drm_gpuvm_ops callbacks originating from drm_gpuvm_sm_map() or
168 * drm_gpuvm_sm_unmap(). However, it might be more convenient to use the
169 * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
170 * drm_gpuva_unmap() instead.
171 *
172 * The following diagram depicts the basic relationships of existing GPU VA
173 * mappings, a newly requested mapping and the resulting mappings as implemented
174 * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
175 *
176 * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
177 * could be kept.
178 *
179 * ::
180 *
181 * 0 a 1
182 * old: |-----------| (bo_offset=n)
183 *
184 * 0 a 1
185 * req: |-----------| (bo_offset=n)
186 *
187 * 0 a 1
188 * new: |-----------| (bo_offset=n)
189 *
190 *
191 * 2) Requested mapping is identical, except for the BO offset, hence replace
192 * the mapping.
193 *
194 * ::
195 *
196 * 0 a 1
197 * old: |-----------| (bo_offset=n)
198 *
199 * 0 a 1
200 * req: |-----------| (bo_offset=m)
201 *
202 * 0 a 1
203 * new: |-----------| (bo_offset=m)
204 *
205 *
206 * 3) Requested mapping is identical, except for the backing BO, hence replace
207 * the mapping.
208 *
209 * ::
210 *
211 * 0 a 1
212 * old: |-----------| (bo_offset=n)
213 *
214 * 0 b 1
215 * req: |-----------| (bo_offset=n)
216 *
217 * 0 b 1
218 * new: |-----------| (bo_offset=n)
219 *
220 *
221 * 4) Existent mapping is a left aligned subset of the requested one, hence
222 * replace the existing one.
223 *
224 * ::
225 *
226 * 0 a 1
227 * old: |-----| (bo_offset=n)
228 *
229 * 0 a 2
230 * req: |-----------| (bo_offset=n)
231 *
232 * 0 a 2
233 * new: |-----------| (bo_offset=n)
234 *
235 * .. note::
236 * We expect to see the same result for a request with a different BO
237 * and/or non-contiguous BO offset.
238 *
239 *
240 * 5) Requested mapping's range is a left aligned subset of the existing one,
241 * but backed by a different BO. Hence, map the requested mapping and split
242 * the existing one adjusting its BO offset.
243 *
244 * ::
245 *
246 * 0 a 2
247 * old: |-----------| (bo_offset=n)
248 *
249 * 0 b 1
250 * req: |-----| (bo_offset=n)
251 *
252 * 0 b 1 a' 2
 * new: |-----|-----| (b.bo_offset=n, a'.bo_offset=n+1)
254 *
255 * .. note::
256 * We expect to see the same result for a request with a different BO
257 * and/or non-contiguous BO offset.
258 *
259 *
260 * 6) Existent mapping is a superset of the requested mapping. Split it up, but
261 * indicate that the backing PTEs could be kept.
262 *
263 * ::
264 *
265 * 0 a 2
266 * old: |-----------| (bo_offset=n)
267 *
268 * 0 a 1
269 * req: |-----| (bo_offset=n)
270 *
271 * 0 a 1 a' 2
272 * new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
273 *
274 *
275 * 7) Requested mapping's range is a right aligned subset of the existing one,
276 * but backed by a different BO. Hence, map the requested mapping and split
277 * the existing one, without adjusting the BO offset.
278 *
279 * ::
280 *
281 * 0 a 2
282 * old: |-----------| (bo_offset=n)
283 *
284 * 1 b 2
285 * req: |-----| (bo_offset=m)
286 *
287 * 0 a 1 b 2
288 * new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
289 *
290 *
291 * 8) Existent mapping is a superset of the requested mapping. Split it up, but
292 * indicate that the backing PTEs could be kept.
293 *
294 * ::
295 *
296 * 0 a 2
297 * old: |-----------| (bo_offset=n)
298 *
299 * 1 a 2
300 * req: |-----| (bo_offset=n+1)
301 *
302 * 0 a' 1 a 2
303 * new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
304 *
305 *
306 * 9) Existent mapping is overlapped at the end by the requested mapping backed
307 * by a different BO. Hence, map the requested mapping and split up the
308 * existing one, without adjusting the BO offset.
309 *
310 * ::
311 *
312 * 0 a 2
313 * old: |-----------| (bo_offset=n)
314 *
315 * 1 b 3
316 * req: |-----------| (bo_offset=m)
317 *
318 * 0 a 1 b 3
319 * new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
320 *
321 *
322 * 10) Existent mapping is overlapped by the requested mapping, both having the
323 * same backing BO with a contiguous offset. Indicate the backing PTEs of
324 * the old mapping could be kept.
325 *
326 * ::
327 *
328 * 0 a 2
329 * old: |-----------| (bo_offset=n)
330 *
331 * 1 a 3
332 * req: |-----------| (bo_offset=n+1)
333 *
334 * 0 a' 1 a 3
335 * new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
336 *
337 *
338 * 11) Requested mapping's range is a centered subset of the existing one
339 * having a different backing BO. Hence, map the requested mapping and split
340 * up the existing one in two mappings, adjusting the BO offset of the right
341 * one accordingly.
342 *
343 * ::
344 *
345 * 0 a 3
346 * old: |-----------------| (bo_offset=n)
347 *
348 * 1 b 2
349 * req: |-----| (bo_offset=m)
350 *
351 * 0 a 1 b 2 a' 3
352 * new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
353 *
354 *
355 * 12) Requested mapping is a contiguous subset of the existing one. Split it
356 * up, but indicate that the backing PTEs could be kept.
357 *
358 * ::
359 *
360 * 0 a 3
361 * old: |-----------------| (bo_offset=n)
362 *
363 * 1 a 2
364 * req: |-----| (bo_offset=n+1)
365 *
366 * 0 a' 1 a 2 a'' 3
 * new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
368 *
369 *
370 * 13) Existent mapping is a right aligned subset of the requested one, hence
371 * replace the existing one.
372 *
373 * ::
374 *
375 * 1 a 2
376 * old: |-----| (bo_offset=n+1)
377 *
378 * 0 a 2
379 * req: |-----------| (bo_offset=n)
380 *
381 * 0 a 2
382 * new: |-----------| (bo_offset=n)
383 *
384 * .. note::
385 * We expect to see the same result for a request with a different bo
386 * and/or non-contiguous bo_offset.
387 *
388 *
389 * 14) Existent mapping is a centered subset of the requested one, hence
390 * replace the existing one.
391 *
392 * ::
393 *
394 * 1 a 2
395 * old: |-----| (bo_offset=n+1)
396 *
397 * 0 a 3
398 * req: |----------------| (bo_offset=n)
399 *
400 * 0 a 3
401 * new: |----------------| (bo_offset=n)
402 *
403 * .. note::
404 * We expect to see the same result for a request with a different bo
405 * and/or non-contiguous bo_offset.
406 *
407 *
 * 15) Existent mapping is overlapped at the beginning by the requested mapping
409 * backed by a different BO. Hence, map the requested mapping and split up
410 * the existing one, adjusting its BO offset accordingly.
411 *
412 * ::
413 *
414 * 1 a 3
415 * old: |-----------| (bo_offset=n)
416 *
417 * 0 b 2
418 * req: |-----------| (bo_offset=m)
419 *
420 * 0 b 2 a' 3
 * new: |-----------|-----| (b.bo_offset=m, a'.bo_offset=n+1)
422 */
423
424/**
425 * DOC: Madvise Logic - Splitting and Traversal
426 *
427 * This logic handles GPU VA range updates by generating remap and map operations
428 * without performing unmaps or merging existing mappings.
429 *
430 * 1) The requested range lies entirely within a single drm_gpuva. The logic splits
431 * the existing mapping at the start and end boundaries and inserts a new map.
432 *
433 * ::
434 * a start end b
435 * pre: |-----------------------|
436 * drm_gpuva1
437 *
438 * a start end b
439 * new: |-----|=========|-------|
440 * remap map remap
441 *
442 * one REMAP and one MAP : Same behaviour as SPLIT and MERGE
443 *
444 * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
445 * across boundaries, remapping the start and end segments, and inserting two
446 * map operations to cover the full range.
447 *
448 * :: a start b c end d
449 * pre: |------------------|--------------|------------------|
450 * drm_gpuva1 drm_gpuva2 drm_gpuva3
451 *
452 * a start b c end d
453 * new: |-------|==========|--------------|========|---------|
454 * remap1 map1 drm_gpuva2 map2 remap2
455 *
456 * two REMAPS and two MAPS
457 *
 * 3) Either the start or the end of the requested range lies within a drm_gpuva.
 *    One remap and one map operation are generated to update the affected
 *    portion.
460 *
461 *
462 * :: a/start b c end d
463 * pre: |------------------|--------------|------------------|
464 * drm_gpuva1 drm_gpuva2 drm_gpuva3
465 *
466 * a/start b c end d
467 * new: |------------------|--------------|========|---------|
468 * drm_gpuva1 drm_gpuva2 map1 remap1
469 *
470 * :: a start b c/end d
471 * pre: |------------------|--------------|------------------|
472 * drm_gpuva1 drm_gpuva2 drm_gpuva3
473 *
474 * a start b c/end d
475 * new: |-------|==========|--------------|------------------|
476 * remap1 map1 drm_gpuva2 drm_gpuva3
477 *
478 * one REMAP and one MAP
479 *
480 * 4) Both start and end align with existing drm_gpuva boundaries. No operations
481 * are needed as the range is already covered.
482 *
483 * 5) No existing drm_gpuvas. No operations.
484 *
 * Unlike drm_gpuvm_sm_map_ops_create(), this logic avoids unmaps and merging,
 * focusing solely on remap and map operations for efficient traversal and
 * update.
487 */
488
489/**
490 * DOC: Locking
491 *
 * In terms of managing &drm_gpuva entries, DRM GPUVM does not take care of
 * locking itself; it is the driver's responsibility to take care of locking.
494 * Drivers might want to protect the following operations: inserting, removing
495 * and iterating &drm_gpuva objects as well as generating all kinds of
496 * operations, such as split / merge or prefetch.
497 *
 * DRM GPUVM also does not take care of the locking of the backing
 * &drm_gem_object buffers' GPU VA lists and &drm_gpuvm_bo abstractions by
 * itself; drivers are responsible for enforcing mutual exclusion using either
 * the GEM's dma_resv lock or the GEM's gpuva.lock mutex.
502 *
503 * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
504 * the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed
505 * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also
506 * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put().
507 *
 * The latter is required since, on creation and destruction of a &drm_gpuvm_bo,
 * the &drm_gpuvm_bo is attached to / removed from the &drm_gem_object's gpuva
 * list.
510 * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and
511 * &drm_gem_object must be able to observe previous creations and destructions
512 * of &drm_gpuvm_bos in order to keep instances unique.
513 *
514 * The &drm_gpuvm's lists for keeping track of external and evicted objects are
515 * protected against concurrent insertion / removal and iteration internally.
516 *
 * However, drivers still need to protect against concurrent calls to functions
 * iterating those lists, namely drm_gpuvm_prepare_objects() and
 * drm_gpuvm_validate().
520 *
521 * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate
522 * that the corresponding &dma_resv locks are held in order to protect the
523 * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and
524 * the corresponding lockdep checks are enabled. This is an optimization for
525 * drivers which are capable of taking the corresponding &dma_resv locks and
526 * hence do not require internal locking.
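 *
 * A minimal sketch of opting into this mode at initialization time, with the
 * surrounding driver code (r_obj, VA space bounds, driver_gpuvm_ops) being
 * hypothetical::
 *
 *	drm_gpuvm_init(gpuvm, "driver-vm", DRM_GPUVM_RESV_PROTECTED, drm,
 *		       r_obj, 0, 1ull << 48, 0, SZ_4K, &driver_gpuvm_ops);
 *
 * With this flag set, the GPUVM's common &dma_resv lock must be held whenever
 * the external and evicted object lists are potentially modified, e.g. when
 * dropping the last &drm_gpuvm_bo reference with drm_gpuvm_bo_put(); the
 * corresponding lockdep checks will complain otherwise.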
527 */
528
529/**
530 * DOC: Examples
531 *
532 * This section gives two examples on how to let the DRM GPUVA Manager generate
533 * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
534 * make use of them.
535 *
536 * The below code is strictly limited to illustrate the generic usage pattern.
537 * To maintain simplicity, it doesn't make use of any abstractions for common
538 * code, different (asynchronous) stages with fence signalling critical paths,
539 * any other helpers or error handling in terms of freeing memory and dropping
540 * previously taken locks.
541 *
542 * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
543 *
544 * // Allocates a new &drm_gpuva.
545 * struct drm_gpuva * driver_gpuva_alloc(void);
546 *
547 * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
548 * // structure in individual driver structures and lock the dma-resv with
549 * // drm_exec or similar helpers.
550 * int driver_mapping_create(struct drm_gpuvm *gpuvm,
551 * u64 addr, u64 range,
552 * struct drm_gem_object *obj, u64 offset)
553 * {
554 * struct drm_gpuvm_map_req map_req = {
555 * .map.va.addr = addr,
556 * .map.va.range = range,
557 * .map.gem.obj = obj,
558 * .map.gem.offset = offset,
559 * };
560 * struct drm_gpuva_ops *ops;
 *		struct drm_gpuva_op *op;
562 * struct drm_gpuvm_bo *vm_bo;
563 *
564 * driver_lock_va_space();
565 * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
566 * if (IS_ERR(ops))
567 * return PTR_ERR(ops);
568 *
569 * vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
570 * if (IS_ERR(vm_bo))
571 * return PTR_ERR(vm_bo);
572 *
573 * drm_gpuva_for_each_op(op, ops) {
574 * struct drm_gpuva *va;
575 *
576 * switch (op->op) {
577 * case DRM_GPUVA_OP_MAP:
578 * va = driver_gpuva_alloc();
579 * if (!va)
580 * ; // unwind previous VA space updates,
581 * // free memory and unlock
582 *
583 * driver_vm_map();
584 * drm_gpuva_map(gpuvm, va, &op->map);
585 * drm_gpuva_link(va, vm_bo);
586 *
587 * break;
588 * case DRM_GPUVA_OP_REMAP: {
589 * struct drm_gpuva *prev = NULL, *next = NULL;
590 *
591 * va = op->remap.unmap->va;
592 *
593 * if (op->remap.prev) {
594 * prev = driver_gpuva_alloc();
595 * if (!prev)
596 * ; // unwind previous VA space
597 * // updates, free memory and
598 * // unlock
599 * }
600 *
601 * if (op->remap.next) {
602 * next = driver_gpuva_alloc();
603 * if (!next)
604 * ; // unwind previous VA space
605 * // updates, free memory and
606 * // unlock
607 * }
608 *
609 * driver_vm_remap();
610 * drm_gpuva_remap(prev, next, &op->remap);
611 *
612 * if (prev)
613 * drm_gpuva_link(prev, va->vm_bo);
614 * if (next)
615 * drm_gpuva_link(next, va->vm_bo);
616 * drm_gpuva_unlink(va);
617 *
618 * break;
619 * }
620 * case DRM_GPUVA_OP_UNMAP:
 *				va = op->unmap.va;
622 *
623 * driver_vm_unmap();
624 * drm_gpuva_unlink(va);
625 * drm_gpuva_unmap(&op->unmap);
626 *
627 * break;
628 * default:
629 * break;
630 * }
631 * }
632 * drm_gpuvm_bo_put(vm_bo);
633 * driver_unlock_va_space();
634 *
635 * return 0;
636 * }
637 *
638 * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
639 *
640 * struct driver_context {
641 * struct drm_gpuvm *gpuvm;
642 * struct drm_gpuvm_bo *vm_bo;
643 * struct drm_gpuva *new_va;
644 * struct drm_gpuva *prev_va;
645 * struct drm_gpuva *next_va;
646 * };
647 *
648 * // ops to pass to drm_gpuvm_init()
649 * static const struct drm_gpuvm_ops driver_gpuvm_ops = {
650 * .sm_step_map = driver_gpuva_map,
651 * .sm_step_remap = driver_gpuva_remap,
652 * .sm_step_unmap = driver_gpuva_unmap,
653 * };
654 *
655 * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
656 * // structure in individual driver structures and lock the dma-resv with
657 * // drm_exec or similar helpers.
658 * int driver_mapping_create(struct drm_gpuvm *gpuvm,
659 * u64 addr, u64 range,
660 * struct drm_gem_object *obj, u64 offset)
661 * {
662 * struct driver_context ctx;
664 * struct drm_gpuva_ops *ops;
665 * struct drm_gpuva_op *op;
666 * int ret = 0;
667 *
668 * ctx.gpuvm = gpuvm;
669 *
670 * ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
671 * ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
672 * ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
673 * ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
 *		if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !ctx.vm_bo) {
675 * ret = -ENOMEM;
676 * goto out;
677 * }
678 *
679 * // Typically protected with a driver specific GEM gpuva lock
680 * // used in the fence signaling path for drm_gpuva_link() and
681 * // drm_gpuva_unlink(), hence pre-allocate.
682 * ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo);
683 *
684 * driver_lock_va_space();
685 * ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset);
686 * driver_unlock_va_space();
687 *
688 * out:
689 * drm_gpuvm_bo_put(ctx.vm_bo);
690 * kfree(ctx.new_va);
691 * kfree(ctx.prev_va);
692 * kfree(ctx.next_va);
693 * return ret;
694 * }
695 *
696 * int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
697 * {
698 * struct driver_context *ctx = __ctx;
699 *
 *		drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
701 *
702 * drm_gpuva_link(ctx->new_va, ctx->vm_bo);
703 *
704 * // prevent the new GPUVA from being freed in
705 * // driver_mapping_create()
706 * ctx->new_va = NULL;
707 *
708 * return 0;
709 * }
710 *
711 * int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
712 * {
713 * struct driver_context *ctx = __ctx;
714 * struct drm_gpuva *va = op->remap.unmap->va;
715 *
716 * drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
717 *
718 * if (op->remap.prev) {
719 * drm_gpuva_link(ctx->prev_va, va->vm_bo);
720 * ctx->prev_va = NULL;
721 * }
722 *
723 * if (op->remap.next) {
724 * drm_gpuva_link(ctx->next_va, va->vm_bo);
725 * ctx->next_va = NULL;
726 * }
727 *
728 * drm_gpuva_unlink(va);
729 * kfree(va);
730 *
731 * return 0;
732 * }
733 *
734 * int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
735 * {
736 * drm_gpuva_unlink(op->unmap.va);
737 * drm_gpuva_unmap(&op->unmap);
738 * kfree(op->unmap.va);
739 *
740 * return 0;
741 * }
742 */
743
744/**
745 * get_next_vm_bo_from_list() - get the next vm_bo element
746 * @__gpuvm: the &drm_gpuvm
747 * @__list_name: the name of the list we're iterating on
748 * @__local_list: a pointer to the local list used to store already iterated items
749 * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list()
750 *
751 * This helper is here to provide lockless list iteration. Lockless as in, the
752 * iterator releases the lock immediately after picking the first element from
753 * the list, so list insertion and deletion can happen concurrently.
754 *
755 * Elements popped from the original list are kept in a local list, so removal
756 * and is_empty checks can still happen while we're iterating the list.
757 */
758#define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo) \
759 ({ \
760 struct drm_gpuvm_bo *__vm_bo = NULL; \
761 \
762 drm_gpuvm_bo_put(__prev_vm_bo); \
763 \
764 spin_lock(&(__gpuvm)->__list_name.lock); \
765 if (!(__gpuvm)->__list_name.local_list) \
766 (__gpuvm)->__list_name.local_list = __local_list; \
767 else \
768 drm_WARN_ON((__gpuvm)->drm, \
769 (__gpuvm)->__list_name.local_list != __local_list); \
770 \
771 while (!list_empty(&(__gpuvm)->__list_name.list)) { \
772 __vm_bo = list_first_entry(&(__gpuvm)->__list_name.list, \
773 struct drm_gpuvm_bo, \
774 list.entry.__list_name); \
775 if (kref_get_unless_zero(&__vm_bo->kref)) { \
776 list_move_tail(&(__vm_bo)->list.entry.__list_name, \
777 __local_list); \
778 break; \
779 } else { \
780 list_del_init(&(__vm_bo)->list.entry.__list_name); \
781 __vm_bo = NULL; \
782 } \
783 } \
784 spin_unlock(&(__gpuvm)->__list_name.lock); \
785 \
786 __vm_bo; \
787 })
788
789/**
790 * for_each_vm_bo_in_list() - internal vm_bo list iterator
791 * @__gpuvm: the &drm_gpuvm
792 * @__list_name: the name of the list we're iterating on
793 * @__local_list: a pointer to the local list used to store already iterated items
794 * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step
795 *
796 * This helper is here to provide lockless list iteration. Lockless as in, the
797 * iterator releases the lock immediately after picking the first element from the
798 * list, hence list insertion and deletion can happen concurrently.
799 *
800 * It is not allowed to re-assign the vm_bo pointer from inside this loop.
801 *
802 * Typical use:
803 *
804 * struct drm_gpuvm_bo *vm_bo;
805 * LIST_HEAD(my_local_list);
806 *
807 * ret = 0;
808 * for_each_vm_bo_in_list(gpuvm, <list_name>, &my_local_list, vm_bo) {
809 * ret = do_something_with_vm_bo(..., vm_bo);
810 * if (ret)
811 * break;
812 * }
813 * // Drop ref in case we break out of the loop.
814 * drm_gpuvm_bo_put(vm_bo);
815 * restore_vm_bo_list(gpuvm, <list_name>, &my_local_list);
816 *
817 *
818 * Only used for internal list iterations, not meant to be exposed to the outside
819 * world.
820 */
821#define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo) \
822 for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \
823 __local_list, NULL); \
824 __vm_bo; \
825 __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \
826 __local_list, __vm_bo))
827
828static void
829__restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock,
830 struct list_head *list, struct list_head **local_list)
831{
832 /* Merge back the two lists, moving local list elements to the
833 * head to preserve previous ordering, in case it matters.
834 */
835 spin_lock(lock);
836 if (*local_list) {
837 list_splice(*local_list, list);
838 *local_list = NULL;
839 }
840 spin_unlock(lock);
841}
842
843/**
844 * restore_vm_bo_list() - move vm_bo elements back to their original list
845 * @__gpuvm: the &drm_gpuvm
846 * @__list_name: the name of the list we're iterating on
847 *
848 * When we're done iterating a vm_bo list, we should call restore_vm_bo_list()
849 * to restore the original state and let new iterations take place.
850 */
851#define restore_vm_bo_list(__gpuvm, __list_name) \
852 __restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock, \
853 &(__gpuvm)->__list_name.list, \
854 &(__gpuvm)->__list_name.local_list)
855
856static void
857cond_spin_lock(spinlock_t *lock, bool cond)
858{
859 if (cond)
860 spin_lock(lock);
861}
862
863static void
864cond_spin_unlock(spinlock_t *lock, bool cond)
865{
866 if (cond)
867 spin_unlock(lock);
868}
869
870static void
871__drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
872 struct list_head *entry, struct list_head *list)
873{
874 cond_spin_lock(lock, !!lock);
875 if (list_empty(entry))
876 list_add_tail(entry, list);
877 cond_spin_unlock(lock, !!lock);
878}
879
880/**
881 * drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup
882 * @vm_bo: the &drm_gpuvm_bo
883 *
884 * When a vm_bo is scheduled for cleanup using the bo_defer list, it is not
885 * immediately removed from the evict and extobj lists. Therefore, anyone
886 * iterating these lists should skip entries that are being destroyed.
887 *
888 * Checking the refcount without incrementing it is okay as long as the lock
889 * protecting the evict/extobj list is held for as long as you are using the
890 * vm_bo, because even if the refcount hits zero while you are using it, freeing
891 * the vm_bo requires taking the list's lock.
892 *
893 * Zombie entries can be observed on the evict and extobj lists regardless of
894 * whether DRM_GPUVM_RESV_PROTECTED is used, but they remain on the lists for a
895 * longer time when the resv lock is used because we can't take the resv lock
896 * during run_job() in immediate mode, meaning that they need to remain on the
897 * lists until drm_gpuvm_bo_deferred_cleanup() is called.
898 */
899static bool
900drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo)
901{
902 return !kref_read(&vm_bo->kref);
903}
904
905/**
906 * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
907 * @__vm_bo: the &drm_gpuvm_bo
908 * @__list_name: the name of the list to insert into
909 * @__lock: whether to lock with the internal spinlock
910 *
911 * Inserts the given @__vm_bo into the list specified by @__list_name.
912 */
913#define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock) \
914 __drm_gpuvm_bo_list_add((__vm_bo)->vm, \
915 __lock ? &(__vm_bo)->vm->__list_name.lock : \
916 NULL, \
917 &(__vm_bo)->list.entry.__list_name, \
918 &(__vm_bo)->vm->__list_name.list)
919
920static void
921__drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock,
922 struct list_head *entry, bool init)
923{
924 cond_spin_lock(lock, !!lock);
925 if (init) {
926 if (!list_empty(entry))
927 list_del_init(entry);
928 } else {
929 list_del(entry);
930 }
931 cond_spin_unlock(lock, !!lock);
932}
933
934/**
935 * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list
936 * @__vm_bo: the &drm_gpuvm_bo
937 * @__list_name: the name of the list to insert into
938 * @__lock: whether to lock with the internal spinlock
939 *
940 * Removes the given @__vm_bo from the list specified by @__list_name.
941 */
942#define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock) \
943 __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
944 __lock ? &(__vm_bo)->vm->__list_name.lock : \
945 NULL, \
946 &(__vm_bo)->list.entry.__list_name, \
947 true)
948
949/**
950 * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list
951 * @__vm_bo: the &drm_gpuvm_bo
952 * @__list_name: the name of the list to insert into
953 * @__lock: whether to lock with the internal spinlock
954 *
955 * Removes the given @__vm_bo from the list specified by @__list_name.
956 */
957#define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock) \
958 __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
959 __lock ? &(__vm_bo)->vm->__list_name.lock : \
960 NULL, \
961 &(__vm_bo)->list.entry.__list_name, \
962 false)
963
964#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)
965
966#define GPUVA_START(node) ((node)->va.addr)
967#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
968
/* We do not actually use drm_gpuva_it_next(); tell the compiler not to
 * complain about this.
971 */
972INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
973 GPUVA_START, GPUVA_LAST, static __maybe_unused,
974 drm_gpuva_it)
975
976static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
977 struct drm_gpuva *va);
978static void __drm_gpuva_remove(struct drm_gpuva *va);
979
980static bool
981drm_gpuvm_check_overflow(u64 addr, u64 range)
982{
983 u64 end;
984
985 return check_add_overflow(addr, range, &end);
986}
987
988static bool
989drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
990{
991 return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
992 "GPUVA address limited to %zu bytes.\n", sizeof(addr));
993}
994
995static bool
996drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
997{
998 u64 end = addr + range;
999 u64 mm_start = gpuvm->mm_start;
1000 u64 mm_end = mm_start + gpuvm->mm_range;
1001
1002 return addr >= mm_start && end <= mm_end;
1003}
1004
1005static bool
1006drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
1007{
1008 u64 end = addr + range;
1009 u64 kstart = gpuvm->kernel_alloc_node.va.addr;
1010 u64 krange = gpuvm->kernel_alloc_node.va.range;
1011 u64 kend = kstart + krange;
1012
1013 return krange && addr < kend && kstart < end;
1014}
1015
1016/**
1017 * drm_gpuvm_range_valid() - checks whether the given range is valid for the
1018 * given &drm_gpuvm
1019 * @gpuvm: the GPUVM to check the range for
1020 * @addr: the base address
1021 * @range: the range starting from the base address
1022 *
1023 * Checks whether the range is within the GPUVM's managed boundaries.
1024 *
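 * A typical use in a driver's bind path might look like the following sketch::
 *
 *	if (!drm_gpuvm_range_valid(gpuvm, addr, range))
 *		return -EINVAL;
 *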
1025 * Returns: true for a valid range, false otherwise
1026 */
1027bool
1028drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
1029 u64 addr, u64 range)
1030{
1031 return !drm_gpuvm_check_overflow(addr, range) &&
1032 drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
1033 !drm_gpuvm_in_kernel_node(gpuvm, addr, range);
1034}
1035EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid);
1036
1037static void
1038drm_gpuvm_gem_object_free(struct drm_gem_object *obj)
1039{
1040 drm_gem_object_release(obj);
1041 kfree(obj);
1042}
1043
1044static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = {
1045 .free = drm_gpuvm_gem_object_free,
1046};
1047
1048/**
1049 * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object
 * @drm: the driver's &drm_device
1051 *
1052 * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in
 * order to serve as root GEM object providing the &dma_resv shared across
1054 * &drm_gem_objects local to a single GPUVM.
1055 *
1056 * Returns: the &drm_gem_object on success, NULL on failure
1057 */
1058struct drm_gem_object *
1059drm_gpuvm_resv_object_alloc(struct drm_device *drm)
1060{
1061 struct drm_gem_object *obj;
1062
1063 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
1064 if (!obj)
1065 return NULL;
1066
1067 obj->funcs = &drm_gpuvm_object_funcs;
1068 drm_gem_private_object_init(drm, obj, 0);
1069
1070 return obj;
1071}
1072EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc);
1073
1074/**
1075 * drm_gpuvm_init() - initialize a &drm_gpuvm
1076 * @gpuvm: pointer to the &drm_gpuvm to initialize
1077 * @name: the name of the GPU VA space
1078 * @flags: the &drm_gpuvm_flags for this GPUVM
1079 * @drm: the &drm_device this VM resides in
1080 * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv
1081 * @start_offset: the start offset of the GPU VA space
1082 * @range: the size of the GPU VA space
1083 * @reserve_offset: the start of the kernel reserved GPU VA area
1084 * @reserve_range: the size of the kernel reserved GPU VA area
1085 * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap
1086 *
1087 * The &drm_gpuvm must be initialized with this function before use.
1088 *
1089 * Note that @gpuvm must be cleared to 0 before calling this function. The given
1090 * &name is expected to be managed by the surrounding driver structures.
1091 */
1092void
1093drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
1094 enum drm_gpuvm_flags flags,
1095 struct drm_device *drm,
1096 struct drm_gem_object *r_obj,
1097 u64 start_offset, u64 range,
1098 u64 reserve_offset, u64 reserve_range,
1099 const struct drm_gpuvm_ops *ops)
1100{
1101 gpuvm->rb.tree = RB_ROOT_CACHED;
1102 INIT_LIST_HEAD(&gpuvm->rb.list);
1103
1104 INIT_LIST_HEAD(&gpuvm->extobj.list);
1105 spin_lock_init(&gpuvm->extobj.lock);
1106
1107 INIT_LIST_HEAD(&gpuvm->evict.list);
1108 spin_lock_init(&gpuvm->evict.lock);
1109
1110 init_llist_head(&gpuvm->bo_defer);
1111
1112 kref_init(&gpuvm->kref);
1113
1114 gpuvm->name = name ? name : "unknown";
1115 gpuvm->flags = flags;
1116 gpuvm->ops = ops;
1117 gpuvm->drm = drm;
1118 gpuvm->r_obj = r_obj;
1119
1120 drm_gem_object_get(r_obj);
1121
1122 drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
1123 gpuvm->mm_start = start_offset;
1124 gpuvm->mm_range = range;
1125
1126 memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
1127 if (reserve_range) {
1128 gpuvm->kernel_alloc_node.va.addr = reserve_offset;
1129 gpuvm->kernel_alloc_node.va.range = reserve_range;
1130
1131 if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset,
1132 reserve_range)))
1133 __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
1134 }
1135}
1136EXPORT_SYMBOL_GPL(drm_gpuvm_init);
1137
1138static void
1139drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
1140{
1141 gpuvm->name = NULL;
1142
1143 if (gpuvm->kernel_alloc_node.va.range)
1144 __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
1145
1146 drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
1147 "GPUVA tree is not empty, potentially leaking memory.\n");
1148
1149 drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list),
1150 "Extobj list should be empty.\n");
1151 drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
1152 "Evict list should be empty.\n");
1153 drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer),
1154 "VM BO cleanup list should be empty.\n");
1155
1156 drm_gem_object_put(gpuvm->r_obj);
1157}
1158
1159static void
1160drm_gpuvm_free(struct kref *kref)
1161{
1162 struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
1163
1164 drm_gpuvm_fini(gpuvm);
1165
1166 if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
1167 return;
1168
1169 gpuvm->ops->vm_free(gpuvm);
1170}
1171
1172/**
1173 * drm_gpuvm_put() - drop a struct drm_gpuvm reference
1174 * @gpuvm: the &drm_gpuvm to release the reference of
1175 *
1176 * This releases a reference to @gpuvm.
1177 *
1178 * This function may be called from atomic context.
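 *
 * Once the reference count drops to zero, the &drm_gpuvm is cleaned up and
 * handed to the driver's &drm_gpuvm_ops.vm_free callback for freeing, e.g.
 * (driver_gpuvm being a hypothetical driver structure embedding the
 * &drm_gpuvm)::
 *
 *	static void driver_gpuvm_free(struct drm_gpuvm *gpuvm)
 *	{
 *		struct driver_gpuvm *dvm =
 *			container_of(gpuvm, struct driver_gpuvm, base);
 *
 *		kfree(dvm);
 *	}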
1179 */
1180void
1181drm_gpuvm_put(struct drm_gpuvm *gpuvm)
1182{
1183 if (gpuvm)
1184 kref_put(&gpuvm->kref, drm_gpuvm_free);
1185}
1186EXPORT_SYMBOL_GPL(drm_gpuvm_put);
1187
1188static int
1189exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
1190 unsigned int num_fences)
1191{
1192 return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
1193 drm_exec_lock_obj(exec, obj);
1194}
1195
1196/**
1197 * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv
1198 * @gpuvm: the &drm_gpuvm
1199 * @exec: the &drm_exec context
1200 * @num_fences: the amount of &dma_fences to reserve
1201 *
1202 * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object; if
1203 * @num_fences is zero drm_exec_lock_obj() is called instead.
1204 *
 * Using this function directly, it is the driver's responsibility to call
1206 * drm_exec_init() and drm_exec_fini() accordingly.
1207 *
1208 * Returns: 0 on success, negative error code on failure.
1209 */
1210int
1211drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
1212 struct drm_exec *exec,
1213 unsigned int num_fences)
1214{
1215 return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
1216}
1217EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
1218
1219static int
1220__drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1221 struct drm_exec *exec,
1222 unsigned int num_fences)
1223{
1224 struct drm_gpuvm_bo *vm_bo;
1225 LIST_HEAD(extobjs);
1226 int ret = 0;
1227
1228 for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
1229 ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1230 if (ret)
1231 break;
1232 }
1233 /* Drop ref in case we break out of the loop. */
1234 drm_gpuvm_bo_put(vm_bo);
1235 restore_vm_bo_list(gpuvm, extobj);
1236
1237 return ret;
1238}
1239
1240static int
1241drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
1242 struct drm_exec *exec,
1243 unsigned int num_fences)
1244{
1245 struct drm_gpuvm_bo *vm_bo;
1246 int ret = 0;
1247
1248 drm_gpuvm_resv_assert_held(gpuvm);
1249 list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
1250 if (drm_gpuvm_bo_is_zombie(vm_bo))
1251 continue;
1252
1253 ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
1254 if (ret)
1255 break;
1256
1257 if (vm_bo->evicted)
1258 drm_gpuvm_bo_list_add(vm_bo, evict, false);
1259 }
1260
1261 return ret;
1262}
1263
1264/**
1265 * drm_gpuvm_prepare_objects() - prepare all associated BOs
1266 * @gpuvm: the &drm_gpuvm
1267 * @exec: the &drm_exec locking context
1268 * @num_fences: the amount of &dma_fences to reserve
1269 *
1270 * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
1271 * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
1272 * is called instead.
1273 *
 * Using this function directly, it is the driver's responsibility to call
1275 * drm_exec_init() and drm_exec_fini() accordingly.
1276 *
 * Note: This function is safe against concurrent insertion and removal of
 * external objects; however, it is not safe against concurrent usage itself.
1279 *
1280 * Drivers need to make sure to protect this case with either an outer VM lock
1281 * or by calling drm_gpuvm_prepare_vm() before this function within the
1282 * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
1283 * mutual exclusion.
1284 *
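 * The latter variant corresponds to the following open-coded sketch, which is
 * essentially what drm_gpuvm_exec_lock() does internally::
 *
 *	struct drm_exec exec;
 *	int ret;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *
 *	drm_exec_until_all_locked(&exec) {
 *		ret = drm_gpuvm_prepare_vm(gpuvm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			goto err;
 *
 *		ret = drm_gpuvm_prepare_objects(gpuvm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			goto err;
 *	}
 *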
1285 * Returns: 0 on success, negative error code on failure.
1286 */
1287int
1288drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
1289 struct drm_exec *exec,
1290 unsigned int num_fences)
1291{
1292 if (drm_gpuvm_resv_protected(gpuvm))
1293 return drm_gpuvm_prepare_objects_locked(gpuvm, exec,
1294 num_fences);
1295 else
1296 return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1297}
1298EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
1299
1300/**
1301 * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range
1302 * @gpuvm: the &drm_gpuvm
1303 * @exec: the &drm_exec locking context
1304 * @addr: the start address within the VA space
1305 * @range: the range to iterate within the VA space
1306 * @num_fences: the amount of &dma_fences to reserve
1307 *
1308 * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
1309 * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
1310 * instead.
1311 *
1312 * Returns: 0 on success, negative error code on failure.
1313 */
1314int
1315drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
1316 u64 addr, u64 range, unsigned int num_fences)
1317{
1318 struct drm_gpuva *va;
1319 u64 end = addr + range;
1320 int ret;
1321
1322 drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
1323 struct drm_gem_object *obj = va->gem.obj;
1324
1325 ret = exec_prepare_obj(exec, obj, num_fences);
1326 if (ret)
1327 return ret;
1328 }
1329
1330 return 0;
1331}
1332EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
1333
1334/**
1335 * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
1336 * @vm_exec: the &drm_gpuvm_exec wrapper
1337 *
1338 * Acquires all dma-resv locks of all &drm_gem_objects the given
1339 * &drm_gpuvm contains mappings of.
1340 *
1341 * Additionally, when calling this function with struct drm_gpuvm_exec::extra
1342 * being set the driver receives the given @fn callback to lock additional
1343 * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
1344 * would call drm_exec_prepare_obj() from within this callback.
1345 *
1346 * Returns: 0 on success, negative error code on failure.
1347 */
1348int
1349drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
1350{
1351 struct drm_gpuvm *gpuvm = vm_exec->vm;
1352 struct drm_exec *exec = &vm_exec->exec;
1353 unsigned int num_fences = vm_exec->num_fences;
1354 int ret;
1355
1356 drm_exec_init(exec, vm_exec->flags, 0);
1357
1358 drm_exec_until_all_locked(exec) {
1359 ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
1360 drm_exec_retry_on_contention(exec);
1361 if (ret)
1362 goto err;
1363
1364 ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
1365 drm_exec_retry_on_contention(exec);
1366 if (ret)
1367 goto err;
1368
1369 if (vm_exec->extra.fn) {
1370 ret = vm_exec->extra.fn(vm_exec);
1371 drm_exec_retry_on_contention(exec);
1372 if (ret)
1373 goto err;
1374 }
1375 }
1376
1377 return 0;
1378
1379err:
1380 drm_exec_fini(exec);
1381 return ret;
1382}
1383EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock);
1384
1385static int
1386fn_lock_array(struct drm_gpuvm_exec *vm_exec)
1387{
1388 struct {
1389 struct drm_gem_object **objs;
1390 unsigned int num_objs;
1391 } *args = vm_exec->extra.priv;
1392
1393 return drm_exec_prepare_array(&vm_exec->exec, args->objs,
1394 args->num_objs, vm_exec->num_fences);
1395}
1396
1397/**
1398 * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
1399 * @vm_exec: the &drm_gpuvm_exec wrapper
1400 * @objs: additional &drm_gem_objects to lock
1401 * @num_objs: the number of additional &drm_gem_objects to lock
1402 *
1403 * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
1404 * contains mappings of, plus the ones given through @objs.
1405 *
1406 * Returns: 0 on success, negative error code on failure.
1407 */
1408int
1409drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
1410 struct drm_gem_object **objs,
1411 unsigned int num_objs)
1412{
1413 struct {
1414 struct drm_gem_object **objs;
1415 unsigned int num_objs;
1416 } args;
1417
1418 args.objs = objs;
1419 args.num_objs = num_objs;
1420
1421 vm_exec->extra.fn = fn_lock_array;
1422 vm_exec->extra.priv = &args;
1423
1424 return drm_gpuvm_exec_lock(vm_exec);
1425}
1426EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array);
1427
1428/**
1429 * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
1430 * @vm_exec: the &drm_gpuvm_exec wrapper
1431 * @addr: the start address within the VA space
1432 * @range: the range to iterate within the VA space
1433 *
1434 * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
1435 * @addr + @range.
1436 *
1437 * Returns: 0 on success, negative error code on failure.
1438 */
1439int
1440drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
1441 u64 addr, u64 range)
1442{
1443 struct drm_gpuvm *gpuvm = vm_exec->vm;
1444 struct drm_exec *exec = &vm_exec->exec;
1445 int ret;
1446
1447 drm_exec_init(exec, vm_exec->flags, 0);
1448
1449 drm_exec_until_all_locked(exec) {
1450 ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
1451 vm_exec->num_fences);
1452 drm_exec_retry_on_contention(exec);
1453 if (ret)
1454 goto err;
1455 }
1456
1457 return ret;
1458
1459err:
1460 drm_exec_fini(exec);
1461 return ret;
1462}
1463EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range);
1464
1465static int
1466__drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1467{
1468 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1469 struct drm_gpuvm_bo *vm_bo;
1470 LIST_HEAD(evict);
1471 int ret = 0;
1472
1473 for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) {
1474 ret = ops->vm_bo_validate(vm_bo, exec);
1475 if (ret)
1476 break;
1477 }
1478 /* Drop ref in case we break out of the loop. */
1479 drm_gpuvm_bo_put(vm_bo);
1480 restore_vm_bo_list(gpuvm, evict);
1481
1482 return ret;
1483}
1484
1485static int
1486drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1487{
1488 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1489 struct drm_gpuvm_bo *vm_bo, *next;
1490 int ret = 0;
1491
1492 drm_gpuvm_resv_assert_held(gpuvm);
1493
1494 list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
1495 list.entry.evict) {
1496 if (drm_gpuvm_bo_is_zombie(vm_bo))
1497 continue;
1498
1499 ret = ops->vm_bo_validate(vm_bo, exec);
1500 if (ret)
1501 break;
1502
1503 dma_resv_assert_held(vm_bo->obj->resv);
1504 if (!vm_bo->evicted)
1505 drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
1506 }
1507
1508 return ret;
1509}
1510
1511/**
1512 * drm_gpuvm_validate() - validate all BOs marked as evicted
1513 * @gpuvm: the &drm_gpuvm to validate evicted BOs
1514 * @exec: the &drm_exec instance used for locking the GPUVM
1515 *
1516 * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer
1517 * objects being mapped in the given &drm_gpuvm.
1518 *
1519 * Returns: 0 on success, negative error code on failure.
1520 */
1521int
1522drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
1523{
1524 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1525
1526 if (unlikely(!ops || !ops->vm_bo_validate))
1527 return -EOPNOTSUPP;
1528
1529 if (drm_gpuvm_resv_protected(gpuvm))
1530 return drm_gpuvm_validate_locked(gpuvm, exec);
1531 else
1532 return __drm_gpuvm_validate(gpuvm, exec);
1533}
1534EXPORT_SYMBOL_GPL(drm_gpuvm_validate);
1535
1536/**
1537 * drm_gpuvm_resv_add_fence - add fence to private and all extobj
1538 * dma-resv
1539 * @gpuvm: the &drm_gpuvm to add a fence to
1540 * @exec: the &drm_exec locking context
1541 * @fence: fence to add
1542 * @private_usage: private dma-resv usage
1543 * @extobj_usage: extobj dma-resv usage
1544 */
1545void
1546drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
1547 struct drm_exec *exec,
1548 struct dma_fence *fence,
1549 enum dma_resv_usage private_usage,
1550 enum dma_resv_usage extobj_usage)
1551{
1552 struct drm_gem_object *obj;
1553 unsigned long index;
1554
1555 drm_exec_for_each_locked_object(exec, index, obj) {
1556 dma_resv_assert_held(obj->resv);
1557 dma_resv_add_fence(obj->resv, fence,
1558 drm_gpuvm_is_extobj(gpuvm, obj) ?
1559 extobj_usage : private_usage);
1560 }
1561}
1562EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence);
1563
1564/**
1565 * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
1566 * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1567 * @obj: The &drm_gem_object being mapped in the @gpuvm.
1568 *
1569 * If provided by the driver, this function uses the &drm_gpuvm_ops
1570 * vm_bo_alloc() callback to allocate.
1571 *
1572 * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1573 */
1574struct drm_gpuvm_bo *
1575drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
1576 struct drm_gem_object *obj)
1577{
1578 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1579 struct drm_gpuvm_bo *vm_bo;
1580
1581 if (ops && ops->vm_bo_alloc)
1582 vm_bo = ops->vm_bo_alloc();
1583 else
1584 vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
1585
1586 if (unlikely(!vm_bo))
1587 return NULL;
1588
1589 vm_bo->vm = drm_gpuvm_get(gpuvm);
1590 vm_bo->obj = obj;
1591 drm_gem_object_get(obj);
1592
1593 kref_init(&vm_bo->kref);
1594 INIT_LIST_HEAD(&vm_bo->list.gpuva);
1595 INIT_LIST_HEAD(&vm_bo->list.entry.gem);
1596
1597 INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
1598 INIT_LIST_HEAD(&vm_bo->list.entry.evict);
1599 init_llist_node(&vm_bo->list.entry.bo_defer);
1600
1601 return vm_bo;
1602}
1603EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
1604
1605static void
1606drm_gpuvm_bo_destroy(struct kref *kref)
1607{
1608 struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1609 kref);
1610 struct drm_gpuvm *gpuvm = vm_bo->vm;
1611 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1612 struct drm_gem_object *obj = vm_bo->obj;
1613 bool lock = !drm_gpuvm_resv_protected(gpuvm);
1614
1615 if (!lock)
1616 drm_gpuvm_resv_assert_held(gpuvm);
1617
1618 drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
1619 drm_gpuvm_bo_list_del(vm_bo, evict, lock);
1620
1621 drm_gem_gpuva_assert_lock_held(gpuvm, obj);
1622 list_del(&vm_bo->list.entry.gem);
1623
1624 if (ops && ops->vm_bo_free)
1625 ops->vm_bo_free(vm_bo);
1626 else
1627 kfree(vm_bo);
1628
1629 drm_gpuvm_put(gpuvm);
1630 drm_gem_object_put(obj);
1631}
1632
1633/**
1634 * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
1635 * @vm_bo: the &drm_gpuvm_bo to release the reference of
1636 *
1637 * This releases a reference to @vm_bo.
1638 *
 * If the reference count drops to zero, the &drm_gpuvm_bo is destroyed, which
1640 * includes removing it from the GEMs gpuva list. Hence, if a call to this
1641 * function can potentially let the reference count drop to zero the caller must
1642 * hold the lock that the GEM uses for its gpuva list (either the GEM's
1643 * dma-resv or gpuva.lock mutex).
1644 *
1645 * This function may only be called from non-atomic context.
1646 *
1647 * Returns: true if vm_bo was destroyed, false otherwise.
1648 */
1649bool
1650drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
1651{
1652 might_sleep();
1653
1654 if (vm_bo)
1655 return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
1656
1657 return false;
1658}
1659EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
1660
1661/*
1662 * drm_gpuvm_bo_into_zombie() - called when the vm_bo becomes a zombie due to
1663 * deferred cleanup
1664 *
1665 * If deferred cleanup is used, then this must be called right after the vm_bo
1666 * refcount drops to zero. Must be called with GEM mutex held. After releasing
1667 * the GEM mutex, drm_gpuvm_bo_defer_zombie_cleanup() must be called.
1668 */
1669static void
1670drm_gpuvm_bo_into_zombie(struct kref *kref)
1671{
1672 struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1673 kref);
1674
1675 if (!drm_gpuvm_resv_protected(vm_bo->vm)) {
1676 drm_gpuvm_bo_list_del(vm_bo, extobj, true);
1677 drm_gpuvm_bo_list_del(vm_bo, evict, true);
1678 }
1679
1680 list_del(&vm_bo->list.entry.gem);
1681}
1682
1683/*
1684 * drm_gpuvm_bo_defer_zombie_cleanup() - adds a new zombie vm_bo to the
1685 * bo_defer list
1686 *
1687 * Called after drm_gpuvm_bo_into_zombie(). GEM mutex must not be held.
1688 *
1689 * It's important that the GEM stays alive for the duration in which we hold
1690 * the mutex, but the instant we add the vm_bo to bo_defer, another thread
1691 * might call drm_gpuvm_bo_deferred_cleanup() and put the GEM. Therefore, to
1692 * avoid kfreeing a mutex we are holding, the GEM mutex must be released
1693 * *before* calling this function.
1694 */
1695static void
1696drm_gpuvm_bo_defer_zombie_cleanup(struct drm_gpuvm_bo *vm_bo)
1697{
1698 llist_add(&vm_bo->list.entry.bo_defer, &vm_bo->vm->bo_defer);
1699}
1700
1701static void
1702drm_gpuvm_bo_defer_free(struct kref *kref)
1703{
1704 struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
1705 kref);
1706
1707 drm_gpuvm_bo_into_zombie(kref);
1708 mutex_unlock(&vm_bo->obj->gpuva.lock);
1709 drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
1710}
1711
1712/**
1713 * drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with
1714 * deferred cleanup
1715 * @vm_bo: the &drm_gpuvm_bo to release the reference of
1716 *
1717 * This releases a reference to @vm_bo.
1718 *
1719 * This might take and release the GEM's GPUVA lock. You should call
1720 * drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process.
1721 *
1722 * Returns: true if vm_bo is being destroyed, false otherwise.
1723 */
1724bool
1725drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo)
1726{
1727 if (!vm_bo)
1728 return false;
1729
1730 drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
1731
1732 return !!kref_put_mutex(&vm_bo->kref,
1733 drm_gpuvm_bo_defer_free,
1734 &vm_bo->obj->gpuva.lock);
1735}
1736EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
1737
1738/**
1739 * drm_gpuvm_bo_deferred_cleanup() - clean up BOs in the VM's deferred
1740 * cleanup list
1741 * @gpuvm: the VM to clean up
1742 *
1743 * Cleans up &drm_gpuvm_bo instances in the deferred cleanup list.
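 *
 * A minimal usage sketch, assuming the driver defers the actual cleanup to a
 * hypothetical driver-owned work item (driver_vm->cleanup_work)::
 *
 *	// from a path that must not take the VM's dma-resv lock or release
 *	// the final GEM reference, e.g. when using immediate mode
 *	drm_gpuvm_bo_put_deferred(vm_bo);
 *	queue_work(system_wq, &driver_vm->cleanup_work);
 *
 *	// later, from the work item (non-atomic context)
 *	drm_gpuvm_bo_deferred_cleanup(gpuvm);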
1744 */
1745void
1746drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
1747{
1748 const struct drm_gpuvm_ops *ops = gpuvm->ops;
1749 struct drm_gpuvm_bo *vm_bo;
1750 struct drm_gem_object *obj;
1751 struct llist_node *bo_defer;
1752
1753 bo_defer = llist_del_all(&gpuvm->bo_defer);
1754 if (!bo_defer)
1755 return;
1756
1757 if (drm_gpuvm_resv_protected(gpuvm)) {
1758 dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);
1759 llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) {
1760 drm_gpuvm_bo_list_del(vm_bo, extobj, false);
1761 drm_gpuvm_bo_list_del(vm_bo, evict, false);
1762 }
1763 dma_resv_unlock(drm_gpuvm_resv(gpuvm));
1764 }
1765
1766 while (bo_defer) {
1767 vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
1768 bo_defer = bo_defer->next;
1769 obj = vm_bo->obj;
1770 if (ops && ops->vm_bo_free)
1771 ops->vm_bo_free(vm_bo);
1772 else
1773 kfree(vm_bo);
1774
1775 drm_gpuvm_put(gpuvm);
1776 drm_gem_object_put(obj);
1777 }
1778}
1779EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
1780
1781static struct drm_gpuvm_bo *
1782__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1783 struct drm_gem_object *obj)
1784{
1785 struct drm_gpuvm_bo *vm_bo;
1786
1787 drm_gem_gpuva_assert_lock_held(gpuvm, obj);
1788 drm_gem_for_each_gpuvm_bo(vm_bo, obj)
1789 if (vm_bo->vm == gpuvm)
1790 return vm_bo;
1791
1792 return NULL;
1793}
1794
1795/**
1796 * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
1797 * &drm_gpuvm and &drm_gem_object
1798 * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1799 * @obj: The &drm_gem_object being mapped in the @gpuvm.
1800 *
1801 * Find the &drm_gpuvm_bo representing the combination of the given
1802 * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1803 * count of the &drm_gpuvm_bo accordingly.
1804 *
1805 * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
1806 */
1807struct drm_gpuvm_bo *
1808drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
1809 struct drm_gem_object *obj)
1810{
1811 struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj);
1812
1813 return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL;
1814}
1815EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
1816
1817/**
1818 * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
1819 * given &drm_gpuvm and &drm_gem_object
1820 * @gpuvm: The &drm_gpuvm the @obj is mapped in.
1821 * @obj: The &drm_gem_object being mapped in the @gpuvm.
1822 *
1823 * Find the &drm_gpuvm_bo representing the combination of the given
1824 * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1825 * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
1826 * &drm_gpuvm_bo.
1827 *
1828 * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
1829 *
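 * A minimal sketch of the typical map path, assuming va points to an already
 * initialized &drm_gpuva and the GEM's GPUVA lock is held::
 *
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *	if (IS_ERR(vm_bo))
 *		return PTR_ERR(vm_bo);
 *
 *	ret = drm_gpuva_insert(gpuvm, va);
 *	if (ret)
 *		goto err_put_vm_bo;
 *
 *	drm_gpuva_link(va, vm_bo);
 *
 *	// drm_gpuva_link() takes its own reference, so drop the one obtained above
 *	drm_gpuvm_bo_put(vm_bo);
 *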
1830 * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
1831 */
1832struct drm_gpuvm_bo *
1833drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
1834 struct drm_gem_object *obj)
1835{
1836 struct drm_gpuvm_bo *vm_bo;
1837
1838 vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1839 if (vm_bo)
1840 return vm_bo;
1841
1842 vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
1843 if (!vm_bo)
1844 return ERR_PTR(-ENOMEM);
1845
1846 drm_gem_gpuva_assert_lock_held(gpuvm, obj);
1847 list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
1848
1849 return vm_bo;
1850}
1851EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
1852
1853/**
1854 * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
1855 * for the given &drm_gpuvm and &drm_gem_object
1856 * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
1857 *
1858 * Find the &drm_gpuvm_bo representing the combination of the given
1859 * &drm_gpuvm and &drm_gem_object. If found, increases the reference
1860 * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference
1861 * count is decreased. If not found @__vm_bo is returned without further
1862 * increase of the reference count.
1863 *
1864 * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
1865 *
1866 * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
1867 * &drm_gpuvm_bo was found
1868 */
1869struct drm_gpuvm_bo *
1870drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
1871{
1872 struct drm_gpuvm *gpuvm = __vm_bo->vm;
1873 struct drm_gem_object *obj = __vm_bo->obj;
1874 struct drm_gpuvm_bo *vm_bo;
1875
1876 vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
1877 if (vm_bo) {
1878 drm_gpuvm_bo_put(__vm_bo);
1879 return vm_bo;
1880 }
1881
1882 drm_gem_gpuva_assert_lock_held(gpuvm, obj);
1883 list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
1884
1885 return __vm_bo;
1886}
1887EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc);
1888
1889/**
1890 * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
1891 * extobj list
1892 * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's extobj list.
1893 *
1894 * Adds the given @vm_bo to its &drm_gpuvm's extobj list if it is not on the
1895 * list already and the corresponding &drm_gem_object is actually an external
1896 * object.
1897 */
1898void
1899drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo)
1900{
1901 struct drm_gpuvm *gpuvm = vm_bo->vm;
1902 bool lock = !drm_gpuvm_resv_protected(gpuvm);
1903
1904 if (!lock)
1905 drm_gpuvm_resv_assert_held(gpuvm);
1906
1907 if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
1908 drm_gpuvm_bo_list_add(vm_bo, extobj, lock);
1909}
1910EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
1911
1912/**
1913 * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvm's
1914 * evicted list
1915 * @vm_bo: the &drm_gpuvm_bo to add or remove
1916 * @evict: indicates whether the object is evicted
1917 *
1918 * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
1919 */
1920void
1921drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
1922{
1923 struct drm_gpuvm *gpuvm = vm_bo->vm;
1924 struct drm_gem_object *obj = vm_bo->obj;
1925 bool lock = !drm_gpuvm_resv_protected(gpuvm);
1926
1927 dma_resv_assert_held(obj->resv);
1928 vm_bo->evicted = evict;
1929
1930 /* Can't add external objects to the evicted list directly if not using
1931 * internal spinlocks, since in this case the evicted list is protected
1932 * with the VM's common dma-resv lock.
1933 */
1934 if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock)
1935 return;
1936
1937 if (evict)
1938 drm_gpuvm_bo_list_add(vm_bo, evict, lock);
1939 else
1940 drm_gpuvm_bo_list_del_init(vm_bo, evict, lock);
1941}
1942EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict);
1943
1944static int
1945__drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1946 struct drm_gpuva *va)
1947{
1948 struct rb_node *node;
1949 struct list_head *head;
1950
1951 if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
1952 GPUVA_START(va),
1953 GPUVA_LAST(va)))
1954 return -EEXIST;
1955
1956 va->vm = gpuvm;
1957
1958 drm_gpuva_it_insert(va, &gpuvm->rb.tree);
1959
1960 node = rb_prev(&va->rb.node);
1961 if (node)
1962 head = &(to_drm_gpuva(node))->rb.entry;
1963 else
1964 head = &gpuvm->rb.list;
1965
1966 list_add(&va->rb.entry, head);
1967
1968 return 0;
1969}
1970
1971/**
1972 * drm_gpuva_insert() - insert a &drm_gpuva
1973 * @gpuvm: the &drm_gpuvm to insert the &drm_gpuva in
1974 * @va: the &drm_gpuva to insert
1975 *
1976 * Insert a &drm_gpuva with a given address and range into a
1977 * &drm_gpuvm.
1978 *
1979 * It is safe to call this function while iterating the GPU VA space with the
1980 * safe iterator variants, such as drm_gpuvm_for_each_va_safe() and
1981 * drm_gpuvm_for_each_va_range_safe().
1982 *
1983 * Returns: 0 on success, negative error code on failure.
1984 */
1985int
1986drm_gpuva_insert(struct drm_gpuvm *gpuvm,
1987 struct drm_gpuva *va)
1988{
1989 u64 addr = va->va.addr;
1990 u64 range = va->va.range;
1991 int ret;
1992
1993 if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
1994 return -EINVAL;
1995
1996 ret = __drm_gpuva_insert(gpuvm, va);
1997 if (likely(!ret))
1998 /* Take a reference of the GPUVM for the successfully inserted
1999 * drm_gpuva. We can't take the reference in
2000 * __drm_gpuva_insert() itself, since we don't want to increase
2001 * the reference count for the GPUVM's kernel_alloc_node.
2002 */
2003 drm_gpuvm_get(gpuvm);
2004
2005 return ret;
2006}
2007EXPORT_SYMBOL_GPL(drm_gpuva_insert);
2008
2009static void
2010__drm_gpuva_remove(struct drm_gpuva *va)
2011{
2012 drm_gpuva_it_remove(va, &va->vm->rb.tree);
2013 list_del_init(&va->rb.entry);
2014}
2015
2016/**
2017 * drm_gpuva_remove() - remove a &drm_gpuva
2018 * @va: the &drm_gpuva to remove
2019 *
2020 * This removes the given &va from the underlying tree.
2021 *
2022 * It is safe to call this function while iterating the GPU VA space with the
2023 * safe iterator variants, such as drm_gpuvm_for_each_va_safe() and
2024 * drm_gpuvm_for_each_va_range_safe().
2025 */
2026void
2027drm_gpuva_remove(struct drm_gpuva *va)
2028{
2029 struct drm_gpuvm *gpuvm = va->vm;
2030
2031 if (unlikely(va == &gpuvm->kernel_alloc_node)) {
2032 drm_WARN(gpuvm->drm, 1,
2033 "Can't destroy kernel reserved node.\n");
2034 return;
2035 }
2036
2037 __drm_gpuva_remove(va);
2038 drm_gpuvm_put(va->vm);
2039}
2040EXPORT_SYMBOL_GPL(drm_gpuva_remove);
2041
2042/**
2043 * drm_gpuva_link() - link a &drm_gpuva
2044 * @va: the &drm_gpuva to link
2045 * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
2046 *
2047 * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
2048 * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
2049 *
2050 * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
2051 * reference of the latter is taken.
2052 *
2053 * This function expects the caller to protect the GEM's GPUVA list against
2054 * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
2055 */
2056void
2057drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
2058{
2059 struct drm_gem_object *obj = va->gem.obj;
2060 struct drm_gpuvm *gpuvm = va->vm;
2061
2062 if (unlikely(!obj))
2063 return;
2064
2065 drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj);
2066
2067 va->vm_bo = drm_gpuvm_bo_get(vm_bo);
2068
2069 drm_gem_gpuva_assert_lock_held(gpuvm, obj);
2070 list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
2071}
2072EXPORT_SYMBOL_GPL(drm_gpuva_link);
2073
2074/**
2075 * drm_gpuva_unlink() - unlink a &drm_gpuva
2076 * @va: the &drm_gpuva to unlink
2077 *
2078 * This removes the given &va from the GPU VA list of the &drm_gem_object it is
2079 * associated with.
2080 *
2081 * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
2082 * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
2083 * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
2084 *
2085 * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
2086 * the latter is dropped.
2087 *
2088 * This function expects the caller to protect the GEM's GPUVA list against
2089 * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
2090 */
2091void
2092drm_gpuva_unlink(struct drm_gpuva *va)
2093{
2094 struct drm_gem_object *obj = va->gem.obj;
2095 struct drm_gpuvm_bo *vm_bo = va->vm_bo;
2096
2097 if (unlikely(!obj))
2098 return;
2099
2100 drm_gem_gpuva_assert_lock_held(va->vm, obj);
2101 list_del_init(&va->gem.entry);
2102
2103 va->vm_bo = NULL;
2104 drm_gpuvm_bo_put(vm_bo);
2105}
2106EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
2107
2108/**
2109 * drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup
2110 * @va: the &drm_gpuva to unlink
2111 *
2112 * Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and takes
2113 * the lock for the caller.
2114 */
2115void
2116drm_gpuva_unlink_defer(struct drm_gpuva *va)
2117{
2118 struct drm_gem_object *obj = va->gem.obj;
2119 struct drm_gpuvm_bo *vm_bo = va->vm_bo;
2120 bool should_defer_bo;
2121
2122 if (unlikely(!obj))
2123 return;
2124
2125 drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
2126
2127 mutex_lock(&obj->gpuva.lock);
2128 list_del_init(&va->gem.entry);
2129
2130 /*
2131 * This is drm_gpuvm_bo_put_deferred() except we already hold the mutex.
2132 */
2133 should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_into_zombie);
2134 mutex_unlock(&obj->gpuva.lock);
2135 if (should_defer_bo)
2136 drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
2137
2138 va->vm_bo = NULL;
2139}
2140EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer);
2141
2142/**
2143 * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
2144 * @gpuvm: the &drm_gpuvm to search in
2145 * @addr: the &drm_gpuva's address
2146 * @range: the &drm_gpuva's range
2147 *
2148 * Returns: the first &drm_gpuva within the given range
2149 */
2150struct drm_gpuva *
2151drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
2152 u64 addr, u64 range)
2153{
2154 u64 last = addr + range - 1;
2155
2156 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
2157}
2158EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
2159
2160/**
2161 * drm_gpuva_find() - find a &drm_gpuva
2162 * @gpuvm: the &drm_gpuvm to search in
2163 * @addr: the &drm_gpuva's address
2164 * @range: the &drm_gpuva's range
2165 *
2166 * Returns: the &drm_gpuva at a given @addr and with a given @range
2167 */
2168struct drm_gpuva *
2169drm_gpuva_find(struct drm_gpuvm *gpuvm,
2170 u64 addr, u64 range)
2171{
2172 struct drm_gpuva *va;
2173
2174 va = drm_gpuva_find_first(gpuvm, addr, range);
2175 if (!va)
2176 goto out;
2177
2178 if (va->va.addr != addr ||
2179 va->va.range != range)
2180 goto out;
2181
2182 return va;
2183
2184out:
2185 return NULL;
2186}
2187EXPORT_SYMBOL_GPL(drm_gpuva_find);
2188
2189/**
2190 * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
2191 * @gpuvm: the &drm_gpuvm to search in
2192 * @start: the given GPU VA's start address
2193 *
2194 * Find the adjacent &drm_gpuva before the GPU VA with the given @start address.
2195 *
2196 * Note that if there is any free space between the GPU VA mappings no mapping
2197 * is returned.
2198 *
2199 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
2200 */
2201struct drm_gpuva *
2202drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start)
2203{
2204 if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1))
2205 return NULL;
2206
2207 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
2208}
2209EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
2210
2211/**
2212 * drm_gpuva_find_next() - find the &drm_gpuva after the given address
2213 * @gpuvm: the &drm_gpuvm to search in
2214 * @end: the given GPU VA's end address
2215 *
2216 * Find the adjacent &drm_gpuva after the GPU VA with the given @end address.
2217 *
2218 * Note that if there is any free space between the GPU VA mappings no mapping
2219 * is returned.
2220 *
2221 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
2222 */
2223struct drm_gpuva *
2224drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end)
2225{
2226 if (!drm_gpuvm_range_valid(gpuvm, end, 1))
2227 return NULL;
2228
2229 return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
2230}
2231EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
2232
2233/**
2234 * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
2235 * is empty
2236 * @gpuvm: the &drm_gpuvm to check the range for
2237 * @addr: the start address of the range
2238 * @range: the range of the interval
2239 *
2240 * Returns: true if the interval is empty, false otherwise
2241 */
2242bool
2243drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
2244{
2245 return !drm_gpuva_find_first(gpuvm, addr, range);
2246}
2247EXPORT_SYMBOL_GPL(drm_gpuvm_interval_empty);
2248
2249/**
2250 * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
2251 * &drm_gpuva_op_map
2252 * @gpuvm: the &drm_gpuvm
2253 * @va: the &drm_gpuva to insert
2254 * @op: the &drm_gpuva_op_map to initialize @va with
2255 *
2256 * Initializes the @va from the @op and inserts it into the given @gpuvm.
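 *
 * A minimal sketch of a driver's &drm_gpuvm_ops.sm_step_map callback using
 * this helper, where struct driver_map_ctx and driver_map_pages() are
 * hypothetical and ctx->new_va is a pre-allocated &drm_gpuva::
 *
 *	static int driver_sm_step_map(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct driver_map_ctx *ctx = priv;
 *
 *		drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
 *		ctx->new_va = NULL;
 *
 *		return driver_map_pages(ctx, &op->map);
 *	}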
2257 */
2258void
2259drm_gpuva_map(struct drm_gpuvm *gpuvm,
2260 struct drm_gpuva *va,
2261 struct drm_gpuva_op_map *op)
2262{
2263 drm_gpuva_init_from_op(va, op);
2264 drm_gpuva_insert(gpuvm, va);
2265}
2266EXPORT_SYMBOL_GPL(drm_gpuva_map);
2267
2268/**
2269 * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
2270 * &drm_gpuva_op_remap
2271 * @prev: the &drm_gpuva to remap when keeping the start of a mapping
2272 * @next: the &drm_gpuva to remap when keeping the end of a mapping
2273 * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
2274 *
2275 * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
2276 * @next.
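 *
 * A minimal sketch of a driver's &drm_gpuvm_ops.sm_step_remap callback using
 * this helper, where struct driver_remap_ctx and driver_update_ptes() are
 * hypothetical and ctx->prev / ctx->next are pre-allocated &drm_gpuva objects::
 *
 *	static int driver_sm_step_remap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct driver_remap_ctx *ctx = priv;
 *
 *		drm_gpuva_remap(ctx->prev, ctx->next, &op->remap);
 *
 *		return driver_update_ptes(ctx, &op->remap);
 *	}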
2277 */
2278void
2279drm_gpuva_remap(struct drm_gpuva *prev,
2280 struct drm_gpuva *next,
2281 struct drm_gpuva_op_remap *op)
2282{
2283 struct drm_gpuva *va = op->unmap->va;
2284 struct drm_gpuvm *gpuvm = va->vm;
2285
2286 drm_gpuva_remove(va);
2287
2288 if (op->prev) {
2289 drm_gpuva_init_from_op(prev, op->prev);
2290 drm_gpuva_insert(gpuvm, prev);
2291 }
2292
2293 if (op->next) {
2294 drm_gpuva_init_from_op(next, op->next);
2295 drm_gpuva_insert(gpuvm, next);
2296 }
2297}
2298EXPORT_SYMBOL_GPL(drm_gpuva_remap);
2299
2300/**
2301 * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
2302 * &drm_gpuva_op_unmap
2303 * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
2304 *
2305 * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
2306 */
2307void
2308drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
2309{
2310 drm_gpuva_remove(op->va);
2311}
2312EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
2313
2314static int
2315op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
2316 const struct drm_gpuvm_map_req *req)
2317{
2318 struct drm_gpuva_op op = {};
2319
2320 if (!req)
2321 return 0;
2322
2323 op.op = DRM_GPUVA_OP_MAP;
2324 op.map.va.addr = req->map.va.addr;
2325 op.map.va.range = req->map.va.range;
2326 op.map.gem.obj = req->map.gem.obj;
2327 op.map.gem.offset = req->map.gem.offset;
2328
2329 return fn->sm_step_map(&op, priv);
2330}
2331
2332static int
2333op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2334 struct drm_gpuva_op_map *prev,
2335 struct drm_gpuva_op_map *next,
2336 struct drm_gpuva_op_unmap *unmap)
2337{
2338 struct drm_gpuva_op op = {};
2339 struct drm_gpuva_op_remap *r;
2340
2341 op.op = DRM_GPUVA_OP_REMAP;
2342 r = &op.remap;
2343 r->prev = prev;
2344 r->next = next;
2345 r->unmap = unmap;
2346
2347 return fn->sm_step_remap(&op, priv);
2348}
2349
2350static int
2351op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
2352 struct drm_gpuva *va, bool merge, bool madvise)
2353{
2354 struct drm_gpuva_op op = {};
2355
2356 if (madvise)
2357 return 0;
2358
2359 op.op = DRM_GPUVA_OP_UNMAP;
2360 op.unmap.va = va;
2361 op.unmap.keep = merge;
2362
2363 return fn->sm_step_unmap(&op, priv);
2364}
2365
2366static int
2367__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
2368 const struct drm_gpuvm_ops *ops, void *priv,
2369 const struct drm_gpuvm_map_req *req,
2370 bool madvise)
2371{
2372 struct drm_gem_object *req_obj = req->map.gem.obj;
2373 const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
2374 struct drm_gpuva *va, *next;
2375 u64 req_offset = req->map.gem.offset;
2376 u64 req_range = req->map.va.range;
2377 u64 req_addr = req->map.va.addr;
2378 u64 req_end = req_addr + req_range;
2379 int ret;
2380
2381 if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2382 return -EINVAL;
2383
2384 drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2385 struct drm_gem_object *obj = va->gem.obj;
2386 u64 offset = va->gem.offset;
2387 u64 addr = va->va.addr;
2388 u64 range = va->va.range;
2389 u64 end = addr + range;
2390 bool merge = !!va->gem.obj;
2391
2392 if (madvise && obj)
2393 continue;
2394
2395 if (addr == req_addr) {
2396 merge &= obj == req_obj &&
2397 offset == req_offset;
2398
2399 if (end == req_end) {
2400 ret = op_unmap_cb(ops, priv, va, merge, madvise);
2401 if (ret)
2402 return ret;
2403 break;
2404 }
2405
2406 if (end < req_end) {
2407 ret = op_unmap_cb(ops, priv, va, merge, madvise);
2408 if (ret)
2409 return ret;
2410 continue;
2411 }
2412
2413 if (end > req_end) {
2414 struct drm_gpuva_op_map n = {
2415 .va.addr = req_end,
2416 .va.range = range - req_range,
2417 .gem.obj = obj,
2418 .gem.offset = offset + req_range,
2419 };
2420 struct drm_gpuva_op_unmap u = {
2421 .va = va,
2422 .keep = merge,
2423 };
2424
2425 ret = op_remap_cb(ops, priv, NULL, &n, &u);
2426 if (ret)
2427 return ret;
2428
2429 if (madvise)
2430 op_map = req;
2431 break;
2432 }
2433 } else if (addr < req_addr) {
2434 u64 ls_range = req_addr - addr;
2435 struct drm_gpuva_op_map p = {
2436 .va.addr = addr,
2437 .va.range = ls_range,
2438 .gem.obj = obj,
2439 .gem.offset = offset,
2440 };
2441 struct drm_gpuva_op_unmap u = { .va = va };
2442
2443 merge &= obj == req_obj &&
2444 offset + ls_range == req_offset;
2445 u.keep = merge;
2446
2447 if (end == req_end) {
2448 ret = op_remap_cb(ops, priv, &p, NULL, &u);
2449 if (ret)
2450 return ret;
2451
2452 if (madvise)
2453 op_map = req;
2454 break;
2455 }
2456
2457 if (end < req_end) {
2458 ret = op_remap_cb(ops, priv, &p, NULL, &u);
2459 if (ret)
2460 return ret;
2461
2462 if (madvise) {
2463 struct drm_gpuvm_map_req map_req = {
2464 .map.va.addr = req_addr,
2465 .map.va.range = end - req_addr,
2466 };
2467
2468 ret = op_map_cb(ops, priv, &map_req);
2469 if (ret)
2470 return ret;
2471 }
2472
2473 continue;
2474 }
2475
2476 if (end > req_end) {
2477 struct drm_gpuva_op_map n = {
2478 .va.addr = req_end,
2479 .va.range = end - req_end,
2480 .gem.obj = obj,
2481 .gem.offset = offset + ls_range +
2482 req_range,
2483 };
2484
2485 ret = op_remap_cb(ops, priv, &p, &n, &u);
2486 if (ret)
2487 return ret;
2488
2489 if (madvise)
2490 op_map = req;
2491 break;
2492 }
2493 } else if (addr > req_addr) {
2494 merge &= obj == req_obj &&
2495 offset == req_offset +
2496 (addr - req_addr);
2497
2498 if (end == req_end) {
2499 ret = op_unmap_cb(ops, priv, va, merge, madvise);
2500 if (ret)
2501 return ret;
2502
2503 break;
2504 }
2505
2506 if (end < req_end) {
2507 ret = op_unmap_cb(ops, priv, va, merge, madvise);
2508 if (ret)
2509 return ret;
2510
2511 continue;
2512 }
2513
2514 if (end > req_end) {
2515 struct drm_gpuva_op_map n = {
2516 .va.addr = req_end,
2517 .va.range = end - req_end,
2518 .gem.obj = obj,
2519 .gem.offset = offset + req_end - addr,
2520 };
2521 struct drm_gpuva_op_unmap u = {
2522 .va = va,
2523 .keep = merge,
2524 };
2525
2526 ret = op_remap_cb(ops, priv, NULL, &n, &u);
2527 if (ret)
2528 return ret;
2529
2530 if (madvise) {
2531 struct drm_gpuvm_map_req map_req = {
2532 .map.va.addr = addr,
2533 .map.va.range = req_end - addr,
2534 };
2535
2536 return op_map_cb(ops, priv, &map_req);
2537 }
2538 break;
2539 }
2540 }
2541 }
2542 return op_map_cb(ops, priv, op_map);
2543}
2544
2545static int
2546__drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
2547 const struct drm_gpuvm_ops *ops, void *priv,
2548 u64 req_addr, u64 req_range)
2549{
2550 struct drm_gpuva *va, *next;
2551 u64 req_end = req_addr + req_range;
2552 int ret;
2553
2554 if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
2555 return -EINVAL;
2556
2557 drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
2558 struct drm_gpuva_op_map prev = {}, next = {};
2559 bool prev_split = false, next_split = false;
2560 struct drm_gem_object *obj = va->gem.obj;
2561 u64 offset = va->gem.offset;
2562 u64 addr = va->va.addr;
2563 u64 range = va->va.range;
2564 u64 end = addr + range;
2565
2566 if (addr < req_addr) {
2567 prev.va.addr = addr;
2568 prev.va.range = req_addr - addr;
2569 prev.gem.obj = obj;
2570 prev.gem.offset = offset;
2571
2572 prev_split = true;
2573 }
2574
2575 if (end > req_end) {
2576 next.va.addr = req_end;
2577 next.va.range = end - req_end;
2578 next.gem.obj = obj;
2579 next.gem.offset = offset + (req_end - addr);
2580
2581 next_split = true;
2582 }
2583
2584 if (prev_split || next_split) {
2585 struct drm_gpuva_op_unmap unmap = { .va = va };
2586
2587 ret = op_remap_cb(ops, priv,
2588 prev_split ? &prev : NULL,
2589 next_split ? &next : NULL,
2590 &unmap);
2591 if (ret)
2592 return ret;
2593 } else {
2594 ret = op_unmap_cb(ops, priv, va, false, false);
2595 if (ret)
2596 return ret;
2597 }
2598 }
2599
2600 return 0;
2601}
2602
2603/**
2604 * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
2605 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2606 * @priv: pointer to a driver private data structure
2607 * @req: ptr to struct drm_gpuvm_map_req
2608 *
2609 * This function iterates the given range of the GPU VA space. It utilizes the
2610 * &drm_gpuvm_ops to call back into the driver providing the split and merge
2611 * steps.
2612 *
2613 * Drivers may use these callbacks to update the GPU VA space right away within
2614 * the callback. In case the driver decides to copy and store the operations for
2615 * later processing, neither this function nor &drm_gpuvm_sm_unmap is allowed to
2616 * be called before the &drm_gpuvm's view of the GPU VA space was
2617 * updated with the previous set of operations. To update the
2618 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2619 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2620 * used.
2621 *
2622 * A sequence of callbacks can contain map, unmap and remap operations, but
2623 * the sequence of callbacks might also be empty if no operation is required,
2624 * e.g. if the requested mapping already exists in the exact same way.
2625 *
2626 * There can be an arbitrary amount of unmap operations, a maximum of two remap
2627 * operations and a single map operation. The latter one represents the original
2628 * map operation requested by the caller.
2629 *
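 * A minimal wiring sketch, assuming the &drm_gpuvm_ops below were passed to
 * drm_gpuvm_init() and that the hypothetical driver_sm_step_*() callbacks
 * update the driver's page tables directly::
 *
 *	static const struct drm_gpuvm_ops driver_gpuvm_ops = {
 *		.sm_step_map = driver_sm_step_map,
 *		.sm_step_remap = driver_sm_step_remap,
 *		.sm_step_unmap = driver_sm_step_unmap,
 *	};
 *
 *	ret = drm_gpuvm_sm_map(gpuvm, driver_priv, &req);
 *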
2630 * Returns: 0 on success or a negative error code
2631 */
2632int
2633drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
2634 const struct drm_gpuvm_map_req *req)
2635{
2636 const struct drm_gpuvm_ops *ops = gpuvm->ops;
2637
2638 if (unlikely(!(ops && ops->sm_step_map &&
2639 ops->sm_step_remap &&
2640 ops->sm_step_unmap)))
2641 return -EINVAL;
2642
2643 return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
2644}
2645EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
2646
2647/**
2648 * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
2649 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2650 * @priv: pointer to a driver private data structure
2651 * @req_addr: the start address of the range to unmap
2652 * @req_range: the range of the mappings to unmap
2653 *
2654 * This function iterates the given range of the GPU VA space. It utilizes the
2655 * &drm_gpuvm_ops to call back into the driver providing the operations to
2656 * unmap and, if required, split existing mappings.
2657 *
2658 * Drivers may use these callbacks to update the GPU VA space right away within
2659 * the callback. In case the driver decides to copy and store the operations for
2660 * later processing, neither this function nor &drm_gpuvm_sm_map is allowed to be
2661 * called before the &drm_gpuvm's view of the GPU VA space was updated
2662 * with the previous set of operations. To update the &drm_gpuvm's view
2663 * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
2664 * drm_gpuva_destroy_unlocked() should be used.
2665 *
2666 * A sequence of callbacks can contain unmap and remap operations, depending on
2667 * whether there are actual overlapping mappings to split.
2668 *
2669 * There can be an arbitrary amount of unmap operations and a maximum of two
2670 * remap operations.
2671 *
2672 * Returns: 0 on success or a negative error code
2673 */
2674int
2675drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
2676 u64 req_addr, u64 req_range)
2677{
2678 const struct drm_gpuvm_ops *ops = gpuvm->ops;
2679
2680 if (unlikely(!(ops && ops->sm_step_remap &&
2681 ops->sm_step_unmap)))
2682 return -EINVAL;
2683
2684 return __drm_gpuvm_sm_unmap(gpuvm, ops, priv,
2685 req_addr, req_range);
2686}
2687EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
2688
2689static int
2690drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
2691{
2692 struct drm_exec *exec = priv;
2693
2694 switch (op->op) {
2695 case DRM_GPUVA_OP_REMAP:
2696 if (op->remap.unmap->va->gem.obj)
2697 return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
2698 return 0;
2699 case DRM_GPUVA_OP_UNMAP:
2700 if (op->unmap.va->gem.obj)
2701 return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
2702 return 0;
2703 default:
2704 return 0;
2705 }
2706}
2707
2708static const struct drm_gpuvm_ops lock_ops = {
2709 .sm_step_map = drm_gpuva_sm_step_lock,
2710 .sm_step_remap = drm_gpuva_sm_step_lock,
2711 .sm_step_unmap = drm_gpuva_sm_step_lock,
2712};
2713
2714/**
2715 * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
2716 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2717 * @exec: the &drm_exec locking context
2718 * @num_fences: for newly mapped objects, the # of fences to reserve
2719 * @req: ptr to drm_gpuvm_map_req struct
2720 *
2721 * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
2722 * remapped, and locks+prepares (drm_exec_prepare_object()) objects that
2723 * will be newly mapped.
2724 *
2725 * The expected usage is::
2726 *
2727 * vm_bind {
2728 * struct drm_exec exec;
2729 *
2730 * // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
2731 * drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
2732 *
2733 * drm_exec_until_all_locked (&exec) {
2734 * for_each_vm_bind_operation {
2735 * switch (op->op) {
2736 * case DRIVER_OP_UNMAP:
2737 * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
2738 * break;
2739 * case DRIVER_OP_MAP:
2740 * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
2741 * break;
2742 * }
2743 *
2744 * drm_exec_retry_on_contention(&exec);
2745 * if (ret)
2746 * return ret;
2747 * }
2748 * }
2749 * }
2750 *
2751 * This enables all locking to be performed before the driver begins modifying
2752 * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
2753 * where an earlier op can alter the sequence of steps generated for a later
2754 * op, because the later altered step will involve the same GEM object(s)
2755 * already seen in the earlier locking step. For example:
2756 *
2757 * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
2758 * DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
2759 * locked the GEM object in the earlier DRIVER_OP_UNMAP op.
2760 *
2761 * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
2762 * op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
2763 * required without the earlier DRIVER_OP_MAP. This is safe because we've
2764 * already locked the GEM object in the earlier DRIVER_OP_MAP step.
2765 *
2766 * Returns: 0 on success or a negative error code
2767 */
2768int
2769drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
2770 struct drm_exec *exec, unsigned int num_fences,
2771 struct drm_gpuvm_map_req *req)
2772{
2773 struct drm_gem_object *req_obj = req->map.gem.obj;
2774
2775 if (req_obj) {
2776 int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
2777 if (ret)
2778 return ret;
2779 }
2780
2781 return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
2783}
2784EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
2785
2786/**
2787 * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
2788 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2789 * @exec: the &drm_exec locking context
2790 * @req_addr: the start address of the range to unmap
2791 * @req_range: the range of the mappings to unmap
2792 *
2793 * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
2794 * remapped by drm_gpuvm_sm_unmap().
2795 *
2796 * See drm_gpuvm_sm_map_exec_lock() for expected usage.
2797 *
2798 * Returns: 0 on success or a negative error code
2799 */
2800int
2801drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
2802 u64 req_addr, u64 req_range)
2803{
2804 return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
2805 req_addr, req_range);
2806}
2807EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
2808
2809static struct drm_gpuva_op *
2810gpuva_op_alloc(struct drm_gpuvm *gpuvm)
2811{
2812 const struct drm_gpuvm_ops *fn = gpuvm->ops;
2813 struct drm_gpuva_op *op;
2814
2815 if (fn && fn->op_alloc)
2816 op = fn->op_alloc();
2817 else
2818 op = kzalloc(sizeof(*op), GFP_KERNEL);
2819
2820 if (unlikely(!op))
2821 return NULL;
2822
2823 return op;
2824}
2825
2826static void
2827gpuva_op_free(struct drm_gpuvm *gpuvm,
2828 struct drm_gpuva_op *op)
2829{
2830 const struct drm_gpuvm_ops *fn = gpuvm->ops;
2831
2832 if (fn && fn->op_free)
2833 fn->op_free(op);
2834 else
2835 kfree(op);
2836}
2837
2838static int
2839drm_gpuva_sm_step(struct drm_gpuva_op *__op,
2840 void *priv)
2841{
2842 struct {
2843 struct drm_gpuvm *vm;
2844 struct drm_gpuva_ops *ops;
2845 } *args = priv;
2846 struct drm_gpuvm *gpuvm = args->vm;
2847 struct drm_gpuva_ops *ops = args->ops;
2848 struct drm_gpuva_op *op;
2849
2850 op = gpuva_op_alloc(gpuvm);
2851 if (unlikely(!op))
2852 goto err;
2853
2854 memcpy(op, __op, sizeof(*op));
2855
2856 if (op->op == DRM_GPUVA_OP_REMAP) {
2857 struct drm_gpuva_op_remap *__r = &__op->remap;
2858 struct drm_gpuva_op_remap *r = &op->remap;
2859
2860 r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
2861 GFP_KERNEL);
2862 if (unlikely(!r->unmap))
2863 goto err_free_op;
2864
2865 if (__r->prev) {
2866 r->prev = kmemdup(__r->prev, sizeof(*r->prev),
2867 GFP_KERNEL);
2868 if (unlikely(!r->prev))
2869 goto err_free_unmap;
2870 }
2871
2872 if (__r->next) {
2873 r->next = kmemdup(__r->next, sizeof(*r->next),
2874 GFP_KERNEL);
2875 if (unlikely(!r->next))
2876 goto err_free_prev;
2877 }
2878 }
2879
2880 list_add_tail(&op->entry, &ops->list);
2881
2882 return 0;
2883
2884err_free_prev:
2885	kfree(op->remap.prev);
2886err_free_unmap:
2887	kfree(op->remap.unmap);
2888err_free_op:
2889 gpuva_op_free(gpuvm, op);
2890err:
2891 return -ENOMEM;
2892}
2893
2894static const struct drm_gpuvm_ops gpuvm_list_ops = {
2895 .sm_step_map = drm_gpuva_sm_step,
2896 .sm_step_remap = drm_gpuva_sm_step,
2897 .sm_step_unmap = drm_gpuva_sm_step,
2898};
2899
2900static struct drm_gpuva_ops *
2901__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2902 const struct drm_gpuvm_map_req *req,
2903 bool madvise)
2904{
2905 struct drm_gpuva_ops *ops;
2906 struct {
2907 struct drm_gpuvm *vm;
2908 struct drm_gpuva_ops *ops;
2909 } args;
2910 int ret;
2911
2912 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
2913 if (unlikely(!ops))
2914 return ERR_PTR(-ENOMEM);
2915
2916 INIT_LIST_HEAD(&ops->list);
2917
2918 args.vm = gpuvm;
2919 args.ops = ops;
2920
2921 ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
2922 if (ret)
2923 goto err_free_ops;
2924
2925 return ops;
2926
2927err_free_ops:
2928 drm_gpuva_ops_free(gpuvm, ops);
2929 return ERR_PTR(ret);
2930}
2931
2932/**
2933 * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
2934 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2935 * @req: map request arguments
2936 *
2937 * This function creates a list of operations to perform splitting and merging
2938 * of existing mapping(s) with the newly requested one.
2939 *
2940 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2941 * in the given order. It can contain map, unmap and remap operations, but it
2942 * also can be empty if no operation is required, e.g. if the requested mapping
2943 * already exists in the exact same way.
2944 *
2945 * There can be an arbitrary amount of unmap operations, a maximum of two remap
2946 * operations and a single map operation. The latter one represents the original
2947 * map operation requested by the caller.
2948 *
2949 * Note that before calling this function again with another mapping request it
2950 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2951 * previously obtained operations must be either processed or abandoned. To
2952 * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2953 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2954 * used.
2955 *
2956 * After the caller finished processing the returned &drm_gpuva_ops, they must
2957 * be freed with &drm_gpuva_ops_free.
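 *
 * A minimal processing sketch, where driver_handle_map(), driver_handle_remap()
 * and driver_handle_unmap() are hypothetical driver helpers::
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			ret = driver_handle_map(gpuvm, &op->map);
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			ret = driver_handle_remap(gpuvm, &op->remap);
 *			break;
 *		case DRM_GPUVA_OP_UNMAP:
 *			ret = driver_handle_unmap(gpuvm, &op->unmap);
 *			break;
 *		default:
 *			break;
 *		}
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);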
2958 *
2959 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2960 */
2961struct drm_gpuva_ops *
2962drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
2963 const struct drm_gpuvm_map_req *req)
2964{
2965 return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
2966}
2967EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
2968
2969/**
2970 * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
2971 * @gpuvm: the &drm_gpuvm representing the GPU VA space
2972 * @req: map request arguments
2973 *
2974 * This function creates a list of operations to perform splitting
2975 * of existing mapping(s) at their start or end, based on the requested mapping.
2976 *
2977 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
2978 * in the given order. It can contain map and remap operations, but it
2979 * also can be empty if no operation is required, e.g. if the requested mapping
2980 * already exists in the exact same way.
2981 *
2982 * There will be no unmap operations, a maximum of two remap operations and up
2983 * to two map operations: one from the requested start to the end of the first
2984 * overlapping mapping, and one from the start of the last one to the requested end.
2985 *
2986 * Note that before calling this function again with another mapping request it
2987 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
2988 * previously obtained operations must be either processed or abandoned. To
2989 * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
2990 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
2991 * used.
2992 *
2993 * After the caller finished processing the returned &drm_gpuva_ops, they must
2994 * be freed with &drm_gpuva_ops_free.
2995 *
2996 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
2997 */
2998struct drm_gpuva_ops *
2999drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
3000 const struct drm_gpuvm_map_req *req)
3001{
3002 return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
3003}
3004EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
3005
3006/**
3007 * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
3008 * unmap
3009 * @gpuvm: the &drm_gpuvm representing the GPU VA space
3010 * @req_addr: the start address of the range to unmap
3011 * @req_range: the range of the mappings to unmap
3012 *
3013 * This function creates a list of operations to perform unmapping and, if
3014 * required, splitting of the mappings overlapping the unmap range.
3015 *
3016 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
3017 * in the given order. It can contain unmap and remap operations, depending on
3018 * whether there are actual overlapping mappings to split.
3019 *
3020 * There can be an arbitrary amount of unmap operations and a maximum of two
3021 * remap operations.
3022 *
3023 * Note that before calling this function again with another range to unmap it
3024 * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
3025 * previously obtained operations must be processed or abandoned. To update the
3026 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
3027 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
3028 * used.
3029 *
3030 * After the caller finished processing the returned &drm_gpuva_ops, they must
3031 * be freed with &drm_gpuva_ops_free.
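 *
 * A minimal processing sketch, where driver_unmap_gpuva() and
 * driver_remap_gpuva() are hypothetical driver helpers::
 *
 *	ops = drm_gpuvm_sm_unmap_ops_create(gpuvm, req_addr, req_range);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		if (op->op == DRM_GPUVA_OP_UNMAP)
 *			driver_unmap_gpuva(op->unmap.va);
 *		else if (op->op == DRM_GPUVA_OP_REMAP)
 *			driver_remap_gpuva(&op->remap);
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);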
3032 *
3033 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
3034 */
3035struct drm_gpuva_ops *
3036drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
3037 u64 req_addr, u64 req_range)
3038{
3039 struct drm_gpuva_ops *ops;
3040 struct {
3041 struct drm_gpuvm *vm;
3042 struct drm_gpuva_ops *ops;
3043 } args;
3044 int ret;
3045
3046 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
3047 if (unlikely(!ops))
3048 return ERR_PTR(-ENOMEM);
3049
3050 INIT_LIST_HEAD(&ops->list);
3051
3052 args.vm = gpuvm;
3053 args.ops = ops;
3054
3055 ret = __drm_gpuvm_sm_unmap(gpuvm, &gpuvm_list_ops, &args,
3056 req_addr, req_range);
3057 if (ret)
3058 goto err_free_ops;
3059
3060 return ops;
3061
3062err_free_ops:
3063 drm_gpuva_ops_free(gpuvm, ops);
3064 return ERR_PTR(ret);
3065}
3066EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_ops_create);
3067
3068/**
3069 * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
3070 * @gpuvm: the &drm_gpuvm representing the GPU VA space
3071 * @addr: the start address of the range to prefetch
3072 * @range: the range of the mappings to prefetch
3073 *
3074 * This function creates a list of operations to perform prefetching.
3075 *
3076 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
3077 * in the given order. It can contain prefetch operations.
3078 *
3079 * There can be an arbitrary amount of prefetch operations.
3080 *
3081 * After the caller finished processing the returned &drm_gpuva_ops, they must
3082 * be freed with &drm_gpuva_ops_free.
3083 *
3084 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
3085 */
3086struct drm_gpuva_ops *
3087drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
3088 u64 addr, u64 range)
3089{
3090 struct drm_gpuva_ops *ops;
3091 struct drm_gpuva_op *op;
3092 struct drm_gpuva *va;
3093 u64 end = addr + range;
3094 int ret;
3095
3096 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
3097 if (!ops)
3098 return ERR_PTR(-ENOMEM);
3099
3100 INIT_LIST_HEAD(&ops->list);
3101
3102 drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
3103 op = gpuva_op_alloc(gpuvm);
3104 if (!op) {
3105 ret = -ENOMEM;
3106 goto err_free_ops;
3107 }
3108
3109 op->op = DRM_GPUVA_OP_PREFETCH;
3110 op->prefetch.va = va;
3111 list_add_tail(&op->entry, &ops->list);
3112 }
3113
3114 return ops;
3115
3116err_free_ops:
3117 drm_gpuva_ops_free(gpuvm, ops);
3118 return ERR_PTR(ret);
3119}
3120EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
3121
3122/**
3123 * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
3124 * @vm_bo: the &drm_gpuvm_bo abstraction
3125 *
3126 * This function creates a list of operations to perform unmapping for every
3127 * GPUVA attached to a GEM.
3128 *
3129 * The list can be iterated with &drm_gpuva_for_each_op and consists out of an
3130 * arbitrary amount of unmap operations.
3131 *
3132 * After the caller finished processing the returned &drm_gpuva_ops, they must
3133 * be freed with &drm_gpuva_ops_free.
3134 *
3135 * This function expects the caller to protect the GEM's GPUVA list against
3136 * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
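 *
 * A minimal sketch, e.g. for tearing down all mappings of a GEM on VM
 * destruction, where driver_unmap_gpuva() is a hypothetical driver helper::
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_unmap_gpuva(op->unmap.va);
 *
 *	drm_gpuva_ops_free(vm_bo->vm, ops);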
3137 *
3138 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
3139 */
3140struct drm_gpuva_ops *
3141drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
3142{
3143 struct drm_gpuva_ops *ops;
3144 struct drm_gpuva_op *op;
3145 struct drm_gpuva *va;
3146 int ret;
3147
3148 drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj);
3149
3150 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
3151 if (!ops)
3152 return ERR_PTR(-ENOMEM);
3153
3154 INIT_LIST_HEAD(&ops->list);
3155
3156 drm_gpuvm_bo_for_each_va(va, vm_bo) {
3157 op = gpuva_op_alloc(vm_bo->vm);
3158 if (!op) {
3159 ret = -ENOMEM;
3160 goto err_free_ops;
3161 }
3162
3163 op->op = DRM_GPUVA_OP_UNMAP;
3164 op->unmap.va = va;
3165 list_add_tail(&op->entry, &ops->list);
3166 }
3167
3168 return ops;
3169
3170err_free_ops:
3171 drm_gpuva_ops_free(vm_bo->vm, ops);
3172 return ERR_PTR(ret);
3173}
3174EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create);
3175
3176/**
3177 * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
3178 * @gpuvm: the &drm_gpuvm the ops were created for
3179 * @ops: the &drm_gpuva_ops to free
3180 *
3181 * Frees the given &drm_gpuva_ops structure including all the ops associated
3182 * with it.
3183 */
3184void
3185drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
3186 struct drm_gpuva_ops *ops)
3187{
3188 struct drm_gpuva_op *op, *next;
3189
3190 drm_gpuva_for_each_op_safe(op, next, ops) {
3191 list_del(&op->entry);
3192
3193 if (op->op == DRM_GPUVA_OP_REMAP) {
3194 kfree(op->remap.prev);
3195 kfree(op->remap.next);
3196 kfree(op->remap.unmap);
3197 }
3198
3199 gpuva_op_free(gpuvm, op);
3200 }
3201
3202 kfree(ops);
3203}
3204EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
3205
3206MODULE_DESCRIPTION("DRM GPUVM");
3207MODULE_LICENSE("GPL");