// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

/*
 * If we set up the screen target otable, screen objects stop working.
 */

#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
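/*
 * VMW_OTABLE_SETUP_SUB is the number of trailing otable batch entries to
 * drop when the screen target otable is disabled; it is presumably
 * consumed by driver setup code outside this file.
 */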

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2
#endif
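/*
 * A page table entry holds a page frame number (PPN). With 64-bit DMA
 * addresses each entry is 8 bytes wide and the 64-bit MOB format depth
 * enums are used; with 32-bit addresses an entry is 4 bytes.
 */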

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo: Buffer object holding the page table pages.
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id: Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable_bo: Buffer object holding the object tables
 * @offset: Offset of the table into @otable_bo
 * @otable: Pointer to otable metadata
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
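		/*
		 * vmw_mob_pt_setup() leaves the raw indirection level
		 * (1 or 2) in pt_level, which coincides with the 32-bit
		 * SVGA3D_MOBFMT_PTDEPTH_n values; the line below rebases
		 * it onto the 64-bit depth enums when CONFIG_64BIT is
		 * set, and is a no-op otherwise.
		 */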
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support a depth-2 page table here, but the
	 * otable sizes are determined at compile time, so this BUG should
	 * never trigger at runtime.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable: Pointer to otable metadata
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for OTable takedown.\n");
		return;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

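	/*
	 * Fence the page-table BO so it isn't released or moved until the
	 * device has processed the takedown command above.
	 */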
	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, NULL);
		BUG_ON(ret != 0);

		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables = batch->otables;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	SVGAOTableType i;
	int ret;

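	/*
	 * All enabled otables share a single buffer object; each table is
	 * rounded up to a page boundary so it can be handed to the device
	 * at a page-granular offset within that BO.
	 */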
	bo_size = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &batch->otable_bo);
	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(batch->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(batch->otable_bo);

	offset = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset, &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	return 0;

out_unreserve:
	ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
out_no_bo:
	return ret;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest-backed surface initialization by setting
 * up the guest backed memory object tables. Returns 0 on success and
 * various error codes on failure. A successful return means the object
 * tables can be taken down using the vmw_otables_takedown function.
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (dev_priv->has_dx) {
		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
				   GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
				      struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

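	/*
	 * Fence the shared otable BO so its pages stay put until the
	 * device has retired the takedown commands, then drop our
	 * reference.
	 */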
	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unreserve(bo);

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Take down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages: Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	return tot_size >> PAGE_SHIFT;
}
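/*
 * Worked example, assuming 4 KiB pages and VMW_PPN_SIZE == 8: one page
 * table page holds 4096 / 8 = 512 entries, so a 1 GiB mob (262144 data
 * pages) needs 512 level-1 pages plus a single level-2 page describing
 * them, i.e. 513 page table pages in total.
 */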

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages: Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(!mob))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

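/*
 * Typical usage, as a sketch only: a caller creates a mob sized for the
 * backing buffer, then binds it once the backing pages are known. Here
 * bo, dev_priv and mob_id stand in for whatever the caller has at hand:
 *
 *	mob = vmw_mob_create(bo->num_pages);
 *	if (mob)
 *		ret = vmw_mob_bind(dev_priv, mob, vmw_bo_sg_table(bo),
 *				   bo->num_pages, mob_id);
 */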
/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv: Pointer to a device private structure.
 * @mob: Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable, and
 * adjusts TTM memory accounting accordingly. Returns -ENOMEM if the
 * memory resources aren't sufficient, and may cause TTM buffer objects
 * to be swapped out through the TTM memory accounting machinery.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	BUG_ON(mob->pt_bo != NULL);

	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_put(mob->pt_bo);
	mob->pt_bo = NULL;

	return ret;
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The DMA address of the page backing the entry.
 *
 * Assigns a page frame number to the page table entry pointed to by *@addr
 * and increments *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
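	/*
	 * An 8-byte PPN spans two consecutive u32 slots; store it with a
	 * single u64 write and advance the cursor by two entries.
	 */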
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter: Page iterator over the underlying buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter: Page iterator over the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_iter: Page iterator over the buffer object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Iteratively builds a multilevel mob page table from the bottom up: each
 * pass writes one table level and then treats that level's pages as the
 * data for the next, higher level.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
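	/*
	 * Each iteration writes one page table level describing the pages
	 * of the previous one; the loop ends once a single page suffices,
	 * and that page becomes the root of the table.
	 */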
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob: Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo) {
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory Object unbinding.\n");
	} else {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}
	if (bo) {
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob we're making visible.
 * @vsgt: Pointer to a struct vmw_sg_table describing the data pages of
 * the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
 * @mob_id: Device id of the mob to bind.
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
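		/*
		 * As in vmw_setup_otable_base(): rebase the generic page
		 * table depth onto the 64-bit depth enums on CONFIG_64BIT.
		 */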
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	vmw_fifo_resource_inc(dev_priv);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for Memory Object binding.\n");
		goto out_no_cmd_space;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up) {
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}

	return -ENOMEM;
}