/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"

#include <linux/delay.h>

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

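/* Resolve the owning virtio_gpu_device from an embedded ttm_bo_device. */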
static struct virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
{
	struct virtio_gpu_mman *mman;
	struct virtio_gpu_device *vgdev;

	mman = container_of(bdev, struct virtio_gpu_mman, bdev);
	vgdev = container_of(mman, struct virtio_gpu_device, mman);
	return vgdev;
}

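/*
 * mmap file operation for virtio-gpu DRM files: every buffer object is
 * backed by TTM, so the VMA is simply handed over to ttm_bo_mmap().
 */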
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct virtio_gpu_device *vgdev;
	int r;

	file_priv = filp->private_data;
	vgdev = file_priv->minor->dev->dev_private;
	if (vgdev == NULL) {
		DRM_ERROR(
		 "filp->private_data->minor->dev->dev_private == NULL\n");
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);

	return r;
}

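/* virtio-gpu has no GPU-visible caches to invalidate, so this is a no-op. */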
static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
					uint32_t flags)
{
	return 0;
}

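/*
 * Dummy memory manager backing the TTM_PL_TT placement.  Buffer objects
 * always live in guest system memory, so there is no real address space
 * to carve up; get_node merely marks the node as allocated by storing a
 * non-NULL cookie in mm_node.
 */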
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	mem->mm_node = (void *)1;
	return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	mem->mm_node = (void *)NULL;
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     struct drm_printer *printer)
{
}

static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};

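/*
 * Both supported placements (SYSTEM and TT) are mappable, cacheable
 * system memory; TT additionally uses the dummy manager above.
 */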
static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				    struct ttm_mem_type_manager *man)
{
	struct virtio_gpu_device *vgdev;

	vgdev = virtio_gpu_get_vgdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &virtio_gpu_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
		return -EINVAL;
	}
	return 0;
}

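/* On eviction, objects simply fall back to cached system memory. */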
static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn  = 0,
		.lpfn  = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
	};

	placement->placement = &placements;
	placement->busy_placement = &placements;
	placement->num_placement = 1;
	placement->num_busy_placement = 1;
}

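/* No per-open access restrictions: any opener may map any object. */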
static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
				    struct file *filp)
{
	return 0;
}

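/*
 * Neither placement resolves to I/O memory, so there is no aperture
 * to reserve (or free, below); the bus fields are cleared accordingly.
 */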
static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
					 struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case TTM_PL_TT:
		/* system memory */
		return 0;
	default:
		return -EINVAL;
	}
	return 0;
}

static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
				       struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct virtio_gpu_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct virtio_gpu_device	*vgdev;
	u64				offset;
};

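/*
 * Binding only records the GTT offset; the pages are attached to the
 * host-side resource from the move_notify callback instead.
 */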
static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
				       struct ttm_mem_reg *bo_mem)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages)
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);

	/* Not implemented */
	return 0;
}

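/* Unbinding likewise happens through move_notify; nothing to undo here. */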
static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return 0;
}

static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func virtio_gpu_backend_func = {
	.bind = &virtio_gpu_ttm_backend_bind,
	.unbind = &virtio_gpu_ttm_backend_unbind,
	.destroy = &virtio_gpu_ttm_backend_destroy,
};

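/*
 * Allocate the per-object backend state.  The embedded ttm_dma_tt
 * carries the page and DMA-address arrays later used to build the
 * object's scatter/gather table.
 */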
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
					       uint32_t page_flags)
{
	struct virtio_gpu_device *vgdev;
	struct virtio_gpu_ttm_tt *gtt;

	vgdev = virtio_gpu_get_vgdev(bo->bdev);
	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
	gtt->vgdev = vgdev;
	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

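/*
 * "Moving" an object just adopts the new memory region; with nothing
 * but system memory in play there is never any data to copy.
 */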
static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
				 struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

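/* Wait for outstanding GPU work on the object, then do a null move. */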
static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_operation_ctx *ctx,
			      struct ttm_mem_reg *new_mem)
{
	int ret;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	virtio_gpu_move_null(bo, new_mem);
	return 0;
}

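/*
 * Attach the backing pages to (or detach them from) the host-side
 * resource as the object moves into or out of the TT placement.
 */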
static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
				      bool evict,
				      struct ttm_mem_reg *new_mem)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
		if (bo->hw_res_handle)
			virtio_gpu_object_detach(vgdev, bo);
	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
		if (bo->hw_res_handle)
			virtio_gpu_object_attach(vgdev, bo, NULL);
	}
}

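/* The pages are about to be swapped out: drop their sg table first. */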
static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
}

static struct ttm_bo_driver virtio_gpu_bo_driver = {
	.ttm_tt_create = &virtio_gpu_ttm_tt_create,
	.invalidate_caches = &virtio_gpu_invalidate_caches,
	.init_mem_type = &virtio_gpu_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &virtio_gpu_evict_flags,
	.move = &virtio_gpu_bo_move,
	.verify_access = &virtio_gpu_verify_access,
	.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
	.move_notify = &virtio_gpu_bo_move_notify,
	.swap_notify = &virtio_gpu_bo_swap_notify,
};

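/*
 * Create the TTM device and a TT "heap"; the size passed to
 * ttm_bo_init_mm() is irrelevant since the dummy manager ignores it.
 */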
int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
	int r;

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&vgdev->mman.bdev,
			       &virtio_gpu_bo_driver,
			       vgdev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET, 0);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
		goto err_dev_init;
	}

	r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		goto err_mm_init;
	}
	return 0;

err_mm_init:
	ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
	return r;
}

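/* Tear down everything set up by virtio_gpu_ttm_init(). */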
void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
	ttm_bo_device_release(&vgdev->mman.bdev);
	DRM_INFO("virtio_gpu: ttm finalized\n");
}