Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/virtio: move virtio_gpu_object_{attach, detach} calls.

Drop the dummy ttm backend implementation, add a real one for
TTM_PL_FLAG_TT objects. The bind/unbind callbacks will call
virtio_gpu_object_{attach,detach}, to update the object state
on the host side, instead of invoking those calls from the
move_notify() callback.

With that in place the move and move_notify callbacks are not
needed any more, so drop them.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Noralf Trønnes <noralf@tronnes.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20190318113332.10900-2-kraxel@redhat.com

+24 -68
+24 -68
drivers/gpu/drm/virtio/virtgpu_ttm.c
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -190,6 +190,5 @@
  */
 struct virtio_gpu_ttm_tt {
 	struct ttm_dma_tt		ttm;
-	struct virtio_gpu_device	*vgdev;
-	u64				offset;
+	struct virtio_gpu_object	*obj;
 };
 
-static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
-				       struct ttm_mem_reg *bo_mem)
+static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
+				  struct ttm_mem_reg *bo_mem)
 {
-	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+	struct virtio_gpu_ttm_tt *gtt =
+		container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+	struct virtio_gpu_device *vgdev =
+		virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
 
-	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
-	if (!ttm->num_pages)
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
-		     ttm->num_pages, bo_mem, ttm);
-
-	/* Not implemented */
+	virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
 	return 0;
 }
 
-static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
 {
-	/* Not implemented */
+	struct virtio_gpu_ttm_tt *gtt =
+		container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+	struct virtio_gpu_device *vgdev =
+		virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
+
+	virtio_gpu_object_detach(vgdev, gtt->obj);
 	return 0;
 }
 
-static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
 {
-	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+	struct virtio_gpu_ttm_tt *gtt =
+		container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
 
 	ttm_dma_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }
 
-static struct ttm_backend_func virtio_gpu_backend_func = {
-	.bind = &virtio_gpu_ttm_backend_bind,
-	.unbind = &virtio_gpu_ttm_backend_unbind,
-	.destroy = &virtio_gpu_ttm_backend_destroy,
+static struct ttm_backend_func virtio_gpu_tt_func = {
+	.bind = &virtio_gpu_ttm_tt_bind,
+	.unbind = &virtio_gpu_ttm_tt_unbind,
+	.destroy = &virtio_gpu_ttm_tt_destroy,
 };
 
 static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
@@ -238,61 +238,13 @@
 	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL)
 		return NULL;
-	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
-	gtt->vgdev = vgdev;
+	gtt->ttm.ttm.func = &virtio_gpu_tt_func;
+	gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
 	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
 		kfree(gtt);
 		return NULL;
 	}
 	return &gtt->ttm.ttm;
-}
-
-static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
-				 struct ttm_mem_reg *new_mem)
-{
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	BUG_ON(old_mem->mm_node != NULL);
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-}
-
-static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
-			      struct ttm_operation_ctx *ctx,
-			      struct ttm_mem_reg *new_mem)
-{
-	int ret;
-
-	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-	if (ret)
-		return ret;
-
-	virtio_gpu_move_null(bo, new_mem);
-	return 0;
-}
-
-static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
-				      bool evict,
-				      struct ttm_mem_reg *new_mem)
-{
-	struct virtio_gpu_object *bo;
-	struct virtio_gpu_device *vgdev;
-
-	bo = container_of(tbo, struct virtio_gpu_object, tbo);
-	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
-	if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
-		if (bo->hw_res_handle)
-			virtio_gpu_object_detach(vgdev, bo);
-
-	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
-		if (bo->hw_res_handle) {
-			virtio_gpu_object_attach(vgdev, bo, NULL);
-		}
-	}
 }
 
 static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
@@ -308,11 +260,9 @@
 	.init_mem_type = &virtio_gpu_init_mem_type,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &virtio_gpu_evict_flags,
-	.move = &virtio_gpu_bo_move,
 	.verify_access = &virtio_gpu_verify_access,
 	.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
 	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
-	.move_notify = &virtio_gpu_bo_move_notify,
 	.swap_notify = &virtio_gpu_bo_swap_notify,
 };