Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm: Use drm_gem_object in submit bos table

Basically everywhere wants the base ptr type. So store that instead of
msm_gem_object.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/551021/

+40 -40
+3 -3
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 66 66 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) 67 67 { 68 68 struct msm_ringbuffer *ring = submit->ring; 69 - struct msm_gem_object *obj; 69 + struct drm_gem_object *obj; 70 70 uint32_t *ptr, dwords; 71 71 unsigned int i; 72 72 ··· 83 83 obj = submit->bos[submit->cmd[i].idx].obj; 84 84 dwords = submit->cmd[i].size; 85 85 86 - ptr = msm_gem_get_vaddr(&obj->base); 86 + ptr = msm_gem_get_vaddr(obj); 87 87 88 88 /* _get_vaddr() shouldn't fail at this point, 89 89 * since we've already mapped it once in ··· 103 103 OUT_RING(ring, ptr[i]); 104 104 } 105 105 106 - msm_gem_put_vaddr(&obj->base); 106 + msm_gem_put_vaddr(obj); 107 107 108 108 break; 109 109 }
+1 -1
drivers/gpu/drm/msm/msm_gem.h
··· 301 301 #define BO_VMA_PINNED 0x1000 /* vma (virtual address) is pinned */ 302 302 uint32_t flags; 303 303 union { 304 - struct msm_gem_object *obj; 304 + struct drm_gem_object *obj; 305 305 uint32_t handle; 306 306 }; 307 307 uint64_t iova;
+21 -21
drivers/gpu/drm/msm/msm_gem_submit.c
··· 165 165 166 166 drm_gem_object_get(obj); 167 167 168 - submit->bos[i].obj = to_msm_bo(obj); 168 + submit->bos[i].obj = obj; 169 169 } 170 170 171 171 out_unlock: ··· 251 251 static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, 252 252 unsigned cleanup_flags) 253 253 { 254 - struct drm_gem_object *obj = &submit->bos[i].obj->base; 254 + struct drm_gem_object *obj = submit->bos[i].obj; 255 255 unsigned flags = submit->bos[i].flags & cleanup_flags; 256 256 257 257 /* ··· 287 287 288 288 retry: 289 289 for (i = 0; i < submit->nr_bos; i++) { 290 - struct msm_gem_object *msm_obj = submit->bos[i].obj; 290 + struct drm_gem_object *obj = submit->bos[i].obj; 291 291 292 292 if (slow_locked == i) 293 293 slow_locked = -1; ··· 295 295 contended = i; 296 296 297 297 if (!(submit->bos[i].flags & BO_LOCKED)) { 298 - ret = dma_resv_lock_interruptible(msm_obj->base.resv, 298 + ret = dma_resv_lock_interruptible(obj->resv, 299 299 &submit->ticket); 300 300 if (ret) 301 301 goto fail; ··· 321 321 submit_unlock_unpin_bo(submit, slow_locked); 322 322 323 323 if (ret == -EDEADLK) { 324 - struct msm_gem_object *msm_obj = submit->bos[contended].obj; 324 + struct drm_gem_object *obj = submit->bos[contended].obj; 325 325 /* we lost out in a seqno race, lock and retry.. */ 326 - ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv, 326 + ret = dma_resv_lock_slow_interruptible(obj->resv, 327 327 &submit->ticket); 328 328 if (!ret) { 329 329 submit->bos[contended].flags |= BO_LOCKED; ··· 346 346 int i, ret = 0; 347 347 348 348 for (i = 0; i < submit->nr_bos; i++) { 349 - struct drm_gem_object *obj = &submit->bos[i].obj->base; 349 + struct drm_gem_object *obj = submit->bos[i].obj; 350 350 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; 351 351 352 352 /* NOTE: _reserve_shared() must happen before ··· 389 389 submit->valid = true; 390 390 391 391 for (i = 0; i < submit->nr_bos; i++) { 392 - struct drm_gem_object *obj = &submit->bos[i].obj->base; 392 + struct drm_gem_object *obj = submit->bos[i].obj; 393 393 struct msm_gem_vma *vma; 394 394 395 395 /* if locking succeeded, pin bo: */ ··· 424 424 int i; 425 425 426 426 for (i = 0; i < submit->nr_bos; i++) { 427 - struct drm_gem_object *obj = &submit->bos[i].obj->base; 427 + struct drm_gem_object *obj = submit->bos[i].obj; 428 428 429 429 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) 430 430 dma_resv_add_fence(obj->resv, submit->user_fence, ··· 436 436 } 437 437 438 438 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, 439 - struct msm_gem_object **obj, uint64_t *iova, bool *valid) 439 + struct drm_gem_object **obj, uint64_t *iova, bool *valid) 440 440 { 441 441 if (idx >= submit->nr_bos) { 442 442 DRM_ERROR("invalid buffer index: %u (out of %u)\n", ··· 455 455 } 456 456 457 457 /* process the reloc's and patch up the cmdstream as needed: */ 458 - static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj, 458 + static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj, 459 459 uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs) 460 460 { 461 461 uint32_t i, last_offset = 0; ··· 473 473 /* For now, just map the entire thing. Eventually we probably 474 474 * to do it page-by-page, w/ kmap() if not vmap()d.. 475 475 */ 476 - ptr = msm_gem_get_vaddr_locked(&obj->base); 476 + ptr = msm_gem_get_vaddr_locked(obj); 477 477 478 478 if (IS_ERR(ptr)) { 479 479 ret = PTR_ERR(ptr); ··· 497 497 /* offset in dwords: */ 498 498 off = submit_reloc.submit_offset / 4; 499 499 500 - if ((off >= (obj->base.size / 4)) || 500 + if ((off >= (obj->size / 4)) || 501 501 (off < last_offset)) { 502 502 DRM_ERROR("invalid offset %u at reloc %u\n", off, i); 503 503 ret = -EINVAL; ··· 524 524 } 525 525 526 526 out: 527 - msm_gem_put_vaddr_locked(&obj->base); 527 + msm_gem_put_vaddr_locked(obj); 528 528 529 529 return ret; 530 530 } ··· 542 542 cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED; 543 543 544 544 for (i = 0; i < submit->nr_bos; i++) { 545 - struct msm_gem_object *msm_obj = submit->bos[i].obj; 545 + struct drm_gem_object *obj = submit->bos[i].obj; 546 546 submit_cleanup_bo(submit, i, cleanup_flags); 547 547 if (error) 548 - drm_gem_object_put(&msm_obj->base); 548 + drm_gem_object_put(obj); 549 549 } 550 550 } ··· 554 554 int i; 555 555 556 556 for (i = 0; i < submit->nr_bos; i++) { 557 - struct drm_gem_object *obj = &submit->bos[i].obj->base; 557 + struct drm_gem_object *obj = submit->bos[i].obj; 558 558 559 559 drm_gem_object_put(obj); 560 560 } ··· 861 861 goto out; 862 862 863 863 for (i = 0; i < args->nr_cmds; i++) { 864 - struct msm_gem_object *msm_obj; 864 + struct drm_gem_object *obj; 865 865 uint64_t iova; 866 866 867 867 ret = submit_bo(submit, submit->cmd[i].idx, 868 - &msm_obj, &iova, NULL); 868 + &obj, &iova, NULL); 869 869 if (ret) 870 870 goto out; 871 871 872 872 if (!submit->cmd[i].size || 873 873 ((submit->cmd[i].size + submit->cmd[i].offset) > 874 - msm_obj->base.size / 4)) { 874 + obj->size / 4)) { 875 875 DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4); 876 876 ret = -EINVAL; 877 877 goto out; ··· 892 892 continue; 893 893 } 894 894 895 - ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4, 895 + ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4, 896 896 submit->cmd[i].nr_relocs, submit->cmd[i].relocs); 897 897 if (ret) 898 898 goto out;
+10 -10
drivers/gpu/drm/msm/msm_gpu.c
··· 219 219 } 220 220 221 221 static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state, 222 - struct msm_gem_object *obj, u64 iova, bool full) 222 + struct drm_gem_object *obj, u64 iova, bool full) 223 223 { 224 224 struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos]; 225 225 226 226 /* Don't record write only objects */ 227 - state_bo->size = obj->base.size; 227 + state_bo->size = obj->size; 228 228 state_bo->iova = iova; 229 229 230 - BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(obj->name)); 230 + BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(to_msm_bo(obj)->name)); 231 231 232 - memcpy(state_bo->name, obj->name, sizeof(state_bo->name)); 232 + memcpy(state_bo->name, to_msm_bo(obj)->name, sizeof(state_bo->name)); 233 233 234 234 if (full) { 235 235 void *ptr; 236 236 237 - state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL); 237 + state_bo->data = kvmalloc(obj->size, GFP_KERNEL); 238 238 if (!state_bo->data) 239 239 goto out; 240 240 241 - msm_gem_lock(&obj->base); 242 - ptr = msm_gem_get_vaddr_active(&obj->base); 243 - msm_gem_unlock(&obj->base); 241 + msm_gem_lock(obj); 242 + ptr = msm_gem_get_vaddr_active(obj); 243 + msm_gem_unlock(obj); 244 244 if (IS_ERR(ptr)) { 245 245 kvfree(state_bo->data); 246 246 state_bo->data = NULL; 247 247 goto out; 248 248 } 249 249 250 - memcpy(state_bo->data, ptr, obj->base.size); 251 - msm_gem_put_vaddr(&obj->base); 250 + memcpy(state_bo->data, ptr, obj->size); 251 + msm_gem_put_vaddr(obj); 252 252 } 253 253 out: 254 254 state->nr_bos++;
+4 -4
drivers/gpu/drm/msm/msm_rd.c
··· 310 310 struct msm_gem_submit *submit, int idx, 311 311 uint64_t iova, uint32_t size, bool full) 312 312 { 313 - struct msm_gem_object *obj = submit->bos[idx].obj; 313 + struct drm_gem_object *obj = submit->bos[idx].obj; 314 314 unsigned offset = 0; 315 315 const char *buf; 316 316 ··· 318 318 offset = iova - submit->bos[idx].iova; 319 319 } else { 320 320 iova = submit->bos[idx].iova; 321 - size = obj->base.size; 321 + size = obj->size; 322 322 } 323 323 324 324 /* ··· 335 335 if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ)) 336 336 return; 337 337 338 - buf = msm_gem_get_vaddr_active(&obj->base); 338 + buf = msm_gem_get_vaddr_active(obj); 339 339 if (IS_ERR(buf)) 340 340 return; 341 341 ··· 343 343 344 344 rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); 345 345 346 - msm_gem_put_vaddr_locked(&obj->base); 346 + msm_gem_put_vaddr_locked(obj); 347 347 } 348 348 349 349 /* called under gpu->lock */
+1 -1
drivers/gpu/drm/msm/msm_ringbuffer.c
··· 24 24 mutex_lock(&priv->lru.lock); 25 25 26 26 for (i = 0; i < submit->nr_bos; i++) { 27 - struct drm_gem_object *obj = &submit->bos[i].obj->base; 27 + struct drm_gem_object *obj = submit->bos[i].obj; 28 28 29 29 msm_gem_vma_unpin_fenced(submit->bos[i].vma, fctx); 30 30 msm_gem_unpin_active(obj);