Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/nv84-/fence: prepare for emit/sync support of sysram sequences

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

+65 -62
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 561 561 struct nouveau_fence *fence = NULL; 562 562 int ret; 563 563 564 - ret = nouveau_fence_new(chan, &fence); 564 + ret = nouveau_fence_new(chan, false, &fence); 565 565 if (ret) 566 566 return ret; 567 567
+1 -1
drivers/gpu/drm/nouveau/nouveau_chan.c
··· 51 51 struct nouveau_fence *fence = NULL; 52 52 int ret; 53 53 54 - ret = nouveau_fence_new(chan, &fence); 54 + ret = nouveau_fence_new(chan, false, &fence); 55 55 if (!ret) { 56 56 ret = nouveau_fence_wait(fence, false, false); 57 57 nouveau_fence_unref(&fence);
+1 -1
drivers/gpu/drm/nouveau/nouveau_display.c
··· 540 540 } 541 541 FIRE_RING (chan); 542 542 543 - ret = nouveau_fence_new(chan, pfence); 543 + ret = nouveau_fence_new(chan, false, pfence); 544 544 if (ret) 545 545 goto fail; 546 546
+4 -5
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 41 41 struct nouveau_fence *fence, *fnext; 42 42 spin_lock(&fctx->lock); 43 43 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) { 44 - if (fence->work) 45 - fence->work(fence->priv, false); 46 44 fence->channel = NULL; 47 45 list_del(&fence->head); 48 46 nouveau_fence_unref(&fence); ··· 67 69 if (fctx->read(chan) < fence->sequence) 68 70 break; 69 71 70 - if (fence->work) 71 - fence->work(fence->priv, true); 72 72 fence->channel = NULL; 73 73 list_del(&fence->head); 74 74 nouveau_fence_unref(&fence); ··· 252 256 } 253 257 254 258 int 255 - nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence) 259 + nouveau_fence_new(struct nouveau_channel *chan, bool sysmem, 260 + struct nouveau_fence **pfence) 256 261 { 257 262 struct nouveau_fence *fence; 258 263 int ret = 0; ··· 264 267 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 265 268 if (!fence) 266 269 return -ENOMEM; 270 + 271 + fence->sysmem = sysmem; 267 272 kref_init(&fence->kref); 268 273 269 274 ret = nouveau_fence_emit(fence, chan);
+6 -12
drivers/gpu/drm/nouveau/nouveau_fence.h
··· 7 7 struct list_head head; 8 8 struct kref kref; 9 9 10 + bool sysmem; 11 + 10 12 struct nouveau_channel *channel; 11 13 unsigned long timeout; 12 14 u32 sequence; 13 - 14 - void (*work)(void *priv, bool signalled); 15 - void *priv; 16 15 }; 17 16 18 - int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **); 17 + int nouveau_fence_new(struct nouveau_channel *, bool sysmem, 18 + struct nouveau_fence **); 19 19 struct nouveau_fence * 20 20 nouveau_fence_ref(struct nouveau_fence *); 21 21 void nouveau_fence_unref(struct nouveau_fence **); ··· 79 79 struct nv84_fence_chan { 80 80 struct nouveau_fence_chan base; 81 81 struct nouveau_vma vma; 82 + struct nouveau_vma vma_gart; 82 83 struct nouveau_vma dispc_vma[4]; 83 84 }; 84 85 85 86 struct nv84_fence_priv { 86 87 struct nouveau_fence_priv base; 87 88 struct nouveau_bo *bo; 89 + struct nouveau_bo *bo_gart; 88 90 u32 *suspend; 89 91 }; 90 92 91 93 u64 nv84_fence_crtc(struct nouveau_channel *, int); 92 - int nv84_fence_emit(struct nouveau_fence *); 93 - int nv84_fence_sync(struct nouveau_fence *, struct nouveau_channel *, 94 - struct nouveau_channel *); 95 - u32 nv84_fence_read(struct nouveau_channel *); 96 94 int nv84_fence_context_new(struct nouveau_channel *); 97 - void nv84_fence_context_del(struct nouveau_channel *); 98 - bool nv84_fence_suspend(struct nouveau_drm *); 99 - void nv84_fence_resume(struct nouveau_drm *); 100 - void nv84_fence_destroy(struct nouveau_drm *); 101 95 102 96 #endif
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 787 787 } 788 788 } 789 789 790 - ret = nouveau_fence_new(chan, &fence); 790 + ret = nouveau_fence_new(chan, false, &fence); 791 791 if (ret) { 792 792 NV_ERROR(cli, "error fencing pushbuf: %d\n", ret); 793 793 WIND_RING(chan);
+48 -11
drivers/gpu/drm/nouveau/nv84_fence.c
··· 76 76 return ret; 77 77 } 78 78 79 - int 79 + static int 80 80 nv84_fence_emit(struct nouveau_fence *fence) 81 81 { 82 82 struct nouveau_channel *chan = fence->channel; 83 83 struct nv84_fence_chan *fctx = chan->fence; 84 84 struct nouveau_fifo_chan *fifo = (void *)chan->object; 85 - u64 addr = fctx->vma.offset + fifo->chid * 16; 85 + u64 addr = fifo->chid * 16; 86 + 87 + if (fence->sysmem) 88 + addr += fctx->vma_gart.offset; 89 + else 90 + addr += fctx->vma.offset; 91 + 86 92 return fctx->base.emit32(chan, addr, fence->sequence); 87 93 } 88 94 89 - int 95 + static int 90 96 nv84_fence_sync(struct nouveau_fence *fence, 91 97 struct nouveau_channel *prev, struct nouveau_channel *chan) 92 98 { 93 99 struct nv84_fence_chan *fctx = chan->fence; 94 100 struct nouveau_fifo_chan *fifo = (void *)prev->object; 95 - u64 addr = fctx->vma.offset + fifo->chid * 16; 101 + u64 addr = fifo->chid * 16; 102 + 103 + if (fence->sysmem) 104 + addr += fctx->vma_gart.offset; 105 + else 106 + addr += fctx->vma.offset; 107 + 96 108 return fctx->base.sync32(chan, addr, fence->sequence); 97 109 } 98 110 99 - u32 111 + static u32 100 112 nv84_fence_read(struct nouveau_channel *chan) 101 113 { 102 114 struct nouveau_fifo_chan *fifo = (void *)chan->object; ··· 116 104 return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4); 117 105 } 118 106 119 - void 107 + static void 120 108 nv84_fence_context_del(struct nouveau_channel *chan) 121 109 { 122 110 struct drm_device *dev = chan->drm->dev; ··· 129 117 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); 130 118 } 131 119 120 + nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); 132 121 nouveau_bo_vma_del(priv->bo, &fctx->vma); 133 122 nouveau_fence_context_del(&fctx->base); 134 123 chan->fence = NULL; ··· 157 144 fctx->base.sync32 = nv84_fence_sync32; 158 145 159 146 ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma); 160 - if (ret) 161 - nv84_fence_context_del(chan); 147 + if (ret == 0) { 148 + ret = nouveau_bo_vma_add(priv->bo_gart, client->vm, 149 + &fctx->vma_gart); 150 + } 162 151 163 152 /* map display semaphore buffers into channel's vm */ 164 153 for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { ··· 169 154 } 170 155 171 156 nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000); 157 + 158 + if (ret) 159 + nv84_fence_context_del(chan); 172 160 return ret; 173 161 } 174 162 175 - bool 163 + static bool 176 164 nv84_fence_suspend(struct nouveau_drm *drm) 177 165 { 178 166 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); ··· 191 173 return priv->suspend != NULL; 192 174 } 193 175 194 - void 176 + static void 195 177 nv84_fence_resume(struct nouveau_drm *drm) 196 178 { 197 179 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); ··· 206 188 } 207 189 } 208 190 209 - void 191 + static void 210 192 nv84_fence_destroy(struct nouveau_drm *drm) 211 193 { 212 194 struct nv84_fence_priv *priv = drm->fence; 195 + nouveau_bo_unmap(priv->bo_gart); 196 + if (priv->bo_gart) 197 + nouveau_bo_unpin(priv->bo_gart); 198 + nouveau_bo_ref(NULL, &priv->bo_gart); 213 199 nouveau_bo_unmap(priv->bo); 214 200 if (priv->bo) 215 201 nouveau_bo_unpin(priv->bo); ··· 253 231 } 254 232 if (ret) 255 233 nouveau_bo_ref(NULL, &priv->bo); 234 + } 235 + 236 + if (ret == 0) 237 + ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0, 238 + TTM_PL_FLAG_TT, 0, 0, NULL, 239 + &priv->bo_gart); 240 + if (ret == 0) { 241 + ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT); 242 + if (ret == 0) { 243 + ret = nouveau_bo_map(priv->bo_gart); 244 + if (ret) 245 + nouveau_bo_unpin(priv->bo_gart); 246 + } 247 + if (ret) 248 + nouveau_bo_ref(NULL, &priv->bo_gart); 256 249 } 257 250 258 251 if (ret)
+3 -30
drivers/gpu/drm/nouveau/nvc0_fence.c
··· 81 81 int 82 82 nvc0_fence_create(struct nouveau_drm *drm) 83 83 { 84 - struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); 85 - struct nv84_fence_priv *priv; 86 - int ret; 87 - 88 - priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); 89 - if (!priv) 90 - return -ENOMEM; 91 - 92 - priv->base.dtor = nv84_fence_destroy; 93 - priv->base.suspend = nv84_fence_suspend; 94 - priv->base.resume = nv84_fence_resume; 95 - priv->base.context_new = nvc0_fence_context_new; 96 - priv->base.context_del = nv84_fence_context_del; 97 - 98 - init_waitqueue_head(&priv->base.waiting); 99 - priv->base.uevent = true; 100 - 101 - ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0, 102 - TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo); 84 + int ret = nv84_fence_create(drm); 103 85 if (ret == 0) { 104 - ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 105 - if (ret == 0) { 106 - ret = nouveau_bo_map(priv->bo); 107 - if (ret) 108 - nouveau_bo_unpin(priv->bo); 109 - } 110 - if (ret) 111 - nouveau_bo_ref(NULL, &priv->bo); 86 + struct nv84_fence_priv *priv = drm->fence; 87 + priv->base.context_new = nvc0_fence_context_new; 112 88 } 113 - 114 - if (ret) 115 - nv84_fence_destroy(drm); 116 89 return ret; 117 90 }