drm/nv50-nvc0: prevent multiple vm/bar flushes occurring simultaneously

The per-vm mutex doesn't prevent this completely, a flush coming from the
BAR VM could potentially happen at the same time as one for the channel
VM. Not to mention that if/when we get per-client/channel VM, this will
happen far more frequently.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

authored by Ben Skeggs and committed by Dave Airlie 6f70a4c3 ef1b2871

+12
+8
drivers/gpu/drm/nouveau/nv50_instmem.c
/*
 * Flush PRAMIN on NV50: trigger the flush by writing 1 to register
 * 0x00330c, then poll until bit 1 of that register clears.
 * ramin_lock serializes the trigger/poll pair so that concurrent
 * flushes (e.g. BAR VM vs. channel VM) cannot interleave and confuse
 * the completion poll.
 */
void
nv50_instmem_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->ramin_lock);
	nv_wr32(dev, 0x00330c, 0x00000001);
	/* wait for bit 1 to drop to 0; timeout means hardware never acked */
	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
	spin_unlock(&dev_priv->ramin_lock);
}

/*
 * Same flush sequence for NV84+ hardware, which moved the flush
 * register to 0x070000. Serialized by the same ramin_lock.
 */
void
nv84_instmem_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->ramin_lock);
	nv_wr32(dev, 0x070000, 0x00000001);
	/* wait for bit 1 to drop to 0; timeout means hardware never acked */
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
	spin_unlock(&dev_priv->ramin_lock);
}
+4
drivers/gpu/drm/nouveau/nv50_vm.c
/*
 * Flush the VM TLB for a single engine: write the engine id (shifted
 * into bits 16+) plus the trigger bit to 0x100c80, then poll until
 * bit 0 clears. Taking ramin_lock here prevents this flush from
 * racing with a concurrent PRAMIN/BAR flush on the same register
 * interface (the per-vm mutex alone does not cover that case).
 */
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->ramin_lock);
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	/* wait for the trigger bit (bit 0) to self-clear */
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
	spin_unlock(&dev_priv->ramin_lock);
}