Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/nouveau/vm: take subdev mutex, not the mm, protects against race with vm/nvc0

nvc0_vm_flush() accesses the pgd list, which will soon be able to race
with vm_unlink() during channel destruction.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

+17 -16
drivers/gpu/drm/nouveau/core/subdev/vm/base.c
··· 236 236 vmm->map_pgt(vpgd->obj, pde, vpgt->obj); 237 237 } 238 238 239 - mutex_unlock(&vm->mm.mutex); 239 + mutex_unlock(&nv_subdev(vmm)->mutex); 240 240 nouveau_gpuobj_ref(NULL, &pgt); 241 - mutex_lock(&vm->mm.mutex); 241 + mutex_lock(&nv_subdev(vmm)->mutex); 242 242 } 243 243 } 244 244 ··· 256 256 pgt_size = (1 << (vmm->pgt_bits + 12)) >> type; 257 257 pgt_size *= 8; 258 258 259 - mutex_unlock(&vm->mm.mutex); 259 + mutex_unlock(&nv_subdev(vmm)->mutex); 260 260 ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000, 261 261 NVOBJ_FLAG_ZERO_ALLOC, &pgt); 262 - mutex_lock(&vm->mm.mutex); 262 + mutex_lock(&nv_subdev(vmm)->mutex); 263 263 if (unlikely(ret)) 264 264 return ret; 265 265 266 266 /* someone beat us to filling the PDE while we didn't have the lock */ 267 267 if (unlikely(vpgt->refcount[big]++)) { 268 - mutex_unlock(&vm->mm.mutex); 268 + mutex_unlock(&nv_subdev(vmm)->mutex); 269 269 nouveau_gpuobj_ref(NULL, &pgt); 270 - mutex_lock(&vm->mm.mutex); 270 + mutex_lock(&nv_subdev(vmm)->mutex); 271 271 return 0; 272 272 } 273 273 ··· 289 289 u32 fpde, lpde, pde; 290 290 int ret; 291 291 292 - mutex_lock(&vm->mm.mutex); 292 + mutex_lock(&nv_subdev(vmm)->mutex); 293 293 ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align, 294 294 &vma->node); 295 295 if (unlikely(ret != 0)) { 296 - mutex_unlock(&vm->mm.mutex); 296 + mutex_unlock(&nv_subdev(vmm)->mutex); 297 297 return ret; 298 298 } 299 299 ··· 314 314 if (pde != fpde) 315 315 nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1); 316 316 nouveau_mm_free(&vm->mm, &vma->node); 317 - mutex_unlock(&vm->mm.mutex); 317 + mutex_unlock(&nv_subdev(vmm)->mutex); 318 318 return ret; 319 319 } 320 320 } 321 - mutex_unlock(&vm->mm.mutex); 321 + mutex_unlock(&nv_subdev(vmm)->mutex); 322 322 323 323 vma->vm = vm; 324 324 vma->offset = (u64)vma->node->offset << 12; ··· 338 338 fpde = (vma->node->offset >> vmm->pgt_bits); 339 339 lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits; 340 340 341 - mutex_lock(&vm->mm.mutex); 341 + mutex_lock(&nv_subdev(vmm)->mutex); 342 342 nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde); 343 343 nouveau_mm_free(&vm->mm, &vma->node); 344 - mutex_unlock(&vm->mm.mutex); 344 + mutex_unlock(&nv_subdev(vmm)->mutex); 345 345 } 346 346 347 347 int ··· 405 405 406 406 nouveau_gpuobj_ref(pgd, &vpgd->obj); 407 407 408 - mutex_lock(&vm->mm.mutex); 408 + mutex_lock(&nv_subdev(vmm)->mutex); 409 409 for (i = vm->fpde; i <= vm->lpde; i++) 410 410 vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); 411 411 list_add(&vpgd->head, &vm->pgd_list); 412 - mutex_unlock(&vm->mm.mutex); 412 + mutex_unlock(&nv_subdev(vmm)->mutex); 413 413 return 0; 414 414 } 415 415 416 416 static void 417 417 nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) 418 418 { 419 + struct nouveau_vmmgr *vmm = vm->vmm; 419 420 struct nouveau_vm_pgd *vpgd, *tmp; 420 421 struct nouveau_gpuobj *pgd = NULL; 421 422 422 423 if (!mpgd) 423 424 return; 424 425 425 - mutex_lock(&vm->mm.mutex); 426 + mutex_lock(&nv_subdev(vmm)->mutex); 426 427 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { 427 428 if (vpgd->obj == mpgd) { 428 429 pgd = vpgd->obj; ··· 432 431 break; 433 432 } 434 433 } 435 - mutex_unlock(&vm->mm.mutex); 434 + mutex_unlock(&nv_subdev(vmm)->mutex); 436 435 437 436 nouveau_gpuobj_ref(NULL, &pgd); 438 437 }