Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/panfrost: Prevent race when handling page fault

When handling a GPU page fault addr_to_drm_mm_node() is used to
translate the GPU address to a buffer object. However it is possible for
the buffer object to be freed after the function has returned resulting
in a use-after-free of the BO.

Change addr_to_drm_mm_node to return the panfrost_gem_object with an
extra reference on it, preventing the BO from being freed until after
the page fault has been handled.

Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190913160310.50444-1-steven.price@arm.com

Authored by Steven Price and committed by Rob Herring
65e51e30 d18a9662

+36 -19
drivers/gpu/drm/panfrost/panfrost_mmu.c
···
386 386 	free_io_pgtable_ops(mmu->pgtbl_ops);
387 387 }
388 388 
389     - static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
389     + static struct panfrost_gem_object *
390     + addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
390 391 {
391     - 	struct drm_mm_node *node = NULL;
392     + 	struct panfrost_gem_object *bo = NULL;
393     + 	struct panfrost_file_priv *priv;
394     + 	struct drm_mm_node *node;
392 395 	u64 offset = addr >> PAGE_SHIFT;
393 396 	struct panfrost_mmu *mmu;
394 397 
395 398 	spin_lock(&pfdev->as_lock);
396 399 	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
397     - 		struct panfrost_file_priv *priv;
398     - 		if (as != mmu->as)
399     - 			continue;
400     + 		if (as == mmu->as)
401     + 			break;
402     + 	}
403     + 	if (as != mmu->as)
404     + 		goto out;
400 405 
401     - 		priv = container_of(mmu, struct panfrost_file_priv, mmu);
402     - 		drm_mm_for_each_node(node, &priv->mm) {
403     - 			if (offset >= node->start && offset < (node->start + node->size))
404     - 				goto out;
406     + 	priv = container_of(mmu, struct panfrost_file_priv, mmu);
407     + 
408     + 	spin_lock(&priv->mm_lock);
409     + 
410     + 	drm_mm_for_each_node(node, &priv->mm) {
411     + 		if (offset >= node->start &&
412     + 		    offset < (node->start + node->size)) {
413     + 			bo = drm_mm_node_to_panfrost_bo(node);
414     + 			drm_gem_object_get(&bo->base.base);
415     + 			break;
405 416 		}
406 417 	}
407 418 
419     + 	spin_unlock(&priv->mm_lock);
408 420 out:
409 421 	spin_unlock(&pfdev->as_lock);
410     - 	return node;
422     + 	return bo;
411 423 }
412 424 
413 425 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
···
427 415 int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
428 416 {
429 417 	int ret, i;
430     - 	struct drm_mm_node *node;
431 418 	struct panfrost_gem_object *bo;
432 419 	struct address_space *mapping;
433 420 	pgoff_t page_offset;
434 421 	struct sg_table *sgt;
435 422 	struct page **pages;
436 423 
437     - 	node = addr_to_drm_mm_node(pfdev, as, addr);
438     - 	if (!node)
424     + 	bo = addr_to_drm_mm_node(pfdev, as, addr);
425     + 	if (!bo)
439 426 		return -ENOENT;
440 427 
441     - 	bo = drm_mm_node_to_panfrost_bo(node);
442 428 	if (!bo->is_heap) {
443 429 		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
444     - 			 node->start << PAGE_SHIFT);
445     - 		return -EINVAL;
430     + 			 bo->node.start << PAGE_SHIFT);
431     + 		ret = -EINVAL;
432     + 		goto err_bo;
446 433 	}
447 434 	WARN_ON(bo->mmu->as != as);
448 435 
449 436 	/* Assume 2MB alignment and size multiple */
450 437 	addr &= ~((u64)SZ_2M - 1);
451 438 	page_offset = addr >> PAGE_SHIFT;
452     - 	page_offset -= node->start;
439     + 	page_offset -= bo->node.start;
453 440 
454 441 	mutex_lock(&bo->base.pages_lock);
455 442 
···
457 446 			       sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
458 447 	if (!bo->sgts) {
459 448 		mutex_unlock(&bo->base.pages_lock);
460     - 		return -ENOMEM;
449     + 		ret = -ENOMEM;
450     + 		goto err_bo;
461 451 	}
462 452 
463 453 	pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
···
467 455 		kfree(bo->sgts);
468 456 		bo->sgts = NULL;
469 457 		mutex_unlock(&bo->base.pages_lock);
470     - 		return -ENOMEM;
458     + 		ret = -ENOMEM;
459     + 		goto err_bo;
471 460 	}
472 461 	bo->base.pages = pages;
473 462 	bo->base.pages_use_count = 1;
···
506 493 
507 494 	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
508 495 
496     + 	drm_gem_object_put_unlocked(&bo->base.base);
497     + 
509 498 	return 0;
510 499 
511 500 err_map:
512 501 	sg_free_table(sgt);
513 502 err_pages:
514 503 	drm_gem_shmem_put_pages(&bo->base);
504     + err_bo:
505     + 	drm_gem_object_put_unlocked(&bo->base.base);
515 506 	return ret;
516 507 }