Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/panthor: Expose size of driver internal BO's over fdinfo

This will display the sizes of kernel BO's bound to an open file, which are
otherwise not exposed to UM through a handle.

The sizes recorded are as follows:
- Per group: suspend buffer, protm-suspend buffer, syncobjs
- Per queue: ringbuffer, profiling slots, firmware interface
- For all heaps in all heap pools across all VM's bound to an open file,
record size of all heap chunks, and for each pool the gpu_context BO too.

This does not record the size of FW regions, as these aren't bound to a
specific open file and remain active through the whole life of the driver.

Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Reviewed-by: Mihail Atanassov <mihail.atanassov@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250130172851.941597-4-adrian.larumbe@collabora.com

authored by

Adrián Larumbe and committed by
Boris Brezillon
434e5ca5 af6c2b7c

+136 -1
+14
drivers/gpu/drm/panthor/panthor_drv.c
··· 1457 1457 drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency); 1458 1458 } 1459 1459 1460 + static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file) 1461 + { 1462 + char *drv_name = file->minor->dev->driver->name; 1463 + struct panthor_file *pfile = file->driver_priv; 1464 + struct drm_memory_stats stats = {0}; 1465 + 1466 + panthor_fdinfo_gather_group_mem_info(pfile, &stats); 1467 + panthor_vm_heaps_sizes(pfile, &stats); 1468 + 1469 + drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident); 1470 + drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active); 1471 + } 1472 + 1460 1473 static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file) 1461 1474 { 1462 1475 struct drm_device *dev = file->minor->dev; 1463 1476 struct panthor_device *ptdev = container_of(dev, struct panthor_device, base); 1464 1477 1465 1478 panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p); 1479 + panthor_show_internal_memory_stats(p, file); 1466 1480 1467 1481 drm_show_memory_stats(p, file); 1468 1482 }
+26
drivers/gpu/drm/panthor/panthor_heap.c
··· 603 603 604 604 panthor_heap_pool_put(pool); 605 605 } 606 + 607 + /** 608 + * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool 609 + * @pool: Pool whose total chunk size to calculate. 610 + * 611 + * This function adds the size of all heap chunks across all heaps in the 612 + * argument pool. It also adds the size of the gpu contexts kernel bo. 613 + * It is meant to be used by fdinfo for displaying the size of internal 614 + * driver BO's that aren't exposed to userspace through a GEM handle. 615 + * 616 + */ 617 + size_t panthor_heap_pool_size(struct panthor_heap_pool *pool) 618 + { 619 + struct panthor_heap *heap; 620 + unsigned long i; 621 + size_t size = 0; 622 + 623 + down_read(&pool->lock); 624 + xa_for_each(&pool->xa, i, heap) 625 + size += heap->chunk_size * heap->chunk_count; 626 + up_read(&pool->lock); 627 + 628 + size += pool->gpu_contexts->obj->size; 629 + 630 + return size; 631 + }
+2
drivers/gpu/drm/panthor/panthor_heap.h
··· 27 27 panthor_heap_pool_get(struct panthor_heap_pool *pool); 28 28 void panthor_heap_pool_put(struct panthor_heap_pool *pool); 29 29 30 + size_t panthor_heap_pool_size(struct panthor_heap_pool *pool); 31 + 30 32 int panthor_heap_grow(struct panthor_heap_pool *pool, 31 33 u64 heap_gpu_va, 32 34 u32 renderpasses_in_flight,
+33
drivers/gpu/drm/panthor/panthor_mmu.c
··· 1944 1944 return pool; 1945 1945 } 1946 1946 1947 + /** 1948 + * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all 1949 + * heaps over all the heap pools in a VM 1950 + * @pfile: File. 1951 + * @stats: Memory stats to be updated. 1952 + * 1953 + * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM 1954 + * is active, record the size as active as well. 1955 + */ 1956 + void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats) 1957 + { 1958 + struct panthor_vm *vm; 1959 + unsigned long i; 1960 + 1961 + if (!pfile->vms) 1962 + return; 1963 + 1964 + xa_lock(&pfile->vms->xa); 1965 + xa_for_each(&pfile->vms->xa, i, vm) { 1966 + size_t size = 0; 1967 + 1968 + mutex_lock(&vm->heaps.lock); 1969 + if (vm->heaps.pool) 1970 + size = panthor_heap_pool_size(vm->heaps.pool); 1971 + mutex_unlock(&vm->heaps.lock); 1972 + 1973 + stats->resident += size; 1974 + if (vm->as.id >= 0) 1975 + stats->active += size; 1976 + } 1977 + xa_unlock(&pfile->vms->xa); 1978 + } 1979 + 1947 1980 static u64 mair_to_memattr(u64 mair, bool coherent) 1948 1981 { 1949 1982 u64 memattr = 0;
+3
drivers/gpu/drm/panthor/panthor_mmu.h
··· 9 9 10 10 struct drm_exec; 11 11 struct drm_sched_job; 12 + struct drm_memory_stats; 12 13 struct panthor_gem_object; 13 14 struct panthor_heap_pool; 14 15 struct panthor_vm; ··· 37 36 38 37 struct panthor_heap_pool * 39 38 panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create); 39 + 40 + void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats); 40 41 41 42 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm); 42 43 void panthor_vm_put(struct panthor_vm *vm);
+55 -1
drivers/gpu/drm/panthor/panthor_sched.c
··· 625 625 */ 626 626 struct panthor_kernel_bo *syncobjs; 627 627 628 - /** @fdinfo: Per-file total cycle and timestamp values reference. */ 628 + /** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */ 629 629 struct { 630 630 /** @data: Total sampled values for jobs in queues from this group. */ 631 631 struct panthor_gpu_usage data; ··· 635 635 * and job post-completion processing function 636 636 */ 637 637 struct mutex lock; 638 + 639 + /** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */ 640 + size_t kbo_sizes; 638 641 } fdinfo; 639 642 640 643 /** @state: Group state. */ ··· 3381 3378 return ERR_PTR(ret); 3382 3379 } 3383 3380 3381 + static void add_group_kbo_sizes(struct panthor_device *ptdev, 3382 + struct panthor_group *group) 3383 + { 3384 + struct panthor_queue *queue; 3385 + int i; 3386 + 3387 + if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group))) 3388 + return; 3389 + if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev)) 3390 + return; 3391 + 3392 + group->fdinfo.kbo_sizes += group->suspend_buf->obj->size; 3393 + group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size; 3394 + group->fdinfo.kbo_sizes += group->syncobjs->obj->size; 3395 + 3396 + for (i = 0; i < group->queue_count; i++) { 3397 + queue = group->queues[i]; 3398 + group->fdinfo.kbo_sizes += queue->ringbuf->obj->size; 3399 + group->fdinfo.kbo_sizes += queue->iface.mem->obj->size; 3400 + group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size; 3401 + } 3402 + } 3403 + 3384 3404 #define MAX_GROUPS_PER_POOL 128 3385 3405 3386 3406 int panthor_group_create(struct panthor_file *pfile, ··· 3528 3502 } 3529 3503 mutex_unlock(&sched->reset.lock); 3530 3504 3505 + add_group_kbo_sizes(group->ptdev, group); 3531 3506 mutex_init(&group->fdinfo.lock); 3532 3507 3533 3508 return gid; ··· 3646 3619 xa_destroy(&gpool->xa); 3647 3620 kfree(gpool); 3648 3621 pfile->groups = NULL; 3622 + } 3623 + 3624 + /** 3625 + * panthor_fdinfo_gather_group_mem_info() 
- Retrieve aggregate size of all private kernel BO's 3626 + * belonging to all the groups owned by an open Panthor file 3627 + * @pfile: File. 3628 + * @stats: Memory statistics to be updated. 3629 + * 3630 + */ 3631 + void 3632 + panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile, 3633 + struct drm_memory_stats *stats) 3634 + { 3635 + struct panthor_group_pool *gpool = pfile->groups; 3636 + struct panthor_group *group; 3637 + unsigned long i; 3638 + 3639 + if (IS_ERR_OR_NULL(gpool)) 3640 + return; 3641 + 3642 + xa_lock(&gpool->xa); 3643 + xa_for_each(&gpool->xa, i, group) { 3644 + stats->resident += group->fdinfo.kbo_sizes; 3645 + if (group->csg_id >= 0) 3646 + stats->active += group->fdinfo.kbo_sizes; 3647 + } 3648 + xa_unlock(&gpool->xa); 3649 3649 } 3650 3650 3651 3651 static void job_release(struct kref *ref)
+3
drivers/gpu/drm/panthor/panthor_sched.h
··· 9 9 struct drm_file; 10 10 struct drm_gem_object; 11 11 struct drm_sched_job; 12 + struct drm_memory_stats; 12 13 struct drm_panthor_group_create; 13 14 struct drm_panthor_queue_create; 14 15 struct drm_panthor_group_get_state; ··· 37 36 38 37 int panthor_group_pool_create(struct panthor_file *pfile); 39 38 void panthor_group_pool_destroy(struct panthor_file *pfile); 39 + void panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile, 40 + struct drm_memory_stats *stats); 40 41 41 42 int panthor_sched_init(struct panthor_device *ptdev); 42 43 void panthor_sched_unplug(struct panthor_device *ptdev);