Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

accel/ivpu: Improve debug and warning messages

Add IOCTL debug bit for logging user provided parameter validation
errors.

Refactor several warning and error messages to better reflect fault
reason. User generated faults should not flood kernel messages with
warnings or errors, so change those to ivpu_dbg(). Add additional debug
logs for parameter validation in IOCTLs.

Check the size provided in metric streamer start and return -EINVAL
together with a debug message print.

Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Link: https://patch.msgid.link/20251104132418.970784-1-karol.wachowski@linux.intel.com

+120 -58
+1
drivers/accel/ivpu/ivpu_drv.h
··· 79 79 #define IVPU_DBG_KREF BIT(11) 80 80 #define IVPU_DBG_RPM BIT(12) 81 81 #define IVPU_DBG_MMU_MAP BIT(13) 82 + #define IVPU_DBG_IOCTL BIT(14) 82 83 83 84 #define ivpu_err(vdev, fmt, ...) \ 84 85 drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+15 -10
drivers/accel/ivpu/ivpu_gem.c
··· 128 128 bo->ctx_id = ctx->id; 129 129 bo->vpu_addr = bo->mm_node.start; 130 130 ivpu_dbg_bo(vdev, bo, "vaddr"); 131 - } else { 132 - ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); 133 131 } 134 132 135 133 ivpu_bo_unlock(bo); ··· 287 289 struct ivpu_addr_range *range; 288 290 289 291 if (bo->ctx) { 290 - ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n", 291 - file_priv->ctx.id, bo->ctx->id); 292 + ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n", 293 + bo, file_priv->ctx.id, bo->ctx->id); 292 294 return -EALREADY; 293 295 } 294 296 ··· 355 357 struct ivpu_bo *bo; 356 358 int ret; 357 359 358 - if (args->flags & ~DRM_IVPU_BO_FLAGS) 360 + if (args->flags & ~DRM_IVPU_BO_FLAGS) { 361 + ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags); 359 362 return -EINVAL; 363 + } 360 364 361 - if (size == 0) 365 + if (size == 0) { 366 + ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size); 362 367 return -EINVAL; 368 + } 363 369 364 370 bo = ivpu_bo_alloc(vdev, size, args->flags); 365 371 if (IS_ERR(bo)) { 366 - ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)", 372 + ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n", 367 373 bo, file_priv->ctx.id, args->size, args->flags); 368 374 return PTR_ERR(bo); 369 375 } ··· 376 374 377 375 ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); 378 376 if (ret) { 379 - ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)", 377 + ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n", 380 378 bo, file_priv->ctx.id, args->size, args->flags); 381 379 } else { 382 380 args->vpu_addr = bo->vpu_addr; ··· 405 403 406 404 bo = ivpu_bo_alloc(vdev, size, flags); 407 405 if (IS_ERR(bo)) { 408 - ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)", 406 + ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n", 409 407 bo, range->start, size, flags); 410 408 return NULL; 411 409 } 412 410 413 411 ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range); 414 - if (ret) 412 + if (ret) { 413 + ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n", 414 + bo, ctx->id, size, ret); 415 415 goto err_put; 416 + } 416 417 417 418 ret = ivpu_bo_bind(bo); 418 419 if (ret)
+20 -9
drivers/accel/ivpu/ivpu_gem_userptr.c
··· 84 84 pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages); 85 85 if (pinned < 0) { 86 86 ret = pinned; 87 - ivpu_warn(vdev, "Failed to pin user pages: %d\n", ret); 87 + ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret); 88 88 goto free_pages_array; 89 89 } 90 90 91 91 if (pinned != nr_pages) { 92 - ivpu_warn(vdev, "Pinned %d pages, expected %lu\n", pinned, nr_pages); 92 + ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages); 93 93 ret = -EFAULT; 94 94 goto unpin_pages; 95 95 } ··· 102 102 103 103 ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL); 104 104 if (ret) { 105 - ivpu_warn(vdev, "Failed to create sg table: %d\n", ret); 105 + ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret); 106 106 goto free_sgt; 107 107 } 108 108 ··· 116 116 dma_buf = dma_buf_export(&exp_info); 117 117 if (IS_ERR(dma_buf)) { 118 118 ret = PTR_ERR(dma_buf); 119 - ivpu_warn(vdev, "Failed to export userptr dma-buf: %d\n", ret); 119 + ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret); 120 120 goto free_sg_table; 121 121 } 122 122 ··· 170 170 struct ivpu_bo *bo; 171 171 int ret; 172 172 173 - if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) 173 + if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) { 174 + ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags); 174 175 return -EINVAL; 176 + } 175 177 176 - if (!args->user_ptr || !args->size) 178 + if (!args->user_ptr || !args->size) { 179 + ivpu_dbg(vdev, IOCTL, "Userptr or size are zero: ptr %llx size %llu\n", 180 + args->user_ptr, args->size); 177 181 return -EINVAL; 182 + } 178 183 179 - if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) 184 + if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) { 185 + ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n", 186 + args->user_ptr, args->size); 180 187 return -EINVAL; 188 + } 181 189 182 - if (!access_ok(user_ptr, args->size)) 190 + if (!access_ok(user_ptr, args->size)) { 191 + ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n", 192 + args->user_ptr, args->size); 183 193 return -EFAULT; 194 + } 184 195 185 196 bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags); 186 197 if (IS_ERR(bo)) ··· 199 188 200 189 ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); 201 190 if (ret) { 202 - ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)", 191 + ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n", 203 192 bo, file_priv->ctx.id, args->size, args->flags); 204 193 } else { 205 194 ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n",
+65 -30
drivers/accel/ivpu/ivpu_job.c
··· 348 348 349 349 cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id); 350 350 if (!cmdq) { 351 - ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id); 351 + ivpu_dbg(vdev, IOCTL, "Failed to find command queue with ID: %u\n", cmdq_id); 352 352 return NULL; 353 353 } 354 354 ··· 534 534 job->bo_count = bo_count; 535 535 job->done_fence = ivpu_fence_create(vdev); 536 536 if (!job->done_fence) { 537 - ivpu_warn_ratelimited(vdev, "Failed to create a fence\n"); 537 + ivpu_err(vdev, "Failed to create a fence\n"); 538 538 goto err_free_job; 539 539 } 540 540 ··· 687 687 else 688 688 cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id); 689 689 if (!cmdq) { 690 - ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id); 691 690 ret = -EINVAL; 692 691 goto err_unlock; 693 692 } ··· 770 771 for (i = 0; i < buf_count; i++) { 771 772 struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]); 772 773 773 - if (!obj) 774 + if (!obj) { 775 + ivpu_dbg(vdev, IOCTL, "Failed to lookup GEM object with handle %u\n", 776 + buf_handles[i]); 774 777 return -ENOENT; 778 + } 775 779 776 780 job->bos[i] = to_ivpu_bo(obj); 777 781 ··· 785 783 786 784 bo = job->bos[CMD_BUF_IDX]; 787 785 if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) { 788 - ivpu_warn(vdev, "Buffer is already in use\n"); 786 + ivpu_dbg(vdev, IOCTL, "Buffer is already in use by another job\n"); 789 787 return -EBUSY; 790 788 } 791 789 792 790 if (commands_offset >= ivpu_bo_size(bo)) { 793 - ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset); 791 + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u for buffer size %zu\n", 792 + commands_offset, ivpu_bo_size(bo)); 794 793 return -EINVAL; 795 794 } 796 795 ··· 801 798 struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index]; 802 799 803 800 if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) { 804 - ivpu_warn(vdev, "Preemption buffer is too small\n"); 801 + ivpu_dbg(vdev, IOCTL, "Preemption buffer is too small\n"); 805 802 return -EINVAL; 806 803 } 807 804 if (ivpu_bo_is_mappable(preempt_bo)) { 808 - ivpu_warn(vdev, "Preemption buffer cannot be mappable\n"); 805 + ivpu_dbg(vdev, IOCTL, "Preemption buffer cannot be mappable\n"); 809 806 return -EINVAL; 810 807 } 811 808 job->primary_preempt_buf = preempt_bo; ··· 814 811 ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, 815 812 &acquire_ctx); 816 813 if (ret) { 817 - ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); 814 + ivpu_warn_ratelimited(vdev, "Failed to lock reservations: %d\n", ret); 818 815 return ret; 819 816 } 820 817 821 818 for (i = 0; i < buf_count; i++) { 822 819 ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1); 823 820 if (ret) { 824 - ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); 821 + ivpu_warn_ratelimited(vdev, "Failed to reserve fences: %d\n", ret); 825 822 goto unlock_reservations; 826 823 } 827 824 } ··· 868 865 869 866 job = ivpu_job_create(file_priv, engine, buffer_count); 870 867 if (!job) { 871 - ivpu_err(vdev, "Failed to create job\n"); 872 868 ret = -ENOMEM; 873 869 goto err_exit_dev; 874 870 } 875 871 876 872 ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset, 877 873 preempt_buffer_index); 878 - if (ret) { 879 - ivpu_err(vdev, "Failed to prepare job: %d\n", ret); 874 + if (ret) 880 875 goto err_destroy_job; 881 - } 882 876 883 877 down_read(&vdev->pm->reset_lock); 884 878 ret = ivpu_job_submit(job, priority, cmdq_id); ··· 901 901 int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 902 902 { 903 903 struct ivpu_file_priv *file_priv = file->driver_priv; 904 + struct ivpu_device *vdev = file_priv->vdev; 904 905 struct drm_ivpu_submit *args = data; 905 906 u8 priority; 906 907 907 - if (args->engine != DRM_IVPU_ENGINE_COMPUTE) 908 + if (args->engine != DRM_IVPU_ENGINE_COMPUTE) { 909 + ivpu_dbg(vdev, IOCTL, "Invalid engine %d\n", args->engine); 908 910 return -EINVAL; 911 + } 909 912 910 - if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) 913 + if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) { 914 + ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority); 911 915 return -EINVAL; 916 + } 912 917 913 - if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) 918 + if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) { 919 + ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count); 914 920 return -EINVAL; 921 + } 915 922 916 - if (!IS_ALIGNED(args->commands_offset, 8)) 923 + if (!IS_ALIGNED(args->commands_offset, 8)) { 924 + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset); 917 925 return -EINVAL; 926 + } 918 927 919 - if (!file_priv->ctx.id) 928 + if (!file_priv->ctx.id) { 929 + ivpu_dbg(vdev, IOCTL, "Context not initialized\n"); 920 930 return -EINVAL; 931 + } 921 932 922 - if (file_priv->has_mmu_faults) 933 + if (file_priv->has_mmu_faults) { 934 + ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id); 923 935 return -EBADFD; 936 + } 924 937 925 938 priority = ivpu_job_to_jsm_priority(args->priority); 926 939 ··· 944 931 int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 945 932 { 946 933 struct ivpu_file_priv *file_priv = file->driver_priv; 934 + struct ivpu_device *vdev = file_priv->vdev; 947 935 struct drm_ivpu_cmdq_submit *args = data; 948 936 949 - if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) 937 + if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { 938 + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); 950 939 return -ENODEV; 940 + } 951 941 952 - if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) 942 + if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) { 943 + ivpu_dbg(vdev, IOCTL, "Invalid command queue ID %u\n", args->cmdq_id); 953 944 return -EINVAL; 945 + } 954 946 955 - if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) 947 + if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) { 948 + ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count); 956 949 return -EINVAL; 950 + } 957 951 958 - if (args->preempt_buffer_index >= args->buffer_count) 952 + if (args->preempt_buffer_index >= args->buffer_count) { 953 + ivpu_dbg(vdev, IOCTL, "Invalid preemption buffer index %u\n", 954 + args->preempt_buffer_index); 959 955 return -EINVAL; 956 + } 960 957 961 - if (!IS_ALIGNED(args->commands_offset, 8)) 958 + if (!IS_ALIGNED(args->commands_offset, 8)) { 959 + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset); 962 960 return -EINVAL; 961 + } 963 962 964 - if (!file_priv->ctx.id) 963 + if (!file_priv->ctx.id) { 964 + ivpu_dbg(vdev, IOCTL, "Context not initialized\n"); 965 965 return -EINVAL; 966 + } 966 967 967 - if (file_priv->has_mmu_faults) 968 + if (file_priv->has_mmu_faults) { 969 + ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id); 968 970 return -EBADFD; 971 + } 969 972 970 973 return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE, 971 974 (void __user *)args->buffers_ptr, args->commands_offset, ··· 996 967 struct ivpu_cmdq *cmdq; 997 968 int ret; 998 969 999 - if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) 970 + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { 971 + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); 1000 972 return -ENODEV; 973 + } 1001 974 1002 - if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) 975 + if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) { 976 + ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority); 1003 977 return -EINVAL; 978 + } 1004 979 1005 980 ret = ivpu_rpm_get(vdev); 1006 981 if (ret < 0) ··· 1032 999 u32 cmdq_id = 0; 1033 1000 int ret; 1034 1001 1035 - if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) 1002 + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { 1003 + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); 1036 1004 return -ENODEV; 1005 + } 1037 1006 1038 1007 ret = ivpu_rpm_get(vdev); 1039 1008 if (ret < 0)
+2 -1
drivers/accel/ivpu/ivpu_mmu_context.c
··· 529 529 530 530 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); 531 531 if (ret) 532 - ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); 532 + ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n", 533 + ctx->id, ret); 533 534 } 534 535 535 536 int
+17 -8
drivers/accel/ivpu/ivpu_ms.c
··· 8 8 9 9 #include "ivpu_drv.h" 10 10 #include "ivpu_gem.h" 11 + #include "ivpu_hw.h" 11 12 #include "ivpu_jsm_msg.h" 12 13 #include "ivpu_ms.h" 13 14 #include "ivpu_pm.h" ··· 38 37 struct drm_ivpu_metric_streamer_start *args = data; 39 38 struct ivpu_device *vdev = file_priv->vdev; 40 39 struct ivpu_ms_instance *ms; 41 - u64 single_buff_size; 42 40 u32 sample_size; 41 + u64 buf_size; 43 42 int ret; 44 43 45 44 if (!args->metric_group_mask || !args->read_period_samples || ··· 53 52 mutex_lock(&file_priv->ms_lock); 54 53 55 54 if (get_instance_by_mask(file_priv, args->metric_group_mask)) { 56 - ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask); 55 + ivpu_dbg(vdev, IOCTL, "Instance already exists (mask %#llx)\n", 56 + args->metric_group_mask); 57 57 ret = -EALREADY; 58 58 goto unlock; 59 59 } ··· 71 69 if (ret) 72 70 goto err_free_ms; 73 71 74 - single_buff_size = sample_size * 75 - ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER); 76 - ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS), 77 - DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); 72 + buf_size = PAGE_ALIGN((u64)args->read_period_samples * sample_size * 73 + MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS); 74 + if (buf_size > ivpu_hw_range_size(&vdev->hw->ranges.global)) { 75 + ivpu_dbg(vdev, IOCTL, "Requested MS buffer size %llu exceeds range size %llu\n", 76 + buf_size, ivpu_hw_range_size(&vdev->hw->ranges.global)); 77 + ret = -EINVAL; 78 + goto err_free_ms; 79 + } 80 + 81 + ms->bo = ivpu_bo_create_global(vdev, buf_size, DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); 78 82 if (!ms->bo) { 79 - ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size); 83 + ivpu_dbg(vdev, IOCTL, "Failed to allocate MS buffer (size %llu)\n", buf_size); 80 84 ret = -ENOMEM; 81 85 goto err_free_ms; 82 86 } ··· 183 175 184 176 ms = get_instance_by_mask(file_priv, args->metric_group_mask); 185 177 if (!ms) { 186 - ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask); 178 + ivpu_dbg(vdev, IOCTL, "Instance doesn't exist for mask: %#llx\n", 179 + args->metric_group_mask); 187 180 ret = -EINVAL; 188 181 goto unlock; 189 182 }