Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: Add ioctl to get all gem handles for a process

Add new ioctl DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES.

This ioctl returns a list of bos with their handles, sizes,
flags, and domains.

This ioctl is meant to be used during CRIU checkpoint and
provides the information needed to reconstruct the bos
in CRIU restore.

Userspace for this and the next change can be found at
https://github.com/checkpoint-restore/criu/pull/2613

Signed-off-by: David Francis <David.Francis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

David Francis and committed by
Alex Deucher
f9db1fc5 0317e0e2

+116
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 3051 3051 DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 3052 3052 DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 3053 3053 DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 3054 + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 3054 3055 }; 3055 3056 3056 3057 static const struct drm_driver amdgpu_kms_driver = {
+79
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 1024 1024 return r; 1025 1025 } 1026 1026 1027 + /** 1028 + * drm_amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects 1029 + * 1030 + * @dev: drm device pointer 1031 + * @data: drm_amdgpu_gem_list_handles 1032 + * @filp: drm file pointer 1033 + * 1034 + * num_entries is set as an input to the size of the entries array. 1035 + * num_entries is sent back as output as the number of bos in the process. 1036 + * If that number is larger than the size of the array, the ioctl must 1037 + * be retried. 1038 + * 1039 + * Returns: 1040 + * 0 for success, -errno for errors. 1041 + */ 1042 + int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data, 1043 + struct drm_file *filp) 1044 + { 1045 + struct drm_amdgpu_gem_list_handles *args = data; 1046 + struct drm_amdgpu_gem_list_handles_entry *bo_entries; 1047 + struct drm_gem_object *gobj; 1048 + int id, ret = 0; 1049 + int bo_index = 0; 1050 + int num_bos = 0; 1051 + 1052 + spin_lock(&filp->table_lock); 1053 + idr_for_each_entry(&filp->object_idr, gobj, id) 1054 + num_bos += 1; 1055 + spin_unlock(&filp->table_lock); 1056 + 1057 + if (args->num_entries < num_bos) { 1058 + args->num_entries = num_bos; 1059 + return 0; 1060 + } 1061 + 1062 + if (num_bos == 0) { 1063 + args->num_entries = 0; 1064 + return 0; 1065 + } 1066 + 1067 + bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL); 1068 + if (!bo_entries) 1069 + return -ENOMEM; 1070 + 1071 + spin_lock(&filp->table_lock); 1072 + idr_for_each_entry(&filp->object_idr, gobj, id) { 1073 + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); 1074 + struct drm_amdgpu_gem_list_handles_entry *bo_entry; 1075 + 1076 + if (bo_index >= num_bos) { 1077 + ret = -EAGAIN; 1078 + break; 1079 + } 1080 + 1081 + bo_entry = &bo_entries[bo_index]; 1082 + 1083 + bo_entry->size = amdgpu_bo_size(bo); 1084 + bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK; 1085 + bo_entry->preferred_domains = bo->preferred_domains; 1086 + 
bo_entry->gem_handle = id; 1087 + bo_entry->alignment = bo->tbo.page_alignment; 1088 + 1089 + if (bo->tbo.base.import_attach) 1090 + bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT; 1091 + 1092 + bo_index += 1; 1093 + } 1094 + spin_unlock(&filp->table_lock); 1095 + 1096 + args->num_entries = bo_index; 1097 + 1098 + if (!ret) 1099 + ret = copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)); 1100 + 1101 + kvfree(bo_entries); 1102 + 1103 + return ret; 1104 + } 1105 + 1027 1106 static int amdgpu_gem_align_pitch(struct amdgpu_device *adev, 1028 1107 int width, 1029 1108 int cpp,
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
··· 67 67 struct drm_file *filp); 68 68 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, 69 69 struct drm_file *filp); 70 + int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data, 71 + struct drm_file *filp); 70 72 71 73 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, 72 74 struct drm_file *filp);
+34
include/uapi/drm/amdgpu_drm.h
··· 57 57 #define DRM_AMDGPU_USERQ 0x16 58 58 #define DRM_AMDGPU_USERQ_SIGNAL 0x17 59 59 #define DRM_AMDGPU_USERQ_WAIT 0x18 60 + #define DRM_AMDGPU_GEM_LIST_HANDLES 0x19 60 61 61 62 #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) 62 63 #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) ··· 78 77 #define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq) 79 78 #define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal) 80 79 #define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait) 80 + #define DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_LIST_HANDLES, struct drm_amdgpu_gem_list_handles) 81 81 82 82 /** 83 83 * DOC: memory domains ··· 811 809 __u32 op; 812 810 /** Input or return value */ 813 811 __u64 value; 812 + }; 813 + 814 + #define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0) 815 + 816 + struct drm_amdgpu_gem_list_handles { 817 + /* User pointer to array of drm_amdgpu_gem_bo_info_entry */ 818 + __u64 entries; 819 + 820 + /* Size of entries buffer / Number of handles in process (if larger than size of buffer, must retry) */ 821 + __u32 num_entries; 822 + 823 + __u32 padding; 824 + }; 825 + 826 + struct drm_amdgpu_gem_list_handles_entry { 827 + /* gem handle of buffer object */ 828 + __u32 gem_handle; 829 + 830 + /* Currently just one flag: IS_IMPORT */ 831 + __u32 flags; 832 + 833 + /* Size of bo */ 834 + __u64 size; 835 + 836 + /* Preferred domains for GEM_CREATE */ 837 + __u64 preferred_domains; 838 + 839 + /* GEM_CREATE flags for re-creation of buffer */ 840 + __u64 alloc_flags; 841 + 842 + /* physical start_addr alignment in bytes for some HW requirements */ 843 + __u64 alignment; 814 844 }; 815 845 816 846 #define AMDGPU_VA_OP_MAP 1