Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c at v4.19-rc6 (325 lines, 8.5 kB)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
						   rhead);

	kvfree(list);
}

static void amdgpu_bo_list_free(struct kref *ref)
{
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);
	struct amdgpu_bo_list_entry *e;

	amdgpu_bo_list_for_each_entry(e, list)
		amdgpu_bo_unref(&e->robj);

	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}

int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
			  struct drm_amdgpu_bo_list_entry *info,
			  unsigned num_entries, struct amdgpu_bo_list **result)
{
	unsigned last_entry = 0, first_userptr = num_entries;
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo_list *list;
	uint64_t total_size = 0;
	size_t size;
	unsigned i;
	int r;

	if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
		return -EINVAL;

	size = sizeof(struct amdgpu_bo_list);
	size += num_entries * sizeof(struct amdgpu_bo_list_entry);
	list = kvmalloc(size, GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	kref_init(&list->refcount);
	list->gds_obj = adev->gds.gds_gfx_bo;
	list->gws_obj = adev->gds.gws_gfx_bo;
	list->oa_obj = adev->gds.oa_gfx_bo;

	array = amdgpu_bo_list_array_entry(list, 0);
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			list->gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			list->gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			list->oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	list->first_userptr = first_userptr;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);

	*result = list;
	return 0;

error_free:
	while (i--)
		amdgpu_bo_unref(&array[i].robj);
	kvfree(list);
	return r;

}

static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_free);
}

int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
		       struct amdgpu_bo_list **result)
{
	rcu_read_lock();
	*result = idr_find(&fpriv->bo_list_handles, id);

	if (*result && kref_get_unless_zero(&(*result)->refcount)) {
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();
	return -ENOENT;
}

void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	struct amdgpu_bo_list_entry *e;
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	amdgpu_bo_list_for_each_entry(e, list) {
		unsigned priority = e->priority;

		if (!e->robj->parent)
			list_add_tail(&e->tv.head, &bucket[priority]);

		e->user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	kref_put(&list->refcount, amdgpu_bo_list_free);
}

int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
				      struct drm_amdgpu_bo_list_entry **info_param)
{
	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == in->bo_info_size)) {
		unsigned long bytes = in->bo_number *
			in->bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(in->bo_info_size, info_size);
		unsigned i;

		memset(info, 0, in->bo_number * info_size);
		for (i = 0; i < in->bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += in->bo_info_size;
		}
	}

	*info_param = info;
	return 0;

error_free:
	kvfree(info);
	return r;
}

int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list, *old;
	int r;

	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
	if (r)
		goto error_free;

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
		mutex_unlock(&fpriv->bo_list_lock);
		if (r < 0) {
			amdgpu_bo_list_put(list);
			return r;
		}

		handle = r;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		old = idr_replace(&fpriv->bo_list_handles, list, handle);
		mutex_unlock(&fpriv->bo_list_lock);

		if (IS_ERR(old)) {
			amdgpu_bo_list_put(list);
			r = PTR_ERR(old);
			goto error_free;
		}

		amdgpu_bo_list_put(old);
		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_free:
	if (info)
		kvfree(info);
	return r;
}
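Two hedged sketches may help make the file above concrete. First, the stable bucket sort used by amdgpu_bo_list_get_list(), in isolation: items of priority i are appended to bucket[i] in encounter order (so the sort is stable), and the buckets are then walked from highest to lowest, matching the kernel comment about descending concatenation. This is a standalone userspace mock-up, not kernel code; every name in it (struct entry, bucket_add, the demo items) is invented for illustration.

#include <stdio.h>

#define MAX_PRIORITY	3u
#define NUM_BUCKETS	(MAX_PRIORITY + 1)

struct entry {
	const char *name;
	unsigned priority;
	struct entry *next;	/* singly linked list; stands in for list_head */
};

/* Append e to its bucket's tail; appending preserves insertion order,
 * which is what makes the overall sort stable. */
static void bucket_add(struct entry **head, struct entry **tail,
		       struct entry *e)
{
	e->next = NULL;
	if (*tail)
		(*tail)->next = e;
	else
		*head = e;
	*tail = e;
}

int main(void)
{
	struct entry items[] = {
		{ "texture", 1 }, { "vbo", 0 }, { "cmdbuf", 1 },
		{ "scratch", 3 }, { "shader", 0 },
	};
	struct entry *head[NUM_BUCKETS] = { NULL };
	struct entry *tail[NUM_BUCKETS] = { NULL };
	unsigned i;

	/* Distribute in O(n): each item goes to the bucket for its priority. */
	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		bucket_add(&head[items[i].priority], &tail[items[i].priority],
			   &items[i]);

	/* Concatenate buckets in descending priority order; items with equal
	 * priority ("texture" before "cmdbuf") keep their original order. */
	for (i = NUM_BUCKETS; i-- > 0;) {
		struct entry *e;

		for (e = head[i]; e; e = e->next)
			printf("%s (prio %u)\n", e->name, e->priority);
	}
	return 0;
}

Second, how userspace reaches amdgpu_bo_list_ioctl(). This is a sketch against the uAPI types from <drm/amdgpu_drm.h> (union drm_amdgpu_bo_list, struct drm_amdgpu_bo_list_entry, AMDGPU_BO_LIST_OP_CREATE) and libdrm's drmIoctl(); the header include path, the already-open DRM fd, and the GEM handles handle0/handle1 (which would come from earlier buffer allocations) are all assumptions of the example, not something this file provides.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/amdgpu_drm.h>

static int create_bo_list(int fd, uint32_t handle0, uint32_t handle1,
			  uint32_t *list_handle)
{
	struct drm_amdgpu_bo_list_entry entries[2] = {
		{ .bo_handle = handle0, .bo_priority = 0 },
		{ .bo_handle = handle1, .bo_priority = 8 },
	};
	union drm_amdgpu_bo_list args;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = 2;
	args.in.bo_info_size = sizeof(entries[0]);
	args.in.bo_info_ptr = (uintptr_t)entries;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_BO_LIST, &args))
		return -1;

	/* The handle returned via args.out is the id from idr_alloc(). */
	*list_handle = args.out.list_handle;
	return 0;
}

Note the role of bo_info_size: when it equals the kernel's sizeof(struct drm_amdgpu_bo_list_entry), amdgpu_bo_create_list_entry_array() takes the single copy_from_user() fast path; a different size falls into the per-entry loop that copies min(bo_info_size, info_size) bytes per element, which is how the ioctl stays compatible with userspace built against older or newer entry layouts.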