Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-buf: rename reservation_object to dma_resv

Be more consistent with the naming of the other DMA-buf objects.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/323401/

+523 -550
+1 -1
drivers/dma-buf/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ 3 - reservation.o seqno-fence.o 3 + dma-resv.o seqno-fence.o 4 4 obj-$(CONFIG_SYNC_FILE) += sync_file.o 5 5 obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o 6 6 obj-$(CONFIG_UDMABUF) += udmabuf.o
+16 -16
drivers/dma-buf/dma-buf.c
··· 21 21 #include <linux/module.h> 22 22 #include <linux/seq_file.h> 23 23 #include <linux/poll.h> 24 - #include <linux/reservation.h> 24 + #include <linux/dma-resv.h> 25 25 #include <linux/mm.h> 26 26 #include <linux/mount.h> 27 27 #include <linux/pseudo_fs.h> ··· 104 104 list_del(&dmabuf->list_node); 105 105 mutex_unlock(&db_list.lock); 106 106 107 - if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) 108 - reservation_object_fini(dmabuf->resv); 107 + if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) 108 + dma_resv_fini(dmabuf->resv); 109 109 110 110 module_put(dmabuf->owner); 111 111 kfree(dmabuf); ··· 165 165 * To support cross-device and cross-driver synchronization of buffer access 166 166 * implicit fences (represented internally in the kernel with &struct fence) can 167 167 * be attached to a &dma_buf. The glue for that and a few related things are 168 - * provided in the &reservation_object structure. 168 + * provided in the &dma_resv structure. 169 169 * 170 170 * Userspace can query the state of these implicitly tracked fences using poll() 171 171 * and related system calls: ··· 195 195 static __poll_t dma_buf_poll(struct file *file, poll_table *poll) 196 196 { 197 197 struct dma_buf *dmabuf; 198 - struct reservation_object *resv; 199 - struct reservation_object_list *fobj; 198 + struct dma_resv *resv; 199 + struct dma_resv_list *fobj; 200 200 struct dma_fence *fence_excl; 201 201 __poll_t events; 202 202 unsigned shared_count; ··· 214 214 return 0; 215 215 216 216 rcu_read_lock(); 217 - reservation_object_fences(resv, &fence_excl, &fobj, &shared_count); 217 + dma_resv_fences(resv, &fence_excl, &fobj, &shared_count); 218 218 if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) { 219 219 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; 220 220 __poll_t pevents = EPOLLIN; ··· 493 493 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) 494 494 { 495 495 struct dma_buf *dmabuf; 496 - struct reservation_object 
*resv = exp_info->resv; 496 + struct dma_resv *resv = exp_info->resv; 497 497 struct file *file; 498 498 size_t alloc_size = sizeof(struct dma_buf); 499 499 int ret; 500 500 501 501 if (!exp_info->resv) 502 - alloc_size += sizeof(struct reservation_object); 502 + alloc_size += sizeof(struct dma_resv); 503 503 else 504 504 /* prevent &dma_buf[1] == dma_buf->resv */ 505 505 alloc_size += 1; ··· 531 531 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; 532 532 533 533 if (!resv) { 534 - resv = (struct reservation_object *)&dmabuf[1]; 535 - reservation_object_init(resv); 534 + resv = (struct dma_resv *)&dmabuf[1]; 535 + dma_resv_init(resv); 536 536 } 537 537 dmabuf->resv = resv; 538 538 ··· 896 896 { 897 897 bool write = (direction == DMA_BIDIRECTIONAL || 898 898 direction == DMA_TO_DEVICE); 899 - struct reservation_object *resv = dmabuf->resv; 899 + struct dma_resv *resv = dmabuf->resv; 900 900 long ret; 901 901 902 902 /* Wait on any implicit rendering fences */ 903 - ret = reservation_object_wait_timeout_rcu(resv, write, true, 903 + ret = dma_resv_wait_timeout_rcu(resv, write, true, 904 904 MAX_SCHEDULE_TIMEOUT); 905 905 if (ret < 0) 906 906 return ret; ··· 1141 1141 int ret; 1142 1142 struct dma_buf *buf_obj; 1143 1143 struct dma_buf_attachment *attach_obj; 1144 - struct reservation_object *robj; 1145 - struct reservation_object_list *fobj; 1144 + struct dma_resv *robj; 1145 + struct dma_resv_list *fobj; 1146 1146 struct dma_fence *fence; 1147 1147 int count = 0, attach_count, shared_count, i; 1148 1148 size_t size = 0; ··· 1175 1175 1176 1176 robj = buf_obj->resv; 1177 1177 rcu_read_lock(); 1178 - reservation_object_fences(robj, &fence, &fobj, &shared_count); 1178 + dma_resv_fences(robj, &fence, &fobj, &shared_count); 1179 1179 rcu_read_unlock(); 1180 1180 1181 1181 if (fence)
+1 -1
drivers/dma-buf/dma-fence.c
··· 60 60 * 61 61 * - Then there's also implicit fencing, where the synchronization points are 62 62 * implicitly passed around as part of shared &dma_buf instances. Such 63 - * implicit fences are stored in &struct reservation_object through the 63 + * implicit fences are stored in &struct dma_resv through the 64 64 * &dma_buf.resv pointer. 65 65 */ 66 66
+79 -86
drivers/dma-buf/reservation.c drivers/dma-buf/dma-resv.c
··· 32 32 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 33 33 */ 34 34 35 - #include <linux/reservation.h> 35 + #include <linux/dma-resv.h> 36 36 #include <linux/export.h> 37 37 38 38 /** ··· 50 50 EXPORT_SYMBOL(reservation_ww_class); 51 51 52 52 /** 53 - * reservation_object_list_alloc - allocate fence list 53 + * dma_resv_list_alloc - allocate fence list 54 54 * @shared_max: number of fences we need space for 55 55 * 56 - * Allocate a new reservation_object_list and make sure to correctly initialize 56 + * Allocate a new dma_resv_list and make sure to correctly initialize 57 57 * shared_max. 58 58 */ 59 - static struct reservation_object_list * 60 - reservation_object_list_alloc(unsigned int shared_max) 59 + static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max) 61 60 { 62 - struct reservation_object_list *list; 61 + struct dma_resv_list *list; 63 62 64 63 list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL); 65 64 if (!list) ··· 71 72 } 72 73 73 74 /** 74 - * reservation_object_list_free - free fence list 75 + * dma_resv_list_free - free fence list 75 76 * @list: list to free 76 77 * 77 - * Free a reservation_object_list and make sure to drop all references. 78 + * Free a dma_resv_list and make sure to drop all references. 
78 79 */ 79 - static void reservation_object_list_free(struct reservation_object_list *list) 80 + static void dma_resv_list_free(struct dma_resv_list *list) 80 81 { 81 82 unsigned int i; 82 83 ··· 90 91 } 91 92 92 93 /** 93 - * reservation_object_init - initialize a reservation object 94 + * dma_resv_init - initialize a reservation object 94 95 * @obj: the reservation object 95 96 */ 96 - void reservation_object_init(struct reservation_object *obj) 97 + void dma_resv_init(struct dma_resv *obj) 97 98 { 98 99 ww_mutex_init(&obj->lock, &reservation_ww_class); 99 100 RCU_INIT_POINTER(obj->fence, NULL); 100 101 RCU_INIT_POINTER(obj->fence_excl, NULL); 101 102 } 102 - EXPORT_SYMBOL(reservation_object_init); 103 + EXPORT_SYMBOL(dma_resv_init); 103 104 104 105 /** 105 - * reservation_object_fini - destroys a reservation object 106 + * dma_resv_fini - destroys a reservation object 106 107 * @obj: the reservation object 107 108 */ 108 - void reservation_object_fini(struct reservation_object *obj) 109 + void dma_resv_fini(struct dma_resv *obj) 109 110 { 110 - struct reservation_object_list *fobj; 111 + struct dma_resv_list *fobj; 111 112 struct dma_fence *excl; 112 113 113 114 /* ··· 119 120 dma_fence_put(excl); 120 121 121 122 fobj = rcu_dereference_protected(obj->fence, 1); 122 - reservation_object_list_free(fobj); 123 + dma_resv_list_free(fobj); 123 124 ww_mutex_destroy(&obj->lock); 124 125 } 125 - EXPORT_SYMBOL(reservation_object_fini); 126 + EXPORT_SYMBOL(dma_resv_fini); 126 127 127 128 /** 128 - * reservation_object_reserve_shared - Reserve space to add shared fences to 129 - * a reservation_object. 129 + * dma_resv_reserve_shared - Reserve space to add shared fences to 130 + * a dma_resv. 130 131 * @obj: reservation object 131 132 * @num_fences: number of fences we want to add 132 133 * 133 - * Should be called before reservation_object_add_shared_fence(). Must 134 + * Should be called before dma_resv_add_shared_fence(). Must 134 135 * be called with obj->lock held. 
135 136 * 136 137 * RETURNS 137 138 * Zero for success, or -errno 138 139 */ 139 - int reservation_object_reserve_shared(struct reservation_object *obj, 140 - unsigned int num_fences) 140 + int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences) 141 141 { 142 - struct reservation_object_list *old, *new; 142 + struct dma_resv_list *old, *new; 143 143 unsigned int i, j, k, max; 144 144 145 - reservation_object_assert_held(obj); 145 + dma_resv_assert_held(obj); 146 146 147 - old = reservation_object_get_list(obj); 147 + old = dma_resv_get_list(obj); 148 148 149 149 if (old && old->shared_max) { 150 150 if ((old->shared_count + num_fences) <= old->shared_max) ··· 155 157 max = 4; 156 158 } 157 159 158 - new = reservation_object_list_alloc(max); 160 + new = dma_resv_list_alloc(max); 159 161 if (!new) 160 162 return -ENOMEM; 161 163 ··· 169 171 struct dma_fence *fence; 170 172 171 173 fence = rcu_dereference_protected(old->shared[i], 172 - reservation_object_held(obj)); 174 + dma_resv_held(obj)); 173 175 if (dma_fence_is_signaled(fence)) 174 176 RCU_INIT_POINTER(new->shared[--k], fence); 175 177 else ··· 195 197 struct dma_fence *fence; 196 198 197 199 fence = rcu_dereference_protected(new->shared[i], 198 - reservation_object_held(obj)); 200 + dma_resv_held(obj)); 199 201 dma_fence_put(fence); 200 202 } 201 203 kfree_rcu(old, rcu); 202 204 203 205 return 0; 204 206 } 205 - EXPORT_SYMBOL(reservation_object_reserve_shared); 207 + EXPORT_SYMBOL(dma_resv_reserve_shared); 206 208 207 209 /** 208 - * reservation_object_add_shared_fence - Add a fence to a shared slot 210 + * dma_resv_add_shared_fence - Add a fence to a shared slot 209 211 * @obj: the reservation object 210 212 * @fence: the shared fence to add 211 213 * 212 214 * Add a fence to a shared slot, obj->lock must be held, and 213 - * reservation_object_reserve_shared() has been called. 215 + * dma_resv_reserve_shared() has been called. 
214 216 */ 215 - void reservation_object_add_shared_fence(struct reservation_object *obj, 216 - struct dma_fence *fence) 217 + void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) 217 218 { 218 - struct reservation_object_list *fobj; 219 + struct dma_resv_list *fobj; 219 220 struct dma_fence *old; 220 221 unsigned int i, count; 221 222 222 223 dma_fence_get(fence); 223 224 224 - reservation_object_assert_held(obj); 225 + dma_resv_assert_held(obj); 225 226 226 - fobj = reservation_object_get_list(obj); 227 + fobj = dma_resv_get_list(obj); 227 228 count = fobj->shared_count; 228 229 229 230 for (i = 0; i < count; ++i) { 230 231 231 232 old = rcu_dereference_protected(fobj->shared[i], 232 - reservation_object_held(obj)); 233 + dma_resv_held(obj)); 233 234 if (old->context == fence->context || 234 235 dma_fence_is_signaled(old)) 235 236 goto replace; ··· 244 247 smp_store_mb(fobj->shared_count, count); 245 248 dma_fence_put(old); 246 249 } 247 - EXPORT_SYMBOL(reservation_object_add_shared_fence); 250 + EXPORT_SYMBOL(dma_resv_add_shared_fence); 248 251 249 252 /** 250 - * reservation_object_add_excl_fence - Add an exclusive fence. 253 + * dma_resv_add_excl_fence - Add an exclusive fence. 251 254 * @obj: the reservation object 252 255 * @fence: the shared fence to add 253 256 * 254 257 * Add a fence to the exclusive slot. The obj->lock must be held. 
255 258 */ 256 - void reservation_object_add_excl_fence(struct reservation_object *obj, 257 - struct dma_fence *fence) 259 + void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) 258 260 { 259 - struct dma_fence *old_fence = reservation_object_get_excl(obj); 260 - struct reservation_object_list *old; 261 + struct dma_fence *old_fence = dma_resv_get_excl(obj); 262 + struct dma_resv_list *old; 261 263 u32 i = 0; 262 264 263 - reservation_object_assert_held(obj); 265 + dma_resv_assert_held(obj); 264 266 265 - old = reservation_object_get_list(obj); 267 + old = dma_resv_get_list(obj); 266 268 if (old) 267 269 i = old->shared_count; 268 270 ··· 278 282 /* inplace update, no shared fences */ 279 283 while (i--) 280 284 dma_fence_put(rcu_dereference_protected(old->shared[i], 281 - reservation_object_held(obj))); 285 + dma_resv_held(obj))); 282 286 283 287 dma_fence_put(old_fence); 284 288 } 285 - EXPORT_SYMBOL(reservation_object_add_excl_fence); 289 + EXPORT_SYMBOL(dma_resv_add_excl_fence); 286 290 287 291 /** 288 - * reservation_object_copy_fences - Copy all fences from src to dst. 292 + * dma_resv_copy_fences - Copy all fences from src to dst. 289 293 * @dst: the destination reservation object 290 294 * @src: the source reservation object 291 295 * 292 296 * Copy all fences from src to dst. dst-lock must be held. 
293 297 */ 294 - int reservation_object_copy_fences(struct reservation_object *dst, 295 - struct reservation_object *src) 298 + int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) 296 299 { 297 - struct reservation_object_list *src_list, *dst_list; 300 + struct dma_resv_list *src_list, *dst_list; 298 301 struct dma_fence *old, *new; 299 302 unsigned int i, shared_count; 300 303 301 - reservation_object_assert_held(dst); 304 + dma_resv_assert_held(dst); 302 305 303 306 rcu_read_lock(); 304 307 305 308 retry: 306 - reservation_object_fences(src, &new, &src_list, &shared_count); 309 + dma_resv_fences(src, &new, &src_list, &shared_count); 307 310 if (shared_count) { 308 311 rcu_read_unlock(); 309 312 310 - dst_list = reservation_object_list_alloc(shared_count); 313 + dst_list = dma_resv_list_alloc(shared_count); 311 314 if (!dst_list) 312 315 return -ENOMEM; 313 316 314 317 rcu_read_lock(); 315 - reservation_object_fences(src, &new, &src_list, &shared_count); 318 + dma_resv_fences(src, &new, &src_list, &shared_count); 316 319 if (!src_list || shared_count > dst_list->shared_max) { 317 320 kfree(dst_list); 318 321 goto retry; ··· 327 332 continue; 328 333 329 334 if (!dma_fence_get_rcu(fence)) { 330 - reservation_object_list_free(dst_list); 335 + dma_resv_list_free(dst_list); 331 336 goto retry; 332 337 } 333 338 ··· 343 348 } 344 349 345 350 if (new && !dma_fence_get_rcu(new)) { 346 - reservation_object_list_free(dst_list); 351 + dma_resv_list_free(dst_list); 347 352 goto retry; 348 353 } 349 354 rcu_read_unlock(); 350 355 351 - src_list = reservation_object_get_list(dst); 352 - old = reservation_object_get_excl(dst); 356 + src_list = dma_resv_get_list(dst); 357 + old = dma_resv_get_excl(dst); 353 358 354 359 preempt_disable(); 355 360 rcu_assign_pointer(dst->fence_excl, new); 356 361 rcu_assign_pointer(dst->fence, dst_list); 357 362 preempt_enable(); 358 363 359 - reservation_object_list_free(src_list); 364 + dma_resv_list_free(src_list); 360 365 
dma_fence_put(old); 361 366 362 367 return 0; 363 368 } 364 - EXPORT_SYMBOL(reservation_object_copy_fences); 369 + EXPORT_SYMBOL(dma_resv_copy_fences); 365 370 366 371 /** 367 - * reservation_object_get_fences_rcu - Get an object's shared and exclusive 372 + * dma_resv_get_fences_rcu - Get an object's shared and exclusive 368 373 * fences without update side lock held 369 374 * @obj: the reservation object 370 375 * @pfence_excl: the returned exclusive fence (or NULL) ··· 376 381 * exclusive fence is not specified the fence is put into the array of the 377 382 * shared fences as well. Returns either zero or -ENOMEM. 378 383 */ 379 - int reservation_object_get_fences_rcu(struct reservation_object *obj, 380 - struct dma_fence **pfence_excl, 381 - unsigned *pshared_count, 382 - struct dma_fence ***pshared) 384 + int dma_resv_get_fences_rcu(struct dma_resv *obj, 385 + struct dma_fence **pfence_excl, 386 + unsigned *pshared_count, 387 + struct dma_fence ***pshared) 383 388 { 384 389 struct dma_fence **shared = NULL; 385 390 struct dma_fence *fence_excl; ··· 387 392 int ret = 1; 388 393 389 394 do { 390 - struct reservation_object_list *fobj; 395 + struct dma_resv_list *fobj; 391 396 unsigned int i; 392 397 size_t sz = 0; 393 398 394 399 i = 0; 395 400 396 401 rcu_read_lock(); 397 - reservation_object_fences(obj, &fence_excl, &fobj, 402 + dma_resv_fences(obj, &fence_excl, &fobj, 398 403 &shared_count); 399 404 400 405 if (fence_excl && !dma_fence_get_rcu(fence_excl)) ··· 460 465 *pshared = shared; 461 466 return ret; 462 467 } 463 - EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); 468 + EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu); 464 469 465 470 /** 466 - * reservation_object_wait_timeout_rcu - Wait on reservation's objects 471 + * dma_resv_wait_timeout_rcu - Wait on reservation's objects 467 472 * shared and/or exclusive fences. 
468 473 * @obj: the reservation object 469 474 * @wait_all: if true, wait on all fences, else wait on just exclusive fence ··· 474 479 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 475 480 * greater than zero on success. 476 481 */ 477 - long reservation_object_wait_timeout_rcu(struct reservation_object *obj, 478 - bool wait_all, bool intr, 479 - unsigned long timeout) 482 + long dma_resv_wait_timeout_rcu(struct dma_resv *obj, 483 + bool wait_all, bool intr, 484 + unsigned long timeout) 480 485 { 481 - struct reservation_object_list *fobj; 486 + struct dma_resv_list *fobj; 482 487 struct dma_fence *fence; 483 488 unsigned shared_count; 484 489 long ret = timeout ? timeout : 1; ··· 488 493 rcu_read_lock(); 489 494 i = -1; 490 495 491 - reservation_object_fences(obj, &fence, &fobj, &shared_count); 496 + dma_resv_fences(obj, &fence, &fobj, &shared_count); 492 497 if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 493 498 if (!dma_fence_get_rcu(fence)) 494 499 goto unlock_retry; ··· 536 541 rcu_read_unlock(); 537 542 goto retry; 538 543 } 539 - EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); 544 + EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu); 540 545 541 546 542 - static inline int 543 - reservation_object_test_signaled_single(struct dma_fence *passed_fence) 547 + static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) 544 548 { 545 549 struct dma_fence *fence, *lfence = passed_fence; 546 550 int ret = 1; ··· 556 562 } 557 563 558 564 /** 559 - * reservation_object_test_signaled_rcu - Test if a reservation object's 565 + * dma_resv_test_signaled_rcu - Test if a reservation object's 560 566 * fences have been signaled.
561 567 * @obj: the reservation object 562 568 * @test_all: if true, test all fences, otherwise only test the exclusive ··· 565 571 * RETURNS 566 572 * true if all fences signaled, else false 567 573 */ 568 - bool reservation_object_test_signaled_rcu(struct reservation_object *obj, 569 - bool test_all) 574 + bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all) 570 575 { 571 - struct reservation_object_list *fobj; 576 + struct dma_resv_list *fobj; 572 577 struct dma_fence *fence_excl; 573 578 unsigned shared_count; 574 579 int ret; ··· 576 583 retry: 577 584 ret = true; 578 585 579 - reservation_object_fences(obj, &fence_excl, &fobj, &shared_count); 586 + dma_resv_fences(obj, &fence_excl, &fobj, &shared_count); 580 587 if (test_all) { 581 588 unsigned i; 582 589 583 590 for (i = 0; i < shared_count; ++i) { 584 591 struct dma_fence *fence = rcu_dereference(fobj->shared[i]); 585 592 586 - ret = reservation_object_test_signaled_single(fence); 593 + ret = dma_resv_test_signaled_single(fence); 587 594 if (ret < 0) 588 595 goto retry; 589 596 else if (!ret) ··· 592 599 } 593 600 594 601 if (!shared_count && fence_excl) { 595 - ret = reservation_object_test_signaled_single(fence_excl); 602 + ret = dma_resv_test_signaled_single(fence_excl); 596 603 if (ret < 0) 597 604 goto retry; 598 605 } ··· 600 607 rcu_read_unlock(); 601 608 return ret; 602 609 } 603 - EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu); 610 + EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+7 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 218 218 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, 219 219 struct amdgpu_amdkfd_fence *ef) 220 220 { 221 - struct reservation_object *resv = bo->tbo.base.resv; 222 - struct reservation_object_list *old, *new; 221 + struct dma_resv *resv = bo->tbo.base.resv; 222 + struct dma_resv_list *old, *new; 223 223 unsigned int i, j, k; 224 224 225 225 if (!ef) 226 226 return -EINVAL; 227 227 228 - old = reservation_object_get_list(resv); 228 + old = dma_resv_get_list(resv); 229 229 if (!old) 230 230 return 0; 231 231 ··· 241 241 struct dma_fence *f; 242 242 243 243 f = rcu_dereference_protected(old->shared[i], 244 - reservation_object_held(resv)); 244 + dma_resv_held(resv)); 245 245 246 246 if (f->context == ef->base.context) 247 247 RCU_INIT_POINTER(new->shared[--j], f); ··· 258 258 struct dma_fence *f; 259 259 260 260 f = rcu_dereference_protected(new->shared[i], 261 - reservation_object_held(resv)); 261 + dma_resv_held(resv)); 262 262 dma_fence_put(f); 263 263 } 264 264 kfree_rcu(old, rcu); ··· 882 882 AMDGPU_FENCE_OWNER_KFD, false); 883 883 if (ret) 884 884 goto wait_pd_fail; 885 - ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); 885 + ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); 886 886 if (ret) 887 887 goto reserve_shared_fail; 888 888 amdgpu_bo_fence(vm->root.base.bo, ··· 2127 2127 * Add process eviction fence to bo so they can 2128 2128 * evict each other. 2129 2129 */ 2130 - ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1); 2130 + ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1); 2131 2131 if (ret) 2132 2132 goto reserve_shared_fail; 2133 2133 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 730 730 731 731 list_for_each_entry(e, &p->validated, tv.head) { 732 732 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 733 - struct reservation_object *resv = bo->tbo.base.resv; 733 + struct dma_resv *resv = bo->tbo.base.resv; 734 734 735 735 r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, 736 736 amdgpu_bo_explicit_sync(bo)); ··· 1729 1729 *map = mapping; 1730 1730 1731 1731 /* Double check that the BO is reserved by this CS */ 1732 - if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) 1732 + if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket) 1733 1733 return -EINVAL; 1734 1734 1735 1735 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 204 204 goto unpin; 205 205 } 206 206 207 - r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, 207 + r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, 208 208 &work->shared_count, 209 209 &work->shared); 210 210 if (unlikely(r != 0)) {
+10 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 137 137 } 138 138 139 139 static int 140 - __reservation_object_make_exclusive(struct reservation_object *obj) 140 + __dma_resv_make_exclusive(struct dma_resv *obj) 141 141 { 142 142 struct dma_fence **fences; 143 143 unsigned int count; 144 144 int r; 145 145 146 - if (!reservation_object_get_list(obj)) /* no shared fences to convert */ 146 + if (!dma_resv_get_list(obj)) /* no shared fences to convert */ 147 147 return 0; 148 148 149 - r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); 149 + r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); 150 150 if (r) 151 151 return r; 152 152 153 153 if (count == 0) { 154 154 /* Now that was unexpected. */ 155 155 } else if (count == 1) { 156 - reservation_object_add_excl_fence(obj, fences[0]); 156 + dma_resv_add_excl_fence(obj, fences[0]); 157 157 dma_fence_put(fences[0]); 158 158 kfree(fences); 159 159 } else { ··· 165 165 if (!array) 166 166 goto err_fences_put; 167 167 168 - reservation_object_add_excl_fence(obj, &array->base); 168 + dma_resv_add_excl_fence(obj, &array->base); 169 169 dma_fence_put(&array->base); 170 170 } 171 171 ··· 216 216 * fences on the reservation object into a single exclusive 217 217 * fence. 
218 218 */ 219 - r = __reservation_object_make_exclusive(bo->tbo.base.resv); 219 + r = __dma_resv_make_exclusive(bo->tbo.base.resv); 220 220 if (r) 221 221 goto error_unreserve; 222 222 } ··· 367 367 struct dma_buf_attachment *attach, 368 368 struct sg_table *sg) 369 369 { 370 - struct reservation_object *resv = attach->dmabuf->resv; 370 + struct dma_resv *resv = attach->dmabuf->resv; 371 371 struct amdgpu_device *adev = dev->dev_private; 372 372 struct amdgpu_bo *bo; 373 373 struct amdgpu_bo_param bp; ··· 380 380 bp.flags = 0; 381 381 bp.type = ttm_bo_type_sg; 382 382 bp.resv = resv; 383 - reservation_object_lock(resv, NULL); 383 + dma_resv_lock(resv, NULL); 384 384 ret = amdgpu_bo_create(adev, &bp, &bo); 385 385 if (ret) 386 386 goto error; ··· 392 392 if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) 393 393 bo->prime_shared_count = 1; 394 394 395 - reservation_object_unlock(resv); 395 + dma_resv_unlock(resv); 396 396 return &bo->tbo.base; 397 397 398 398 error: 399 - reservation_object_unlock(resv); 399 + dma_resv_unlock(resv); 400 400 return ERR_PTR(ret); 401 401 } 402 402
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 50 50 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 51 51 int alignment, u32 initial_domain, 52 52 u64 flags, enum ttm_bo_type type, 53 - struct reservation_object *resv, 53 + struct dma_resv *resv, 54 54 struct drm_gem_object **obj) 55 55 { 56 56 struct amdgpu_bo *bo; ··· 215 215 union drm_amdgpu_gem_create *args = data; 216 216 uint64_t flags = args->in.domain_flags; 217 217 uint64_t size = args->in.bo_size; 218 - struct reservation_object *resv = NULL; 218 + struct dma_resv *resv = NULL; 219 219 struct drm_gem_object *gobj; 220 220 uint32_t handle; 221 221 int r; ··· 433 433 return -ENOENT; 434 434 } 435 435 robj = gem_to_amdgpu_bo(gobj); 436 - ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 436 + ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 437 437 timeout); 438 438 439 439 /* ret == 0 means not signaled,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
··· 47 47 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 48 48 int alignment, u32 initial_domain, 49 49 u64 flags, enum ttm_bo_type type, 50 - struct reservation_object *resv, 50 + struct dma_resv *resv, 51 51 struct drm_gem_object **obj); 52 52 53 53 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 104 104 * 105 105 * Free the pasid only after all the fences in resv are signaled. 106 106 */ 107 - void amdgpu_pasid_free_delayed(struct reservation_object *resv, 107 + void amdgpu_pasid_free_delayed(struct dma_resv *resv, 108 108 unsigned int pasid) 109 109 { 110 110 struct dma_fence *fence, **fences; ··· 112 112 unsigned count; 113 113 int r; 114 114 115 - r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences); 115 + r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); 116 116 if (r) 117 117 goto fallback; 118 118 ··· 156 156 /* Not enough memory for the delayed delete, as last resort 157 157 * block for all the fences to complete. 158 158 */ 159 - reservation_object_wait_timeout_rcu(resv, true, false, 159 + dma_resv_wait_timeout_rcu(resv, true, false, 160 160 MAX_SCHEDULE_TIMEOUT); 161 161 amdgpu_pasid_free(pasid); 162 162 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
··· 72 72 73 73 int amdgpu_pasid_alloc(unsigned int bits); 74 74 void amdgpu_pasid_free(unsigned int pasid); 75 - void amdgpu_pasid_free_delayed(struct reservation_object *resv, 75 + void amdgpu_pasid_free_delayed(struct dma_resv *resv, 76 76 unsigned int pasid); 77 77 78 78 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
··· 179 179 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) 180 180 continue; 181 181 182 - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 182 + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, 183 183 true, false, MAX_SCHEDULE_TIMEOUT); 184 184 if (r <= 0) 185 185 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+9 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 544 544 545 545 fail_unreserve: 546 546 if (!bp->resv) 547 - reservation_object_unlock(bo->tbo.base.resv); 547 + dma_resv_unlock(bo->tbo.base.resv); 548 548 amdgpu_bo_unref(&bo); 549 549 return r; 550 550 } ··· 606 606 607 607 if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { 608 608 if (!bp->resv) 609 - WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv, 609 + WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv, 610 610 NULL)); 611 611 612 612 r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); 613 613 614 614 if (!bp->resv) 615 - reservation_object_unlock((*bo_ptr)->tbo.base.resv); 615 + dma_resv_unlock((*bo_ptr)->tbo.base.resv); 616 616 617 617 if (r) 618 618 amdgpu_bo_unref(bo_ptr); ··· 709 709 return 0; 710 710 } 711 711 712 - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false, 712 + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, 713 713 MAX_SCHEDULE_TIMEOUT); 714 714 if (r < 0) 715 715 return r; ··· 1087 1087 */ 1088 1088 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) 1089 1089 { 1090 - reservation_object_assert_held(bo->tbo.base.resv); 1090 + dma_resv_assert_held(bo->tbo.base.resv); 1091 1091 1092 1092 if (tiling_flags) 1093 1093 *tiling_flags = bo->tiling_flags; ··· 1283 1283 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, 1284 1284 bool shared) 1285 1285 { 1286 - struct reservation_object *resv = bo->tbo.base.resv; 1286 + struct dma_resv *resv = bo->tbo.base.resv; 1287 1287 1288 1288 if (shared) 1289 - reservation_object_add_shared_fence(resv, fence); 1289 + dma_resv_add_shared_fence(resv, fence); 1290 1290 else 1291 - reservation_object_add_excl_fence(resv, fence); 1291 + dma_resv_add_excl_fence(resv, fence); 1292 1292 } 1293 1293 1294 1294 /** ··· 1328 1328 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) 1329 1329 { 1330 1330 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); 1331 - 
WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) && 1331 + WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) && 1332 1332 !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel); 1333 1333 WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); 1334 1334 WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 41 41 u32 preferred_domain; 42 42 u64 flags; 43 43 enum ttm_bo_type type; 44 - struct reservation_object *resv; 44 + struct dma_resv *resv; 45 45 }; 46 46 47 47 /* bo virtual addresses in a vm */
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
··· 190 190 */ 191 191 int amdgpu_sync_resv(struct amdgpu_device *adev, 192 192 struct amdgpu_sync *sync, 193 - struct reservation_object *resv, 193 + struct dma_resv *resv, 194 194 void *owner, bool explicit_sync) 195 195 { 196 - struct reservation_object_list *flist; 196 + struct dma_resv_list *flist; 197 197 struct dma_fence *f; 198 198 void *fence_owner; 199 199 unsigned i; ··· 203 203 return -EINVAL; 204 204 205 205 /* always sync to the exclusive fence */ 206 - f = reservation_object_get_excl(resv); 206 + f = dma_resv_get_excl(resv); 207 207 r = amdgpu_sync_fence(adev, sync, f, false); 208 208 209 - flist = reservation_object_get_list(resv); 209 + flist = dma_resv_get_list(resv); 210 210 if (!flist || r) 211 211 return r; 212 212 213 213 for (i = 0; i < flist->shared_count; ++i) { 214 214 f = rcu_dereference_protected(flist->shared[i], 215 - reservation_object_held(resv)); 215 + dma_resv_held(resv)); 216 216 /* We only want to trigger KFD eviction fences on 217 217 * evict or move jobs. Skip KFD fences otherwise. 218 218 */
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
··· 27 27 #include <linux/hashtable.h> 28 28 29 29 struct dma_fence; 30 - struct reservation_object; 30 + struct dma_resv; 31 31 struct amdgpu_device; 32 32 struct amdgpu_ring; 33 33 ··· 44 44 struct dma_fence *f, bool explicit); 45 45 int amdgpu_sync_resv(struct amdgpu_device *adev, 46 46 struct amdgpu_sync *sync, 47 - struct reservation_object *resv, 47 + struct dma_resv *resv, 48 48 void *owner, 49 49 bool explicit_sync); 50 50 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+7 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 303 303 struct amdgpu_copy_mem *src, 304 304 struct amdgpu_copy_mem *dst, 305 305 uint64_t size, 306 - struct reservation_object *resv, 306 + struct dma_resv *resv, 307 307 struct dma_fence **f) 308 308 { 309 309 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; ··· 1470 1470 { 1471 1471 unsigned long num_pages = bo->mem.num_pages; 1472 1472 struct drm_mm_node *node = bo->mem.mm_node; 1473 - struct reservation_object_list *flist; 1473 + struct dma_resv_list *flist; 1474 1474 struct dma_fence *f; 1475 1475 int i; 1476 1476 ··· 1478 1478 * cleanly handle page faults. 1479 1479 */ 1480 1480 if (bo->type == ttm_bo_type_kernel && 1481 - !reservation_object_test_signaled_rcu(bo->base.resv, true)) 1481 + !dma_resv_test_signaled_rcu(bo->base.resv, true)) 1482 1482 return false; 1483 1483 1484 1484 /* If bo is a KFD BO, check if the bo belongs to the current process. 1485 1485 * If true, then return false as any KFD process needs all its BOs to 1486 1486 * be resident to run successfully 1487 1487 */ 1488 - flist = reservation_object_get_list(bo->base.resv); 1488 + flist = dma_resv_get_list(bo->base.resv); 1489 1489 if (flist) { 1490 1490 for (i = 0; i < flist->shared_count; ++i) { 1491 1491 f = rcu_dereference_protected(flist->shared[i], 1492 - reservation_object_held(bo->base.resv)); 1492 + dma_resv_held(bo->base.resv)); 1493 1493 if (amdkfd_fence_check_mm(f, current->mm)) 1494 1494 return false; 1495 1495 } ··· 1992 1992 1993 1993 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, 1994 1994 uint64_t dst_offset, uint32_t byte_count, 1995 - struct reservation_object *resv, 1995 + struct dma_resv *resv, 1996 1996 struct dma_fence **fence, bool direct_submit, 1997 1997 bool vm_needs_flush) 1998 1998 { ··· 2066 2066 2067 2067 int amdgpu_fill_buffer(struct amdgpu_bo *bo, 2068 2068 uint32_t src_data, 2069 - struct reservation_object *resv, 2069 + struct dma_resv *resv, 2070 2070 struct dma_fence **fence) 2071 2071 { 2072 2072 struct amdgpu_device 
*adev = amdgpu_ttm_adev(bo->tbo.bdev);
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 83 83 84 84 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, 85 85 uint64_t dst_offset, uint32_t byte_count, 86 - struct reservation_object *resv, 86 + struct dma_resv *resv, 87 87 struct dma_fence **fence, bool direct_submit, 88 88 bool vm_needs_flush); 89 89 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, 90 90 struct amdgpu_copy_mem *src, 91 91 struct amdgpu_copy_mem *dst, 92 92 uint64_t size, 93 - struct reservation_object *resv, 93 + struct dma_resv *resv, 94 94 struct dma_fence **f); 95 95 int amdgpu_fill_buffer(struct amdgpu_bo *bo, 96 96 uint32_t src_data, 97 - struct reservation_object *resv, 97 + struct dma_resv *resv, 98 98 struct dma_fence **fence); 99 99 100 100 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 1073 1073 ib->length_dw = 16; 1074 1074 1075 1075 if (direct) { 1076 - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 1076 + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, 1077 1077 true, false, 1078 1078 msecs_to_jiffies(10)); 1079 1079 if (r == 0)
+10 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1702 1702 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); 1703 1703 pages_addr = ttm->dma_address; 1704 1704 } 1705 - exclusive = reservation_object_get_excl(bo->tbo.base.resv); 1705 + exclusive = dma_resv_get_excl(bo->tbo.base.resv); 1706 1706 } 1707 1707 1708 1708 if (bo) { ··· 1879 1879 */ 1880 1880 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 1881 1881 { 1882 - struct reservation_object *resv = vm->root.base.bo->tbo.base.resv; 1882 + struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 1883 1883 struct dma_fence *excl, **shared; 1884 1884 unsigned i, shared_count; 1885 1885 int r; 1886 1886 1887 - r = reservation_object_get_fences_rcu(resv, &excl, 1887 + r = dma_resv_get_fences_rcu(resv, &excl, 1888 1888 &shared_count, &shared); 1889 1889 if (r) { 1890 1890 /* Not enough memory to grab the fence list, as last resort 1891 1891 * block for all the fences to complete. 1892 1892 */ 1893 - reservation_object_wait_timeout_rcu(resv, true, false, 1893 + dma_resv_wait_timeout_rcu(resv, true, false, 1894 1894 MAX_SCHEDULE_TIMEOUT); 1895 1895 return; 1896 1896 } ··· 1978 1978 struct amdgpu_vm *vm) 1979 1979 { 1980 1980 struct amdgpu_bo_va *bo_va, *tmp; 1981 - struct reservation_object *resv; 1981 + struct dma_resv *resv; 1982 1982 bool clear; 1983 1983 int r; 1984 1984 ··· 1997 1997 spin_unlock(&vm->invalidated_lock); 1998 1998 1999 1999 /* Try to reserve the BO to avoid clearing its ptes */ 2000 - if (!amdgpu_vm_debug && reservation_object_trylock(resv)) 2000 + if (!amdgpu_vm_debug && dma_resv_trylock(resv)) 2001 2001 clear = false; 2002 2002 /* Somebody else is using the BO right now */ 2003 2003 else ··· 2008 2008 return r; 2009 2009 2010 2010 if (!clear) 2011 - reservation_object_unlock(resv); 2011 + dma_resv_unlock(resv); 2012 2012 spin_lock(&vm->invalidated_lock); 2013 2013 } 2014 2014 spin_unlock(&vm->invalidated_lock); ··· 2416 2416 struct amdgpu_bo *bo; 2417 2417 2418 2418 bo = mapping->bo_va->base.bo; 2419 - 
if (reservation_object_locking_ctx(bo->tbo.base.resv) != 2419 + if (dma_resv_locking_ctx(bo->tbo.base.resv) != 2420 2420 ticket) 2421 2421 continue; 2422 2422 } ··· 2649 2649 */ 2650 2650 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2651 2651 { 2652 - return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, 2652 + return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, 2653 2653 true, true, timeout); 2654 2654 } 2655 2655 ··· 2724 2724 if (r) 2725 2725 goto error_free_root; 2726 2726 2727 - r = reservation_object_reserve_shared(root->tbo.base.resv, 1); 2727 + r = dma_resv_reserve_shared(root->tbo.base.resv, 1); 2728 2728 if (r) 2729 2729 goto error_unreserve; 2730 2730
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 5693 5693 * deadlock during GPU reset when this fence will not signal 5694 5694 * but we hold reservation lock for the BO. 5695 5695 */ 5696 - r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true, 5696 + r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, 5697 5697 false, 5698 5698 msecs_to_jiffies(5000)); 5699 5699 if (unlikely(r <= 0))
+1 -1
drivers/gpu/drm/drm_atomic_uapi.c
··· 1037 1037 * As a contrast, with implicit fencing the kernel keeps track of any 1038 1038 * ongoing rendering, and automatically ensures that the atomic update waits 1039 1039 * for any pending rendering to complete. For shared buffers represented with 1040 - * a &struct dma_buf this is tracked in &struct reservation_object. 1040 + * a &struct dma_buf this is tracked in &struct dma_resv. 1041 1041 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), 1042 1042 * whereas explicit fencing is what Android wants. 1043 1043 *
+13 -13
drivers/gpu/drm/drm_gem.c
··· 159 159 kref_init(&obj->refcount); 160 160 obj->handle_count = 0; 161 161 obj->size = size; 162 - reservation_object_init(&obj->_resv); 162 + dma_resv_init(&obj->_resv); 163 163 if (!obj->resv) 164 164 obj->resv = &obj->_resv; 165 165 ··· 755 755 EXPORT_SYMBOL(drm_gem_object_lookup); 756 756 757 757 /** 758 - * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects 758 + * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects 759 759 * shared and/or exclusive fences. 760 760 * @filep: DRM file private date 761 761 * @handle: userspace handle ··· 767 767 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 768 768 * greater than 0 on success. 769 769 */ 770 - long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, 770 + long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, 771 771 bool wait_all, unsigned long timeout) 772 772 { 773 773 long ret; ··· 779 779 return -EINVAL; 780 780 } 781 781 782 - ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all, 782 + ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all, 783 783 true, timeout); 784 784 if (ret == 0) 785 785 ret = -ETIME; ··· 790 790 791 791 return ret; 792 792 } 793 - EXPORT_SYMBOL(drm_gem_reservation_object_wait); 793 + EXPORT_SYMBOL(drm_gem_dma_resv_wait); 794 794 795 795 /** 796 796 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl ··· 956 956 if (obj->filp) 957 957 fput(obj->filp); 958 958 959 - reservation_object_fini(&obj->_resv); 959 + dma_resv_fini(&obj->_resv); 960 960 drm_gem_free_mmap_offset(obj); 961 961 } 962 962 EXPORT_SYMBOL(drm_gem_object_release); ··· 1291 1291 if (contended != -1) { 1292 1292 struct drm_gem_object *obj = objs[contended]; 1293 1293 1294 - ret = reservation_object_lock_slow_interruptible(obj->resv, 1294 + ret = dma_resv_lock_slow_interruptible(obj->resv, 1295 1295 acquire_ctx); 1296 1296 if (ret) { 1297 1297 ww_acquire_done(acquire_ctx); ··· 1303 1303 if (i == contended) 1304 1304 
continue; 1305 1305 1306 - ret = reservation_object_lock_interruptible(objs[i]->resv, 1306 + ret = dma_resv_lock_interruptible(objs[i]->resv, 1307 1307 acquire_ctx); 1308 1308 if (ret) { 1309 1309 int j; 1310 1310 1311 1311 for (j = 0; j < i; j++) 1312 - reservation_object_unlock(objs[j]->resv); 1312 + dma_resv_unlock(objs[j]->resv); 1313 1313 1314 1314 if (contended != -1 && contended >= i) 1315 - reservation_object_unlock(objs[contended]->resv); 1315 + dma_resv_unlock(objs[contended]->resv); 1316 1316 1317 1317 if (ret == -EDEADLK) { 1318 1318 contended = i; ··· 1337 1337 int i; 1338 1338 1339 1339 for (i = 0; i < count; i++) 1340 - reservation_object_unlock(objs[i]->resv); 1340 + dma_resv_unlock(objs[i]->resv); 1341 1341 1342 1342 ww_acquire_fini(acquire_ctx); 1343 1343 } ··· 1413 1413 1414 1414 if (!write) { 1415 1415 struct dma_fence *fence = 1416 - reservation_object_get_excl_rcu(obj->resv); 1416 + dma_resv_get_excl_rcu(obj->resv); 1417 1417 1418 1418 return drm_gem_fence_array_add(fence_array, fence); 1419 1419 } 1420 1420 1421 - ret = reservation_object_get_fences_rcu(obj->resv, NULL, 1421 + ret = dma_resv_get_fences_rcu(obj->resv, NULL, 1422 1422 &fence_count, &fences); 1423 1423 if (ret || !fence_count) 1424 1424 return ret;
+2 -2
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 7 7 8 8 #include <linux/dma-buf.h> 9 9 #include <linux/dma-fence.h> 10 - #include <linux/reservation.h> 10 + #include <linux/dma-resv.h> 11 11 #include <linux/slab.h> 12 12 13 13 #include <drm/drm_atomic.h> ··· 294 294 return 0; 295 295 296 296 obj = drm_gem_fb_get_obj(state->fb, 0); 297 - fence = reservation_object_get_excl_rcu(obj->resv); 297 + fence = dma_resv_get_excl_rcu(obj->resv); 298 298 drm_atomic_set_fence_for_plane(state, fence); 299 299 300 300 return 0;
+4 -4
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 397 397 } 398 398 399 399 if (op & ETNA_PREP_NOSYNC) { 400 - if (!reservation_object_test_signaled_rcu(obj->resv, 400 + if (!dma_resv_test_signaled_rcu(obj->resv, 401 401 write)) 402 402 return -EBUSY; 403 403 } else { 404 404 unsigned long remain = etnaviv_timeout_to_jiffies(timeout); 405 405 406 - ret = reservation_object_wait_timeout_rcu(obj->resv, 406 + ret = dma_resv_wait_timeout_rcu(obj->resv, 407 407 write, true, remain); 408 408 if (ret <= 0) 409 409 return ret == 0 ? -ETIMEDOUT : ret; ··· 459 459 static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 460 460 { 461 461 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 462 - struct reservation_object *robj = obj->resv; 463 - struct reservation_object_list *fobj; 462 + struct dma_resv *robj = obj->resv; 463 + struct dma_resv_list *fobj; 464 464 struct dma_fence *fence; 465 465 unsigned long off = drm_vma_node_start(&obj->vma_node); 466 466
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem.h
··· 6 6 #ifndef __ETNAVIV_GEM_H__ 7 7 #define __ETNAVIV_GEM_H__ 8 8 9 - #include <linux/reservation.h> 9 + #include <linux/dma-resv.h> 10 10 #include "etnaviv_cmdbuf.h" 11 11 #include "etnaviv_drv.h" 12 12
+7 -7
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 4 4 */ 5 5 6 6 #include <linux/dma-fence-array.h> 7 - #include <linux/reservation.h> 7 + #include <linux/dma-resv.h> 8 8 #include <linux/sync_file.h> 9 9 #include "etnaviv_cmdbuf.h" 10 10 #include "etnaviv_drv.h" ··· 165 165 166 166 for (i = 0; i < submit->nr_bos; i++) { 167 167 struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; 168 - struct reservation_object *robj = bo->obj->base.resv; 168 + struct dma_resv *robj = bo->obj->base.resv; 169 169 170 170 if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { 171 - ret = reservation_object_reserve_shared(robj, 1); 171 + ret = dma_resv_reserve_shared(robj, 1); 172 172 if (ret) 173 173 return ret; 174 174 } ··· 177 177 continue; 178 178 179 179 if (bo->flags & ETNA_SUBMIT_BO_WRITE) { 180 - ret = reservation_object_get_fences_rcu(robj, &bo->excl, 180 + ret = dma_resv_get_fences_rcu(robj, &bo->excl, 181 181 &bo->nr_shared, 182 182 &bo->shared); 183 183 if (ret) 184 184 return ret; 185 185 } else { 186 - bo->excl = reservation_object_get_excl_rcu(robj); 186 + bo->excl = dma_resv_get_excl_rcu(robj); 187 187 } 188 188 189 189 } ··· 199 199 struct drm_gem_object *obj = &submit->bos[i].obj->base; 200 200 201 201 if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) 202 - reservation_object_add_excl_fence(obj->resv, 202 + dma_resv_add_excl_fence(obj->resv, 203 203 submit->out_fence); 204 204 else 205 - reservation_object_add_shared_fence(obj->resv, 205 + dma_resv_add_shared_fence(obj->resv, 206 206 submit->out_fence); 207 207 208 208 submit_unlock_object(submit, i);
+2 -2
drivers/gpu/drm/i915/display/intel_display.c
··· 29 29 #include <linux/intel-iommu.h> 30 30 #include <linux/kernel.h> 31 31 #include <linux/module.h> 32 - #include <linux/reservation.h> 32 + #include <linux/dma-resv.h> 33 33 #include <linux/slab.h> 34 34 #include <linux/vgaarb.h> 35 35 ··· 14317 14317 if (ret < 0) 14318 14318 return ret; 14319 14319 14320 - fence = reservation_object_get_excl_rcu(obj->base.resv); 14320 + fence = dma_resv_get_excl_rcu(obj->base.resv); 14321 14321 if (fence) { 14322 14322 add_rps_boost_after_vblank(new_state->crtc, fence); 14323 14323 dma_fence_put(fence);
+3 -3
drivers/gpu/drm/i915/gem/i915_gem_busy.c
··· 82 82 { 83 83 struct drm_i915_gem_busy *args = data; 84 84 struct drm_i915_gem_object *obj; 85 - struct reservation_object_list *list; 85 + struct dma_resv_list *list; 86 86 unsigned int i, shared_count; 87 87 struct dma_fence *excl; 88 88 int err; ··· 106 106 * Alternatively, we can trade that extra information on read/write 107 107 * activity with 108 108 * args->busy = 109 - * !reservation_object_test_signaled_rcu(obj->resv, true); 109 + * !dma_resv_test_signaled_rcu(obj->resv, true); 110 110 * to report the overall busyness. This is what the wait-ioctl does. 111 111 * 112 112 */ 113 - reservation_object_fences(obj->base.resv, &excl, &list, &shared_count); 113 + dma_resv_fences(obj->base.resv, &excl, &list, &shared_count); 114 114 115 115 /* Translate the exclusive fence to the READ *and* WRITE engine */ 116 116 args->busy = busy_check_writer(excl);
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
··· 147 147 true, I915_FENCE_TIMEOUT, 148 148 I915_FENCE_GFP); 149 149 150 - reservation_object_add_excl_fence(obj->base.resv, 150 + dma_resv_add_excl_fence(obj->base.resv, 151 151 &clflush->dma); 152 152 153 153 i915_sw_fence_commit(&clflush->wait);
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
··· 288 288 if (err < 0) { 289 289 dma_fence_set_error(&work->dma, err); 290 290 } else { 291 - reservation_object_add_excl_fence(obj->base.resv, &work->dma); 291 + dma_resv_add_excl_fence(obj->base.resv, &work->dma); 292 292 err = 0; 293 293 } 294 294 i915_gem_object_unlock(obj);
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
··· 6 6 7 7 #include <linux/dma-buf.h> 8 8 #include <linux/highmem.h> 9 - #include <linux/reservation.h> 9 + #include <linux/dma-resv.h> 10 10 11 11 #include "i915_drv.h" 12 12 #include "i915_gem_object.h"
+3 -3
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 5 5 */ 6 6 7 7 #include <linux/intel-iommu.h> 8 - #include <linux/reservation.h> 8 + #include <linux/dma-resv.h> 9 9 #include <linux/sync_file.h> 10 10 #include <linux/uaccess.h> 11 11 ··· 1246 1246 goto skip_request; 1247 1247 1248 1248 i915_vma_lock(batch); 1249 - GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); 1249 + GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true)); 1250 1250 err = i915_vma_move_to_active(batch, rq, 0); 1251 1251 i915_vma_unlock(batch); 1252 1252 if (err) ··· 1317 1317 1318 1318 if (!eb->reloc_cache.vaddr && 1319 1319 (DBG_FORCE_RELOC == FORCE_GPU_RELOC || 1320 - !reservation_object_test_signaled_rcu(vma->resv, true))) { 1320 + !dma_resv_test_signaled_rcu(vma->resv, true))) { 1321 1321 const unsigned int gen = eb->reloc_cache.gen; 1322 1322 unsigned int len; 1323 1323 u32 *batch;
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_fence.c
··· 78 78 I915_FENCE_GFP) < 0) 79 79 goto err; 80 80 81 - reservation_object_add_excl_fence(obj->base.resv, &stub->dma); 81 + dma_resv_add_excl_fence(obj->base.resv, &stub->dma); 82 82 83 83 return &stub->dma; 84 84
+5 -5
drivers/gpu/drm/i915/gem/i915_gem_object.h
··· 99 99 __drm_gem_object_put(&obj->base); 100 100 } 101 101 102 - #define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv) 102 + #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv) 103 103 104 104 static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) 105 105 { 106 - reservation_object_lock(obj->base.resv, NULL); 106 + dma_resv_lock(obj->base.resv, NULL); 107 107 } 108 108 109 109 static inline int 110 110 i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj) 111 111 { 112 - return reservation_object_lock_interruptible(obj->base.resv, NULL); 112 + return dma_resv_lock_interruptible(obj->base.resv, NULL); 113 113 } 114 114 115 115 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) 116 116 { 117 - reservation_object_unlock(obj->base.resv); 117 + dma_resv_unlock(obj->base.resv); 118 118 } 119 119 120 120 struct dma_fence * ··· 373 373 struct dma_fence *fence; 374 374 375 375 rcu_read_lock(); 376 - fence = reservation_object_get_excl_rcu(obj->base.resv); 376 + fence = dma_resv_get_excl_rcu(obj->base.resv); 377 377 rcu_read_unlock(); 378 378 379 379 if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+9 -9
drivers/gpu/drm/i915/gem/i915_gem_wait.c
··· 31 31 } 32 32 33 33 static long 34 - i915_gem_object_wait_reservation(struct reservation_object *resv, 34 + i915_gem_object_wait_reservation(struct dma_resv *resv, 35 35 unsigned int flags, 36 36 long timeout) 37 37 { ··· 43 43 unsigned int count, i; 44 44 int ret; 45 45 46 - ret = reservation_object_get_fences_rcu(resv, 46 + ret = dma_resv_get_fences_rcu(resv, 47 47 &excl, &count, &shared); 48 48 if (ret) 49 49 return ret; ··· 72 72 */ 73 73 prune_fences = count && timeout >= 0; 74 74 } else { 75 - excl = reservation_object_get_excl_rcu(resv); 75 + excl = dma_resv_get_excl_rcu(resv); 76 76 } 77 77 78 78 if (excl && timeout >= 0) ··· 84 84 * Opportunistically prune the fences iff we know they have *all* been 85 85 * signaled. 86 86 */ 87 - if (prune_fences && reservation_object_trylock(resv)) { 88 - if (reservation_object_test_signaled_rcu(resv, true)) 89 - reservation_object_add_excl_fence(resv, NULL); 90 - reservation_object_unlock(resv); 87 + if (prune_fences && dma_resv_trylock(resv)) { 88 + if (dma_resv_test_signaled_rcu(resv, true)) 89 + dma_resv_add_excl_fence(resv, NULL); 90 + dma_resv_unlock(resv); 91 91 } 92 92 93 93 return timeout; ··· 140 140 unsigned int count, i; 141 141 int ret; 142 142 143 - ret = reservation_object_get_fences_rcu(obj->base.resv, 143 + ret = dma_resv_get_fences_rcu(obj->base.resv, 144 144 &excl, &count, &shared); 145 145 if (ret) 146 146 return ret; ··· 152 152 153 153 kfree(shared); 154 154 } else { 155 - excl = reservation_object_get_excl_rcu(obj->base.resv); 155 + excl = dma_resv_get_excl_rcu(obj->base.resv); 156 156 } 157 157 158 158 if (excl) {
+1 -1
drivers/gpu/drm/i915/i915_drv.h
··· 43 43 #include <linux/mm_types.h> 44 44 #include <linux/perf_event.h> 45 45 #include <linux/pm_qos.h> 46 - #include <linux/reservation.h> 46 + #include <linux/dma-resv.h> 47 47 #include <linux/shmem_fs.h> 48 48 #include <linux/stackdepot.h> 49 49
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 29 29 #include <drm/i915_drm.h> 30 30 #include <linux/dma-fence-array.h> 31 31 #include <linux/kthread.h> 32 - #include <linux/reservation.h> 32 + #include <linux/dma-resv.h> 33 33 #include <linux/shmem_fs.h> 34 34 #include <linux/slab.h> 35 35 #include <linux/stop_machine.h>
+6 -6
drivers/gpu/drm/i915/i915_gem_batch_pool.c
··· 96 96 list_for_each_entry(obj, list, batch_pool_link) { 97 97 /* The batches are strictly LRU ordered */ 98 98 if (i915_gem_object_is_active(obj)) { 99 - struct reservation_object *resv = obj->base.resv; 99 + struct dma_resv *resv = obj->base.resv; 100 100 101 - if (!reservation_object_test_signaled_rcu(resv, true)) 101 + if (!dma_resv_test_signaled_rcu(resv, true)) 102 102 break; 103 103 104 104 i915_retire_requests(pool->engine->i915); ··· 113 113 * than replace the existing fence. 114 114 */ 115 115 if (rcu_access_pointer(resv->fence)) { 116 - reservation_object_lock(resv, NULL); 117 - reservation_object_add_excl_fence(resv, NULL); 118 - reservation_object_unlock(resv); 116 + dma_resv_lock(resv, NULL); 117 + dma_resv_add_excl_fence(resv, NULL); 118 + dma_resv_unlock(resv); 119 119 } 120 120 } 121 121 122 - GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv, 122 + GEM_BUG_ON(!dma_resv_test_signaled_rcu(obj->base.resv, 123 123 true)); 124 124 125 125 if (obj->base.size >= size)
+2 -2
drivers/gpu/drm/i915/i915_request.c
··· 1027 1027 struct dma_fence **shared; 1028 1028 unsigned int count, i; 1029 1029 1030 - ret = reservation_object_get_fences_rcu(obj->base.resv, 1030 + ret = dma_resv_get_fences_rcu(obj->base.resv, 1031 1031 &excl, &count, &shared); 1032 1032 if (ret) 1033 1033 return ret; ··· 1044 1044 dma_fence_put(shared[i]); 1045 1045 kfree(shared); 1046 1046 } else { 1047 - excl = reservation_object_get_excl_rcu(obj->base.resv); 1047 + excl = dma_resv_get_excl_rcu(obj->base.resv); 1048 1048 } 1049 1049 1050 1050 if (excl) {
+4 -4
drivers/gpu/drm/i915/i915_sw_fence.c
··· 7 7 #include <linux/slab.h> 8 8 #include <linux/dma-fence.h> 9 9 #include <linux/irq_work.h> 10 - #include <linux/reservation.h> 10 + #include <linux/dma-resv.h> 11 11 12 12 #include "i915_sw_fence.h" 13 13 #include "i915_selftest.h" ··· 510 510 } 511 511 512 512 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, 513 - struct reservation_object *resv, 513 + struct dma_resv *resv, 514 514 const struct dma_fence_ops *exclude, 515 515 bool write, 516 516 unsigned long timeout, ··· 526 526 struct dma_fence **shared; 527 527 unsigned int count, i; 528 528 529 - ret = reservation_object_get_fences_rcu(resv, 529 + ret = dma_resv_get_fences_rcu(resv, 530 530 &excl, &count, &shared); 531 531 if (ret) 532 532 return ret; ··· 551 551 dma_fence_put(shared[i]); 552 552 kfree(shared); 553 553 } else { 554 - excl = reservation_object_get_excl_rcu(resv); 554 + excl = dma_resv_get_excl_rcu(resv); 555 555 } 556 556 557 557 if (ret >= 0 && excl && excl->ops != exclude) {
+2 -2
drivers/gpu/drm/i915/i915_sw_fence.h
··· 16 16 #include <linux/wait.h> 17 17 18 18 struct completion; 19 - struct reservation_object; 19 + struct dma_resv; 20 20 21 21 struct i915_sw_fence { 22 22 wait_queue_head_t wait; ··· 82 82 gfp_t gfp); 83 83 84 84 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, 85 - struct reservation_object *resv, 85 + struct dma_resv *resv, 86 86 const struct dma_fence_ops *exclude, 87 87 bool write, 88 88 unsigned long timeout,
+8 -8
drivers/gpu/drm/i915/i915_vma.c
··· 99 99 return; 100 100 101 101 /* Prune the shared fence arrays iff completely idle (inc. external) */ 102 - if (reservation_object_trylock(obj->base.resv)) { 103 - if (reservation_object_test_signaled_rcu(obj->base.resv, true)) 104 - reservation_object_add_excl_fence(obj->base.resv, NULL); 105 - reservation_object_unlock(obj->base.resv); 102 + if (dma_resv_trylock(obj->base.resv)) { 103 + if (dma_resv_test_signaled_rcu(obj->base.resv, true)) 104 + dma_resv_add_excl_fence(obj->base.resv, NULL); 105 + dma_resv_unlock(obj->base.resv); 106 106 } 107 107 108 108 /* ··· 903 903 struct i915_request *rq, 904 904 unsigned int flags) 905 905 { 906 - struct reservation_object *resv = vma->resv; 906 + struct dma_resv *resv = vma->resv; 907 907 908 908 /* 909 909 * Ignore errors from failing to allocate the new fence, we can't ··· 911 911 * synchronisation leading to rendering corruption. 912 912 */ 913 913 if (flags & EXEC_OBJECT_WRITE) 914 - reservation_object_add_excl_fence(resv, &rq->fence); 915 - else if (reservation_object_reserve_shared(resv, 1) == 0) 916 - reservation_object_add_shared_fence(resv, &rq->fence); 914 + dma_resv_add_excl_fence(resv, &rq->fence); 915 + else if (dma_resv_reserve_shared(resv, 1) == 0) 916 + dma_resv_add_shared_fence(resv, &rq->fence); 917 917 } 918 918 919 919 int i915_vma_move_to_active(struct i915_vma *vma,
+4 -4
drivers/gpu/drm/i915/i915_vma.h
··· 55 55 struct i915_address_space *vm; 56 56 const struct i915_vma_ops *ops; 57 57 struct i915_fence_reg *fence; 58 - struct reservation_object *resv; /** Alias of obj->resv */ 58 + struct dma_resv *resv; /** Alias of obj->resv */ 59 59 struct sg_table *pages; 60 60 void __iomem *iomap; 61 61 void *private; /* owned by creator */ ··· 299 299 void i915_vma_reopen(struct i915_vma *vma); 300 300 void i915_vma_destroy(struct i915_vma *vma); 301 301 302 - #define assert_vma_held(vma) reservation_object_assert_held((vma)->resv) 302 + #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) 303 303 304 304 static inline void i915_vma_lock(struct i915_vma *vma) 305 305 { 306 - reservation_object_lock(vma->resv, NULL); 306 + dma_resv_lock(vma->resv, NULL); 307 307 } 308 308 309 309 static inline void i915_vma_unlock(struct i915_vma *vma) 310 310 { 311 - reservation_object_unlock(vma->resv); 311 + dma_resv_unlock(vma->resv); 312 312 } 313 313 314 314 int __i915_vma_do_pin(struct i915_vma *vma,
+4 -4
drivers/gpu/drm/lima/lima_gem.c
··· 136 136 int err = 0; 137 137 138 138 if (!write) { 139 - err = reservation_object_reserve_shared(bo->gem.resv, 1); 139 + err = dma_resv_reserve_shared(bo->gem.resv, 1); 140 140 if (err) 141 141 return err; 142 142 } ··· 296 296 297 297 for (i = 0; i < submit->nr_bos; i++) { 298 298 if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) 299 - reservation_object_add_excl_fence(bos[i]->gem.resv, fence); 299 + dma_resv_add_excl_fence(bos[i]->gem.resv, fence); 300 300 else 301 - reservation_object_add_shared_fence(bos[i]->gem.resv, fence); 301 + dma_resv_add_shared_fence(bos[i]->gem.resv, fence); 302 302 } 303 303 304 304 lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); ··· 341 341 342 342 timeout = drm_timeout_abs_to_jiffies(timeout_ns); 343 343 344 - ret = drm_gem_reservation_object_wait(file, handle, write, timeout); 344 + ret = drm_gem_dma_resv_wait(file, handle, write, timeout); 345 345 if (ret == 0) 346 346 ret = timeout ? -ETIMEDOUT : -EBUSY; 347 347
+1 -1
drivers/gpu/drm/mediatek/mtk_drm_fb.c
··· 4 4 */ 5 5 6 6 #include <linux/dma-buf.h> 7 - #include <linux/reservation.h> 7 + #include <linux/dma-resv.h> 8 8 9 9 #include <drm/drm_modeset_helper.h> 10 10 #include <drm/drm_fb_helper.h>
+9 -9
drivers/gpu/drm/msm/msm_gem.c
··· 663 663 int msm_gem_sync_object(struct drm_gem_object *obj, 664 664 struct msm_fence_context *fctx, bool exclusive) 665 665 { 666 - struct reservation_object_list *fobj; 666 + struct dma_resv_list *fobj; 667 667 struct dma_fence *fence; 668 668 int i, ret; 669 669 670 - fobj = reservation_object_get_list(obj->resv); 670 + fobj = dma_resv_get_list(obj->resv); 671 671 if (!fobj || (fobj->shared_count == 0)) { 672 - fence = reservation_object_get_excl(obj->resv); 672 + fence = dma_resv_get_excl(obj->resv); 673 673 /* don't need to wait on our own fences, since ring is fifo */ 674 674 if (fence && (fence->context != fctx->context)) { 675 675 ret = dma_fence_wait(fence, true); ··· 683 683 684 684 for (i = 0; i < fobj->shared_count; i++) { 685 685 fence = rcu_dereference_protected(fobj->shared[i], 686 - reservation_object_held(obj->resv)); 686 + dma_resv_held(obj->resv)); 687 687 if (fence->context != fctx->context) { 688 688 ret = dma_fence_wait(fence, true); 689 689 if (ret) ··· 701 701 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); 702 702 msm_obj->gpu = gpu; 703 703 if (exclusive) 704 - reservation_object_add_excl_fence(obj->resv, fence); 704 + dma_resv_add_excl_fence(obj->resv, fence); 705 705 else 706 - reservation_object_add_shared_fence(obj->resv, fence); 706 + dma_resv_add_shared_fence(obj->resv, fence); 707 707 list_del_init(&msm_obj->mm_list); 708 708 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 709 709 } ··· 728 728 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); 729 729 long ret; 730 730 731 - ret = reservation_object_wait_timeout_rcu(obj->resv, write, 731 + ret = dma_resv_wait_timeout_rcu(obj->resv, write, 732 732 true, remain); 733 733 if (ret == 0) 734 734 return remain == 0 ? 
-EBUSY : -ETIMEDOUT; ··· 760 760 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 761 761 { 762 762 struct msm_gem_object *msm_obj = to_msm_bo(obj); 763 - struct reservation_object *robj = obj->resv; 764 - struct reservation_object_list *fobj; 763 + struct dma_resv *robj = obj->resv; 764 + struct dma_resv_list *fobj; 765 765 struct dma_fence *fence; 766 766 struct msm_gem_vma *vma; 767 767 uint64_t off = drm_vma_node_start(&obj->vma_node);
+1 -1
drivers/gpu/drm/msm/msm_gem.h
··· 8 8 #define __MSM_GEM_H__ 9 9 10 10 #include <linux/kref.h> 11 - #include <linux/reservation.h> 11 + #include <linux/dma-resv.h> 12 12 #include "msm_drv.h" 13 13 14 14 /* Additional internal-use only BO flags: */
+1 -1
drivers/gpu/drm/msm/msm_gem_submit.c
··· 225 225 * strange place to call it. OTOH this is a 226 226 * convenient can-fail point to hook it in. 227 227 */ 228 - ret = reservation_object_reserve_shared(msm_obj->base.resv, 228 + ret = dma_resv_reserve_shared(msm_obj->base.resv, 229 229 1); 230 230 if (ret) 231 231 return ret;
+1 -1
drivers/gpu/drm/mxsfb/mxsfb_drv.c
··· 17 17 #include <linux/of_graph.h> 18 18 #include <linux/of_reserved_mem.h> 19 19 #include <linux/pm_runtime.h> 20 - #include <linux/reservation.h> 20 + #include <linux/dma-resv.h> 21 21 #include <linux/spinlock.h> 22 22 23 23 #include <drm/drm_atomic.h>
+1 -1
drivers/gpu/drm/nouveau/dispnv50/wndw.c
··· 457 457 asyw->image.handle[0] = ctxdma->object.handle; 458 458 } 459 459 460 - asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv); 460 + asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv); 461 461 asyw->image.offset[0] = fb->nvbo->bo.offset; 462 462 463 463 if (wndw->func->prepare) {
+5 -5
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 188 188 int 189 189 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, 190 190 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, 191 - struct sg_table *sg, struct reservation_object *robj, 191 + struct sg_table *sg, struct dma_resv *robj, 192 192 struct nouveau_bo **pnvbo) 193 193 { 194 194 struct nouveau_drm *drm = cli->drm; ··· 1324 1324 { 1325 1325 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1326 1326 struct drm_device *dev = drm->dev; 1327 - struct dma_fence *fence = reservation_object_get_excl(bo->base.resv); 1327 + struct dma_fence *fence = dma_resv_get_excl(bo->base.resv); 1328 1328 1329 1329 nv10_bo_put_tile_region(dev, *old_tile, fence); 1330 1330 *old_tile = new_tile; ··· 1655 1655 void 1656 1656 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) 1657 1657 { 1658 - struct reservation_object *resv = nvbo->bo.base.resv; 1658 + struct dma_resv *resv = nvbo->bo.base.resv; 1659 1659 1660 1660 if (exclusive) 1661 - reservation_object_add_excl_fence(resv, &fence->base); 1661 + dma_resv_add_excl_fence(resv, &fence->base); 1662 1662 else if (fence) 1663 - reservation_object_add_shared_fence(resv, &fence->base); 1663 + dma_resv_add_shared_fence(resv, &fence->base); 1664 1664 } 1665 1665 1666 1666 struct ttm_bo_driver nouveau_bo_driver = {
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.h
··· 73 73 void nouveau_bo_move_init(struct nouveau_drm *); 74 74 int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, 75 75 u32 tile_mode, u32 tile_flags, struct sg_table *sg, 76 - struct reservation_object *robj, 76 + struct dma_resv *robj, 77 77 struct nouveau_bo **); 78 78 int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig); 79 79 int nouveau_bo_unpin(struct nouveau_bo *);
+6 -6
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 335 335 { 336 336 struct nouveau_fence_chan *fctx = chan->fence; 337 337 struct dma_fence *fence; 338 - struct reservation_object *resv = nvbo->bo.base.resv; 339 - struct reservation_object_list *fobj; 338 + struct dma_resv *resv = nvbo->bo.base.resv; 339 + struct dma_resv_list *fobj; 340 340 struct nouveau_fence *f; 341 341 int ret = 0, i; 342 342 343 343 if (!exclusive) { 344 - ret = reservation_object_reserve_shared(resv, 1); 344 + ret = dma_resv_reserve_shared(resv, 1); 345 345 346 346 if (ret) 347 347 return ret; 348 348 } 349 349 350 - fobj = reservation_object_get_list(resv); 351 - fence = reservation_object_get_excl(resv); 350 + fobj = dma_resv_get_list(resv); 351 + fence = dma_resv_get_excl(resv); 352 352 353 353 if (fence && (!exclusive || !fobj || !fobj->shared_count)) { 354 354 struct nouveau_channel *prev = NULL; ··· 377 377 bool must_wait = true; 378 378 379 379 fence = rcu_dereference_protected(fobj->shared[i], 380 - reservation_object_held(resv)); 380 + dma_resv_held(resv)); 381 381 382 382 f = nouveau_local_fence(fence, chan->drm); 383 383 if (f) {
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 887 887 return -ENOENT; 888 888 nvbo = nouveau_gem_object(gem); 889 889 890 - lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true, 890 + lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true, 891 891 no_wait ? 0 : 30 * HZ); 892 892 if (!lret) 893 893 ret = -EBUSY;
+3 -3
drivers/gpu/drm/nouveau/nouveau_prime.c
··· 62 62 { 63 63 struct nouveau_drm *drm = nouveau_drm(dev); 64 64 struct nouveau_bo *nvbo; 65 - struct reservation_object *robj = attach->dmabuf->resv; 65 + struct dma_resv *robj = attach->dmabuf->resv; 66 66 u32 flags = 0; 67 67 int ret; 68 68 69 69 flags = TTM_PL_FLAG_TT; 70 70 71 - reservation_object_lock(robj, NULL); 71 + dma_resv_lock(robj, NULL); 72 72 ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0, 73 73 sg, robj, &nvbo); 74 - reservation_object_unlock(robj); 74 + dma_resv_unlock(robj); 75 75 if (ret) 76 76 return ERR_PTR(ret); 77 77
+1 -1
drivers/gpu/drm/panfrost/panfrost_drv.c
··· 274 274 if (!gem_obj) 275 275 return -ENOENT; 276 276 277 - ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true, 277 + ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true, 278 278 true, timeout); 279 279 if (!ret) 280 280 ret = timeout ? -ETIMEDOUT : -EBUSY;
+3 -3
drivers/gpu/drm/panfrost/panfrost_job.c
··· 6 6 #include <linux/io.h> 7 7 #include <linux/platform_device.h> 8 8 #include <linux/pm_runtime.h> 9 - #include <linux/reservation.h> 9 + #include <linux/dma-resv.h> 10 10 #include <drm/gpu_scheduler.h> 11 11 #include <drm/panfrost_drm.h> 12 12 ··· 199 199 int i; 200 200 201 201 for (i = 0; i < bo_count; i++) 202 - implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv); 202 + implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv); 203 203 } 204 204 205 205 static void panfrost_attach_object_fences(struct drm_gem_object **bos, ··· 209 209 int i; 210 210 211 211 for (i = 0; i < bo_count; i++) 212 - reservation_object_add_excl_fence(bos[i]->resv, fence); 212 + dma_resv_add_excl_fence(bos[i]->resv, fence); 213 213 } 214 214 215 215 int panfrost_job_push(struct panfrost_job *job)
+1 -1
drivers/gpu/drm/qxl/qxl_debugfs.c
··· 57 57 struct qxl_bo *bo; 58 58 59 59 list_for_each_entry(bo, &qdev->gem.objects, list) { 60 - struct reservation_object_list *fobj; 60 + struct dma_resv_list *fobj; 61 61 int rel; 62 62 63 63 rcu_read_lock();
+3 -3
drivers/gpu/drm/qxl/qxl_release.c
··· 238 238 return ret; 239 239 } 240 240 241 - ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1); 241 + ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1); 242 242 if (ret) 243 243 return ret; 244 244 ··· 458 458 list_for_each_entry(entry, &release->bos, head) { 459 459 bo = entry->bo; 460 460 461 - reservation_object_add_shared_fence(bo->base.resv, &release->base); 461 + dma_resv_add_shared_fence(bo->base.resv, &release->base); 462 462 ttm_bo_add_to_lru(bo); 463 - reservation_object_unlock(bo->base.resv); 463 + dma_resv_unlock(bo->base.resv); 464 464 } 465 465 spin_unlock(&glob->lru_lock); 466 466 ww_acquire_fini(&release->ticket);
+1 -1
drivers/gpu/drm/radeon/cik.c
··· 3659 3659 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, 3660 3660 uint64_t src_offset, uint64_t dst_offset, 3661 3661 unsigned num_gpu_pages, 3662 - struct reservation_object *resv) 3662 + struct dma_resv *resv) 3663 3663 { 3664 3664 struct radeon_fence *fence; 3665 3665 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/cik_sdma.c
··· 579 579 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, 580 580 uint64_t src_offset, uint64_t dst_offset, 581 581 unsigned num_gpu_pages, 582 - struct reservation_object *resv) 582 + struct dma_resv *resv) 583 583 { 584 584 struct radeon_fence *fence; 585 585 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/evergreen_dma.c
··· 108 108 uint64_t src_offset, 109 109 uint64_t dst_offset, 110 110 unsigned num_gpu_pages, 111 - struct reservation_object *resv) 111 + struct dma_resv *resv) 112 112 { 113 113 struct radeon_fence *fence; 114 114 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/r100.c
··· 891 891 uint64_t src_offset, 892 892 uint64_t dst_offset, 893 893 unsigned num_gpu_pages, 894 - struct reservation_object *resv) 894 + struct dma_resv *resv) 895 895 { 896 896 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 897 897 struct radeon_fence *fence;
+1 -1
drivers/gpu/drm/radeon/r200.c
··· 84 84 uint64_t src_offset, 85 85 uint64_t dst_offset, 86 86 unsigned num_gpu_pages, 87 - struct reservation_object *resv) 87 + struct dma_resv *resv) 88 88 { 89 89 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 90 90 struct radeon_fence *fence;
+1 -1
drivers/gpu/drm/radeon/r600.c
··· 2963 2963 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, 2964 2964 uint64_t src_offset, uint64_t dst_offset, 2965 2965 unsigned num_gpu_pages, 2966 - struct reservation_object *resv) 2966 + struct dma_resv *resv) 2967 2967 { 2968 2968 struct radeon_fence *fence; 2969 2969 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/r600_dma.c
··· 444 444 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, 445 445 uint64_t src_offset, uint64_t dst_offset, 446 446 unsigned num_gpu_pages, 447 - struct reservation_object *resv) 447 + struct dma_resv *resv) 448 448 { 449 449 struct radeon_fence *fence; 450 450 struct radeon_sync sync;
+4 -4
drivers/gpu/drm/radeon/radeon.h
··· 619 619 struct radeon_fence *fence); 620 620 int radeon_sync_resv(struct radeon_device *rdev, 621 621 struct radeon_sync *sync, 622 - struct reservation_object *resv, 622 + struct dma_resv *resv, 623 623 bool shared); 624 624 int radeon_sync_rings(struct radeon_device *rdev, 625 625 struct radeon_sync *sync, ··· 1912 1912 uint64_t src_offset, 1913 1913 uint64_t dst_offset, 1914 1914 unsigned num_gpu_pages, 1915 - struct reservation_object *resv); 1915 + struct dma_resv *resv); 1916 1916 u32 blit_ring_index; 1917 1917 struct radeon_fence *(*dma)(struct radeon_device *rdev, 1918 1918 uint64_t src_offset, 1919 1919 uint64_t dst_offset, 1920 1920 unsigned num_gpu_pages, 1921 - struct reservation_object *resv); 1921 + struct dma_resv *resv); 1922 1922 u32 dma_ring_index; 1923 1923 /* method used for bo copy */ 1924 1924 struct radeon_fence *(*copy)(struct radeon_device *rdev, 1925 1925 uint64_t src_offset, 1926 1926 uint64_t dst_offset, 1927 1927 unsigned num_gpu_pages, 1928 - struct reservation_object *resv); 1928 + struct dma_resv *resv); 1929 1929 /* ring used for bo copies */ 1930 1930 u32 copy_ring_index; 1931 1931 } copy;
+9 -9
drivers/gpu/drm/radeon/radeon_asic.h
··· 86 86 uint64_t src_offset, 87 87 uint64_t dst_offset, 88 88 unsigned num_gpu_pages, 89 - struct reservation_object *resv); 89 + struct dma_resv *resv); 90 90 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 91 91 uint32_t tiling_flags, uint32_t pitch, 92 92 uint32_t offset, uint32_t obj_size); ··· 157 157 uint64_t src_offset, 158 158 uint64_t dst_offset, 159 159 unsigned num_gpu_pages, 160 - struct reservation_object *resv); 160 + struct dma_resv *resv); 161 161 void r200_set_safe_registers(struct radeon_device *rdev); 162 162 163 163 /* ··· 347 347 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, 348 348 uint64_t src_offset, uint64_t dst_offset, 349 349 unsigned num_gpu_pages, 350 - struct reservation_object *resv); 350 + struct dma_resv *resv); 351 351 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, 352 352 uint64_t src_offset, uint64_t dst_offset, 353 353 unsigned num_gpu_pages, 354 - struct reservation_object *resv); 354 + struct dma_resv *resv); 355 355 void r600_hpd_init(struct radeon_device *rdev); 356 356 void r600_hpd_fini(struct radeon_device *rdev); 357 357 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); ··· 473 473 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, 474 474 uint64_t src_offset, uint64_t dst_offset, 475 475 unsigned num_gpu_pages, 476 - struct reservation_object *resv); 476 + struct dma_resv *resv); 477 477 u32 rv770_get_xclk(struct radeon_device *rdev); 478 478 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 479 479 int rv770_get_temp(struct radeon_device *rdev); ··· 547 547 struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, 548 548 uint64_t src_offset, uint64_t dst_offset, 549 549 unsigned num_gpu_pages, 550 - struct reservation_object *resv); 550 + struct dma_resv *resv); 551 551 int evergreen_get_temp(struct radeon_device *rdev); 552 552 int evergreen_get_allowed_info_register(struct radeon_device *rdev, 553 
553 u32 reg, u32 *val); ··· 725 725 struct radeon_fence *si_copy_dma(struct radeon_device *rdev, 726 726 uint64_t src_offset, uint64_t dst_offset, 727 727 unsigned num_gpu_pages, 728 - struct reservation_object *resv); 728 + struct dma_resv *resv); 729 729 730 730 void si_dma_vm_copy_pages(struct radeon_device *rdev, 731 731 struct radeon_ib *ib, ··· 796 796 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, 797 797 uint64_t src_offset, uint64_t dst_offset, 798 798 unsigned num_gpu_pages, 799 - struct reservation_object *resv); 799 + struct dma_resv *resv); 800 800 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, 801 801 uint64_t src_offset, uint64_t dst_offset, 802 802 unsigned num_gpu_pages, 803 - struct reservation_object *resv); 803 + struct dma_resv *resv); 804 804 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); 805 805 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 806 806 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+1 -1
drivers/gpu/drm/radeon/radeon_benchmark.c
··· 35 35 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, 36 36 uint64_t saddr, uint64_t daddr, 37 37 int flag, int n, 38 - struct reservation_object *resv) 38 + struct dma_resv *resv) 39 39 { 40 40 unsigned long start_jiffies; 41 41 unsigned long end_jiffies;
+1 -1
drivers/gpu/drm/radeon/radeon_cs.c
··· 255 255 int r; 256 256 257 257 list_for_each_entry(reloc, &p->validated, tv.head) { 258 - struct reservation_object *resv; 258 + struct dma_resv *resv; 259 259 260 260 resv = reloc->robj->tbo.base.resv; 261 261 r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
+1 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 533 533 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 534 534 goto cleanup; 535 535 } 536 - work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv)); 536 + work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv)); 537 537 radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); 538 538 radeon_bo_unreserve(new_rbo); 539 539
+3 -3
drivers/gpu/drm/radeon/radeon_gem.c
··· 114 114 } 115 115 if (domain == RADEON_GEM_DOMAIN_CPU) { 116 116 /* Asking for cpu access wait for object idle */ 117 - r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 117 + r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 118 118 if (!r) 119 119 r = -EBUSY; 120 120 ··· 449 449 } 450 450 robj = gem_to_radeon_bo(gobj); 451 451 452 - r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true); 452 + r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true); 453 453 if (r == 0) 454 454 r = -EBUSY; 455 455 else ··· 478 478 } 479 479 robj = gem_to_radeon_bo(gobj); 480 480 481 - ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 481 + ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 482 482 if (ret == 0) 483 483 r = -EBUSY; 484 484 else if (ret < 0)
+1 -1
drivers/gpu/drm/radeon/radeon_mn.c
··· 163 163 continue; 164 164 } 165 165 166 - r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, 166 + r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, 167 167 true, false, MAX_SCHEDULE_TIMEOUT); 168 168 if (r <= 0) 169 169 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+7 -7
drivers/gpu/drm/radeon/radeon_object.c
··· 183 183 int radeon_bo_create(struct radeon_device *rdev, 184 184 unsigned long size, int byte_align, bool kernel, 185 185 u32 domain, u32 flags, struct sg_table *sg, 186 - struct reservation_object *resv, 186 + struct dma_resv *resv, 187 187 struct radeon_bo **bo_ptr) 188 188 { 189 189 struct radeon_bo *bo; ··· 610 610 int steal; 611 611 int i; 612 612 613 - reservation_object_assert_held(bo->tbo.base.resv); 613 + dma_resv_assert_held(bo->tbo.base.resv); 614 614 615 615 if (!bo->tiling_flags) 616 616 return 0; ··· 736 736 uint32_t *tiling_flags, 737 737 uint32_t *pitch) 738 738 { 739 - reservation_object_assert_held(bo->tbo.base.resv); 739 + dma_resv_assert_held(bo->tbo.base.resv); 740 740 741 741 if (tiling_flags) 742 742 *tiling_flags = bo->tiling_flags; ··· 748 748 bool force_drop) 749 749 { 750 750 if (!force_drop) 751 - reservation_object_assert_held(bo->tbo.base.resv); 751 + dma_resv_assert_held(bo->tbo.base.resv); 752 752 753 753 if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) 754 754 return 0; ··· 870 870 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, 871 871 bool shared) 872 872 { 873 - struct reservation_object *resv = bo->tbo.base.resv; 873 + struct dma_resv *resv = bo->tbo.base.resv; 874 874 875 875 if (shared) 876 - reservation_object_add_shared_fence(resv, &fence->base); 876 + dma_resv_add_shared_fence(resv, &fence->base); 877 877 else 878 - reservation_object_add_excl_fence(resv, &fence->base); 878 + dma_resv_add_excl_fence(resv, &fence->base); 879 879 }
+1 -1
drivers/gpu/drm/radeon/radeon_object.h
··· 126 126 unsigned long size, int byte_align, 127 127 bool kernel, u32 domain, u32 flags, 128 128 struct sg_table *sg, 129 - struct reservation_object *resv, 129 + struct dma_resv *resv, 130 130 struct radeon_bo **bo_ptr); 131 131 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); 132 132 extern void radeon_bo_kunmap(struct radeon_bo *bo);
+3 -3
drivers/gpu/drm/radeon/radeon_prime.c
··· 63 63 struct dma_buf_attachment *attach, 64 64 struct sg_table *sg) 65 65 { 66 - struct reservation_object *resv = attach->dmabuf->resv; 66 + struct dma_resv *resv = attach->dmabuf->resv; 67 67 struct radeon_device *rdev = dev->dev_private; 68 68 struct radeon_bo *bo; 69 69 int ret; 70 70 71 - reservation_object_lock(resv, NULL); 71 + dma_resv_lock(resv, NULL); 72 72 ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false, 73 73 RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); 74 - reservation_object_unlock(resv); 74 + dma_resv_unlock(resv); 75 75 if (ret) 76 76 return ERR_PTR(ret); 77 77
+5 -5
drivers/gpu/drm/radeon/radeon_sync.c
··· 87 87 */ 88 88 int radeon_sync_resv(struct radeon_device *rdev, 89 89 struct radeon_sync *sync, 90 - struct reservation_object *resv, 90 + struct dma_resv *resv, 91 91 bool shared) 92 92 { 93 - struct reservation_object_list *flist; 93 + struct dma_resv_list *flist; 94 94 struct dma_fence *f; 95 95 struct radeon_fence *fence; 96 96 unsigned i; 97 97 int r = 0; 98 98 99 99 /* always sync to the exclusive fence */ 100 - f = reservation_object_get_excl(resv); 100 + f = dma_resv_get_excl(resv); 101 101 fence = f ? to_radeon_fence(f) : NULL; 102 102 if (fence && fence->rdev == rdev) 103 103 radeon_sync_fence(sync, fence); 104 104 else if (f) 105 105 r = dma_fence_wait(f, true); 106 106 107 - flist = reservation_object_get_list(resv); 107 + flist = dma_resv_get_list(resv); 108 108 if (shared || !flist || r) 109 109 return r; 110 110 111 111 for (i = 0; i < flist->shared_count; ++i) { 112 112 f = rcu_dereference_protected(flist->shared[i], 113 - reservation_object_held(resv)); 113 + dma_resv_held(resv)); 114 114 fence = to_radeon_fence(f); 115 115 if (fence && fence->rdev == rdev) 116 116 radeon_sync_fence(sync, fence);
+1 -1
drivers/gpu/drm/radeon/radeon_uvd.c
··· 477 477 return -EINVAL; 478 478 } 479 479 480 - f = reservation_object_get_excl(bo->tbo.base.resv); 480 + f = dma_resv_get_excl(bo->tbo.base.resv); 481 481 if (f) { 482 482 r = radeon_fence_wait((struct radeon_fence *)f, false); 483 483 if (r) {
+1 -1
drivers/gpu/drm/radeon/radeon_vm.c
··· 831 831 int r; 832 832 833 833 radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true); 834 - r = reservation_object_reserve_shared(pt->tbo.base.resv, 1); 834 + r = dma_resv_reserve_shared(pt->tbo.base.resv, 1); 835 835 if (r) 836 836 return r; 837 837
+1 -1
drivers/gpu/drm/radeon/rv770_dma.c
··· 42 42 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, 43 43 uint64_t src_offset, uint64_t dst_offset, 44 44 unsigned num_gpu_pages, 45 - struct reservation_object *resv) 45 + struct dma_resv *resv) 46 46 { 47 47 struct radeon_fence *fence; 48 48 struct radeon_sync sync;
+1 -1
drivers/gpu/drm/radeon/si_dma.c
··· 231 231 struct radeon_fence *si_copy_dma(struct radeon_device *rdev, 232 232 uint64_t src_offset, uint64_t dst_offset, 233 233 unsigned num_gpu_pages, 234 - struct reservation_object *resv) 234 + struct dma_resv *resv) 235 235 { 236 236 struct radeon_fence *fence; 237 237 struct radeon_sync sync;
+59 -59
drivers/gpu/drm/ttm/ttm_bo.c
··· 41 41 #include <linux/file.h> 42 42 #include <linux/module.h> 43 43 #include <linux/atomic.h> 44 - #include <linux/reservation.h> 44 + #include <linux/dma-resv.h> 45 45 46 46 static void ttm_bo_global_kobj_release(struct kobject *kobj); 47 47 ··· 161 161 atomic_dec(&bo->bdev->glob->bo_count); 162 162 dma_fence_put(bo->moving); 163 163 if (!ttm_bo_uses_embedded_gem_object(bo)) 164 - reservation_object_fini(&bo->base._resv); 164 + dma_resv_fini(&bo->base._resv); 165 165 mutex_destroy(&bo->wu_mutex); 166 166 bo->destroy(bo); 167 167 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); ··· 173 173 struct ttm_bo_device *bdev = bo->bdev; 174 174 struct ttm_mem_type_manager *man; 175 175 176 - reservation_object_assert_held(bo->base.resv); 176 + dma_resv_assert_held(bo->base.resv); 177 177 178 178 if (!list_empty(&bo->lru)) 179 179 return; ··· 244 244 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, 245 245 struct ttm_lru_bulk_move *bulk) 246 246 { 247 - reservation_object_assert_held(bo->base.resv); 247 + dma_resv_assert_held(bo->base.resv); 248 248 249 249 ttm_bo_del_from_lru(bo); 250 250 ttm_bo_add_to_lru(bo); ··· 277 277 if (!pos->first) 278 278 continue; 279 279 280 - reservation_object_assert_held(pos->first->base.resv); 281 - reservation_object_assert_held(pos->last->base.resv); 280 + dma_resv_assert_held(pos->first->base.resv); 281 + dma_resv_assert_held(pos->last->base.resv); 282 282 283 283 man = &pos->first->bdev->man[TTM_PL_TT]; 284 284 list_bulk_move_tail(&man->lru[i], &pos->first->lru, ··· 292 292 if (!pos->first) 293 293 continue; 294 294 295 - reservation_object_assert_held(pos->first->base.resv); 296 - reservation_object_assert_held(pos->last->base.resv); 295 + dma_resv_assert_held(pos->first->base.resv); 296 + dma_resv_assert_held(pos->last->base.resv); 297 297 298 298 man = &pos->first->bdev->man[TTM_PL_VRAM]; 299 299 list_bulk_move_tail(&man->lru[i], &pos->first->lru, ··· 307 307 if (!pos->first) 308 308 continue; 309 309 310 - 
reservation_object_assert_held(pos->first->base.resv); 311 - reservation_object_assert_held(pos->last->base.resv); 310 + dma_resv_assert_held(pos->first->base.resv); 311 + dma_resv_assert_held(pos->last->base.resv); 312 312 313 313 lru = &pos->first->bdev->glob->swap_lru[i]; 314 314 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); ··· 442 442 if (bo->base.resv == &bo->base._resv) 443 443 return 0; 444 444 445 - BUG_ON(!reservation_object_trylock(&bo->base._resv)); 445 + BUG_ON(!dma_resv_trylock(&bo->base._resv)); 446 446 447 - r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv); 447 + r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv); 448 448 if (r) 449 - reservation_object_unlock(&bo->base._resv); 449 + dma_resv_unlock(&bo->base._resv); 450 450 451 451 return r; 452 452 } 453 453 454 454 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) 455 455 { 456 - struct reservation_object_list *fobj; 456 + struct dma_resv_list *fobj; 457 457 struct dma_fence *fence; 458 458 int i; 459 459 460 - fobj = reservation_object_get_list(&bo->base._resv); 461 - fence = reservation_object_get_excl(&bo->base._resv); 460 + fobj = dma_resv_get_list(&bo->base._resv); 461 + fence = dma_resv_get_excl(&bo->base._resv); 462 462 if (fence && !fence->ops->signaled) 463 463 dma_fence_enable_sw_signaling(fence); 464 464 465 465 for (i = 0; fobj && i < fobj->shared_count; ++i) { 466 466 fence = rcu_dereference_protected(fobj->shared[i], 467 - reservation_object_held(bo->base.resv)); 467 + dma_resv_held(bo->base.resv)); 468 468 469 469 if (!fence->ops->signaled) 470 470 dma_fence_enable_sw_signaling(fence); ··· 482 482 /* Last resort, if we fail to allocate memory for the 483 483 * fences block for the BO to become idle 484 484 */ 485 - reservation_object_wait_timeout_rcu(bo->base.resv, true, false, 485 + dma_resv_wait_timeout_rcu(bo->base.resv, true, false, 486 486 30 * HZ); 487 487 spin_lock(&glob->lru_lock); 488 488 goto error; 489 489 } 490 
490 491 491 spin_lock(&glob->lru_lock); 492 - ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY; 492 + ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY; 493 493 if (!ret) { 494 - if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) { 494 + if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) { 495 495 ttm_bo_del_from_lru(bo); 496 496 spin_unlock(&glob->lru_lock); 497 497 if (bo->base.resv != &bo->base._resv) 498 - reservation_object_unlock(&bo->base._resv); 498 + dma_resv_unlock(&bo->base._resv); 499 499 500 500 ttm_bo_cleanup_memtype_use(bo); 501 - reservation_object_unlock(bo->base.resv); 501 + dma_resv_unlock(bo->base.resv); 502 502 return; 503 503 } 504 504 ··· 514 514 ttm_bo_add_to_lru(bo); 515 515 } 516 516 517 - reservation_object_unlock(bo->base.resv); 517 + dma_resv_unlock(bo->base.resv); 518 518 } 519 519 if (bo->base.resv != &bo->base._resv) 520 - reservation_object_unlock(&bo->base._resv); 520 + dma_resv_unlock(&bo->base._resv); 521 521 522 522 error: 523 523 kref_get(&bo->list_kref); ··· 546 546 bool unlock_resv) 547 547 { 548 548 struct ttm_bo_global *glob = bo->bdev->glob; 549 - struct reservation_object *resv; 549 + struct dma_resv *resv; 550 550 int ret; 551 551 552 552 if (unlikely(list_empty(&bo->ddestroy))) ··· 554 554 else 555 555 resv = &bo->base._resv; 556 556 557 - if (reservation_object_test_signaled_rcu(resv, true)) 557 + if (dma_resv_test_signaled_rcu(resv, true)) 558 558 ret = 0; 559 559 else 560 560 ret = -EBUSY; ··· 563 563 long lret; 564 564 565 565 if (unlock_resv) 566 - reservation_object_unlock(bo->base.resv); 566 + dma_resv_unlock(bo->base.resv); 567 567 spin_unlock(&glob->lru_lock); 568 568 569 - lret = reservation_object_wait_timeout_rcu(resv, true, 569 + lret = dma_resv_wait_timeout_rcu(resv, true, 570 570 interruptible, 571 571 30 * HZ); 572 572 ··· 576 576 return -EBUSY; 577 577 578 578 spin_lock(&glob->lru_lock); 579 - if (unlock_resv && !reservation_object_trylock(bo->base.resv)) { 579 + if 
(unlock_resv && !dma_resv_trylock(bo->base.resv)) { 580 580 /* 581 581 * We raced, and lost, someone else holds the reservation now, 582 582 * and is probably busy in ttm_bo_cleanup_memtype_use. ··· 593 593 594 594 if (ret || unlikely(list_empty(&bo->ddestroy))) { 595 595 if (unlock_resv) 596 - reservation_object_unlock(bo->base.resv); 596 + dma_resv_unlock(bo->base.resv); 597 597 spin_unlock(&glob->lru_lock); 598 598 return ret; 599 599 } ··· 606 606 ttm_bo_cleanup_memtype_use(bo); 607 607 608 608 if (unlock_resv) 609 - reservation_object_unlock(bo->base.resv); 609 + dma_resv_unlock(bo->base.resv); 610 610 611 611 return 0; 612 612 } ··· 634 634 635 635 if (remove_all || bo->base.resv != &bo->base._resv) { 636 636 spin_unlock(&glob->lru_lock); 637 - reservation_object_lock(bo->base.resv, NULL); 637 + dma_resv_lock(bo->base.resv, NULL); 638 638 639 639 spin_lock(&glob->lru_lock); 640 640 ttm_bo_cleanup_refs(bo, false, !remove_all, true); 641 641 642 - } else if (reservation_object_trylock(bo->base.resv)) { 642 + } else if (dma_resv_trylock(bo->base.resv)) { 643 643 ttm_bo_cleanup_refs(bo, false, !remove_all, true); 644 644 } else { 645 645 spin_unlock(&glob->lru_lock); ··· 708 708 struct ttm_placement placement; 709 709 int ret = 0; 710 710 711 - reservation_object_assert_held(bo->base.resv); 711 + dma_resv_assert_held(bo->base.resv); 712 712 713 713 placement.num_placement = 0; 714 714 placement.num_busy_placement = 0; ··· 779 779 bool ret = false; 780 780 781 781 if (bo->base.resv == ctx->resv) { 782 - reservation_object_assert_held(bo->base.resv); 782 + dma_resv_assert_held(bo->base.resv); 783 783 if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT 784 784 || !list_empty(&bo->ddestroy)) 785 785 ret = true; ··· 787 787 if (busy) 788 788 *busy = false; 789 789 } else { 790 - ret = reservation_object_trylock(bo->base.resv); 790 + ret = dma_resv_trylock(bo->base.resv); 791 791 *locked = ret; 792 792 if (busy) 793 793 *busy = !ret; ··· 815 815 return -EBUSY; 816 816 817 
817 if (ctx->interruptible) 818 - r = reservation_object_lock_interruptible(busy_bo->base.resv, 818 + r = dma_resv_lock_interruptible(busy_bo->base.resv, 819 819 ticket); 820 820 else 821 - r = reservation_object_lock(busy_bo->base.resv, ticket); 821 + r = dma_resv_lock(busy_bo->base.resv, ticket); 822 822 823 823 /* 824 824 * TODO: It would be better to keep the BO locked until allocation is at ··· 826 826 * of TTM. 827 827 */ 828 828 if (!r) 829 - reservation_object_unlock(busy_bo->base.resv); 829 + dma_resv_unlock(busy_bo->base.resv); 830 830 831 831 return r == -EDEADLK ? -EBUSY : r; 832 832 } ··· 852 852 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, 853 853 &busy)) { 854 854 if (busy && !busy_bo && ticket != 855 - reservation_object_locking_ctx(bo->base.resv)) 855 + dma_resv_locking_ctx(bo->base.resv)) 856 856 busy_bo = bo; 857 857 continue; 858 858 } ··· 860 860 if (place && !bdev->driver->eviction_valuable(bo, 861 861 place)) { 862 862 if (locked) 863 - reservation_object_unlock(bo->base.resv); 863 + dma_resv_unlock(bo->base.resv); 864 864 continue; 865 865 } 866 866 break; ··· 932 932 spin_unlock(&man->move_lock); 933 933 934 934 if (fence) { 935 - reservation_object_add_shared_fence(bo->base.resv, fence); 935 + dma_resv_add_shared_fence(bo->base.resv, fence); 936 936 937 - ret = reservation_object_reserve_shared(bo->base.resv, 1); 937 + ret = dma_resv_reserve_shared(bo->base.resv, 1); 938 938 if (unlikely(ret)) { 939 939 dma_fence_put(fence); 940 940 return ret; ··· 961 961 struct ww_acquire_ctx *ticket; 962 962 int ret; 963 963 964 - ticket = reservation_object_locking_ctx(bo->base.resv); 964 + ticket = dma_resv_locking_ctx(bo->base.resv); 965 965 do { 966 966 ret = (*man->func->get_node)(man, bo, place, mem); 967 967 if (unlikely(ret != 0)) ··· 1091 1091 bool type_found = false; 1092 1092 int i, ret; 1093 1093 1094 - ret = reservation_object_reserve_shared(bo->base.resv, 1); 1094 + ret = dma_resv_reserve_shared(bo->base.resv, 1); 1095 1095 if 
(unlikely(ret)) 1096 1096 return ret; 1097 1097 ··· 1172 1172 int ret = 0; 1173 1173 struct ttm_mem_reg mem; 1174 1174 1175 - reservation_object_assert_held(bo->base.resv); 1175 + dma_resv_assert_held(bo->base.resv); 1176 1176 1177 1177 mem.num_pages = bo->num_pages; 1178 1178 mem.size = mem.num_pages << PAGE_SHIFT; ··· 1242 1242 int ret; 1243 1243 uint32_t new_flags; 1244 1244 1245 - reservation_object_assert_held(bo->base.resv); 1245 + dma_resv_assert_held(bo->base.resv); 1246 1246 /* 1247 1247 * Check whether we need to move buffer. 1248 1248 */ ··· 1279 1279 struct ttm_operation_ctx *ctx, 1280 1280 size_t acc_size, 1281 1281 struct sg_table *sg, 1282 - struct reservation_object *resv, 1282 + struct dma_resv *resv, 1283 1283 void (*destroy) (struct ttm_buffer_object *)) 1284 1284 { 1285 1285 int ret = 0; ··· 1333 1333 bo->sg = sg; 1334 1334 if (resv) { 1335 1335 bo->base.resv = resv; 1336 - reservation_object_assert_held(bo->base.resv); 1336 + dma_resv_assert_held(bo->base.resv); 1337 1337 } else { 1338 1338 bo->base.resv = &bo->base._resv; 1339 1339 } ··· 1342 1342 * bo.gem is not initialized, so we have to setup the 1343 1343 * struct elements we want use regardless. 1344 1344 */ 1345 - reservation_object_init(&bo->base._resv); 1345 + dma_resv_init(&bo->base._resv); 1346 1346 drm_vma_node_reset(&bo->base.vma_node); 1347 1347 } 1348 1348 atomic_inc(&bo->bdev->glob->bo_count); ··· 1360 1360 * since otherwise lockdep will be angered in radeon. 
1361 1361 */ 1362 1362 if (!resv) { 1363 - locked = reservation_object_trylock(bo->base.resv); 1363 + locked = dma_resv_trylock(bo->base.resv); 1364 1364 WARN_ON(!locked); 1365 1365 } 1366 1366 ··· 1394 1394 bool interruptible, 1395 1395 size_t acc_size, 1396 1396 struct sg_table *sg, 1397 - struct reservation_object *resv, 1397 + struct dma_resv *resv, 1398 1398 void (*destroy) (struct ttm_buffer_object *)) 1399 1399 { 1400 1400 struct ttm_operation_ctx ctx = { interruptible, false }; ··· 1804 1804 long timeout = 15 * HZ; 1805 1805 1806 1806 if (no_wait) { 1807 - if (reservation_object_test_signaled_rcu(bo->base.resv, true)) 1807 + if (dma_resv_test_signaled_rcu(bo->base.resv, true)) 1808 1808 return 0; 1809 1809 else 1810 1810 return -EBUSY; 1811 1811 } 1812 1812 1813 - timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true, 1813 + timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, 1814 1814 interruptible, timeout); 1815 1815 if (timeout < 0) 1816 1816 return timeout; ··· 1818 1818 if (timeout == 0) 1819 1819 return -EBUSY; 1820 1820 1821 - reservation_object_add_excl_fence(bo->base.resv, NULL); 1821 + dma_resv_add_excl_fence(bo->base.resv, NULL); 1822 1822 return 0; 1823 1823 } 1824 1824 EXPORT_SYMBOL(ttm_bo_wait); ··· 1934 1934 * already swapped buffer. 
1935 1935 */ 1936 1936 if (locked) 1937 - reservation_object_unlock(bo->base.resv); 1937 + dma_resv_unlock(bo->base.resv); 1938 1938 kref_put(&bo->list_kref, ttm_bo_release_list); 1939 1939 return ret; 1940 1940 } ··· 1972 1972 ret = mutex_lock_interruptible(&bo->wu_mutex); 1973 1973 if (unlikely(ret != 0)) 1974 1974 return -ERESTARTSYS; 1975 - if (!reservation_object_is_locked(bo->base.resv)) 1975 + if (!dma_resv_is_locked(bo->base.resv)) 1976 1976 goto out_unlock; 1977 - ret = reservation_object_lock_interruptible(bo->base.resv, NULL); 1977 + ret = dma_resv_lock_interruptible(bo->base.resv, NULL); 1978 1978 if (ret == -EINTR) 1979 1979 ret = -ERESTARTSYS; 1980 1980 if (unlikely(ret != 0)) 1981 1981 goto out_unlock; 1982 - reservation_object_unlock(bo->base.resv); 1982 + dma_resv_unlock(bo->base.resv); 1983 1983 1984 1984 out_unlock: 1985 1985 mutex_unlock(&bo->wu_mutex);
+8 -8
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 38 38 #include <linux/slab.h> 39 39 #include <linux/vmalloc.h> 40 40 #include <linux/module.h> 41 - #include <linux/reservation.h> 41 + #include <linux/dma-resv.h> 42 42 43 43 struct ttm_transfer_obj { 44 44 struct ttm_buffer_object base; ··· 518 518 fbo->base.destroy = &ttm_transfered_destroy; 519 519 fbo->base.acc_size = 0; 520 520 fbo->base.base.resv = &fbo->base.base._resv; 521 - reservation_object_init(fbo->base.base.resv); 522 - ret = reservation_object_trylock(fbo->base.base.resv); 521 + dma_resv_init(fbo->base.base.resv); 522 + ret = dma_resv_trylock(fbo->base.base.resv); 523 523 WARN_ON(!ret); 524 524 525 525 *new_obj = &fbo->base; ··· 689 689 int ret; 690 690 struct ttm_buffer_object *ghost_obj; 691 691 692 - reservation_object_add_excl_fence(bo->base.resv, fence); 692 + dma_resv_add_excl_fence(bo->base.resv, fence); 693 693 if (evict) { 694 694 ret = ttm_bo_wait(bo, false, false); 695 695 if (ret) ··· 716 716 if (ret) 717 717 return ret; 718 718 719 - reservation_object_add_excl_fence(ghost_obj->base.resv, fence); 719 + dma_resv_add_excl_fence(ghost_obj->base.resv, fence); 720 720 721 721 /** 722 722 * If we're not moving to fixed memory, the TTM object ··· 752 752 753 753 int ret; 754 754 755 - reservation_object_add_excl_fence(bo->base.resv, fence); 755 + dma_resv_add_excl_fence(bo->base.resv, fence); 756 756 757 757 if (!evict) { 758 758 struct ttm_buffer_object *ghost_obj; ··· 772 772 if (ret) 773 773 return ret; 774 774 775 - reservation_object_add_excl_fence(ghost_obj->base.resv, fence); 775 + dma_resv_add_excl_fence(ghost_obj->base.resv, fence); 776 776 777 777 /** 778 778 * If we're not moving to fixed memory, the TTM object ··· 841 841 if (ret) 842 842 return ret; 843 843 844 - ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv); 844 + ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv); 845 845 /* Last resort, wait for the BO to be idle when we are OOM */ 846 846 if (ret) 847 847 ttm_bo_wait(bo, false, false);
+3 -3
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 71 71 ttm_bo_get(bo); 72 72 up_read(&vmf->vma->vm_mm->mmap_sem); 73 73 (void) dma_fence_wait(bo->moving, true); 74 - reservation_object_unlock(bo->base.resv); 74 + dma_resv_unlock(bo->base.resv); 75 75 ttm_bo_put(bo); 76 76 goto out_unlock; 77 77 } ··· 131 131 * for reserve, and if it fails, retry the fault after waiting 132 132 * for the buffer to become unreserved. 133 133 */ 134 - if (unlikely(!reservation_object_trylock(bo->base.resv))) { 134 + if (unlikely(!dma_resv_trylock(bo->base.resv))) { 135 135 if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { 136 136 if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 137 137 ttm_bo_get(bo); ··· 296 296 out_io_unlock: 297 297 ttm_mem_io_unlock(man); 298 298 out_unlock: 299 - reservation_object_unlock(bo->base.resv); 299 + dma_resv_unlock(bo->base.resv); 300 300 return ret; 301 301 } 302 302
+10 -10
drivers/gpu/drm/ttm/ttm_execbuf_util.c
··· 39 39 list_for_each_entry_continue_reverse(entry, list, head) { 40 40 struct ttm_buffer_object *bo = entry->bo; 41 41 42 - reservation_object_unlock(bo->base.resv); 42 + dma_resv_unlock(bo->base.resv); 43 43 } 44 44 } 45 45 ··· 71 71 72 72 if (list_empty(&bo->lru)) 73 73 ttm_bo_add_to_lru(bo); 74 - reservation_object_unlock(bo->base.resv); 74 + dma_resv_unlock(bo->base.resv); 75 75 } 76 76 spin_unlock(&glob->lru_lock); 77 77 ··· 114 114 115 115 ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); 116 116 if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { 117 - reservation_object_unlock(bo->base.resv); 117 + dma_resv_unlock(bo->base.resv); 118 118 119 119 ret = -EBUSY; 120 120 ··· 130 130 if (!entry->num_shared) 131 131 continue; 132 132 133 - ret = reservation_object_reserve_shared(bo->base.resv, 133 + ret = dma_resv_reserve_shared(bo->base.resv, 134 134 entry->num_shared); 135 135 if (!ret) 136 136 continue; ··· 144 144 145 145 if (ret == -EDEADLK) { 146 146 if (intr) { 147 - ret = reservation_object_lock_slow_interruptible(bo->base.resv, 147 + ret = dma_resv_lock_slow_interruptible(bo->base.resv, 148 148 ticket); 149 149 } else { 150 - reservation_object_lock_slow(bo->base.resv, ticket); 150 + dma_resv_lock_slow(bo->base.resv, ticket); 151 151 ret = 0; 152 152 } 153 153 } 154 154 155 155 if (!ret && entry->num_shared) 156 - ret = reservation_object_reserve_shared(bo->base.resv, 156 + ret = dma_resv_reserve_shared(bo->base.resv, 157 157 entry->num_shared); 158 158 159 159 if (unlikely(ret != 0)) { ··· 201 201 list_for_each_entry(entry, list, head) { 202 202 bo = entry->bo; 203 203 if (entry->num_shared) 204 - reservation_object_add_shared_fence(bo->base.resv, fence); 204 + dma_resv_add_shared_fence(bo->base.resv, fence); 205 205 else 206 - reservation_object_add_excl_fence(bo->base.resv, fence); 206 + dma_resv_add_excl_fence(bo->base.resv, fence); 207 207 if (list_empty(&bo->lru)) 208 208 ttm_bo_add_to_lru(bo); 209 209 else 210 210 
ttm_bo_move_to_lru_tail(bo, NULL); 211 - reservation_object_unlock(bo->base.resv); 211 + dma_resv_unlock(bo->base.resv); 212 212 } 213 213 spin_unlock(&glob->lru_lock); 214 214 if (ticket)
+1 -1
drivers/gpu/drm/ttm/ttm_tt.c
··· 48 48 struct ttm_bo_device *bdev = bo->bdev; 49 49 uint32_t page_flags = 0; 50 50 51 - reservation_object_assert_held(bo->base.resv); 51 + dma_resv_assert_held(bo->base.resv); 52 52 53 53 if (bdev->need_dma32) 54 54 page_flags |= TTM_PAGE_FLAG_DMA32;
+2 -2
drivers/gpu/drm/v3d/v3d_gem.c
··· 409 409 if (args->pad != 0) 410 410 return -EINVAL; 411 411 412 - ret = drm_gem_reservation_object_wait(file_priv, args->handle, 412 + ret = drm_gem_dma_resv_wait(file_priv, args->handle, 413 413 true, timeout_jiffies); 414 414 415 415 /* Decrement the user's timeout, in case we got interrupted ··· 495 495 496 496 for (i = 0; i < job->bo_count; i++) { 497 497 /* XXX: Use shared fences for read-only objects. */ 498 - reservation_object_add_excl_fence(job->bo[i]->resv, 498 + dma_resv_add_excl_fence(job->bo[i]->resv, 499 499 job->done_fence); 500 500 } 501 501
+3 -3
drivers/gpu/drm/vc4/vc4_gem.c
··· 543 543 bo = to_vc4_bo(&exec->bo[i]->base); 544 544 bo->seqno = seqno; 545 545 546 - reservation_object_add_shared_fence(bo->base.base.resv, exec->fence); 546 + dma_resv_add_shared_fence(bo->base.base.resv, exec->fence); 547 547 } 548 548 549 549 list_for_each_entry(bo, &exec->unref_list, unref_head) { ··· 554 554 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); 555 555 bo->write_seqno = seqno; 556 556 557 - reservation_object_add_excl_fence(bo->base.base.resv, exec->fence); 557 + dma_resv_add_excl_fence(bo->base.base.resv, exec->fence); 558 558 } 559 559 } 560 560 ··· 642 642 for (i = 0; i < exec->bo_count; i++) { 643 643 bo = &exec->bo[i]->base; 644 644 645 - ret = reservation_object_reserve_shared(bo->resv, 1); 645 + ret = dma_resv_reserve_shared(bo->resv, 1); 646 646 if (ret) { 647 647 vc4_unlock_bo_reservations(dev, exec, acquire_ctx); 648 648 return ret;
+8 -8
drivers/gpu/drm/vgem/vgem_fence.c
··· 21 21 */ 22 22 23 23 #include <linux/dma-buf.h> 24 - #include <linux/reservation.h> 24 + #include <linux/dma-resv.h> 25 25 26 26 #include <drm/drm_file.h> 27 27 ··· 128 128 { 129 129 struct drm_vgem_fence_attach *arg = data; 130 130 struct vgem_file *vfile = file->driver_priv; 131 - struct reservation_object *resv; 131 + struct dma_resv *resv; 132 132 struct drm_gem_object *obj; 133 133 struct dma_fence *fence; 134 134 int ret; ··· 151 151 152 152 /* Check for a conflicting fence */ 153 153 resv = obj->resv; 154 - if (!reservation_object_test_signaled_rcu(resv, 154 + if (!dma_resv_test_signaled_rcu(resv, 155 155 arg->flags & VGEM_FENCE_WRITE)) { 156 156 ret = -EBUSY; 157 157 goto err_fence; ··· 159 159 160 160 /* Expose the fence via the dma-buf */ 161 161 ret = 0; 162 - reservation_object_lock(resv, NULL); 162 + dma_resv_lock(resv, NULL); 163 163 if (arg->flags & VGEM_FENCE_WRITE) 164 - reservation_object_add_excl_fence(resv, fence); 165 - else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0) 166 - reservation_object_add_shared_fence(resv, fence); 167 - reservation_object_unlock(resv); 164 + dma_resv_add_excl_fence(resv, fence); 165 + else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0) 166 + dma_resv_add_shared_fence(resv, fence); 167 + dma_resv_unlock(resv); 168 168 169 169 /* Record the fence in our idr for later signaling */ 170 170 if (ret == 0) {
+2 -2
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 396 396 (vgdev, qobj->hw_res_handle, 397 397 vfpriv->ctx_id, offset, args->level, 398 398 &box, fence); 399 - reservation_object_add_excl_fence(qobj->tbo.base.resv, 399 + dma_resv_add_excl_fence(qobj->tbo.base.resv, 400 400 &fence->f); 401 401 402 402 dma_fence_put(&fence->f); ··· 450 450 (vgdev, qobj, 451 451 vfpriv ? vfpriv->ctx_id : 0, offset, 452 452 args->level, &box, fence); 453 - reservation_object_add_excl_fence(qobj->tbo.base.resv, 453 + dma_resv_add_excl_fence(qobj->tbo.base.resv, 454 454 &fence->f); 455 455 dma_fence_put(&fence->f); 456 456 }
+1 -1
drivers/gpu/drm/virtio/virtgpu_plane.c
··· 212 212 0, 0, vgfb->fence); 213 213 ret = virtio_gpu_object_reserve(bo, false); 214 214 if (!ret) { 215 - reservation_object_add_excl_fence(bo->tbo.base.resv, 215 + dma_resv_add_excl_fence(bo->tbo.base.resv, 216 216 &vgfb->fence->f); 217 217 dma_fence_put(&vgfb->fence->f); 218 218 vgfb->fence = NULL;
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
··· 459 459 460 460 /* Buffer objects need to be either pinned or reserved: */ 461 461 if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT)) 462 - reservation_object_assert_held(dst->base.resv); 462 + dma_resv_assert_held(dst->base.resv); 463 463 if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT)) 464 - reservation_object_assert_held(src->base.resv); 464 + dma_resv_assert_held(src->base.resv); 465 465 466 466 if (dst->ttm->state == tt_unpopulated) { 467 467 ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 342 342 uint32_t old_mem_type = bo->mem.mem_type; 343 343 int ret; 344 344 345 - reservation_object_assert_held(bo->base.resv); 345 + dma_resv_assert_held(bo->base.resv); 346 346 347 347 if (pin) { 348 348 if (vbo->pin_count++ > 0) ··· 689 689 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); 690 690 long lret; 691 691 692 - lret = reservation_object_wait_timeout_rcu 692 + lret = dma_resv_wait_timeout_rcu 693 693 (bo->base.resv, true, true, 694 694 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT); 695 695 if (!lret) ··· 1007 1007 1008 1008 if (fence == NULL) { 1009 1009 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 1010 - reservation_object_add_excl_fence(bo->base.resv, &fence->base); 1010 + dma_resv_add_excl_fence(bo->base.resv, &fence->base); 1011 1011 dma_fence_put(&fence->base); 1012 1012 } else 1013 - reservation_object_add_excl_fence(bo->base.resv, &fence->base); 1013 + dma_resv_add_excl_fence(bo->base.resv, &fence->base); 1014 1014 } 1015 1015 1016 1016
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 169 169 } *cmd; 170 170 171 171 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 172 - reservation_object_assert_held(bo->base.resv); 172 + dma_resv_assert_held(bo->base.resv); 173 173 174 174 cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); 175 175 if (!cmd) ··· 311 311 return 0; 312 312 313 313 WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 314 - reservation_object_assert_held(bo->base.resv); 314 + dma_resv_assert_held(bo->base.resv); 315 315 316 316 mutex_lock(&dev_priv->binding_mutex); 317 317 if (!vcotbl->scrubbed)
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 402 402 403 403 if (switch_backup && new_backup != res->backup) { 404 404 if (res->backup) { 405 - reservation_object_assert_held(res->backup->base.base.resv); 405 + dma_resv_assert_held(res->backup->base.base.resv); 406 406 list_del_init(&res->mob_head); 407 407 vmw_bo_unreference(&res->backup); 408 408 } 409 409 410 410 if (new_backup) { 411 411 res->backup = vmw_bo_reference(new_backup); 412 - reservation_object_assert_held(new_backup->base.base.resv); 412 + dma_resv_assert_held(new_backup->base.base.resv); 413 413 list_add_tail(&res->mob_head, &new_backup->res_list); 414 414 } else { 415 415 res->backup = NULL; ··· 691 691 .num_shared = 0 692 692 }; 693 693 694 - reservation_object_assert_held(vbo->base.base.resv); 694 + dma_resv_assert_held(vbo->base.base.resv); 695 695 list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { 696 696 if (!res->func->unbind) 697 697 continue;
+1 -1
include/drm/drmP.h
··· 87 87 88 88 struct device_node; 89 89 struct videomode; 90 - struct reservation_object; 90 + struct dma_resv; 91 91 struct dma_buf_attachment; 92 92 93 93 struct pci_dev;
+4 -4
include/drm/drm_gem.h
··· 35 35 */ 36 36 37 37 #include <linux/kref.h> 38 - #include <linux/reservation.h> 38 + #include <linux/dma-resv.h> 39 39 40 40 #include <drm/drm_vma_manager.h> 41 41 ··· 276 276 * 277 277 * Normally (@resv == &@_resv) except for imported GEM objects. 278 278 */ 279 - struct reservation_object *resv; 279 + struct dma_resv *resv; 280 280 281 281 /** 282 282 * @_resv: ··· 285 285 * 286 286 * This is unused for imported GEM objects. 287 287 */ 288 - struct reservation_object _resv; 288 + struct dma_resv _resv; 289 289 290 290 /** 291 291 * @funcs: ··· 390 390 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, 391 391 int count, struct drm_gem_object ***objs_out); 392 392 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); 393 - long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, 393 + long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, 394 394 bool wait_all, unsigned long timeout); 395 395 int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, 396 396 struct ww_acquire_ctx *acquire_ctx);
+6 -6
include/drm/ttm/ttm_bo_api.h
··· 40 40 #include <linux/mutex.h> 41 41 #include <linux/mm.h> 42 42 #include <linux/bitmap.h> 43 - #include <linux/reservation.h> 43 + #include <linux/dma-resv.h> 44 44 45 45 struct ttm_bo_global; 46 46 ··· 273 273 struct ttm_operation_ctx { 274 274 bool interruptible; 275 275 bool no_wait_gpu; 276 - struct reservation_object *resv; 276 + struct dma_resv *resv; 277 277 uint64_t bytes_moved; 278 278 uint32_t flags; 279 279 }; ··· 493 493 * @page_alignment: Data alignment in pages. 494 494 * @ctx: TTM operation context for memory allocation. 495 495 * @acc_size: Accounted size for this object. 496 - * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. 496 + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. 497 497 * @destroy: Destroy function. Use NULL for kfree(). 498 498 * 499 499 * This function initializes a pre-allocated struct ttm_buffer_object. ··· 526 526 struct ttm_operation_ctx *ctx, 527 527 size_t acc_size, 528 528 struct sg_table *sg, 529 - struct reservation_object *resv, 529 + struct dma_resv *resv, 530 530 void (*destroy) (struct ttm_buffer_object *)); 531 531 532 532 /** ··· 545 545 * point to the shmem object backing a GEM object if TTM is used to back a 546 546 * GEM user interface. 547 547 * @acc_size: Accounted size for this object. 548 - * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. 548 + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. 549 549 * @destroy: Destroy function. Use NULL for kfree(). 550 550 * 551 551 * This function initializes a pre-allocated struct ttm_buffer_object. ··· 570 570 unsigned long size, enum ttm_bo_type type, 571 571 struct ttm_placement *placement, 572 572 uint32_t page_alignment, bool interrubtible, size_t acc_size, 573 - struct sg_table *sg, struct reservation_object *resv, 573 + struct sg_table *sg, struct dma_resv *resv, 574 574 void (*destroy) (struct ttm_buffer_object *)); 575 575 576 576 /**
+7 -7
include/drm/ttm/ttm_bo_driver.h
··· 35 35 #include <linux/workqueue.h> 36 36 #include <linux/fs.h> 37 37 #include <linux/spinlock.h> 38 - #include <linux/reservation.h> 38 + #include <linux/dma-resv.h> 39 39 40 40 #include "ttm_bo_api.h" 41 41 #include "ttm_memory.h" ··· 654 654 if (WARN_ON(ticket)) 655 655 return -EBUSY; 656 656 657 - success = reservation_object_trylock(bo->base.resv); 657 + success = dma_resv_trylock(bo->base.resv); 658 658 return success ? 0 : -EBUSY; 659 659 } 660 660 661 661 if (interruptible) 662 - ret = reservation_object_lock_interruptible(bo->base.resv, ticket); 662 + ret = dma_resv_lock_interruptible(bo->base.resv, ticket); 663 663 else 664 - ret = reservation_object_lock(bo->base.resv, ticket); 664 + ret = dma_resv_lock(bo->base.resv, ticket); 665 665 if (ret == -EINTR) 666 666 return -ERESTARTSYS; 667 667 return ret; ··· 745 745 WARN_ON(!kref_read(&bo->kref)); 746 746 747 747 if (interruptible) 748 - ret = reservation_object_lock_slow_interruptible(bo->base.resv, 748 + ret = dma_resv_lock_slow_interruptible(bo->base.resv, 749 749 ticket); 750 750 else 751 - reservation_object_lock_slow(bo->base.resv, ticket); 751 + dma_resv_lock_slow(bo->base.resv, ticket); 752 752 753 753 if (likely(ret == 0)) 754 754 ttm_bo_del_sub_from_lru(bo); ··· 773 773 else 774 774 ttm_bo_move_to_lru_tail(bo, NULL); 775 775 spin_unlock(&bo->bdev->glob->lru_lock); 776 - reservation_object_unlock(bo->base.resv); 776 + dma_resv_unlock(bo->base.resv); 777 777 } 778 778 779 779 /*
+2 -2
include/linux/dma-buf.h
··· 306 306 struct module *owner; 307 307 struct list_head list_node; 308 308 void *priv; 309 - struct reservation_object *resv; 309 + struct dma_resv *resv; 310 310 311 311 /* poll support */ 312 312 wait_queue_head_t poll; ··· 365 365 const struct dma_buf_ops *ops; 366 366 size_t size; 367 367 int flags; 368 - struct reservation_object *resv; 368 + struct dma_resv *resv; 369 369 void *priv; 370 370 }; 371 371
+2 -2
include/linux/dma-fence.h
··· 279 279 } 280 280 281 281 /** 282 - * dma_fence_get_rcu - get a fence from a reservation_object_list with 282 + * dma_fence_get_rcu - get a fence from a dma_resv_list with 283 283 * rcu read lock 284 284 * @fence: fence to increase refcount of 285 285 * ··· 303 303 * so long as the caller is using RCU on the pointer to the fence. 304 304 * 305 305 * An alternative mechanism is to employ a seqlock to protect a bunch of 306 - * fences, such as used by struct reservation_object. When using a seqlock, 306 + * fences, such as used by struct dma_resv. When using a seqlock, 307 307 * the seqlock must be taken before and checked after a reference to the 308 308 * fence is acquired (as shown here). 309 309 *
+57 -77
include/linux/reservation.h include/linux/dma-resv.h
··· 48 48 extern struct ww_class reservation_ww_class; 49 49 50 50 /** 51 - * struct reservation_object_list - a list of shared fences 51 + * struct dma_resv_list - a list of shared fences 52 52 * @rcu: for internal use 53 53 * @shared_count: table of shared fences 54 54 * @shared_max: for growing shared fence table 55 55 * @shared: shared fence table 56 56 */ 57 - struct reservation_object_list { 57 + struct dma_resv_list { 58 58 struct rcu_head rcu; 59 59 u32 shared_count, shared_max; 60 60 struct dma_fence __rcu *shared[]; 61 61 }; 62 62 63 63 /** 64 - * struct reservation_object - a reservation object manages fences for a buffer 64 + * struct dma_resv - a reservation object manages fences for a buffer 65 65 * @lock: update side lock 66 66 * @seq: sequence count for managing RCU read-side synchronization 67 67 * @fence_excl: the exclusive fence, if there is one currently 68 68 * @fence: list of current shared fences 69 69 */ 70 - struct reservation_object { 70 + struct dma_resv { 71 71 struct ww_mutex lock; 72 72 73 73 struct dma_fence __rcu *fence_excl; 74 - struct reservation_object_list __rcu *fence; 74 + struct dma_resv_list __rcu *fence; 75 75 }; 76 76 77 - #define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) 78 - #define reservation_object_assert_held(obj) \ 79 - lockdep_assert_held(&(obj)->lock.base) 77 + #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) 78 + #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) 80 79 81 80 /** 82 - * reservation_object_get_excl - get the reservation object's 81 + * dma_resv_get_excl - get the reservation object's 83 82 * exclusive fence, with update-side lock held 84 83 * @obj: the reservation object 85 84 * ··· 89 90 * RETURNS 90 91 * The exclusive fence or NULL 91 92 */ 92 - static inline struct dma_fence * 93 - reservation_object_get_excl(struct reservation_object *obj) 93 + static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj) 94 94 { 95 95 return 
rcu_dereference_protected(obj->fence_excl, 96 - reservation_object_held(obj)); 96 + dma_resv_held(obj)); 97 97 } 98 98 99 99 /** 100 - * reservation_object_get_list - get the reservation object's 100 + * dma_resv_get_list - get the reservation object's 101 101 * shared fence list, with update-side lock held 102 102 * @obj: the reservation object 103 103 * 104 104 * Returns the shared fence list. Does NOT take references to 105 105 * the fence. The obj->lock must be held. 106 106 */ 107 - static inline struct reservation_object_list * 108 - reservation_object_get_list(struct reservation_object *obj) 107 + static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) 109 108 { 110 109 return rcu_dereference_protected(obj->fence, 111 - reservation_object_held(obj)); 110 + dma_resv_held(obj)); 112 111 } 113 112 114 113 /** 115 - * reservation_object_fences - read consistent fence pointers 114 + * dma_resv_fences - read consistent fence pointers 116 115 * @obj: reservation object where we get the fences from 117 116 * @excl: pointer for the exclusive fence 118 117 * @list: pointer for the shared fence list ··· 118 121 * Make sure we have a consisten exclusive fence and shared fence list. 119 122 * Must be called with rcu read side lock held. 120 123 */ 121 - static inline void 122 - reservation_object_fences(struct reservation_object *obj, 123 - struct dma_fence **excl, 124 - struct reservation_object_list **list, 125 - u32 *shared_count) 124 + static inline void dma_resv_fences(struct dma_resv *obj, 125 + struct dma_fence **excl, 126 + struct dma_resv_list **list, 127 + u32 *shared_count) 126 128 { 127 129 do { 128 130 *excl = rcu_dereference(obj->fence_excl); 129 131 *list = rcu_dereference(obj->fence); 130 132 *shared_count = *list ? 
(*list)->shared_count : 0; 131 - smp_rmb(); /* See reservation_object_add_excl_fence */ 133 + smp_rmb(); /* See dma_resv_add_excl_fence */ 132 134 } while (rcu_access_pointer(obj->fence_excl) != *excl); 133 135 } 134 136 135 137 /** 136 - * reservation_object_get_excl_rcu - get the reservation object's 138 + * dma_resv_get_excl_rcu - get the reservation object's 137 139 * exclusive fence, without lock held. 138 140 * @obj: the reservation object 139 141 * ··· 142 146 * RETURNS 143 147 * The exclusive fence or NULL if none 144 148 */ 145 - static inline struct dma_fence * 146 - reservation_object_get_excl_rcu(struct reservation_object *obj) 149 + static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj) 147 150 { 148 151 struct dma_fence *fence; 149 152 ··· 157 162 } 158 163 159 164 /** 160 - * reservation_object_lock - lock the reservation object 165 + * dma_resv_lock - lock the reservation object 161 166 * @obj: the reservation object 162 167 * @ctx: the locking context 163 168 * ··· 171 176 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation 172 177 * object may be locked by itself by passing NULL as @ctx. 173 178 */ 174 - static inline int 175 - reservation_object_lock(struct reservation_object *obj, 176 - struct ww_acquire_ctx *ctx) 179 + static inline int dma_resv_lock(struct dma_resv *obj, 180 + struct ww_acquire_ctx *ctx) 177 181 { 178 182 return ww_mutex_lock(&obj->lock, ctx); 179 183 } 180 184 181 185 /** 182 - * reservation_object_lock_interruptible - lock the reservation object 186 + * dma_resv_lock_interruptible - lock the reservation object 183 187 * @obj: the reservation object 184 188 * @ctx: the locking context 185 189 * ··· 192 198 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation 193 199 * object may be locked by itself by passing NULL as @ctx. 
194 200 */ 195 - static inline int 196 - reservation_object_lock_interruptible(struct reservation_object *obj, 197 - struct ww_acquire_ctx *ctx) 201 + static inline int dma_resv_lock_interruptible(struct dma_resv *obj, 202 + struct ww_acquire_ctx *ctx) 198 203 { 199 204 return ww_mutex_lock_interruptible(&obj->lock, ctx); 200 205 } 201 206 202 207 /** 203 - * reservation_object_lock_slow - slowpath lock the reservation object 208 + * dma_resv_lock_slow - slowpath lock the reservation object 204 209 * @obj: the reservation object 205 210 * @ctx: the locking context 206 211 * 207 212 * Acquires the reservation object after a die case. This function 208 - * will sleep until the lock becomes available. See reservation_object_lock() as 213 + * will sleep until the lock becomes available. See dma_resv_lock() as 209 214 * well. 210 215 */ 211 - static inline void 212 - reservation_object_lock_slow(struct reservation_object *obj, 213 - struct ww_acquire_ctx *ctx) 216 + static inline void dma_resv_lock_slow(struct dma_resv *obj, 217 + struct ww_acquire_ctx *ctx) 214 218 { 215 219 ww_mutex_lock_slow(&obj->lock, ctx); 216 220 } 217 221 218 222 /** 219 - * reservation_object_lock_slow_interruptible - slowpath lock the reservation 223 + * dma_resv_lock_slow_interruptible - slowpath lock the reservation 220 224 * object, interruptible 221 225 * @obj: the reservation object 222 226 * @ctx: the locking context 223 227 * 224 228 * Acquires the reservation object interruptible after a die case. This function 225 229 * will sleep until the lock becomes available. See 226 - * reservation_object_lock_interruptible() as well. 230 + * dma_resv_lock_interruptible() as well. 
227 231 */ 228 - static inline int 229 - reservation_object_lock_slow_interruptible(struct reservation_object *obj, 230 - struct ww_acquire_ctx *ctx) 232 + static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, 233 + struct ww_acquire_ctx *ctx) 231 234 { 232 235 return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); 233 236 } 234 237 235 238 /** 236 - * reservation_object_trylock - trylock the reservation object 239 + * dma_resv_trylock - trylock the reservation object 237 240 * @obj: the reservation object 238 241 * 239 242 * Tries to lock the reservation object for exclusive access and modification. ··· 243 252 * 244 253 * Returns true if the lock was acquired, false otherwise. 245 254 */ 246 - static inline bool __must_check 247 - reservation_object_trylock(struct reservation_object *obj) 255 + static inline bool __must_check dma_resv_trylock(struct dma_resv *obj) 248 256 { 249 257 return ww_mutex_trylock(&obj->lock); 250 258 } 251 259 252 260 /** 253 - * reservation_object_is_locked - is the reservation object locked 261 + * dma_resv_is_locked - is the reservation object locked 254 262 * @obj: the reservation object 255 263 * 256 264 * Returns true if the mutex is locked, false if unlocked. 257 265 */ 258 - static inline bool 259 - reservation_object_is_locked(struct reservation_object *obj) 266 + static inline bool dma_resv_is_locked(struct dma_resv *obj) 260 267 { 261 268 return ww_mutex_is_locked(&obj->lock); 262 269 } 263 270 264 271 /** 265 - * reservation_object_locking_ctx - returns the context used to lock the object 272 + * dma_resv_locking_ctx - returns the context used to lock the object 266 273 * @obj: the reservation object 267 274 * 268 275 * Returns the context used to lock a reservation object or NULL if no context 269 276 * was used or the object is not locked at all. 
270 277 */ 271 - static inline struct ww_acquire_ctx * 272 - reservation_object_locking_ctx(struct reservation_object *obj) 278 + static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) 273 279 { 274 280 return READ_ONCE(obj->lock.ctx); 275 281 } 276 282 277 283 /** 278 - * reservation_object_unlock - unlock the reservation object 284 + * dma_resv_unlock - unlock the reservation object 279 285 * @obj: the reservation object 280 286 * 281 287 * Unlocks the reservation object following exclusive access. 282 288 */ 283 - static inline void 284 - reservation_object_unlock(struct reservation_object *obj) 289 + static inline void dma_resv_unlock(struct dma_resv *obj) 285 290 { 286 291 #ifdef CONFIG_DEBUG_MUTEXES 287 292 /* Test shared fence slot reservation */ 288 293 if (rcu_access_pointer(obj->fence)) { 289 - struct reservation_object_list *fence = 290 - reservation_object_get_list(obj); 294 + struct dma_resv_list *fence = dma_resv_get_list(obj); 291 295 292 296 fence->shared_max = fence->shared_count; 293 297 } ··· 290 304 ww_mutex_unlock(&obj->lock); 291 305 } 292 306 293 - void reservation_object_init(struct reservation_object *obj); 294 - void reservation_object_fini(struct reservation_object *obj); 295 - int reservation_object_reserve_shared(struct reservation_object *obj, 296 - unsigned int num_fences); 297 - void reservation_object_add_shared_fence(struct reservation_object *obj, 298 - struct dma_fence *fence); 307 + void dma_resv_init(struct dma_resv *obj); 308 + void dma_resv_fini(struct dma_resv *obj); 309 + int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); 310 + void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); 299 311 300 - void reservation_object_add_excl_fence(struct reservation_object *obj, 301 - struct dma_fence *fence); 312 + void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); 302 313 303 - int reservation_object_get_fences_rcu(struct 
reservation_object *obj, 304 - struct dma_fence **pfence_excl, 305 - unsigned *pshared_count, 306 - struct dma_fence ***pshared); 314 + int dma_resv_get_fences_rcu(struct dma_resv *obj, 315 + struct dma_fence **pfence_excl, 316 + unsigned *pshared_count, 317 + struct dma_fence ***pshared); 307 318 308 - int reservation_object_copy_fences(struct reservation_object *dst, 309 - struct reservation_object *src); 319 + int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); 310 320 311 - long reservation_object_wait_timeout_rcu(struct reservation_object *obj, 312 - bool wait_all, bool intr, 313 - unsigned long timeout); 321 + long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, 322 + unsigned long timeout); 314 323 315 - bool reservation_object_test_signaled_rcu(struct reservation_object *obj, 316 - bool test_all); 324 + bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); 317 325 318 326 #endif /* _LINUX_RESERVATION_H */