Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-buf/fence: make fence context 64 bit v2

Fence contexts are created on the fly (for example) by the GPU scheduler used
in the amdgpu driver as a result of a userspace request. Because of this
userspace could in theory force a wrap around of the 32-bit context number
if it doesn't behave well.

Avoid this by increasing the context number to 64 bits. This way even when
userspace manages to allocate a billion contexts per second it takes more
than 500 years for the context number to wrap around.

v2: fix printf formats as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1464786612-5010-2-git-send-email-deathsimple@vodafone.de

Authored by Christian König, committed by Daniel Vetter
76bf0db5 33779007

+21 -18
+4 -4
drivers/dma-buf/fence.c
··· 35 35 * context or not. One device can have multiple separate contexts, 36 36 * and they're used if some engine can run independently of another. 37 37 */ 38 - static atomic_t fence_context_counter = ATOMIC_INIT(0); 38 + static atomic64_t fence_context_counter = ATOMIC64_INIT(0); 39 39 40 40 /** 41 41 * fence_context_alloc - allocate an array of fence contexts ··· 44 44 * This function will return the first index of the number of fences allocated. 45 45 * The fence context is used for setting fence->context to a unique number. 46 46 */ 47 - unsigned fence_context_alloc(unsigned num) 47 + u64 fence_context_alloc(unsigned num) 48 48 { 49 49 BUG_ON(!num); 50 - return atomic_add_return(num, &fence_context_counter) - num; 50 + return atomic64_add_return(num, &fence_context_counter) - num; 51 51 } 52 52 EXPORT_SYMBOL(fence_context_alloc); 53 53 ··· 513 513 */ 514 514 void 515 515 fence_init(struct fence *fence, const struct fence_ops *ops, 516 - spinlock_t *lock, unsigned context, unsigned seqno) 516 + spinlock_t *lock, u64 context, unsigned seqno) 517 517 { 518 518 BUG_ON(!lock); 519 519 BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 2032 2032 struct amdgpu_irq_src hpd_irq; 2033 2033 2034 2034 /* rings */ 2035 - unsigned fence_context; 2035 + u64 fence_context; 2036 2036 unsigned num_rings; 2037 2037 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; 2038 2038 bool ib_pool_ready;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
··· 427 427 soffset, eoffset, eoffset - soffset); 428 428 429 429 if (i->fence) 430 - seq_printf(m, " protected by 0x%08x on context %d", 430 + seq_printf(m, " protected by 0x%08x on context %llu", 431 431 i->fence->seqno, i->fence->context); 432 432 433 433 seq_printf(m, "\n");
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
··· 125 125 u32 completed_fence; 126 126 u32 retired_fence; 127 127 wait_queue_head_t fence_event; 128 - unsigned int fence_context; 128 + u64 fence_context; 129 129 spinlock_t fence_spinlock; 130 130 131 131 /* worker for handling active-list retiring: */
+2 -1
drivers/gpu/drm/nouveau/nouveau_fence.h
··· 57 57 int (*context_new)(struct nouveau_channel *); 58 58 void (*context_del)(struct nouveau_channel *); 59 59 60 - u32 contexts, context_base; 60 + u32 contexts; 61 + u64 context_base; 61 62 bool uevent; 62 63 }; 63 64
+1 -1
drivers/gpu/drm/qxl/qxl_release.c
··· 96 96 return 0; 97 97 98 98 if (have_drawable_releases && sc > 300) { 99 - FENCE_WARN(fence, "failed to wait on release %d " 99 + FENCE_WARN(fence, "failed to wait on release %llu " 100 100 "after spincount %d\n", 101 101 fence->context & ~0xf0000000, sc); 102 102 goto signaled;
+1 -1
drivers/gpu/drm/radeon/radeon.h
··· 2386 2386 struct radeon_mman mman; 2387 2387 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; 2388 2388 wait_queue_head_t fence_queue; 2389 - unsigned fence_context; 2389 + u64 fence_context; 2390 2390 struct mutex ring_lock; 2391 2391 struct radeon_ring ring[RADEON_NUM_RINGS]; 2392 2392 bool ib_pool_ready;
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 46 46 bool goal_irq_on; /* Protected by @goal_irq_mutex */ 47 47 bool seqno_valid; /* Protected by @lock, and may not be set to true 48 48 without the @goal_irq_mutex held. */ 49 - unsigned ctx; 49 + u64 ctx; 50 50 }; 51 51 52 52 struct vmw_user_fence {
+2 -1
drivers/staging/android/sync.h
··· 68 68 69 69 /* protected by child_list_lock */ 70 70 bool destroyed; 71 - int context, value; 71 + u64 context; 72 + int value; 72 73 73 74 struct list_head child_list_head; 74 75 spinlock_t child_list_lock;
+7 -6
include/linux/fence.h
··· 75 75 struct rcu_head rcu; 76 76 struct list_head cb_list; 77 77 spinlock_t *lock; 78 - unsigned context, seqno; 78 + u64 context; 79 + unsigned seqno; 79 80 unsigned long flags; 80 81 ktime_t timestamp; 81 82 int status; ··· 179 178 }; 180 179 181 180 void fence_init(struct fence *fence, const struct fence_ops *ops, 182 - spinlock_t *lock, unsigned context, unsigned seqno); 181 + spinlock_t *lock, u64 context, unsigned seqno); 183 182 184 183 void fence_release(struct kref *kref); 185 184 void fence_free(struct fence *fence); ··· 353 352 return ret < 0 ? ret : 0; 354 353 } 355 354 356 - unsigned fence_context_alloc(unsigned num); 355 + u64 fence_context_alloc(unsigned num); 357 356 358 357 #define FENCE_TRACE(f, fmt, args...) \ 359 358 do { \ 360 359 struct fence *__ff = (f); \ 361 360 if (config_enabled(CONFIG_FENCE_TRACE)) \ 362 - pr_info("f %u#%u: " fmt, \ 361 + pr_info("f %llu#%u: " fmt, \ 363 362 __ff->context, __ff->seqno, ##args); \ 364 363 } while (0) 365 364 366 365 #define FENCE_WARN(f, fmt, args...) \ 367 366 do { \ 368 367 struct fence *__ff = (f); \ 369 - pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ 368 + pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ 370 369 ##args); \ 371 370 } while (0) 372 371 373 372 #define FENCE_ERR(f, fmt, args...) \ 374 373 do { \ 375 374 struct fence *__ff = (f); \ 376 - pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ 375 + pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ 377 376 ##args); \ 378 377 } while (0) 379 378