/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;
extern struct lock_class_key reservation_seqcount_class;
extern const char reservation_seqcount_string[];

/**
 * struct reservation_object_list - a list of shared fences
 * @rcu: for internal use
 * @shared_count: number of fences in the shared fence table
 * @shared_max: allocated size of the shared fence table, for growing it
 * @shared: shared fence table
 */
struct reservation_object_list {
        struct rcu_head rcu;
        u32 shared_count, shared_max;
        struct dma_fence __rcu *shared[];
};

/**
 * struct reservation_object - a reservation object manages fences for a buffer
 * @lock: update side lock
 * @seq: sequence count for managing RCU read-side synchronization
 * @fence_excl: the exclusive fence, if there is one currently
 * @fence: list of current shared fences
 * @staged: staged copy of shared fences for RCU updates
 */
struct reservation_object {
        struct ww_mutex lock;
        seqcount_t seq;

        struct dma_fence __rcu *fence_excl;
        struct reservation_object_list __rcu *fence;
        struct reservation_object_list *staged;
};

#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
#define reservation_object_assert_held(obj) \
        lockdep_assert_held(&(obj)->lock.base)

/**
 * reservation_object_init - initialize a reservation object
 * @obj: the reservation object
 */
static inline void
reservation_object_init(struct reservation_object *obj)
{
        ww_mutex_init(&obj->lock, &reservation_ww_class);

        __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
        RCU_INIT_POINTER(obj->fence, NULL);
        RCU_INIT_POINTER(obj->fence_excl, NULL);
        obj->staged = NULL;
}
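
/*
 * Example sketch: a reservation object is normally embedded in a driver's
 * buffer object and follows that buffer's lifetime. "struct my_buffer" and
 * its helpers below are hypothetical, for illustration only.
 *
 *      struct my_buffer {
 *              void *vaddr;
 *              struct reservation_object resv;
 *      };
 *
 *      static void my_buffer_init(struct my_buffer *buf)
 *      {
 *              reservation_object_init(&buf->resv);
 *      }
 *
 *      static void my_buffer_release(struct my_buffer *buf)
 *      {
 *              reservation_object_fini(&buf->resv);
 *      }
 *
 * Any fences still attached are dropped by reservation_object_fini() below,
 * so it must only run once no other thread can reach the buffer.
 */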

/**
 * reservation_object_fini - destroy a reservation object
 * @obj: the reservation object
 */
static inline void
reservation_object_fini(struct reservation_object *obj)
{
        int i;
        struct reservation_object_list *fobj;
        struct dma_fence *excl;

        /*
         * This object should be dead and all references to it must have
         * been released, so there is no need for RCU protection.
         */
        excl = rcu_dereference_protected(obj->fence_excl, 1);
        if (excl)
                dma_fence_put(excl);

        fobj = rcu_dereference_protected(obj->fence, 1);
        if (fobj) {
                for (i = 0; i < fobj->shared_count; ++i)
                        dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));

                kfree(fobj);
        }
        kfree(obj->staged);

        ww_mutex_destroy(&obj->lock);
}

/**
 * reservation_object_get_list - get the reservation object's
 * shared fence list, with update-side lock held
 * @obj: the reservation object
 *
 * Returns the shared fence list. Does NOT take references to
 * the fences. The obj->lock must be held.
 */
static inline struct reservation_object_list *
reservation_object_get_list(struct reservation_object *obj)
{
        return rcu_dereference_protected(obj->fence,
                                         reservation_object_held(obj));
}

/**
 * reservation_object_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only held against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked on its own by passing NULL as @ctx.
 */
static inline int
reservation_object_lock(struct reservation_object *obj,
                        struct ww_acquire_ctx *ctx)
{
        return ww_mutex_lock(&obj->lock, ctx);
}

/**
 * reservation_object_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only held against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check
reservation_object_trylock(struct reservation_object *obj)
{
        return ww_mutex_trylock(&obj->lock);
}

/**
 * reservation_object_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void
reservation_object_unlock(struct reservation_object *obj)
{
        ww_mutex_unlock(&obj->lock);
}
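
/*
 * Example sketch: taking the update-side lock on a single reservation
 * object. "resv" is a hypothetical pointer to a driver's reservation
 * object. Since only one object is locked here, NULL is passed as the
 * acquire context; when several reservation objects must be held at once,
 * initialize a struct ww_acquire_ctx with reservation_ww_class, pass it to
 * every reservation_object_lock() call, and back off and retry on
 * -EDEADLK as described in the ww_mutex documentation.
 *
 *      int ret;
 *
 *      ret = reservation_object_lock(resv, NULL);
 *      if (ret)
 *              return ret;
 *
 *      // ... update the exclusive/shared fences under the lock ...
 *
 *      reservation_object_unlock(resv);
 */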

/**
 * reservation_object_get_excl - get the reservation object's
 * exclusive fence, with update-side lock held
 * @obj: the reservation object
 *
 * Returns the exclusive fence (if any). Does NOT take a
 * reference. The obj->lock must be held.
 *
 * RETURNS
 * The exclusive fence or NULL
 */
static inline struct dma_fence *
reservation_object_get_excl(struct reservation_object *obj)
{
        return rcu_dereference_protected(obj->fence_excl,
                                         reservation_object_held(obj));
}

/**
 * reservation_object_get_excl_rcu - get the reservation object's
 * exclusive fence, without lock held.
 * @obj: the reservation object
 *
 * If there is an exclusive fence, this atomically increments its
 * reference count and returns it.
 *
 * RETURNS
 * The exclusive fence or NULL if none
 */
static inline struct dma_fence *
reservation_object_get_excl_rcu(struct reservation_object *obj)
{
        struct dma_fence *fence;

        if (!rcu_access_pointer(obj->fence_excl))
                return NULL;

        rcu_read_lock();
        fence = dma_fence_get_rcu_safe(&obj->fence_excl);
        rcu_read_unlock();

        return fence;
}

int reservation_object_reserve_shared(struct reservation_object *obj);
void reservation_object_add_shared_fence(struct reservation_object *obj,
                                         struct dma_fence *fence);

void reservation_object_add_excl_fence(struct reservation_object *obj,
                                       struct dma_fence *fence);

int reservation_object_get_fences_rcu(struct reservation_object *obj,
                                      struct dma_fence **pfence_excl,
                                      unsigned *pshared_count,
                                      struct dma_fence ***pshared);

int reservation_object_copy_fences(struct reservation_object *dst,
                                   struct reservation_object *src);

long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
                                         bool wait_all, bool intr,
                                         unsigned long timeout);

bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
                                          bool test_all);

#endif /* _LINUX_RESERVATION_H */
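
/*
 * Example sketch: attaching a shared fence and later waiting for the buffer
 * to become idle, using the declarations above. "resv" and "fence" are
 * hypothetical driver-side variables. reservation_object_reserve_shared()
 * must be called under obj->lock before
 * reservation_object_add_shared_fence(), so that adding the fence itself
 * cannot fail.
 *
 *      int ret;
 *      long timeout;
 *
 *      ret = reservation_object_lock(resv, NULL);
 *      if (ret)
 *              return ret;
 *      ret = reservation_object_reserve_shared(resv);
 *      if (!ret)
 *              reservation_object_add_shared_fence(resv, fence);
 *      reservation_object_unlock(resv);
 *      if (ret)
 *              return ret;
 *
 *      // Without holding obj->lock: wait interruptibly for the exclusive
 *      // fence and all shared fences, with a one second timeout.
 *      timeout = reservation_object_wait_timeout_rcu(resv, true, true,
 *                                                    msecs_to_jiffies(1000));
 *      if (timeout < 0)
 *              return timeout;         // e.g. -ERESTARTSYS
 *      if (timeout == 0)
 *              return -ETIMEDOUT;      // fences still pending
 */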