Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe: Split lrc seqno fence creation up

Since sometimes a lock is required to initialize a seqno fence,
and it might be desirable not to hold that lock while performing
memory allocations, split the lrc seqno fence creation up into an
allocation phase and an initialization phase.

Since lrc seqno fences under the hood are hw_fences, do the same
for these, and remove the now-unused xe_hw_fence_create() function.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240527135912.152156-3-thomas.hellstrom@linux.intel.com

+101 -16
+47 -12
drivers/gpu/drm/xe/xe_hw_fence.c
··· 208 208 return container_of(fence, struct xe_hw_fence, dma); 209 209 } 210 210 211 - struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx, 212 - struct iosys_map seqno_map) 211 + /** 212 + * xe_hw_fence_alloc() - Allocate an hw fence. 213 + * 214 + * Allocate but don't initialize an hw fence. 215 + * 216 + * Return: Pointer to the allocated fence or 217 + * negative error pointer on error. 218 + */ 219 + struct dma_fence *xe_hw_fence_alloc(void) 213 220 { 214 - struct xe_hw_fence *fence; 221 + struct xe_hw_fence *hw_fence = fence_alloc(); 215 222 216 - fence = fence_alloc(); 217 - if (!fence) 223 + if (!hw_fence) 218 224 return ERR_PTR(-ENOMEM); 219 225 220 - fence->ctx = ctx; 221 - fence->seqno_map = seqno_map; 222 - INIT_LIST_HEAD(&fence->irq_link); 226 + return &hw_fence->dma; 227 + } 223 228 224 - dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock, 229 + /** 230 + * xe_hw_fence_free() - Free an hw fence. 231 + * @fence: Pointer to the fence to free. 232 + * 233 + * Frees an hw fence that hasn't yet been 234 + * initialized. 235 + */ 236 + void xe_hw_fence_free(struct dma_fence *fence) 237 + { 238 + fence_free(&fence->rcu); 239 + } 240 + 241 + /** 242 + * xe_hw_fence_init() - Initialize an hw fence. 243 + * @fence: Pointer to the fence to initialize. 244 + * @ctx: Pointer to the struct xe_hw_fence_ctx fence context. 245 + * @seqno_map: Pointer to the map into where the seqno is blitted. 246 + * 247 + * Initializes a pre-allocated hw fence. 248 + * After initialization, the fence is subject to normal 249 + * dma-fence refcounting. 
250 + */ 251 + void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx, 252 + struct iosys_map seqno_map) 253 + { 254 + struct xe_hw_fence *hw_fence = 255 + container_of(fence, typeof(*hw_fence), dma); 256 + 257 + hw_fence->ctx = ctx; 258 + hw_fence->seqno_map = seqno_map; 259 + INIT_LIST_HEAD(&hw_fence->irq_link); 260 + 261 + dma_fence_init(fence, &xe_hw_fence_ops, &ctx->irq->lock, 225 262 ctx->dma_fence_ctx, ctx->next_seqno++); 226 263 227 - trace_xe_hw_fence_create(fence); 228 - 229 - return fence; 264 + trace_xe_hw_fence_create(hw_fence); 230 265 }
+5 -2
drivers/gpu/drm/xe/xe_hw_fence.h
··· 24 24 struct xe_hw_fence_irq *irq, const char *name); 25 25 void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx); 26 26 27 - struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx, 28 - struct iosys_map seqno_map); 27 + struct dma_fence *xe_hw_fence_alloc(void); 29 28 29 + void xe_hw_fence_free(struct dma_fence *fence); 30 + 31 + void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx, 32 + struct iosys_map seqno_map); 30 33 #endif
+46 -2
drivers/gpu/drm/xe/xe_lrc.c
··· 1033 1033 return __xe_lrc_seqno_ggtt_addr(lrc); 1034 1034 } 1035 1035 1036 + /** 1037 + * xe_lrc_alloc_seqno_fence() - Allocate an lrc seqno fence. 1038 + * 1039 + * Allocate but don't initialize an lrc seqno fence. 1040 + * 1041 + * Return: Pointer to the allocated fence or 1042 + * negative error pointer on error. 1043 + */ 1044 + struct dma_fence *xe_lrc_alloc_seqno_fence(void) 1045 + { 1046 + return xe_hw_fence_alloc(); 1047 + } 1048 + 1049 + /** 1050 + * xe_lrc_free_seqno_fence() - Free an lrc seqno fence. 1051 + * @fence: Pointer to the fence to free. 1052 + * 1053 + * Frees an lrc seqno fence that hasn't yet been 1054 + * initialized. 1055 + */ 1056 + void xe_lrc_free_seqno_fence(struct dma_fence *fence) 1057 + { 1058 + xe_hw_fence_free(fence); 1059 + } 1060 + 1061 + /** 1062 + * xe_lrc_init_seqno_fence() - Initialize an lrc seqno fence. 1063 + * @lrc: Pointer to the lrc. 1064 + * @fence: Pointer to the fence to initialize. 1065 + * 1066 + * Initializes a pre-allocated lrc seqno fence. 1067 + * After initialization, the fence is subject to normal 1068 + * dma-fence refcounting. 1069 + */ 1070 + void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence) 1071 + { 1072 + xe_hw_fence_init(fence, &lrc->fence_ctx, __xe_lrc_seqno_map(lrc)); 1073 + } 1074 + 1036 1075 struct dma_fence *xe_lrc_create_seqno_fence(struct xe_lrc *lrc) 1037 1076 { 1038 - return &xe_hw_fence_create(&lrc->fence_ctx, 1039 - __xe_lrc_seqno_map(lrc))->dma; 1077 + struct dma_fence *fence = xe_lrc_alloc_seqno_fence(); 1078 + 1079 + if (IS_ERR(fence)) 1080 + return fence; 1081 + 1082 + xe_lrc_init_seqno_fence(lrc, fence); 1083 + return fence; 1040 1084 } 1041 1085 1042 1086 s32 xe_lrc_seqno(struct xe_lrc *lrc)
+3
drivers/gpu/drm/xe/xe_lrc.h
··· 44 44 u64 xe_lrc_descriptor(struct xe_lrc *lrc); 45 45 46 46 u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc); 47 + struct dma_fence *xe_lrc_alloc_seqno_fence(void); 48 + void xe_lrc_free_seqno_fence(struct dma_fence *fence); 49 + void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence); 47 50 struct dma_fence *xe_lrc_create_seqno_fence(struct xe_lrc *lrc); 48 51 s32 xe_lrc_seqno(struct xe_lrc *lrc); 49 52