Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/guc: Introduce the GuC Buffer Cache

The purpose of the GuC Buffer Cache is to maintain a set of reusable
buffers that could be used while sending some of the CTB H2G actions
that require a separate buffer with indirect data. Currently only a few
PF actions need this, so initialize it only when running as a PF.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-9-michal.wajdeczko@intel.com

+256
+1
drivers/gpu/drm/xe/Makefile
··· 56 56 xe_gt_topology.o \ 57 57 xe_guc.o \ 58 58 xe_guc_ads.o \ 59 + xe_guc_buf.o \ 59 60 xe_guc_capture.o \ 60 61 xe_guc_ct.o \ 61 62 xe_guc_db_mgr.o \
+5
drivers/gpu/drm/xe/xe_guc.c
··· 23 23 #include "xe_gt_sriov_vf.h" 24 24 #include "xe_gt_throttle.h" 25 25 #include "xe_guc_ads.h" 26 + #include "xe_guc_buf.h" 26 27 #include "xe_guc_capture.h" 27 28 #include "xe_guc_ct.h" 28 29 #include "xe_guc_db_mgr.h" ··· 741 740 return ret; 742 741 743 742 ret = xe_guc_pc_init(&guc->pc); 743 + if (ret) 744 + return ret; 745 + 746 + ret = xe_guc_buf_cache_init(&guc->buf); 744 747 if (ret) 745 748 return ret; 746 749
+172
drivers/gpu/drm/xe/xe_guc_buf.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2024 Intel Corporation 4 + */ 5 + 6 + #include <linux/cleanup.h> 7 + #include <drm/drm_managed.h> 8 + 9 + #include "xe_assert.h" 10 + #include "xe_bo.h" 11 + #include "xe_gt_printk.h" 12 + #include "xe_guc.h" 13 + #include "xe_guc_buf.h" 14 + #include "xe_sa.h" 15 + 16 + static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache) 17 + { 18 + return container_of(cache, struct xe_guc, buf); 19 + } 20 + 21 + static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache) 22 + { 23 + return guc_to_gt(cache_to_guc(cache)); 24 + } 25 + 26 + /** 27 + * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache. 28 + * @cache: the &xe_guc_buf_cache to initialize 29 + * 30 + * The Buffer Cache allows to obtain a reusable buffer that can be used to pass 31 + * indirect H2G data to GuC without a need to create a ad-hoc allocation. 32 + * 33 + * Return: 0 on success or a negative error code on failure. 34 + */ 35 + int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) 36 + { 37 + struct xe_gt *gt = cache_to_gt(cache); 38 + struct xe_sa_manager *sam; 39 + 40 + /* XXX: currently it's useful only for the PF actions */ 41 + if (!IS_SRIOV_PF(gt_to_xe(gt))) 42 + return 0; 43 + 44 + sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32)); 45 + if (IS_ERR(sam)) 46 + return PTR_ERR(sam); 47 + cache->sam = sam; 48 + 49 + xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n", 50 + xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo), 51 + __builtin_return_address(0)); 52 + return 0; 53 + } 54 + 55 + /** 56 + * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports. 57 + * @cache: the &xe_guc_buf_cache to query 58 + * 59 + * Return: a size of the largest reusable buffer (in dwords) 60 + */ 61 + u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache) 62 + { 63 + return cache->sam ? 
cache->sam->base.size / sizeof(u32) : 0; 64 + } 65 + 66 + /** 67 + * xe_guc_buf_reserve() - Reserve a new sub-allocation. 68 + * @cache: the &xe_guc_buf_cache where reserve sub-allocation 69 + * @dwords: the requested size of the buffer in dwords 70 + * 71 + * Use xe_guc_buf_is_valid() to check if returned buffer reference is valid. 72 + * Must use xe_guc_buf_release() to release a sub-allocation. 73 + * 74 + * Return: a &xe_guc_buf of new sub-allocation. 75 + */ 76 + struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords) 77 + { 78 + struct drm_suballoc *sa; 79 + 80 + if (cache->sam) 81 + sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(32), GFP_ATOMIC); 82 + else 83 + sa = ERR_PTR(-EOPNOTSUPP); 84 + 85 + return (struct xe_guc_buf){ .sa = sa }; 86 + } 87 + 88 + /** 89 + * xe_guc_buf_from_data() - Reserve a new sub-allocation using data. 90 + * @cache: the &xe_guc_buf_cache where reserve sub-allocation 91 + * @data: the data to flush the sub-allocation 92 + * @size: the size of the data 93 + * 94 + * Similar to xe_guc_buf_reserve() but flushes @data to the GPU memory. 95 + * 96 + * Return: a &xe_guc_buf of new sub-allocation. 97 + */ 98 + struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache, 99 + const void *data, size_t size) 100 + { 101 + struct drm_suballoc *sa; 102 + 103 + sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC); 104 + if (!IS_ERR(sa)) 105 + memcpy(xe_sa_bo_cpu_addr(sa), data, size); 106 + 107 + return (struct xe_guc_buf){ .sa = sa }; 108 + } 109 + 110 + /** 111 + * xe_guc_buf_release() - Release a sub-allocation. 112 + * @buf: the &xe_guc_buf to release 113 + * 114 + * Releases a sub-allocation reserved by the xe_guc_buf_reserve(). 115 + */ 116 + void xe_guc_buf_release(const struct xe_guc_buf buf) 117 + { 118 + if (xe_guc_buf_is_valid(buf)) 119 + xe_sa_bo_free(buf.sa, NULL); 120 + } 121 + 122 + /** 123 + * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory. 
124 + * @buf: the &xe_guc_buf to flush 125 + * 126 + * Return: a GPU address of the sub-allocation. 127 + */ 128 + u64 xe_guc_buf_flush(const struct xe_guc_buf buf) 129 + { 130 + xe_sa_bo_flush_write(buf.sa); 131 + return xe_sa_bo_gpu_addr(buf.sa); 132 + } 133 + 134 + /** 135 + * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation. 136 + * @buf: the &xe_guc_buf to query 137 + * 138 + * Return: a CPU pointer of the sub-allocation. 139 + */ 140 + void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf) 141 + { 142 + return xe_sa_bo_cpu_addr(buf.sa); 143 + } 144 + 145 + /** 146 + * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation. 147 + * @buf: the &xe_guc_buf to query 148 + * 149 + * Return: a GPU address of the sub-allocation. 150 + */ 151 + u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf) 152 + { 153 + return xe_sa_bo_gpu_addr(buf.sa); 154 + } 155 + 156 + /** 157 + * xe_guc_cache_gpu_addr_from_ptr() - Lookup a GPU address using the pointer. 158 + * @cache: the &xe_guc_buf_cache with sub-allocations 159 + * @ptr: the CPU pointer of the sub-allocation 160 + * @size: the size of the data 161 + * 162 + * Return: a GPU address on success or 0 if the pointer was unrelated. 163 + */ 164 + u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size) 165 + { 166 + ptrdiff_t offset = ptr - cache->sam->cpu_ptr; 167 + 168 + if (offset < 0 || offset + size > cache->sam->base.size) 169 + return 0; 170 + 171 + return cache->sam->gpu_addr + offset; 172 + }
+47
drivers/gpu/drm/xe/xe_guc_buf.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2024 Intel Corporation 4 + */ 5 + 6 + #ifndef _XE_GUC_BUF_H_ 7 + #define _XE_GUC_BUF_H_ 8 + 9 + #include <linux/cleanup.h> 10 + #include <linux/err.h> 11 + 12 + #include "xe_guc_buf_types.h" 13 + 14 + int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache); 15 + u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache); 16 + struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords); 17 + struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache, 18 + const void *data, size_t size); 19 + void xe_guc_buf_release(const struct xe_guc_buf buf); 20 + 21 + /** 22 + * xe_guc_buf_is_valid() - Check if a buffer reference is valid. 23 + * @buf: the &xe_guc_buf reference to check 24 + * 25 + * Return: true if @ref represents a valid sub-allication. 26 + */ 27 + static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf) 28 + { 29 + return !IS_ERR_OR_NULL(buf.sa); 30 + } 31 + 32 + void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf); 33 + u64 xe_guc_buf_flush(const struct xe_guc_buf buf); 34 + u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf); 35 + u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size); 36 + 37 + DEFINE_CLASS(xe_guc_buf, struct xe_guc_buf, 38 + xe_guc_buf_release(_T), 39 + xe_guc_buf_reserve(cache, num), 40 + struct xe_guc_buf_cache *cache, u32 num); 41 + 42 + DEFINE_CLASS(xe_guc_buf_from_data, struct xe_guc_buf, 43 + xe_guc_buf_release(_T), 44 + xe_guc_buf_from_data(cache, data, size), 45 + struct xe_guc_buf_cache *cache, const void *data, size_t size); 46 + 47 + #endif
+28
drivers/gpu/drm/xe/xe_guc_buf_types.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_GUC_BUF_TYPES_H_
#define _XE_GUC_BUF_TYPES_H_

struct drm_suballoc;
struct xe_sa_manager;

/**
 * struct xe_guc_buf_cache - GuC Data Buffer Cache.
 */
struct xe_guc_buf_cache {
	/* private: internal sub-allocation manager backing all cached buffers */
	struct xe_sa_manager *sam;
};

/**
 * struct xe_guc_buf - GuC Data Buffer Reference.
 */
struct xe_guc_buf {
	/* private: internal sub-allocation reference (may hold an ERR_PTR) */
	struct drm_suballoc *sa;
};

#endif
+3
drivers/gpu/drm/xe/xe_guc_types.h
··· 11 11 12 12 #include "regs/xe_reg_defs.h" 13 13 #include "xe_guc_ads_types.h" 14 + #include "xe_guc_buf_types.h" 14 15 #include "xe_guc_ct_types.h" 15 16 #include "xe_guc_fwif.h" 16 17 #include "xe_guc_log_types.h" ··· 59 58 struct xe_guc_ads ads; 60 59 /** @ct: GuC ct */ 61 60 struct xe_guc_ct ct; 61 + /** @buf: GuC Buffer Cache manager */ 62 + struct xe_guc_buf_cache buf; 62 63 /** @capture: the error-state-capture module's data and objects */ 63 64 struct xe_guc_state_capture *capture; 64 65 /** @pc: GuC Power Conservation */