Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

amdkfd: Add kernel queue module

The kernel queue module enables the amdkfd to establish kernel queues that are
not exposed to user space.

The kernel queues are used for HIQ (HSA Interface Queue) and DIQ (Debug
Interface Queue) operations.

v3: Removed use of internal typedefs and added use of the new gart allocation
functions

v4: Fixed a miscalculation in kernel queue wrapping

v5:

Move amdkfd from drm/radeon/ to drm/amd/
Change format of mqd structure to match latest KV firmware
Add support for AQL queue creation to enable working with the open-source HSA
runtime
Add define for kernel queue size
Various fixes

Signed-off-by: Ben Goz <ben.goz@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>

authored by

Ben Goz and committed by
Oded Gabbay
ed6e6a34 6e99df57

+1066 -2
+2 -1
drivers/gpu/drm/amd/amdkfd/Makefile
··· 6 6 7 7 amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ 8 8 kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \ 9 - kfd_process.o kfd_queue.o kfd_mqd_manager.o 9 + kfd_process.o kfd_queue.o kfd_mqd_manager.o \ 10 + kfd_kernel_queue.o 10 11 11 12 obj-$(CONFIG_HSA_AMD) += amdkfd.o
+101
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
··· 1 + /* 2 + * Copyright 2014 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #ifndef KFD_DEVICE_QUEUE_MANAGER_H_ 25 + #define KFD_DEVICE_QUEUE_MANAGER_H_ 26 + 27 + #include <linux/rwsem.h> 28 + #include <linux/list.h> 29 + #include "kfd_priv.h" 30 + #include "kfd_mqd_manager.h" 31 + 32 + #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500) 33 + #define QUEUES_PER_PIPE (8) 34 + #define PIPE_PER_ME_CP_SCHEDULING (3) 35 + #define CIK_VMID_NUM (8) 36 + #define KFD_VMID_START_OFFSET (8) 37 + #define VMID_PER_DEVICE CIK_VMID_NUM 38 + #define KFD_DQM_FIRST_PIPE (0) 39 + 40 + struct device_process_node { 41 + struct qcm_process_device *qpd; 42 + struct list_head list; 43 + }; 44 + 45 + struct device_queue_manager { 46 + int (*create_queue)(struct device_queue_manager *dqm, 47 + struct queue *q, 48 + struct qcm_process_device *qpd, 49 + int *allocate_vmid); 50 + int (*destroy_queue)(struct device_queue_manager *dqm, 51 + struct qcm_process_device *qpd, 52 + struct queue *q); 53 + int (*update_queue)(struct device_queue_manager *dqm, 54 + struct queue *q); 55 + struct mqd_manager * (*get_mqd_manager)( 56 + struct device_queue_manager *dqm, 57 + enum KFD_MQD_TYPE type); 58 + 59 + int (*register_process)(struct device_queue_manager *dqm, 60 + struct qcm_process_device *qpd); 61 + int (*unregister_process)(struct device_queue_manager *dqm, 62 + struct qcm_process_device *qpd); 63 + int (*initialize)(struct device_queue_manager *dqm); 64 + int (*start)(struct device_queue_manager *dqm); 65 + int (*stop)(struct device_queue_manager *dqm); 66 + void (*uninitialize)(struct device_queue_manager *dqm); 67 + int (*create_kernel_queue)(struct device_queue_manager *dqm, 68 + struct kernel_queue *kq, 69 + struct qcm_process_device *qpd); 70 + void (*destroy_kernel_queue)(struct device_queue_manager *dqm, 71 + struct kernel_queue *kq, 72 + struct qcm_process_device *qpd); 73 + bool (*set_cache_memory_policy)(struct device_queue_manager *dqm, 74 + struct qcm_process_device *qpd, 75 + enum cache_policy default_policy, 76 + enum cache_policy 
alternate_policy, 77 + void __user *alternate_aperture_base, 78 + uint64_t alternate_aperture_size); 79 + 80 + 81 + struct mqd_manager *mqds[KFD_MQD_TYPE_MAX]; 82 + struct packet_manager packets; 83 + struct kfd_dev *dev; 84 + struct mutex lock; 85 + struct list_head queues; 86 + unsigned int processes_count; 87 + unsigned int queue_count; 88 + unsigned int next_pipe_to_allocate; 89 + unsigned int *allocated_queues; 90 + unsigned int vmid_bitmap; 91 + uint64_t pipelines_addr; 92 + struct kfd_mem_obj *pipeline_mem; 93 + uint64_t fence_gpu_addr; 94 + unsigned int *fence_addr; 95 + struct kfd_mem_obj *fence_mem; 96 + bool active_runlist; 97 + }; 98 + 99 + 100 + 101 + #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
+347
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
··· 1 + /* 2 + * Copyright 2014 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #include <linux/types.h> 25 + #include <linux/mutex.h> 26 + #include <linux/slab.h> 27 + #include <linux/printk.h> 28 + #include "kfd_kernel_queue.h" 29 + #include "kfd_priv.h" 30 + #include "kfd_device_queue_manager.h" 31 + #include "kfd_pm4_headers.h" 32 + #include "kfd_pm4_opcodes.h" 33 + 34 + #define PM4_COUNT_ZERO (((1 << 15) - 1) << 16) 35 + 36 + static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, 37 + enum kfd_queue_type type, unsigned int queue_size) 38 + { 39 + struct queue_properties prop; 40 + int retval; 41 + union PM4_MES_TYPE_3_HEADER nop; 42 + 43 + BUG_ON(!kq || !dev); 44 + BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); 45 + 46 + pr_debug("kfd: In func %s initializing queue type %d size %d\n", 47 + __func__, KFD_QUEUE_TYPE_HIQ, queue_size); 48 + 49 + nop.opcode = IT_NOP; 50 + nop.type = PM4_TYPE_3; 51 + nop.u32all |= PM4_COUNT_ZERO; 52 + 53 + kq->dev = dev; 54 + kq->nop_packet = nop.u32all; 55 + switch (type) { 56 + case KFD_QUEUE_TYPE_DIQ: 57 + case KFD_QUEUE_TYPE_HIQ: 58 + kq->mqd = dev->dqm->get_mqd_manager(dev->dqm, 59 + KFD_MQD_TYPE_CIK_HIQ); 60 + break; 61 + default: 62 + BUG(); 63 + break; 64 + } 65 + 66 + if (kq->mqd == NULL) 67 + return false; 68 + 69 + prop.doorbell_ptr = 70 + (uint32_t *)kfd_get_kernel_doorbell(dev, &prop.doorbell_off); 71 + 72 + if (prop.doorbell_ptr == NULL) 73 + goto err_get_kernel_doorbell; 74 + 75 + retval = kfd2kgd->allocate_mem(dev->kgd, 76 + queue_size, 77 + PAGE_SIZE, 78 + KFD_MEMPOOL_SYSTEM_WRITECOMBINE, 79 + (struct kgd_mem **) &kq->pq); 80 + 81 + if (retval != 0) 82 + goto err_pq_allocate_vidmem; 83 + 84 + kq->pq_kernel_addr = kq->pq->cpu_ptr; 85 + kq->pq_gpu_addr = kq->pq->gpu_addr; 86 + 87 + retval = kfd2kgd->allocate_mem(dev->kgd, 88 + sizeof(*kq->rptr_kernel), 89 + 32, 90 + KFD_MEMPOOL_SYSTEM_WRITECOMBINE, 91 + (struct kgd_mem **) &kq->rptr_mem); 92 + 93 + if (retval != 0) 94 + goto err_rptr_allocate_vidmem; 95 + 96 + kq->rptr_kernel = 
kq->rptr_mem->cpu_ptr; 97 + kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; 98 + 99 + retval = kfd2kgd->allocate_mem(dev->kgd, 100 + sizeof(*kq->wptr_kernel), 101 + 32, 102 + KFD_MEMPOOL_SYSTEM_WRITECOMBINE, 103 + (struct kgd_mem **) &kq->wptr_mem); 104 + 105 + if (retval != 0) 106 + goto err_wptr_allocate_vidmem; 107 + 108 + kq->wptr_kernel = kq->wptr_mem->cpu_ptr; 109 + kq->wptr_gpu_addr = kq->wptr_mem->gpu_addr; 110 + 111 + memset(kq->pq_kernel_addr, 0, queue_size); 112 + memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel)); 113 + memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel)); 114 + 115 + prop.queue_size = queue_size; 116 + prop.is_interop = false; 117 + prop.priority = 1; 118 + prop.queue_percent = 100; 119 + prop.type = type; 120 + prop.vmid = 0; 121 + prop.queue_address = kq->pq_gpu_addr; 122 + prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr; 123 + prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr; 124 + 125 + if (init_queue(&kq->queue, prop) != 0) 126 + goto err_init_queue; 127 + 128 + kq->queue->device = dev; 129 + kq->queue->process = kfd_get_process(current); 130 + 131 + retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd, 132 + &kq->queue->mqd_mem_obj, 133 + &kq->queue->gart_mqd_addr, 134 + &kq->queue->properties); 135 + if (retval != 0) 136 + goto err_init_mqd; 137 + 138 + /* assign HIQ to HQD */ 139 + if (type == KFD_QUEUE_TYPE_HIQ) { 140 + pr_debug("assigning hiq to hqd\n"); 141 + kq->queue->pipe = KFD_CIK_HIQ_PIPE; 142 + kq->queue->queue = KFD_CIK_HIQ_QUEUE; 143 + kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe, 144 + kq->queue->queue, NULL); 145 + } else { 146 + /* allocate fence for DIQ */ 147 + 148 + retval = kfd2kgd->allocate_mem(dev->kgd, 149 + sizeof(uint32_t), 150 + 32, 151 + KFD_MEMPOOL_SYSTEM_WRITECOMBINE, 152 + (struct kgd_mem **) &kq->fence_mem_obj); 153 + 154 + if (retval != 0) 155 + goto err_alloc_fence; 156 + 157 + kq->fence_kernel_address = kq->fence_mem_obj->cpu_ptr; 158 + kq->fence_gpu_addr = kq->fence_mem_obj->gpu_addr; 
159 + } 160 + 161 + print_queue(kq->queue); 162 + 163 + return true; 164 + err_alloc_fence: 165 + err_init_mqd: 166 + uninit_queue(kq->queue); 167 + err_init_queue: 168 + kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem); 169 + err_wptr_allocate_vidmem: 170 + kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem); 171 + err_rptr_allocate_vidmem: 172 + kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq); 173 + err_pq_allocate_vidmem: 174 + pr_err("kfd: error init pq\n"); 175 + kfd_release_kernel_doorbell(dev, (u32 *)prop.doorbell_ptr); 176 + err_get_kernel_doorbell: 177 + pr_err("kfd: error init doorbell"); 178 + return false; 179 + 180 + } 181 + 182 + static void uninitialize(struct kernel_queue *kq) 183 + { 184 + BUG_ON(!kq); 185 + 186 + if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) 187 + kq->mqd->destroy_mqd(kq->mqd, 188 + NULL, 189 + false, 190 + QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, 191 + kq->queue->pipe, 192 + kq->queue->queue); 193 + 194 + kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem); 195 + kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem); 196 + kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq); 197 + kfd_release_kernel_doorbell(kq->dev, 198 + (u32 *)kq->queue->properties.doorbell_ptr); 199 + uninit_queue(kq->queue); 200 + } 201 + 202 + static int acquire_packet_buffer(struct kernel_queue *kq, 203 + size_t packet_size_in_dwords, unsigned int **buffer_ptr) 204 + { 205 + size_t available_size; 206 + size_t queue_size_dwords; 207 + uint32_t wptr, rptr; 208 + unsigned int *queue_address; 209 + 210 + BUG_ON(!kq || !buffer_ptr); 211 + 212 + rptr = *kq->rptr_kernel; 213 + wptr = *kq->wptr_kernel; 214 + queue_address = (unsigned int *)kq->pq_kernel_addr; 215 + queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); 216 + 217 + pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", 218 + __func__, rptr, wptr, queue_address); 219 + 220 + available_size = (rptr - 1 - 
wptr + queue_size_dwords) % 221 + queue_size_dwords; 222 + 223 + if (packet_size_in_dwords >= queue_size_dwords || 224 + packet_size_in_dwords >= available_size) 225 + return -ENOMEM; 226 + 227 + if (wptr + packet_size_in_dwords >= queue_size_dwords) { 228 + while (wptr > 0) { 229 + queue_address[wptr] = kq->nop_packet; 230 + wptr = (wptr + 1) % queue_size_dwords; 231 + } 232 + } 233 + 234 + *buffer_ptr = &queue_address[wptr]; 235 + kq->pending_wptr = wptr + packet_size_in_dwords; 236 + 237 + return 0; 238 + } 239 + 240 + static void submit_packet(struct kernel_queue *kq) 241 + { 242 + #ifdef DEBUG 243 + int i; 244 + #endif 245 + 246 + BUG_ON(!kq); 247 + 248 + #ifdef DEBUG 249 + for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) { 250 + pr_debug("0x%2X ", kq->pq_kernel_addr[i]); 251 + if (i % 15 == 0) 252 + pr_debug("\n"); 253 + } 254 + pr_debug("\n"); 255 + #endif 256 + 257 + *kq->wptr_kernel = kq->pending_wptr; 258 + write_kernel_doorbell((u32 *)kq->queue->properties.doorbell_ptr, 259 + kq->pending_wptr); 260 + } 261 + 262 + static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms) 263 + { 264 + unsigned long org_timeout_ms; 265 + 266 + BUG_ON(!kq); 267 + 268 + org_timeout_ms = timeout_ms; 269 + timeout_ms += jiffies * 1000 / HZ; 270 + while (*kq->wptr_kernel != *kq->rptr_kernel) { 271 + if (time_after(jiffies * 1000 / HZ, timeout_ms)) { 272 + pr_err("kfd: kernel_queue %s timeout expired %lu\n", 273 + __func__, org_timeout_ms); 274 + pr_err("kfd: wptr: %d rptr: %d\n", 275 + *kq->wptr_kernel, *kq->rptr_kernel); 276 + return -ETIME; 277 + } 278 + cpu_relax(); 279 + } 280 + 281 + return 0; 282 + } 283 + 284 + static void rollback_packet(struct kernel_queue *kq) 285 + { 286 + BUG_ON(!kq); 287 + kq->pending_wptr = *kq->queue->properties.write_ptr; 288 + } 289 + 290 + struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, 291 + enum kfd_queue_type type) 292 + { 293 + struct kernel_queue *kq; 294 + 295 + BUG_ON(!dev); 296 + 297 + kq = 
kzalloc(sizeof(struct kernel_queue), GFP_KERNEL); 298 + if (!kq) 299 + return NULL; 300 + 301 + kq->initialize = initialize; 302 + kq->uninitialize = uninitialize; 303 + kq->acquire_packet_buffer = acquire_packet_buffer; 304 + kq->submit_packet = submit_packet; 305 + kq->sync_with_hw = sync_with_hw; 306 + kq->rollback_packet = rollback_packet; 307 + 308 + if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { 309 + pr_err("kfd: failed to init kernel queue\n"); 310 + kfree(kq); 311 + return NULL; 312 + } 313 + return kq; 314 + } 315 + 316 + void kernel_queue_uninit(struct kernel_queue *kq) 317 + { 318 + BUG_ON(!kq); 319 + 320 + kq->uninitialize(kq); 321 + kfree(kq); 322 + } 323 + 324 + void test_kq(struct kfd_dev *dev) 325 + { 326 + struct kernel_queue *kq; 327 + uint32_t *buffer, i; 328 + int retval; 329 + 330 + BUG_ON(!dev); 331 + 332 + pr_debug("kfd: starting kernel queue test\n"); 333 + 334 + kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); 335 + BUG_ON(!kq); 336 + 337 + retval = kq->acquire_packet_buffer(kq, 5, &buffer); 338 + BUG_ON(retval != 0); 339 + for (i = 0; i < 5; i++) 340 + buffer[i] = kq->nop_packet; 341 + kq->submit_packet(kq); 342 + kq->sync_with_hw(kq, 1000); 343 + 344 + pr_debug("kfd: ending kernel queue test\n"); 345 + } 346 + 347 +
+69
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
··· 1 + /* 2 + * Copyright 2014 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #ifndef KFD_KERNEL_QUEUE_H_ 25 + #define KFD_KERNEL_QUEUE_H_ 26 + 27 + #include <linux/list.h> 28 + #include <linux/types.h> 29 + #include "kfd_priv.h" 30 + 31 + struct kernel_queue { 32 + /* interface */ 33 + bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev, 34 + enum kfd_queue_type type, unsigned int queue_size); 35 + void (*uninitialize)(struct kernel_queue *kq); 36 + int (*acquire_packet_buffer)(struct kernel_queue *kq, 37 + size_t packet_size_in_dwords, 38 + unsigned int **buffer_ptr); 39 + 40 + void (*submit_packet)(struct kernel_queue *kq); 41 + int (*sync_with_hw)(struct kernel_queue *kq, 42 + unsigned long timeout_ms); 43 + void (*rollback_packet)(struct kernel_queue *kq); 44 + 45 + /* data */ 46 + struct kfd_dev *dev; 47 + struct mqd_manager *mqd; 48 + struct queue *queue; 49 + uint32_t pending_wptr; 50 + unsigned int nop_packet; 51 + 52 + struct kfd_mem_obj *rptr_mem; 53 + uint32_t *rptr_kernel; 54 + uint64_t rptr_gpu_addr; 55 + struct kfd_mem_obj *wptr_mem; 56 + uint32_t *wptr_kernel; 57 + uint64_t wptr_gpu_addr; 58 + struct kfd_mem_obj *pq; 59 + uint64_t pq_gpu_addr; 60 + uint32_t *pq_kernel_addr; 61 + 62 + struct kfd_mem_obj *fence_mem_obj; 63 + uint64_t fence_gpu_addr; 64 + void *fence_kernel_address; 65 + 66 + struct list_head list; 67 + }; 68 + 69 + #endif /* KFD_KERNEL_QUEUE_H_ */
+405
drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
··· 1 + /* 2 + * Copyright 2014 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #ifndef KFD_PM4_HEADERS_H_ 25 + #define KFD_PM4_HEADERS_H_ 26 + 27 + #ifndef PM4_MES_HEADER_DEFINED 28 + #define PM4_MES_HEADER_DEFINED 29 + union PM4_MES_TYPE_3_HEADER { 30 + struct { 31 + uint32_t reserved1:8; /* < reserved */ 32 + uint32_t opcode:8; /* < IT opcode */ 33 + uint32_t count:14; /* < number of DWORDs - 1 34 + * in the information body. 35 + */ 36 + uint32_t type:2; /* < packet identifier. 
37 + * It should be 3 for type 3 packets 38 + */ 39 + }; 40 + uint32_t u32all; 41 + }; 42 + #endif /* PM4_MES_HEADER_DEFINED */ 43 + 44 + /* --------------------MES_SET_RESOURCES-------------------- */ 45 + 46 + #ifndef PM4_MES_SET_RESOURCES_DEFINED 47 + #define PM4_MES_SET_RESOURCES_DEFINED 48 + enum set_resources_queue_type_enum { 49 + queue_type__mes_set_resources__kernel_interface_queue_kiq = 0, 50 + queue_type__mes_set_resources__hsa_interface_queue_hiq = 1, 51 + queue_type__mes_set_resources__hsa_debug_interface_queue = 4 52 + }; 53 + 54 + struct pm4_set_resources { 55 + union { 56 + union PM4_MES_TYPE_3_HEADER header; /* header */ 57 + uint32_t ordinal1; 58 + }; 59 + 60 + union { 61 + struct { 62 + uint32_t vmid_mask:16; 63 + uint32_t unmap_latency:8; 64 + uint32_t reserved1:5; 65 + enum set_resources_queue_type_enum queue_type:3; 66 + } bitfields2; 67 + uint32_t ordinal2; 68 + }; 69 + 70 + uint32_t queue_mask_lo; 71 + uint32_t queue_mask_hi; 72 + uint32_t gws_mask_lo; 73 + uint32_t gws_mask_hi; 74 + 75 + union { 76 + struct { 77 + uint32_t oac_mask:16; 78 + uint32_t reserved2:16; 79 + } bitfields7; 80 + uint32_t ordinal7; 81 + }; 82 + 83 + union { 84 + struct { 85 + uint32_t gds_heap_base:6; 86 + uint32_t reserved3:5; 87 + uint32_t gds_heap_size:6; 88 + uint32_t reserved4:15; 89 + } bitfields8; 90 + uint32_t ordinal8; 91 + }; 92 + 93 + }; 94 + #endif 95 + 96 + /*--------------------MES_RUN_LIST-------------------- */ 97 + 98 + #ifndef PM4_MES_RUN_LIST_DEFINED 99 + #define PM4_MES_RUN_LIST_DEFINED 100 + 101 + struct pm4_runlist { 102 + union { 103 + union PM4_MES_TYPE_3_HEADER header; /* header */ 104 + uint32_t ordinal1; 105 + }; 106 + 107 + union { 108 + struct { 109 + uint32_t reserved1:2; 110 + uint32_t ib_base_lo:30; 111 + } bitfields2; 112 + uint32_t ordinal2; 113 + }; 114 + 115 + union { 116 + struct { 117 + uint32_t ib_base_hi:16; 118 + uint32_t reserved2:16; 119 + } bitfields3; 120 + uint32_t ordinal3; 121 + }; 122 + 123 + union { 124 + struct { 125 
+ uint32_t ib_size:20; 126 + uint32_t chain:1; 127 + uint32_t offload_polling:1; 128 + uint32_t reserved3:1; 129 + uint32_t valid:1; 130 + uint32_t reserved4:8; 131 + } bitfields4; 132 + uint32_t ordinal4; 133 + }; 134 + 135 + }; 136 + #endif 137 + 138 + /*--------------------MES_MAP_PROCESS-------------------- */ 139 + 140 + #ifndef PM4_MES_MAP_PROCESS_DEFINED 141 + #define PM4_MES_MAP_PROCESS_DEFINED 142 + 143 + struct pm4_map_process { 144 + union { 145 + union PM4_MES_TYPE_3_HEADER header; /* header */ 146 + uint32_t ordinal1; 147 + }; 148 + 149 + union { 150 + struct { 151 + uint32_t pasid:16; 152 + uint32_t reserved1:8; 153 + uint32_t diq_enable:1; 154 + uint32_t process_quantum:7; 155 + } bitfields2; 156 + uint32_t ordinal2; 157 + }; 158 + 159 + union { 160 + struct { 161 + uint32_t page_table_base:28; 162 + uint32_t reserved3:4; 163 + } bitfields3; 164 + uint32_t ordinal3; 165 + }; 166 + 167 + uint32_t sh_mem_bases; 168 + uint32_t sh_mem_ape1_base; 169 + uint32_t sh_mem_ape1_limit; 170 + uint32_t sh_mem_config; 171 + uint32_t gds_addr_lo; 172 + uint32_t gds_addr_hi; 173 + 174 + union { 175 + struct { 176 + uint32_t num_gws:6; 177 + uint32_t reserved4:2; 178 + uint32_t num_oac:4; 179 + uint32_t reserved5:4; 180 + uint32_t gds_size:6; 181 + uint32_t num_queues:10; 182 + } bitfields10; 183 + uint32_t ordinal10; 184 + }; 185 + 186 + }; 187 + #endif 188 + 189 + /*--------------------MES_MAP_QUEUES--------------------*/ 190 + 191 + #ifndef PM4_MES_MAP_QUEUES_DEFINED 192 + #define PM4_MES_MAP_QUEUES_DEFINED 193 + enum map_queues_queue_sel_enum { 194 + queue_sel__mes_map_queues__map_to_specified_queue_slots = 0, 195 + queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1, 196 + queue_sel__mes_map_queues__enable_process_queues = 2 197 + }; 198 + 199 + enum map_queues_vidmem_enum { 200 + vidmem__mes_map_queues__uses_no_video_memory = 0, 201 + vidmem__mes_map_queues__uses_video_memory = 1 202 + }; 203 + 204 + enum map_queues_alloc_format_enum { 205 + 
alloc_format__mes_map_queues__one_per_pipe = 0, 206 + alloc_format__mes_map_queues__all_on_one_pipe = 1 207 + }; 208 + 209 + enum map_queues_engine_sel_enum { 210 + engine_sel__mes_map_queues__compute = 0, 211 + engine_sel__mes_map_queues__sdma0 = 2, 212 + engine_sel__mes_map_queues__sdma1 = 3 213 + }; 214 + 215 + struct pm4_map_queues { 216 + union { 217 + union PM4_MES_TYPE_3_HEADER header; /* header */ 218 + uint32_t ordinal1; 219 + }; 220 + 221 + union { 222 + struct { 223 + uint32_t reserved1:4; 224 + enum map_queues_queue_sel_enum queue_sel:2; 225 + uint32_t reserved2:2; 226 + uint32_t vmid:4; 227 + uint32_t reserved3:4; 228 + enum map_queues_vidmem_enum vidmem:2; 229 + uint32_t reserved4:6; 230 + enum map_queues_alloc_format_enum alloc_format:2; 231 + enum map_queues_engine_sel_enum engine_sel:3; 232 + uint32_t num_queues:3; 233 + } bitfields2; 234 + uint32_t ordinal2; 235 + }; 236 + 237 + struct { 238 + union { 239 + struct { 240 + uint32_t reserved5:2; 241 + uint32_t doorbell_offset:21; 242 + uint32_t reserved6:3; 243 + uint32_t queue:6; 244 + } bitfields3; 245 + uint32_t ordinal3; 246 + }; 247 + 248 + uint32_t mqd_addr_lo; 249 + uint32_t mqd_addr_hi; 250 + uint32_t wptr_addr_lo; 251 + uint32_t wptr_addr_hi; 252 + 253 + } mes_map_queues_ordinals[1]; /* 1..N of these ordinal groups */ 254 + 255 + }; 256 + #endif 257 + 258 + /*--------------------MES_QUERY_STATUS--------------------*/ 259 + 260 + #ifndef PM4_MES_QUERY_STATUS_DEFINED 261 + #define PM4_MES_QUERY_STATUS_DEFINED 262 + enum query_status_interrupt_sel_enum { 263 + interrupt_sel__mes_query_status__completion_status = 0, 264 + interrupt_sel__mes_query_status__process_status = 1, 265 + interrupt_sel__mes_query_status__queue_status = 2 266 + }; 267 + 268 + enum query_status_command_enum { 269 + command__mes_query_status__interrupt_only = 0, 270 + command__mes_query_status__fence_only_immediate = 1, 271 + command__mes_query_status__fence_only_after_write_ack = 2, 272 + 
command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3 273 + }; 274 + 275 + enum query_status_engine_sel_enum { 276 + engine_sel__mes_query_status__compute = 0, 277 + engine_sel__mes_query_status__sdma0_queue = 2, 278 + engine_sel__mes_query_status__sdma1_queue = 3 279 + }; 280 + 281 + struct pm4_query_status { 282 + union { 283 + union PM4_MES_TYPE_3_HEADER header; /* header */ 284 + uint32_t ordinal1; 285 + }; 286 + 287 + union { 288 + struct { 289 + uint32_t context_id:28; 290 + enum query_status_interrupt_sel_enum interrupt_sel:2; 291 + enum query_status_command_enum command:2; 292 + } bitfields2; 293 + uint32_t ordinal2; 294 + }; 295 + 296 + union { 297 + struct { 298 + uint32_t pasid:16; 299 + uint32_t reserved1:16; 300 + } bitfields3a; 301 + struct { 302 + uint32_t reserved2:2; 303 + uint32_t doorbell_offset:21; 304 + uint32_t reserved3:3; 305 + enum query_status_engine_sel_enum engine_sel:3; 306 + uint32_t reserved4:3; 307 + } bitfields3b; 308 + uint32_t ordinal3; 309 + }; 310 + 311 + uint32_t addr_lo; 312 + uint32_t addr_hi; 313 + uint32_t data_lo; 314 + uint32_t data_hi; 315 + }; 316 + #endif 317 + 318 + /*--------------------MES_UNMAP_QUEUES--------------------*/ 319 + 320 + #ifndef PM4_MES_UNMAP_QUEUES_DEFINED 321 + #define PM4_MES_UNMAP_QUEUES_DEFINED 322 + enum unmap_queues_action_enum { 323 + action__mes_unmap_queues__preempt_queues = 0, 324 + action__mes_unmap_queues__reset_queues = 1, 325 + action__mes_unmap_queues__disable_process_queues = 2 326 + }; 327 + 328 + enum unmap_queues_queue_sel_enum { 329 + queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0, 330 + queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1, 331 + queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2 332 + }; 333 + 334 + enum unmap_queues_engine_sel_enum { 335 + engine_sel__mes_unmap_queues__compute = 0, 336 + engine_sel__mes_unmap_queues__sdma0 = 2, 337 + engine_sel__mes_unmap_queues__sdma1 = 3 338 + }; 339 + 340 + 
struct pm4_unmap_queues { 341 + union { 342 + union PM4_MES_TYPE_3_HEADER header; /* header */ 343 + uint32_t ordinal1; 344 + }; 345 + 346 + union { 347 + struct { 348 + enum unmap_queues_action_enum action:2; 349 + uint32_t reserved1:2; 350 + enum unmap_queues_queue_sel_enum queue_sel:2; 351 + uint32_t reserved2:20; 352 + enum unmap_queues_engine_sel_enum engine_sel:3; 353 + uint32_t num_queues:3; 354 + } bitfields2; 355 + uint32_t ordinal2; 356 + }; 357 + 358 + union { 359 + struct { 360 + uint32_t pasid:16; 361 + uint32_t reserved3:16; 362 + } bitfields3a; 363 + struct { 364 + uint32_t reserved4:2; 365 + uint32_t doorbell_offset0:21; 366 + uint32_t reserved5:9; 367 + } bitfields3b; 368 + uint32_t ordinal3; 369 + }; 370 + 371 + union { 372 + struct { 373 + uint32_t reserved6:2; 374 + uint32_t doorbell_offset1:21; 375 + uint32_t reserved7:9; 376 + } bitfields4; 377 + uint32_t ordinal4; 378 + }; 379 + 380 + union { 381 + struct { 382 + uint32_t reserved8:2; 383 + uint32_t doorbell_offset2:21; 384 + uint32_t reserved9:9; 385 + } bitfields5; 386 + uint32_t ordinal5; 387 + }; 388 + 389 + union { 390 + struct { 391 + uint32_t reserved10:2; 392 + uint32_t doorbell_offset3:21; 393 + uint32_t reserved11:9; 394 + } bitfields6; 395 + uint32_t ordinal6; 396 + }; 397 + 398 + }; 399 + #endif 400 + 401 + enum { 402 + CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014 403 + }; 404 + 405 + #endif /* KFD_PM4_HEADERS_H_ */
+107
drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
··· 1 + /* 2 + * Copyright 2014 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + 25 + #ifndef KFD_PM4_OPCODES_H 26 + #define KFD_PM4_OPCODES_H 27 + 28 + enum it_opcode_type { 29 + IT_NOP = 0x10, 30 + IT_SET_BASE = 0x11, 31 + IT_CLEAR_STATE = 0x12, 32 + IT_INDEX_BUFFER_SIZE = 0x13, 33 + IT_DISPATCH_DIRECT = 0x15, 34 + IT_DISPATCH_INDIRECT = 0x16, 35 + IT_ATOMIC_GDS = 0x1D, 36 + IT_OCCLUSION_QUERY = 0x1F, 37 + IT_SET_PREDICATION = 0x20, 38 + IT_REG_RMW = 0x21, 39 + IT_COND_EXEC = 0x22, 40 + IT_PRED_EXEC = 0x23, 41 + IT_DRAW_INDIRECT = 0x24, 42 + IT_DRAW_INDEX_INDIRECT = 0x25, 43 + IT_INDEX_BASE = 0x26, 44 + IT_DRAW_INDEX_2 = 0x27, 45 + IT_CONTEXT_CONTROL = 0x28, 46 + IT_INDEX_TYPE = 0x2A, 47 + IT_DRAW_INDIRECT_MULTI = 0x2C, 48 + IT_DRAW_INDEX_AUTO = 0x2D, 49 + IT_NUM_INSTANCES = 0x2F, 50 + IT_DRAW_INDEX_MULTI_AUTO = 0x30, 51 + IT_INDIRECT_BUFFER_CNST = 0x33, 52 + IT_STRMOUT_BUFFER_UPDATE = 0x34, 53 + IT_DRAW_INDEX_OFFSET_2 = 0x35, 54 + IT_DRAW_PREAMBLE = 0x36, 55 + IT_WRITE_DATA = 0x37, 56 + IT_DRAW_INDEX_INDIRECT_MULTI = 0x38, 57 + IT_MEM_SEMAPHORE = 0x39, 58 + IT_COPY_DW = 0x3B, 59 + IT_WAIT_REG_MEM = 0x3C, 60 + IT_INDIRECT_BUFFER = 0x3F, 61 + IT_COPY_DATA = 0x40, 62 + IT_PFP_SYNC_ME = 0x42, 63 + IT_SURFACE_SYNC = 0x43, 64 + IT_COND_WRITE = 0x45, 65 + IT_EVENT_WRITE = 0x46, 66 + IT_EVENT_WRITE_EOP = 0x47, 67 + IT_EVENT_WRITE_EOS = 0x48, 68 + IT_RELEASE_MEM = 0x49, 69 + IT_PREAMBLE_CNTL = 0x4A, 70 + IT_DMA_DATA = 0x50, 71 + IT_ACQUIRE_MEM = 0x58, 72 + IT_REWIND = 0x59, 73 + IT_LOAD_UCONFIG_REG = 0x5E, 74 + IT_LOAD_SH_REG = 0x5F, 75 + IT_LOAD_CONFIG_REG = 0x60, 76 + IT_LOAD_CONTEXT_REG = 0x61, 77 + IT_SET_CONFIG_REG = 0x68, 78 + IT_SET_CONTEXT_REG = 0x69, 79 + IT_SET_CONTEXT_REG_INDIRECT = 0x73, 80 + IT_SET_SH_REG = 0x76, 81 + IT_SET_SH_REG_OFFSET = 0x77, 82 + IT_SET_QUEUE_REG = 0x78, 83 + IT_SET_UCONFIG_REG = 0x79, 84 + IT_SCRATCH_RAM_WRITE = 0x7D, 85 + IT_SCRATCH_RAM_READ = 0x7E, 86 + IT_LOAD_CONST_RAM = 0x80, 87 + IT_WRITE_CONST_RAM = 0x81, 88 + IT_DUMP_CONST_RAM = 0x83, 89 + IT_INCREMENT_CE_COUNTER = 0x84, 90 + 
IT_INCREMENT_DE_COUNTER = 0x85, 91 + IT_WAIT_ON_CE_COUNTER = 0x86, 92 + IT_WAIT_ON_DE_COUNTER_DIFF = 0x88, 93 + IT_SWITCH_BUFFER = 0x8B, 94 + IT_SET_RESOURCES = 0xA0, 95 + IT_MAP_PROCESS = 0xA1, 96 + IT_MAP_QUEUES = 0xA2, 97 + IT_UNMAP_QUEUES = 0xA3, 98 + IT_QUERY_STATUS = 0xA4, 99 + IT_RUN_LIST = 0xA5, 100 + }; 101 + 102 + #define PM4_TYPE_0 0 103 + #define PM4_TYPE_2 2 104 + #define PM4_TYPE_3 3 105 + 106 + #endif /* KFD_PM4_OPCODES_H */ 107 +
+35 -1
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 35 35 36 36 #define KFD_SYSFS_FILE_MODE 0444 37 37 38 + /* 39 + * When working with cp scheduler we should assign the HIQ manually or via 40 + * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot 41 + * definitions for Kaveri. In Kaveri only the first ME queues participates 42 + * in the cp scheduling taking that in mind we set the HIQ slot in the 43 + * second ME. 44 + */ 45 + #define KFD_CIK_HIQ_PIPE 4 46 + #define KFD_CIK_HIQ_QUEUE 0 47 + 38 48 /* GPU ID hash width in bits */ 39 49 #define KFD_GPU_ID_HASH_WIDTH 16 40 50 ··· 66 56 67 57 #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 68 58 #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 59 + 60 + #define KFD_KERNEL_QUEUE_SIZE 2048 61 + 62 + enum cache_policy { 63 + cache_policy_coherent, 64 + cache_policy_noncoherent 65 + }; 69 66 70 67 struct kfd_device_info { 71 68 unsigned int max_pasid_bits; ··· 105 88 106 89 struct kgd2kfd_shared_resources shared_resources; 107 90 108 - bool init_complete; 91 + /* QCM Device instance */ 92 + struct device_queue_manager *dqm; 109 93 94 + bool init_complete; 110 95 }; 111 96 112 97 /* KGD2KFD callbacks */ ··· 402 383 403 384 /* amdkfd Apertures */ 404 385 int kfd_init_apertures(struct kfd_process *process); 386 + 387 + /* Queue Context Management */ 388 + int init_queue(struct queue **q, struct queue_properties properties); 389 + void uninit_queue(struct queue *q); 390 + void print_queue(struct queue *q); 391 + 392 + /* Packet Manager */ 393 + 394 + struct packet_manager { 395 + struct device_queue_manager *dqm; 396 + struct kernel_queue *priv_queue; 397 + struct mutex lock; 398 + bool allocated; 399 + struct kfd_mem_obj *ib_buffer_obj; 400 + }; 405 401 406 402 uint64_t kfd_get_number_elems(struct kfd_dev *kfd); 407 403 phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,