Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c at v5.4-rc8 (348 lines, 11 kB)
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_opcodes.h"
#include "gc/gc_10_1_0_sh_mask.h"

static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size);
static void uninitialize_v10(struct kernel_queue *kq);
static void submit_packet_v10(struct kernel_queue *kq);

void kernel_queue_init_v10(struct kernel_queue_ops *ops)
{
	ops->initialize = initialize_v10;
	ops->uninitialize = uninitialize_v10;
	ops->submit_packet = submit_packet_v10;
}

/* Allocate and zero the end-of-pipe (EOP) buffer for this kernel queue */
static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size)
{
	int retval;

	retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
	if (retval != 0)
		return false;

	kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
	kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;

	memset(kq->eop_kernel_addr, 0, PAGE_SIZE);

	return true;
}

static void uninitialize_v10(struct kernel_queue *kq)
{
	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
}

/* Publish the new write pointer and ring the 64-bit doorbell */
static void submit_packet_v10(struct kernel_queue *kq)
{
	*kq->wptr64_kernel = kq->pending_wptr64;
	write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
				kq->pending_wptr64);
}

/* Build a PM4 MAP_PROCESS packet: PASID, memory apertures, trap handler
 * and page-table base for one process.
 */
static int pm_map_process_v10(struct packet_manager *pm,
		uint32_t *buffer, struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;

	packet = (struct pm4_mes_map_process *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields14.gds_size = qpd->gds_size;
	packet->bitfields14.num_gws = qpd->num_gws;
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;

	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	if (qpd->tba_addr) {
		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
		packet->sq_shader_tba_hi = (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT) |
			upper_32_bits(qpd->tba_addr >> 8);
		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
	}

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	packet->vm_context_page_table_base_addr_lo32 =
			lower_32_bits(vm_page_table_base_addr);
	packet->vm_context_page_table_base_addr_hi32 =
			upper_32_bits(vm_page_table_base_addr);

	return 0;
}

/* Build a PM4 RUN_LIST packet pointing the HWS at the runlist IB */
static int pm_runlist_v10(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;

	int concurrent_proc_cnt = 0;
	struct kfd_dev *kfd = pm->dqm->dev;

	/* Determine the number of processes to map together to HW:
	 * it can not exceed the number of VMIDs available to the
	 * scheduler, and it is determined by the smaller of the number
	 * of processes in the runlist and kfd module parameter
	 * hws_max_conc_proc.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	concurrent_proc_cnt = min(pm->dqm->processes_count,
			kfd->max_proc_per_quantum);

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
					sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->bitfields4.process_cnt = concurrent_proc_cnt;
	packet->ordinal2 = lower_32_bits(ib);
	packet->ib_base_hi = upper_32_bits(ib);

	return 0;
}

/* Build a PM4 MAP_QUEUES packet for a single compute, DIQ or SDMA queue */
static int pm_map_queues_v10(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		WARN(1, "queue type %d\n", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

/* Build a PM4 UNMAP_QUEUES packet that preempts or resets the queues
 * selected by the given filter.
 */
static int pm_unmap_queues_v10(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	struct pm4_mes_unmap_queues *packet;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		WARN(1, "queue type %d\n", type);
		break;
	}

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d\n", filter);
		break;
	}

	return 0;
}

/* Build a PM4 QUERY_STATUS packet that writes fence_value to fence_address
 * once the preceding packets have completed.
 */
static int pm_query_status_v10(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value)
{
	struct pm4_mes_query_status *packet;

	packet = (struct pm4_mes_query_status *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_query_status));

	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
		interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
		command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	return 0;
}

/* Build a PM4 RELEASE_MEM packet (end-of-pipe cache flush with interrupt);
 * returns the packet size in dwords.
 */
static int pm_release_mem_v10(uint64_t gpu_addr, uint32_t *buffer)
{
	struct pm4_mec_release_mem *packet;

	WARN_ON(!buffer);

	packet = (struct pm4_mec_release_mem *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mec_release_mem));

	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
					sizeof(struct pm4_mec_release_mem));

	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
	packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
	packet->bitfields2.tcl1_action_ena = 1;
	packet->bitfields2.tc_action_ena = 1;
	packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;

	packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
	packet->bitfields3.int_sel =
		int_sel__mec_release_mem__send_interrupt_after_write_confirm;

	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
	packet->address_hi = upper_32_bits(gpu_addr);

	packet->data_lo = 0;

	return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
}

const struct packet_manager_funcs kfd_v10_pm_funcs = {
	.map_process = pm_map_process_v10,
	.runlist = pm_runlist_v10,
	.set_resources = pm_set_resources_vi,
	.map_queues = pm_map_queues_v10,
	.unmap_queues = pm_unmap_queues_v10,
	.query_status = pm_query_status_v10,
	.release_mem = pm_release_mem_v10,
	.map_process_size = sizeof(struct pm4_mes_map_process),
	.runlist_size = sizeof(struct pm4_mes_runlist),
	.set_resources_size = sizeof(struct pm4_mes_set_resources),
	.map_queues_size = sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
	.query_status_size = sizeof(struct pm4_mes_query_status),
	.release_mem_size = sizeof(struct pm4_mec_release_mem)
};
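For context on how this table is consumed: the KFD packet manager fills a runlist indirect buffer by calling these callbacks back to back, writing one MAP_PROCESS packet per process followed by a MAP_QUEUES packet per active queue, and advancing its write pointer by the *_size fields declared above. The sketch below only illustrates that calling convention; fill_runlist_sketch is a hypothetical helper, and the real logic (including buffer-overrun checks, kernel-queue handling, and chained runlists) lives in pm_create_runlist_ib() in kfd_packet_manager.c.

/*
 * Hypothetical sketch of how kfd_v10_pm_funcs is driven while a runlist
 * IB is filled in. Each callback formats one PM4 packet at the current
 * buffer position; the caller advances by the size (converted from bytes
 * to dwords) declared in the funcs table. Error paths and the real
 * write-pointer bookkeeping are omitted.
 */
static int fill_runlist_sketch(struct packet_manager *pm,
			       struct qcm_process_device *qpd,
			       uint32_t *rl_buffer, size_t *rl_wptr)
{
	const struct packet_manager_funcs *f = &kfd_v10_pm_funcs;
	struct queue *q;
	int retval;

	/* One MAP_PROCESS packet describing the process's VM state */
	retval = f->map_process(pm, &rl_buffer[*rl_wptr], qpd);
	if (retval)
		return retval;
	*rl_wptr += f->map_process_size / sizeof(uint32_t);

	/* One MAP_QUEUES packet per active queue of that process;
	 * debug processes map their queues as static.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		retval = f->map_queues(pm, &rl_buffer[*rl_wptr], q,
				       qpd->is_debug);
		if (retval)
			return retval;
		*rl_wptr += f->map_queues_size / sizeof(uint32_t);
	}

	return 0;
}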