Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdkfd: remove unneeded unmap single queue option

The KFD only unmaps all queues, all dynamic queues or all process queues
since RUN_LIST is mapped with all KFD queues.

There's no need to provide a single-queue unmap option, so remove it.

Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Jonathan Kim and committed by
Alex Deucher
d2cb0b21 7258fa31

+22 -76
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 1555 1555 if (!dqm->active_runlist) 1556 1556 return retval; 1557 1557 1558 - retval = pm_send_unmap_queue(&dqm->packet_mgr, KFD_QUEUE_TYPE_COMPUTE, 1559 - filter, filter_param, reset, 0); 1558 + retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset); 1560 1559 if (retval) 1561 1560 return retval; 1562 1561
+3 -5
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
··· 369 369 return retval; 370 370 } 371 371 372 - int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, 372 + int pm_send_unmap_queue(struct packet_manager *pm, 373 373 enum kfd_unmap_queues_filter filter, 374 - uint32_t filter_param, bool reset, 375 - unsigned int sdma_engine) 374 + uint32_t filter_param, bool reset) 376 375 { 377 376 uint32_t *buffer, size; 378 377 int retval = 0; ··· 386 387 goto out; 387 388 } 388 389 389 - retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param, 390 - reset, sdma_engine); 390 + retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset); 391 391 if (!retval) 392 392 kq_submit_packet(pm->priv_queue); 393 393 else
+6 -34
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
··· 247 247 } 248 248 249 249 static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, 250 - enum kfd_queue_type type, 251 250 enum kfd_unmap_queues_filter filter, 252 - uint32_t filter_param, bool reset, 253 - unsigned int sdma_engine) 251 + uint32_t filter_param, bool reset) 254 252 { 255 253 struct pm4_mes_unmap_queues *packet; 256 254 ··· 257 259 258 260 packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, 259 261 sizeof(struct pm4_mes_unmap_queues)); 260 - switch (type) { 261 - case KFD_QUEUE_TYPE_COMPUTE: 262 - case KFD_QUEUE_TYPE_DIQ: 263 - packet->bitfields2.extended_engine_sel = 264 - extended_engine_sel__mes_unmap_queues__legacy_engine_sel; 265 - packet->bitfields2.engine_sel = 266 - engine_sel__mes_unmap_queues__compute; 267 - break; 268 - case KFD_QUEUE_TYPE_SDMA: 269 - case KFD_QUEUE_TYPE_SDMA_XGMI: 270 - if (sdma_engine < 2) { 271 - packet->bitfields2.extended_engine_sel = 272 - extended_engine_sel__mes_unmap_queues__legacy_engine_sel; 273 - packet->bitfields2.engine_sel = 274 - engine_sel__mes_unmap_queues__sdma0 + sdma_engine; 275 - } else { 276 - packet->bitfields2.extended_engine_sel = 277 - extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel; 278 - packet->bitfields2.engine_sel = sdma_engine; 279 - } 280 - break; 281 - default: 282 - WARN(1, "queue type %d", type); 283 - return -EINVAL; 284 - } 262 + 263 + packet->bitfields2.extended_engine_sel = 264 + extended_engine_sel__mes_unmap_queues__legacy_engine_sel; 265 + packet->bitfields2.engine_sel = 266 + engine_sel__mes_unmap_queues__compute; 285 267 286 268 if (reset) 287 269 packet->bitfields2.action = ··· 271 293 action__mes_unmap_queues__preempt_queues; 272 294 273 295 switch (filter) { 274 - case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: 275 - packet->bitfields2.queue_sel = 276 - queue_sel__mes_unmap_queues__perform_request_on_specified_queues; 277 - packet->bitfields2.num_queues = 1; 278 - packet->bitfields3b.doorbell_offset0 = filter_param; 279 - break; 280 296 case 
KFD_UNMAP_QUEUES_FILTER_BY_PASID: 281 297 packet->bitfields2.queue_sel = 282 298 queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+3 -23
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
··· 198 198 } 199 199 200 200 static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, 201 - enum kfd_queue_type type, 202 201 enum kfd_unmap_queues_filter filter, 203 - uint32_t filter_param, bool reset, 204 - unsigned int sdma_engine) 202 + uint32_t filter_param, bool reset) 205 203 { 206 204 struct pm4_mes_unmap_queues *packet; 207 205 ··· 208 210 209 211 packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, 210 212 sizeof(struct pm4_mes_unmap_queues)); 211 - switch (type) { 212 - case KFD_QUEUE_TYPE_COMPUTE: 213 - case KFD_QUEUE_TYPE_DIQ: 214 - packet->bitfields2.engine_sel = 213 + 214 + packet->bitfields2.engine_sel = 215 215 engine_sel__mes_unmap_queues__compute; 216 - break; 217 - case KFD_QUEUE_TYPE_SDMA: 218 - case KFD_QUEUE_TYPE_SDMA_XGMI: 219 - packet->bitfields2.engine_sel = 220 - engine_sel__mes_unmap_queues__sdma0 + sdma_engine; 221 - break; 222 - default: 223 - WARN(1, "queue type %d", type); 224 - return -EINVAL; 225 - } 226 216 227 217 if (reset) 228 218 packet->bitfields2.action = ··· 220 234 action__mes_unmap_queues__preempt_queues; 221 235 222 236 switch (filter) { 223 - case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: 224 - packet->bitfields2.queue_sel = 225 - queue_sel__mes_unmap_queues__perform_request_on_specified_queues; 226 - packet->bitfields2.num_queues = 1; 227 - packet->bitfields3b.doorbell_offset0 = filter_param; 228 - break; 229 237 case KFD_UNMAP_QUEUES_FILTER_BY_PASID: 230 238 packet->bitfields2.queue_sel = 231 239 queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
+9 -12
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 360 360 /** 361 361 * enum kfd_unmap_queues_filter - Enum for queue filters. 362 362 * 363 - * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue. 364 - * 365 363 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the 366 364 * running queues list. 365 + * 366 + * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues 367 + * in the run list. 367 368 * 368 369 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belongs to 369 370 * specific process. 370 371 * 371 372 */ 372 373 enum kfd_unmap_queues_filter { 373 - KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE, 374 - KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 375 - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 376 - KFD_UNMAP_QUEUES_FILTER_BY_PASID 374 + KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1, 375 + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2, 376 + KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3 377 377 }; 378 378 379 379 /** ··· 1247 1247 int (*map_queues)(struct packet_manager *pm, uint32_t *buffer, 1248 1248 struct queue *q, bool is_static); 1249 1249 int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer, 1250 - enum kfd_queue_type type, 1251 1250 enum kfd_unmap_queues_filter mode, 1252 - uint32_t filter_param, bool reset, 1253 - unsigned int sdma_engine); 1251 + uint32_t filter_param, bool reset); 1254 1252 int (*query_status)(struct packet_manager *pm, uint32_t *buffer, 1255 1253 uint64_t fence_address, uint64_t fence_value); 1256 1254 int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); ··· 1275 1277 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, 1276 1278 uint64_t fence_value); 1277 1279 1278 - int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, 1280 + int pm_send_unmap_queue(struct packet_manager *pm, 1279 1281 enum kfd_unmap_queues_filter mode, 1280 - uint32_t filter_param, bool reset, 1281 - unsigned int sdma_engine); 1282 + uint32_t filter_param, bool reset); 1282 1283 1283 1284 void pm_release_ib(struct packet_manager 
*pm); 1284 1285