Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdkfd: Clean up KFD style errors and warnings v2

Using checkpatch.pl -f <file> showed a number of style issues. This
patch addresses as many of them as possible. Some long lines have been
left for readability, but attempts to minimize them have been made.

v2: Broke long lines in gfx_v7 get_fw_version

Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>

Authored by Kent Russell; committed by Oded Gabbay.
8eabaf54 438e29a2

+91 -78
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 28 28 #include <linux/module.h> 29 29 30 30 const struct kgd2kfd_calls *kgd2kfd; 31 - bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); 31 + bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**); 32 32 33 33 int amdgpu_amdkfd_init(void) 34 34 { 35 35 int ret; 36 36 37 37 #if defined(CONFIG_HSA_AMD_MODULE) 38 - int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); 38 + int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**); 39 39 40 40 kgd2kfd_init_p = symbol_request(kgd2kfd_init); 41 41
+8 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
··· 566 566 switch (type) { 567 567 case KGD_ENGINE_PFP: 568 568 hdr = (const union amdgpu_firmware_header *) 569 - adev->gfx.pfp_fw->data; 569 + adev->gfx.pfp_fw->data; 570 570 break; 571 571 572 572 case KGD_ENGINE_ME: 573 573 hdr = (const union amdgpu_firmware_header *) 574 - adev->gfx.me_fw->data; 574 + adev->gfx.me_fw->data; 575 575 break; 576 576 577 577 case KGD_ENGINE_CE: 578 578 hdr = (const union amdgpu_firmware_header *) 579 - adev->gfx.ce_fw->data; 579 + adev->gfx.ce_fw->data; 580 580 break; 581 581 582 582 case KGD_ENGINE_MEC1: 583 583 hdr = (const union amdgpu_firmware_header *) 584 - adev->gfx.mec_fw->data; 584 + adev->gfx.mec_fw->data; 585 585 break; 586 586 587 587 case KGD_ENGINE_MEC2: 588 588 hdr = (const union amdgpu_firmware_header *) 589 - adev->gfx.mec2_fw->data; 589 + adev->gfx.mec2_fw->data; 590 590 break; 591 591 592 592 case KGD_ENGINE_RLC: 593 593 hdr = (const union amdgpu_firmware_header *) 594 - adev->gfx.rlc_fw->data; 594 + adev->gfx.rlc_fw->data; 595 595 break; 596 596 597 597 case KGD_ENGINE_SDMA1: 598 598 hdr = (const union amdgpu_firmware_header *) 599 - adev->sdma.instance[0].fw->data; 599 + adev->sdma.instance[0].fw->data; 600 600 break; 601 601 602 602 case KGD_ENGINE_SDMA2: 603 603 hdr = (const union amdgpu_firmware_header *) 604 - adev->sdma.instance[1].fw->data; 604 + adev->sdma.instance[1].fw->data; 605 605 break; 606 606 607 607 default:
+8 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
··· 454 454 switch (type) { 455 455 case KGD_ENGINE_PFP: 456 456 hdr = (const union amdgpu_firmware_header *) 457 - adev->gfx.pfp_fw->data; 457 + adev->gfx.pfp_fw->data; 458 458 break; 459 459 460 460 case KGD_ENGINE_ME: 461 461 hdr = (const union amdgpu_firmware_header *) 462 - adev->gfx.me_fw->data; 462 + adev->gfx.me_fw->data; 463 463 break; 464 464 465 465 case KGD_ENGINE_CE: 466 466 hdr = (const union amdgpu_firmware_header *) 467 - adev->gfx.ce_fw->data; 467 + adev->gfx.ce_fw->data; 468 468 break; 469 469 470 470 case KGD_ENGINE_MEC1: 471 471 hdr = (const union amdgpu_firmware_header *) 472 - adev->gfx.mec_fw->data; 472 + adev->gfx.mec_fw->data; 473 473 break; 474 474 475 475 case KGD_ENGINE_MEC2: 476 476 hdr = (const union amdgpu_firmware_header *) 477 - adev->gfx.mec2_fw->data; 477 + adev->gfx.mec2_fw->data; 478 478 break; 479 479 480 480 case KGD_ENGINE_RLC: 481 481 hdr = (const union amdgpu_firmware_header *) 482 - adev->gfx.rlc_fw->data; 482 + adev->gfx.rlc_fw->data; 483 483 break; 484 484 485 485 case KGD_ENGINE_SDMA1: 486 486 hdr = (const union amdgpu_firmware_header *) 487 - adev->sdma.instance[0].fw->data; 487 + adev->sdma.instance[0].fw->data; 488 488 break; 489 489 490 490 case KGD_ENGINE_SDMA2: 491 491 hdr = (const union amdgpu_firmware_header *) 492 - adev->sdma.instance[1].fw->data; 492 + adev->sdma.instance[1].fw->data; 493 493 break; 494 494 495 495 default:
+4 -2
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 782 782 "scratch_limit %llX\n", pdd->scratch_limit); 783 783 784 784 args->num_of_nodes++; 785 - } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && 785 + } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != 786 + NULL && 786 787 (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); 787 788 } 788 789 ··· 849 848 } 850 849 851 850 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ 852 - [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} 851 + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ 852 + .cmd_drv = 0, .name = #ioctl} 853 853 854 854 /** Ioctl table */ 855 855 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+4 -3
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
··· 313 313 return -EINVAL; 314 314 } 315 315 316 - for (i = 0 ; i < adw_info->num_watch_points ; i++) { 316 + for (i = 0; i < adw_info->num_watch_points; i++) { 317 317 dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo, 318 318 &cntl, i, pdd->qpd.vmid); 319 319 ··· 623 623 return status; 624 624 } 625 625 626 - /* we do not control the VMID in DIQ,so reset it to a known value */ 626 + /* we do not control the VMID in DIQ, so reset it to a known value */ 627 627 reg_sq_cmd.bits.vm_id = 0; 628 628 629 629 pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *"); ··· 810 810 811 811 /* Scan all registers in the range ATC_VMID8_PASID_MAPPING .. 812 812 * ATC_VMID15_PASID_MAPPING 813 - * to check which VMID the current process is mapped to. */ 813 + * to check which VMID the current process is mapped to. 814 + */ 814 815 815 816 for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { 816 817 if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
+13 -14
drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
··· 30 30 #pragma pack(push, 4) 31 31 32 32 enum HSA_DBG_WAVEOP { 33 - HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */ 34 - HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */ 35 - HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */ 36 - HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter 37 - debug mode */ 38 - HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take 39 - a trap */ 33 + HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */ 34 + HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */ 35 + HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */ 36 + HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter dbg mode */ 37 + HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take a trap */ 40 38 HSA_DBG_NUM_WAVEOP = 5, 41 39 HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF 42 40 }; ··· 79 81 uint32_t UserData:8; /* user data */ 80 82 uint32_t ShaderArray:1; /* Shader array */ 81 83 uint32_t Priv:1; /* Privileged */ 82 - uint32_t Reserved0:4; /* This field is reserved, 83 - should be 0 */ 84 + uint32_t Reserved0:4; /* Reserved, should be 0 */ 84 85 uint32_t WaveId:4; /* wave id */ 85 86 uint32_t SIMD:2; /* SIMD id */ 86 87 uint32_t HSACU:4; /* Compute unit */ 87 88 uint32_t ShaderEngine:2;/* Shader engine */ 88 89 uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */ 89 - uint32_t Reserved1:4; /* This field is reserved, 90 - should be 0 */ 90 + uint32_t Reserved1:4; /* Reserved, should be 0 */ 91 91 } ui32; 92 92 uint32_t Value; 93 93 }; ··· 117 121 * in the user mode instruction stream. The OS scheduler event is typically 118 122 * associated and signaled by an interrupt issued by the GPU, but other HSA 119 123 * system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced 120 - * by the KFD by this mechanism, too. */ 124 + * by the KFD by this mechanism, too. 
125 + */ 121 126 122 127 /* these are the new definitions for events */ 123 128 enum HSA_EVENTTYPE { 124 129 HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */ 125 130 HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */ 126 131 HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change 127 - (start/stop) */ 132 + * (start/stop) 133 + */ 128 134 HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */ 129 135 HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */ 130 136 HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */ 131 137 HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */ 132 138 HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state 133 - (EOP pm4) */ 139 + * (EOP pm4) 140 + */ 134 141 /* ... */ 135 142 HSA_EVENTTYPE_MAXID, 136 143 HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
+3 -2
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 155 155 dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n", 156 156 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0, 157 157 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0, 158 - (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0); 158 + (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) 159 + != 0); 159 160 return false; 160 161 } 161 162 162 163 pasid_limit = min_t(unsigned int, 163 - (unsigned int)1 << kfd->device_info->max_pasid_bits, 164 + (unsigned int)(1 << kfd->device_info->max_pasid_bits), 164 165 iommu_info.max_pasids); 165 166 /* 166 167 * last pasid is used for kernel queues doorbells
+5 -3
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 216 216 217 217 set = false; 218 218 219 - for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm); 219 + for (pipe = dqm->next_pipe_to_allocate, i = 0; 220 + i < get_pipes_per_mec(dqm); 220 221 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) { 221 222 222 223 if (!is_pipe_enabled(dqm, 0, pipe)) ··· 670 669 671 670 /* This situation may be hit in the future if a new HW 672 671 * generation exposes more than 64 queues. If so, the 673 - * definition of res.queue_mask needs updating */ 672 + * definition of res.queue_mask needs updating 673 + */ 674 674 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) { 675 675 pr_err("Invalid queue enabled by amdgpu: %d\n", i); 676 676 break; ··· 892 890 } 893 891 894 892 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 895 - dqm->sdma_queue_count++; 893 + dqm->sdma_queue_count++; 896 894 /* 897 895 * Unconditionally increment this counter, regardless of the queue's 898 896 * type or whether the queue is active.
+3 -2
drivers/gpu/drm/amd/amdkfd/kfd_events.c
··· 194 194 page->free_slots++; 195 195 196 196 /* We don't free signal pages, they are retained by the process 197 - * and reused until it exits. */ 197 + * and reused until it exits. 198 + */ 198 199 } 199 200 200 201 static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p, ··· 585 584 * search faster. 586 585 */ 587 586 struct signal_page *page; 588 - unsigned i; 587 + unsigned int i; 589 588 590 589 list_for_each_entry(page, &p->signal_event_pages, event_pages) 591 590 for (i = 0; i < SLOTS_PER_PAGE; i++)
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
··· 179 179 bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry) 180 180 { 181 181 /* integer and bitwise OR so there is no boolean short-circuiting */ 182 - unsigned wanted = 0; 182 + unsigned int wanted = 0; 183 183 184 184 wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev, 185 185 ih_ring_entry);
+2 -1
drivers/gpu/drm/amd/amdkfd/kfd_module.c
··· 61 61 62 62 static int amdkfd_init_completed; 63 63 64 - int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f) 64 + int kgd2kfd_init(unsigned int interface_version, 65 + const struct kgd2kfd_calls **g2f) 65 66 { 66 67 if (!amdkfd_init_completed) 67 68 return -EPROBE_DEFER;
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
··· 193 193 194 194 m->cp_hqd_vmid = q->vmid; 195 195 196 - if (q->format == KFD_QUEUE_FORMAT_AQL) { 196 + if (q->format == KFD_QUEUE_FORMAT_AQL) 197 197 m->cp_hqd_pq_control |= NO_UPDATE_RPTR; 198 - } 199 198 200 199 m->cp_hqd_active = 0; 201 200 q->is_active = false;
+2 -3
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
··· 458 458 mutex_lock(&pm->lock); 459 459 pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, 460 460 sizeof(*packet) / sizeof(uint32_t), 461 - (unsigned int **)&packet); 461 + (unsigned int **)&packet); 462 462 if (packet == NULL) { 463 463 mutex_unlock(&pm->lock); 464 464 pr_err("kfd: failed to allocate buffer on kernel queue\n"); ··· 530 530 fail_acquire_packet_buffer: 531 531 mutex_unlock(&pm->lock); 532 532 fail_create_runlist_ib: 533 - if (pm->allocated) 534 - pm_release_ib(pm); 533 + pm_release_ib(pm); 535 534 return retval; 536 535 } 537 536
+2 -1
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
··· 32 32 { 33 33 pasid_limit = KFD_MAX_NUM_OF_PROCESSES; 34 34 35 - pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); 35 + pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), 36 + GFP_KERNEL); 36 37 if (!pasid_bitmap) 37 38 return -ENOMEM; 38 39
+8 -8
drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
··· 28 28 #define PM4_MES_HEADER_DEFINED 29 29 union PM4_MES_TYPE_3_HEADER { 30 30 struct { 31 - uint32_t reserved1:8; /* < reserved */ 32 - uint32_t opcode:8; /* < IT opcode */ 33 - uint32_t count:14; /* < number of DWORDs - 1 34 - * in the information body. 35 - */ 36 - uint32_t type:2; /* < packet identifier. 37 - * It should be 3 for type 3 packets 38 - */ 31 + /* reserved */ 32 + uint32_t reserved1:8; 33 + /* IT opcode */ 34 + uint32_t opcode:8; 35 + /* number of DWORDs - 1 in the information body */ 36 + uint32_t count:14; 37 + /* packet identifier. It should be 3 for type 3 packets */ 38 + uint32_t type:2; 39 39 }; 40 40 uint32_t u32all; 41 41 };
+6 -4
drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
··· 30 30 struct { 31 31 uint32_t reserved1 : 8; /* < reserved */ 32 32 uint32_t opcode : 8; /* < IT opcode */ 33 - uint32_t count : 14;/* < number of DWORDs - 1 in the 34 - information body. */ 35 - uint32_t type : 2; /* < packet identifier. 36 - It should be 3 for type 3 packets */ 33 + uint32_t count : 14;/* < Number of DWORDS - 1 in the 34 + * information body 35 + */ 36 + uint32_t type : 2; /* < packet identifier 37 + * It should be 3 for type 3 packets 38 + */ 37 39 }; 38 40 uint32_t u32All; 39 41 };
+13 -10
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 294 294 * @write_ptr: Defines the number of dwords written to the ring buffer. 295 295 * 296 296 * @doorbell_ptr: This field aim is to notify the H/W of new packet written to 297 - * the queue ring buffer. This field should be similar to write_ptr and the user 298 - * should update this field after he updated the write_ptr. 297 + * the queue ring buffer. This field should be similar to write_ptr and the 298 + * user should update this field after he updated the write_ptr. 299 299 * 300 300 * @doorbell_off: The doorbell offset in the doorbell pci-bar. 301 301 * 302 - * @is_interop: Defines if this is a interop queue. Interop queue means that the 303 - * queue can access both graphics and compute resources. 302 + * @is_interop: Defines if this is a interop queue. Interop queue means that 303 + * the queue can access both graphics and compute resources. 304 304 * 305 305 * @is_active: Defines if the queue is active or not. 306 306 * ··· 352 352 * @properties: The queue properties. 353 353 * 354 354 * @mec: Used only in no cp scheduling mode and identifies to micro engine id 355 - * that the queue should be execute on. 355 + * that the queue should be execute on. 356 356 * 357 - * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id. 357 + * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe 358 + * id. 358 359 * 359 360 * @queue: Used only in no cp scheduliong mode and identifies the queue's slot. 360 361 * ··· 521 520 struct mutex event_mutex; 522 521 /* All events in process hashed by ID, linked on kfd_event.events. */ 523 522 DECLARE_HASHTABLE(events, 4); 524 - struct list_head signal_event_pages; /* struct slot_page_header. 
525 - event_pages */ 523 + /* struct slot_page_header.event_pages */ 524 + struct list_head signal_event_pages; 526 525 u32 next_nonsignal_event_id; 527 526 size_t signal_event_count; 528 527 }; ··· 560 559 struct kfd_process *p); 561 560 562 561 /* Process device data iterator */ 563 - struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p); 564 - struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p, 562 + struct kfd_process_device *kfd_get_first_process_device_data( 563 + struct kfd_process *p); 564 + struct kfd_process_device *kfd_get_next_process_device_data( 565 + struct kfd_process *p, 565 566 struct kfd_process_device *pdd); 566 567 bool kfd_has_process_device_data(struct kfd_process *p); 567 568
+4 -2
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 449 449 mutex_unlock(&p->mutex); 450 450 } 451 451 452 - struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) 452 + struct kfd_process_device *kfd_get_first_process_device_data( 453 + struct kfd_process *p) 453 454 { 454 455 return list_first_entry(&p->per_device_data, 455 456 struct kfd_process_device, 456 457 per_device_list); 457 458 } 458 459 459 - struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p, 460 + struct kfd_process_device *kfd_get_next_process_device_data( 461 + struct kfd_process *p, 460 462 struct kfd_process_device *pdd) 461 463 { 462 464 if (list_is_last(&pdd->per_device_list, &p->per_device_data))
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 1170 1170 * GPU vBIOS 1171 1171 */ 1172 1172 1173 - /* 1174 - * Update the SYSFS tree, since we added another topology device 1173 + /* Update the SYSFS tree, since we added another topology 1174 + * device 1175 1175 */ 1176 1176 if (kfd_topology_update_sysfs() < 0) 1177 1177 kfd_topology_release_sysfs();