
drm/amdkfd: Change x==NULL/false references to !x

Upstream prefers the !x notation to x == NULL or x == false. Along those lines,
change the == true and != NULL references as well. Also make the existing !x
references consistent by dropping the surrounding parentheses, for readability.
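
For illustration, the whole patch reduces to the following before/after pattern (a standalone, compilable sketch; check_old/check_new and their parameters are hypothetical names, not code from the patch):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Before: explicit comparisons against NULL, 0 and false. */
static int check_old(void *dev, int status, bool ok)
{
	if (dev == NULL)
		return -EINVAL;
	if (status != 0)
		return status;
	if (ok == false)
		return -EINVAL;
	return 0;
}

/* After: the upstream-preferred !x notation and bare truth tests. */
static int check_new(void *dev, int status, bool ok)
{
	if (!dev)
		return -EINVAL;
	if (status)
		return status;
	if (!ok)
		return -EINVAL;
	return 0;
}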

Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>

Authored by Kent Russell, committed by Oded Gabbay · 4eacc26b 79775b62

15 files changed, 85 insertions(+), 87 deletions(-)
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | +11 -11

···
 
 	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL) {
+	if (!dev) {
 		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
 		return -EINVAL;
 	}
···
 	}
 
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 
 	mutex_lock(&p->mutex);
···
 	long status = 0;
 
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 
 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
···
 		return PTR_ERR(pdd);
 	}
 
-	if (dev->dbgmgr == NULL) {
+	if (!dev->dbgmgr) {
 		/* In case of a legal call, we have no dbgmgr yet */
 		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
 		if (create_ok) {
···
 	long status;
 
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 
 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
···
 	mutex_lock(kfd_get_dbgmgr_mutex());
 
 	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
-	if (status == 0) {
+	if (!status) {
 		kfd_dbgmgr_destroy(dev->dbgmgr);
 		dev->dbgmgr = NULL;
 	}
···
 	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
 
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 
 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
···
 		sizeof(wac_info.trapId);
 
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 
 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
···
 				"scratch_limit %llX\n", pdd->scratch_limit);
 
 			args->num_of_nodes++;
-		} while ((pdd = kfd_get_next_process_device_data(p, pdd)) !=
-				NULL &&
-			(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+
+			pdd = kfd_get_next_process_device_data(p, pdd);
+		} while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
 	}
 
 	mutex_unlock(&p->mutex);
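
The final hunk above is the only change that is more than a one-line substitution: the pointer advance moves out of the loop condition, so the termination test becomes a bare truth check. The same transformation on a standalone list walker (hypothetical node type and next_node() helper, not the kernel's process-device-data API):

/* Sketch of the do-while restructuring shown in the last hunk. */
struct node {
	struct node *next;
};

static struct node *next_node(struct node *n)
{
	return n->next;
}

static int count_nodes(struct node *head, int limit)
{
	struct node *n = head;
	int count = 0;

	if (!n)
		return 0;
	do {
		count++;
		/* Advance moved into the loop body... */
		n = next_node(n);
	} while (n && count < limit); /* ...so the test is a plain !x-style check */

	return count;
}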
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | +9 -11

···
 	status = kq->ops.acquire_packet_buffer(kq,
 			pq_packets_size_in_bytes / sizeof(uint32_t),
 			&ib_packet_buff);
-	if (status != 0) {
+	if (status) {
 		pr_err("acquire_packet_buffer failed\n");
 		return status;
 	}
···
 	status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t),
 					&mem_obj);
 
-	if (status != 0) {
+	if (status) {
 		pr_err("Failed to allocate GART memory\n");
 		kq->ops.rollback_packet(kq);
 		return status;
···
 
 	kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
 
-	if (kq == NULL) {
+	if (!kq) {
 		pr_err("Error getting DIQ\n");
 		pqm_destroy_queue(dbgdev->pqm, qid);
 		return -EFAULT;
···
 	addrLo->u32All = 0;
 	cntl->u32All = 0;
 
-	if (adw_info->watch_mask != NULL)
+	if (adw_info->watch_mask)
 		cntl->bitfields.mask =
 			(uint32_t) (adw_info->watch_mask[index] &
 				ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK);
···
 		return -EINVAL;
 	}
 
-	if ((adw_info->watch_mode == NULL) ||
-			(adw_info->watch_address == NULL)) {
+	if (!adw_info->watch_mode || !adw_info->watch_address) {
 		pr_err("adw_info fields are not valid\n");
 		return -EINVAL;
 	}
···
 		return -EINVAL;
 	}
 
-	if ((NULL == adw_info->watch_mode) ||
-			(NULL == adw_info->watch_address)) {
+	if (!adw_info->watch_mode || !adw_info->watch_address) {
 		pr_err("adw_info fields are not valid\n");
 		return -EINVAL;
 	}
 
 	status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
 
-	if (status != 0) {
+	if (status) {
 		pr_err("Failed to allocate GART memory\n");
 		return status;
 	}
···
 					packet_buff_uint,
 					ib_size);
 
-	if (status != 0) {
+	if (status) {
 		pr_err("Failed to submit IB to DIQ\n");
 		break;
 	}
···
 					packet_buff_uint,
 					ib_size);
 
-	if (status != 0)
+	if (status)
 		pr_err("Failed to submit IB to DIQ\n");
 
 	kfd_gtt_sa_free(dbgdev->dev, mem_obj);
drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | +2 -2

···
 
 void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
 {
-	if (pmgr != NULL) {
+	if (pmgr) {
 		kfd_dbgmgr_uninitialize(pmgr);
 		kfree(pmgr);
 	}
···
 	enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
 	struct kfd_dbgmgr *new_buff;
 
-	BUG_ON(pdev == NULL);
+	BUG_ON(!pdev);
 	BUG_ON(!pdev->init_complete);
 
 	new_buff = kfd_alloc_struct(new_buff);
drivers/gpu/drm/amd/amdkfd/kfd_device.c | +5 -5

···
 
 	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
 		if (supported_devices[i].did == did) {
-			BUG_ON(supported_devices[i].device_info == NULL);
+			BUG_ON(!supported_devices[i].device_info);
 			return supported_devices[i].device_info;
 		}
 	}
···
 			flags);
 
 	dev = kfd_device_by_pci_dev(pdev);
-	BUG_ON(dev == NULL);
+	BUG_ON(!dev);
 
 	kfd_signal_iommu_event(dev, pasid, address,
 			flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
···
 
 	kfd_doorbell_init(kfd);
 
-	if (kfd_topology_add_device(kfd) != 0) {
+	if (kfd_topology_add_device(kfd)) {
 		dev_err(kfd_device, "Error adding device to topology\n");
 		goto kfd_topology_add_device_error;
 	}
···
 		goto device_queue_manager_error;
 	}
 
-	if (kfd->dqm->ops.start(kfd->dqm) != 0) {
+	if (kfd->dqm->ops.start(kfd->dqm)) {
 		dev_err(kfd_device,
 			"Error starting queue manager for device %x:%x\n",
 			kfd->pdev->vendor, kfd->pdev->device);
···
 
 void kgd2kfd_suspend(struct kfd_dev *kfd)
 {
-	BUG_ON(kfd == NULL);
+	BUG_ON(!kfd);
 
 	if (kfd->init_complete) {
 		kfd->dqm->ops.stop(kfd->dqm);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | +25 -25

···
 
 	if (list_empty(&qpd->queues_list)) {
 		retval = allocate_vmid(dqm, qpd, q);
-		if (retval != 0) {
+		if (retval) {
 			mutex_unlock(&dqm->lock);
 			return retval;
 		}
···
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
 
-	if (retval != 0) {
+	if (retval) {
 		if (list_empty(&qpd->queues_list)) {
 			deallocate_vmid(dqm, qpd, q);
 			*allocated_vmid = 0;
···
 	BUG_ON(!dqm || !q || !qpd);
 
 	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
-	if (mqd == NULL)
+	if (!mqd)
 		return -ENOMEM;
 
 	retval = allocate_hqd(dqm, q);
-	if (retval != 0)
+	if (retval)
 		return retval;
 
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
-	if (retval != 0) {
+	if (retval) {
 		deallocate_hqd(dqm, q);
 		return retval;
 	}
···
 
 	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
 			q->queue, (uint32_t __user *) q->properties.write_ptr);
-	if (retval != 0) {
+	if (retval) {
 		deallocate_hqd(dqm, q);
 		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 		return retval;
···
 				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
 				q->pipe, q->queue);
 
-	if (retval != 0)
+	if (retval)
 		goto out;
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
···
 	mutex_lock(&dqm->lock);
 	mqd = dqm->ops.get_mqd_manager(dqm,
 			get_mqd_type_from_queue_type(q->properties.type));
-	if (mqd == NULL) {
+	if (!mqd) {
 		mutex_unlock(&dqm->lock);
 		return -ENOMEM;
 	}
···
 	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
 	if ((q->properties.is_active) && (!prev_active))
 		dqm->queue_count++;
-	else if ((!q->properties.is_active) && (prev_active))
+	else if (!q->properties.is_active && prev_active)
 		dqm->queue_count--;
 
 	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
···
 	mqd = dqm->mqds[type];
 	if (!mqd) {
 		mqd = mqd_manager_init(type, dqm->dev);
-		if (mqd == NULL)
+		if (!mqd)
 			pr_err("mqd manager is NULL");
 		dqm->mqds[type] = mqd;
 	}
···
 {
 	unsigned int i;
 
-	BUG_ON(dqm == NULL);
+	BUG_ON(!dqm);
 
 	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
 		if (is_pipe_enabled(dqm, 0, i))
···
 		return -ENOMEM;
 
 	retval = allocate_sdma_queue(dqm, &q->sdma_id);
-	if (retval != 0)
+	if (retval)
 		return retval;
 
 	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
···
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
-	if (retval != 0) {
+	if (retval) {
 		deallocate_sdma_queue(dqm, q->sdma_id);
 		return retval;
 	}
 
 	retval = mqd->load_mqd(mqd, q->mqd, 0,
 				0, NULL);
-	if (retval != 0) {
+	if (retval) {
 		deallocate_sdma_queue(dqm, q->sdma_id);
 		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 		return retval;
···
 	dqm->sdma_queue_count = 0;
 	dqm->active_runlist = false;
 	retval = dqm->ops_asic_specific.initialize(dqm);
-	if (retval != 0)
+	if (retval)
 		goto fail_init_pipelines;
 
 	return 0;
···
 	retval = 0;
 
 	retval = pm_init(&dqm->packets, dqm);
-	if (retval != 0)
+	if (retval)
 		goto fail_packet_manager_init;
 
 	retval = set_sched_resources(dqm);
-	if (retval != 0)
+	if (retval)
 		goto fail_set_sched_resources;
 
 	pr_debug("Allocating fence memory\n");
···
 	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
 					&dqm->fence_mem);
 
-	if (retval != 0)
+	if (retval)
 		goto fail_allocate_vidmem;
 
 	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
···
 	mqd = dqm->ops.get_mqd_manager(dqm,
 			get_mqd_type_from_queue_type(q->properties.type));
 
-	if (mqd == NULL) {
+	if (!mqd) {
 		mutex_unlock(&dqm->lock);
 		return -ENOMEM;
 	}
···
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
-	if (retval != 0)
+	if (retval)
 		goto out;
 
 	list_add(&q->list, &qpd->queues_list);
···
 
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
 			preempt_type, 0, false, 0);
-	if (retval != 0)
+	if (retval)
 		goto out;
 
 	*dqm->fence_addr = KFD_FENCE_INIT;
···
 	/* should be timed out */
 	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
 				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
-	if (retval != 0) {
+	if (retval) {
 		pdd = kfd_get_process_device_data(dqm->dev,
 				kfd_get_process(current));
 		pdd->reset_wavefronts = true;
···
 	mutex_lock(&dqm->lock);
 
 	retval = destroy_queues_cpsch(dqm, false, false);
-	if (retval != 0) {
+	if (retval) {
 		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
 		goto out;
 	}
···
 	}
 
 	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
-	if (retval != 0) {
+	if (retval) {
 		pr_err("failed to execute runlist");
 		goto out;
 	}
···
 		break;
 	}
 
-	if (dqm->ops.initialize(dqm) != 0) {
+	if (dqm->ops.initialize(dqm)) {
 		kfree(dqm);
 		return NULL;
 	}
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | +1 -1

···
 
 	/* Find kfd device according to gpu id */
 	dev = kfd_device_by_id(vma->vm_pgoff);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 
 	/* Calculate physical address of doorbell */
drivers/gpu/drm/amd/amdkfd/kfd_events.c | +3 -3

···
 
 	for (id = p->next_nonsignal_event_id;
 		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
-		lookup_event_by_id(p, id) != NULL;
+		lookup_event_by_id(p, id);
 		id++)
 		;
 
···
 
 	for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
 		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
-		lookup_event_by_id(p, id) != NULL;
+		lookup_event_by_id(p, id);
 		id++)
 		;
 
···
 
 static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
 {
-	if (ev->signal_page != NULL) {
+	if (ev->signal_page) {
 		release_event_notification_slot(ev->signal_page,
 				ev->signal_slot_index);
 		p->signal_event_count--;
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | +1 -1

···
 			id < NUM_OF_SUPPORTED_GPUS) {
 
 		pdd = kfd_create_process_device_data(dev, process);
-		if (pdd == NULL) {
+		if (!pdd) {
 			pr_err("Failed to create process device data\n");
 			return -1;
 		}
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | +3 -3

···
 		break;
 	}
 
-	if (kq->mqd == NULL)
+	if (!kq->mqd)
 		return false;
 
 	prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
 
-	if (prop.doorbell_ptr == NULL) {
+	if (!prop.doorbell_ptr) {
 		pr_err("Failed to initialize doorbell");
 		goto err_get_kernel_doorbell;
 	}
···
 	kq->pq_gpu_addr = kq->pq->gpu_addr;
 
 	retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
-	if (retval == false)
+	if (!retval)
 		goto err_eop_allocate_vidmem;
 
 	retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | +2 -2

···
 	m->cp_hqd_iq_rptr = AQL_ENABLE;
 
 	*mqd = m;
-	if (gart_addr != NULL)
+	if (gart_addr)
 		*gart_addr = addr;
 	retval = mm->update_mqd(mm, m, q);
 
···
 	memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
 
 	*mqd = m;
-	if (gart_addr != NULL)
+	if (gart_addr)
 		*gart_addr = (*mqd_mem_obj)->gpu_addr;
 
 	retval = mm->update_mqd(mm, m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | +1 -1

···
 	m->cp_hqd_iq_rptr = 1;
 
 	*mqd = m;
-	if (gart_addr != NULL)
+	if (gart_addr)
 		*gart_addr = addr;
 	retval = mm->update_mqd(mm, m, q);
 
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | +13 -13

···
 
 	BUG_ON(!pm);
 	BUG_ON(pm->allocated);
-	BUG_ON(is_over_subscription == NULL);
+	BUG_ON(!is_over_subscription);
 
 	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
 
 	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
 					&pm->ib_buffer_obj);
 
-	if (retval != 0) {
+	if (retval) {
 		pr_err("Failed to allocate runlist IB\n");
 		return retval;
 	}
···
 
 	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
 				&alloc_size_bytes, &is_over_subscription);
-	if (retval != 0)
+	if (retval)
 		return retval;
 
 	*rl_size_bytes = alloc_size_bytes;
···
 		}
 
 		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
-		if (retval != 0)
+		if (retval)
 			return retval;
 
 		proccesses_mapped++;
···
 						&rl_buffer[rl_wptr],
 						kq->queue,
 						qpd->is_debug);
-			if (retval != 0)
+			if (retval)
 				return retval;
 
 			inc_wptr(&rl_wptr,
···
 						q,
 						qpd->is_debug);
 
-			if (retval != 0)
+			if (retval)
 				return retval;
 
 			inc_wptr(&rl_wptr,
···
 	pm->dqm = dqm;
 	mutex_init(&pm->lock);
 	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
-	if (pm->priv_queue == NULL) {
+	if (!pm->priv_queue) {
 		mutex_destroy(&pm->lock);
 		return -ENOMEM;
 	}
···
 	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 			sizeof(*packet) / sizeof(uint32_t),
 			(unsigned int **)&packet);
-	if (packet == NULL) {
+	if (!packet) {
 		mutex_unlock(&pm->lock);
 		pr_err("Failed to allocate buffer on kernel queue\n");
 		return -ENOMEM;
···
 
 	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
 					&rl_ib_size);
-	if (retval != 0)
+	if (retval)
 		goto fail_create_runlist_ib;
 
 	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
···
 
 	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 					packet_size_dwords, &rl_buffer);
-	if (retval != 0)
+	if (retval)
 		goto fail_acquire_packet_buffer;
 
 	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
 					rl_ib_size / sizeof(uint32_t), false);
-	if (retval != 0)
+	if (retval)
 		goto fail_create_runlist;
 
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
···
 			pm->priv_queue,
 			sizeof(struct pm4_query_status) / sizeof(uint32_t),
 			(unsigned int **)&packet);
-	if (retval != 0)
+	if (retval)
 		goto fail_acquire_packet_buffer;
 
 	packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
···
 			pm->priv_queue,
 			sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
 			&buffer);
-	if (retval != 0)
+	if (retval)
 		goto err_acquire_packet_buffer;
 
 	packet = (struct pm4_unmap_queues *)buffer;
drivers/gpu/drm/amd/amdkfd/kfd_process.c | +3 -3

···
 
 	BUG_ON(!kfd_process_wq);
 
-	if (thread->mm == NULL)
+	if (!thread->mm)
 		return ERR_PTR(-EINVAL);
 
 	/* Only the pthreads threading model is supported. */
···
 {
 	struct kfd_process *process;
 
-	if (thread->mm == NULL)
+	if (!thread->mm)
 		return ERR_PTR(-EINVAL);
 
 	/* Only the pthreads threading model is supported. */
···
 	struct kfd_process *p;
 	struct kfd_process_device *pdd;
 
-	BUG_ON(dev == NULL);
+	BUG_ON(!dev);
 
 	/*
 	 * Look for the process that matches the pasid. If there is no such
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | +3 -3

···
 	pqm->queue_slot_bitmap =
 		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
 				BITS_PER_BYTE), GFP_KERNEL);
-	if (pqm->queue_slot_bitmap == NULL)
+	if (!pqm->queue_slot_bitmap)
 		return -ENOMEM;
 	pqm->process = p;
 
···
 		break;
 	case KFD_QUEUE_TYPE_DIQ:
 		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
-		if (kq == NULL) {
+		if (!kq) {
 			retval = -ENOMEM;
 			goto err_create_queue;
 		}
···
 	retval = 0;
 
 	pqn = get_queue_by_qid(pqm, qid);
-	if (pqn == NULL) {
+	if (!pqn) {
 		pr_err("Queue id does not match any known queue\n");
 		return -EINVAL;
 	}
drivers/gpu/drm/amd/amdkfd/kfd_topology.c | +3 -3

···
 	struct kfd_topology_device *dev;
 
 	dev = kfd_alloc_struct(dev);
-	if (dev == NULL) {
+	if (!dev) {
 		pr_err("No memory to allocate a topology device");
 		return NULL;
 	}
···
 	int ret;
 
 	pr_info("Creating topology SYSFS entries\n");
-	if (sys_props.kobj_topology == NULL) {
+	if (!sys_props.kobj_topology) {
 		sys_props.kobj_topology =
 				kfd_alloc_struct(sys_props.kobj_topology);
 		if (!sys_props.kobj_topology)
···
 	BUG_ON(!gpu);
 
 	list_for_each_entry(dev, &topology_device_list, list)
-		if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
+		if (!dev->gpu && (dev->node_props.simd_count > 0)) {
 			dev->gpu = gpu;
 			out_dev = dev;
 			break;