Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/msm: Use DRM_DEV_* instead of dev_*

Use DRM_DEV_INFO/ERROR/WARN instead of dev_info/dev_err/dev_dbg to generate
DRM-formatted, device-specific log messages, making the output easy to
tell apart when multiple instances of the driver are active.
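
For illustration only (this note is not part of the original commit message),
the conversion is mechanical: each call keeps its device pointer, format
string, and arguments, and only the logging helper is swapped for the DRM
macro from <drm/drm_print.h>, e.g.:

	/* before: generic device-prefixed log line */
	dev_err(dev->dev, "no a3xx device\n");

	/* after: the same message, prefixed per DRM device, so multiple
	 * driver instances are easy to tell apart in the log */
	DRM_DEV_ERROR(dev->dev, "no a3xx device\n");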

Signed-off-by: Mamta Shukla <mamtashukla555@gmail.com>
Signed-off-by: Rob Clark <robdclark@gmail.com>

Authored by Mamta Shukla, committed by Rob Clark
6a41da17 84511abc

+313 -310
+2 -2
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
···
 	int ret;
 
 	if (!pdev) {
-		dev_err(dev->dev, "no a3xx device\n");
+		DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
 		ret = -ENXIO;
 		goto fail;
 	}
···
 		 * to not be possible to restrict access, then we must
 		 * implement a cmdstream validator.
 		 */
-		dev_err(dev->dev, "No memory protection without IOMMU\n");
+		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
 		ret = -ENXIO;
 		goto fail;
 	}
+2 -2
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
···
 	int ret;
 
 	if (!pdev) {
-		dev_err(dev->dev, "no a4xx device\n");
+		DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
 		ret = -ENXIO;
 		goto fail;
 	}
···
 		 * to not be possible to restrict access, then we must
 		 * implement a cmdstream validator.
 		 */
-		dev_err(dev->dev, "No memory protection without IOMMU\n");
+		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
 		ret = -ENXIO;
 		goto fail;
 	}
+1 -1
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
···
 			minor->debugfs_root, minor);
 
 	if (ret) {
-		dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+		DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
 		return ret;
 	}
 
+5 -5
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
···
 	if (IS_ERR(a5xx_gpu->pm4_bo)) {
 		ret = PTR_ERR(a5xx_gpu->pm4_bo);
 		a5xx_gpu->pm4_bo = NULL;
-		dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+		DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
 			ret);
 		return ret;
 	}
···
 	if (IS_ERR(a5xx_gpu->pfp_bo)) {
 		ret = PTR_ERR(a5xx_gpu->pfp_bo);
 		a5xx_gpu->pfp_bo = NULL;
-		dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+		DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
 			ret);
 		return ret;
 	}
···
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 
-	dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+	DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
 		ring ? ring->id : -1, ring ? ring->seqno : 0,
 		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
 		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
···
 
 static void a5xx_dump(struct msm_gpu *gpu)
 {
-	dev_info(gpu->dev->dev, "status: %08x\n",
+	DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
 		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
 	adreno_dump(gpu);
 }
···
 	int ret;
 
 	if (!pdev) {
-		dev_err(dev->dev, "No A5XX device is defined\n");
+		DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
 		return ERR_PTR(-ENXIO);
 	}
 
+2 -2
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
···
 	if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
 		return;
 
-	dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+	DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
 	queue_work(priv->wq, &gpu->recover_work);
 }
 
···
 	status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
 	if (unlikely(status)) {
 		set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
-		dev_err(dev->dev, "%s: Preemption failed to complete\n",
+		DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
 			gpu->name);
 		queue_work(priv->wq, &gpu->recover_work);
 		return;
+19 -19
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
···
 			val == 0xbabeface, 100, 10000);
 
 	if (ret)
-		dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
 
 	return ret;
 }
···
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
 		val & 1, 100, 10000);
 	if (ret)
-		dev_err(gmu->dev, "Unable to start the HFI queues\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
 
 	return ret;
 }
···
 		val & (1 << ack), 100, 10000);
 
 	if (ret)
-		dev_err(gmu->dev,
+		DRM_DEV_ERROR(gmu->dev,
 			"Timeout waiting for GMU OOB set %s: 0x%x\n",
 				name,
 				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
···
 		(val & 0x38) == 0x28, 1, 100);
 
 	if (ret) {
-		dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
 			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 	}
 
···
 		(val & 0x04), 100, 10000);
 
 	if (ret)
-		dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
 			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 }
 
···
 		/* Check to see if the GMU really did slumber */
 		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
 			!= 0x0f) {
-			dev_err(gmu->dev, "The GMU did not go into slumber\n");
+			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
 			ret = -ETIMEDOUT;
 		}
 	}
···
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
 		val & (1 << 1), 100, 10000);
 	if (ret) {
-		dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
 		return ret;
 	}
 
···
 		return 0;
 	}
 
-	dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+	DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
 	return ret;
 }
···
 	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
 		val, val & (1 << 16), 100, 10000);
 	if (ret)
-		dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
 
 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 }
···
 
 	/* Sanity check the size of the firmware that was loaded */
 	if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
-		dev_err(gmu->dev,
+		DRM_DEV_ERROR(gmu->dev,
 			"GMU firmware is bigger than the available region\n");
 		return -EINVAL;
 	}
···
 	 */
 
 	if (ret)
-		dev_err(gmu->dev,
+		DRM_DEV_ERROR(gmu->dev,
 			"Unable to slumber GMU: status = 0%x/0%x\n",
 			gmu_read(gmu,
 				REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
···
 		IOMMU_READ | IOMMU_WRITE);
 
 	if (ret) {
-		dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
 
 		for (i = i - 1 ; i >= 0; i--)
 			iommu_unmap(gmu->domain,
···
 	}
 
 	if (j == pri_count) {
-		dev_err(dev,
+		DRM_DEV_ERROR(dev,
 			"Level %u not found in in the RPMh list\n",
 			level);
-		dev_err(dev, "Available levels:\n");
+		DRM_DEV_ERROR(dev, "Available levels:\n");
 		for (j = 0; j < pri_count; j++)
-			dev_err(dev, " %u\n", pri[j]);
+			DRM_DEV_ERROR(dev, " %u\n", pri[j]);
 
 		return -EINVAL;
 	}
···
 	 */
 	ret = dev_pm_opp_of_add_table(gmu->dev);
 	if (ret) {
-		dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
 		return ret;
 	}
 
···
 		IORESOURCE_MEM, name);
 
 	if (!res) {
-		dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
 		return ERR_PTR(-EINVAL);
 	}
 
 	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 	if (!ret) {
-		dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
 		return ERR_PTR(-EINVAL);
 	}
 
···
 	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
 		name, gmu);
 	if (ret) {
-		dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
 		return ret;
 	}
 
+2 -2
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
···
 
 static void a6xx_dump(struct msm_gpu *gpu)
 {
-	dev_info(&gpu->pdev->dev, "status: %08x\n",
+	DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
 		gpu_read(gpu, REG_A6XX_RBBM_STATUS));
 	adreno_dump(gpu);
 }
···
 	adreno_dump_info(gpu);
 
 	for (i = 0; i < 8; i++)
-		dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+		DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
 			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
 
 	if (hang_debug)
+7 -7
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
···
 		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
 
 	if (ret) {
-		dev_err(gmu->dev,
+		DRM_DEV_ERROR(gmu->dev,
 			"Message %s id %d timed out waiting for response\n",
 			a6xx_hfi_msg_id[id], seqnum);
 		return -ETIMEDOUT;
···
 
 		/* If the queue is empty our response never made it */
 		if (!ret) {
-			dev_err(gmu->dev,
+			DRM_DEV_ERROR(gmu->dev,
 				"The HFI response queue is unexpectedly empty\n");
 
 			return -ENOENT;
···
 			struct a6xx_hfi_msg_error *error =
 				(struct a6xx_hfi_msg_error *) &resp;
 
-			dev_err(gmu->dev, "GMU firmware error %d\n",
+			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
 				error->code);
 			continue;
 		}
 
 		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
-			dev_err(gmu->dev,
+			DRM_DEV_ERROR(gmu->dev,
 				"Unexpected message id %d on the response queue\n",
 				HFI_HEADER_SEQNUM(resp.ret_header));
 			continue;
 		}
 
 		if (resp.error) {
-			dev_err(gmu->dev,
+			DRM_DEV_ERROR(gmu->dev,
 				"Message %s id %d returned error %d\n",
 				a6xx_hfi_msg_id[id], seqnum, resp.error);
 			return -EINVAL;
···
 
 	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
 	if (ret) {
-		dev_err(gmu->dev, "Unable to send message %s id %d\n",
+		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
 			a6xx_hfi_msg_id[id], seqnum);
 		return ret;
 	}
···
 			continue;
 
 		if (queue->header->read_index != queue->header->write_index)
-			dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
 
 		queue->header->read_index = 0;
 		queue->header->write_index = 0;
+3 -3
drivers/gpu/drm/msm/adreno/adreno_device.c
···
 
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0) {
-		dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
 		return NULL;
 	}
 
···
 	mutex_unlock(&dev->struct_mutex);
 	pm_runtime_put_autosuspend(&pdev->dev);
 	if (ret) {
-		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
 		return NULL;
 	}
 
···
 	/* and if that fails, fall back to legacy "qcom,chipid" property: */
 	ret = of_property_read_u32(node, "qcom,chipid", &chipid);
 	if (ret) {
-		dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
+		DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
 		return ret;
 	}
 
+11 -11
drivers/gpu/drm/msm/adreno/adreno_gpu.c
···
 
 	ret = request_firmware_direct(&fw, newname, drm->dev);
 	if (!ret) {
-		dev_info(drm->dev, "loaded %s from new location\n",
+		DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
 			newname);
 		adreno_gpu->fwloc = FW_LOCATION_NEW;
 		goto out;
 	} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-		dev_err(drm->dev, "failed to load %s: %d\n",
+		DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 			newname, ret);
 		fw = ERR_PTR(ret);
 		goto out;
···
 
 		ret = request_firmware_direct(&fw, fwname, drm->dev);
 		if (!ret) {
-			dev_info(drm->dev, "loaded %s from legacy location\n",
+			DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
 				newname);
 			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
 			goto out;
 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-			dev_err(drm->dev, "failed to load %s: %d\n",
+			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 				fwname, ret);
 			fw = ERR_PTR(ret);
 			goto out;
···
 
 		ret = request_firmware(&fw, newname, drm->dev);
 		if (!ret) {
-			dev_info(drm->dev, "loaded %s with helper\n",
+			DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
 				newname);
 			adreno_gpu->fwloc = FW_LOCATION_HELPER;
 			goto out;
 		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-			dev_err(drm->dev, "failed to load %s: %d\n",
+			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
 				newname, ret);
 			fw = ERR_PTR(ret);
 			goto out;
 		}
 	}
 
-	dev_err(drm->dev, "failed to load %s\n", fwname);
+	DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
 	fw = ERR_PTR(-ENOENT);
 out:
 	kfree(newname);
···
 		ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
 		if (ret) {
 			ring->iova = 0;
-			dev_err(gpu->dev->dev,
+			DRM_DEV_ERROR(gpu->dev->dev,
 				"could not map ringbuffer %d: %d\n", i, ret);
 			return ret;
 		}
···
 
 		ret = msm_gpu_hw_init(gpu);
 		if (ret) {
-			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
 			/* hmm, oh well? */
 		}
 	}
···
 
 	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
 	if (!node) {
-		dev_err(dev, "Could not find the GPU powerlevels\n");
+		DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
 		return -ENXIO;
 	}
 
···
 	else {
 		ret = dev_pm_opp_of_add_table(dev);
 		if (ret)
-			dev_err(dev, "Unable to set the OPP table\n");
+			DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
 	}
 
 	if (!ret) {
+14 -14
drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
···
 static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
 		struct dpu_debug_bus_entry *entry, u32 val)
 {
-	dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+	DRM_DEV_ERROR(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
···
 	if (!(val & 0xFFF000))
 		return;
 
-	dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+	DRM_DEV_ERROR(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
···
 	if (!(val & BIT(15)))
 		return;
 
-	dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+	DRM_DEV_ERROR(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
···
 	if (!(val & BIT(15)))
 		return;
 
-	dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+	DRM_DEV_ERROR(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
···
 	if (!in_log && !in_mem)
 		return;
 
-	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+	DRM_DEV_INFO(dpu_dbg_base.dev, "======== start %s dump =========\n",
 			bus->cmn.name);
 
 	if (in_mem) {
···
 
 		if (*dump_mem) {
 			dump_addr = *dump_mem;
-			dev_info(dpu_dbg_base.dev,
+			DRM_DEV_INFO(dpu_dbg_base.dev,
 					"%s: start_addr:0x%pK len:0x%x\n",
 					__func__, dump_addr, list_size);
 		} else {
···
 		status = readl_relaxed(mem_base + offset);
 
 		if (in_log)
-			dev_info(dpu_dbg_base.dev,
+			DRM_DEV_INFO(dpu_dbg_base.dev,
 					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
 					head->wr_addr, head->block_id,
 					head->test_id, status);
···
 	}
 	_dpu_dbg_enable_power(false);
 
-	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+	DRM_DEV_INFO(dpu_dbg_base.dev, "======== end %s dump =========\n",
 			bus->cmn.name);
 }
 
···
 					*dump_addr++ = val;
 			}
 			if (in_log)
-				dev_info(dpu_dbg_base.dev,
+				DRM_DEV_INFO(dpu_dbg_base.dev,
 						"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
 						head->block_bus_addr, i, j, val);
 		}
···
 	list_size = bus->cmn.entries_size;
 	dump_mem = &bus->cmn.dumped_content;
 
-	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+	DRM_DEV_INFO(dpu_dbg_base.dev, "======== start %s dump =========\n",
 			bus->cmn.name);
 
 	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
···
 
 		if (*dump_mem) {
 			dump_addr = *dump_mem;
-			dev_info(dpu_dbg_base.dev,
+			DRM_DEV_INFO(dpu_dbg_base.dev,
 					"%s: start_addr:0x%pK len:0x%x\n",
 					__func__, dump_addr, list_size);
 		} else {
···
 	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
 	reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
 	reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
-	dev_err(dpu_dbg_base.dev,
+	DRM_DEV_ERROR(dpu_dbg_base.dev,
 			"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
 			reg, reg1, reg2);
 	reg >>= 16;
···
 		d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
 		d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
 
-		dev_err(dpu_dbg_base.dev,
+		DRM_DEV_ERROR(dpu_dbg_base.dev,
 				"Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
 				i, d0, d1);
 	}
···
 
 	_dpu_dbg_enable_power(false);
 
-	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+	DRM_DEV_INFO(dpu_dbg_base.dev, "======== end %s dump =========\n",
 			bus->cmn.name);
 }
 
+5 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
···
 #include <linux/err.h>
 #include <linux/delay.h>
 
+#include <drm/drm_print.h>
+
 #include "dpu_io_util.h"
 
 void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
···
 				"clock-names", i,
 				&clock_name);
 		if (rc) {
-			dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+			DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
 				i);
 			break;
 		}
···
 
 	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
 	if (rc) {
-		dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+		DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
 		goto err;
 	}
 
 	rc = of_clk_set_defaults(pdev->dev.of_node, false);
 	if (rc) {
-		dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+		DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
 		goto err;
 	}
 
+1 -1
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
···
 	int ret;
 
 	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
 		return -EINVAL;
 	}
 
+6 -6
drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
···
 	struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
 
 	if (!dtv_pdata) {
-		dev_err(dev->dev, "could not find dtv pdata\n");
+		DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
 		return;
 	}
 
···
 
 	ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
 	if (ret)
-		dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+		DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
 			pc, ret);
 
 	ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
 	if (ret)
-		dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
 
 	ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
 	if (ret)
-		dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
 
 	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
 
···
 
 	mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
 	if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
-		dev_err(dev->dev, "failed to get hdmi_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
 		ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
 		goto fail;
 	}
 
 	mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
 	if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
-		dev_err(dev->dev, "failed to get tv_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
 		ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
 		goto fail;
 	}
+22 -22
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
···
 	DBG("found MDP4 version v%d.%d", major, minor);
 
 	if (major != 4) {
-		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+		DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
 				major, minor);
 		ret = -ENXIO;
 		goto out;
···
 
 		encoder = mdp4_lcdc_encoder_init(dev, panel_node);
 		if (IS_ERR(encoder)) {
-			dev_err(dev->dev, "failed to construct LCDC encoder\n");
+			DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
 			return PTR_ERR(encoder);
 		}
 
···
 
 		connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
 		if (IS_ERR(connector)) {
-			dev_err(dev->dev, "failed to initialize LVDS connector\n");
+			DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
 			return PTR_ERR(connector);
 		}
 
···
 	case DRM_MODE_ENCODER_TMDS:
 		encoder = mdp4_dtv_encoder_init(dev);
 		if (IS_ERR(encoder)) {
-			dev_err(dev->dev, "failed to construct DTV encoder\n");
+			DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
 			return PTR_ERR(encoder);
 		}
 
···
 			/* Construct bridge/connector for HDMI: */
 			ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
 			if (ret) {
-				dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+				DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
 				return ret;
 			}
 		}
···
 		encoder = mdp4_dsi_encoder_init(dev);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
-			dev_err(dev->dev,
+			DRM_DEV_ERROR(dev->dev,
 				"failed to construct DSI encoder: %d\n", ret);
 			return ret;
 		}
···
 
 		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
 		if (ret) {
-			dev_err(dev->dev, "failed to initialize DSI: %d\n",
+			DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
 				ret);
 			return ret;
 		}
 
 		break;
 	default:
-		dev_err(dev->dev, "Invalid or unsupported interface\n");
+		DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
 		return -EINVAL;
 	}
···
 	for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
 		plane = mdp4_plane_init(dev, vg_planes[i], false);
 		if (IS_ERR(plane)) {
-			dev_err(dev->dev,
+			DRM_DEV_ERROR(dev->dev,
 				"failed to construct plane for VG%d\n", i + 1);
 			ret = PTR_ERR(plane);
 			goto fail;
···
 	for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
 		plane = mdp4_plane_init(dev, rgb_planes[i], true);
 		if (IS_ERR(plane)) {
-			dev_err(dev->dev,
+			DRM_DEV_ERROR(dev->dev,
 				"failed to construct plane for RGB%d\n", i + 1);
 			ret = PTR_ERR(plane);
 			goto fail;
···
 		crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
 				mdp4_crtcs[i]);
 		if (IS_ERR(crtc)) {
-			dev_err(dev->dev, "failed to construct crtc for %s\n",
+			DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
 				mdp4_crtc_names[i]);
 			ret = PTR_ERR(crtc);
 			goto fail;
···
 	for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
 		ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
 		if (ret) {
-			dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+			DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
 					i, ret);
 			goto fail;
 		}
···
 
 	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
 	if (!mdp4_kms) {
-		dev_err(dev->dev, "failed to allocate kms\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
 		ret = -ENOMEM;
 		goto fail;
 	}
···
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		ret = irq;
-		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
 		goto fail;
 	}
 
···
 	if (mdp4_kms->vdd) {
 		ret = regulator_enable(mdp4_kms->vdd);
 		if (ret) {
-			dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
 			goto fail;
 		}
 	}
 
 	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
 	if (IS_ERR(mdp4_kms->clk)) {
-		dev_err(dev->dev, "failed to get core_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
 		ret = PTR_ERR(mdp4_kms->clk);
 		goto fail;
 	}
···
 	// XXX if (rev >= MDP_REV_42) { ???
 	mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
 	if (IS_ERR(mdp4_kms->lut_clk)) {
-		dev_err(dev->dev, "failed to get lut_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
 		ret = PTR_ERR(mdp4_kms->lut_clk);
 		goto fail;
 	}
 
 	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
 	if (IS_ERR(mdp4_kms->axi_clk)) {
-		dev_err(dev->dev, "failed to get axi_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
 		ret = PTR_ERR(mdp4_kms->axi_clk);
 		goto fail;
 	}
···
 		if (ret)
 			goto fail;
 	} else {
-		dev_info(dev->dev, "no iommu, fallback to phys "
+		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
 				"contig buffers for scanout\n");
 		aspace = NULL;
 	}
 
 	ret = modeset_init(mdp4_kms);
 	if (ret) {
-		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
 		goto fail;
 	}
 
 	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
 	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
 		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
-		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
 		mdp4_kms->blank_cursor_bo = NULL;
 		goto fail;
 	}
···
 	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
 			&mdp4_kms->blank_cursor_iova);
 	if (ret) {
-		dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
 		goto fail;
 	}
 
+11 -11
drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
···
 	struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
 
 	if (!lcdc_pdata) {
-		dev_err(dev->dev, "could not find lvds pdata\n");
+		DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
 		return;
 	}
 
···
 		break;
 
 	default:
-		dev_err(dev->dev, "unknown bpp: %d\n", bpp);
+		DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
 		return;
 	}
 
···
 				MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
 		break;
 	default:
-		dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
+		DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
 		return;
 	}
 
···
 	for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
 		ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
 		if (ret)
-			dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
 	}
 
 	bs_set(mdp4_lcdc_encoder, 0);
···
 	for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
 		ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
 		if (ret)
-			dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+			DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
 	}
 
 	DBG("setting lcdc_clk=%lu", pc);
 	ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
 	if (ret)
-		dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
 	ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
 	if (ret)
-		dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
 	panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
 	if (!IS_ERR(panel)) {
···
 	/* TODO: do we need different pll in other cases? */
 	mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
 	if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
-		dev_err(dev->dev, "failed to get lvds_clk\n");
+		DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
 		ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
 		goto fail;
 	}
···
 	reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
 	if (IS_ERR(reg)) {
 		ret = PTR_ERR(reg);
-		dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
 		goto fail;
 	}
 	mdp4_lcdc_encoder->regs[0] = reg;
 
 	reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
 	if (IS_ERR(reg)) {
 		ret = PTR_ERR(reg);
-		dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
 		goto fail;
 	}
 	mdp4_lcdc_encoder->regs[1] = reg;
 
 	reg = devm_regulator_get(dev->dev, "lvds-vdda");
 	if (IS_ERR(reg)) {
 		ret = PTR_ERR(reg);
-		dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
 		goto fail;
 	}
 	mdp4_lcdc_encoder->regs[2] = reg;
+4 -4
drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
···
 	format = to_mdp_format(msm_framebuffer_format(fb));
 
 	if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
-		dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+		DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
 		return -ERANGE;
 	}
 
 	if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
-		dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+		DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
 		return -ERANGE;
 	}
 
 	if (crtc_w > (src_w * UP_SCALE_MAX)) {
-		dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+		DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
 		return -ERANGE;
 	}
 
 	if (crtc_h > (src_h * UP_SCALE_MAX)) {
-		dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+		DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
 		return -ERANGE;
 	}
 
+2 -2
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
···
 	}
 
 	if (major != 1) {
-		dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+		DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
 				major, minor);
 		ret = -ENXIO;
 		goto fail;
···
 		break;
 	}
 	if (unlikely(!mdp5_cfg)) {
-		dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+		DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
 				major, minor);
 		ret = -ENXIO;
 		goto fail;
+5 -5
drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
···
 	int pp_id = mixer->pp;
 
 	if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
-		dev_err(dev, "vsync_clk is not initialized\n");
+		DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
 		return -EINVAL;
 	}
 
 	total_lines_x100 = mode->vtotal * mode->vrefresh;
 	if (!total_lines_x100) {
-		dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+		DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
 				__func__, mode->vtotal, mode->vrefresh);
 		return -EINVAL;
 	}
 
 	vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
 	if (vsync_clk_speed <= 0) {
-		dev_err(dev, "vsync_clk round rate failed %ld\n",
+		DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
 				vsync_clk_speed);
 		return -EINVAL;
 	}
···
 	ret = clk_set_rate(mdp5_kms->vsync_clk,
 		clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
 	if (ret) {
-		dev_err(encoder->dev->dev,
+		DRM_DEV_ERROR(encoder->dev->dev,
 			"vsync_clk clk_set_rate failed, %d\n", ret);
 		return ret;
 	}
 	ret = clk_prepare_enable(mdp5_kms->vsync_clk);
 	if (ret) {
-		dev_err(encoder->dev->dev,
+		DRM_DEV_ERROR(encoder->dev->dev,
 			"vsync_clk clk_prepare_enable failed, %d\n", ret);
 		return ret;
 	}
+4 -4
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
···
 
 	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
 	if (ret) {
-		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
 		return ret;
 	}
 
···
 	 * and that we don't have conflicting mixer stages:
 	 */
 	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
-		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
 			cnt, start);
 		return -EINVAL;
 	}
···
 	}
 
 	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
 		return -EINVAL;
 	}
 
···
 set_cursor:
 	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
 	if (ret) {
-		dev_err(dev->dev, "failed to %sable cursor: %d\n",
+		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
 				cursor_enable ? "en" : "dis", ret);
 		goto end;
 	}
+8 -8
drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
···
 	struct mdp5_hw_mixer *mixer = pipeline->mixer;
 
 	if (unlikely(WARN_ON(!mixer))) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
 			ctl->id);
 		return -EINVAL;
 	}
 
 	if (pipeline->r_mixer) {
-		dev_err(ctl_mgr->dev->dev, "unsupported configuration");
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
 		return -EINVAL;
 	}
 
···
 		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
 		return 0;
 	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
-		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
 		return -EINVAL;
 	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
-		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
 		return -EINVAL;
 	}
 
···
 		if ((ctl_mgr->ctls[c].status & checkm) == match)
 			goto found;
 
-	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+	DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
 	goto unlock;
 
 found:
···
 
 	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
 	if (!ctl_mgr) {
-		dev_err(dev->dev, "failed to allocate CTL manager\n");
+		DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
-		dev_err(dev->dev, "Increase static pool size to at least %d\n",
+		DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
 				ctl_cfg->count);
 		ret = -ENOSPC;
 		goto fail;
···
 		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
 
 		if (WARN_ON(!ctl_cfg->base[c])) {
-			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+			DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
 			ret = -EINVAL;
 			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 			goto fail;
+15 -15
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
···
 			minor->debugfs_root, minor);
 
 	if (ret) {
-		dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+		DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
 		return ret;
 	}
 
···
 
 	encoder = mdp5_encoder_init(dev, intf, ctl);
 	if (IS_ERR(encoder)) {
-		dev_err(dev->dev, "failed to construct encoder\n");
+		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
 		return encoder;
 	}
 
···
 		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
 
 		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
-			dev_err(dev->dev, "failed to find dsi from intf %d\n",
+			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
 				intf->num);
 			ret = -EINVAL;
 			break;
···
 		break;
 	}
 	default:
-		dev_err(dev->dev, "unknown intf: %d\n", intf->type);
+		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
 		ret = -EINVAL;
 		break;
 	}
···
 		plane = mdp5_plane_init(dev, type);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
-			dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
 			goto fail;
 		}
 		priv->planes[priv->num_planes++] = plane;
···
 		crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
 		if (IS_ERR(crtc)) {
 			ret = PTR_ERR(crtc);
-			dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
 			goto fail;
 		}
 		priv->crtcs[priv->num_crtcs++] = crtc;
···
 	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
 	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
 
-	dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
+	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
 }
 
 static int get_clk(struct platform_device *pdev, struct clk **clkp,
···
 	struct device *dev = &pdev->dev;
 	struct clk *clk = msm_clk_get(pdev, name);
 	if (IS_ERR(clk) && mandatory) {
-		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 		return PTR_ERR(clk);
 	}
 	if (IS_ERR(clk))
···
 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (irq < 0) {
 		ret = irq;
-		dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
 		goto fail;
 	}
 
···
 		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
 				ARRAY_SIZE(iommu_ports));
 		if (ret) {
-			dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
 				ret);
 			goto fail;
 		}
 	} else {
-		dev_info(&pdev->dev,
+		DRM_DEV_INFO(&pdev->dev,
 			 "no iommu, fallback to phys contig buffers for scanout\n");
 		aspace = NULL;
 	}
···
 
 	ret = modeset_init(mdp5_kms);
 	if (ret) {
-		dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
+		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
 		goto fail;
 	}
 
···
 		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
 		if (IS_ERR(hwpipe)) {
 			ret = PTR_ERR(hwpipe);
-			dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
 				pipe2name(pipes[i]), ret);
 			return ret;
 		}
···
 		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
 		if (IS_ERR(mixer)) {
 			ret = PTR_ERR(mixer);
-			dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
 				i, ret);
 			return ret;
 		}
···
 
 		intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 		if (!intf) {
-			dev_err(dev->dev, "failed to construct INTF%d\n", i);
+			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
 			return -ENOMEM;
 		}
 
+5 -5
drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
···
 	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
 				  mdp5_mdss);
 	if (!d) {
-		dev_err(dev, "mdss irq domain add failed\n");
+		DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
 		return -ENXIO;
 	}
 
···
 
 	ret = msm_mdss_get_clocks(mdp5_mdss);
 	if (ret) {
-		dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
 		goto fail;
 	}
 
···
 
 	ret = regulator_enable(mdp5_mdss->vdd);
 	if (ret) {
-		dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+		DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
 			ret);
 		goto fail;
 	}
···
 	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
 			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
 	if (ret) {
-		dev_err(dev->dev, "failed to init irq: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
 		goto fail_irq;
 	}
 
 	ret = mdss_irq_domain_init(mdp5_mdss);
 	if (ret) {
-		dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
 		goto fail_irq;
 	}
 
+4 -4
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
···
 
 	SET_PROPERTY(zpos, ZPOS, uint8_t);
 
-	dev_err(dev->dev, "Invalid property\n");
+	DRM_DEV_ERROR(dev->dev, "Invalid property\n");
 	ret = -EINVAL;
 done:
 	return ret;
···
 
 	GET_PROPERTY(zpos, ZPOS, uint8_t);
 
-	dev_err(dev->dev, "Invalid property\n");
+	DRM_DEV_ERROR(dev->dev, "Invalid property\n");
 	ret = -EINVAL;
 done:
 	return ret;
···
 
 	ret = calc_phase_step(src, dest, &phasex_step);
 	if (ret) {
-		dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+		DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
 	}
 
···
 
 	ret = calc_phase_step(src, dest, &phasey_step);
 	if (ret) {
-		dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+		DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
 	}
 
+2 -2
drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
···
 
 	avail = cnt - bitmap_weight(state->state, cnt);
 	if (nblks > avail) {
-		dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+		DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
 				nblks, avail);
 		return -ENOSPC;
 	}
···
 		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
 		ret = smp_request_block(smp, state, cid, n);
 		if (ret) {
-			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+			DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
 					n, ret);
 			return ret;
 		}
+5 -5
drivers/gpu/drm/msm/dsi/dsi.c
···
 
 	phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
 	if (!phy_node) {
-		dev_err(&pdev->dev, "cannot find phy device\n");
+		DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
 		return -ENXIO;
 	}
 
···
 	of_node_put(phy_node);
 
 	if (!phy_pdev || !msm_dsi->phy) {
-		dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+		DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
 		return -EPROBE_DEFER;
 	}
 
···
 
 	ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
 	if (ret) {
-		dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
 		goto fail;
 	}
 
···
 	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
 	if (IS_ERR(msm_dsi->bridge)) {
 		ret = PTR_ERR(msm_dsi->bridge);
-		dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
 		msm_dsi->bridge = NULL;
 		goto fail;
 	}
···
 
 	if (IS_ERR(msm_dsi->connector)) {
 		ret = PTR_ERR(msm_dsi->connector);
-		dev_err(dev->dev,
+		DRM_DEV_ERROR(dev->dev,
 			"failed to create dsi connector: %d\n", ret);
 		msm_dsi->connector = NULL;
 		goto fail;
+11 -11
drivers/gpu/drm/msm/dsi/dsi_host.c
···
 				msecs_to_jiffies(70));
 
 	if (ret <= 0)
-		dev_err(dev, "wait for video done timed out\n");
+		DRM_DEV_ERROR(dev, "wait for video done timed out\n");
 
 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
 }
···
 
 	prop = of_find_property(ep, "data-lanes", &len);
 	if (!prop) {
-		dev_dbg(dev,
+		DRM_DEV_DEBUG(dev,
 			"failed to find data lane mapping, using default\n");
 		return 0;
 	}
···
 	num_lanes = len / sizeof(u32);
 
 	if (num_lanes < 1 || num_lanes > 4) {
-		dev_err(dev, "bad number of data lanes\n");
+		DRM_DEV_ERROR(dev, "bad number of data lanes\n");
 		return -EINVAL;
 	}
 
···
 	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
 					 num_lanes);
 	if (ret) {
-		dev_err(dev, "failed to read lane data\n");
+		DRM_DEV_ERROR(dev, "failed to read lane data\n");
 		return ret;
 	}
 
···
 	 */
 	for (j = 0; j < num_lanes; j++) {
 		if (lane_map[j] < 0 || lane_map[j] > 3)
-			dev_err(dev, "bad physical lane entry %u\n",
+			DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
 				lane_map[j]);
 
 		if (swap[lane_map[j]] != j)
···
 	 */
 	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
 	if (!endpoint) {
-		dev_dbg(dev, "%s: no endpoint\n", __func__);
+		DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
 		return 0;
 	}
 
 	ret = dsi_host_parse_lane_data(msm_host, endpoint);
 	if (ret) {
-		dev_err(dev, "%s: invalid lane configuration %d\n",
+		DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
 			__func__, ret);
 		ret = -EINVAL;
 		goto err;
···
 	/* Get panel node from the output port's endpoint data */
 	device_node = of_graph_get_remote_node(np, 1, 0);
 	if (!device_node) {
-		dev_dbg(dev, "%s: no valid device\n", __func__);
+		DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
 		ret = -ENODEV;
 		goto err;
 	}
···
 		msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
 					"syscon-sfpb");
 		if (IS_ERR(msm_host->sfpb)) {
-			dev_err(dev, "%s: failed to get sfpb regmap\n",
+			DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
 				__func__);
 			ret = PTR_ERR(msm_host->sfpb);
 		}
···
 	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (msm_host->irq < 0) {
 		ret = msm_host->irq;
-		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
 		return ret;
 	}
 
···
 			dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
 			"dsi_isr", msm_host);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+		DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
 				msm_host->irq, ret);
 		return ret;
 	}
+14 -14
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
···
 
 	ret = devm_regulator_bulk_get(dev, num, s);
 	if (ret < 0) {
-		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+		DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
 						__func__, ret);
 		return ret;
 	}
···
 			ret = regulator_set_load(s[i].consumer,
 							regs[i].enable_load);
 			if (ret < 0) {
-				dev_err(dev,
+				DRM_DEV_ERROR(dev,
 					"regulator %d set op mode failed, %d\n",
 					i, ret);
 				goto fail;
···
 
 	ret = regulator_bulk_enable(num, s);
 	if (ret < 0) {
-		dev_err(dev, "regulator enable failed, %d\n", ret);
+		DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
 		goto fail;
 	}
 
···
 
 	ret = clk_prepare_enable(phy->ahb_clk);
 	if (ret) {
-		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+		DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
 		pm_runtime_put_sync(dev);
 	}
 
···
 		phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
 				"DSI_PHY_REG");
 		if (IS_ERR(phy->reg_base)) {
-			dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+			DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
 				__func__);
 			ret = -ENOMEM;
 			goto fail;
···
 	phy->id = dsi_phy_get_id(phy);
 	if (phy->id < 0) {
 		ret = phy->id;
-		dev_err(dev, "%s: couldn't identify PHY index, %d\n",
+		DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
 			__func__, ret);
 		goto fail;
 	}
···
 
 	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
 	if (IS_ERR(phy->base)) {
-		dev_err(dev, "%s: failed to map phy base\n", __func__);
+		DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	ret = dsi_phy_regulator_init(phy);
 	if (ret) {
-		dev_err(dev, "%s: failed to init regulator\n", __func__);
+		DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
 		goto fail;
 	}
 
 	phy->ahb_clk = msm_clk_get(pdev, "iface");
 	if (IS_ERR(phy->ahb_clk)) {
-		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+		DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
 		ret = PTR_ERR(phy->ahb_clk);
 		goto fail;
 	}
···
 
 	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
 	if (IS_ERR_OR_NULL(phy->pll))
-		dev_info(dev,
+		DRM_DEV_INFO(dev,
 			"%s: pll init failed: %ld, need separate pll clk driver\n",
 			__func__, PTR_ERR(phy->pll));
 
···
 
 	ret = dsi_phy_enable_resource(phy);
 	if (ret) {
-		dev_err(dev, "%s: resource enable failed, %d\n",
+		DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
 			__func__, ret);
 		goto res_en_fail;
 	}
 
 	ret = dsi_phy_regulator_enable(phy);
 	if (ret) {
-		dev_err(dev, "%s: regulator enable failed, %d\n",
+		DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
 			__func__, ret);
 		goto reg_en_fail;
 	}
 
 	ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
 	if (ret) {
-		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+		DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
 		goto phy_en_fail;
 	}
 
···
 	if (phy->usecase != MSM_DSI_PHY_SLAVE) {
 		ret = msm_dsi_pll_restore_state(phy->pll);
 		if (ret) {
-			dev_err(dev, "%s: failed to restore pll state, %d\n",
+			DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
 				__func__, ret);
 			goto pll_restor_fail;
 		}
+3 -3
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
···
 	DBG("");
 
 	if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
···
 
 	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
 	if (ret) {
-		dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
 			__func__, ret);
 		return ret;
 	}
···
 	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
 				"DSI_PHY_LANE");
 	if (IS_ERR(phy->lane_base)) {
-		dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
 			__func__);
 		return -ENOMEM;
 	}
+3 -3
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
···
 	void __iomem *lane_base = phy->lane_base;
 
 	if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
···
 
 	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
 	if (ret) {
-		dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
 			__func__, ret);
 		return ret;
 	}
···
 	phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
 				"DSI_PHY_LANE");
 	if (IS_ERR(phy->lane_base)) {
-		dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+		DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
 			__func__);
 		return -ENOMEM;
 	}
+1 -1
drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
···
 	DBG("");
 
 	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
+1 -1
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
···
 	DBG("");
 
 	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
-		dev_err(&phy->pdev->dev,
+		DRM_DEV_ERROR(&phy->pdev->dev,
 			"%s: D-PHY timing calculation failed\n", __func__);
 		return -EINVAL;
 	}
+1 -1
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
··· 132 132 DBG(""); 133 133 134 134 if (msm_dsi_dphy_timing_calc(timing, clk_req)) { 135 - dev_err(&phy->pdev->dev, 135 + DRM_DEV_ERROR(&phy->pdev->dev, 136 136 "%s: D-PHY timing calculation failed\n", __func__); 137 137 return -EINVAL; 138 138 }
+1 -1
drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
··· 175 175 } 176 176 177 177 if (IS_ERR(pll)) { 178 - dev_err(dev, "%s: failed to init DSI PLL\n", __func__); 178 + DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__); 179 179 return pll; 180 180 } 181 181
+4 -4
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
··· 760 760 ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, 761 761 pll_10nm->hw_data); 762 762 if (ret) { 763 - dev_err(dev, "failed to register clk provider: %d\n", ret); 763 + DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret); 764 764 return ret; 765 765 } 766 766 ··· 788 788 789 789 pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); 790 790 if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) { 791 - dev_err(&pdev->dev, "failed to map CMN PHY base\n"); 791 + DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n"); 792 792 return ERR_PTR(-ENOMEM); 793 793 } 794 794 795 795 pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); 796 796 if (IS_ERR_OR_NULL(pll_10nm->mmio)) { 797 - dev_err(&pdev->dev, "failed to map PLL base\n"); 797 + DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n"); 798 798 return ERR_PTR(-ENOMEM); 799 799 } 800 800 ··· 813 813 814 814 ret = pll_10nm_register(pll_10nm); 815 815 if (ret) { 816 - dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); 816 + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); 817 817 return ERR_PTR(ret); 818 818 } 819 819
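A side note on the first argument: DRM_DEV_* takes a plain struct device *, which is why the PLL code above can pass &pdev->dev from its platform_device while other files pass a drm_device's ->dev, exactly as their dev_* predecessors did. A hypothetical fragment showing both flavors (pll_log_examples is illustrative, not in the tree):

```c
#include <linux/platform_device.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>

/* Both pointer flavors seen in the hunks above end up as the
 * struct device * that DRM_DEV_* expects. */
static void pll_log_examples(struct platform_device *pdev,
			     struct drm_device *ddev, int ret)
{
	DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
	DRM_DEV_ERROR(ddev->dev, "failed to register clk provider: %d\n", ret);
}
```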
+6 -6
drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
··· 783 783 POLL_TIMEOUT_US); 784 784 785 785 if (unlikely(!locked)) 786 - dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n"); 786 + DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n"); 787 787 else 788 788 DBG("DSI PLL lock success"); 789 789 ··· 829 829 ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw, 830 830 cached_state->vco_rate, 0); 831 831 if (ret) { 832 - dev_err(&pll_14nm->pdev->dev, 832 + DRM_DEV_ERROR(&pll_14nm->pdev->dev, 833 833 "restore vco rate failed. ret=%d\n", ret); 834 834 return ret; 835 835 } ··· 1039 1039 ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, 1040 1040 pll_14nm->hw_data); 1041 1041 if (ret) { 1042 - dev_err(dev, "failed to register clk provider: %d\n", ret); 1042 + DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret); 1043 1043 return ret; 1044 1044 } 1045 1045 ··· 1067 1067 1068 1068 pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); 1069 1069 if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) { 1070 - dev_err(&pdev->dev, "failed to map CMN PHY base\n"); 1070 + DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n"); 1071 1071 return ERR_PTR(-ENOMEM); 1072 1072 } 1073 1073 1074 1074 pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); 1075 1075 if (IS_ERR_OR_NULL(pll_14nm->mmio)) { 1076 - dev_err(&pdev->dev, "failed to map PLL base\n"); 1076 + DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n"); 1077 1077 return ERR_PTR(-ENOMEM); 1078 1078 } 1079 1079 ··· 1096 1096 1097 1097 ret = pll_14nm_register(pll_14nm); 1098 1098 if (ret) { 1099 - dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); 1099 + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); 1100 1100 return ERR_PTR(ret); 1101 1101 } 1102 1102
+8 -8
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
··· 156 156 if (rate <= lpfr_lut[i].vco_rate) 157 157 break; 158 158 if (i == LPFR_LUT_SIZE) { 159 - dev_err(dev, "unable to get loop filter resistance. vco=%lu\n", 159 + DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n", 160 160 rate); 161 161 return -EINVAL; 162 162 } ··· 386 386 } 387 387 388 388 if (unlikely(!locked)) 389 - dev_err(dev, "DSI PLL lock failed\n"); 389 + DRM_DEV_ERROR(dev, "DSI PLL lock failed\n"); 390 390 else 391 391 DBG("DSI PLL Lock success"); 392 392 ··· 429 429 locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us); 430 430 431 431 if (unlikely(!locked)) 432 - dev_err(dev, "DSI PLL lock failed\n"); 432 + DRM_DEV_ERROR(dev, "DSI PLL lock failed\n"); 433 433 else 434 434 DBG("DSI PLL lock success"); 435 435 ··· 468 468 ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw, 469 469 cached_state->vco_rate, 0); 470 470 if (ret) { 471 - dev_err(&pll_28nm->pdev->dev, 471 + DRM_DEV_ERROR(&pll_28nm->pdev->dev, 472 472 "restore vco rate failed. ret=%d\n", ret); 473 473 return ret; 474 474 } ··· 581 581 ret = of_clk_add_provider(dev->of_node, 582 582 of_clk_src_onecell_get, &pll_28nm->clk_data); 583 583 if (ret) { 584 - dev_err(dev, "failed to register clk provider: %d\n", ret); 584 + DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret); 585 585 return ret; 586 586 } 587 587 ··· 607 607 608 608 pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); 609 609 if (IS_ERR_OR_NULL(pll_28nm->mmio)) { 610 - dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__); 610 + DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__); 611 611 return ERR_PTR(-ENOMEM); 612 612 } 613 613 ··· 633 633 pll->en_seq_cnt = 1; 634 634 pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp; 635 635 } else { 636 - dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type); 636 + DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type); 637 637 return ERR_PTR(-EINVAL); 638 638 } 639 639 640 640 ret = pll_28nm_register(pll_28nm); 641 641 if (ret) { 642 - dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); 642 + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); 643 643 return ERR_PTR(ret); 644 644 } 645 645
+5 -5
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
··· 327 327 locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us); 328 328 329 329 if (unlikely(!locked)) 330 - dev_err(dev, "DSI PLL lock failed\n"); 330 + DRM_DEV_ERROR(dev, "DSI PLL lock failed\n"); 331 331 else 332 332 DBG("DSI PLL lock success"); 333 333 ··· 368 368 ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw, 369 369 cached_state->vco_rate, 0); 370 370 if (ret) { 371 - dev_err(&pll_28nm->pdev->dev, 371 + DRM_DEV_ERROR(&pll_28nm->pdev->dev, 372 372 "restore vco rate failed. ret=%d\n", ret); 373 373 return ret; 374 374 } ··· 482 482 ret = of_clk_add_provider(dev->of_node, 483 483 of_clk_src_onecell_get, &pll_28nm->clk_data); 484 484 if (ret) { 485 - dev_err(dev, "failed to register clk provider: %d\n", ret); 485 + DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret); 486 486 return ret; 487 487 } 488 488 ··· 508 508 509 509 pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); 510 510 if (IS_ERR_OR_NULL(pll_28nm->mmio)) { 511 - dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__); 511 + DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__); 512 512 return ERR_PTR(-ENOMEM); 513 513 } 514 514 ··· 526 526 527 527 ret = pll_28nm_register(pll_28nm); 528 528 if (ret) { 529 - dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); 529 + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); 530 530 return ERR_PTR(ret); 531 531 } 532 532
+4 -4
drivers/gpu/drm/msm/edp/edp.c
··· 157 157 edp->bridge = msm_edp_bridge_init(edp); 158 158 if (IS_ERR(edp->bridge)) { 159 159 ret = PTR_ERR(edp->bridge); 160 - dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret); 160 + DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret); 161 161 edp->bridge = NULL; 162 162 goto fail; 163 163 } ··· 165 165 edp->connector = msm_edp_connector_init(edp); 166 166 if (IS_ERR(edp->connector)) { 167 167 ret = PTR_ERR(edp->connector); 168 - dev_err(dev->dev, "failed to create eDP connector: %d\n", ret); 168 + DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret); 169 169 edp->connector = NULL; 170 170 goto fail; 171 171 } ··· 173 173 edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 174 174 if (edp->irq < 0) { 175 175 ret = edp->irq; 176 - dev_err(dev->dev, "failed to get IRQ: %d\n", ret); 176 + DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret); 177 177 goto fail; 178 178 } 179 179 ··· 181 181 edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 182 182 "edp_isr", edp); 183 183 if (ret < 0) { 184 - dev_err(dev->dev, "failed to request IRQ%u: %d\n", 184 + DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n", 185 185 edp->irq, ret); 186 186 goto fail; 187 187 }
+16 -16
drivers/gpu/drm/msm/hdmi/hdmi.c
··· 98 98 99 99 phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0); 100 100 if (!phy_node) { 101 - dev_err(&pdev->dev, "cannot find phy device\n"); 101 + DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n"); 102 102 return -ENXIO; 103 103 } 104 104 ··· 109 109 of_node_put(phy_node); 110 110 111 111 if (!phy_pdev || !hdmi->phy) { 112 - dev_err(&pdev->dev, "phy driver is not ready\n"); 112 + DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); 113 113 return -EPROBE_DEFER; 114 114 } 115 115 ··· 153 153 hdmi->qfprom_mmio = msm_ioremap(pdev, 154 154 config->qfprom_mmio_name, "HDMI_QFPROM"); 155 155 if (IS_ERR(hdmi->qfprom_mmio)) { 156 - dev_info(&pdev->dev, "can't find qfprom resource\n"); 156 + DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n"); 157 157 hdmi->qfprom_mmio = NULL; 158 158 } 159 159 ··· 172 172 config->hpd_reg_names[i]); 173 173 if (IS_ERR(reg)) { 174 174 ret = PTR_ERR(reg); 175 - dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n", 175 + DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n", 176 176 config->hpd_reg_names[i], ret); 177 177 goto fail; 178 178 } ··· 195 195 config->pwr_reg_names[i]); 196 196 if (IS_ERR(reg)) { 197 197 ret = PTR_ERR(reg); 198 - dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n", 198 + DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n", 199 199 config->pwr_reg_names[i], ret); 200 200 goto fail; 201 201 } ··· 217 217 clk = msm_clk_get(pdev, config->hpd_clk_names[i]); 218 218 if (IS_ERR(clk)) { 219 219 ret = PTR_ERR(clk); 220 - dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n", 220 + DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n", 221 221 config->hpd_clk_names[i], ret); 222 222 goto fail; 223 223 } ··· 239 239 clk = msm_clk_get(pdev, config->pwr_clk_names[i]); 240 240 if (IS_ERR(clk)) { 241 241 ret = PTR_ERR(clk); 242 - dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n", 242 + DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n", 243 243 config->pwr_clk_names[i], ret); 244 244 goto fail; 245 245 } ··· 254 254 hdmi->i2c = msm_hdmi_i2c_init(hdmi); 255 255 if (IS_ERR(hdmi->i2c)) { 256 256 ret = PTR_ERR(hdmi->i2c); 257 - dev_err(&pdev->dev, "failed to get i2c: %d\n", ret); 257 + DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret); 258 258 hdmi->i2c = NULL; 259 259 goto fail; 260 260 } 261 261 262 262 ret = msm_hdmi_get_phy(hdmi); 263 263 if (ret) { 264 - dev_err(&pdev->dev, "failed to get phy\n"); 264 + DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n"); 265 265 goto fail; 266 266 } 267 267 ··· 303 303 hdmi->bridge = msm_hdmi_bridge_init(hdmi); 304 304 if (IS_ERR(hdmi->bridge)) { 305 305 ret = PTR_ERR(hdmi->bridge); 306 - dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret); 306 + DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret); 307 307 hdmi->bridge = NULL; 308 308 goto fail; 309 309 } ··· 311 311 hdmi->connector = msm_hdmi_connector_init(hdmi); 312 312 if (IS_ERR(hdmi->connector)) { 313 313 ret = PTR_ERR(hdmi->connector); 314 - dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret); 314 + DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret); 315 315 hdmi->connector = NULL; 316 316 goto fail; 317 317 } ··· 319 319 hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 320 320 if (hdmi->irq < 0) { 321 321 ret = hdmi->irq; 322 - dev_err(dev->dev, "failed to get irq: %d\n", ret); 322 + DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret); 323 323 goto fail; 324 324 } 325 325 ··· 327 327 msm_hdmi_irq, 
IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 328 328 "hdmi_isr", hdmi); 329 329 if (ret < 0) { 330 - dev_err(dev->dev, "failed to request IRQ%u: %d\n", 330 + DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n", 331 331 hdmi->irq, ret); 332 332 goto fail; 333 333 } ··· 476 476 unsigned int level_shift = 0; /* 0dB */ 477 477 bool down_mix = false; 478 478 479 - dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate, 479 + DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate, 480 480 params->sample_width, params->cea.channels); 481 481 482 482 switch (params->cea.channels) { ··· 527 527 rate = HDMI_SAMPLE_RATE_192KHZ; 528 528 break; 529 529 default: 530 - dev_err(dev, "rate[%d] not supported!\n", 530 + DRM_DEV_ERROR(dev, "rate[%d] not supported!\n", 531 531 params->sample_rate); 532 532 return -EINVAL; 533 533 } ··· 579 579 hdmi_cfg = (struct hdmi_platform_config *) 580 580 of_device_get_match_data(dev); 581 581 if (!hdmi_cfg) { 582 - dev_err(dev, "unknown hdmi_cfg: %pOFn\n", of_node); 582 + DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node); 583 583 return -ENXIO; 584 584 } 585 585
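One behavioral wrinkle in this file: the dev_dbg() call in the audio hook becomes DRM_DEV_DEBUG(), and the two are enabled differently. dev_dbg() is controlled per call site by dynamic debug (or compiled out entirely), whereas DRM_DEV_DEBUG() is, as of this series, gated at runtime by the core-category bit of the drm.debug module parameter. A hedged sketch, assuming that gating (hdmi_log_audio_params is hypothetical):

```c
#include <linux/device.h>
#include <drm/drm_print.h>

/* Hypothetical helper mirroring the audio hook above. */
static void hdmi_log_audio_params(struct device *dev, unsigned int rate,
				  int width, int channels)
{
	/*
	 * Silent until the drm core debug category is enabled, e.g.
	 * drm.debug=0x1 on the kernel command line or via
	 * /sys/module/drm/parameters/debug at runtime.
	 */
	DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n",
		      rate, width, channels);
}
```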
+5 -5
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
··· 40 40 for (i = 0; i < config->pwr_reg_cnt; i++) { 41 41 ret = regulator_enable(hdmi->pwr_regs[i]); 42 42 if (ret) { 43 - dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n", 43 + DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n", 44 44 config->pwr_reg_names[i], ret); 45 45 } 46 46 } ··· 49 49 DBG("pixclock: %lu", hdmi->pixclock); 50 50 ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock); 51 51 if (ret) { 52 - dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n", 52 + DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n", 53 53 config->pwr_clk_names[0], ret); 54 54 } 55 55 } ··· 57 57 for (i = 0; i < config->pwr_clk_cnt; i++) { 58 58 ret = clk_prepare_enable(hdmi->pwr_clks[i]); 59 59 if (ret) { 60 - dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n", 60 + DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n", 61 61 config->pwr_clk_names[i], ret); 62 62 } 63 63 } ··· 82 82 for (i = 0; i < config->pwr_reg_cnt; i++) { 83 83 ret = regulator_disable(hdmi->pwr_regs[i]); 84 84 if (ret) { 85 - dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n", 85 + DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n", 86 86 config->pwr_reg_names[i], ret); 87 87 } 88 88 } ··· 105 105 106 106 len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer)); 107 107 if (len < 0) { 108 - dev_err(&hdmi->pdev->dev, 108 + DRM_DEV_ERROR(&hdmi->pdev->dev, 109 109 "failed to configure avi infoframe\n"); 110 110 return; 111 111 }
+5 -5
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
··· 90 90 if (gpio.num != -1) { 91 91 ret = gpio_request(gpio.num, gpio.label); 92 92 if (ret) { 93 - dev_err(dev, 93 + DRM_DEV_ERROR(dev, 94 94 "'%s'(%d) gpio_request failed: %d\n", 95 95 gpio.label, gpio.num, ret); 96 96 goto err; ··· 156 156 157 157 ret = clk_prepare_enable(hdmi->hpd_clks[i]); 158 158 if (ret) { 159 - dev_err(dev, 159 + DRM_DEV_ERROR(dev, 160 160 "failed to enable hpd clk: %s (%d)\n", 161 161 config->hpd_clk_names[i], ret); 162 162 } ··· 179 179 for (i = 0; i < config->hpd_reg_cnt; i++) { 180 180 ret = regulator_enable(hdmi->hpd_regs[i]); 181 181 if (ret) { 182 - dev_err(dev, "failed to enable hpd regulator: %s (%d)\n", 182 + DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n", 183 183 config->hpd_reg_names[i], ret); 184 184 goto fail; 185 185 } ··· 187 187 188 188 ret = pinctrl_pm_select_default_state(dev); 189 189 if (ret) { 190 - dev_err(dev, "pinctrl state chg failed: %d\n", ret); 190 + DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret); 191 191 goto fail; 192 192 } 193 193 194 194 ret = gpio_config(hdmi, true); 195 195 if (ret) { 196 - dev_err(dev, "failed to configure GPIOs: %d\n", ret); 196 + DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret); 197 197 goto fail; 198 198 } 199 199
+1 -1
drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
··· 66 66 } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry); 67 67 68 68 if (!retry) { 69 - dev_err(dev->dev, "timeout waiting for DDC\n"); 69 + DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n"); 70 70 return -ETIMEDOUT; 71 71 } 72 72
+6 -6
drivers/gpu/drm/msm/hdmi/hdmi_phy.c
··· 37 37 reg = devm_regulator_get(dev, cfg->reg_names[i]); 38 38 if (IS_ERR(reg)) { 39 39 ret = PTR_ERR(reg); 40 - dev_err(dev, "failed to get phy regulator: %s (%d)\n", 40 + DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n", 41 41 cfg->reg_names[i], ret); 42 42 return ret; 43 43 } ··· 51 51 clk = msm_clk_get(phy->pdev, cfg->clk_names[i]); 52 52 if (IS_ERR(clk)) { 53 53 ret = PTR_ERR(clk); 54 - dev_err(dev, "failed to get phy clock: %s (%d)\n", 54 + DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n", 55 55 cfg->clk_names[i], ret); 56 56 return ret; 57 57 } ··· 73 73 for (i = 0; i < cfg->num_regs; i++) { 74 74 ret = regulator_enable(phy->regs[i]); 75 75 if (ret) 76 - dev_err(dev, "failed to enable regulator: %s (%d)\n", 76 + DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n", 77 77 cfg->reg_names[i], ret); 78 78 } 79 79 80 80 for (i = 0; i < cfg->num_clks; i++) { 81 81 ret = clk_prepare_enable(phy->clks[i]); 82 82 if (ret) 83 - dev_err(dev, "failed to enable clock: %s (%d)\n", 83 + DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n", 84 84 cfg->clk_names[i], ret); 85 85 } 86 86 ··· 159 159 160 160 phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY"); 161 161 if (IS_ERR(phy->mmio)) { 162 - dev_err(dev, "%s: failed to map phy base\n", __func__); 162 + DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__); 163 163 return -ENOMEM; 164 164 } 165 165 ··· 177 177 178 178 ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type); 179 179 if (ret) { 180 - dev_err(dev, "couldn't init PLL\n"); 180 + DRM_DEV_ERROR(dev, "couldn't init PLL\n"); 181 181 msm_hdmi_phy_resource_disable(phy); 182 182 return ret; 183 183 }
+3 -3
drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
··· 725 725 726 726 pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL"); 727 727 if (IS_ERR(pll->mmio_qserdes_com)) { 728 - dev_err(dev, "failed to map pll base\n"); 728 + DRM_DEV_ERROR(dev, "failed to map pll base\n"); 729 729 return -ENOMEM; 730 730 } 731 731 ··· 737 737 738 738 pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label); 739 739 if (IS_ERR(pll->mmio_qserdes_tx[i])) { 740 - dev_err(dev, "failed to map pll base\n"); 740 + DRM_DEV_ERROR(dev, "failed to map pll base\n"); 741 741 return -ENOMEM; 742 742 } 743 743 } ··· 745 745 746 746 clk = devm_clk_register(dev, &pll->clk_hw); 747 747 if (IS_ERR(clk)) { 748 - dev_err(dev, "failed to register pll clock\n"); 748 + DRM_DEV_ERROR(dev, "failed to register pll clock\n"); 749 749 return -EINVAL; 750 750 } 751 751
+2 -2
drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
··· 445 445 446 446 pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL"); 447 447 if (IS_ERR(pll->mmio)) { 448 - dev_err(dev, "failed to map pll base\n"); 448 + DRM_DEV_ERROR(dev, "failed to map pll base\n"); 449 449 return -ENOMEM; 450 450 } 451 451 ··· 454 454 455 455 clk = devm_clk_register(dev, &pll->clk_hw); 456 456 if (IS_ERR(clk)) { 457 - dev_err(dev, "failed to register pll clock\n"); 457 + DRM_DEV_ERROR(dev, "failed to register pll clock\n"); 458 458 return -EINVAL; 459 459 } 460 460
+3 -3
drivers/gpu/drm/msm/msm_debugfs.c
··· 194 194 195 195 ret = msm_rd_debugfs_init(minor); 196 196 if (ret) { 197 - dev_err(minor->dev->dev, "could not install rd debugfs\n"); 197 + DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n"); 198 198 return ret; 199 199 } 200 200 201 201 ret = msm_perf_debugfs_init(minor); 202 202 if (ret) { 203 - dev_err(minor->dev->dev, "could not install perf debugfs\n"); 203 + DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n"); 204 204 return ret; 205 205 } 206 206 ··· 228 228 minor->debugfs_root, minor); 229 229 230 230 if (ret) { 231 - dev_err(dev->dev, "could not install msm_debugfs_list\n"); 231 + DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n"); 232 232 return ret; 233 233 } 234 234
+14 -13
drivers/gpu/drm/msm/msm_drv.c
··· 170 170 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 171 171 172 172 if (!res) { 173 - dev_err(&pdev->dev, "failed to get memory resource: %s\n", name); 173 + DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name); 174 174 return ERR_PTR(-EINVAL); 175 175 } 176 176 ··· 178 178 179 179 ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); 180 180 if (!ptr) { 181 - dev_err(&pdev->dev, "failed to ioremap: %s\n", name); 181 + DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name); 182 182 return ERR_PTR(-ENOMEM); 183 183 } 184 184 ··· 419 419 p = dma_alloc_attrs(dev->dev, size, 420 420 &priv->vram.paddr, GFP_KERNEL, attrs); 421 421 if (!p) { 422 - dev_err(dev->dev, "failed to allocate VRAM\n"); 422 + DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n"); 423 423 priv->vram.paddr = 0; 424 424 return -ENOMEM; 425 425 } 426 426 427 - dev_info(dev->dev, "VRAM: %08x->%08x\n", 427 + DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n", 428 428 (uint32_t)priv->vram.paddr, 429 429 (uint32_t)(priv->vram.paddr + size)); 430 430 } ··· 444 444 445 445 ddev = drm_dev_alloc(drv, dev); 446 446 if (IS_ERR(ddev)) { 447 - dev_err(dev, "failed to allocate drm_device\n"); 447 + DRM_DEV_ERROR(dev, "failed to allocate drm_device\n"); 448 448 return PTR_ERR(ddev); 449 449 } 450 450 ··· 519 519 * and (for example) use dmabuf/prime to share buffers with 520 520 * imx drm driver on iMX5 521 521 */ 522 - dev_err(dev, "failed to load kms\n"); 522 + DRM_DEV_ERROR(dev, "failed to load kms\n"); 523 523 ret = PTR_ERR(kms); 524 524 goto err_msm_uninit; 525 525 } ··· 530 530 if (kms) { 531 531 ret = kms->funcs->hw_init(kms); 532 532 if (ret) { 533 - dev_err(dev, "kms hw init failed: %d\n", ret); 533 + DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret); 534 534 goto err_msm_uninit; 535 535 } 536 536 } ··· 561 561 ret); 562 562 563 563 if (IS_ERR(priv->disp_thread[i].thread)) { 564 - dev_err(dev, "failed to create crtc_commit kthread\n"); 564 + DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n"); 565 565 priv->disp_thread[i].thread = NULL; 566 566 } 567 567 ··· 573 573 kthread_run(kthread_worker_fn, 574 574 &priv->event_thread[i].worker, 575 575 "crtc_event:%d", priv->event_thread[i].crtc_id); 576 + 576 577 /** 577 578 * event thread should also run at same priority as disp_thread 578 579 * because it is handling frame_done events. 
A lower priority ··· 614 613 615 614 ret = drm_vblank_init(ddev, priv->num_crtcs); 616 615 if (ret < 0) { 617 - dev_err(dev, "failed to initialize vblank\n"); 616 + DRM_DEV_ERROR(dev, "failed to initialize vblank\n"); 618 617 goto err_msm_uninit; 619 618 } 620 619 ··· 623 622 ret = drm_irq_install(ddev, kms->irq); 624 623 pm_runtime_put_sync(dev); 625 624 if (ret < 0) { 626 - dev_err(dev, "failed to install IRQ handler\n"); 625 + DRM_DEV_ERROR(dev, "failed to install IRQ handler\n"); 627 626 goto err_msm_uninit; 628 627 } 629 628 } ··· 1183 1182 1184 1183 ret = of_graph_parse_endpoint(ep_node, &ep); 1185 1184 if (ret) { 1186 - dev_err(mdp_dev, "unable to parse port endpoint\n"); 1185 + DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n"); 1187 1186 of_node_put(ep_node); 1188 1187 return ret; 1189 1188 } ··· 1234 1233 of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) { 1235 1234 ret = of_platform_populate(dev->of_node, NULL, NULL, dev); 1236 1235 if (ret) { 1237 - dev_err(dev, "failed to populate children devices\n"); 1236 + DRM_DEV_ERROR(dev, "failed to populate children devices\n"); 1238 1237 return ret; 1239 1238 } 1240 1239 1241 1240 mdp_dev = device_find_child(dev, NULL, compare_name_mdp); 1242 1241 if (!mdp_dev) { 1243 - dev_err(dev, "failed to find MDSS MDP node\n"); 1242 + DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n"); 1244 1243 of_platform_depopulate(dev); 1245 1244 return -ENODEV; 1246 1245 }
+4 -4
drivers/gpu/drm/msm/msm_fb.c
··· 154 154 format = kms->funcs->get_format(kms, mode_cmd->pixel_format, 155 155 mode_cmd->modifier[0]); 156 156 if (!format) { 157 - dev_err(dev->dev, "unsupported pixel format: %4.4s\n", 157 + DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n", 158 158 (char *)&mode_cmd->pixel_format); 159 159 ret = -EINVAL; 160 160 goto fail; ··· 196 196 197 197 ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs); 198 198 if (ret) { 199 - dev_err(dev->dev, "framebuffer init failed: %d\n", ret); 199 + DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret); 200 200 goto fail; 201 201 } 202 202 ··· 233 233 bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC); 234 234 } 235 235 if (IS_ERR(bo)) { 236 - dev_err(dev->dev, "failed to allocate buffer object\n"); 236 + DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n"); 237 237 return ERR_CAST(bo); 238 238 } 239 239 240 240 fb = msm_framebuffer_init(dev, &mode_cmd, &bo); 241 241 if (IS_ERR(fb)) { 242 - dev_err(dev->dev, "failed to allocate fb\n"); 242 + DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n"); 243 243 /* note: if fb creation failed, we can't rely on fb destroy 244 244 * to unref the bo: 245 245 */
+4 -4
drivers/gpu/drm/msm/msm_fbdev.c
··· 91 91 sizes->surface_height, pitch, format); 92 92 93 93 if (IS_ERR(fb)) { 94 - dev_err(dev->dev, "failed to allocate fb\n"); 94 + DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n"); 95 95 return PTR_ERR(fb); 96 96 } 97 97 ··· 106 106 */ 107 107 ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr); 108 108 if (ret) { 109 - dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); 109 + DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret); 110 110 goto fail_unlock; 111 111 } 112 112 113 113 fbi = drm_fb_helper_alloc_fbi(helper); 114 114 if (IS_ERR(fbi)) { 115 - dev_err(dev->dev, "failed to allocate fb info\n"); 115 + DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n"); 116 116 ret = PTR_ERR(fbi); 117 117 goto fail_unlock; 118 118 } ··· 176 176 177 177 ret = drm_fb_helper_init(dev, helper, priv->num_connectors); 178 178 if (ret) { 179 - dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret); 179 + DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret); 180 180 goto fail; 181 181 } 182 182
+6 -6
drivers/gpu/drm/msm/msm_gem.c
··· 88 88 p = get_pages_vram(obj, npages); 89 89 90 90 if (IS_ERR(p)) { 91 - dev_err(dev->dev, "could not get pages: %ld\n", 91 + DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n", 92 92 PTR_ERR(p)); 93 93 return p; 94 94 } ··· 99 99 if (IS_ERR(msm_obj->sgt)) { 100 100 void *ptr = ERR_CAST(msm_obj->sgt); 101 101 102 - dev_err(dev->dev, "failed to allocate sgt\n"); 102 + DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n"); 103 103 msm_obj->sgt = NULL; 104 104 return ptr; 105 105 } ··· 280 280 ret = drm_gem_create_mmap_offset(obj); 281 281 282 282 if (ret) { 283 - dev_err(dev->dev, "could not allocate mmap offset\n"); 283 + DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n"); 284 284 return 0; 285 285 } 286 286 ··· 473 473 mutex_lock(&msm_obj->lock); 474 474 475 475 if (WARN_ON(msm_obj->madv > madv)) { 476 - dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n", 476 + DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", 477 477 msm_obj->madv, madv); 478 478 mutex_unlock(&msm_obj->lock); 479 479 return ERR_PTR(-EBUSY); ··· 864 864 case MSM_BO_WC: 865 865 break; 866 866 default: 867 - dev_err(dev->dev, "invalid cache flag: %x\n", 867 + DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", 868 868 (flags & MSM_BO_CACHE_MASK)); 869 869 return -EINVAL; 870 870 } ··· 990 990 991 991 /* if we don't have IOMMU, don't bother pretending we can import: */ 992 992 if (!iommu_present(&platform_bus_type)) { 993 - dev_err(dev->dev, "cannot import without IOMMU\n"); 993 + DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n"); 994 994 return ERR_PTR(-EINVAL); 995 995 } 996 996
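Also worth noting in msm_gem.c: the madv check keeps its WARN_ON() even though the message moved to DRM_DEV_ERROR(). The macro swap changes only how the line is printed; it does not add a backtrace, so WARN_ON() still supplies that. Roughly (check_madv is a hypothetical condensation of the code above):

```c
#include <linux/bug.h>
#include <linux/errno.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

/* WARN_ON() provides the backtrace; DRM_DEV_ERROR() only the message. */
static int check_madv(struct drm_gem_object *obj, unsigned int cur,
		      unsigned int madv)
{
	if (WARN_ON(cur > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			      cur, madv);
		return -EBUSY;
	}
	return 0;
}
```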
+15 -15
drivers/gpu/drm/msm/msm_gpu.c
··· 107 107 &msm_devfreq_profile, "simple_ondemand", NULL); 108 108 109 109 if (IS_ERR(gpu->devfreq.devfreq)) { 110 - dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); 110 + DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); 111 111 gpu->devfreq.devfreq = NULL; 112 112 } 113 113 ··· 122 122 if (gpu->gpu_reg) { 123 123 ret = regulator_enable(gpu->gpu_reg); 124 124 if (ret) { 125 - dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret); 125 + DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret); 126 126 return ret; 127 127 } 128 128 } ··· 130 130 if (gpu->gpu_cx) { 131 131 ret = regulator_enable(gpu->gpu_cx); 132 132 if (ret) { 133 - dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret); 133 + DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret); 134 134 return ret; 135 135 } 136 136 } ··· 428 428 429 429 mutex_lock(&dev->struct_mutex); 430 430 431 - dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); 431 + DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name); 432 432 433 433 submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); 434 434 if (submit) { ··· 456 456 rcu_read_unlock(); 457 457 458 458 if (comm && cmd) { 459 - dev_err(dev->dev, "%s: offending task: %s (%s)\n", 459 + DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n", 460 460 gpu->name, comm, cmd); 461 461 462 462 msm_rd_dump_submit(priv->hangrd, submit, ··· 539 539 } else if (fence < ring->seqno) { 540 540 /* no progress and not done.. hung! */ 541 541 ring->hangcheck_fence = fence; 542 - dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", 542 + DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", 543 543 gpu->name, ring->id); 544 - dev_err(dev->dev, "%s: completed fence: %u\n", 544 + DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n", 545 545 gpu->name, fence); 546 - dev_err(dev->dev, "%s: submitted fence: %u\n", 546 + DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n", 547 547 gpu->name, ring->seqno); 548 548 549 549 queue_work(priv->wq, &gpu->recover_work); ··· 816 816 iommu->geometry.aperture_start = va_start; 817 817 iommu->geometry.aperture_end = va_end; 818 818 819 - dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name); 819 + DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name); 820 820 821 821 aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu"); 822 822 if (IS_ERR(aspace)) { 823 - dev_err(gpu->dev->dev, "failed to init iommu: %ld\n", 823 + DRM_DEV_ERROR(gpu->dev->dev, "failed to init iommu: %ld\n", 824 824 PTR_ERR(aspace)); 825 825 iommu_domain_free(iommu); 826 826 return ERR_CAST(aspace); ··· 871 871 gpu->irq = platform_get_irq_byname(pdev, config->irqname); 872 872 if (gpu->irq < 0) { 873 873 ret = gpu->irq; 874 - dev_err(drm->dev, "failed to get irq: %d\n", ret); 874 + DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret); 875 875 goto fail; 876 876 } 877 877 878 878 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 879 879 IRQF_TRIGGER_HIGH, gpu->name, gpu); 880 880 if (ret) { 881 - dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret); 881 + DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret); 882 882 goto fail; 883 883 } 884 884 ··· 911 911 config->va_start, config->va_end); 912 912 913 913 if (gpu->aspace == NULL) 914 - dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); 914 + DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); 915 915 else if (IS_ERR(gpu->aspace)) { 916 916 ret = 
PTR_ERR(gpu->aspace); 917 917 goto fail; ··· 923 923 924 924 if (IS_ERR(memptrs)) { 925 925 ret = PTR_ERR(memptrs); 926 - dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); 926 + DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret); 927 927 goto fail; 928 928 } 929 929 ··· 939 939 940 940 if (IS_ERR(gpu->rb[i])) { 941 941 ret = PTR_ERR(gpu->rb[i]); 942 - dev_err(drm->dev, 942 + DRM_DEV_ERROR(drm->dev, 943 943 "could not create ringbuffer %d: %d\n", i, ret); 944 944 goto fail; 945 945 }
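For readers comparing dmesg output before and after: dev_err() prints the usual "driver device:" prefix, while DRM_DEV_* routes through drm's printer, which also tags the calling function and, for errors, an *ERROR* marker. An illustrative sketch of the resulting shape (not captured output; the device name is invented, and the exact prefix is whatever drm_dev_printk() produces on a given kernel):

```c
/*
 * before: msm ae01000.mdss: failed to load kms
 * after:  msm ae01000.mdss: [drm:msm_drm_init] *ERROR* failed to load kms
 */
```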