Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux into drm-next

A few more patches for 4.8. Mostly bug fixes and some prep work
for iceland powerplay support. I have a couple of polaris patches and
Edward's misc cleanups that require a merge with Linus' tree; I don't know
if you are planning a merge anytime soon.

[airlied: fixed up endian vs 32-bit change in ppatomctrl]

* 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux: (26 commits)
drm/amdgpu: comment out unused defaults_bonaire_pro static const structures to fix the build
drm/amdgpu: temporary comment out unused static const structures to fix the build
drm/amdgpu: S3 resume fail on Polaris10
drm/amd/powerplay: add pp_tables_get_response_times function in process pptables
drm/amd/powerplay: fix the incorrect return value
drm/amd/powerplay: add atomctrl_get_voltage_evv function in ppatomctrl
drm/amdgpu: add new definitions into ppsmc.h for iceland
drm/amd/powerplay: add SMU register macro for future use
drm/amdgpu: add ucode_start_address into cgs_firmware_info
drm/amdgpu: no need load microcode at sdma if powerplay is enabled
drm/amdgpu: rename smumgr to smum for dpm
drm/amdgpu: disable GFX PG on CZ/BR/ST
drivers: gpu: drm: amd: powerplay: hwmgr: Remove unused variable
drm/amdgpu: return -ENOSPC when running out of UVD handles
drm/amdgpu: trace need_flush in grab_vm as well
drm/amdgpu: always signal all fences
drm/amdgpu: check flush fence context instead of same ring v2
drm/radeon: support backlight control for UNIPHY3
drm/amdgpu: support backlight control for UNIPHY3
drm/amdgpu: remove usec timeout loop from IB tests
...

+468 -359
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
···
 
     if (!adev->pm.fw) {
         switch (adev->asic_type) {
+        case CHIP_TOPAZ:
+            strcpy(fw_name, "amdgpu/topaz_smc.bin");
+            break;
         case CHIP_TONGA:
             strcpy(fw_name, "amdgpu/tonga_smc.bin");
             break;
···
         info->version = adev->pm.fw_version;
         info->image_size = ucode_size;
+        info->ucode_start_address = ucode_start_address;
         info->kptr = (void *)src;
     }
     return 0;
+13 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
···
     if (seq != ring->fence_drv.sync_seq)
         amdgpu_fence_schedule_fallback(ring);
 
-    while (last_seq != seq) {
+    if (unlikely(seq == last_seq))
+        return;
+
+    last_seq &= drv->num_fences_mask;
+    seq &= drv->num_fences_mask;
+
+    do {
         struct fence *fence, **ptr;
 
-        ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+        ++last_seq;
+        last_seq &= drv->num_fences_mask;
+        ptr = &drv->fences[last_seq];
 
         /* There is always exactly one thread signaling this fence slot */
         fence = rcu_dereference_protected(*ptr, 1);
         RCU_INIT_POINTER(*ptr, NULL);
 
-        BUG_ON(!fence);
+        if (!fence)
+            continue;
 
         r = fence_signal(fence);
         if (!r)
···
             BUG();
 
         fence_put(fence);
-    }
+    } while (last_seq != seq);
 }
 
 /**
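The new loop above masks both sequence numbers into the power-of-two fence slot array, walks every slot in between, and skips empty slots instead of hitting a BUG_ON(), so every emitted fence gets signalled. For illustration only, here is a minimal userspace sketch of that slot walk; NUM_SLOTS, slot[] and process_fences() are invented names, not amdgpu code.

    /*
     * Sequence numbers grow monotonically but are stored in a power-of-two
     * array of slots; walking (last_seq, seq] with a mask visits each slot
     * at most once.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define NUM_SLOTS 8u                 /* must be a power of two */
    #define SLOT_MASK (NUM_SLOTS - 1u)

    static const char *slot[NUM_SLOTS];  /* NULL means "no fence in this slot" */

    static void process_fences(uint32_t last_seq, uint32_t seq)
    {
        if (seq == last_seq)             /* nothing new, bail out early */
            return;

        last_seq &= SLOT_MASK;
        seq &= SLOT_MASK;

        do {                             /* walk every slot between the two marks */
            last_seq = (last_seq + 1) & SLOT_MASK;
            if (!slot[last_seq])         /* empty slot: skip instead of asserting */
                continue;
            printf("signal fence in slot %u: %s\n", last_seq, slot[last_seq]);
            slot[last_seq] = NULL;
        } while (last_seq != seq);
    }

    int main(void)
    {
        slot[3] = "fence A";
        slot[5] = "fence B";
        process_fences(2, 6);            /* visits slots 3..6, skipping empty ones */
        return 0;
    }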
+1 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
···
     bool skip_preamble, need_ctx_switch;
     unsigned patch_offset = ~0;
     struct amdgpu_vm *vm;
-    struct fence *hwf;
     uint64_t ctx;
 
     unsigned i;
···
     if (ring->funcs->emit_hdp_invalidate)
         amdgpu_ring_emit_hdp_invalidate(ring);
 
-    r = amdgpu_fence_emit(ring, &hwf);
+    r = amdgpu_fence_emit(ring, f);
     if (r) {
         dev_err(adev->dev, "failed to emit fence (%d)\n", r);
         if (job && job->vm_id)
···
         amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
                                AMDGPU_FENCE_FLAG_64BIT);
     }
-
-    if (f)
-        *f = fence_get(hwf);
 
     if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
         amdgpu_ring_patch_cond_exec(ring, patch_offset);
+3 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
···
     trace_amdgpu_sched_run_job(job);
     r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
                            job->sync.last_vm_update, job, &fence);
-    if (r) {
+    if (r)
         DRM_ERROR("Error scheduling IBs (%d)\n", r);
-        goto err;
-    }
 
-err:
     /* if gpu reset, hw fence will be replaced here */
     fence_put(job->fence);
-    job->fence = fence;
+    job->fence = fence_get(fence);
+    amdgpu_job_free_resources(job);
     return fence;
 }
 
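job->fence now takes its own reference with fence_get() before the scheduler's copy can be replaced on GPU reset. As a rough illustration of the underlying rule (every long-lived pointer owns one reference and drops it independently), here is a small userspace sketch; struct obj, obj_get() and obj_put() are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
        const char *name;
    };

    static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0) {        /* last reference frees the object */
            printf("freeing %s\n", o->name);
            free(o);
        }
    }

    int main(void)
    {
        struct obj *fence = malloc(sizeof(*fence));
        struct obj *job_fence;

        fence->refcount = 1;             /* reference owned by the creator */
        fence->name = "hw fence";

        job_fence = obj_get(fence);      /* the "job" keeps its own reference */

        obj_put(fence);                  /* creator drops its reference ...   */
        obj_put(job_fence);              /* ... object is freed only here     */
        return 0;
    }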
+9 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
···
 
 
 TRACE_EVENT(amdgpu_vm_grab_id,
-        TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
-             uint64_t pd_addr),
-        TP_ARGS(vm, ring, vmid, pd_addr),
+        TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+        TP_ARGS(vm, ring, job),
         TP_STRUCT__entry(
                  __field(struct amdgpu_vm *, vm)
                  __field(u32, ring)
                  __field(u32, vmid)
                  __field(u64, pd_addr)
+                 __field(u32, needs_flush)
                  ),
 
         TP_fast_assign(
                __entry->vm = vm;
                __entry->ring = ring;
-               __entry->vmid = vmid;
-               __entry->pd_addr = pd_addr;
+               __entry->vmid = job->vm_id;
+               __entry->pd_addr = job->vm_pd_addr;
+               __entry->needs_flush = job->vm_needs_flush;
                ),
-        TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
-              __entry->ring, __entry->vmid, __entry->pd_addr)
+        TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
+              __entry->vm, __entry->ring, __entry->vmid,
+              __entry->pd_addr, __entry->needs_flush)
 );
 
 TRACE_EVENT(amdgpu_vm_bo_map,
+5 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
···
 #include "uvd/uvd_4_2_d.h"
 
 /* 1 second timeout */
-#define UVD_IDLE_TIMEOUT_MS    1000
+#define UVD_IDLE_TIMEOUT    msecs_to_jiffies(1000)
 /* Polaris10/11 firmware version */
 #define FW_1_66_16    ((1 << 24) | (66 << 16) | (16 << 8))
 
···
         }
 
         DRM_ERROR("No more free UVD handles!\n");
-        return -EINVAL;
+        return -ENOSPC;
 
     case 1:
         /* it's a decode msg, calc buffer sizes */
···
 
     if (direct) {
         r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-        job->fence = f;
+        job->fence = fence_get(f);
         if (r)
             goto err_free;
 
···
             amdgpu_asic_set_uvd_clocks(adev, 0, 0);
         }
     } else {
-        schedule_delayed_work(&adev->uvd.idle_work,
-                      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+        schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
     }
 }
 
···
 {
     bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
     set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
-                        msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+                        UVD_IDLE_TIMEOUT);
 
     if (set_clocks) {
         if (adev->pm.dpm_enabled) {
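Two small behaviour changes above: the idle timeout is now precomputed in jiffies, and a full handle table is reported as -ENOSPC instead of -EINVAL, separating "no free slot" from "bad argument". A standalone sketch of the second convention, with invented names (MAX_HANDLES, alloc_handle()):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define MAX_HANDLES 4

    static uint32_t handles[MAX_HANDLES];   /* 0 means "slot is free" */

    static int alloc_handle(uint32_t handle)
    {
        int i;

        if (handle == 0)
            return -EINVAL;                 /* malformed request */

        for (i = 0; i < MAX_HANDLES; ++i) {
            if (handles[i] == 0) {
                handles[i] = handle;
                return i;                   /* session index */
            }
        }
        return -ENOSPC;                     /* no free slot left */
    }

    int main(void)
    {
        for (uint32_t h = 1; h <= 5; ++h)
            printf("handle %u -> %d\n", h, alloc_handle(h));
        return 0;
    }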
+50 -54
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 36 36 #include "cikd.h" 37 37 38 38 /* 1 second timeout */ 39 - #define VCE_IDLE_TIMEOUT_MS 1000 39 + #define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000) 40 40 41 41 /* Firmware Names */ 42 42 #ifdef CONFIG_DRM_AMDGPU_CIK ··· 310 310 amdgpu_asic_set_vce_clocks(adev, 0, 0); 311 311 } 312 312 } else { 313 - schedule_delayed_work(&adev->vce.idle_work, 314 - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); 313 + schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); 315 314 } 316 315 } 317 316 ··· 323 324 */ 324 325 static void amdgpu_vce_note_usage(struct amdgpu_device *adev) 325 326 { 326 - bool streams_changed = false; 327 327 bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); 328 + 328 329 set_clocks &= schedule_delayed_work(&adev->vce.idle_work, 329 - msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)); 330 + VCE_IDLE_TIMEOUT); 330 331 331 - if (adev->pm.dpm_enabled) { 332 - /* XXX figure out if the streams changed */ 333 - streams_changed = false; 334 - } 335 - 336 - if (set_clocks || streams_changed) { 332 + if (set_clocks) { 337 333 if (adev->pm.dpm_enabled) { 338 334 amdgpu_dpm_enable_vce(adev, true); 339 335 } else { ··· 351 357 int i, r; 352 358 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { 353 359 uint32_t handle = atomic_read(&adev->vce.handles[i]); 360 + 354 361 if (!handle || adev->vce.filp[i] != filp) 355 362 continue; 356 363 ··· 432 437 ib->ptr[i] = 0x0; 433 438 434 439 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); 435 - job->fence = f; 440 + job->fence = fence_get(f); 436 441 if (r) 437 442 goto err; 438 443 ··· 494 499 495 500 if (direct) { 496 501 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); 497 - job->fence = f; 502 + job->fence = fence_get(f); 498 503 if (r) 499 504 goto err; 500 505 ··· 575 580 * we we don't have another free session index. 
576 581 */ 577 582 static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, 578 - uint32_t handle, bool *allocated) 583 + uint32_t handle, uint32_t *allocated) 579 584 { 580 585 unsigned i; 581 - 582 - *allocated = false; 583 586 584 587 /* validate the handle */ 585 588 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { ··· 595 602 if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { 596 603 p->adev->vce.filp[i] = p->filp; 597 604 p->adev->vce.img_size[i] = 0; 598 - *allocated = true; 605 + *allocated |= 1 << i; 599 606 return i; 600 607 } 601 608 } ··· 615 622 struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; 616 623 unsigned fb_idx = 0, bs_idx = 0; 617 624 int session_idx = -1; 618 - bool destroyed = false; 619 - bool created = false; 620 - bool allocated = false; 625 + uint32_t destroyed = 0; 626 + uint32_t created = 0; 627 + uint32_t allocated = 0; 621 628 uint32_t tmp, handle = 0; 622 629 uint32_t *size = &tmp; 623 630 int i, r = 0, idx = 0; ··· 634 641 goto out; 635 642 } 636 643 637 - if (destroyed) { 638 - DRM_ERROR("No other command allowed after destroy!\n"); 639 - r = -EINVAL; 640 - goto out; 641 - } 642 - 643 644 switch (cmd) { 644 - case 0x00000001: // session 645 + case 0x00000001: /* session */ 645 646 handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); 646 647 session_idx = amdgpu_vce_validate_handle(p, handle, 647 648 &allocated); 648 - if (session_idx < 0) 649 - return session_idx; 649 + if (session_idx < 0) { 650 + r = session_idx; 651 + goto out; 652 + } 650 653 size = &p->adev->vce.img_size[session_idx]; 651 654 break; 652 655 653 - case 0x00000002: // task info 656 + case 0x00000002: /* task info */ 654 657 fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6); 655 658 bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7); 656 659 break; 657 660 658 - case 0x01000001: // create 659 - created = true; 660 - if (!allocated) { 661 + case 0x01000001: /* create */ 662 + created |= 1 << session_idx; 663 + if (destroyed & (1 << session_idx)) { 664 + destroyed &= ~(1 << session_idx); 665 + allocated |= 1 << session_idx; 666 + 667 + } else if (!(allocated & (1 << session_idx))) { 661 668 DRM_ERROR("Handle already in use!\n"); 662 669 r = -EINVAL; 663 670 goto out; ··· 668 675 8 * 3 / 2; 669 676 break; 670 677 671 - case 0x04000001: // config extension 672 - case 0x04000002: // pic control 673 - case 0x04000005: // rate control 674 - case 0x04000007: // motion estimation 675 - case 0x04000008: // rdo 676 - case 0x04000009: // vui 677 - case 0x05000002: // auxiliary buffer 678 + case 0x04000001: /* config extension */ 679 + case 0x04000002: /* pic control */ 680 + case 0x04000005: /* rate control */ 681 + case 0x04000007: /* motion estimation */ 682 + case 0x04000008: /* rdo */ 683 + case 0x04000009: /* vui */ 684 + case 0x05000002: /* auxiliary buffer */ 678 685 break; 679 686 680 - case 0x03000001: // encode 687 + case 0x03000001: /* encode */ 681 688 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9, 682 689 *size, 0); 683 690 if (r) ··· 689 696 goto out; 690 697 break; 691 698 692 - case 0x02000001: // destroy 693 - destroyed = true; 699 + case 0x02000001: /* destroy */ 700 + destroyed |= 1 << session_idx; 694 701 break; 695 702 696 - case 0x05000001: // context buffer 703 + case 0x05000001: /* context buffer */ 697 704 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 698 705 *size * 2, 0); 699 706 if (r) 700 707 goto out; 701 708 break; 702 709 703 - case 0x05000004: // video bitstream buffer 710 + case 0x05000004: /* video bitstream buffer */ 704 711 tmp = 
amdgpu_get_ib_value(p, ib_idx, idx + 4); 705 712 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 706 713 tmp, bs_idx); ··· 708 715 goto out; 709 716 break; 710 717 711 - case 0x05000005: // feedback buffer 718 + case 0x05000005: /* feedback buffer */ 712 719 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 713 720 4096, fb_idx); 714 721 if (r) ··· 730 737 idx += len / 4; 731 738 } 732 739 733 - if (allocated && !created) { 740 + if (allocated & ~created) { 734 741 DRM_ERROR("New session without create command!\n"); 735 742 r = -ENOENT; 736 743 } 737 744 738 745 out: 739 - if ((!r && destroyed) || (r && allocated)) { 740 - /* 741 - * IB contains a destroy msg or we have allocated an 742 - * handle and got an error, anyway free the handle 743 - */ 744 - for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) 745 - atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0); 746 + if (!r) { 747 + /* No error, free all destroyed handle slots */ 748 + tmp = destroyed; 749 + } else { 750 + /* Error during parsing, free all allocated handle slots */ 751 + tmp = allocated; 746 752 } 753 + 754 + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) 755 + if (tmp & (1 << i)) 756 + atomic_set(&p->adev->vce.handles[i], 0); 747 757 748 758 return r; 749 759 }
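The VCE command-stream parser above switches from single booleans to per-session bitmasks for allocated, created and destroyed handles, so a destroy followed by a create in the same IB is tracked per slot and cleanup frees exactly the affected slots. A minimal userspace sketch of that bookkeeping (all names invented, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t allocated = 0, created = 0, destroyed = 0;
        int i;

        allocated |= 1 << 2;          /* slot 2 allocated by this stream      */
        created   |= 1 << 2;          /* ... and a create command was seen    */
        destroyed |= 1 << 5;          /* slot 5 destroyed by this stream      */

        /* "new session without create" check: allocated slots never created */
        if (allocated & ~created)
            printf("error: allocated slot without a create command\n");

        /* on success, free only the destroyed slots */
        for (i = 0; i < 32; ++i)
            if (destroyed & (1u << i))
                printf("freeing session slot %d\n", i);

        return 0;
    }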
+8 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
···
                   struct amdgpu_job *job)
 {
     struct amdgpu_device *adev = ring->adev;
+    uint64_t fence_context = adev->fence_context + ring->idx;
     struct fence *updates = sync->last_vm_update;
     struct amdgpu_vm_id *id, *idle;
     struct fence **fences;
···
     i = ring->idx;
     do {
         struct fence *flushed;
-        bool same_ring = ring->idx == i;
 
         id = vm->ids[i++];
         if (i == AMDGPU_MAX_RINGS)
···
         if (job->vm_pd_addr != id->pd_gpu_addr)
             continue;
 
-        if (!same_ring &&
-            (!id->last_flush || !fence_is_signaled(id->last_flush)))
+        if (!id->last_flush)
+            continue;
+
+        if (id->last_flush->context != fence_context &&
+            !fence_is_signaled(id->last_flush))
             continue;
 
         flushed = id->flushed_updates;
···
 
         job->vm_id = id - adev->vm_manager.ids;
         job->vm_needs_flush = false;
-        trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
+        trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
         mutex_unlock(&adev->vm_manager.lock);
         return 0;
···
     vm->ids[ring->idx] = id;
 
     job->vm_id = id - adev->vm_manager.ids;
-    trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
+    trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
 error:
     mutex_unlock(&adev->vm_manager.lock);
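Instead of remembering whether the VM ID was last used on the same ring, the code above compares the context of the ID's last flush fence with the fence context this ring would use; matching contexts mean the flush came from this ring and ordering is implicit. A small sketch of that comparison, using an invented struct sketch_fence rather than the kernel's fence type:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct sketch_fence {
        uint64_t context;     /* which submission context produced it */
        bool     signaled;
    };

    /* Reuse is fine if the old flush came from our context or already finished. */
    static bool can_reuse_vmid(const struct sketch_fence *last_flush,
                               uint64_t my_fence_context)
    {
        if (!last_flush)
            return false;
        if (last_flush->context == my_fence_context)
            return true;
        return last_flush->signaled;
    }

    int main(void)
    {
        struct sketch_fence flush = { .context = 7, .signaled = false };

        printf("same context:  %d\n", can_reuse_vmid(&flush, 7));   /* 1 */
        printf("other context: %d\n", can_reuse_vmid(&flush, 3));   /* 0 */
        flush.signaled = true;
        printf("signaled:      %d\n", can_reuse_vmid(&flush, 3));   /* 1 */
        return 0;
    }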
+1
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
···
     case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
     case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
     case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+    case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
         if (dig->backlight_level == 0)
             amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
                     ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+2
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
···
     { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
 };
 
+#if 0
 static const struct ci_pt_defaults defaults_bonaire_pro =
 {
     1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
     { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
     { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
 };
+#endif
 
 static const struct ci_pt_defaults defaults_saturn_xt =
 {
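Wrapping the currently unused table in #if 0 keeps the data in the tree for the pending powerplay work while silencing unused-const-variable warnings. A trivial sketch of the pattern (the table name below is invented):

    #include <stdio.h>

    #if 0
    static const int future_tuning_table[] = { 1, 2, 3 };   /* kept for later use */
    #endif

    int main(void)
    {
        printf("table compiled out, no unused-variable warning\n");
        return 0;
    }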
+5 -13
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
···
     struct amdgpu_device *adev = ring->adev;
     struct amdgpu_ib ib;
     struct fence *f = NULL;
-    unsigned i;
     unsigned index;
     int r;
     u32 tmp = 0;
···
         goto err0;
     }
 
-    ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+    ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+                SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
     ib.ptr[1] = lower_32_bits(gpu_addr);
     ib.ptr[2] = upper_32_bits(gpu_addr);
     ib.ptr[3] = 1;
···
         DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
         goto err1;
     }
-    for (i = 0; i < adev->usec_timeout; i++) {
-        tmp = le32_to_cpu(adev->wb.wb[index]);
-        if (tmp == 0xDEADBEEF)
-            break;
-        DRM_UDELAY(1);
-    }
-    if (i < adev->usec_timeout) {
-        DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-             ring->idx, i);
-        goto err1;
+    tmp = le32_to_cpu(adev->wb.wb[index]);
+    if (tmp == 0xDEADBEEF) {
+        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
     } else {
         DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
         r = -EINVAL;
     }
 
 err1:
-    fence_put(f);
     amdgpu_ib_free(adev, &ib, NULL);
     fence_put(f);
 err0:
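This is the "remove usec timeout loop from IB tests" change: once fence_wait() has returned successfully, the value written by the test packet is already visible, so a single readback replaces the polling loop (the same rewrite is applied to the gfx and sdma IB tests below). A minimal sketch of the wait-then-check-once pattern, with invented stand-ins for the fence wait and the GPU write:

    #include <stdio.h>
    #include <stdint.h>

    static volatile uint32_t writeback;                   /* the GPU would write this */

    static void do_work(void)             { writeback = 0xDEADBEEF; }
    static int  wait_for_completion(void) { do_work(); return 0; }  /* 0 = signaled */

    int main(void)
    {
        if (wait_for_completion()) {
            fprintf(stderr, "fence wait failed\n");
            return 1;
        }

        /* completion is ordered before the wait returns: one check suffices */
        if (writeback == 0xDEADBEEF)
            printf("ib test succeeded\n");
        else
            printf("ib test failed (0x%08X)\n", (unsigned)writeback);

        return 0;
    }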
+3 -12
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
···
     struct fence *f = NULL;
     uint32_t scratch;
     uint32_t tmp = 0;
-    unsigned i;
     int r;
 
     r = amdgpu_gfx_scratch_get(adev, &scratch);
···
         DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
         goto err2;
     }
-    for (i = 0; i < adev->usec_timeout; i++) {
-        tmp = RREG32(scratch);
-        if (tmp == 0xDEADBEEF)
-            break;
-        DRM_UDELAY(1);
-    }
-    if (i < adev->usec_timeout) {
-        DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-             ring->idx, i);
-        goto err2;
+    tmp = RREG32(scratch);
+    if (tmp == 0xDEADBEEF) {
+        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
     } else {
         DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
               scratch, tmp);
···
     }
 
 err2:
-    fence_put(f);
     amdgpu_ib_free(adev, &ib, NULL);
     fence_put(f);
 err1:
+3 -13
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
···
     struct fence *f = NULL;
     uint32_t scratch;
     uint32_t tmp = 0;
-    unsigned i;
     int r;
 
     r = amdgpu_gfx_scratch_get(adev, &scratch);
···
         DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
         goto err2;
     }
-    for (i = 0; i < adev->usec_timeout; i++) {
-        tmp = RREG32(scratch);
-        if (tmp == 0xDEADBEEF)
-            break;
-        DRM_UDELAY(1);
-    }
-    if (i < adev->usec_timeout) {
-        DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-             ring->idx, i);
-        goto err2;
+    tmp = RREG32(scratch);
+    if (tmp == 0xDEADBEEF) {
+        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
     } else {
         DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
               scratch, tmp);
         r = -EINVAL;
     }
 err2:
-    fence_put(f);
     amdgpu_ib_free(adev, &ib, NULL);
     fence_put(f);
 err1:
···
         RREG32(sec_ded_counter_registers[i]);
 
 fail:
-    fence_put(f);
     amdgpu_ib_free(adev, &ib, NULL);
     fence_put(f);
 
+1 -1
drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
···
 #include <linux/firmware.h>
 #include "drmP.h"
 #include "amdgpu.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
 
 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
 
+1 -1
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
···
 #include "drmP.h"
 #include "amdgpu.h"
 #include "ppsmc.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
 #include "smu_ucode_xfer_vi.h"
 #include "amdgpu_ucode.h"
 
drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h → drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+2
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
···
     vid_mapping_table->num_entries = i;
 }
 
+#if 0
 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
 {
     { 0, 4, 1 },
···
 {
     { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
 };
+#endif
 
 static const struct kv_pt_config_reg didt_config_kv[] =
 {
+4
drivers/gpu/drm/amd/amdgpu/ppsmc.h
···
 #define PPSMC_StartFanControl             ((uint8_t)0x5B)
 #define PPSMC_StopFanControl              ((uint8_t)0x5C)
 #define PPSMC_MSG_NoDisplay               ((uint8_t)0x5D)
+#define PPSMC_NoDisplay                   ((uint8_t)0x5D)
 #define PPSMC_MSG_HasDisplay              ((uint8_t)0x5E)
+#define PPSMC_HasDisplay                  ((uint8_t)0x5E)
 #define PPSMC_MSG_UVDPowerOFF             ((uint8_t)0x60)
 #define PPSMC_MSG_UVDPowerON              ((uint8_t)0x61)
 #define PPSMC_MSG_EnableULV               ((uint8_t)0x62)
···
 #define PPSMC_MSG_DisableDTE              ((uint8_t)0x88)
 #define PPSMC_MSG_ThrottleOVRDSCLKDS      ((uint8_t)0x96)
 #define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
 
 /* CI/KV/KB */
 #define PPSMC_MSG_UVDDPM_SetEnabledMask   ((uint16_t) 0x12D)
···
 #define PPSMC_MSG_MASTER_DeepSleep_OFF    ((uint16_t) 0x190)
 #define PPSMC_MSG_Remove_DC_Clamp         ((uint16_t) 0x191)
 #define PPSMC_MSG_SetFanPwmMax            ((uint16_t) 0x19A)
+#define PPSMC_MSG_SetFanRpmMax            ((uint16_t) 0x205)
 
 #define PPSMC_MSG_ENABLE_THERMAL_DPM      ((uint16_t) 0x19C)
 #define PPSMC_MSG_DISABLE_THERMAL_DPM     ((uint16_t) 0x19D)
+18 -25
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
···
 {
     int r;
 
-    if (!adev->firmware.smu_load) {
-        r = sdma_v2_4_load_microcode(adev);
-        if (r)
-            return r;
-    } else {
-        r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                        AMDGPU_UCODE_ID_SDMA0);
-        if (r)
-            return -EINVAL;
-        r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                        AMDGPU_UCODE_ID_SDMA1);
-        if (r)
-            return -EINVAL;
+    if (!adev->pp_enabled) {
+        if (!adev->firmware.smu_load) {
+            r = sdma_v2_4_load_microcode(adev);
+            if (r)
+                return r;
+        } else {
+            r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                            AMDGPU_UCODE_ID_SDMA0);
+            if (r)
+                return -EINVAL;
+            r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                            AMDGPU_UCODE_ID_SDMA1);
+            if (r)
+                return -EINVAL;
+        }
     }
 
     /* halt the engine before programing */
···
     struct amdgpu_device *adev = ring->adev;
     struct amdgpu_ib ib;
     struct fence *f = NULL;
-    unsigned i;
     unsigned index;
     int r;
     u32 tmp = 0;
···
         DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
         goto err1;
     }
-    for (i = 0; i < adev->usec_timeout; i++) {
-        tmp = le32_to_cpu(adev->wb.wb[index]);
-        if (tmp == 0xDEADBEEF)
-            break;
-        DRM_UDELAY(1);
-    }
-    if (i < adev->usec_timeout) {
-        DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-             ring->idx, i);
-        goto err1;
+    tmp = le32_to_cpu(adev->wb.wb[index]);
+    if (tmp == 0xDEADBEEF) {
+        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
     } else {
         DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
         r = -EINVAL;
     }
 
 err1:
-    fence_put(f);
     amdgpu_ib_free(adev, &ib, NULL);
     fence_put(f);
 err0:
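The new outer check skips the driver-side microcode load entirely when powerplay owns firmware loading. A rough userspace sketch of that guard, with invented fields and helpers (not the amdgpu structures):

    #include <stdio.h>
    #include <stdbool.h>

    struct sketch_dev {
        bool pp_enabled;       /* powerplay owns firmware loading */
        bool smu_load;         /* SMU-assisted load available */
    };

    static int load_microcode(void) { printf("driver loads SDMA microcode\n"); return 0; }

    static int sdma_start(struct sketch_dev *dev)
    {
        if (!dev->pp_enabled) {            /* otherwise powerplay did it already */
            if (!dev->smu_load)
                return load_microcode();
            printf("SMU loads SDMA microcode\n");
        } else {
            printf("powerplay already loaded SDMA microcode\n");
        }
        return 0;
    }

    int main(void)
    {
        struct sketch_dev a = { .pp_enabled = false, .smu_load = false };
        struct sketch_dev b = { .pp_enabled = true };

        sdma_start(&a);
        sdma_start(&b);
        return 0;
    }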
+3 -12
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
···
     struct amdgpu_device *adev = ring->adev;
     struct amdgpu_ib ib;
     struct fence *f = NULL;
-    unsigned i;
     unsigned index;
     int r;
     u32 tmp = 0;
···
         DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
         goto err1;
     }
-    for (i = 0; i < adev->usec_timeout; i++) {
-        tmp = le32_to_cpu(adev->wb.wb[index]);
-        if (tmp == 0xDEADBEEF)
-            break;
-        DRM_UDELAY(1);
-    }
-    if (i < adev->usec_timeout) {
-        DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-             ring->idx, i);
-        goto err1;
+    tmp = le32_to_cpu(adev->wb.wb[index]);
+    if (tmp == 0xDEADBEEF) {
+        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
     } else {
         DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
         r = -EINVAL;
     }
 err1:
-    fence_put(f);
     amdgpu_ib_free(adev, &ib, NULL);
     fence_put(f);
 err0:
+96 -45
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 43 43 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 44 44 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 45 45 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 46 + #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 46 47 47 48 #define VCE_V3_0_FW_SIZE (384 * 1024) 48 49 #define VCE_V3_0_STACK_SIZE (64 * 1024) ··· 52 51 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 53 52 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); 54 53 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); 54 + static int vce_v3_0_wait_for_idle(void *handle); 55 55 56 56 /** 57 57 * vce_v3_0_ring_get_rptr - get read pointer ··· 207 205 vce_v3_0_override_vce_clock_gating(adev, false); 208 206 } 209 207 208 + static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) 209 + { 210 + int i, j; 211 + uint32_t status = 0; 212 + 213 + for (i = 0; i < 10; ++i) { 214 + for (j = 0; j < 100; ++j) { 215 + status = RREG32(mmVCE_STATUS); 216 + if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) 217 + return 0; 218 + mdelay(10); 219 + } 220 + 221 + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); 222 + WREG32_P(mmVCE_SOFT_RESET, 223 + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 224 + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 225 + mdelay(10); 226 + WREG32_P(mmVCE_SOFT_RESET, 0, 227 + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 228 + mdelay(10); 229 + } 230 + 231 + return -ETIMEDOUT; 232 + } 233 + 210 234 /** 211 235 * vce_v3_0_start - start VCE block 212 236 * ··· 243 215 static int vce_v3_0_start(struct amdgpu_device *adev) 244 216 { 245 217 struct amdgpu_ring *ring; 246 - int idx, i, j, r; 218 + int idx, r; 219 + 220 + ring = &adev->vce.ring[0]; 221 + WREG32(mmVCE_RB_RPTR, ring->wptr); 222 + WREG32(mmVCE_RB_WPTR, ring->wptr); 223 + WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); 224 + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 225 + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); 226 + 227 + ring = &adev->vce.ring[1]; 228 + WREG32(mmVCE_RB_RPTR2, ring->wptr); 229 + WREG32(mmVCE_RB_WPTR2, ring->wptr); 230 + WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); 231 + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 232 + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); 247 233 248 234 mutex_lock(&adev->grbm_idx_mutex); 249 235 for (idx = 0; idx < 2; ++idx) { 250 - 251 236 if (adev->vce.harvest_config & (1 << idx)) 252 237 continue; 253 238 ··· 274 233 275 234 vce_v3_0_mc_resume(adev, idx); 276 235 277 - /* set BUSY flag */ 278 - WREG32_P(mmVCE_STATUS, 1, ~1); 236 + WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK, 237 + ~VCE_STATUS__JOB_BUSY_MASK); 238 + 279 239 if (adev->asic_type >= CHIP_STONEY) 280 240 WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); 281 241 else 282 242 WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, 283 243 ~VCE_VCPU_CNTL__CLK_EN_MASK); 284 244 285 - WREG32_P(mmVCE_SOFT_RESET, 286 - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 287 - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 288 - 289 - mdelay(100); 290 - 291 245 WREG32_P(mmVCE_SOFT_RESET, 0, 292 246 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 293 247 294 - for (i = 0; i < 10; ++i) { 295 - uint32_t status; 296 - for (j = 0; j < 100; ++j) { 297 - status = RREG32(mmVCE_STATUS); 298 - if (status & 2) 299 - break; 300 - mdelay(10); 301 - } 302 - r = 0; 303 - if (status & 2) 304 - break; 248 + mdelay(100); 305 249 306 - DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); 307 - WREG32_P(mmVCE_SOFT_RESET, 308 - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 309 - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 310 - mdelay(10); 311 
- WREG32_P(mmVCE_SOFT_RESET, 0, 312 - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 313 - mdelay(10); 314 - r = -1; 315 - } 250 + r = vce_v3_0_firmware_loaded(adev); 316 251 317 252 /* clear BUSY flag */ 318 - WREG32_P(mmVCE_STATUS, 0, ~1); 253 + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); 319 254 320 255 /* Set Clock-Gating off */ 321 256 if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) ··· 307 290 WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 308 291 mutex_unlock(&adev->grbm_idx_mutex); 309 292 310 - ring = &adev->vce.ring[0]; 311 - WREG32(mmVCE_RB_RPTR, ring->wptr); 312 - WREG32(mmVCE_RB_WPTR, ring->wptr); 313 - WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); 314 - WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 315 - WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); 293 + return 0; 294 + } 316 295 317 - ring = &adev->vce.ring[1]; 318 - WREG32(mmVCE_RB_RPTR2, ring->wptr); 319 - WREG32(mmVCE_RB_WPTR2, ring->wptr); 320 - WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); 321 - WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 322 - WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); 296 + static int vce_v3_0_stop(struct amdgpu_device *adev) 297 + { 298 + int idx; 299 + 300 + mutex_lock(&adev->grbm_idx_mutex); 301 + for (idx = 0; idx < 2; ++idx) { 302 + if (adev->vce.harvest_config & (1 << idx)) 303 + continue; 304 + 305 + if (idx == 0) 306 + WREG32_P(mmGRBM_GFX_INDEX, 0, 307 + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 308 + else 309 + WREG32_P(mmGRBM_GFX_INDEX, 310 + GRBM_GFX_INDEX__VCE_INSTANCE_MASK, 311 + ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 312 + 313 + if (adev->asic_type >= CHIP_STONEY) 314 + WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); 315 + else 316 + WREG32_P(mmVCE_VCPU_CNTL, 0, 317 + ~VCE_VCPU_CNTL__CLK_EN_MASK); 318 + /* hold on ECPU */ 319 + WREG32_P(mmVCE_SOFT_RESET, 320 + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 321 + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 322 + 323 + /* clear BUSY flag */ 324 + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); 325 + 326 + /* Set Clock-Gating off */ 327 + if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) 328 + vce_v3_0_set_vce_sw_clock_gating(adev, false); 329 + } 330 + 331 + WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 332 + mutex_unlock(&adev->grbm_idx_mutex); 323 333 324 334 return 0; 325 335 } ··· 485 441 486 442 static int vce_v3_0_hw_fini(void *handle) 487 443 { 488 - return 0; 444 + int r; 445 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 446 + 447 + r = vce_v3_0_wait_for_idle(handle); 448 + if (r) 449 + return r; 450 + 451 + return vce_v3_0_stop(adev); 489 452 } 490 453 491 454 static int vce_v3_0_suspend(void *handle)
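The VCE start sequence above moves the "wait until the firmware reports loaded, reset the ECPU and retry" logic into a helper that returns 0 or -ETIMEDOUT. A bounded-retry sketch in the same spirit, with invented stand-ins for the status read and the engine reset:

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    static int attempts_until_ready = 250;   /* pretend the device needs a while */

    static bool check_status(void) { return --attempts_until_ready <= 0; }
    static void reset_engine(void) { printf("engine reset, retrying\n"); }

    static int wait_until_loaded(void)
    {
        for (int round = 0; round < 10; ++round) {
            for (int poll = 0; poll < 100; ++poll) {
                if (check_status())
                    return 0;                /* firmware reported ready */
                /* the driver would mdelay(10) here */
            }
            reset_engine();                  /* one reset per round */
        }
        return -ETIMEDOUT;
    }

    int main(void)
    {
        int r = wait_until_loaded();
        printf("wait_until_loaded() = %d\n", r);
        return r ? 1 : 0;
    }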
-14
drivers/gpu/drm/amd/amdgpu/vi.c
···
             AMD_CG_SUPPORT_HDP_LS |
             AMD_CG_SUPPORT_SDMA_MGCG |
             AMD_CG_SUPPORT_SDMA_LS;
-        /* rev0 hardware doesn't support PG */
         adev->pg_flags = 0;
-        if (adev->rev_id != 0x00)
-            adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
-                AMD_PG_SUPPORT_GFX_SMG |
-                AMD_PG_SUPPORT_GFX_DMG |
-                AMD_PG_SUPPORT_CP |
-                AMD_PG_SUPPORT_RLC_SMU_HS |
-                AMD_PG_SUPPORT_GFX_PIPELINE;
         adev->external_rev_id = adev->rev_id + 0x1;
         break;
     case CHIP_STONEY:
···
             AMD_CG_SUPPORT_HDP_LS |
             AMD_CG_SUPPORT_SDMA_MGCG |
             AMD_CG_SUPPORT_SDMA_LS;
-        adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
-            AMD_PG_SUPPORT_GFX_SMG |
-            AMD_PG_SUPPORT_GFX_DMG |
-            AMD_PG_SUPPORT_GFX_PIPELINE |
-            AMD_PG_SUPPORT_CP |
-            AMD_PG_SUPPORT_RLC_SMU_HS;
         adev->external_rev_id = adev->rev_id + 0x1;
         break;
     default:
+4
drivers/gpu/drm/amd/include/cgs_common.h
···
     uint16_t        feature_version;
     uint32_t        image_size;
     uint64_t        mc_addr;
+
+    /* only for smc firmware */
+    uint32_t        ucode_start_address;
+
     void            *kptr;
 };
 
+3 -6
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
···
 {
     uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
     struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-    uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+    uint8_t i, stretch_amount, volt_offset = 0;
     struct phm_ppt_v1_information *table_info =
             (struct phm_ppt_v1_information *)(hwmgr->pptable);
     struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
···
 
     data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
     /* Populate CKS Lookup Table */
-    if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-        stretch_amount2 = 0;
-    else if (stretch_amount == 3 || stretch_amount == 4)
-        stretch_amount2 = 1;
-    else {
+    if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
+        stretch_amount != 4 && stretch_amount != 5) {
         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                 PHM_PlatformCaps_ClockStretcher);
         PP_ASSERT_WITH_CODE(false,
+175 -124
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
··· 179 179 180 180 /* They are both in 10KHz Units. */ 181 181 engine_clock_parameters.ulTargetEngineClock = 182 - (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK; 183 - engine_clock_parameters.ulTargetEngineClock |= 184 - (COMPUTE_ENGINE_PLL_PARAM << 24); 182 + cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) | 183 + ((COMPUTE_ENGINE_PLL_PARAM << 24))); 185 184 186 185 /* in 10 khz units.*/ 187 186 engine_clock_parameters.sReserved.ulClock = 188 - (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK; 187 + cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); 189 188 return cgs_atom_exec_cmd_table(hwmgr->device, 190 189 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), 191 190 &engine_clock_parameters); ··· 251 252 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; 252 253 int result; 253 254 254 - mpll_parameters.ulClock = (uint32_t) clock_value; 255 + mpll_parameters.ulClock = cpu_to_le32(clock_value); 255 256 mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); 256 257 257 258 result = cgs_atom_exec_cmd_table ··· 261 262 262 263 if (0 == result) { 263 264 mpll_param->mpll_fb_divider.clk_frac = 264 - mpll_parameters.ulFbDiv.usFbDivFrac; 265 + le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac); 265 266 mpll_param->mpll_fb_divider.cl_kf = 266 - mpll_parameters.ulFbDiv.usFbDiv; 267 + le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv); 267 268 mpll_param->mpll_post_divider = 268 269 (uint32_t)mpll_parameters.ucPostDiv; 269 270 mpll_param->vco_mode = ··· 299 300 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; 300 301 int result; 301 302 302 - mpll_parameters.ulClock.ulClock = (uint32_t)clock_value; 303 + mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); 303 304 304 305 result = cgs_atom_exec_cmd_table(hwmgr->device, 305 306 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), ··· 319 320 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; 320 321 int result; 321 322 322 - pll_parameters.ulClock = clock_value; 323 + pll_parameters.ulClock = cpu_to_le32(clock_value); 323 324 324 325 result = cgs_atom_exec_cmd_table 325 326 (hwmgr->device, ··· 328 329 329 330 if (0 == result) { 330 331 dividers->pll_post_divider = pll_parameters.ucPostDiv; 331 - dividers->real_clock = pll_parameters.ulClock; 332 + dividers->real_clock = le32_to_cpu(pll_parameters.ulClock); 332 333 } 333 334 334 335 return result; ··· 342 343 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; 343 344 int result; 344 345 345 - pll_patameters.ulClock.ulClock = clock_value; 346 + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); 346 347 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; 347 348 348 349 result = cgs_atom_exec_cmd_table ··· 354 355 dividers->pll_post_divider = 355 356 pll_patameters.ulClock.ucPostDiv; 356 357 dividers->real_clock = 357 - pll_patameters.ulClock.ulClock; 358 + le32_to_cpu(pll_patameters.ulClock.ulClock); 358 359 359 360 dividers->ul_fb_div.ul_fb_div_frac = 360 - pll_patameters.ulFbDiv.usFbDivFrac; 361 + le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); 361 362 dividers->ul_fb_div.ul_fb_div = 362 - pll_patameters.ulFbDiv.usFbDiv; 363 + le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); 363 364 364 365 dividers->uc_pll_ref_div = 365 366 pll_patameters.ucPllRefDiv; ··· 379 380 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; 380 381 int result; 381 382 382 - pll_patameters.ulClock.ulClock = clock_value; 383 + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); 383 384 pll_patameters.ulClock.ucPostDiv = 
COMPUTE_GPUCLK_INPUT_FLAG_SCLK; 384 385 385 386 result = cgs_atom_exec_cmd_table ··· 411 412 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; 412 413 int result; 413 414 414 - pll_patameters.ulClock.ulClock = clock_value; 415 + pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); 415 416 pll_patameters.ulClock.ucPostDiv = 416 417 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; 417 418 ··· 424 425 dividers->pll_post_divider = 425 426 pll_patameters.ulClock.ucPostDiv; 426 427 dividers->real_clock = 427 - pll_patameters.ulClock.ulClock; 428 + le32_to_cpu(pll_patameters.ulClock.ulClock); 428 429 429 430 dividers->ul_fb_div.ul_fb_div_frac = 430 - pll_patameters.ulFbDiv.usFbDivFrac; 431 + le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); 431 432 dividers->ul_fb_div.ul_fb_div = 432 - pll_patameters.ulFbDiv.usFbDiv; 433 + le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); 433 434 434 435 dividers->uc_pll_ref_div = 435 436 pll_patameters.ucPllRefDiv; ··· 518 519 519 520 for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) { 520 521 voltage_table->entries[i].value = 521 - voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue; 522 + le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue); 522 523 voltage_table->entries[i].smio_low = 523 - voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId; 524 + le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId); 524 525 } 525 526 526 527 voltage_table->mask_low = 527 - voltage_object->asGpioVoltageObj.ulGpioMaskVal; 528 + le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal); 528 529 voltage_table->count = 529 530 voltage_object->asGpioVoltageObj.ucGpioEntryNum; 530 531 voltage_table->phase_delay = ··· 591 592 const uint32_t pinId, 592 593 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) 593 594 { 594 - bool bRet = 0; 595 + bool bRet = false; 595 596 ATOM_GPIO_PIN_LUT *gpio_lookup_table = 596 597 get_gpio_lookup_table(hwmgr->device); 597 598 598 599 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), 599 - "Could not find GPIO lookup Table in BIOS.", return -1); 600 + "Could not find GPIO lookup Table in BIOS.", return false); 600 601 601 602 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId, 602 603 gpio_pin_assignment); ··· 649 650 return -1; 650 651 651 652 if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || 652 - (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && 653 - getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) 653 + (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && 654 + getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) 654 655 return -1; 655 656 656 657 /*----------------------------------------------------------- ··· 661 662 662 663 switch (dpm_level) { 663 664 case 1: 664 - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1); 665 - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000); 665 + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1)); 666 + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); 666 667 break; 667 668 case 2: 668 - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2); 669 - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000); 669 + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2)); 670 + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); 670 671 
break; 671 672 case 3: 672 - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3); 673 - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000); 673 + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3)); 674 + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); 674 675 break; 675 676 case 4: 676 - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4); 677 - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000); 677 + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4)); 678 + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); 678 679 break; 679 680 case 5: 680 - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5); 681 - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000); 681 + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5)); 682 + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); 682 683 break; 683 684 case 6: 684 - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6); 685 - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000); 685 + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6)); 686 + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); 686 687 break; 687 688 case 7: 688 - fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7); 689 - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000); 689 + fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7)); 690 + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); 690 691 break; 691 692 default: 692 693 printk(KERN_ERR "DPM Level not supported\n"); 693 694 fPowerDPMx = Convert_ULONG_ToFraction(1); 694 - fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000); 695 + fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); 695 696 } 696 697 697 698 /*------------------------- ··· 715 716 return result; 716 717 717 718 /* Finally, the actual fuse value */ 718 - ul_RO_fused = sOutput_FuseValues.ulEfuseValue; 719 - fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1); 720 - fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1); 719 + ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 720 + fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); 721 + fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); 721 722 fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); 722 723 723 724 sCACm_fuse = getASICProfilingInfo->sCACm; ··· 735 736 if (result) 736 737 return result; 737 738 738 - ul_CACm_fused = sOutput_FuseValues.ulEfuseValue; 739 - fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000); 740 - fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000); 739 + ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 740 + fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); 741 + fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); 741 742 742 743 fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); 743 744 ··· 755 756 if (result) 756 757 return result; 757 758 758 - ul_CACb_fused = 
sOutput_FuseValues.ulEfuseValue; 759 - fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000); 760 - fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000); 759 + ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 760 + fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); 761 + fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); 761 762 762 763 fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); 763 764 ··· 776 777 if (result) 777 778 return result; 778 779 779 - ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue; 780 - fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000); 781 - fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000); 780 + ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 781 + fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); 782 + fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); 782 783 783 784 fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, 784 785 fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); ··· 797 798 if (result) 798 799 return result; 799 800 800 - ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue; 801 - fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000); 802 - fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000); 801 + ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 802 + fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); 803 + fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); 803 804 fRange = fMultiply(fRange, ConvertToFraction(-1)); 804 805 805 806 fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, ··· 819 820 if (result) 820 821 return result; 821 822 822 - ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue; 823 - fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000); 824 - fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000); 823 + ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 824 + fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); 825 + fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); 825 826 826 827 fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, 827 828 fAverage, fRange, sKv_b_fuse.ucEfuseLength); ··· 850 851 if (result) 851 852 return result; 852 853 853 - ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue; 854 - fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000); 855 - fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000); 854 + ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); 855 + fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); 856 + fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); 856 857 857 858 fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, 858 859 fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); ··· 862 863 * PART 2 - Grabbing all required values 863 864 *------------------------------------------- 864 865 */ 865 - fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000), 866 + fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), 866 867 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); 867 - fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000), 
868 + fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), 868 869 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); 869 - fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000), 870 + fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), 870 871 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); 871 - fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000), 872 + fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), 872 873 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); 873 - fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000), 874 + fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), 874 875 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); 875 - fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000), 876 + fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), 876 877 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); 877 - fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000), 878 + fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), 878 879 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); 879 - fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000), 880 + fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), 880 881 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); 881 882 882 - fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a); 883 - fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b); 884 - fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c); 883 + fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); 884 + fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); 885 + fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); 885 886 886 - fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed); 887 + fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); 887 888 888 889 fMargin_FMAX_mean = GetScaledFraction( 889 - getASICProfilingInfo->ulMargin_Fmax_mean, 10000); 890 + le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); 890 891 fMargin_Plat_mean = GetScaledFraction( 891 - getASICProfilingInfo->ulMargin_plat_mean, 10000); 892 + le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); 892 893 fMargin_FMAX_sigma = GetScaledFraction( 893 - getASICProfilingInfo->ulMargin_Fmax_sigma, 10000); 894 + le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); 894 895 fMargin_Plat_sigma = GetScaledFraction( 895 - getASICProfilingInfo->ulMargin_plat_sigma, 10000); 896 + le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); 896 897 897 898 fMargin_DC_sigma = GetScaledFraction( 898 - getASICProfilingInfo->ulMargin_DC_sigma, 100); 899 + le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); 899 900 fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); 900 901 901 902 fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); ··· 907 908 fSclk = GetScaledFraction(sclk, 100); 908 909 909 910 fV_max = fDivide(GetScaledFraction( 910 - getASICProfilingInfo->ulMaxVddc, 1000), 
ConvertToFraction(4)); 911 - fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10); 912 - fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100); 913 - fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10); 911 + le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); 912 + fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); 913 + fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); 914 + fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); 914 915 fV_FT = fDivide(GetScaledFraction( 915 - getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4)); 916 + le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); 916 917 fV_min = fDivide(GetScaledFraction( 917 - getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4)); 918 + le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); 918 919 919 920 /*----------------------- 920 921 * PART 3 ··· 924 925 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); 925 926 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); 926 927 fC_Term = fAdd(fMargin_RO_c, 927 - fAdd(fMultiply(fSM_A0,fLkg_FT), 928 + fAdd(fMultiply(fSM_A0, fLkg_FT), 928 929 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), 929 930 fAdd(fMultiply(fSM_A3, fSclk), 930 931 fSubtract(fSM_A7, fRO_fused))))); ··· 1062 1063 get_voltage_info_param_space.ucVoltageMode = 1063 1064 ATOM_GET_VOLTAGE_EVV_VOLTAGE; 1064 1065 get_voltage_info_param_space.usVoltageLevel = 1065 - virtual_voltage_Id; 1066 + cpu_to_le16(virtual_voltage_Id); 1066 1067 get_voltage_info_param_space.ulSCLKFreq = 1067 - sclk; 1068 + cpu_to_le32(sclk); 1068 1069 1069 1070 result = cgs_atom_exec_cmd_table(hwmgr->device, 1070 1071 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), ··· 1073 1074 if (0 != result) 1074 1075 return result; 1075 1076 1076 - *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) 1077 - (&get_voltage_info_param_space))->usVoltageLevel; 1077 + *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) 1078 + (&get_voltage_info_param_space))->usVoltageLevel); 1079 + 1080 + return result; 1081 + } 1082 + 1083 + /** 1084 + * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table. 1085 + * @param hwmgr input: pointer to hwManager 1086 + * @param virtual_voltage_id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 
0xff08 1087 + * @param voltage output: real voltage level in unit of mv 1088 + */ 1089 + int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, 1090 + uint16_t virtual_voltage_id, 1091 + uint16_t *voltage) 1092 + { 1093 + int result; 1094 + int entry_id; 1095 + GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; 1096 + 1097 + /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ 1098 + for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { 1099 + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) { 1100 + /* found */ 1101 + break; 1102 + } 1103 + } 1104 + 1105 + PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count, 1106 + "Can't find requested voltage id in vddc_dependency_on_sclk table!", 1107 + return -EINVAL; 1108 + ); 1109 + 1110 + get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC; 1111 + get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; 1112 + get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id; 1113 + get_voltage_info_param_space.ulSCLKFreq = 1114 + cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); 1115 + 1116 + result = cgs_atom_exec_cmd_table(hwmgr->device, 1117 + GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1118 + &get_voltage_info_param_space); 1119 + 1120 + if (0 != result) 1121 + return result; 1122 + 1123 + *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) 1124 + (&get_voltage_info_param_space))->usVoltageLevel); 1078 1125 1079 1126 return result; 1080 1127 } ··· 1210 1165 1211 1166 if (entry_found) { 1212 1167 ssEntry->speed_spectrum_percentage = 1213 - ssInfo->usSpreadSpectrumPercentage; 1214 - ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz; 1168 + le16_to_cpu(ssInfo->usSpreadSpectrumPercentage); 1169 + ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz); 1215 1170 1216 1171 if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && 1217 1172 (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) || ··· 1267 1222 int result; 1268 1223 READ_EFUSE_VALUE_PARAMETER efuse_param; 1269 1224 1270 - efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4; 1225 + efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4); 1271 1226 efuse_param.sEfuse.ucBitShift = (uint8_t) 1272 1227 (start_index - ((start_index / 32) * 32)); 1273 1228 efuse_param.sEfuse.ucBitLength = (uint8_t) ··· 1277 1232 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 1278 1233 &efuse_param); 1279 1234 if (!result) 1280 - *efuse = efuse_param.ulEfuseValue & mask; 1235 + *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask; 1281 1236 1282 1237 return result; 1283 1238 } 1284 1239 1285 1240 int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, 1286 - uint8_t level) 1241 + uint8_t level) 1287 1242 { 1288 1243 DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; 1289 1244 int result; 1290 1245 1291 - memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK; 1292 - memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM; 1246 + memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = 1247 + cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); 1248 + memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = 1249 + cpu_to_le32(ADJUST_MC_SETTING_PARAM); 1293 1250 memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; 1294 1251 1295 1252 result = 
cgs_atom_exec_cmd_table ··· 1311 1264 1312 1265 get_voltage_info_param_space.ucVoltageType = voltage_type; 1313 1266 get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; 1314 - get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id; 1315 - get_voltage_info_param_space.ulSCLKFreq = sclk; 1267 + get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id); 1268 + get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); 1316 1269 1317 1270 result = cgs_atom_exec_cmd_table(hwmgr->device, 1318 1271 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), ··· 1321 1274 if (0 != result) 1322 1275 return result; 1323 1276 1324 - *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel; 1277 + *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); 1325 1278 1326 1279 return result; 1327 1280 } ··· 1342 1295 for (i = 0; i < psmu_info->ucSclkEntryNum; i++) { 1343 1296 table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; 1344 1297 table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv; 1345 - table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc; 1346 - table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper; 1347 - table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower; 1298 + table->entry[i].usFcw_pcc = 1299 + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc); 1300 + table->entry[i].usFcw_trans_upper = 1301 + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper); 1302 + table->entry[i].usRcw_trans_lower = 1303 + le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower); 1348 1304 } 1349 1305 1350 1306 return 0; 1351 1307 } 1352 1308 1353 - int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param) 1309 + int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, 1310 + struct pp_atom_ctrl__avfs_parameters *param) 1354 1311 { 1355 1312 ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; 1356 1313 ··· 1368 1317 if (!profile) 1369 1318 return -1; 1370 1319 1371 - param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0; 1372 - param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1; 1373 - param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2; 1374 - param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma; 1375 - param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean; 1376 - param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma; 1377 - param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0; 1378 - param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1; 1379 - param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2; 1380 - param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0; 1381 - param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1; 1382 - param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2; 1383 - param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1; 1384 - param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2; 1385 - param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b; 1386 - param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = 
profile->ulAVFSGB_FUSE_TABLE_CKSON_m1; 1387 - param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2; 1388 - param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b; 1389 - param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv; 1320 + param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0); 1321 + param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1); 1322 + param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2); 1323 + param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma); 1324 + param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean); 1325 + param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma); 1326 + param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0); 1327 + param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1); 1328 + param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2); 1329 + param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0); 1330 + param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1); 1331 + param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2); 1332 + param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1); 1333 + param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2); 1334 + param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b); 1335 + param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1); 1336 + param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2); 1337 + param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b); 1338 + param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv); 1390 1339 param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; 1391 1340 param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; 1392 1341 param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; 1393 1342 param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; 1394 - param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor; 1343 + param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor); 1395 1344 param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; 1396 1345 1397 1346 return 0;
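The ppatomctrl.c hunks above wrap every value read from or written to the ATOM BIOS tables in le16_to_cpu()/le32_to_cpu() or cpu_to_le16()/cpu_to_le32(). A minimal sketch of the rule they follow, using a made-up record type as a stand-in for the real ATOM structures: the tables are stored little-endian in the VBIOS image, so the conversion helpers are no-ops on little-endian hosts and byte swaps on big-endian ones, while a plain load or store of the raw field would silently break the latter.

/* Sketch only: example_atom_record is hypothetical; the real ATOM table
 * structures are what the hunks above convert.  The sparse __le* types
 * mark fields that live in VBIOS (little-endian) byte order. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_atom_record {
	__le32 ulVoltage_uV;	/* little-endian, as stored in the VBIOS image */
	__le16 usLevel;
};

static u32 example_read_voltage(const struct example_atom_record *rec)
{
	/* correct on any host; a direct load would be byte-swapped on big-endian CPUs */
	return le32_to_cpu(rec->ulVoltage_uV);
}

static void example_set_level(struct example_atom_record *rec, u16 level)
{
	rec->usLevel = cpu_to_le16(level);	/* write back in table byte order */
}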
+1
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
··· 281 281 282 282 extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); 283 283 extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); 284 + extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage); 284 285 extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); 285 286 extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo); 286 287 extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
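The new atomctrl_get_voltage_evv() export resolves a virtual (leakage) voltage id to a real VDDC level: it looks the id up in the vddc_dependency_on_sclk table to find the matching sclk and then issues the GetVoltageInfo ATOM command. A hypothetical caller might use it as below; the snippet is only a usage sketch, not code from this patch, and assumes the usual ATOM_VIRTUAL_VOLTAGE_ID0 (0xff01) leakage id.

	/* Hypothetical usage sketch for the new export. */
	uint16_t vddc = 0;
	int ret;

	ret = atomctrl_get_voltage_evv(hwmgr, ATOM_VIRTUAL_VOLTAGE_ID0, &vddc);
	if (ret)
		return ret;	/* -EINVAL when the id is not in vddc_dependency_on_sclk */
	/* vddc now holds the real voltage level in mV */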
+13
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
··· 810 810 return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr; 811 811 } 812 812 813 + int pp_tables_get_response_times(struct pp_hwmgr *hwmgr, 814 + uint32_t *vol_rep_time, uint32_t *bb_rep_time) 815 + { 816 + const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr); 817 + 818 + PP_ASSERT_WITH_CODE(NULL != powerplay_tab, 819 + "Missing PowerPlay Table!", return -EINVAL); 820 + 821 + *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime); 822 + *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime); 823 + 824 + return 0; 825 + } 813 826 814 827 int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, 815 828 unsigned long *num_of_entries)
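pp_tables_get_response_times() simply exposes the voltage and back-bias settling times already present in the PowerPlay table header, converting them from little-endian on the way out. A short, illustrative caller (variable names are placeholders; an SMU manager would typically feed these values into its response-time programming):

	/* Illustrative caller only; not code from this patch. */
	uint32_t voltage_response_time = 0, backbias_response_time = 0;
	int ret;

	ret = pp_tables_get_response_times(hwmgr, &voltage_response_time,
					   &backbias_response_time);
	if (ret)
		return ret;	/* -EINVAL when the PowerPlay table is missing */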
+10 -7
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
··· 32 32 extern const struct pp_table_func pptable_funcs; 33 33 34 34 typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr, 35 - struct pp_hw_power_state *hw_ps, 36 - unsigned int index, 37 - const void *clock_info); 35 + struct pp_hw_power_state *hw_ps, 36 + unsigned int index, 37 + const void *clock_info); 38 38 39 39 int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, 40 - unsigned long *num_of_entries); 40 + unsigned long *num_of_entries); 41 41 42 42 int pp_tables_get_entry(struct pp_hwmgr *hwmgr, 43 - unsigned long entry_index, 44 - struct pp_power_state *ps, 45 - pp_tables_hw_clock_info_callback func); 43 + unsigned long entry_index, 44 + struct pp_power_state *ps, 45 + pp_tables_hw_clock_info_callback func); 46 + 47 + int pp_tables_get_response_times(struct pp_hwmgr *hwmgr, 48 + uint32_t *vol_rep_time, uint32_t *bb_rep_time); 46 49 47 50 #endif
+29
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
··· 131 131 smum_wait_on_indirect_register(smumgr, \ 132 132 mm##port##_INDEX, index, value, mask) 133 133 134 + #define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \ 135 + SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) 136 + 137 + #define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ 138 + SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ 139 + SMUM_FIELD_MASK(reg, field) ) 134 140 135 141 #define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ 136 142 index, value, mask) \ ··· 163 157 (((value) & ~SMUM_FIELD_MASK(reg, field)) | \ 164 158 (SMUM_FIELD_MASK(reg, field) & ((field_val) << \ 165 159 SMUM_FIELD_SHIFT(reg, field)))) 160 + 161 + #define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \ 162 + SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ 163 + reg, field) 166 164 167 165 #define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ 168 166 port, index, value, mask) \ ··· 201 191 SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ 202 192 reg, field, fieldval)) 203 193 194 + 195 + #define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \ 196 + cgs_write_ind_register(device, port, ix##reg, \ 197 + SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ 198 + reg, field, fieldval)) 199 + 200 + 204 201 #define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ 205 202 SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \ 206 203 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ ··· 217 200 SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \ 218 201 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ 219 202 SMUM_FIELD_MASK(reg, field)) 203 + 204 + #define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \ 205 + smum_wait_for_indirect_register_unequal(smumgr, \ 206 + mm##port##_INDEX, index, value, mask) 207 + 208 + #define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \ 209 + SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask) 210 + 211 + #define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \ 212 + SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ 213 + SMUM_FIELD_MASK(reg, field) ) 214 + 220 215 #endif
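The smumgr.h additions mirror the existing VFPF helpers for plain indirect (SMC) register access: SMUM_WAIT_INDIRECT_FIELD() polls one field until it reaches a value, SMUM_WAIT_INDIRECT_FIELD_UNEQUAL() polls until it leaves a value, and SMUM_READ_INDIRECT_FIELD()/SMUM_WRITE_INDIRECT_FIELD() do single-field reads and read-modify-write stores through cgs_read_ind_register()/cgs_write_ind_register(). A sketch of how they might be used; the register and field names follow the usual SMU naming but are shown only to illustrate how the macros expand, not as code from this patch.

	/* Poll until the SMC reports its boot sequence finished; expands to
	 * smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX, ixRCU_UC_EVENTS,
	 * 1 << RCU_UC_EVENTS__boot_seq_done__SHIFT,
	 * RCU_UC_EVENTS__boot_seq_done_MASK). */
	SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 1);

	/* Read-modify-write a single field through the indirect interface. */
	SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
				  SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);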
+1
drivers/gpu/drm/radeon/atombios_encoders.c
··· 120 120 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 121 121 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 122 122 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 123 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 123 124 if (dig->backlight_level == 0) 124 125 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); 125 126 else {