Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'drm-next-4.2-amdgpu' of git://people.freedesktop.org/~agd5f/linux into drm-next

More fixes for amdgpu for 4.2. We've integrated Jerome's comments
about the interface, among other things. I'll be on vacation next
week, so Christian will be handling any updates in the meantime.

* 'drm-next-4.2-amdgpu' of git://people.freedesktop.org/~agd5f/linux: (23 commits)
drm/amdgpu: fix a amdgpu_dpm=0 bug
drm/amdgpu: don't enable/disable display twice on suspend/resume
drm/amdgpu: fix UVD/VCE VM emulation
drm/amdgpu: enable vce powergating
drm/amdgpu/iceland: don't call smu_init on resume
drm/amdgpu/tonga: don't call smu_init on resume
drm/amdgpu/cz: don't call smu_init on resume
drm/amdgpu: update to latest gfx8 golden register settings
drm/amdgpu: whitespace cleanup in gmc8 golden regs
drm/admgpu: move XDMA golden registers to dce code
drm/amdgpu: fix the build on big endian
drm/amdgpu: cleanup UAPI comments
drm/amdgpu: remove AMDGPU_CTX_OP_STATE_RUNNING
drm/amdgpu: remove the VI hardware semaphore in ring sync
drm/amdgpu: set the gfx config properly for all CZ variants (v2)
drm/amdgpu: also print the pci revision when printing the pci ids
drm/amdgpu: cleanup VA IOCTL
drm/amdgpu: fix saddr handling in amdgpu_vm_bo_unmap
drm/amdgpu: fix amdgpu_vm_bo_map
drm/amdgpu: remove unused AMDGPU_IB_FLAG_GDS
...

+446 -245
+7 -2
drivers/gpu/drm/amd/amdgpu/amdgpu.h
···
         void (*emit_ib)(struct amdgpu_ring *ring,
                         struct amdgpu_ib *ib);
         void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
-                           uint64_t seq, bool write64bit);
+                           uint64_t seq, unsigned flags);
         bool (*emit_semaphore)(struct amdgpu_ring *ring,
                                struct amdgpu_semaphore *semaphore,
                                bool emit_wait);
···
 #define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
 #define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
 #define AMDGPU_FENCE_OWNER_MOVE ((void*)2ul)
+
+#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
+#define AMDGPU_FENCE_FLAG_INT (1 << 1)

 struct amdgpu_fence {
         struct fence base;
···
         int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
         bool (*vblank_too_short)(struct amdgpu_device *adev);
         void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+        void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
         void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
         void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
         u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
···
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
 #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
-#define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit))
+#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
 #define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
···
 #define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
 #define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
+#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
 #define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
 #define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
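The emit_fence() rework above replaces the old write64bit bool with a flags word, so a fence can independently request a 64-bit sequence write and an interrupt. Below is a minimal, userspace-compilable sketch of that encoding; the flag values come from the hunk above, but the emit helper and its trace output are simplified stand-ins for the real ring backends.

    /* fence_flags_sketch.c -- illustrative only, not driver code */
    #include <stdbool.h>
    #include <inttypes.h>
    #include <stdio.h>

    #define AMDGPU_FENCE_FLAG_64BIT (1 << 0)  /* write seq as 64 bits */
    #define AMDGPU_FENCE_FLAG_INT   (1 << 1)  /* raise an interrupt */

    static void emit_fence(uint64_t addr, uint64_t seq, unsigned flags)
    {
            bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
            bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

            /* a real backend would emit ring packets here; we just trace */
            printf("fence @0x%" PRIx64 " seq %" PRIu64 "%s%s\n", addr, seq,
                   write64bit ? " [64bit]" : "", int_sel ? " [irq]" : "");
    }

    int main(void)
    {
            /* driver-internal fence: interrupt wanted (old "false" case) */
            emit_fence(0x1000, 42, AMDGPU_FENCE_FLAG_INT);
            /* user fence: full 64-bit write (old "true" case) */
            emit_fence(0x2000, 42, AMDGPU_FENCE_FLAG_64BIT);
            return 0;
    }

The amdgpu_fence.c and amdgpu_ib.c hunks below show the two in-tree call sites picking exactly those flags.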
+15 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
···
                 return r;

         if (ring->funcs->parse_cs) {
+                struct amdgpu_bo_va_mapping *m;
                 struct amdgpu_bo *aobj = NULL;
-                void *kptr;
+                uint64_t offset;
+                uint8_t *kptr;

-                amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj);
+                m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
+                                           &aobj);
                 if (!aobj) {
                         DRM_ERROR("IB va_start is invalid\n");
                         return -EINVAL;
                 }

+                if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+                    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+                        DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+                        return -EINVAL;
+                }
+
                 /* the IB should be reserved at this point */
-                r = amdgpu_bo_kmap(aobj, &kptr);
+                r = amdgpu_bo_kmap(aobj, (void **)&kptr);
                 if (r) {
                         return r;
                 }
+
+                offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
+                kptr += chunk_ib->va_start - offset;

                 r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
                 if (r) {
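The new check above rejects an IB that runs past the end of the mapping found for its va_start. A standalone sketch of the arithmetic, using a hypothetical stand-in for the interval-tree mapping (start and last are inclusive GPU page numbers, as with m->it.start/m->it.last above):

    #include <errno.h>
    #include <stdint.h>

    #define AMDGPU_GPU_PAGE_SIZE 4096

    /* hypothetical stand-in for struct amdgpu_bo_va_mapping's interval */
    struct mapping {
            uint64_t start; /* first GPU page of the mapping */
            uint64_t last;  /* last GPU page, inclusive */
    };

    static int check_ib_bounds(const struct mapping *m,
                               uint64_t va_start, uint32_t ib_bytes)
    {
            /* one past the end of the mapping, in bytes */
            uint64_t map_end = (m->last + 1) * AMDGPU_GPU_PAGE_SIZE;

            if (va_start + ib_bytes > map_end)
                    return -EINVAL; /* IB spills past the mapped range */
            return 0;
    }

The kptr adjustment in the same hunk is the complementary piece: the CPU pointer into the kernel mapping is offset by va_start minus start * AMDGPU_GPU_PAGE_SIZE, so parse_cs reads the IB from the right place inside the BO.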
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
···
         adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
         adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

-        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
-                 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
-                 pdev->subsystem_vendor, pdev->subsystem_device);
+        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+                 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

         /* mutex initialization are all done here so we
          * can recall function without having locking issues */
+7 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
···
         fence_init(&(*fence)->base, &amdgpu_fence_ops,
                    &adev->fence_queue.lock, adev->fence_context + ring->idx,
                    (*fence)->seq);
-        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, false);
+        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+                               (*fence)->seq,
+                               AMDGPU_FENCE_FLAG_INT);
         trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
         return 0;
 }
···
         uint64_t last_seq[AMDGPU_MAX_RINGS];
         bool signaled;
         int i, r;
+
+        if (timeout == 0) {
+                return amdgpu_fence_any_seq_signaled(adev, target_seq);
+        }

         while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
+22 -43
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
···
         if (robj) {
                 if (robj->gem_base.import_attach)
                         drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+                amdgpu_mn_unregister(robj);
                 amdgpu_bo_unref(&robj);
         }
 }
···
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
 {
-        union drm_amdgpu_gem_va *args = data;
+        struct drm_amdgpu_gem_va *args = data;
         struct drm_gem_object *gobj;
         struct amdgpu_device *adev = dev->dev_private;
         struct amdgpu_fpriv *fpriv = filp->driver_priv;
···
         uint32_t invalid_flags, va_flags = 0;
         int r = 0;

-        if (!adev->vm_manager.enabled) {
-                memset(args, 0, sizeof(*args));
-                args->out.result = AMDGPU_VA_RESULT_ERROR;
+        if (!adev->vm_manager.enabled)
                 return -ENOTTY;
-        }

-        if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) {
+        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                 dev_err(&dev->pdev->dev,
                         "va_address 0x%lX is in reserved area 0x%X\n",
-                        (unsigned long)args->in.va_address,
+                        (unsigned long)args->va_address,
                         AMDGPU_VA_RESERVED_SIZE);
-                memset(args, 0, sizeof(*args));
-                args->out.result = AMDGPU_VA_RESULT_ERROR;
                 return -EINVAL;
         }

         invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                 AMDGPU_VM_PAGE_EXECUTABLE);
-        if ((args->in.flags & invalid_flags)) {
+        if ((args->flags & invalid_flags)) {
                 dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
-                        args->in.flags, invalid_flags);
-                memset(args, 0, sizeof(*args));
-                args->out.result = AMDGPU_VA_RESULT_ERROR;
+                        args->flags, invalid_flags);
                 return -EINVAL;
         }

-        switch (args->in.operation) {
+        switch (args->operation) {
         case AMDGPU_VA_OP_MAP:
         case AMDGPU_VA_OP_UNMAP:
                 break;
         default:
                 dev_err(&dev->pdev->dev, "unsupported operation %d\n",
-                        args->in.operation);
-                memset(args, 0, sizeof(*args));
-                args->out.result = AMDGPU_VA_RESULT_ERROR;
+                        args->operation);
                 return -EINVAL;
         }

-        gobj = drm_gem_object_lookup(dev, filp, args->in.handle);
-        if (gobj == NULL) {
-                memset(args, 0, sizeof(*args));
-                args->out.result = AMDGPU_VA_RESULT_ERROR;
+        gobj = drm_gem_object_lookup(dev, filp, args->handle);
+        if (gobj == NULL)
                 return -ENOENT;
-        }
+
         rbo = gem_to_amdgpu_bo(gobj);
         r = amdgpu_bo_reserve(rbo, false);
         if (r) {
-                if (r != -ERESTARTSYS) {
-                        memset(args, 0, sizeof(*args));
-                        args->out.result = AMDGPU_VA_RESULT_ERROR;
-                }
                 drm_gem_object_unreference_unlocked(gobj);
                 return r;
         }
+
         bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
         if (!bo_va) {
-                memset(args, 0, sizeof(*args));
-                args->out.result = AMDGPU_VA_RESULT_ERROR;
-                drm_gem_object_unreference_unlocked(gobj);
+                amdgpu_bo_unreserve(rbo);
                 return -ENOENT;
         }

-        switch (args->in.operation) {
+        switch (args->operation) {
         case AMDGPU_VA_OP_MAP:
-                if (args->in.flags & AMDGPU_VM_PAGE_READABLE)
+                if (args->flags & AMDGPU_VM_PAGE_READABLE)
                         va_flags |= AMDGPU_PTE_READABLE;
-                if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE)
+                if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
                         va_flags |= AMDGPU_PTE_WRITEABLE;
-                if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE)
+                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                         va_flags |= AMDGPU_PTE_EXECUTABLE;
-                r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address,
-                                     args->in.offset_in_bo, args->in.map_size,
+                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
+                                     args->offset_in_bo, args->map_size,
                                      va_flags);
                 break;
         case AMDGPU_VA_OP_UNMAP:
-                r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address);
+                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                 break;
         default:
                 break;
         }

-        if (!r) {
+        if (!r)
                 amdgpu_gem_va_update_vm(adev, bo_va);
-                memset(args, 0, sizeof(*args));
-                args->out.result = AMDGPU_VA_RESULT_OK;
-        } else {
-                memset(args, 0, sizeof(*args));
-                args->out.result = AMDGPU_VA_RESULT_ERROR;
-        }

         drm_gem_object_unreference_unlocked(gobj);
         return r;
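With the union and its AMDGPU_VA_RESULT_* codes gone, userspace learns the outcome of a VA operation solely from the ioctl return value. A hedged userspace sketch against the reworked interface (it assumes the updated amdgpu_drm.h from this series is on the include path; fd and bo_handle are placeholders):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h> /* reworked header from this series */

    static int map_bo(int fd, uint32_t bo_handle, uint64_t gpu_va,
                      uint64_t size)
    {
            struct drm_amdgpu_gem_va va;

            memset(&va, 0, sizeof(va));
            va.handle = bo_handle;
            va.operation = AMDGPU_VA_OP_MAP;
            va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
            va.va_address = gpu_va;   /* must be GPU-page aligned */
            va.offset_in_bo = 0;
            va.map_size = size;       /* must be GPU-page aligned */

            /* <0 with errno on failure; no out.result field to inspect */
            return ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &va);
    }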
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
···
         if (ib->user) {
                 uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
                 addr += ib->user->offset;
-                amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, true);
+                amdgpu_ring_emit_fence(ring, addr, ib->fence->seq,
+                                       AMDGPU_FENCE_FLAG_64BIT);
         }

         if (ib->vm)
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
···
         bo = container_of(tbo, struct amdgpu_bo, tbo);

         amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
-        amdgpu_mn_unregister(bo);

         mutex_lock(&bo->adev->gem.mutex);
         list_del_init(&bo->list);
+17 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
···

 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-        if (enable) {
+        if (adev->pm.funcs->powergate_vce) {
                 mutex_lock(&adev->pm.mutex);
-                adev->pm.dpm.vce_active = true;
-                /* XXX select vce level based on ring/task */
-                adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+                /* enable/disable VCE */
+                amdgpu_dpm_powergate_vce(adev, !enable);
+
                 mutex_unlock(&adev->pm.mutex);
         } else {
-                mutex_lock(&adev->pm.mutex);
-                adev->pm.dpm.vce_active = false;
-                mutex_unlock(&adev->pm.mutex);
-        }
+                if (enable) {
+                        mutex_lock(&adev->pm.mutex);
+                        adev->pm.dpm.vce_active = true;
+                        /* XXX select vce level based on ring/task */
+                        adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+                        mutex_unlock(&adev->pm.mutex);
+                } else {
+                        mutex_lock(&adev->pm.mutex);
+                        adev->pm.dpm.vce_active = false;
+                        mutex_unlock(&adev->pm.mutex);
+                }

-        amdgpu_pm_compute_clocks(adev);
+                amdgpu_pm_compute_clocks(adev);
+        }
 }

 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
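The dispatch above prefers the new per-ASIC powergate_vce callback and only falls back to the legacy vce_active DPM state when no callback is wired up. A simplified stand-in sketch of that control flow (the types here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stddef.h>

    struct pm_funcs {
            /* optional per-ASIC hook; NULL on ASICs without it */
            void (*powergate_vce)(void *adev, bool gate);
    };

    struct device {
            struct pm_funcs *funcs;
            bool vce_active; /* legacy DPM bookkeeping */
    };

    static void dpm_enable_vce(struct device *dev, bool enable)
    {
            if (dev->funcs->powergate_vce) {
                    /* new path: gating is the inverse of enabling */
                    dev->funcs->powergate_vce(dev, !enable);
            } else {
                    /* legacy path: flip the state, then recompute clocks */
                    dev->vce_active = enable;
            }
    }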
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
···
  *
  */
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-                                bool write64bits)
+                                unsigned flags)
 {
-        WARN_ON(write64bits);
+        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

         amdgpu_ring_write(ring, VCE_CMD_FENCE);
         amdgpu_ring_write(ring, addr);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
···
                                     bool emit_wait);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-                                bool write64bit);
+                                unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
···
         list_add(&mapping->list, &bo_va->mappings);
         interval_tree_insert(&mapping->it, &vm->va);

+        bo_va->addr = 0;
+
         /* Make sure the page tables are allocated */
         saddr >>= amdgpu_vm_block_size;
         eaddr >>= amdgpu_vm_block_size;
···
 {
         struct amdgpu_bo_va_mapping *mapping;
         struct amdgpu_vm *vm = bo_va->vm;
+
+        saddr /= AMDGPU_GPU_PAGE_SIZE;

         list_for_each_entry(mapping, &bo_va->mappings, list) {
                 if (mapping->it.start == saddr)
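The unmap fix above is a unit conversion: the VM interval tree is keyed by GPU page number, while the ioctl hands amdgpu_vm_bo_unmap() a byte address, so before the fix the comparison against mapping->it.start could never match. A tiny standalone sketch of the corrected lookup (stand-in type, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>

    #define AMDGPU_GPU_PAGE_SIZE 4096

    /* stand-in for the interval-tree node keyed by GPU page number */
    struct mapping {
            uint64_t it_start; /* first GPU page of the mapping */
    };

    static bool mapping_matches(const struct mapping *m, uint64_t saddr)
    {
            saddr /= AMDGPU_GPU_PAGE_SIZE; /* bytes -> GPU pages first */
            return m->it_start == saddr;
    }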
+4 -2
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
···
  * an interrupt if needed (CIK).
  */
 static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-                                     bool write64bit)
+                                     unsigned flags)
 {
+        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
         /* write the fence */
         amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
         amdgpu_ring_write(ring, lower_32_bits(addr));
···
         rb_bufsz = order_base_2(ring->ring_size / 4);
         rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
-        rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+        rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
+                   SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
 #endif
         WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+140 -13
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
···
 #include "gfx_v8_0.h"

 static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);

 static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
 {
···
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-        /* powerdown unused blocks for now */
-        cz_dpm_powergate_uvd(adev, true);
+        if (amdgpu_dpm) {
+                /* powerdown unused blocks for now */
+                cz_dpm_powergate_uvd(adev, true);
+                cz_dpm_powergate_vce(adev, true);
+        }

         return 0;
 }
···
                 return;
         }

-        pi->vce_dpm.soft_min_clk = 0;
-        pi->vce_dpm.hard_min_clk = 0;
+        pi->vce_dpm.soft_min_clk = table->entries[0].ecclk;
+        pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
         cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
         level = cz_get_argument(adev);
         if (level < table->count)
-                clock = table->entries[level].evclk;
+                clock = table->entries[level].ecclk;
         else {
                 /* future BIOS would fix this error */
                 DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
-                clock = table->entries[table->count - 1].evclk;
+                clock = table->entries[table->count - 1].ecclk;
         }

         pi->vce_dpm.soft_max_clk = clock;
···
                                 break;
                 if (i < 0)
                         i = 0;
+                break;
+        default:
+                break;
+        }
+
+        return i;
+}
+
+static uint32_t cz_get_eclk_level(struct amdgpu_device *adev,
+                                  uint32_t clock, uint16_t msg)
+{
+        int i = 0;
+        struct amdgpu_vce_clock_voltage_dependency_table *table =
+                &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+        if (table->count == 0)
+                return 0;
+
+        switch (msg) {
+        case PPSMC_MSG_SetEclkSoftMin:
+        case PPSMC_MSG_SetEclkHardMin:
+                for (i = 0; i < table->count-1; i++)
+                        if (clock <= table->entries[i].ecclk)
+                                break;
+                break;
+        case PPSMC_MSG_SetEclkSoftMax:
+        case PPSMC_MSG_SetEclkHardMax:
+                for (i = table->count - 1; i > 0; i--)
+                        if (clock >= table->entries[i].ecclk)
+                                break;
                 break;
         default:
                 break;
···

         mutex_lock(&adev->pm.mutex);

-        /* init smc in dpm hw init */
+        /* smu init only needs to be called at startup, not resume.
+         * It should be in sw_init, but requires the fw info gathered
+         * in sw_init from other IP modules.
+         */
         ret = cz_smu_init(adev);
         if (ret) {
                 DRM_ERROR("amdgpu: smc initialization failed\n");
···

         /* powerup blocks */
         cz_dpm_powergate_uvd(adev, false);
+        cz_dpm_powergate_vce(adev, false);

         cz_clear_voting_clients(adev);
         cz_stop_dpm(adev);
···

         mutex_lock(&adev->pm.mutex);

+        /* smu fini only needs to be called at teardown, not suspend.
+         * It should be in sw_fini, but we put it here for symmetry
+         * with smu init.
+         */
         cz_smu_fini(adev);

         if (adev->pm.dpm_enabled) {
···
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

         mutex_lock(&adev->pm.mutex);
-        ret = cz_smu_init(adev);
-        if (ret) {
-                DRM_ERROR("amdgpu: smc resume failed\n");
-                mutex_unlock(&adev->pm.mutex);
-                return ret;
-        }

         /* do the actual fw loading */
         ret = cz_smu_start(adev);
···
         }
 }

+static int cz_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
+{
+        struct cz_power_info *pi = cz_get_pi(adev);
+        int ret = 0;
+
+        if (enable && pi->caps_vce_dpm) {
+                pi->dpm_flags |= DPMFlags_VCE_Enabled;
+                DRM_DEBUG("VCE DPM Enabled.\n");
+
+                ret = cz_send_msg_to_smc_with_parameter(adev,
+                        PPSMC_MSG_EnableAllSmuFeatures, VCE_DPM_MASK);
+
+        } else {
+                pi->dpm_flags &= ~DPMFlags_VCE_Enabled;
+                DRM_DEBUG("VCE DPM Stopped\n");
+
+                ret = cz_send_msg_to_smc_with_parameter(adev,
+                        PPSMC_MSG_DisableAllSmuFeatures, VCE_DPM_MASK);
+        }
+
+        return ret;
+}
+
+static int cz_update_vce_dpm(struct amdgpu_device *adev)
+{
+        struct cz_power_info *pi = cz_get_pi(adev);
+        struct amdgpu_vce_clock_voltage_dependency_table *table =
+                &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+        /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
+        if (pi->caps_stable_power_state) {
+                pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
+
+        } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
+                pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
+        }
+
+        cz_send_msg_to_smc_with_parameter(adev,
+                PPSMC_MSG_SetEclkHardMin,
+                cz_get_eclk_level(adev,
+                        pi->vce_dpm.hard_min_clk,
+                        PPSMC_MSG_SetEclkHardMin));
+        return 0;
+}
+
+static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+{
+        struct cz_power_info *pi = cz_get_pi(adev);
+
+        if (pi->caps_vce_pg) {
+                if (pi->vce_power_gated != gate) {
+                        if (gate) {
+                                /* disable clockgating so we can properly shut down the block */
+                                amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                             AMD_CG_STATE_UNGATE);
+                                /* shutdown the VCE block */
+                                amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                             AMD_PG_STATE_GATE);
+
+                                cz_enable_vce_dpm(adev, false);
+                                /* TODO: to figure out why vce can't be poweroff. */
+                                /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+                                pi->vce_power_gated = true;
+                        } else {
+                                cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
+                                pi->vce_power_gated = false;
+
+                                /* re-init the VCE block */
+                                amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                             AMD_PG_STATE_UNGATE);
+                                /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
+                                amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                             AMD_CG_STATE_GATE);
+
+                                cz_update_vce_dpm(adev);
+                                cz_enable_vce_dpm(adev, true);
+                        }
+                } else {
+                        if (!pi->vce_power_gated) {
+                                cz_update_vce_dpm(adev);
+                        }
+                }
+        } else { /*pi->caps_vce_pg*/
+                cz_update_vce_dpm(adev);
+                cz_enable_vce_dpm(adev, true);
+        }
+
+        return;
+}
+
 const struct amd_ip_funcs cz_dpm_ip_funcs = {
         .early_init = cz_dpm_early_init,
         .late_init = cz_dpm_late_init,
···
         .force_performance_level = cz_dpm_force_dpm_level,
         .vblank_too_short = NULL,
         .powergate_uvd = cz_dpm_powergate_uvd,
+        .powergate_vce = cz_dpm_powergate_vce,
 };

 static void cz_dpm_set_funcs(struct amdgpu_device *adev)
+9 -18
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
···
         mmHDMI_CONTROL, 0x31000111, 0x00000011,
 };

+static const u32 tonga_mgcg_cgcg_init[] =
+{
+        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+};
+
 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
         switch (adev->asic_type) {
         case CHIP_TONGA:
+                amdgpu_program_register_sequence(adev,
+                                                 tonga_mgcg_cgcg_init,
+                                                 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
                 amdgpu_program_register_sequence(adev,
                                                  golden_settings_tonga_a11,
                                                  (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
···
 static int dce_v10_0_suspend(void *handle)
 {
-        struct drm_connector *connector;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        drm_kms_helper_poll_disable(adev->ddev);
-
-        /* turn off display hw */
-        list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-        }

         amdgpu_atombios_scratch_regs_save(adev);
···
 static int dce_v10_0_resume(void *handle)
 {
-        struct drm_connector *connector;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

         dce_v10_0_init_golden_registers(adev);
···
         /* initialize hpd */
         dce_v10_0_hpd_init(adev);
-
-        /* blat the mode back in */
-        drm_helper_resume_force_mode(adev->ddev);
-        /* turn on display hw */
-        list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-        }
-
-        drm_kms_helper_poll_enable(adev->ddev);

         return 0;
 }
+9 -18
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
···
         mmFBC_MISC, 0x1f311fff, 0x14300000,
 };

+static const u32 cz_mgcg_cgcg_init[] =
+{
+        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+};
+
 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
 {
         switch (adev->asic_type) {
         case CHIP_CARRIZO:
+                amdgpu_program_register_sequence(adev,
+                                                 cz_mgcg_cgcg_init,
+                                                 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
                 amdgpu_program_register_sequence(adev,
                                                  cz_golden_settings_a11,
                                                  (const u32)ARRAY_SIZE(cz_golden_settings_a11));
···
 static int dce_v11_0_suspend(void *handle)
 {
-        struct drm_connector *connector;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        drm_kms_helper_poll_disable(adev->ddev);
-
-        /* turn off display hw */
-        list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-        }

         amdgpu_atombios_scratch_regs_save(adev);
···
 static int dce_v11_0_resume(void *handle)
 {
-        struct drm_connector *connector;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

         dce_v11_0_init_golden_registers(adev);
···
         /* initialize hpd */
         dce_v11_0_hpd_init(adev);
-
-        /* blat the mode back in */
-        drm_helper_resume_force_mode(adev->ddev);
-        /* turn on display hw */
-        list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-        }
-
-        drm_kms_helper_poll_enable(adev->ddev);

         return 0;
 }
-18
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
···

 static int dce_v8_0_suspend(void *handle)
 {
-        struct drm_connector *connector;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        drm_kms_helper_poll_disable(adev->ddev);
-
-        /* turn off display hw */
-        list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-        }

         amdgpu_atombios_scratch_regs_save(adev);
···
 static int dce_v8_0_resume(void *handle)
 {
-        struct drm_connector *connector;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

         amdgpu_atombios_scratch_regs_restore(adev);
···
         /* initialize hpd */
         dce_v8_0_hpd_init(adev);
-
-        /* blat the mode back in */
-        drm_helper_resume_force_mode(adev->ddev);
-        /* turn on display hw */
-        list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) {
-                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-        }
-
-        drm_kms_helper_poll_enable(adev->ddev);

         return 0;
 }
+12 -6
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
···
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
-                                         u64 seq, bool write64bit)
+                                         u64 seq, unsigned flags)
 {
+        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
         /* Workaround for cache flush problems. First send a dummy EOP
          * event down the pipe with seq one below.
          */
···
                                  EVENT_INDEX(5)));
         amdgpu_ring_write(ring, addr & 0xfffffffc);
         amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
-                                DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2));
+                                DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
         amdgpu_ring_write(ring, lower_32_bits(seq));
         amdgpu_ring_write(ring, upper_32_bits(seq));
 }
···
  */
 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
                                              u64 addr, u64 seq,
-                                             bool write64bits)
+                                             unsigned flags)
 {
+        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+
         /* RELEASE_MEM - flush caches, send int */
         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
         amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
                                  EOP_TC_ACTION_EN |
                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
                                  EVENT_INDEX(5)));
-        amdgpu_ring_write(ring, DATA_SEL(write64bits ? 2 : 1) | INT_SEL(2));
+        amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
         amdgpu_ring_write(ring, addr & 0xfffffffc);
         amdgpu_ring_write(ring, upper_32_bits(addr));
         amdgpu_ring_write(ring, lower_32_bits(seq));
···
         rb_bufsz = order_base_2(ring->ring_size / 8);
         tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
-        tmp |= BUF_SWAP_32BIT;
+        tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
 #endif
         WREG32(mmCP_RB0_CNTL, tmp);
···
         mqd->queue_state.cp_hqd_pq_control |=
                 (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
 #ifdef __BIG_ENDIAN
-        mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
+        mqd->queue_state.cp_hqd_pq_control |=
+                2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
 #endif
         mqd->queue_state.cp_hqd_pq_control &=
                 ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
+57 -11
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
···
         mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
         mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
         mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+        mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
         mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
         mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
         mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
+        mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 };

 static const u32 tonga_golden_common_all[] =
···
         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
         mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
         mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
         mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+        mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
         mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
         mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
         mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
···
         mmGB_GPU_ID, 0x0000000f, 0x00000000,
         mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+        mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
         mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+        mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
         mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
         mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
 };
···
         case CHIP_CARRIZO:
                 adev->gfx.config.max_shader_engines = 1;
                 adev->gfx.config.max_tile_pipes = 2;
-                adev->gfx.config.max_cu_per_sh = 8;
                 adev->gfx.config.max_sh_per_se = 1;
-                adev->gfx.config.max_backends_per_se = 2;
+
+                switch (adev->pdev->revision) {
+                case 0xc4:
+                case 0x84:
+                case 0xc8:
+                case 0xcc:
+                        /* B10 */
+                        adev->gfx.config.max_cu_per_sh = 8;
+                        adev->gfx.config.max_backends_per_se = 2;
+                        break;
+                case 0xc5:
+                case 0x81:
+                case 0x85:
+                case 0xc9:
+                case 0xcd:
+                        /* B8 */
+                        adev->gfx.config.max_cu_per_sh = 6;
+                        adev->gfx.config.max_backends_per_se = 2;
+                        break;
+                case 0xc6:
+                case 0xca:
+                case 0xce:
+                        /* B6 */
+                        adev->gfx.config.max_cu_per_sh = 6;
+                        adev->gfx.config.max_backends_per_se = 2;
+                        break;
+                case 0xc7:
+                case 0x87:
+                case 0xcb:
+                default:
+                        /* B4 */
+                        adev->gfx.config.max_cu_per_sh = 4;
+                        adev->gfx.config.max_backends_per_se = 1;
+                        break;
+                }
+
                 adev->gfx.config.max_texture_channel_caches = 2;
                 adev->gfx.config.max_gprs = 256;
                 adev->gfx.config.max_gs_threads = 32;
···
 }

 static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
-                                         u64 seq, bool write64bit)
+                                         u64 seq, unsigned flags)
 {
+        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+
         /* EVENT_WRITE_EOP - flush caches, send int */
         amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
         amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
···
                                  EVENT_INDEX(5)));
         amdgpu_ring_write(ring, addr & 0xfffffffc);
         amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
-                                DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2));
+                                DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
         amdgpu_ring_write(ring, lower_32_bits(seq));
         amdgpu_ring_write(ring, upper_32_bits(seq));
 }
···
         unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

         if (ring->adev->asic_type == CHIP_TOPAZ ||
-            ring->adev->asic_type == CHIP_TONGA) {
-                amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
-                amdgpu_ring_write(ring, lower_32_bits(addr));
-                amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
-        } else {
+            ring->adev->asic_type == CHIP_TONGA)
+                /* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */
+                return false;
+        else {
                 amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2));
                 amdgpu_ring_write(ring, lower_32_bits(addr));
                 amdgpu_ring_write(ring, upper_32_bits(addr));
···
 static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
                                              u64 addr, u64 seq,
-                                             bool write64bits)
+                                             unsigned flags)
 {
+        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+
         /* RELEASE_MEM - flush caches, send int */
         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
         amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
                                  EOP_TC_ACTION_EN |
                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
                                  EVENT_INDEX(5)));
-        amdgpu_ring_write(ring, DATA_SEL(write64bits ? 2 : 1) | INT_SEL(2));
+        amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
         amdgpu_ring_write(ring, addr & 0xfffffffc);
         amdgpu_ring_write(ring, upper_32_bits(addr));
         amdgpu_ring_write(ring, lower_32_bits(seq));
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
···

 static const u32 iceland_mgcg_cgcg_init[] =
 {
-    mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };

 static const u32 cz_mgcg_cgcg_init[] =
+19 -6
drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
···

         mutex_lock(&adev->pm.mutex);

+        /* smu init only needs to be called at startup, not resume.
+         * It should be in sw_init, but requires the fw info gathered
+         * in sw_init from other IP modules.
+         */
         ret = iceland_smu_init(adev);
         if (ret) {
                 DRM_ERROR("SMU initialization failed\n");
···
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

         mutex_lock(&adev->pm.mutex);
+        /* smu fini only needs to be called at teardown, not suspend.
+         * It should be in sw_fini, but we put it here for symmetry
+         * with smu init.
+         */
         iceland_smu_fini(adev);
         mutex_unlock(&adev->pm.mutex);
         return 0;
···
 static int iceland_dpm_suspend(void *handle)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        iceland_dpm_hw_fini(adev);
-
         return 0;
 }

 static int iceland_dpm_resume(void *handle)
 {
+        int ret;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-        iceland_dpm_hw_init(adev);
+        mutex_lock(&adev->pm.mutex);

-        return 0;
+        ret = iceland_smu_start(adev);
+        if (ret) {
+                DRM_ERROR("SMU start failed\n");
+                goto fail;
+        }
+
+fail:
+        mutex_unlock(&adev->pm.mutex);
+        return ret;
 }

 static int iceland_dpm_set_clockgating_state(void *handle,
+3 -2
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
···
  * an interrupt if needed (VI).
  */
 static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-                                      bool write64bits)
+                                      unsigned flags)
 {
+        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
         /* write the fence */
         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
         amdgpu_ring_write(ring, lower_32_bits(addr));
···
         amdgpu_ring_write(ring, lower_32_bits(seq));

         /* optionally write high bits as well */
-        if (write64bits) {
+        if (write64bit) {
                 addr += 4;
                 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                 amdgpu_ring_write(ring, lower_32_bits(addr));
+3 -2
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
···
  * an interrupt if needed (VI).
  */
 static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-                                      bool write64bits)
+                                      unsigned flags)
 {
+        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
         /* write the fence */
         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
         amdgpu_ring_write(ring, lower_32_bits(addr));
···
         amdgpu_ring_write(ring, lower_32_bits(seq));

         /* optionally write high bits as well */
-        if (write64bits) {
+        if (write64bit) {
                 addr += 4;
                 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                 amdgpu_ring_write(ring, lower_32_bits(addr));
+19 -6
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
···

         mutex_lock(&adev->pm.mutex);

+        /* smu init only needs to be called at startup, not resume.
+         * It should be in sw_init, but requires the fw info gathered
+         * in sw_init from other IP modules.
+         */
         ret = tonga_smu_init(adev);
         if (ret) {
                 DRM_ERROR("SMU initialization failed\n");
···
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

         mutex_lock(&adev->pm.mutex);
+        /* smu fini only needs to be called at teardown, not suspend.
+         * It should be in sw_fini, but we put it here for symmetry
+         * with smu init.
+         */
         tonga_smu_fini(adev);
         mutex_unlock(&adev->pm.mutex);
         return 0;
···
 static int tonga_dpm_suspend(void *handle)
 {
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        tonga_dpm_hw_fini(adev);
-
         return 0;
 }

 static int tonga_dpm_resume(void *handle)
 {
+        int ret;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-        tonga_dpm_hw_init(adev);
+        mutex_lock(&adev->pm.mutex);

-        return 0;
+        ret = tonga_smu_start(adev);
+        if (ret) {
+                DRM_ERROR("SMU start failed\n");
+                goto fail;
+        }
+
+fail:
+        mutex_unlock(&adev->pm.mutex);
+        return ret;
 }

 static int tonga_dpm_set_clockgating_state(void *handle,
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
···
  * Write a fence and a trap command to the ring.
  */
 static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-                                     bool write64bit)
+                                     unsigned flags)
 {
-        WARN_ON(write64bit);
+        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

         amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
         amdgpu_ring_write(ring, seq);
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
···
  * Write a fence and a trap command to the ring.
  */
 static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-                                     bool write64bit)
+                                     unsigned flags)
 {
-        WARN_ON(write64bit);
+        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

         amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
         amdgpu_ring_write(ring, seq);
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
···
  * Write a fence and a trap command to the ring.
  */
 static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
-                                     bool write64bit)
+                                     unsigned flags)
 {
-        WARN_ON(write64bit);
+        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

         amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
         amdgpu_ring_write(ring, seq);
+1 -5
drivers/gpu/drm/amd/amdgpu/vi.c
···
         mmPCIE_DATA, 0x000f0000, 0x00000000,
         mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
         mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
-        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
-        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
         mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
         mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 };
···
         mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
         mmPCIE_INDEX, 0xffffffff, 0x0140001c,
         mmPCIE_DATA, 0x000f0000, 0x00000000,
-        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
-        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
         mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
         mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 };
···
         case CHIP_CARRIZO:
                 adev->has_uvd = true;
                 adev->cg_flags = 0;
-                adev->pg_flags = AMDGPU_PG_SUPPORT_UVD;
+                adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
                 adev->external_rev_id = adev->rev_id + 0x1;
                 if (amdgpu_smc_load_fw && smc_enabled)
                         adev->firmware.smu_load = true;
+83 -65
include/uapi/drm/amdgpu_drm.h
···
 #define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
 #define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
 #define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
-#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, union drm_amdgpu_gem_va)
+#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
 #define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
 #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
 #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
···
 #define AMDGPU_CTX_OP_FREE_CTX 2
 #define AMDGPU_CTX_OP_QUERY_STATE 3

-#define AMDGPU_CTX_OP_STATE_RUNNING 1
-
 /* GPU reset status */
 #define AMDGPU_CTX_NO_RESET 0
-#define AMDGPU_CTX_GUILTY_RESET 1 /* this the context caused it */
-#define AMDGPU_CTX_INNOCENT_RESET 2 /* some other context caused it */
-#define AMDGPU_CTX_UNKNOWN_RESET 3 /* unknown cause */
+/* this the context caused it */
+#define AMDGPU_CTX_GUILTY_RESET 1
+/* some other context caused it */
+#define AMDGPU_CTX_INNOCENT_RESET 2
+/* unknown cause */
+#define AMDGPU_CTX_UNKNOWN_RESET 3

 struct drm_amdgpu_ctx_in {
+        /** AMDGPU_CTX_OP_* */
         uint32_t op;
+        /** For future use, no flags defined so far */
         uint32_t flags;
         uint32_t ctx_id;
         uint32_t _pad;
···
         } alloc;

         struct {
+                /** For future use, no flags defined so far */
                 uint64_t flags;
                 /** Number of resets caused by this context so far. */
                 uint32_t hangs;
···
 struct drm_amdgpu_gem_userptr {
         uint64_t addr;
         uint64_t size;
+        /* AMDGPU_GEM_USERPTR_* */
         uint32_t flags;
+        /* Resulting GEM handle */
         uint32_t handle;
 };
···
 /** The same structure is shared for input/output */
 struct drm_amdgpu_gem_metadata {
-        uint32_t handle; /* GEM Object handle */
-        uint32_t op; /** Do we want get or set metadata */
+        /** GEM Object handle */
+        uint32_t handle;
+        /** Do we want get or set metadata */
+        uint32_t op;
         struct {
+                /** For future use, no flags defined so far */
                 uint64_t flags;
-                uint64_t tiling_info; /* family specific tiling info */
+                /** family specific tiling info */
+                uint64_t tiling_info;
                 uint32_t data_size_bytes;
                 uint32_t data[64];
         } data;
 };

 struct drm_amdgpu_gem_mmap_in {
-        uint32_t handle; /** the GEM object handle */
+        /** the GEM object handle */
+        uint32_t handle;
         uint32_t _pad;
 };

 struct drm_amdgpu_gem_mmap_out {
-        uint64_t addr_ptr; /** mmap offset from the vma offset manager */
+        /** mmap offset from the vma offset manager */
+        uint64_t addr_ptr;
 };

 union drm_amdgpu_gem_mmap {
···
 };

 struct drm_amdgpu_gem_wait_idle_in {
-        uint32_t handle; /* GEM object handle */
+        /** GEM object handle */
+        uint32_t handle;
+        /** For future use, no flags defined so far */
         uint32_t flags;
-        uint64_t timeout; /* Timeout to wait. If 0 then returned immediately with the status */
+        /** Absolute timeout to wait */
+        uint64_t timeout;
 };

 struct drm_amdgpu_gem_wait_idle_out {
-        uint32_t status; /* BO status: 0 - BO is idle, 1 - BO is busy */
-        uint32_t domain; /* Returned current memory domain */
+        /** BO status: 0 - BO is idle, 1 - BO is busy */
+        uint32_t status;
+        /** Returned current memory domain */
+        uint32_t domain;
 };

 union drm_amdgpu_gem_wait_idle {
···
 };

 struct drm_amdgpu_wait_cs_in {
+        /** Command submission handle */
         uint64_t handle;
+        /** Absolute timeout to wait */
         uint64_t timeout;
         uint32_t ip_type;
         uint32_t ip_instance;
···
 };

 struct drm_amdgpu_wait_cs_out {
+        /** CS status: 0 - CS completed, 1 - CS still busy */
         uint64_t status;
 };
···
         struct drm_amdgpu_wait_cs_out out;
 };

-/* Sets or returns a value associated with a buffer. */
-struct drm_amdgpu_gem_op {
-        uint32_t handle; /* buffer */
-        uint32_t op; /* AMDGPU_GEM_OP_* */
-        uint64_t value; /* input or return value */
-};
-
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
 #define AMDGPU_GEM_OP_SET_PLACEMENT 1

+/* Sets or returns a value associated with a buffer. */
+struct drm_amdgpu_gem_op {
+        /** GEM object handle */
+        uint32_t handle;
+        /** AMDGPU_GEM_OP_* */
+        uint32_t op;
+        /** Input or return value */
+        uint64_t value;
+};
+
 #define AMDGPU_VA_OP_MAP 1
 #define AMDGPU_VA_OP_UNMAP 2
-
-#define AMDGPU_VA_RESULT_OK 0
-#define AMDGPU_VA_RESULT_ERROR 1
-#define AMDGPU_VA_RESULT_VA_INVALID_ALIGNMENT 2

 /* Mapping flags */
 /* readable mapping */
···
 /* executable mapping, new for VI */
 #define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)

-struct drm_amdgpu_gem_va_in {
-        /* GEM object handle */
+struct drm_amdgpu_gem_va {
+        /** GEM object handle */
         uint32_t handle;
         uint32_t _pad;
-        /* map or unmap*/
+        /** AMDGPU_VA_OP_* */
         uint32_t operation;
-        /* specify mapping flags */
+        /** AMDGPU_VM_PAGE_* */
         uint32_t flags;
-        /* va address to assign . Must be correctly aligned.*/
+        /** va address to assign . Must be correctly aligned.*/
         uint64_t va_address;
-        /* Specify offset inside of BO to assign. Must be correctly aligned.*/
+        /** Specify offset inside of BO to assign. Must be correctly aligned.*/
         uint64_t offset_in_bo;
-        /* Specify mapping size. If 0 and offset is 0 then map the whole BO.*/
-        /* Must be correctly aligned. */
+        /** Specify mapping size. Must be correctly aligned. */
         uint64_t map_size;
-};
-
-struct drm_amdgpu_gem_va_out {
-        uint32_t result;
-        uint32_t _pad;
-};
-
-union drm_amdgpu_gem_va {
-        struct drm_amdgpu_gem_va_in in;
-        struct drm_amdgpu_gem_va_out out;
 };

 #define AMDGPU_HW_IP_GFX 0
···
 #define AMDGPU_CHUNK_ID_IB 0x01
 #define AMDGPU_CHUNK_ID_FENCE 0x02
+
 struct drm_amdgpu_cs_chunk {
         uint32_t chunk_id;
         uint32_t length_dw;
···
         uint32_t bo_list_handle;
         uint32_t num_chunks;
         uint32_t _pad;
-        /* this points to uint64_t * which point to cs chunks */
+        /** this points to uint64_t * which point to cs chunks */
         uint64_t chunks;
 };
···
 };

 union drm_amdgpu_cs {
-        struct drm_amdgpu_cs_in in;
-        struct drm_amdgpu_cs_out out;
+        struct drm_amdgpu_cs_in   in;
+        struct drm_amdgpu_cs_out  out;
 };

 /* Specify flags to be used for IB */
···
 /* This IB should be submitted to CE */
 #define AMDGPU_IB_FLAG_CE (1<<0)

-/* GDS is used by this IB */
-#define AMDGPU_IB_FLAG_GDS (1<<1)
-
 /* CE Preamble */
-#define AMDGPU_IB_FLAG_PREAMBLE (1<<2)
+#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)

 struct drm_amdgpu_cs_chunk_ib {
         uint32_t _pad;
-        uint32_t flags; /* IB Flags */
-        uint64_t va_start; /* Virtual address to begin IB execution */
-        uint32_t ib_bytes; /* Size of submission */
-        uint32_t ip_type; /* HW IP to submit to */
-        uint32_t ip_instance; /* HW IP index of the same type to submit to */
-        uint32_t ring; /* Ring index to submit to */
+        /** AMDGPU_IB_FLAG_* */
+        uint32_t flags;
+        /** Virtual address to begin IB execution */
+        uint64_t va_start;
+        /** Size of submission */
+        uint32_t ib_bytes;
+        /** HW IP to submit to */
+        uint32_t ip_type;
+        /** HW IP index of the same type to submit to */
+        uint32_t ip_instance;
+        /** Ring index to submit to */
+        uint32_t ring;
 };

 struct drm_amdgpu_cs_chunk_fence {
···
         /** AMDGPU_HW_IP_* */
         uint32_t type;
         /**
-         * Index of the IP if there are more IPs of the same type.
-         * Ignored by AMDGPU_INFO_HW_IP_COUNT.
+         * Index of the IP if there are more IPs of the same
+         * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
          */
         uint32_t ip_instance;
 } query_hw_ip;

 struct {
         uint32_t dword_offset;
-        uint32_t count; /* number of registers to read */
+        /** number of registers to read */
+        uint32_t count;
         uint32_t instance;
+        /** For future use, no flags defined so far */
         uint32_t flags;
 } read_mmr_reg;

 struct {
         /** AMDGPU_INFO_FW_* */
         uint32_t fw_type;
-        /** Index of the IP if there are more IPs of the same type. */
+        /**
+         * Index of the IP if there are more IPs of
+         * the same type.
+         */
         uint32_t ip_instance;
         /**
          * Index of the engine. Whether this is used depends
···
         uint32_t family;
         uint32_t num_shader_engines;
         uint32_t num_shader_arrays_per_engine;
-        uint32_t gpu_counter_freq; /* in KHz */
-        uint64_t max_engine_clock; /* in KHz */
-        uint64_t max_memory_clock; /* in KHz */
+        /* in KHz */
+        uint32_t gpu_counter_freq;
+        uint64_t max_engine_clock;
+        uint64_t max_memory_clock;
         /* cu information */
         uint32_t cu_active_number;
         uint32_t cu_ao_mask;
···
         uint32_t gart_page_size;
         /** constant engine ram size*/
         uint32_t ce_ram_size;
-        /** video memory type infro*/
+        /** video memory type info*/
         uint32_t vram_type;
         /** video memory bit width*/
         uint32_t vram_bit_width;
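One consequence of the uapi cleanup above: removing AMDGPU_IB_FLAG_GDS renumbers AMDGPU_IB_FLAG_PREAMBLE from (1<<2) to (1<<1), so userspace built against the old header would set the wrong bit. A hedged sketch of filling the IB chunk against the new layout (it assumes the updated amdgpu_drm.h; the caller-supplied values are placeholders):

    #include <stdint.h>
    #include <string.h>
    #include <drm/amdgpu_drm.h> /* reworked header from this series */

    static void fill_ib_chunk(struct drm_amdgpu_cs_chunk_ib *ib,
                              uint64_t va, uint32_t bytes, uint32_t ring)
    {
            memset(ib, 0, sizeof(*ib));
            ib->flags = 0;        /* or AMDGPU_IB_FLAG_CE / _PREAMBLE */
            ib->va_start = va;    /* GPU VA where the IB was mapped */
            ib->ib_bytes = bytes; /* validated against the mapping size */
            ib->ip_type = AMDGPU_HW_IP_GFX;
            ib->ip_instance = 0;
            ib->ring = ring;
    }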