Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm radeon fixes from Dave Airlie:
"One core fix, but mostly radeon fixes for s/r and big endian UVD
support, and a fix to stop the GPU being reset for no good reason, and
crashing people's machines."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
drm/radeon: update lockup tracking when scheduling in empty ring
drm/prime: Honor requested file flags when exporting a buffer
drm/radeon: fix UVD on big endian
drm/radeon: fix write back suspend regression with uvd v2
drm/radeon: do not try to uselessly update virtual memory pagetable

+85 -55
+1 -2
drivers/gpu/drm/drm_prime.c
···
                 if (ret)
                         return ERR_PTR(ret);
         }
-        return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
-                              0600);
+        return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
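For context, the flags now forwarded to dma_buf_export() are the ones userspace requests through the PRIME handle-to-fd ioctl, which the helper previously ignored in favour of a hardcoded 0600. A minimal user-space sketch of requesting a close-on-exec dma-buf fd; the helper name and the assumption that drm_fd/gem_handle already exist are illustrative, while the ioctl, structure, and DRM_CLOEXEC flag are the real UAPI (the header may be installed as <drm/drm.h> or <libdrm/drm.h> depending on the distro):

#include <fcntl.h>          /* O_CLOEXEC, needed by DRM_CLOEXEC */
#include <sys/ioctl.h>
#include <drm/drm.h>        /* struct drm_prime_handle, DRM_IOCTL_PRIME_HANDLE_TO_FD */

/* Export a GEM handle as a dma-buf fd, asking for close-on-exec semantics.
 * drm_fd and gem_handle are assumed to come from earlier setup. */
static int export_gem_as_dmabuf(int drm_fd, unsigned int gem_handle)
{
        struct drm_prime_handle args = {
                .handle = gem_handle,
                .flags  = DRM_CLOEXEC,  /* file flags requested for the exported buffer */
                .fd     = -1,
        };

        if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
                return -1;
        return args.fd;
}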
+10 -3
drivers/gpu/drm/radeon/r600.c
···
 int r600_uvd_init(struct radeon_device *rdev)
 {
         int i, j, r;
+        /* disable byte swapping */
+        u32 lmi_swap_cntl = 0;
+        u32 mp_swap_cntl = 0;

         /* raise clocks while booting up the VCPU */
         radeon_set_uvd_clocks(rdev, 53300, 40000);
···
         WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                 (1 << 21) | (1 << 9) | (1 << 20));

-        /* disable byte swapping */
-        WREG32(UVD_LMI_SWAP_CNTL, 0);
-        WREG32(UVD_MP_SWAP_CNTL, 0);
+#ifdef __BIG_ENDIAN
+        /* swap (8 in 32) RB and IB */
+        lmi_swap_cntl = 0xa;
+        mp_swap_cntl = 0;
+#endif
+        WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+        WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);

         WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
         WREG32(UVD_MPC_SET_MUXA1, 0x0);
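The pattern above, selecting the swap-control value at build time instead of unconditionally writing zero, is plain compile-time endian handling. A small stand-alone sketch of the same idea using glibc's <endian.h>; the 0xa encoding is taken from the diff, everything else is illustrative user-space code, not driver code:

#include <endian.h>     /* __BYTE_ORDER, __BIG_ENDIAN, htole32() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Little-endian host: no swapping needed by the engine. */
        uint32_t lmi_swap_cntl = 0;

#if __BYTE_ORDER == __BIG_ENDIAN
        /* Big-endian host: ask for 8-in-32 swapping of RB and IB fetches. */
        lmi_swap_cntl = 0xa;
#endif

        printf("UVD_LMI_SWAP_CNTL value: 0x%x\n", (unsigned)lmi_swap_cntl);
        printf("0x11223344 stored as a little-endian word: 0x%08x\n",
               (unsigned)htole32(0x11223344u));
        return 0;
}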
+24 -29
drivers/gpu/drm/radeon/radeon_device.c
···
  */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-        int r;
-
-        if (rdev->wb.wb_obj) {
-                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-                if (unlikely(r != 0))
-                        return;
-                radeon_bo_kunmap(rdev->wb.wb_obj);
-                radeon_bo_unpin(rdev->wb.wb_obj);
-                radeon_bo_unreserve(rdev->wb.wb_obj);
-        }
         rdev->wb.enabled = false;
 }
···
 {
         radeon_wb_disable(rdev);
         if (rdev->wb.wb_obj) {
+                if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+                        radeon_bo_kunmap(rdev->wb.wb_obj);
+                        radeon_bo_unpin(rdev->wb.wb_obj);
+                        radeon_bo_unreserve(rdev->wb.wb_obj);
+                }
                 radeon_bo_unref(&rdev->wb.wb_obj);
                 rdev->wb.wb = NULL;
                 rdev->wb.wb_obj = NULL;
···
                         dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
                         return r;
                 }
-        }
-        r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-        if (unlikely(r != 0)) {
-                radeon_wb_fini(rdev);
-                return r;
-        }
-        r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-                        &rdev->wb.gpu_addr);
-        if (r) {
+                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+                if (unlikely(r != 0)) {
+                        radeon_wb_fini(rdev);
+                        return r;
+                }
+                r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+                                &rdev->wb.gpu_addr);
+                if (r) {
+                        radeon_bo_unreserve(rdev->wb.wb_obj);
+                        dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+                        radeon_wb_fini(rdev);
+                        return r;
+                }
+                r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
                 radeon_bo_unreserve(rdev->wb.wb_obj);
-                dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-                radeon_wb_fini(rdev);
-                return r;
-        }
-        r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-        radeon_bo_unreserve(rdev->wb.wb_obj);
-        if (r) {
-                dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-                radeon_wb_fini(rdev);
-                return r;
+                if (r) {
+                        dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+                        radeon_wb_fini(rdev);
+                        return r;
+                }
         }

         /* clear wb memory */
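The shape of this fix is a teardown split: radeon_wb_disable(), which also runs on suspend, now only clears the software flag, while the unmap/unpin/free moves into radeon_wb_fini(), so code that still dereferences the write-back mapping across suspend (the UVD fence path below) keeps a valid pointer. An illustrative sketch of that split; all names here are made up, not driver code:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Made-up stand-in for the write-back state: a flag plus a buffer that
 * readers may still touch while "disabled". */
struct wb_state {
        bool      enabled;
        uint32_t *buf;          /* stays mapped until fini */
};

/* Suspend path: stop advertising the buffer, but keep it alive because
 * readers (e.g. fence checks) may still look at it. */
static void wb_disable(struct wb_state *wb)
{
        wb->enabled = false;
}

/* Driver teardown: only here is the memory actually released. */
static void wb_fini(struct wb_state *wb)
{
        wb_disable(wb);
        free(wb->buf);
        wb->buf = NULL;
}

int main(void)
{
        struct wb_state wb = { .enabled = true,
                               .buf = calloc(4, sizeof(uint32_t)) };

        wb_disable(&wb);        /* e.g. on suspend: wb.buf is still valid here */
        wb_fini(&wb);           /* real teardown */
        return 0;
}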
+8 -2
drivers/gpu/drm/radeon/radeon_fence.c
···
 {
         struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
         if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-                *drv->cpu_addr = cpu_to_le32(seq);
+                if (drv->cpu_addr) {
+                        *drv->cpu_addr = cpu_to_le32(seq);
+                }
         } else {
                 WREG32(drv->scratch_reg, seq);
         }
···
         u32 seq = 0;

         if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-                seq = le32_to_cpu(*drv->cpu_addr);
+                if (drv->cpu_addr) {
+                        seq = le32_to_cpu(*drv->cpu_addr);
+                } else {
+                        seq = lower_32_bits(atomic64_read(&drv->last_seq));
+                }
         } else {
                 seq = RREG32(drv->scratch_reg);
         }
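The pattern in the read path, dereference the write-back address only if it exists and otherwise fall back to the last sequence number software already recorded, is what keeps fence checks safe while UVD's BO is unmapped. A small stand-alone sketch of that pattern; the struct name and values are invented for illustration:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct fake_fence_drv {
        volatile uint32_t *cpu_addr;    /* NULL while the BO is unmapped */
        _Atomic uint64_t   last_seq;    /* last sequence software has seen */
};

static uint32_t fence_read(struct fake_fence_drv *drv)
{
        if (drv->cpu_addr)
                return *drv->cpu_addr;  /* hardware-updated value */
        /* No mapping: report the last known sequence instead of crashing. */
        return (uint32_t)atomic_load(&drv->last_seq);
}

int main(void)
{
        uint32_t wb = 42;
        struct fake_fence_drv drv = { .cpu_addr = &wb };

        atomic_store(&drv.last_seq, 41);
        printf("mapped:   %u\n", (unsigned)fence_read(&drv));

        drv.cpu_addr = NULL;            /* e.g. across suspend */
        printf("unmapped: %u\n", (unsigned)fence_read(&drv));
        return 0;
}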
+4 -2
drivers/gpu/drm/radeon/radeon_gart.c
···
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
                      struct radeon_bo_va *bo_va)
 {
-        int r;
+        int r = 0;

         mutex_lock(&rdev->vm_manager.lock);
         mutex_lock(&bo_va->vm->mutex);
-        r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+        if (bo_va->soffset) {
+                r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+        }
         mutex_unlock(&rdev->vm_manager.lock);
         list_del(&bo_va->vm_list);
         mutex_unlock(&bo_va->vm->mutex);
+7
drivers/gpu/drm/radeon/radeon_ring.c
···
                 return -ENOMEM;
         /* Align requested size with padding so unlock_commit can
          * pad safely */
+        radeon_ring_free_size(rdev, ring);
+        if (ring->ring_free_dw == (ring->ring_size / 4)) {
+                /* This is an empty ring update lockup info to avoid
+                 * false positive.
+                 */
+                radeon_ring_lockup_update(ring);
+        }
         ndw = (ndw + ring->align_mask) & ~ring->align_mask;
         while (ndw > (ring->ring_free_dw - 1)) {
                 radeon_ring_free_size(rdev, ring);
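The reasoning behind the new check: the lockup detector compares "now" against the last time the ring made progress, so a ring that simply sat empty for a while would look hung the moment new work arrives. Refreshing the tracking state when allocating into a completely empty ring removes that false positive. A toy sketch of the bookkeeping; all names are invented:

#include <stdio.h>
#include <time.h>

struct toy_ring {
        unsigned int free_dw;   /* free space, in dwords */
        unsigned int size_dw;   /* total size, in dwords */
        time_t       last_activity;
};

static void lockup_update(struct toy_ring *ring)
{
        ring->last_activity = time(NULL);
}

static void ring_alloc(struct toy_ring *ring, unsigned int ndw)
{
        /* A completely empty ring has been idle, not hung: refresh the
         * timestamp so the detector does not count the idle time. */
        if (ring->free_dw == ring->size_dw)
                lockup_update(ring);
        ring->free_dw -= ndw;
}

int main(void)
{
        struct toy_ring ring = { .free_dw = 1024, .size_dw = 1024 };

        ring_alloc(&ring, 16);
        printf("free dwords after alloc: %u\n", ring.free_dw);
        return 0;
}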
+31 -17
drivers/gpu/drm/radeon/radeon_uvd.c
···
         if (!r) {
                 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
                 radeon_bo_unpin(rdev->uvd.vcpu_bo);
+                rdev->uvd.cpu_addr = NULL;
+                if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+                        radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+                }
                 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+                if (rdev->uvd.cpu_addr) {
+                        radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+                } else {
+                        rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+                }
         }
         return r;
 }
···
                 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
                 return r;
         }
+
+        /* Have been pin in cpu unmap unpin */
+        radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+        radeon_bo_unpin(rdev->uvd.vcpu_bo);

         r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                           &rdev->uvd.gpu_addr);
···
         }

         /* stitch together an UVD create msg */
-        msg[0] = 0x00000de4;
-        msg[1] = 0x00000000;
-        msg[2] = handle;
-        msg[3] = 0x00000000;
-        msg[4] = 0x00000000;
-        msg[5] = 0x00000000;
-        msg[6] = 0x00000000;
-        msg[7] = 0x00000780;
-        msg[8] = 0x00000440;
-        msg[9] = 0x00000000;
-        msg[10] = 0x01b37000;
+        msg[0] = cpu_to_le32(0x00000de4);
+        msg[1] = cpu_to_le32(0x00000000);
+        msg[2] = cpu_to_le32(handle);
+        msg[3] = cpu_to_le32(0x00000000);
+        msg[4] = cpu_to_le32(0x00000000);
+        msg[5] = cpu_to_le32(0x00000000);
+        msg[6] = cpu_to_le32(0x00000000);
+        msg[7] = cpu_to_le32(0x00000780);
+        msg[8] = cpu_to_le32(0x00000440);
+        msg[9] = cpu_to_le32(0x00000000);
+        msg[10] = cpu_to_le32(0x01b37000);
         for (i = 11; i < 1024; ++i)
-                msg[i] = 0x0;
+                msg[i] = cpu_to_le32(0x0);

         radeon_bo_kunmap(bo);
         radeon_bo_unreserve(bo);
···
         }

         /* stitch together an UVD destroy msg */
-        msg[0] = 0x00000de4;
-        msg[1] = 0x00000002;
-        msg[2] = handle;
-        msg[3] = 0x00000000;
+        msg[0] = cpu_to_le32(0x00000de4);
+        msg[1] = cpu_to_le32(0x00000002);
+        msg[2] = cpu_to_le32(handle);
+        msg[3] = cpu_to_le32(0x00000000);
         for (i = 4; i < 1024; ++i)
-                msg[i] = 0x0;
+                msg[i] = cpu_to_le32(0x0);

         radeon_bo_kunmap(bo);
         radeon_bo_unreserve(bo);
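The cpu_to_le32() wrapping of the message words matters because the UVD firmware consumes the message as a little-endian byte stream regardless of host endianness. A stand-alone demonstration of that point using glibc's htole32() in place of the kernel helper; the word value is taken from the create message above, the rest is illustrative:

#include <endian.h>     /* htole32() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* First word of the UVD create message, stored explicitly LE. */
        uint32_t word = htole32(0x00000de4);
        const uint8_t *bytes = (const uint8_t *)&word;

        /* Prints "e4 0d 00 00" on both little- and big-endian hosts,
         * which is the byte order the firmware expects. */
        for (int i = 0; i < 4; i++)
                printf("%02x ", bytes[i]);
        printf("\n");
        return 0;
}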