Merge tag 'drm-fixes-for-v4.10-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"drm fixes across the board.

Okay, holidays and LCA kinda caught up with me. I thought I'd get some
of this dequeued last week, but Hobart was sunny and warm and not all
gloomy and rainy as usual.

This is a bit large, but not too much considering it's two weeks' worth
of stuff from AMD and Intel.

core:
- one locking fix that helps with dynamic suspend/resume races

i915:
- mostly GVT updates; GVT was a recent introduction, so fixes for it
shouldn't cause any notable side effects.

amdgpu:
- a bunch of fixes for GPUs that use a different memory controller
design and therefore need different firmware.

exynos:
- decon regression fixes

msm:
- two regression fixes

etnaviv:
- a workaround for an MMU bug that needs a lot more work.

virtio:
- a sparse fix and a MAINTAINERS update"

* tag 'drm-fixes-for-v4.10-rc6' of git://people.freedesktop.org/~airlied/linux: (56 commits)
drm/exynos/decon5433: set STANDALONE_UPDATE_F on output enablement
drm/exynos/decon5433: fix CMU programming
drm/exynos/decon5433: do not disable video after reset
drm/i915: Ignore bogus plane coordinates on SKL when the plane is not visible
drm/i915: Remove WaDisableLSQCROPERFforOCL KBL workaround.
drm/amdgpu: add support for new hainan variants
drm/radeon: add support for new hainan variants
drm/amdgpu: change clock gating mode for uvd_v4.
drm/amdgpu: fix program vce instance logic error.
drm/amdgpu: fix bug set incorrect value to vce register
Revert "drm/amdgpu: Only update the CUR_SIZE register when necessary"
drm/msm: fix potential null ptr issue in non-iommu case
drm/msm/mdp5: rip out plane->pending tracking
drm/exynos/decon5433: set STANDALONE_UPDATE_F also if planes are disabled
drm/exynos/decon5433: update shadow registers iff there are active windows
drm/i915/gvt: rewrite gt reset handler using new function intel_gvt_reset_vgpu_locked
drm/i915/gvt: fix vGPU instance reuse issues by vGPU reset function
drm/i915/gvt: introduce intel_vgpu_reset_mmio() to reset mmio space
drm/i915/gvt: move mmio init/clean function to mmio.c
drm/i915/gvt: introduce intel_vgpu_reset_cfg_space to reset configuration space
...

+584 -520
+13 -3
MAINTAINERS
··· 4100 4100 4101 4101 DRM DRIVER FOR BOCHS VIRTUAL GPU 4102 4102 M: Gerd Hoffmann <kraxel@redhat.com> 4103 - S: Odd Fixes 4103 + L: virtualization@lists.linux-foundation.org 4104 + T: git git://git.kraxel.org/linux drm-qemu 4105 + S: Maintained 4104 4106 F: drivers/gpu/drm/bochs/ 4105 4107 4106 4108 DRM DRIVER FOR QEMU'S CIRRUS DEVICE 4107 4109 M: Dave Airlie <airlied@redhat.com> 4108 - S: Odd Fixes 4110 + M: Gerd Hoffmann <kraxel@redhat.com> 4111 + L: virtualization@lists.linux-foundation.org 4112 + T: git git://git.kraxel.org/linux drm-qemu 4113 + S: Obsolete 4114 + W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ 4109 4115 F: drivers/gpu/drm/cirrus/ 4110 4116 4111 4117 RADEON and AMDGPU DRM DRIVERS ··· 4304 4298 4305 4299 DRM DRIVER FOR QXL VIRTUAL GPU 4306 4300 M: Dave Airlie <airlied@redhat.com> 4307 - S: Odd Fixes 4301 + M: Gerd Hoffmann <kraxel@redhat.com> 4302 + L: virtualization@lists.linux-foundation.org 4303 + T: git git://git.kraxel.org/linux drm-qemu 4304 + S: Maintained 4308 4305 F: drivers/gpu/drm/qxl/ 4309 4306 F: include/uapi/drm/qxl_drm.h 4310 4307 ··· 13101 13092 M: Gerd Hoffmann <kraxel@redhat.com> 13102 13093 L: dri-devel@lists.freedesktop.org 13103 13094 L: virtualization@lists.linux-foundation.org 13095 + T: git git://git.kraxel.org/linux drm-qemu 13104 13096 S: Maintained 13105 13097 F: drivers/gpu/drm/virtio/ 13106 13098 F: include/uapi/linux/virtio_gpu.h
+7 -15
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 2512 2512 2513 2513 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2514 2514 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2515 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2516 + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2515 2517 2516 2518 return 0; 2517 2519 } ··· 2539 2537 int32_t hot_y) 2540 2538 { 2541 2539 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2542 - struct amdgpu_device *adev = crtc->dev->dev_private; 2543 2540 struct drm_gem_object *obj; 2544 2541 struct amdgpu_bo *aobj; 2545 2542 int ret; ··· 2579 2578 2580 2579 dce_v10_0_lock_cursor(crtc, true); 2581 2580 2582 - if (hot_x != amdgpu_crtc->cursor_hot_x || 2581 + if (width != amdgpu_crtc->cursor_width || 2582 + height != amdgpu_crtc->cursor_height || 2583 + hot_x != amdgpu_crtc->cursor_hot_x || 2583 2584 hot_y != amdgpu_crtc->cursor_hot_y) { 2584 2585 int x, y; 2585 2586 ··· 2590 2587 2591 2588 dce_v10_0_cursor_move_locked(crtc, x, y); 2592 2589 2593 - amdgpu_crtc->cursor_hot_x = hot_x; 2594 - amdgpu_crtc->cursor_hot_y = hot_y; 2595 - } 2596 - 2597 - if (width != amdgpu_crtc->cursor_width || 2598 - height != amdgpu_crtc->cursor_height) { 2599 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2600 - (width - 1) << 16 | (height - 1)); 2601 2590 amdgpu_crtc->cursor_width = width; 2602 2591 amdgpu_crtc->cursor_height = height; 2592 + amdgpu_crtc->cursor_hot_x = hot_x; 2593 + amdgpu_crtc->cursor_hot_y = hot_y; 2603 2594 } 2604 2595 2605 2596 dce_v10_0_show_cursor(crtc); ··· 2617 2620 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) 2618 2621 { 2619 2622 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2620 - struct amdgpu_device *adev = crtc->dev->dev_private; 2621 2623 2622 2624 if (amdgpu_crtc->cursor_bo) { 2623 2625 dce_v10_0_lock_cursor(crtc, true); 2624 2626 2625 2627 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2626 2628 amdgpu_crtc->cursor_y); 2627 - 2628 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2629 - (amdgpu_crtc->cursor_width - 1) << 16 | 2630 - (amdgpu_crtc->cursor_height - 1)); 2631 2629 2632 2630 dce_v10_0_show_cursor(crtc); 2633 2631
+7 -15
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 2532 2532 2533 2533 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2534 2534 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2535 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2536 + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2535 2537 2536 2538 return 0; 2537 2539 } ··· 2559 2557 int32_t hot_y) 2560 2558 { 2561 2559 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2562 - struct amdgpu_device *adev = crtc->dev->dev_private; 2563 2560 struct drm_gem_object *obj; 2564 2561 struct amdgpu_bo *aobj; 2565 2562 int ret; ··· 2599 2598 2600 2599 dce_v11_0_lock_cursor(crtc, true); 2601 2600 2602 - if (hot_x != amdgpu_crtc->cursor_hot_x || 2601 + if (width != amdgpu_crtc->cursor_width || 2602 + height != amdgpu_crtc->cursor_height || 2603 + hot_x != amdgpu_crtc->cursor_hot_x || 2603 2604 hot_y != amdgpu_crtc->cursor_hot_y) { 2604 2605 int x, y; 2605 2606 ··· 2610 2607 2611 2608 dce_v11_0_cursor_move_locked(crtc, x, y); 2612 2609 2613 - amdgpu_crtc->cursor_hot_x = hot_x; 2614 - amdgpu_crtc->cursor_hot_y = hot_y; 2615 - } 2616 - 2617 - if (width != amdgpu_crtc->cursor_width || 2618 - height != amdgpu_crtc->cursor_height) { 2619 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2620 - (width - 1) << 16 | (height - 1)); 2621 2610 amdgpu_crtc->cursor_width = width; 2622 2611 amdgpu_crtc->cursor_height = height; 2612 + amdgpu_crtc->cursor_hot_x = hot_x; 2613 + amdgpu_crtc->cursor_hot_y = hot_y; 2623 2614 } 2624 2615 2625 2616 dce_v11_0_show_cursor(crtc); ··· 2637 2640 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) 2638 2641 { 2639 2642 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2640 - struct amdgpu_device *adev = crtc->dev->dev_private; 2641 2643 2642 2644 if (amdgpu_crtc->cursor_bo) { 2643 2645 dce_v11_0_lock_cursor(crtc, true); 2644 2646 2645 2647 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2646 2648 amdgpu_crtc->cursor_y); 2647 - 2648 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2649 - (amdgpu_crtc->cursor_width - 1) << 16 | 2650 - (amdgpu_crtc->cursor_height - 1)); 2651 2649 2652 2650 dce_v11_0_show_cursor(crtc); 2653 2651
+9 -15
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 1859 1859 struct amdgpu_device *adev = crtc->dev->dev_private; 1860 1860 int xorigin = 0, yorigin = 0; 1861 1861 1862 + int w = amdgpu_crtc->cursor_width; 1863 + 1862 1864 amdgpu_crtc->cursor_x = x; 1863 1865 amdgpu_crtc->cursor_y = y; 1864 1866 ··· 1880 1878 1881 1879 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 1882 1880 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 1881 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 1882 + ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 1883 1883 1884 1884 return 0; 1885 1885 } ··· 1907 1903 int32_t hot_y) 1908 1904 { 1909 1905 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1910 - struct amdgpu_device *adev = crtc->dev->dev_private; 1911 1906 struct drm_gem_object *obj; 1912 1907 struct amdgpu_bo *aobj; 1913 1908 int ret; ··· 1947 1944 1948 1945 dce_v6_0_lock_cursor(crtc, true); 1949 1946 1950 - if (hot_x != amdgpu_crtc->cursor_hot_x || 1947 + if (width != amdgpu_crtc->cursor_width || 1948 + height != amdgpu_crtc->cursor_height || 1949 + hot_x != amdgpu_crtc->cursor_hot_x || 1951 1950 hot_y != amdgpu_crtc->cursor_hot_y) { 1952 1951 int x, y; 1953 1952 ··· 1958 1953 1959 1954 dce_v6_0_cursor_move_locked(crtc, x, y); 1960 1955 1961 - amdgpu_crtc->cursor_hot_x = hot_x; 1962 - amdgpu_crtc->cursor_hot_y = hot_y; 1963 - } 1964 - 1965 - if (width != amdgpu_crtc->cursor_width || 1966 - height != amdgpu_crtc->cursor_height) { 1967 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 1968 - (width - 1) << 16 | (height - 1)); 1969 1956 amdgpu_crtc->cursor_width = width; 1970 1957 amdgpu_crtc->cursor_height = height; 1958 + amdgpu_crtc->cursor_hot_x = hot_x; 1959 + amdgpu_crtc->cursor_hot_y = hot_y; 1971 1960 } 1972 1961 1973 1962 dce_v6_0_show_cursor(crtc); ··· 1985 1986 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) 1986 1987 { 1987 1988 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1988 - struct amdgpu_device *adev = crtc->dev->dev_private; 1989 1989 1990 1990 if (amdgpu_crtc->cursor_bo) { 1991 1991 dce_v6_0_lock_cursor(crtc, true); 1992 1992 1993 1993 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 1994 1994 amdgpu_crtc->cursor_y); 1995 - 1996 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 1997 - (amdgpu_crtc->cursor_width - 1) << 16 | 1998 - (amdgpu_crtc->cursor_height - 1)); 1999 1995 2000 1996 dce_v6_0_show_cursor(crtc); 2001 1997 dce_v6_0_lock_cursor(crtc, false);
+7 -15
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 2363 2363 2364 2364 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2365 2365 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2366 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2367 + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2366 2368 2367 2369 return 0; 2368 2370 } ··· 2390 2388 int32_t hot_y) 2391 2389 { 2392 2390 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2393 - struct amdgpu_device *adev = crtc->dev->dev_private; 2394 2391 struct drm_gem_object *obj; 2395 2392 struct amdgpu_bo *aobj; 2396 2393 int ret; ··· 2430 2429 2431 2430 dce_v8_0_lock_cursor(crtc, true); 2432 2431 2433 - if (hot_x != amdgpu_crtc->cursor_hot_x || 2432 + if (width != amdgpu_crtc->cursor_width || 2433 + height != amdgpu_crtc->cursor_height || 2434 + hot_x != amdgpu_crtc->cursor_hot_x || 2434 2435 hot_y != amdgpu_crtc->cursor_hot_y) { 2435 2436 int x, y; 2436 2437 ··· 2441 2438 2442 2439 dce_v8_0_cursor_move_locked(crtc, x, y); 2443 2440 2444 - amdgpu_crtc->cursor_hot_x = hot_x; 2445 - amdgpu_crtc->cursor_hot_y = hot_y; 2446 - } 2447 - 2448 - if (width != amdgpu_crtc->cursor_width || 2449 - height != amdgpu_crtc->cursor_height) { 2450 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2451 - (width - 1) << 16 | (height - 1)); 2452 2441 amdgpu_crtc->cursor_width = width; 2453 2442 amdgpu_crtc->cursor_height = height; 2443 + amdgpu_crtc->cursor_hot_x = hot_x; 2444 + amdgpu_crtc->cursor_hot_y = hot_y; 2454 2445 } 2455 2446 2456 2447 dce_v8_0_show_cursor(crtc); ··· 2468 2471 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) 2469 2472 { 2470 2473 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2471 - struct amdgpu_device *adev = crtc->dev->dev_private; 2472 2474 2473 2475 if (amdgpu_crtc->cursor_bo) { 2474 2476 dce_v8_0_lock_cursor(crtc, true); 2475 2477 2476 2478 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2477 2479 amdgpu_crtc->cursor_y); 2478 - 2479 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2480 - (amdgpu_crtc->cursor_width - 1) << 16 | 2481 - (amdgpu_crtc->cursor_height - 1)); 2482 2480 2483 2481 dce_v8_0_show_cursor(crtc); 2484 2482
+19 -15
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 44 44 MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); 45 45 MODULE_FIRMWARE("radeon/verde_mc.bin"); 46 46 MODULE_FIRMWARE("radeon/oland_mc.bin"); 47 + MODULE_FIRMWARE("radeon/si58_mc.bin"); 47 48 48 49 #define MC_SEQ_MISC0__MT__MASK 0xf0000000 49 50 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 ··· 114 113 const char *chip_name; 115 114 char fw_name[30]; 116 115 int err; 116 + bool is_58_fw = false; 117 117 118 118 DRM_DEBUG("\n"); 119 119 ··· 137 135 default: BUG(); 138 136 } 139 137 140 - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 138 + /* this memory configuration requires special firmware */ 139 + if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) 140 + is_58_fw = true; 141 + 142 + if (is_58_fw) 143 + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); 144 + else 145 + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 141 146 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 142 147 if (err) 143 148 goto out; ··· 472 463 WREG32(mmVM_CONTEXT1_CNTL, 473 464 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | 474 465 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | 475 - ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) | 476 - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 477 - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 478 - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 479 - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 480 - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 481 - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 482 - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 483 - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 484 - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 485 - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 486 - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 487 - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK); 466 + ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); 467 + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 468 + gmc_v6_0_set_fault_enable_default(adev, false); 469 + else 470 + gmc_v6_0_set_fault_enable_default(adev, true); 488 471 489 472 gmc_v6_0_gart_flush_gpu_tlb(adev, 0); 490 473 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", ··· 755 754 { 756 755 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 757 756 758 - return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 757 + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) 758 + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 759 + else 760 + return 0; 759 761 } 760 762 761 763 static int gmc_v6_0_sw_init(void *handle)
+5 -15
drivers/gpu/drm/amd/amdgpu/si_dpm.c
··· 64 64 MODULE_FIRMWARE("radeon/oland_k_smc.bin"); 65 65 MODULE_FIRMWARE("radeon/hainan_smc.bin"); 66 66 MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 67 + MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 67 68 68 69 union power_info { 69 70 struct _ATOM_POWERPLAY_INFO info; ··· 3488 3487 (adev->pdev->device == 0x6817) || 3489 3488 (adev->pdev->device == 0x6806)) 3490 3489 max_mclk = 120000; 3491 - } else if (adev->asic_type == CHIP_OLAND) { 3492 - if ((adev->pdev->revision == 0xC7) || 3493 - (adev->pdev->revision == 0x80) || 3494 - (adev->pdev->revision == 0x81) || 3495 - (adev->pdev->revision == 0x83) || 3496 - (adev->pdev->revision == 0x87) || 3497 - (adev->pdev->device == 0x6604) || 3498 - (adev->pdev->device == 0x6605)) { 3499 - max_sclk = 75000; 3500 - max_mclk = 80000; 3501 - } 3502 3490 } else if (adev->asic_type == CHIP_HAINAN) { 3503 3491 if ((adev->pdev->revision == 0x81) || 3504 3492 (adev->pdev->revision == 0x83) || ··· 3496 3506 (adev->pdev->device == 0x6665) || 3497 3507 (adev->pdev->device == 0x6667)) { 3498 3508 max_sclk = 75000; 3499 - max_mclk = 80000; 3500 3509 } 3501 3510 } 3502 3511 /* Apply dpm quirks */ ··· 7702 7713 ((adev->pdev->device == 0x6660) || 7703 7714 (adev->pdev->device == 0x6663) || 7704 7715 (adev->pdev->device == 0x6665) || 7705 - (adev->pdev->device == 0x6667))) || 7706 - ((adev->pdev->revision == 0xc3) && 7707 - (adev->pdev->device == 0x6665))) 7716 + (adev->pdev->device == 0x6667)))) 7708 7717 chip_name = "hainan_k"; 7718 + else if ((adev->pdev->revision == 0xc3) && 7719 + (adev->pdev->device == 0x6665)) 7720 + chip_name = "banks_k_2"; 7709 7721 else 7710 7722 chip_name = "hainan"; 7711 7723 break;
+10 -32
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
··· 40 40 #include "smu/smu_7_0_1_sh_mask.h" 41 41 42 42 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); 43 - static void uvd_v4_2_init_cg(struct amdgpu_device *adev); 44 43 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); 45 44 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); 46 45 static int uvd_v4_2_start(struct amdgpu_device *adev); 47 46 static void uvd_v4_2_stop(struct amdgpu_device *adev); 48 47 static int uvd_v4_2_set_clockgating_state(void *handle, 49 48 enum amd_clockgating_state state); 49 + static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, 50 + bool sw_mode); 50 51 /** 51 52 * uvd_v4_2_ring_get_rptr - get read pointer 52 53 * ··· 141 140 142 141 return r; 143 142 } 144 - 143 + static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, 144 + bool enable); 145 145 /** 146 146 * uvd_v4_2_hw_init - start and test UVD block 147 147 * ··· 157 155 uint32_t tmp; 158 156 int r; 159 157 160 - uvd_v4_2_init_cg(adev); 161 - uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE); 158 + uvd_v4_2_enable_mgcg(adev, true); 162 159 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); 163 160 r = uvd_v4_2_start(adev); 164 161 if (r) ··· 267 266 struct amdgpu_ring *ring = &adev->uvd.ring; 268 267 uint32_t rb_bufsz; 269 268 int i, j, r; 270 - 271 269 /* disable byte swapping */ 272 270 u32 lmi_swap_cntl = 0; 273 271 u32 mp_swap_cntl = 0; 272 + 273 + WREG32(mmUVD_CGC_GATE, 0); 274 + uvd_v4_2_set_dcm(adev, true); 274 275 275 276 uvd_v4_2_mc_resume(adev); 276 277 ··· 409 406 410 407 /* Unstall UMC and register bus */ 411 408 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); 409 + 410 + uvd_v4_2_set_dcm(adev, false); 412 411 } 413 412 414 413 /** ··· 624 619 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); 625 620 } 626 621 627 - static void uvd_v4_2_init_cg(struct amdgpu_device *adev) 628 - { 629 - bool hw_mode = true; 630 - 631 - if (hw_mode) { 632 - uvd_v4_2_set_dcm(adev, false); 633 - } else { 634 - u32 tmp = RREG32(mmUVD_CGC_CTRL); 635 - tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; 636 - WREG32(mmUVD_CGC_CTRL, tmp); 637 - } 638 - } 639 - 640 622 static bool uvd_v4_2_is_idle(void *handle) 641 623 { 642 624 struct amdgpu_device *adev = (struct amdgpu_device *)handle; ··· 677 685 static int uvd_v4_2_set_clockgating_state(void *handle, 678 686 enum amd_clockgating_state state) 679 687 { 680 - bool gate = false; 681 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 682 - 683 - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) 684 - return 0; 685 - 686 - if (state == AMD_CG_STATE_GATE) 687 - gate = true; 688 - 689 - uvd_v4_2_enable_mgcg(adev, gate); 690 - 691 688 return 0; 692 689 } 693 690 ··· 691 710 * the smc and the hw blocks 692 711 */ 693 712 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 694 - 695 - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) 696 - return 0; 697 713 698 714 if (state == AMD_PG_STATE_GATE) { 699 715 uvd_v4_2_stop(adev);
+17 -10
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 43 43 44 44 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 45 45 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 46 + #define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 47 + 46 48 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 47 49 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 48 50 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 51 + #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 52 + 49 53 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 50 54 51 55 #define VCE_V3_0_FW_SIZE (384 * 1024) ··· 57 53 #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) 58 54 59 55 #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) 56 + 57 + #define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ 58 + | GRBM_GFX_INDEX__VCE_ALL_PIPE) 60 59 61 60 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 62 61 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); ··· 182 175 WREG32(mmVCE_UENC_CLOCK_GATING_2, data); 183 176 184 177 data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); 185 - data &= ~0xffc00000; 178 + data &= ~0x3ff; 186 179 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); 187 180 188 181 data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); ··· 256 249 if (adev->vce.harvest_config & (1 << idx)) 257 250 continue; 258 251 259 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 252 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); 260 253 vce_v3_0_mc_resume(adev, idx); 261 254 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 262 255 ··· 280 273 } 281 274 } 282 275 283 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 276 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 284 277 mutex_unlock(&adev->grbm_idx_mutex); 285 278 286 279 return 0; ··· 295 288 if (adev->vce.harvest_config & (1 << idx)) 296 289 continue; 297 290 298 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 291 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); 299 292 300 293 if (adev->asic_type >= CHIP_STONEY) 301 294 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); ··· 313 306 vce_v3_0_set_vce_sw_clock_gating(adev, false); 314 307 } 315 308 316 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 309 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 317 310 mutex_unlock(&adev->grbm_idx_mutex); 318 311 319 312 return 0; ··· 593 586 * VCE team suggest use bit 3--bit 6 for busy status check 594 587 */ 595 588 mutex_lock(&adev->grbm_idx_mutex); 596 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); 589 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 597 590 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 598 591 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 599 592 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 600 593 } 601 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); 594 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 602 595 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 603 596 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 604 597 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 605 598 } 606 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); 599 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 607 600 mutex_unlock(&adev->grbm_idx_mutex); 608 601 609 602 if (srbm_soft_reset) { ··· 741 734 if (adev->vce.harvest_config & (1 << i)) 742 735 continue; 743 736 744 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); 737 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); 745 738 746 739 if (enable) { 747 740 /* initialize 
VCE_CLOCK_GATING_A: Clock ON/OFF delay */ ··· 760 753 vce_v3_0_set_vce_sw_clock_gating(adev, enable); 761 754 } 762 755 763 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 756 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 764 757 mutex_unlock(&adev->grbm_idx_mutex); 765 758 766 759 return 0;
+2 -2
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
··· 200 200 cgs_set_clockgating_state( 201 201 hwmgr->device, 202 202 AMD_IP_BLOCK_TYPE_VCE, 203 - AMD_CG_STATE_UNGATE); 203 + AMD_CG_STATE_GATE); 204 204 cgs_set_powergating_state( 205 205 hwmgr->device, 206 206 AMD_IP_BLOCK_TYPE_VCE, ··· 218 218 cgs_set_clockgating_state( 219 219 hwmgr->device, 220 220 AMD_IP_BLOCK_TYPE_VCE, 221 - AMD_PG_STATE_GATE); 221 + AMD_PG_STATE_UNGATE); 222 222 cz_dpm_update_vce_dpm(hwmgr); 223 223 cz_enable_disable_vce_dpm(hwmgr, true); 224 224 return 0;
+16 -8
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
··· 1402 1402 cz_hwmgr->vce_dpm.hard_min_clk, 1403 1403 PPSMC_MSG_SetEclkHardMin)); 1404 1404 } else { 1405 - /*EPR# 419220 -HW limitation to to */ 1406 - cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; 1407 - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1408 - PPSMC_MSG_SetEclkHardMin, 1409 - cz_get_eclk_level(hwmgr, 1410 - cz_hwmgr->vce_dpm.hard_min_clk, 1411 - PPSMC_MSG_SetEclkHardMin)); 1412 - 1405 + /*Program HardMin based on the vce_arbiter.ecclk */ 1406 + if (hwmgr->vce_arbiter.ecclk == 0) { 1407 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1408 + PPSMC_MSG_SetEclkHardMin, 0); 1409 + /* disable ECLK DPM 0. Otherwise VCE could hang if 1410 + * switching SCLK from DPM 0 to 6/7 */ 1411 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1412 + PPSMC_MSG_SetEclkSoftMin, 1); 1413 + } else { 1414 + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; 1415 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1416 + PPSMC_MSG_SetEclkHardMin, 1417 + cz_get_eclk_level(hwmgr, 1418 + cz_hwmgr->vce_dpm.hard_min_clk, 1419 + PPSMC_MSG_SetEclkHardMin)); 1420 + } 1413 1421 } 1414 1422 return 0; 1415 1423 }
+7
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
··· 1382 1382 1383 1383 pm_runtime_enable(dev); 1384 1384 1385 + pm_runtime_get_sync(dev); 1385 1386 phy_power_on(dp->phy); 1386 1387 1387 1388 analogix_dp_init_dp(dp); ··· 1415 1414 goto err_disable_pm_runtime; 1416 1415 } 1417 1416 1417 + phy_power_off(dp->phy); 1418 + pm_runtime_put(dev); 1419 + 1418 1420 return 0; 1419 1421 1420 1422 err_disable_pm_runtime: 1423 + 1424 + phy_power_off(dp->phy); 1425 + pm_runtime_put(dev); 1421 1426 pm_runtime_disable(dev); 1422 1427 1423 1428 return ret;
+9
drivers/gpu/drm/cirrus/Kconfig
··· 7 7 This is a KMS driver for emulated cirrus device in qemu. 8 8 It is *NOT* intended for real cirrus devices. This requires 9 9 the modesetting userspace X.org driver. 10 + 11 + Cirrus is obsolete, the hardware was designed in the 90ies 12 + and can't keep up with todays needs. More background: 13 + https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ 14 + 15 + Better alternatives are: 16 + - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+) 17 + - qxl (DRM_QXL, qemu -vga qxl, works best with spice) 18 + - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
+7
drivers/gpu/drm/drm_modes.c
··· 1460 1460 return NULL; 1461 1461 1462 1462 mode->type |= DRM_MODE_TYPE_USERDEF; 1463 + /* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */ 1464 + if (cmd->xres == 1366 && mode->hdisplay == 1368) { 1465 + mode->hdisplay = 1366; 1466 + mode->hsync_start--; 1467 + mode->hsync_end--; 1468 + drm_mode_set_name(mode); 1469 + } 1463 1470 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1464 1471 return mode; 1465 1472 }
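For reference, a hedged illustration of what the new fixup changes (the example_1366x768 wrapper below is hypothetical, not from this series): CVT/GTF generate horizontal timings in 8-pixel cells, so a user-specified 1366-wide mode comes back 1368 wide; the quirk snaps hdisplay back to 1366 and shifts hsync_start/hsync_end by one pixel so the horizontal timing stays self-consistent.

    #include <drm/drm_connector.h>
    #include <drm/drm_modes.h>

    /* Hypothetical helper, for illustration only: parse a video=1366x768
     * style mode.  Without the quirk the returned mode has hdisplay == 1368
     * (CVT rounds up to an 8-pixel cell); with it, hdisplay == 1366. */
    static struct drm_display_mode *example_1366x768(struct drm_device *dev)
    {
    	struct drm_cmdline_mode cmd = {
    		.specified = true,
    		.xres = 1366,
    		.yres = 768,
    	};

    	return drm_mode_create_from_cmdline_mode(dev, &cmd);
    }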
+31 -32
drivers/gpu/drm/drm_probe_helper.c
··· 115 115 116 116 #define DRM_OUTPUT_POLL_PERIOD (10*HZ) 117 117 /** 118 - * drm_kms_helper_poll_enable_locked - re-enable output polling. 118 + * drm_kms_helper_poll_enable - re-enable output polling. 119 119 * @dev: drm_device 120 120 * 121 - * This function re-enables the output polling work without 122 - * locking the mode_config mutex. 121 + * This function re-enables the output polling work, after it has been 122 + * temporarily disabled using drm_kms_helper_poll_disable(), for example over 123 + * suspend/resume. 123 124 * 124 - * This is like drm_kms_helper_poll_enable() however it is to be 125 - * called from a context where the mode_config mutex is locked 126 - * already. 125 + * Drivers can call this helper from their device resume implementation. It is 126 + * an error to call this when the output polling support has not yet been set 127 + * up. 128 + * 129 + * Note that calls to enable and disable polling must be strictly ordered, which 130 + * is automatically the case when they're only call from suspend/resume 131 + * callbacks. 127 132 */ 128 - void drm_kms_helper_poll_enable_locked(struct drm_device *dev) 133 + void drm_kms_helper_poll_enable(struct drm_device *dev) 129 134 { 130 135 bool poll = false; 131 136 struct drm_connector *connector; 132 137 unsigned long delay = DRM_OUTPUT_POLL_PERIOD; 133 - 134 - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 135 138 136 139 if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) 137 140 return; ··· 146 143 } 147 144 148 145 if (dev->mode_config.delayed_event) { 146 + /* 147 + * FIXME: 148 + * 149 + * Use short (1s) delay to handle the initial delayed event. 150 + * This delay should not be needed, but Optimus/nouveau will 151 + * fail in a mysterious way if the delayed event is handled as 152 + * soon as possible like it is done in 153 + * drm_helper_probe_single_connector_modes() in case the poll 154 + * was enabled before. 155 + */ 149 156 poll = true; 150 - delay = 0; 157 + delay = HZ; 151 158 } 152 159 153 160 if (poll) 154 161 schedule_delayed_work(&dev->mode_config.output_poll_work, delay); 155 162 } 156 - EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); 163 + EXPORT_SYMBOL(drm_kms_helper_poll_enable); 157 164 158 165 static enum drm_connector_status 159 166 drm_connector_detect(struct drm_connector *connector, bool force) ··· 290 277 291 278 /* Re-enable polling in case the global poll config changed. */ 292 279 if (drm_kms_helper_poll != dev->mode_config.poll_running) 293 - drm_kms_helper_poll_enable_locked(dev); 280 + drm_kms_helper_poll_enable(dev); 294 281 295 282 dev->mode_config.poll_running = drm_kms_helper_poll; 296 283 ··· 482 469 * This function disables the output polling work. 483 470 * 484 471 * Drivers can call this helper from their device suspend implementation. It is 485 - * not an error to call this even when output polling isn't enabled or arlready 486 - * disabled. 472 + * not an error to call this even when output polling isn't enabled or already 473 + * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable(). 474 + * 475 + * Note that calls to enable and disable polling must be strictly ordered, which 476 + * is automatically the case when they're only call from suspend/resume 477 + * callbacks. 
487 478 */ 488 479 void drm_kms_helper_poll_disable(struct drm_device *dev) 489 480 { ··· 496 479 cancel_delayed_work_sync(&dev->mode_config.output_poll_work); 497 480 } 498 481 EXPORT_SYMBOL(drm_kms_helper_poll_disable); 499 - 500 - /** 501 - * drm_kms_helper_poll_enable - re-enable output polling. 502 - * @dev: drm_device 503 - * 504 - * This function re-enables the output polling work. 505 - * 506 - * Drivers can call this helper from their device resume implementation. It is 507 - * an error to call this when the output polling support has not yet been set 508 - * up. 509 - */ 510 - void drm_kms_helper_poll_enable(struct drm_device *dev) 511 - { 512 - mutex_lock(&dev->mode_config.mutex); 513 - drm_kms_helper_poll_enable_locked(dev); 514 - mutex_unlock(&dev->mode_config.mutex); 515 - } 516 - EXPORT_SYMBOL(drm_kms_helper_poll_enable); 517 482 518 483 /** 519 484 * drm_kms_helper_poll_init - initialize and enable output polling
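The reworked kernel-doc above is explicit that the enable and disable calls must be strictly ordered, and that the enable path no longer needs (or takes) the mode_config mutex. A minimal, hedged sketch of the pairing it describes, using hypothetical foo_* suspend/resume hooks that are not part of this series:

    #include <linux/device.h>
    #include <drm/drm_crtc_helper.h>

    /* Hypothetical driver PM hooks, for illustration only: polling is
     * disabled first on suspend and re-enabled last on resume, so the two
     * calls are naturally ordered as the new documentation requires. */
    static int foo_pm_suspend(struct device *dev)
    {
    	struct drm_device *drm = dev_get_drvdata(dev);

    	drm_kms_helper_poll_disable(drm);
    	/* ... driver-specific suspend ... */
    	return 0;
    }

    static int foo_pm_resume(struct device *dev)
    {
    	struct drm_device *drm = dev_get_drvdata(dev);

    	/* ... driver-specific resume ... */
    	drm_kms_helper_poll_enable(drm);
    	return 0;
    }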
+6 -1
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
··· 116 116 struct list_head list; 117 117 bool found; 118 118 119 + /* 120 + * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick 121 + * drm_mm into giving out a low IOVA after address space 122 + * rollover. This needs a proper fix. 123 + */ 119 124 ret = drm_mm_insert_node_in_range(&mmu->mm, node, 120 125 size, 0, mmu->last_iova, ~0UL, 121 - DRM_MM_SEARCH_DEFAULT); 126 + mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); 122 127 123 128 if (ret != -ENOSPC) 124 129 break;
+6 -9
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
··· 46 46 BIT_CLKS_ENABLED, 47 47 BIT_IRQS_ENABLED, 48 48 BIT_WIN_UPDATED, 49 - BIT_SUSPENDED 49 + BIT_SUSPENDED, 50 + BIT_REQUEST_UPDATE 50 51 }; 51 52 52 53 struct decon_context { ··· 141 140 m->crtc_vsync_start = m->crtc_vdisplay + 1; 142 141 m->crtc_vsync_end = m->crtc_vsync_start + 1; 143 142 } 144 - 145 - decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0); 146 - 147 - /* enable clock gate */ 148 - val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F; 149 - writel(val, ctx->addr + DECON_CMU); 150 143 151 144 if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) 152 145 decon_setup_trigger(ctx); ··· 310 315 311 316 /* window enable */ 312 317 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); 318 + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); 313 319 } 314 320 315 321 static void decon_disable_plane(struct exynos_drm_crtc *crtc, ··· 323 327 return; 324 328 325 329 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); 330 + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); 326 331 } 327 332 328 333 static void decon_atomic_flush(struct exynos_drm_crtc *crtc) ··· 337 340 for (i = ctx->first_win; i < WINDOWS_NR; i++) 338 341 decon_shadow_protect_win(ctx, i, false); 339 342 340 - /* standalone update */ 341 - decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); 343 + if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) 344 + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); 342 345 343 346 if (ctx->out_type & IFTYPE_I80) 344 347 set_bit(BIT_WIN_UPDATED, &ctx->flags);
+27 -9
drivers/gpu/drm/i915/gvt/aperture_gm.c
··· 37 37 #include "i915_drv.h" 38 38 #include "gvt.h" 39 39 40 - #define MB_TO_BYTES(mb) ((mb) << 20ULL) 41 - #define BYTES_TO_MB(b) ((b) >> 20ULL) 42 - 43 - #define HOST_LOW_GM_SIZE MB_TO_BYTES(128) 44 - #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) 45 - #define HOST_FENCE 4 46 - 47 40 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) 48 41 { 49 42 struct intel_gvt *gvt = vgpu->gvt; ··· 158 165 POSTING_READ(fence_reg_lo); 159 166 } 160 167 168 + static void _clear_vgpu_fence(struct intel_vgpu *vgpu) 169 + { 170 + int i; 171 + 172 + for (i = 0; i < vgpu_fence_sz(vgpu); i++) 173 + intel_vgpu_write_fence(vgpu, i, 0); 174 + } 175 + 161 176 static void free_vgpu_fence(struct intel_vgpu *vgpu) 162 177 { 163 178 struct intel_gvt *gvt = vgpu->gvt; ··· 179 178 intel_runtime_pm_get(dev_priv); 180 179 181 180 mutex_lock(&dev_priv->drm.struct_mutex); 181 + _clear_vgpu_fence(vgpu); 182 182 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 183 183 reg = vgpu->fence.regs[i]; 184 - intel_vgpu_write_fence(vgpu, i, 0); 185 184 list_add_tail(&reg->link, 186 185 &dev_priv->mm.fence_list); 187 186 } ··· 209 208 continue; 210 209 list_del(pos); 211 210 vgpu->fence.regs[i] = reg; 212 - intel_vgpu_write_fence(vgpu, i, 0); 213 211 if (++i == vgpu_fence_sz(vgpu)) 214 212 break; 215 213 } 216 214 if (i != vgpu_fence_sz(vgpu)) 217 215 goto out_free_fence; 216 + 217 + _clear_vgpu_fence(vgpu); 218 218 219 219 mutex_unlock(&dev_priv->drm.struct_mutex); 220 220 intel_runtime_pm_put(dev_priv); ··· 313 311 free_vgpu_gm(vgpu); 314 312 free_vgpu_fence(vgpu); 315 313 free_resource(vgpu); 314 + } 315 + 316 + /** 317 + * intel_vgpu_reset_resource - reset resource state owned by a vGPU 318 + * @vgpu: a vGPU 319 + * 320 + * This function is used to reset resource state owned by a vGPU. 321 + * 322 + */ 323 + void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) 324 + { 325 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 326 + 327 + intel_runtime_pm_get(dev_priv); 328 + _clear_vgpu_fence(vgpu); 329 + intel_runtime_pm_put(dev_priv); 316 330 } 317 331 318 332 /**
+74
drivers/gpu/drm/i915/gvt/cfg_space.c
··· 282 282 } 283 283 return 0; 284 284 } 285 + 286 + /** 287 + * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU 288 + * 289 + * @vgpu: a vGPU 290 + * @primary: is the vGPU presented as primary 291 + * 292 + */ 293 + void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, 294 + bool primary) 295 + { 296 + struct intel_gvt *gvt = vgpu->gvt; 297 + const struct intel_gvt_device_info *info = &gvt->device_info; 298 + u16 *gmch_ctl; 299 + int i; 300 + 301 + memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, 302 + info->cfg_space_size); 303 + 304 + if (!primary) { 305 + vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = 306 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 307 + vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = 308 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 309 + } 310 + 311 + /* Show guest that there isn't any stolen memory.*/ 312 + gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); 313 + *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); 314 + 315 + intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, 316 + gvt_aperture_pa_base(gvt), true); 317 + 318 + vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO 319 + | PCI_COMMAND_MEMORY 320 + | PCI_COMMAND_MASTER); 321 + /* 322 + * Clear the bar upper 32bit and let guest to assign the new value 323 + */ 324 + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); 325 + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); 326 + memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); 327 + 328 + for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { 329 + vgpu->cfg_space.bar[i].size = pci_resource_len( 330 + gvt->dev_priv->drm.pdev, i * 2); 331 + vgpu->cfg_space.bar[i].tracked = false; 332 + } 333 + } 334 + 335 + /** 336 + * intel_vgpu_reset_cfg_space - reset vGPU configuration space 337 + * 338 + * @vgpu: a vGPU 339 + * 340 + */ 341 + void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) 342 + { 343 + u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; 344 + bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != 345 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 346 + 347 + if (cmd & PCI_COMMAND_MEMORY) { 348 + trap_gttmmio(vgpu, false); 349 + map_aperture(vgpu, false); 350 + } 351 + 352 + /** 353 + * Currently we only do such reset when vGPU is not 354 + * owned by any VM, so we simply restore entire cfg 355 + * space to default value. 356 + */ 357 + intel_vgpu_init_cfg_space(vgpu, primary); 358 + }
+44 -37
drivers/gpu/drm/i915/gvt/gtt.c
··· 240 240 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) 241 241 { 242 242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 243 - u64 pte; 244 243 245 - #ifdef readq 246 - pte = readq(addr); 247 - #else 248 - pte = ioread32(addr); 249 - pte |= (u64)ioread32(addr + 4) << 32; 250 - #endif 251 - return pte; 244 + return readq(addr); 252 245 } 253 246 254 247 static void write_pte64(struct drm_i915_private *dev_priv, ··· 249 256 { 250 257 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 251 258 252 - #ifdef writeq 253 259 writeq(pte, addr); 254 - #else 255 - iowrite32((u32)pte, addr); 256 - iowrite32(pte >> 32, addr + 4); 257 - #endif 260 + 258 261 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 259 262 POSTING_READ(GFX_FLSH_CNTL_GEN6); 260 263 } ··· 1369 1380 info->gtt_entry_size; 1370 1381 mem = kzalloc(mm->has_shadow_page_table ? 1371 1382 mm->page_table_entry_size * 2 1372 - : mm->page_table_entry_size, 1373 - GFP_ATOMIC); 1383 + : mm->page_table_entry_size, GFP_KERNEL); 1374 1384 if (!mem) 1375 1385 return -ENOMEM; 1376 1386 mm->virtual_page_table = mem; ··· 1520 1532 struct intel_vgpu_mm *mm; 1521 1533 int ret; 1522 1534 1523 - mm = kzalloc(sizeof(*mm), GFP_ATOMIC); 1535 + mm = kzalloc(sizeof(*mm), GFP_KERNEL); 1524 1536 if (!mm) { 1525 1537 ret = -ENOMEM; 1526 1538 goto fail; ··· 1874 1886 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1875 1887 int page_entry_num = GTT_PAGE_SIZE >> 1876 1888 vgpu->gvt->device_info.gtt_entry_size_shift; 1877 - struct page *scratch_pt; 1889 + void *scratch_pt; 1878 1890 unsigned long mfn; 1879 1891 int i; 1880 - void *p; 1881 1892 1882 1893 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 1883 1894 return -EINVAL; 1884 1895 1885 - scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 1896 + scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 1886 1897 if (!scratch_pt) { 1887 1898 gvt_err("fail to allocate scratch page\n"); 1888 1899 return -ENOMEM; 1889 1900 } 1890 1901 1891 - p = kmap_atomic(scratch_pt); 1892 - mfn = intel_gvt_hypervisor_virt_to_mfn(p); 1902 + mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); 1893 1903 if (mfn == INTEL_GVT_INVALID_ADDR) { 1894 - gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); 1895 - kunmap_atomic(p); 1896 - __free_page(scratch_pt); 1904 + gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); 1905 + free_page((unsigned long)scratch_pt); 1897 1906 return -EFAULT; 1898 1907 } 1899 1908 gtt->scratch_pt[type].page_mfn = mfn; 1900 - gtt->scratch_pt[type].page = scratch_pt; 1909 + gtt->scratch_pt[type].page = virt_to_page(scratch_pt); 1901 1910 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 1902 1911 vgpu->id, type, mfn); 1903 1912 ··· 1903 1918 * scratch_pt[type] indicate the scratch pt/scratch page used by the 1904 1919 * 'type' pt. 1905 1920 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 1906 - * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self 1921 + * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self 1907 1922 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 
1908 1923 */ 1909 1924 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { ··· 1921 1936 se.val64 |= PPAT_CACHED_INDEX; 1922 1937 1923 1938 for (i = 0; i < page_entry_num; i++) 1924 - ops->set_entry(p, &se, i, false, 0, vgpu); 1939 + ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); 1925 1940 } 1926 - 1927 - kunmap_atomic(p); 1928 1941 1929 1942 return 0; 1930 1943 } ··· 2191 2208 int intel_gvt_init_gtt(struct intel_gvt *gvt) 2192 2209 { 2193 2210 int ret; 2194 - void *page_addr; 2211 + void *page; 2195 2212 2196 2213 gvt_dbg_core("init gtt\n"); 2197 2214 ··· 2204 2221 return -ENODEV; 2205 2222 } 2206 2223 2207 - gvt->gtt.scratch_ggtt_page = 2208 - alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 2209 - if (!gvt->gtt.scratch_ggtt_page) { 2224 + page = (void *)get_zeroed_page(GFP_KERNEL); 2225 + if (!page) { 2210 2226 gvt_err("fail to allocate scratch ggtt page\n"); 2211 2227 return -ENOMEM; 2212 2228 } 2229 + gvt->gtt.scratch_ggtt_page = virt_to_page(page); 2213 2230 2214 - page_addr = page_address(gvt->gtt.scratch_ggtt_page); 2215 - 2216 - gvt->gtt.scratch_ggtt_mfn = 2217 - intel_gvt_hypervisor_virt_to_mfn(page_addr); 2231 + gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); 2218 2232 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { 2219 2233 gvt_err("fail to translate scratch ggtt page\n"); 2220 2234 __free_page(gvt->gtt.scratch_ggtt_page); ··· 2276 2296 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2277 2297 for (offset = 0; offset < num_entries; offset++) 2278 2298 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2299 + } 2300 + 2301 + /** 2302 + * intel_vgpu_reset_gtt - reset the all GTT related status 2303 + * @vgpu: a vGPU 2304 + * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset 2305 + * 2306 + * This function is called from vfio core to reset reset all 2307 + * GTT related status, including GGTT, PPGTT, scratch page. 2308 + * 2309 + */ 2310 + void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) 2311 + { 2312 + int i; 2313 + 2314 + ppgtt_free_all_shadow_page(vgpu); 2315 + if (!dmlr) 2316 + return; 2317 + 2318 + intel_vgpu_reset_ggtt(vgpu); 2319 + 2320 + /* clear scratch page for security */ 2321 + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { 2322 + if (vgpu->gtt.scratch_pt[i].page != NULL) 2323 + memset(page_address(vgpu->gtt.scratch_pt[i].page), 2324 + 0, PAGE_SIZE); 2325 + } 2279 2326 }
+1
drivers/gpu/drm/i915/gvt/gtt.h
··· 208 208 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 209 209 210 210 extern int intel_gvt_init_gtt(struct intel_gvt *gvt); 211 + extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr); 211 212 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); 212 213 213 214 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+7 -1
drivers/gpu/drm/i915/gvt/gvt.c
··· 201 201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); 202 202 intel_gvt_clean_vgpu_types(gvt); 203 203 204 + idr_destroy(&gvt->vgpu_idr); 205 + 204 206 kfree(dev_priv->gvt); 205 207 dev_priv->gvt = NULL; 206 208 } ··· 239 237 240 238 gvt_dbg_core("init gvt device\n"); 241 239 240 + idr_init(&gvt->vgpu_idr); 241 + 242 242 mutex_init(&gvt->lock); 243 243 gvt->dev_priv = dev_priv; 244 244 ··· 248 244 249 245 ret = intel_gvt_setup_mmio_info(gvt); 250 246 if (ret) 251 - return ret; 247 + goto out_clean_idr; 252 248 253 249 ret = intel_gvt_load_firmware(gvt); 254 250 if (ret) ··· 317 313 intel_gvt_free_firmware(gvt); 318 314 out_clean_mmio_info: 319 315 intel_gvt_clean_mmio_info(gvt); 316 + out_clean_idr: 317 + idr_destroy(&gvt->vgpu_idr); 320 318 kfree(gvt); 321 319 return ret; 322 320 }
+7 -1
drivers/gpu/drm/i915/gvt/gvt.h
··· 323 323 324 324 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 325 325 struct intel_vgpu_creation_params *param); 326 + void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); 326 327 void intel_vgpu_free_resource(struct intel_vgpu *vgpu); 327 328 void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 328 329 u32 fence, u64 value); ··· 376 375 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 377 376 struct intel_vgpu_type *type); 378 377 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 378 + void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 379 + unsigned int engine_mask); 379 380 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 380 381 381 382 ··· 414 411 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 415 412 unsigned long *g_index); 416 413 414 + void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, 415 + bool primary); 416 + void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); 417 + 417 418 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, 418 419 void *p_data, unsigned int bytes); 419 420 ··· 431 424 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); 432 425 433 426 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); 434 - int setup_vgpu_mmio(struct intel_vgpu *vgpu); 435 427 void populate_pvinfo_page(struct intel_vgpu *vgpu); 436 428 437 429 struct intel_gvt_ops {
+35 -68
drivers/gpu/drm/i915/gvt/handlers.c
··· 93 93 static int new_mmio_info(struct intel_gvt *gvt, 94 94 u32 offset, u32 flags, u32 size, 95 95 u32 addr_mask, u32 ro_mask, u32 device, 96 - void *read, void *write) 96 + int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), 97 + int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) 97 98 { 98 99 struct intel_gvt_mmio_info *info, *p; 99 100 u32 start, end, i; ··· 220 219 default: 221 220 /*should not hit here*/ 222 221 gvt_err("invalid forcewake offset 0x%x\n", offset); 223 - return 1; 222 + return -EINVAL; 224 223 } 225 224 } else { 226 225 ack_reg_offset = FORCEWAKE_ACK_HSW_REG; ··· 231 230 return 0; 232 231 } 233 232 234 - static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, 235 - void *p_data, unsigned int bytes, unsigned long bitmap) 236 - { 237 - struct intel_gvt_workload_scheduler *scheduler = 238 - &vgpu->gvt->scheduler; 239 - 240 - vgpu->resetting = true; 241 - 242 - intel_vgpu_stop_schedule(vgpu); 243 - /* 244 - * The current_vgpu will set to NULL after stopping the 245 - * scheduler when the reset is triggered by current vgpu. 246 - */ 247 - if (scheduler->current_vgpu == NULL) { 248 - mutex_unlock(&vgpu->gvt->lock); 249 - intel_gvt_wait_vgpu_idle(vgpu); 250 - mutex_lock(&vgpu->gvt->lock); 251 - } 252 - 253 - intel_vgpu_reset_execlist(vgpu, bitmap); 254 - 255 - /* full GPU reset */ 256 - if (bitmap == 0xff) { 257 - mutex_unlock(&vgpu->gvt->lock); 258 - intel_vgpu_clean_gtt(vgpu); 259 - mutex_lock(&vgpu->gvt->lock); 260 - setup_vgpu_mmio(vgpu); 261 - populate_pvinfo_page(vgpu); 262 - intel_vgpu_init_gtt(vgpu); 263 - } 264 - 265 - vgpu->resetting = false; 266 - 267 - return 0; 268 - } 269 - 270 233 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 271 - void *p_data, unsigned int bytes) 234 + void *p_data, unsigned int bytes) 272 235 { 236 + unsigned int engine_mask = 0; 273 237 u32 data; 274 - u64 bitmap = 0; 275 238 276 239 write_vreg(vgpu, offset, p_data, bytes); 277 240 data = vgpu_vreg(vgpu, offset); 278 241 279 242 if (data & GEN6_GRDOM_FULL) { 280 243 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); 281 - bitmap = 0xff; 244 + engine_mask = ALL_ENGINES; 245 + } else { 246 + if (data & GEN6_GRDOM_RENDER) { 247 + gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 248 + engine_mask |= (1 << RCS); 249 + } 250 + if (data & GEN6_GRDOM_MEDIA) { 251 + gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 252 + engine_mask |= (1 << VCS); 253 + } 254 + if (data & GEN6_GRDOM_BLT) { 255 + gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 256 + engine_mask |= (1 << BCS); 257 + } 258 + if (data & GEN6_GRDOM_VECS) { 259 + gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 260 + engine_mask |= (1 << VECS); 261 + } 262 + if (data & GEN8_GRDOM_MEDIA2) { 263 + gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 264 + if (HAS_BSD2(vgpu->gvt->dev_priv)) 265 + engine_mask |= (1 << VCS2); 266 + } 282 267 } 283 - if (data & GEN6_GRDOM_RENDER) { 284 - gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 285 - bitmap |= (1 << RCS); 286 - } 287 - if (data & GEN6_GRDOM_MEDIA) { 288 - gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 289 - bitmap |= (1 << VCS); 290 - } 291 - if (data & GEN6_GRDOM_BLT) { 292 - gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 293 - bitmap |= (1 << BCS); 294 - } 295 - if (data & GEN6_GRDOM_VECS) { 296 - gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 297 - bitmap |= (1 << VECS); 298 - } 299 - if (data & GEN8_GRDOM_MEDIA2) { 300 - 
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 301 - if (HAS_BSD2(vgpu->gvt->dev_priv)) 302 - bitmap |= (1 << VCS2); 303 - } 304 - return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); 268 + 269 + intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); 270 + 271 + return 0; 305 272 } 306 273 307 274 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, ··· 943 974 return 0; 944 975 } 945 976 946 - static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 977 + static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 947 978 void *p_data, unsigned int bytes) 948 979 { 949 980 u32 data; ··· 1335 1366 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, 1336 1367 unsigned int offset, void *p_data, unsigned int bytes) 1337 1368 { 1338 - int rc = 0; 1339 1369 unsigned int id = 0; 1340 1370 1341 1371 write_vreg(vgpu, offset, p_data, bytes); ··· 1357 1389 id = VECS; 1358 1390 break; 1359 1391 default: 1360 - rc = -EINVAL; 1361 - break; 1392 + return -EINVAL; 1362 1393 } 1363 1394 set_bit(id, (void *)vgpu->tlb_handle_pending); 1364 1395 1365 - return rc; 1396 + return 0; 1366 1397 } 1367 1398 1368 1399 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
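For orientation, the per-engine reset bits decoded in gdrst_mmio_write() above all funnel into the new common entry point declared in gvt.h. A rough, hedged sketch of how intel_gvt_reset_vgpu_locked() composes the per-component reset helpers added elsewhere in this series (the real function in vgpu.c additionally handles scheduler idle waiting and gvt->lock juggling that is not shown here):

    /* Simplified sketch, not the verbatim implementation. */
    void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
    				 unsigned int engine_mask)
    {
    	vgpu->resetting = true;

    	intel_vgpu_stop_schedule(vgpu);
    	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);

    	/* full GPU reset or device-model-level reset (dmlr) */
    	if (engine_mask == ALL_ENGINES || dmlr) {
    		intel_vgpu_reset_gtt(vgpu, dmlr);	/* gtt.c */
    		intel_vgpu_reset_resource(vgpu);	/* aperture_gm.c */
    		intel_vgpu_reset_mmio(vgpu);		/* mmio.c */
    		populate_pvinfo_page(vgpu);
    		if (dmlr)
    			intel_vgpu_reset_cfg_space(vgpu); /* cfg_space.c */
    	}

    	vgpu->resetting = false;
    }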
+10 -4
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 398 398 struct intel_vgpu_type *type; 399 399 struct device *pdev; 400 400 void *gvt; 401 + int ret; 401 402 402 403 pdev = mdev_parent_dev(mdev); 403 404 gvt = kdev_to_i915(pdev)->gvt; ··· 407 406 if (!type) { 408 407 gvt_err("failed to find type %s to create\n", 409 408 kobject_name(kobj)); 410 - return -EINVAL; 409 + ret = -EINVAL; 410 + goto out; 411 411 } 412 412 413 413 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 414 414 if (IS_ERR_OR_NULL(vgpu)) { 415 - gvt_err("create intel vgpu failed\n"); 416 - return -EINVAL; 415 + ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); 416 + gvt_err("failed to create intel vgpu: %d\n", ret); 417 + goto out; 417 418 } 418 419 419 420 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); ··· 425 422 426 423 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", 427 424 dev_name(mdev_dev(mdev))); 428 - return 0; 425 + ret = 0; 426 + 427 + out: 428 + return ret; 429 429 } 430 430 431 431 static int intel_vgpu_remove(struct mdev_device *mdev)
+69 -15
drivers/gpu/drm/i915/gvt/mmio.c
··· 125 125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) 126 126 goto err; 127 127 128 - mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 129 - if (!mmio && !vgpu->mmio.disable_warn_untrack) { 130 - gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", 131 - vgpu->id, offset, bytes, *(u32 *)p_data); 132 - 133 - if (offset == 0x206c) { 134 - gvt_err("------------------------------------------\n"); 135 - gvt_err("vgpu%d: likely triggers a gfx reset\n", 136 - vgpu->id); 137 - gvt_err("------------------------------------------\n"); 138 - vgpu->mmio.disable_warn_untrack = true; 139 - } 140 - } 141 - 142 128 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { 143 129 if (WARN_ON(!IS_ALIGNED(offset, bytes))) 144 130 goto err; 145 131 } 146 132 133 + mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 147 134 if (mmio) { 148 135 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { 149 136 if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) ··· 139 152 goto err; 140 153 } 141 154 ret = mmio->read(vgpu, offset, p_data, bytes); 142 - } else 155 + } else { 143 156 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 157 + 158 + if (!vgpu->mmio.disable_warn_untrack) { 159 + gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", 160 + vgpu->id, offset, bytes, *(u32 *)p_data); 161 + 162 + if (offset == 0x206c) { 163 + gvt_err("------------------------------------------\n"); 164 + gvt_err("vgpu%d: likely triggers a gfx reset\n", 165 + vgpu->id); 166 + gvt_err("------------------------------------------\n"); 167 + vgpu->mmio.disable_warn_untrack = true; 168 + } 169 + } 170 + } 144 171 145 172 if (ret) 146 173 goto err; ··· 302 301 vgpu->id, offset, bytes); 303 302 mutex_unlock(&gvt->lock); 304 303 return ret; 304 + } 305 + 306 + 307 + /** 308 + * intel_vgpu_reset_mmio - reset virtual MMIO space 309 + * @vgpu: a vGPU 310 + * 311 + */ 312 + void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) 313 + { 314 + struct intel_gvt *gvt = vgpu->gvt; 315 + const struct intel_gvt_device_info *info = &gvt->device_info; 316 + 317 + memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); 318 + memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); 319 + 320 + vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; 321 + 322 + /* set the bit 0:2(Core C-State ) to C0 */ 323 + vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 324 + } 325 + 326 + /** 327 + * intel_vgpu_init_mmio - init MMIO space 328 + * @vgpu: a vGPU 329 + * 330 + * Returns: 331 + * Zero on success, negative error code if failed 332 + */ 333 + int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) 334 + { 335 + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 336 + 337 + vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); 338 + if (!vgpu->mmio.vreg) 339 + return -ENOMEM; 340 + 341 + vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; 342 + 343 + intel_vgpu_reset_mmio(vgpu); 344 + 345 + return 0; 346 + } 347 + 348 + /** 349 + * intel_vgpu_clean_mmio - clean MMIO space 350 + * @vgpu: a vGPU 351 + * 352 + */ 353 + void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) 354 + { 355 + vfree(vgpu->mmio.vreg); 356 + vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; 305 357 }
+4
drivers/gpu/drm/i915/gvt/mmio.h
··· 86 86 *offset; \ 87 87 }) 88 88 89 + int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); 90 + void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); 91 + void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); 92 + 89 93 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); 90 94 91 95 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
+4 -4
drivers/gpu/drm/i915/gvt/opregion.c
··· 36 36 vgpu->id)) 37 37 return -EINVAL; 38 38 39 - vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | 40 - GFP_DMA32 | __GFP_ZERO, 41 - INTEL_GVT_OPREGION_PORDER); 39 + vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | 40 + __GFP_ZERO, 41 + get_order(INTEL_GVT_OPREGION_SIZE)); 42 42 43 43 if (!vgpu_opregion(vgpu)->va) 44 44 return -ENOMEM; ··· 97 97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { 98 98 map_vgpu_opregion(vgpu, false); 99 99 free_pages((unsigned long)vgpu_opregion(vgpu)->va, 100 - INTEL_GVT_OPREGION_PORDER); 100 + get_order(INTEL_GVT_OPREGION_SIZE)); 101 101 102 102 vgpu_opregion(vgpu)->va = NULL; 103 103 }
+1 -2
drivers/gpu/drm/i915/gvt/reg.h
··· 50 50 #define INTEL_GVT_OPREGION_PARM 0x204 51 51 52 52 #define INTEL_GVT_OPREGION_PAGES 2 53 - #define INTEL_GVT_OPREGION_PORDER 1 54 - #define INTEL_GVT_OPREGION_SIZE (2 * 4096) 53 + #define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) 55 54 56 55 #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) 57 56
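A short sketch (not from the patch) of what replacing the hard-coded INTEL_GVT_OPREGION_PORDER with get_order() buys in the opregion hunk above, assuming 4 KiB pages:

	/* With PAGE_SIZE == 4096:
	 *   INTEL_GVT_OPREGION_SIZE == INTEL_GVT_OPREGION_PAGES * PAGE_SIZE == 8192
	 *   get_order(8192) == 1, the same order the old PORDER macro hard-coded,
	 * but now derived from the size, so allocation and free cannot drift apart.
	 */
	void *va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(INTEL_GVT_OPREGION_SIZE));
	/* ... */
	free_pages((unsigned long)va, get_order(INTEL_GVT_OPREGION_SIZE));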
+7 -7
drivers/gpu/drm/i915/gvt/scheduler.c
··· 350 350 { 351 351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 352 352 struct intel_vgpu_workload *workload; 353 + struct intel_vgpu *vgpu; 353 354 int event; 354 355 355 356 mutex_lock(&gvt->lock); 356 357 357 358 workload = scheduler->current_workload[ring_id]; 359 + vgpu = workload->vgpu; 358 360 359 - if (!workload->status && !workload->vgpu->resetting) { 361 + if (!workload->status && !vgpu->resetting) { 360 362 wait_event(workload->shadow_ctx_status_wq, 361 363 !atomic_read(&workload->shadow_ctx_active)); 362 364 ··· 366 364 367 365 for_each_set_bit(event, workload->pending_events, 368 366 INTEL_GVT_EVENT_MAX) 369 - intel_vgpu_trigger_virtual_event(workload->vgpu, 370 - event); 367 + intel_vgpu_trigger_virtual_event(vgpu, event); 371 368 } 372 369 373 370 gvt_dbg_sched("ring id %d complete workload %p status %d\n", ··· 374 373 375 374 scheduler->current_workload[ring_id] = NULL; 376 375 377 - atomic_dec(&workload->vgpu->running_workload_num); 378 - 379 376 list_del_init(&workload->list); 380 377 workload->complete(workload); 381 378 379 + atomic_dec(&vgpu->running_workload_num); 382 380 wake_up(&scheduler->workload_complete_wq); 383 381 mutex_unlock(&gvt->lock); 384 382 } ··· 459 459 gvt_dbg_sched("will complete workload %p\n, status: %d\n", 460 460 workload, workload->status); 461 461 462 - complete_current_workload(gvt, ring_id); 463 - 464 462 if (workload->req) 465 463 i915_gem_request_put(fetch_and_zero(&workload->req)); 464 + 465 + complete_current_workload(gvt, ring_id); 466 466 467 467 if (need_force_wake) 468 468 intel_uncore_forcewake_put(gvt->dev_priv,
+81 -81
drivers/gpu/drm/i915/gvt/vgpu.c
··· 35 35 #include "gvt.h" 36 36 #include "i915_pvinfo.h" 37 37 38 - static void clean_vgpu_mmio(struct intel_vgpu *vgpu) 39 - { 40 - vfree(vgpu->mmio.vreg); 41 - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; 42 - } 43 - 44 - int setup_vgpu_mmio(struct intel_vgpu *vgpu) 45 - { 46 - struct intel_gvt *gvt = vgpu->gvt; 47 - const struct intel_gvt_device_info *info = &gvt->device_info; 48 - 49 - if (vgpu->mmio.vreg) 50 - memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); 51 - else { 52 - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); 53 - if (!vgpu->mmio.vreg) 54 - return -ENOMEM; 55 - } 56 - 57 - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; 58 - 59 - memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); 60 - memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); 61 - 62 - vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; 63 - 64 - /* set the bit 0:2(Core C-State ) to C0 */ 65 - vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 66 - return 0; 67 - } 68 - 69 - static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, 70 - struct intel_vgpu_creation_params *param) 71 - { 72 - struct intel_gvt *gvt = vgpu->gvt; 73 - const struct intel_gvt_device_info *info = &gvt->device_info; 74 - u16 *gmch_ctl; 75 - int i; 76 - 77 - memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, 78 - info->cfg_space_size); 79 - 80 - if (!param->primary) { 81 - vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = 82 - INTEL_GVT_PCI_CLASS_VGA_OTHER; 83 - vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = 84 - INTEL_GVT_PCI_CLASS_VGA_OTHER; 85 - } 86 - 87 - /* Show guest that there isn't any stolen memory.*/ 88 - gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); 89 - *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); 90 - 91 - intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, 92 - gvt_aperture_pa_base(gvt), true); 93 - 94 - vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO 95 - | PCI_COMMAND_MEMORY 96 - | PCI_COMMAND_MASTER); 97 - /* 98 - * Clear the bar upper 32bit and let guest to assign the new value 99 - */ 100 - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); 101 - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); 102 - memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); 103 - 104 - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { 105 - vgpu->cfg_space.bar[i].size = pci_resource_len( 106 - gvt->dev_priv->drm.pdev, i * 2); 107 - vgpu->cfg_space.bar[i].tracked = false; 108 - } 109 - } 110 - 111 38 void populate_pvinfo_page(struct intel_vgpu *vgpu) 112 39 { 113 40 /* setup the ballooning information */ ··· 104 177 if (low_avail / min_low == 0) 105 178 break; 106 179 gvt->types[i].low_gm_size = min_low; 107 - gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; 180 + gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); 108 181 gvt->types[i].fence = 4; 109 182 gvt->types[i].max_instance = low_avail / min_low; 110 183 gvt->types[i].avail_instance = gvt->types[i].max_instance; ··· 144 217 */ 145 218 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - 146 219 gvt->gm.vgpu_allocated_low_gm_size; 147 - high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - 220 + high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - 148 221 gvt->gm.vgpu_allocated_high_gm_size; 149 222 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - 150 223 gvt->fence.vgpu_allocated_fence_num; ··· 195 268 intel_vgpu_clean_gtt(vgpu); 196 269 intel_gvt_hypervisor_detach_vgpu(vgpu); 197 270 intel_vgpu_free_resource(vgpu); 198 - clean_vgpu_mmio(vgpu); 271 + intel_vgpu_clean_mmio(vgpu); 199 272 
vfree(vgpu); 200 273 201 274 intel_gvt_update_vgpu_types(gvt); ··· 227 300 vgpu->gvt = gvt; 228 301 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); 229 302 230 - setup_vgpu_cfg_space(vgpu, param); 303 + intel_vgpu_init_cfg_space(vgpu, param->primary); 231 304 232 - ret = setup_vgpu_mmio(vgpu); 305 + ret = intel_vgpu_init_mmio(vgpu); 233 306 if (ret) 234 - goto out_free_vgpu; 307 + goto out_clean_idr; 235 308 236 309 ret = intel_vgpu_alloc_resource(vgpu, param); 237 310 if (ret) ··· 281 354 out_clean_vgpu_resource: 282 355 intel_vgpu_free_resource(vgpu); 283 356 out_clean_vgpu_mmio: 284 - clean_vgpu_mmio(vgpu); 357 + intel_vgpu_clean_mmio(vgpu); 358 + out_clean_idr: 359 + idr_remove(&gvt->vgpu_idr, vgpu->id); 285 360 out_free_vgpu: 286 361 vfree(vgpu); 287 362 mutex_unlock(&gvt->lock); ··· 327 398 } 328 399 329 400 /** 330 - * intel_gvt_reset_vgpu - reset a virtual GPU 401 + * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset 402 + * @vgpu: virtual GPU 403 + * @dmlr: vGPU Device Model Level Reset or GT Reset 404 + * @engine_mask: engines to reset for GT reset 405 + * 406 + * This function is called when the user wants to reset a virtual GPU through 407 + * device model reset or GT reset. The caller should hold the gvt lock. 408 + * 409 + * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset 410 + * the whole vGPU to default state as when it is created. This vGPU function 411 + * is required for both functionality and security concerns. The ultimate goal 412 + * of vGPU FLR is to allow reuse of a vGPU instance by virtual machines. When we 413 + * assign a vGPU to a virtual machine we must issue such a reset first. 414 + * 415 + * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines 416 + * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU spec. 417 + * Unlike the FLR, a GT reset only resets particular resources of a vGPU per 418 + * the reset request. The guest driver can issue a GT reset by programming the 419 + * virtual GDRST register to reset a specific virtual GPU engine or all 420 + * engines. 421 + * 422 + * The parameter dmlr identifies whether we will do a DMLR or a GT reset. 423 + * The parameter engine_mask specifies the engines that need to be 424 + * reset. If the value ALL_ENGINES is given for engine_mask, it means 425 + * the caller requests a full GT reset and we will reset all virtual 426 + * GPU engines. For FLR, engine_mask is ignored. 427 + */ 428 + void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 429 + unsigned int engine_mask) 430 + { 431 + struct intel_gvt *gvt = vgpu->gvt; 432 + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 433 + 434 + gvt_dbg_core("------------------------------------------\n"); 435 + gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", 436 + vgpu->id, dmlr, engine_mask); 437 + vgpu->resetting = true; 438 + 439 + intel_vgpu_stop_schedule(vgpu); 440 + /* 441 + * The current_vgpu will be set to NULL after stopping the 442 + * scheduler when the reset is triggered by the current vgpu. 443 + */ 444 + if (scheduler->current_vgpu == NULL) { 445 + mutex_unlock(&gvt->lock); 446 + intel_gvt_wait_vgpu_idle(vgpu); 447 + mutex_lock(&gvt->lock); 448 + } 449 + 450 + intel_vgpu_reset_execlist(vgpu, dmlr ? 
ALL_ENGINES : engine_mask); 451 + 452 + /* full GPU reset or device model level reset */ 453 + if (engine_mask == ALL_ENGINES || dmlr) { 454 + intel_vgpu_reset_gtt(vgpu, dmlr); 455 + intel_vgpu_reset_resource(vgpu); 456 + intel_vgpu_reset_mmio(vgpu); 457 + populate_pvinfo_page(vgpu); 458 + 459 + if (dmlr) 460 + intel_vgpu_reset_cfg_space(vgpu); 461 + } 462 + 463 + vgpu->resetting = false; 464 + gvt_dbg_core("reset vgpu%d done\n", vgpu->id); 465 + gvt_dbg_core("------------------------------------------\n"); 466 + } 467 + 468 + /** 469 + * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) 331 470 * @vgpu: virtual GPU 332 471 * 333 472 * This function is called when user wants to reset a virtual GPU. ··· 403 406 */ 404 407 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) 405 408 { 409 + mutex_lock(&vgpu->gvt->lock); 410 + intel_gvt_reset_vgpu_locked(vgpu, true, 0); 411 + mutex_unlock(&vgpu->gvt->lock); 406 412 }
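To make the two reset flavours in the kernel-doc above concrete, a hedged sketch of the expected call patterns; the DMLR case mirrors intel_gvt_reset_vgpu() in this hunk, while the per-engine GT-reset call site is an assumption (it would be reached from the virtual GDRST handling, which is not part of this hunk):

	/* Device Model Level Reset: full vGPU reset, engine_mask is ignored. */
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);

	/* GT reset of selected engines (caller already holds gvt->lock),
	 * e.g. all engines at once via ALL_ENGINES, or a guest-chosen subset.
	 */
	intel_gvt_reset_vgpu_locked(vgpu, false, ALL_ENGINES);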
+4 -30
drivers/gpu/drm/i915/i915_gem.c
··· 595 595 struct drm_i915_gem_pwrite *args, 596 596 struct drm_file *file) 597 597 { 598 - struct drm_device *dev = obj->base.dev; 599 598 void *vaddr = obj->phys_handle->vaddr + args->offset; 600 599 char __user *user_data = u64_to_user_ptr(args->data_ptr); 601 - int ret; 602 600 603 601 /* We manually control the domain here and pretend that it 604 602 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 605 603 */ 606 - lockdep_assert_held(&obj->base.dev->struct_mutex); 607 - ret = i915_gem_object_wait(obj, 608 - I915_WAIT_INTERRUPTIBLE | 609 - I915_WAIT_LOCKED | 610 - I915_WAIT_ALL, 611 - MAX_SCHEDULE_TIMEOUT, 612 - to_rps_client(file)); 613 - if (ret) 614 - return ret; 615 - 616 604 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 617 - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 618 - unsigned long unwritten; 619 - 620 - /* The physical object once assigned is fixed for the lifetime 621 - * of the obj, so we can safely drop the lock and continue 622 - * to access vaddr. 623 - */ 624 - mutex_unlock(&dev->struct_mutex); 625 - unwritten = copy_from_user(vaddr, user_data, args->size); 626 - mutex_lock(&dev->struct_mutex); 627 - if (unwritten) { 628 - ret = -EFAULT; 629 - goto out; 630 - } 631 - } 605 + if (copy_from_user(vaddr, user_data, args->size)) 606 + return -EFAULT; 632 607 633 608 drm_clflush_virt_range(vaddr, args->size); 634 - i915_gem_chipset_flush(to_i915(dev)); 609 + i915_gem_chipset_flush(to_i915(obj->base.dev)); 635 610 636 - out: 637 611 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 638 - return ret; 612 + return 0; 639 613 } 640 614 641 615 void *i915_gem_object_alloc(struct drm_device *dev)
+1
drivers/gpu/drm/i915/i915_gem_evict.c
··· 199 199 } 200 200 201 201 /* Unbinding will emit any required flushes */ 202 + ret = 0; 202 203 while (!list_empty(&eviction_list)) { 203 204 vma = list_first_entry(&eviction_list, 204 205 struct i915_vma,
+3
drivers/gpu/drm/i915/intel_display.c
··· 2967 2967 unsigned int rotation = plane_state->base.rotation; 2968 2968 int ret; 2969 2969 2970 + if (!plane_state->base.visible) 2971 + return 0; 2972 + 2970 2973 /* Rotate src coordinates to match rotated GTT view */ 2971 2974 if (drm_rotation_90_or_270(rotation)) 2972 2975 drm_rect_rotate(&plane_state->base.src,
+2 -2
drivers/gpu/drm/i915/intel_hotplug.c
··· 180 180 181 181 /* Enable polling and queue hotplug re-enabling. */ 182 182 if (hpd_disabled) { 183 - drm_kms_helper_poll_enable_locked(dev); 183 + drm_kms_helper_poll_enable(dev); 184 184 mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, 185 185 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); 186 186 } ··· 511 511 } 512 512 513 513 if (enabled) 514 - drm_kms_helper_poll_enable_locked(dev); 514 + drm_kms_helper_poll_enable(dev); 515 515 516 516 mutex_unlock(&dev->mode_config.mutex); 517 517
-10
drivers/gpu/drm/i915/intel_lrc.c
··· 979 979 uint32_t *batch, 980 980 uint32_t index) 981 981 { 982 - struct drm_i915_private *dev_priv = engine->i915; 983 982 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 984 - 985 - /* 986 - * WaDisableLSQCROPERFforOCL:kbl 987 - * This WA is implemented in skl_init_clock_gating() but since 988 - * this batch updates GEN8_L3SQCREG4 with default value we need to 989 - * set this bit here to retain the WA during flush. 990 - */ 991 - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) 992 - l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 993 983 994 984 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 995 985 MI_SRM_LRM_GLOBAL_GTT));
-8
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 1095 1095 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1096 1096 HDC_FENCE_DEST_SLM_DISABLE); 1097 1097 1098 - /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1099 - * involving this register should also be added to WA batch as required. 1100 - */ 1101 - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) 1102 - /* WaDisableLSQCROPERFforOCL:kbl */ 1103 - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1104 - GEN8_LQSC_RO_PERF_DIS); 1105 - 1106 1098 /* WaToEnableHwFixForPushConstHWBug:kbl */ 1107 1099 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) 1108 1100 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+2 -3
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 345 345 { 346 346 struct adreno_platform_config *config = pdev->dev.platform_data; 347 347 struct msm_gpu *gpu = &adreno_gpu->base; 348 - struct msm_mmu *mmu; 349 348 int ret; 350 349 351 350 adreno_gpu->funcs = funcs; ··· 384 385 return ret; 385 386 } 386 387 387 - mmu = gpu->aspace->mmu; 388 - if (mmu) { 388 + if (gpu->aspace && gpu->aspace->mmu) { 389 + struct msm_mmu *mmu = gpu->aspace->mmu; 389 390 ret = mmu->funcs->attach(mmu, iommu_ports, 390 391 ARRAY_SIZE(iommu_ports)); 391 392 if (ret)
-6
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
··· 119 119 120 120 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 121 121 { 122 - int i; 123 122 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 124 - struct drm_plane *plane; 125 - struct drm_plane_state *plane_state; 126 - 127 - for_each_plane_in_state(state, plane, plane_state, i) 128 - mdp5_plane_complete_commit(plane, plane_state); 129 123 130 124 if (mdp5_kms->smp) 131 125 mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
-4
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
··· 104 104 105 105 /* assigned by crtc blender */ 106 106 enum mdp_mixer_stage_id stage; 107 - 108 - bool pending : 1; 109 107 }; 110 108 #define to_mdp5_plane_state(x) \ 111 109 container_of(x, struct mdp5_plane_state, base) ··· 230 232 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); 231 233 232 234 uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 233 - void mdp5_plane_complete_commit(struct drm_plane *plane, 234 - struct drm_plane_state *state); 235 235 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 236 236 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); 237 237
-22
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 179 179 drm_printf(p, "\tzpos=%u\n", pstate->zpos); 180 180 drm_printf(p, "\talpha=%u\n", pstate->alpha); 181 181 drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); 182 - drm_printf(p, "\tpending=%u\n", pstate->pending); 183 182 } 184 183 185 184 static void mdp5_plane_reset(struct drm_plane *plane) ··· 218 219 219 220 if (mdp5_state && mdp5_state->base.fb) 220 221 drm_framebuffer_reference(mdp5_state->base.fb); 221 - 222 - mdp5_state->pending = false; 223 222 224 223 return &mdp5_state->base; 225 224 } ··· 284 287 285 288 DBG("%s: check (%d -> %d)", plane->name, 286 289 plane_enabled(old_state), plane_enabled(state)); 287 - 288 - /* We don't allow faster-than-vblank updates.. if we did add this 289 - * some day, we would need to disallow in cases where hwpipe 290 - * changes 291 - */ 292 - if (WARN_ON(to_mdp5_plane_state(old_state)->pending)) 293 - return -EBUSY; 294 290 295 291 max_width = config->hw->lm.max_width << 16; 296 292 max_height = config->hw->lm.max_height << 16; ··· 360 370 struct drm_plane_state *old_state) 361 371 { 362 372 struct drm_plane_state *state = plane->state; 363 - struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); 364 373 365 374 DBG("%s: update", plane->name); 366 - 367 - mdp5_state->pending = true; 368 375 369 376 if (plane_enabled(state)) { 370 377 int ret; ··· 836 849 return 0; 837 850 838 851 return pstate->hwpipe->flush_mask; 839 - } 840 - 841 - /* called after vsync in thread context */ 842 - void mdp5_plane_complete_commit(struct drm_plane *plane, 843 - struct drm_plane_state *state) 844 - { 845 - struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); 846 - 847 - pstate->pending = false; 848 852 } 849 853 850 854 /* initialize plane */
+2
drivers/gpu/drm/msm/msm_gem.c
··· 294 294 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 295 295 296 296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 297 + if (!priv->aspace[id]) 298 + continue; 297 299 msm_gem_unmap_vma(priv->aspace[id], 298 300 &msm_obj->domain[id], msm_obj->sgt); 299 301 }
+20 -5
drivers/gpu/drm/radeon/si.c
··· 114 114 MODULE_FIRMWARE("radeon/hainan_rlc.bin"); 115 115 MODULE_FIRMWARE("radeon/hainan_smc.bin"); 116 116 MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 117 + MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 118 + 119 + MODULE_FIRMWARE("radeon/si58_mc.bin"); 117 120 118 121 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); 119 122 static void si_pcie_gen3_enable(struct radeon_device *rdev); ··· 1653 1650 int err; 1654 1651 int new_fw = 0; 1655 1652 bool new_smc = false; 1653 + bool si58_fw = false; 1654 + bool banks2_fw = false; 1656 1655 1657 1656 DRM_DEBUG("\n"); 1658 1657 ··· 1732 1727 ((rdev->pdev->device == 0x6660) || 1733 1728 (rdev->pdev->device == 0x6663) || 1734 1729 (rdev->pdev->device == 0x6665) || 1735 - (rdev->pdev->device == 0x6667))) || 1736 - ((rdev->pdev->revision == 0xc3) && 1737 - (rdev->pdev->device == 0x6665))) 1730 + (rdev->pdev->device == 0x6667)))) 1738 1731 new_smc = true; 1732 + else if ((rdev->pdev->revision == 0xc3) && 1733 + (rdev->pdev->device == 0x6665)) 1734 + banks2_fw = true; 1739 1735 new_chip_name = "hainan"; 1740 1736 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1741 1737 me_req_size = SI_PM4_UCODE_SIZE * 4; ··· 1747 1741 break; 1748 1742 default: BUG(); 1749 1743 } 1744 + 1745 + /* this memory configuration requires special firmware */ 1746 + if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) 1747 + si58_fw = true; 1750 1748 1751 1749 DRM_INFO("Loading %s Microcode\n", new_chip_name); 1752 1750 ··· 1855 1845 } 1856 1846 } 1857 1847 1858 - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); 1848 + if (si58_fw) 1849 + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); 1850 + else 1851 + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); 1859 1852 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1860 1853 if (err) { 1861 1854 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); ··· 1889 1876 } 1890 1877 } 1891 1878 1892 - if (new_smc) 1879 + if (banks2_fw) 1880 + snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin"); 1881 + else if (new_smc) 1893 1882 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); 1894 1883 else 1895 1884 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
-12
drivers/gpu/drm/radeon/si_dpm.c
··· 3008 3008 (rdev->pdev->device == 0x6817) || 3009 3009 (rdev->pdev->device == 0x6806)) 3010 3010 max_mclk = 120000; 3011 - } else if (rdev->family == CHIP_OLAND) { 3012 - if ((rdev->pdev->revision == 0xC7) || 3013 - (rdev->pdev->revision == 0x80) || 3014 - (rdev->pdev->revision == 0x81) || 3015 - (rdev->pdev->revision == 0x83) || 3016 - (rdev->pdev->revision == 0x87) || 3017 - (rdev->pdev->device == 0x6604) || 3018 - (rdev->pdev->device == 0x6605)) { 3019 - max_sclk = 75000; 3020 - max_mclk = 80000; 3021 - } 3022 3011 } else if (rdev->family == CHIP_HAINAN) { 3023 3012 if ((rdev->pdev->revision == 0x81) || 3024 3013 (rdev->pdev->revision == 0x83) || ··· 3016 3027 (rdev->pdev->device == 0x6665) || 3017 3028 (rdev->pdev->device == 0x6667)) { 3018 3029 max_sclk = 75000; 3019 - max_mclk = 80000; 3020 3030 } 3021 3031 } 3022 3032 /* Apply dpm quirks */
+1 -1
drivers/gpu/drm/virtio/virtgpu_fb.c
··· 331 331 info->fbops = &virtio_gpufb_ops; 332 332 info->pixmap.flags = FB_PIXMAP_SYSTEM; 333 333 334 - info->screen_base = obj->vmap; 334 + info->screen_buffer = obj->vmap; 335 335 info->screen_size = obj->gem_base.size; 336 336 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 337 337 drm_fb_helper_fill_var(info, &vfbdev->helper,
-1
include/drm/drm_crtc_helper.h
··· 73 73 74 74 extern void drm_kms_helper_poll_disable(struct drm_device *dev); 75 75 extern void drm_kms_helper_poll_enable(struct drm_device *dev); 76 - extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); 77 76 78 77 #endif