Merge tag 'drm-fixes-for-v4.10-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"drm fixes across the board.

Okay, holidays and LCA kinda caught up with me; I thought I'd get some
of this dequeued last week, but Hobart was sunny and warm and not all
gloomy and rainy as usual.

This is a bit large, but not too much considering it's two weeks' worth
of stuff from AMD and Intel.

core:
- one locking fix that helps with dynamic suspend/resume races

i915:
- mostly GVT updates; GVT is a recent introduction, so fixes for it
shouldn't cause any notable side effects.

amdgpu:
- a bunch of fixes for GPUs with a different memory controller design
that need different firmware.

exynos:
- decon regression fixes

msm:
- two regression fixes

etnaviv:
- a workaround for an MMU bug that needs a lot more work.

virtio:
- a sparse fix and a MAINTAINERS update"

* tag 'drm-fixes-for-v4.10-rc6' of git://people.freedesktop.org/~airlied/linux: (56 commits)
drm/exynos/decon5433: set STANDALONE_UPDATE_F on output enablement
drm/exynos/decon5433: fix CMU programming
drm/exynos/decon5433: do not disable video after reset
drm/i915: Ignore bogus plane coordinates on SKL when the plane is not visible
drm/i915: Remove WaDisableLSQCROPERFforOCL KBL workaround.
drm/amdgpu: add support for new hainan variants
drm/radeon: add support for new hainan variants
drm/amdgpu: change clock gating mode for uvd_v4.
drm/amdgpu: fix program vce instance logic error.
drm/amdgpu: fix bug set incorrect value to vce register
Revert "drm/amdgpu: Only update the CUR_SIZE register when necessary"
drm/msm: fix potential null ptr issue in non-iommu case
drm/msm/mdp5: rip out plane->pending tracking
drm/exynos/decon5433: set STANDALONE_UPDATE_F also if planes are disabled
drm/exynos/decon5433: update shadow registers iff there are active windows
drm/i915/gvt: rewrite gt reset handler using new function intel_gvt_reset_vgpu_locked
drm/i915/gvt: fix vGPU instance reuse issues by vGPU reset function
drm/i915/gvt: introduce intel_vgpu_reset_mmio() to reset mmio space
drm/i915/gvt: move mmio init/clean function to mmio.c
drm/i915/gvt: introduce intel_vgpu_reset_cfg_space to reset configuration space
...

+584 -520
+13 -3
MAINTAINERS
··· 4100 4101 DRM DRIVER FOR BOCHS VIRTUAL GPU 4102 M: Gerd Hoffmann <kraxel@redhat.com> 4103 - S: Odd Fixes 4104 F: drivers/gpu/drm/bochs/ 4105 4106 DRM DRIVER FOR QEMU'S CIRRUS DEVICE 4107 M: Dave Airlie <airlied@redhat.com> 4108 - S: Odd Fixes 4109 F: drivers/gpu/drm/cirrus/ 4110 4111 RADEON and AMDGPU DRM DRIVERS ··· 4304 4305 DRM DRIVER FOR QXL VIRTUAL GPU 4306 M: Dave Airlie <airlied@redhat.com> 4307 - S: Odd Fixes 4308 F: drivers/gpu/drm/qxl/ 4309 F: include/uapi/drm/qxl_drm.h 4310 ··· 13101 M: Gerd Hoffmann <kraxel@redhat.com> 13102 L: dri-devel@lists.freedesktop.org 13103 L: virtualization@lists.linux-foundation.org 13104 S: Maintained 13105 F: drivers/gpu/drm/virtio/ 13106 F: include/uapi/linux/virtio_gpu.h
··· 4100 4101 DRM DRIVER FOR BOCHS VIRTUAL GPU 4102 M: Gerd Hoffmann <kraxel@redhat.com> 4103 + L: virtualization@lists.linux-foundation.org 4104 + T: git git://git.kraxel.org/linux drm-qemu 4105 + S: Maintained 4106 F: drivers/gpu/drm/bochs/ 4107 4108 DRM DRIVER FOR QEMU'S CIRRUS DEVICE 4109 M: Dave Airlie <airlied@redhat.com> 4110 + M: Gerd Hoffmann <kraxel@redhat.com> 4111 + L: virtualization@lists.linux-foundation.org 4112 + T: git git://git.kraxel.org/linux drm-qemu 4113 + S: Obsolete 4114 + W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ 4115 F: drivers/gpu/drm/cirrus/ 4116 4117 RADEON and AMDGPU DRM DRIVERS ··· 4298 4299 DRM DRIVER FOR QXL VIRTUAL GPU 4300 M: Dave Airlie <airlied@redhat.com> 4301 + M: Gerd Hoffmann <kraxel@redhat.com> 4302 + L: virtualization@lists.linux-foundation.org 4303 + T: git git://git.kraxel.org/linux drm-qemu 4304 + S: Maintained 4305 F: drivers/gpu/drm/qxl/ 4306 F: include/uapi/drm/qxl_drm.h 4307 ··· 13092 M: Gerd Hoffmann <kraxel@redhat.com> 13093 L: dri-devel@lists.freedesktop.org 13094 L: virtualization@lists.linux-foundation.org 13095 + T: git git://git.kraxel.org/linux drm-qemu 13096 S: Maintained 13097 F: drivers/gpu/drm/virtio/ 13098 F: include/uapi/linux/virtio_gpu.h
+7 -15
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 2512 2513 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2514 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2515 2516 return 0; 2517 } ··· 2539 int32_t hot_y) 2540 { 2541 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2542 - struct amdgpu_device *adev = crtc->dev->dev_private; 2543 struct drm_gem_object *obj; 2544 struct amdgpu_bo *aobj; 2545 int ret; ··· 2579 2580 dce_v10_0_lock_cursor(crtc, true); 2581 2582 - if (hot_x != amdgpu_crtc->cursor_hot_x || 2583 hot_y != amdgpu_crtc->cursor_hot_y) { 2584 int x, y; 2585 ··· 2590 2591 dce_v10_0_cursor_move_locked(crtc, x, y); 2592 2593 - amdgpu_crtc->cursor_hot_x = hot_x; 2594 - amdgpu_crtc->cursor_hot_y = hot_y; 2595 - } 2596 - 2597 - if (width != amdgpu_crtc->cursor_width || 2598 - height != amdgpu_crtc->cursor_height) { 2599 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2600 - (width - 1) << 16 | (height - 1)); 2601 amdgpu_crtc->cursor_width = width; 2602 amdgpu_crtc->cursor_height = height; 2603 } 2604 2605 dce_v10_0_show_cursor(crtc); ··· 2617 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) 2618 { 2619 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2620 - struct amdgpu_device *adev = crtc->dev->dev_private; 2621 2622 if (amdgpu_crtc->cursor_bo) { 2623 dce_v10_0_lock_cursor(crtc, true); 2624 2625 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2626 amdgpu_crtc->cursor_y); 2627 - 2628 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2629 - (amdgpu_crtc->cursor_width - 1) << 16 | 2630 - (amdgpu_crtc->cursor_height - 1)); 2631 2632 dce_v10_0_show_cursor(crtc); 2633
··· 2512 2513 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2514 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2515 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2516 + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2517 2518 return 0; 2519 } ··· 2537 int32_t hot_y) 2538 { 2539 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2540 struct drm_gem_object *obj; 2541 struct amdgpu_bo *aobj; 2542 int ret; ··· 2578 2579 dce_v10_0_lock_cursor(crtc, true); 2580 2581 + if (width != amdgpu_crtc->cursor_width || 2582 + height != amdgpu_crtc->cursor_height || 2583 + hot_x != amdgpu_crtc->cursor_hot_x || 2584 hot_y != amdgpu_crtc->cursor_hot_y) { 2585 int x, y; 2586 ··· 2587 2588 dce_v10_0_cursor_move_locked(crtc, x, y); 2589 2590 amdgpu_crtc->cursor_width = width; 2591 amdgpu_crtc->cursor_height = height; 2592 + amdgpu_crtc->cursor_hot_x = hot_x; 2593 + amdgpu_crtc->cursor_hot_y = hot_y; 2594 } 2595 2596 dce_v10_0_show_cursor(crtc); ··· 2620 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) 2621 { 2622 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2623 2624 if (amdgpu_crtc->cursor_bo) { 2625 dce_v10_0_lock_cursor(crtc, true); 2626 2627 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2628 amdgpu_crtc->cursor_y); 2629 2630 dce_v10_0_show_cursor(crtc); 2631
+7 -15
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 2532 2533 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2534 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2535 2536 return 0; 2537 } ··· 2559 int32_t hot_y) 2560 { 2561 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2562 - struct amdgpu_device *adev = crtc->dev->dev_private; 2563 struct drm_gem_object *obj; 2564 struct amdgpu_bo *aobj; 2565 int ret; ··· 2599 2600 dce_v11_0_lock_cursor(crtc, true); 2601 2602 - if (hot_x != amdgpu_crtc->cursor_hot_x || 2603 hot_y != amdgpu_crtc->cursor_hot_y) { 2604 int x, y; 2605 ··· 2610 2611 dce_v11_0_cursor_move_locked(crtc, x, y); 2612 2613 - amdgpu_crtc->cursor_hot_x = hot_x; 2614 - amdgpu_crtc->cursor_hot_y = hot_y; 2615 - } 2616 - 2617 - if (width != amdgpu_crtc->cursor_width || 2618 - height != amdgpu_crtc->cursor_height) { 2619 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2620 - (width - 1) << 16 | (height - 1)); 2621 amdgpu_crtc->cursor_width = width; 2622 amdgpu_crtc->cursor_height = height; 2623 } 2624 2625 dce_v11_0_show_cursor(crtc); ··· 2637 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) 2638 { 2639 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2640 - struct amdgpu_device *adev = crtc->dev->dev_private; 2641 2642 if (amdgpu_crtc->cursor_bo) { 2643 dce_v11_0_lock_cursor(crtc, true); 2644 2645 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2646 amdgpu_crtc->cursor_y); 2647 - 2648 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2649 - (amdgpu_crtc->cursor_width - 1) << 16 | 2650 - (amdgpu_crtc->cursor_height - 1)); 2651 2652 dce_v11_0_show_cursor(crtc); 2653
··· 2532 2533 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2534 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2535 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2536 + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2537 2538 return 0; 2539 } ··· 2557 int32_t hot_y) 2558 { 2559 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2560 struct drm_gem_object *obj; 2561 struct amdgpu_bo *aobj; 2562 int ret; ··· 2598 2599 dce_v11_0_lock_cursor(crtc, true); 2600 2601 + if (width != amdgpu_crtc->cursor_width || 2602 + height != amdgpu_crtc->cursor_height || 2603 + hot_x != amdgpu_crtc->cursor_hot_x || 2604 hot_y != amdgpu_crtc->cursor_hot_y) { 2605 int x, y; 2606 ··· 2607 2608 dce_v11_0_cursor_move_locked(crtc, x, y); 2609 2610 amdgpu_crtc->cursor_width = width; 2611 amdgpu_crtc->cursor_height = height; 2612 + amdgpu_crtc->cursor_hot_x = hot_x; 2613 + amdgpu_crtc->cursor_hot_y = hot_y; 2614 } 2615 2616 dce_v11_0_show_cursor(crtc); ··· 2640 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) 2641 { 2642 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2643 2644 if (amdgpu_crtc->cursor_bo) { 2645 dce_v11_0_lock_cursor(crtc, true); 2646 2647 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2648 amdgpu_crtc->cursor_y); 2649 2650 dce_v11_0_show_cursor(crtc); 2651
+9 -15
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 1859 struct amdgpu_device *adev = crtc->dev->dev_private; 1860 int xorigin = 0, yorigin = 0; 1861 1862 amdgpu_crtc->cursor_x = x; 1863 amdgpu_crtc->cursor_y = y; 1864 ··· 1880 1881 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 1882 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 1883 1884 return 0; 1885 } ··· 1907 int32_t hot_y) 1908 { 1909 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1910 - struct amdgpu_device *adev = crtc->dev->dev_private; 1911 struct drm_gem_object *obj; 1912 struct amdgpu_bo *aobj; 1913 int ret; ··· 1947 1948 dce_v6_0_lock_cursor(crtc, true); 1949 1950 - if (hot_x != amdgpu_crtc->cursor_hot_x || 1951 hot_y != amdgpu_crtc->cursor_hot_y) { 1952 int x, y; 1953 ··· 1958 1959 dce_v6_0_cursor_move_locked(crtc, x, y); 1960 1961 - amdgpu_crtc->cursor_hot_x = hot_x; 1962 - amdgpu_crtc->cursor_hot_y = hot_y; 1963 - } 1964 - 1965 - if (width != amdgpu_crtc->cursor_width || 1966 - height != amdgpu_crtc->cursor_height) { 1967 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 1968 - (width - 1) << 16 | (height - 1)); 1969 amdgpu_crtc->cursor_width = width; 1970 amdgpu_crtc->cursor_height = height; 1971 } 1972 1973 dce_v6_0_show_cursor(crtc); ··· 1985 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) 1986 { 1987 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1988 - struct amdgpu_device *adev = crtc->dev->dev_private; 1989 1990 if (amdgpu_crtc->cursor_bo) { 1991 dce_v6_0_lock_cursor(crtc, true); 1992 1993 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 1994 amdgpu_crtc->cursor_y); 1995 - 1996 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 1997 - (amdgpu_crtc->cursor_width - 1) << 16 | 1998 - (amdgpu_crtc->cursor_height - 1)); 1999 2000 dce_v6_0_show_cursor(crtc); 2001 dce_v6_0_lock_cursor(crtc, false);
··· 1859 struct amdgpu_device *adev = crtc->dev->dev_private; 1860 int xorigin = 0, yorigin = 0; 1861 1862 + int w = amdgpu_crtc->cursor_width; 1863 + 1864 amdgpu_crtc->cursor_x = x; 1865 amdgpu_crtc->cursor_y = y; 1866 ··· 1878 1879 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 1880 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 1881 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 1882 + ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 1883 1884 return 0; 1885 } ··· 1903 int32_t hot_y) 1904 { 1905 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1906 struct drm_gem_object *obj; 1907 struct amdgpu_bo *aobj; 1908 int ret; ··· 1944 1945 dce_v6_0_lock_cursor(crtc, true); 1946 1947 + if (width != amdgpu_crtc->cursor_width || 1948 + height != amdgpu_crtc->cursor_height || 1949 + hot_x != amdgpu_crtc->cursor_hot_x || 1950 hot_y != amdgpu_crtc->cursor_hot_y) { 1951 int x, y; 1952 ··· 1953 1954 dce_v6_0_cursor_move_locked(crtc, x, y); 1955 1956 amdgpu_crtc->cursor_width = width; 1957 amdgpu_crtc->cursor_height = height; 1958 + amdgpu_crtc->cursor_hot_x = hot_x; 1959 + amdgpu_crtc->cursor_hot_y = hot_y; 1960 } 1961 1962 dce_v6_0_show_cursor(crtc); ··· 1986 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) 1987 { 1988 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1989 1990 if (amdgpu_crtc->cursor_bo) { 1991 dce_v6_0_lock_cursor(crtc, true); 1992 1993 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 1994 amdgpu_crtc->cursor_y); 1995 1996 dce_v6_0_show_cursor(crtc); 1997 dce_v6_0_lock_cursor(crtc, false);
+7 -15
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 2363 2364 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2365 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2366 2367 return 0; 2368 } ··· 2390 int32_t hot_y) 2391 { 2392 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2393 - struct amdgpu_device *adev = crtc->dev->dev_private; 2394 struct drm_gem_object *obj; 2395 struct amdgpu_bo *aobj; 2396 int ret; ··· 2430 2431 dce_v8_0_lock_cursor(crtc, true); 2432 2433 - if (hot_x != amdgpu_crtc->cursor_hot_x || 2434 hot_y != amdgpu_crtc->cursor_hot_y) { 2435 int x, y; 2436 ··· 2441 2442 dce_v8_0_cursor_move_locked(crtc, x, y); 2443 2444 - amdgpu_crtc->cursor_hot_x = hot_x; 2445 - amdgpu_crtc->cursor_hot_y = hot_y; 2446 - } 2447 - 2448 - if (width != amdgpu_crtc->cursor_width || 2449 - height != amdgpu_crtc->cursor_height) { 2450 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2451 - (width - 1) << 16 | (height - 1)); 2452 amdgpu_crtc->cursor_width = width; 2453 amdgpu_crtc->cursor_height = height; 2454 } 2455 2456 dce_v8_0_show_cursor(crtc); ··· 2468 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) 2469 { 2470 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2471 - struct amdgpu_device *adev = crtc->dev->dev_private; 2472 2473 if (amdgpu_crtc->cursor_bo) { 2474 dce_v8_0_lock_cursor(crtc, true); 2475 2476 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2477 amdgpu_crtc->cursor_y); 2478 - 2479 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2480 - (amdgpu_crtc->cursor_width - 1) << 16 | 2481 - (amdgpu_crtc->cursor_height - 1)); 2482 2483 dce_v8_0_show_cursor(crtc); 2484
··· 2363 2364 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2365 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2366 + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2367 + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2368 2369 return 0; 2370 } ··· 2388 int32_t hot_y) 2389 { 2390 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2391 struct drm_gem_object *obj; 2392 struct amdgpu_bo *aobj; 2393 int ret; ··· 2429 2430 dce_v8_0_lock_cursor(crtc, true); 2431 2432 + if (width != amdgpu_crtc->cursor_width || 2433 + height != amdgpu_crtc->cursor_height || 2434 + hot_x != amdgpu_crtc->cursor_hot_x || 2435 hot_y != amdgpu_crtc->cursor_hot_y) { 2436 int x, y; 2437 ··· 2438 2439 dce_v8_0_cursor_move_locked(crtc, x, y); 2440 2441 amdgpu_crtc->cursor_width = width; 2442 amdgpu_crtc->cursor_height = height; 2443 + amdgpu_crtc->cursor_hot_x = hot_x; 2444 + amdgpu_crtc->cursor_hot_y = hot_y; 2445 } 2446 2447 dce_v8_0_show_cursor(crtc); ··· 2471 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) 2472 { 2473 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2474 2475 if (amdgpu_crtc->cursor_bo) { 2476 dce_v8_0_lock_cursor(crtc, true); 2477 2478 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2479 amdgpu_crtc->cursor_y); 2480 2481 dce_v8_0_show_cursor(crtc); 2482
+19 -15
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 44 MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); 45 MODULE_FIRMWARE("radeon/verde_mc.bin"); 46 MODULE_FIRMWARE("radeon/oland_mc.bin"); 47 48 #define MC_SEQ_MISC0__MT__MASK 0xf0000000 49 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 ··· 114 const char *chip_name; 115 char fw_name[30]; 116 int err; 117 118 DRM_DEBUG("\n"); 119 ··· 137 default: BUG(); 138 } 139 140 - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 141 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 142 if (err) 143 goto out; ··· 472 WREG32(mmVM_CONTEXT1_CNTL, 473 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | 474 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | 475 - ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) | 476 - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 477 - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 478 - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 479 - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 480 - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 481 - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 482 - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 483 - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 484 - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 485 - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 486 - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 487 - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK); 488 489 gmc_v6_0_gart_flush_gpu_tlb(adev, 0); 490 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", ··· 755 { 756 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 757 758 - return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 759 } 760 761 static int gmc_v6_0_sw_init(void *handle)
··· 44 MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); 45 MODULE_FIRMWARE("radeon/verde_mc.bin"); 46 MODULE_FIRMWARE("radeon/oland_mc.bin"); 47 + MODULE_FIRMWARE("radeon/si58_mc.bin"); 48 49 #define MC_SEQ_MISC0__MT__MASK 0xf0000000 50 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 ··· 113 const char *chip_name; 114 char fw_name[30]; 115 int err; 116 + bool is_58_fw = false; 117 118 DRM_DEBUG("\n"); 119 ··· 135 default: BUG(); 136 } 137 138 + /* this memory configuration requires special firmware */ 139 + if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) 140 + is_58_fw = true; 141 + 142 + if (is_58_fw) 143 + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); 144 + else 145 + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 146 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 147 if (err) 148 goto out; ··· 463 WREG32(mmVM_CONTEXT1_CNTL, 464 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | 465 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | 466 + ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); 467 + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 468 + gmc_v6_0_set_fault_enable_default(adev, false); 469 + else 470 + gmc_v6_0_set_fault_enable_default(adev, true); 471 472 gmc_v6_0_gart_flush_gpu_tlb(adev, 0); 473 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", ··· 754 { 755 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 756 757 + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) 758 + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 759 + else 760 + return 0; 761 } 762 763 static int gmc_v6_0_sw_init(void *handle)
+5 -15
drivers/gpu/drm/amd/amdgpu/si_dpm.c
··· 64 MODULE_FIRMWARE("radeon/oland_k_smc.bin"); 65 MODULE_FIRMWARE("radeon/hainan_smc.bin"); 66 MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 67 68 union power_info { 69 struct _ATOM_POWERPLAY_INFO info; ··· 3488 (adev->pdev->device == 0x6817) || 3489 (adev->pdev->device == 0x6806)) 3490 max_mclk = 120000; 3491 - } else if (adev->asic_type == CHIP_OLAND) { 3492 - if ((adev->pdev->revision == 0xC7) || 3493 - (adev->pdev->revision == 0x80) || 3494 - (adev->pdev->revision == 0x81) || 3495 - (adev->pdev->revision == 0x83) || 3496 - (adev->pdev->revision == 0x87) || 3497 - (adev->pdev->device == 0x6604) || 3498 - (adev->pdev->device == 0x6605)) { 3499 - max_sclk = 75000; 3500 - max_mclk = 80000; 3501 - } 3502 } else if (adev->asic_type == CHIP_HAINAN) { 3503 if ((adev->pdev->revision == 0x81) || 3504 (adev->pdev->revision == 0x83) || ··· 3496 (adev->pdev->device == 0x6665) || 3497 (adev->pdev->device == 0x6667)) { 3498 max_sclk = 75000; 3499 - max_mclk = 80000; 3500 } 3501 } 3502 /* Apply dpm quirks */ ··· 7702 ((adev->pdev->device == 0x6660) || 7703 (adev->pdev->device == 0x6663) || 7704 (adev->pdev->device == 0x6665) || 7705 - (adev->pdev->device == 0x6667))) || 7706 - ((adev->pdev->revision == 0xc3) && 7707 - (adev->pdev->device == 0x6665))) 7708 chip_name = "hainan_k"; 7709 else 7710 chip_name = "hainan"; 7711 break;
··· 64 MODULE_FIRMWARE("radeon/oland_k_smc.bin"); 65 MODULE_FIRMWARE("radeon/hainan_smc.bin"); 66 MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 67 + MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 68 69 union power_info { 70 struct _ATOM_POWERPLAY_INFO info; ··· 3487 (adev->pdev->device == 0x6817) || 3488 (adev->pdev->device == 0x6806)) 3489 max_mclk = 120000; 3490 } else if (adev->asic_type == CHIP_HAINAN) { 3491 if ((adev->pdev->revision == 0x81) || 3492 (adev->pdev->revision == 0x83) || ··· 3506 (adev->pdev->device == 0x6665) || 3507 (adev->pdev->device == 0x6667)) { 3508 max_sclk = 75000; 3509 } 3510 } 3511 /* Apply dpm quirks */ ··· 7713 ((adev->pdev->device == 0x6660) || 7714 (adev->pdev->device == 0x6663) || 7715 (adev->pdev->device == 0x6665) || 7716 + (adev->pdev->device == 0x6667)))) 7717 chip_name = "hainan_k"; 7718 + else if ((adev->pdev->revision == 0xc3) && 7719 + (adev->pdev->device == 0x6665)) 7720 + chip_name = "banks_k_2"; 7721 else 7722 chip_name = "hainan"; 7723 break;
+10 -32
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
··· 40 #include "smu/smu_7_0_1_sh_mask.h" 41 42 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); 43 - static void uvd_v4_2_init_cg(struct amdgpu_device *adev); 44 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); 45 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); 46 static int uvd_v4_2_start(struct amdgpu_device *adev); 47 static void uvd_v4_2_stop(struct amdgpu_device *adev); 48 static int uvd_v4_2_set_clockgating_state(void *handle, 49 enum amd_clockgating_state state); 50 /** 51 * uvd_v4_2_ring_get_rptr - get read pointer 52 * ··· 141 142 return r; 143 } 144 - 145 /** 146 * uvd_v4_2_hw_init - start and test UVD block 147 * ··· 157 uint32_t tmp; 158 int r; 159 160 - uvd_v4_2_init_cg(adev); 161 - uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE); 162 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); 163 r = uvd_v4_2_start(adev); 164 if (r) ··· 267 struct amdgpu_ring *ring = &adev->uvd.ring; 268 uint32_t rb_bufsz; 269 int i, j, r; 270 - 271 /* disable byte swapping */ 272 u32 lmi_swap_cntl = 0; 273 u32 mp_swap_cntl = 0; 274 275 uvd_v4_2_mc_resume(adev); 276 ··· 409 410 /* Unstall UMC and register bus */ 411 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); 412 } 413 414 /** ··· 624 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); 625 } 626 627 - static void uvd_v4_2_init_cg(struct amdgpu_device *adev) 628 - { 629 - bool hw_mode = true; 630 - 631 - if (hw_mode) { 632 - uvd_v4_2_set_dcm(adev, false); 633 - } else { 634 - u32 tmp = RREG32(mmUVD_CGC_CTRL); 635 - tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; 636 - WREG32(mmUVD_CGC_CTRL, tmp); 637 - } 638 - } 639 - 640 static bool uvd_v4_2_is_idle(void *handle) 641 { 642 struct amdgpu_device *adev = (struct amdgpu_device *)handle; ··· 677 static int uvd_v4_2_set_clockgating_state(void *handle, 678 enum amd_clockgating_state state) 679 { 680 - bool gate = false; 681 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 682 - 683 - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) 684 - return 0; 685 - 686 - if (state == AMD_CG_STATE_GATE) 687 - gate = true; 688 - 689 - uvd_v4_2_enable_mgcg(adev, gate); 690 - 691 return 0; 692 } 693 ··· 691 * the smc and the hw blocks 692 */ 693 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 694 - 695 - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) 696 - return 0; 697 698 if (state == AMD_PG_STATE_GATE) { 699 uvd_v4_2_stop(adev);
··· 40 #include "smu/smu_7_0_1_sh_mask.h" 41 42 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); 43 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); 44 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); 45 static int uvd_v4_2_start(struct amdgpu_device *adev); 46 static void uvd_v4_2_stop(struct amdgpu_device *adev); 47 static int uvd_v4_2_set_clockgating_state(void *handle, 48 enum amd_clockgating_state state); 49 + static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, 50 + bool sw_mode); 51 /** 52 * uvd_v4_2_ring_get_rptr - get read pointer 53 * ··· 140 141 return r; 142 } 143 + static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, 144 + bool enable); 145 /** 146 * uvd_v4_2_hw_init - start and test UVD block 147 * ··· 155 uint32_t tmp; 156 int r; 157 158 + uvd_v4_2_enable_mgcg(adev, true); 159 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); 160 r = uvd_v4_2_start(adev); 161 if (r) ··· 266 struct amdgpu_ring *ring = &adev->uvd.ring; 267 uint32_t rb_bufsz; 268 int i, j, r; 269 /* disable byte swapping */ 270 u32 lmi_swap_cntl = 0; 271 u32 mp_swap_cntl = 0; 272 + 273 + WREG32(mmUVD_CGC_GATE, 0); 274 + uvd_v4_2_set_dcm(adev, true); 275 276 uvd_v4_2_mc_resume(adev); 277 ··· 406 407 /* Unstall UMC and register bus */ 408 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); 409 + 410 + uvd_v4_2_set_dcm(adev, false); 411 } 412 413 /** ··· 619 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); 620 } 621 622 static bool uvd_v4_2_is_idle(void *handle) 623 { 624 struct amdgpu_device *adev = (struct amdgpu_device *)handle; ··· 685 static int uvd_v4_2_set_clockgating_state(void *handle, 686 enum amd_clockgating_state state) 687 { 688 return 0; 689 } 690 ··· 710 * the smc and the hw blocks 711 */ 712 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 713 714 if (state == AMD_PG_STATE_GATE) { 715 uvd_v4_2_stop(adev);
+17 -10
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 43 44 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 45 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 46 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 47 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 48 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 49 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 50 51 #define VCE_V3_0_FW_SIZE (384 * 1024) ··· 57 #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) 58 59 #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) 60 61 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 62 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); ··· 182 WREG32(mmVCE_UENC_CLOCK_GATING_2, data); 183 184 data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); 185 - data &= ~0xffc00000; 186 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); 187 188 data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); ··· 256 if (adev->vce.harvest_config & (1 << idx)) 257 continue; 258 259 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 260 vce_v3_0_mc_resume(adev, idx); 261 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 262 ··· 280 } 281 } 282 283 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 284 mutex_unlock(&adev->grbm_idx_mutex); 285 286 return 0; ··· 295 if (adev->vce.harvest_config & (1 << idx)) 296 continue; 297 298 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 299 300 if (adev->asic_type >= CHIP_STONEY) 301 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); ··· 313 vce_v3_0_set_vce_sw_clock_gating(adev, false); 314 } 315 316 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 317 mutex_unlock(&adev->grbm_idx_mutex); 318 319 return 0; ··· 593 * VCE team suggest use bit 3--bit 6 for busy status check 594 */ 595 mutex_lock(&adev->grbm_idx_mutex); 596 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); 597 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 598 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 599 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 600 } 601 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); 602 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 603 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 604 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 605 } 606 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); 607 mutex_unlock(&adev->grbm_idx_mutex); 608 609 if (srbm_soft_reset) { ··· 741 if (adev->vce.harvest_config & (1 << i)) 742 continue; 743 744 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); 745 746 if (enable) { 747 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ ··· 760 vce_v3_0_set_vce_sw_clock_gating(adev, enable); 761 } 762 763 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 764 mutex_unlock(&adev->grbm_idx_mutex); 765 766 return 0;
··· 43 44 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 45 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 46 + #define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 47 + 48 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 49 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 50 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 51 + #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 52 + 53 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 54 55 #define VCE_V3_0_FW_SIZE (384 * 1024) ··· 53 #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) 54 55 #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) 56 + 57 + #define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ 58 + | GRBM_GFX_INDEX__VCE_ALL_PIPE) 59 60 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 61 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); ··· 175 WREG32(mmVCE_UENC_CLOCK_GATING_2, data); 176 177 data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); 178 + data &= ~0x3ff; 179 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); 180 181 data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); ··· 249 if (adev->vce.harvest_config & (1 << idx)) 250 continue; 251 252 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); 253 vce_v3_0_mc_resume(adev, idx); 254 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 255 ··· 273 } 274 } 275 276 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 277 mutex_unlock(&adev->grbm_idx_mutex); 278 279 return 0; ··· 288 if (adev->vce.harvest_config & (1 << idx)) 289 continue; 290 291 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); 292 293 if (adev->asic_type >= CHIP_STONEY) 294 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); ··· 306 vce_v3_0_set_vce_sw_clock_gating(adev, false); 307 } 308 309 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 310 mutex_unlock(&adev->grbm_idx_mutex); 311 312 return 0; ··· 586 * VCE team suggest use bit 3--bit 6 for busy status check 587 */ 588 mutex_lock(&adev->grbm_idx_mutex); 589 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 590 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 591 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 592 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 593 } 594 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 595 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 596 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 597 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 598 } 599 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 600 mutex_unlock(&adev->grbm_idx_mutex); 601 602 if (srbm_soft_reset) { ··· 734 if (adev->vce.harvest_config & (1 << i)) 735 continue; 736 737 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); 738 739 if (enable) { 740 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ ··· 753 vce_v3_0_set_vce_sw_clock_gating(adev, enable); 754 } 755 756 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 757 mutex_unlock(&adev->grbm_idx_mutex); 758 759 return 0;
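For the vce_v3_0 change above, the instance-select writes now go through the full GRBM_GFX_INDEX register so the chosen VCE instance is addressed together with all pipes, rather than via a single-field WREG32_FIELD() write. A quick sanity sketch of the values the new macro produces (macro definitions copied from the diff; the static asserts are only illustrative):

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07

#define GET_VCE_INSTANCE(i)	((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
				 | GRBM_GFX_INDEX__VCE_ALL_PIPE)

_Static_assert(GET_VCE_INSTANCE(0) == 0x07, "instance 0, all pipes selected");
_Static_assert(GET_VCE_INSTANCE(1) == 0x17, "instance 1, all pipes selected");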
+2 -2
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
··· 200 cgs_set_clockgating_state( 201 hwmgr->device, 202 AMD_IP_BLOCK_TYPE_VCE, 203 - AMD_CG_STATE_UNGATE); 204 cgs_set_powergating_state( 205 hwmgr->device, 206 AMD_IP_BLOCK_TYPE_VCE, ··· 218 cgs_set_clockgating_state( 219 hwmgr->device, 220 AMD_IP_BLOCK_TYPE_VCE, 221 - AMD_PG_STATE_GATE); 222 cz_dpm_update_vce_dpm(hwmgr); 223 cz_enable_disable_vce_dpm(hwmgr, true); 224 return 0;
··· 200 cgs_set_clockgating_state( 201 hwmgr->device, 202 AMD_IP_BLOCK_TYPE_VCE, 203 + AMD_CG_STATE_GATE); 204 cgs_set_powergating_state( 205 hwmgr->device, 206 AMD_IP_BLOCK_TYPE_VCE, ··· 218 cgs_set_clockgating_state( 219 hwmgr->device, 220 AMD_IP_BLOCK_TYPE_VCE, 221 + AMD_PG_STATE_UNGATE); 222 cz_dpm_update_vce_dpm(hwmgr); 223 cz_enable_disable_vce_dpm(hwmgr, true); 224 return 0;
+16 -8
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
··· 1402 cz_hwmgr->vce_dpm.hard_min_clk, 1403 PPSMC_MSG_SetEclkHardMin)); 1404 } else { 1405 - /*EPR# 419220 -HW limitation to to */ 1406 - cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; 1407 - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1408 - PPSMC_MSG_SetEclkHardMin, 1409 - cz_get_eclk_level(hwmgr, 1410 - cz_hwmgr->vce_dpm.hard_min_clk, 1411 - PPSMC_MSG_SetEclkHardMin)); 1412 - 1413 } 1414 return 0; 1415 }
··· 1402 cz_hwmgr->vce_dpm.hard_min_clk, 1403 PPSMC_MSG_SetEclkHardMin)); 1404 } else { 1405 + /*Program HardMin based on the vce_arbiter.ecclk */ 1406 + if (hwmgr->vce_arbiter.ecclk == 0) { 1407 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1408 + PPSMC_MSG_SetEclkHardMin, 0); 1409 + /* disable ECLK DPM 0. Otherwise VCE could hang if 1410 + * switching SCLK from DPM 0 to 6/7 */ 1411 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1412 + PPSMC_MSG_SetEclkSoftMin, 1); 1413 + } else { 1414 + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; 1415 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1416 + PPSMC_MSG_SetEclkHardMin, 1417 + cz_get_eclk_level(hwmgr, 1418 + cz_hwmgr->vce_dpm.hard_min_clk, 1419 + PPSMC_MSG_SetEclkHardMin)); 1420 + } 1421 } 1422 return 0; 1423 }
+7
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
··· 1382 1383 pm_runtime_enable(dev); 1384 1385 phy_power_on(dp->phy); 1386 1387 analogix_dp_init_dp(dp); ··· 1415 goto err_disable_pm_runtime; 1416 } 1417 1418 return 0; 1419 1420 err_disable_pm_runtime: 1421 pm_runtime_disable(dev); 1422 1423 return ret;
··· 1382 1383 pm_runtime_enable(dev); 1384 1385 + pm_runtime_get_sync(dev); 1386 phy_power_on(dp->phy); 1387 1388 analogix_dp_init_dp(dp); ··· 1414 goto err_disable_pm_runtime; 1415 } 1416 1417 + phy_power_off(dp->phy); 1418 + pm_runtime_put(dev); 1419 + 1420 return 0; 1421 1422 err_disable_pm_runtime: 1423 + 1424 + phy_power_off(dp->phy); 1425 + pm_runtime_put(dev); 1426 pm_runtime_disable(dev); 1427 1428 return ret;
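The analogix_dp change above makes the bind path hold a runtime-PM reference and power the PHY only around the probe-time hardware init, dropping both again on success as well as on the error path. A minimal sketch of that balance (the function body and the init stand-in are illustrative; the device struct and PM/PHY calls follow the driver):

static int analogix_dp_bind_sketch(struct device *dev,
				   struct analogix_dp_device *dp)
{
	int ret;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);		/* power up for the init below */
	phy_power_on(dp->phy);

	ret = hw_init_and_request_irq(dp);	/* stand-in for the real init */

	phy_power_off(dp->phy);			/* drop power in both outcomes */
	pm_runtime_put(dev);
	if (ret)
		pm_runtime_disable(dev);

	return ret;
}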
+9
drivers/gpu/drm/cirrus/Kconfig
··· 7 This is a KMS driver for emulated cirrus device in qemu. 8 It is *NOT* intended for real cirrus devices. This requires 9 the modesetting userspace X.org driver.
··· 7 This is a KMS driver for emulated cirrus device in qemu. 8 It is *NOT* intended for real cirrus devices. This requires 9 the modesetting userspace X.org driver. 10 + 11 + Cirrus is obsolete, the hardware was designed in the 90ies 12 + and can't keep up with todays needs. More background: 13 + https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ 14 + 15 + Better alternatives are: 16 + - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+) 17 + - qxl (DRM_QXL, qemu -vga qxl, works best with spice) 18 + - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
+7
drivers/gpu/drm/drm_modes.c
··· 1460 return NULL; 1461 1462 mode->type |= DRM_MODE_TYPE_USERDEF; 1463 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1464 return mode; 1465 }
··· 1460 return NULL; 1461 1462 mode->type |= DRM_MODE_TYPE_USERDEF; 1463 + /* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */ 1464 + if (cmd->xres == 1366 && mode->hdisplay == 1368) { 1465 + mode->hdisplay = 1366; 1466 + mode->hsync_start--; 1467 + mode->hsync_end--; 1468 + drm_mode_set_name(mode); 1469 + } 1470 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1471 return mode; 1472 }
+31 -32
drivers/gpu/drm/drm_probe_helper.c
··· 115 116 #define DRM_OUTPUT_POLL_PERIOD (10*HZ) 117 /** 118 - * drm_kms_helper_poll_enable_locked - re-enable output polling. 119 * @dev: drm_device 120 * 121 - * This function re-enables the output polling work without 122 - * locking the mode_config mutex. 123 * 124 - * This is like drm_kms_helper_poll_enable() however it is to be 125 - * called from a context where the mode_config mutex is locked 126 - * already. 127 */ 128 - void drm_kms_helper_poll_enable_locked(struct drm_device *dev) 129 { 130 bool poll = false; 131 struct drm_connector *connector; 132 unsigned long delay = DRM_OUTPUT_POLL_PERIOD; 133 - 134 - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 135 136 if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) 137 return; ··· 146 } 147 148 if (dev->mode_config.delayed_event) { 149 poll = true; 150 - delay = 0; 151 } 152 153 if (poll) 154 schedule_delayed_work(&dev->mode_config.output_poll_work, delay); 155 } 156 - EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); 157 158 static enum drm_connector_status 159 drm_connector_detect(struct drm_connector *connector, bool force) ··· 290 291 /* Re-enable polling in case the global poll config changed. */ 292 if (drm_kms_helper_poll != dev->mode_config.poll_running) 293 - drm_kms_helper_poll_enable_locked(dev); 294 295 dev->mode_config.poll_running = drm_kms_helper_poll; 296 ··· 482 * This function disables the output polling work. 483 * 484 * Drivers can call this helper from their device suspend implementation. It is 485 - * not an error to call this even when output polling isn't enabled or arlready 486 - * disabled. 487 */ 488 void drm_kms_helper_poll_disable(struct drm_device *dev) 489 { ··· 496 cancel_delayed_work_sync(&dev->mode_config.output_poll_work); 497 } 498 EXPORT_SYMBOL(drm_kms_helper_poll_disable); 499 - 500 - /** 501 - * drm_kms_helper_poll_enable - re-enable output polling. 502 - * @dev: drm_device 503 - * 504 - * This function re-enables the output polling work. 505 - * 506 - * Drivers can call this helper from their device resume implementation. It is 507 - * an error to call this when the output polling support has not yet been set 508 - * up. 509 - */ 510 - void drm_kms_helper_poll_enable(struct drm_device *dev) 511 - { 512 - mutex_lock(&dev->mode_config.mutex); 513 - drm_kms_helper_poll_enable_locked(dev); 514 - mutex_unlock(&dev->mode_config.mutex); 515 - } 516 - EXPORT_SYMBOL(drm_kms_helper_poll_enable); 517 518 /** 519 * drm_kms_helper_poll_init - initialize and enable output polling
··· 115 116 #define DRM_OUTPUT_POLL_PERIOD (10*HZ) 117 /** 118 + * drm_kms_helper_poll_enable - re-enable output polling. 119 * @dev: drm_device 120 * 121 + * This function re-enables the output polling work, after it has been 122 + * temporarily disabled using drm_kms_helper_poll_disable(), for example over 123 + * suspend/resume. 124 * 125 + * Drivers can call this helper from their device resume implementation. It is 126 + * an error to call this when the output polling support has not yet been set 127 + * up. 128 + * 129 + * Note that calls to enable and disable polling must be strictly ordered, which 130 + * is automatically the case when they're only call from suspend/resume 131 + * callbacks. 132 */ 133 + void drm_kms_helper_poll_enable(struct drm_device *dev) 134 { 135 bool poll = false; 136 struct drm_connector *connector; 137 unsigned long delay = DRM_OUTPUT_POLL_PERIOD; 138 139 if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) 140 return; ··· 143 } 144 145 if (dev->mode_config.delayed_event) { 146 + /* 147 + * FIXME: 148 + * 149 + * Use short (1s) delay to handle the initial delayed event. 150 + * This delay should not be needed, but Optimus/nouveau will 151 + * fail in a mysterious way if the delayed event is handled as 152 + * soon as possible like it is done in 153 + * drm_helper_probe_single_connector_modes() in case the poll 154 + * was enabled before. 155 + */ 156 poll = true; 157 + delay = HZ; 158 } 159 160 if (poll) 161 schedule_delayed_work(&dev->mode_config.output_poll_work, delay); 162 } 163 + EXPORT_SYMBOL(drm_kms_helper_poll_enable); 164 165 static enum drm_connector_status 166 drm_connector_detect(struct drm_connector *connector, bool force) ··· 277 278 /* Re-enable polling in case the global poll config changed. */ 279 if (drm_kms_helper_poll != dev->mode_config.poll_running) 280 + drm_kms_helper_poll_enable(dev); 281 282 dev->mode_config.poll_running = drm_kms_helper_poll; 283 ··· 469 * This function disables the output polling work. 470 * 471 * Drivers can call this helper from their device suspend implementation. It is 472 + * not an error to call this even when output polling isn't enabled or already 473 + * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable(). 474 + * 475 + * Note that calls to enable and disable polling must be strictly ordered, which 476 + * is automatically the case when they're only call from suspend/resume 477 + * callbacks. 478 */ 479 void drm_kms_helper_poll_disable(struct drm_device *dev) 480 { ··· 479 cancel_delayed_work_sync(&dev->mode_config.output_poll_work); 480 } 481 EXPORT_SYMBOL(drm_kms_helper_poll_disable); 482 483 /** 484 * drm_kms_helper_poll_init - initialize and enable output polling
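With drm_kms_helper_poll_enable_locked() folded into drm_kms_helper_poll_enable(), the kerneldoc above now asks drivers to keep the enable/disable calls strictly ordered, which is automatically the case when they are only issued from suspend/resume. A minimal sketch of that call pattern (the driver callbacks are hypothetical):

static int foo_drm_suspend(struct drm_device *dev)
{
	drm_kms_helper_poll_disable(dev);	/* stop polling before power-down */
	/* ... driver-specific power-down ... */
	return 0;
}

static int foo_drm_resume(struct drm_device *dev)
{
	/* ... driver-specific power-up ... */
	drm_kms_helper_poll_enable(dev);	/* re-enable polling afterwards */
	return 0;
}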
+6 -1
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
··· 116 struct list_head list; 117 bool found; 118 119 ret = drm_mm_insert_node_in_range(&mmu->mm, node, 120 size, 0, mmu->last_iova, ~0UL, 121 - DRM_MM_SEARCH_DEFAULT); 122 123 if (ret != -ENOSPC) 124 break;
··· 116 struct list_head list; 117 bool found; 118 119 + /* 120 + * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick 121 + * drm_mm into giving out a low IOVA after address space 122 + * rollover. This needs a proper fix. 123 + */ 124 ret = drm_mm_insert_node_in_range(&mmu->mm, node, 125 size, 0, mmu->last_iova, ~0UL, 126 + mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); 127 128 if (ret != -ENOSPC) 129 break;
+6 -9
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
··· 46 BIT_CLKS_ENABLED, 47 BIT_IRQS_ENABLED, 48 BIT_WIN_UPDATED, 49 - BIT_SUSPENDED 50 }; 51 52 struct decon_context { ··· 141 m->crtc_vsync_start = m->crtc_vdisplay + 1; 142 m->crtc_vsync_end = m->crtc_vsync_start + 1; 143 } 144 - 145 - decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0); 146 - 147 - /* enable clock gate */ 148 - val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F; 149 - writel(val, ctx->addr + DECON_CMU); 150 151 if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) 152 decon_setup_trigger(ctx); ··· 310 311 /* window enable */ 312 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); 313 } 314 315 static void decon_disable_plane(struct exynos_drm_crtc *crtc, ··· 323 return; 324 325 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); 326 } 327 328 static void decon_atomic_flush(struct exynos_drm_crtc *crtc) ··· 337 for (i = ctx->first_win; i < WINDOWS_NR; i++) 338 decon_shadow_protect_win(ctx, i, false); 339 340 - /* standalone update */ 341 - decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); 342 343 if (ctx->out_type & IFTYPE_I80) 344 set_bit(BIT_WIN_UPDATED, &ctx->flags);
··· 46 BIT_CLKS_ENABLED, 47 BIT_IRQS_ENABLED, 48 BIT_WIN_UPDATED, 49 + BIT_SUSPENDED, 50 + BIT_REQUEST_UPDATE 51 }; 52 53 struct decon_context { ··· 140 m->crtc_vsync_start = m->crtc_vdisplay + 1; 141 m->crtc_vsync_end = m->crtc_vsync_start + 1; 142 } 143 144 if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) 145 decon_setup_trigger(ctx); ··· 315 316 /* window enable */ 317 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); 318 + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); 319 } 320 321 static void decon_disable_plane(struct exynos_drm_crtc *crtc, ··· 327 return; 328 329 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); 330 + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); 331 } 332 333 static void decon_atomic_flush(struct exynos_drm_crtc *crtc) ··· 340 for (i = ctx->first_win; i < WINDOWS_NR; i++) 341 decon_shadow_protect_win(ctx, i, false); 342 343 + if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) 344 + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); 345 346 if (ctx->out_type & IFTYPE_I80) 347 set_bit(BIT_WIN_UPDATED, &ctx->flags);
+27 -9
drivers/gpu/drm/i915/gvt/aperture_gm.c
··· 37 #include "i915_drv.h" 38 #include "gvt.h" 39 40 - #define MB_TO_BYTES(mb) ((mb) << 20ULL) 41 - #define BYTES_TO_MB(b) ((b) >> 20ULL) 42 - 43 - #define HOST_LOW_GM_SIZE MB_TO_BYTES(128) 44 - #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) 45 - #define HOST_FENCE 4 46 - 47 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) 48 { 49 struct intel_gvt *gvt = vgpu->gvt; ··· 158 POSTING_READ(fence_reg_lo); 159 } 160 161 static void free_vgpu_fence(struct intel_vgpu *vgpu) 162 { 163 struct intel_gvt *gvt = vgpu->gvt; ··· 179 intel_runtime_pm_get(dev_priv); 180 181 mutex_lock(&dev_priv->drm.struct_mutex); 182 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 183 reg = vgpu->fence.regs[i]; 184 - intel_vgpu_write_fence(vgpu, i, 0); 185 list_add_tail(&reg->link, 186 &dev_priv->mm.fence_list); 187 } ··· 209 continue; 210 list_del(pos); 211 vgpu->fence.regs[i] = reg; 212 - intel_vgpu_write_fence(vgpu, i, 0); 213 if (++i == vgpu_fence_sz(vgpu)) 214 break; 215 } 216 if (i != vgpu_fence_sz(vgpu)) 217 goto out_free_fence; 218 219 mutex_unlock(&dev_priv->drm.struct_mutex); 220 intel_runtime_pm_put(dev_priv); ··· 313 free_vgpu_gm(vgpu); 314 free_vgpu_fence(vgpu); 315 free_resource(vgpu); 316 } 317 318 /**
··· 37 #include "i915_drv.h" 38 #include "gvt.h" 39 40 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) 41 { 42 struct intel_gvt *gvt = vgpu->gvt; ··· 165 POSTING_READ(fence_reg_lo); 166 } 167 168 + static void _clear_vgpu_fence(struct intel_vgpu *vgpu) 169 + { 170 + int i; 171 + 172 + for (i = 0; i < vgpu_fence_sz(vgpu); i++) 173 + intel_vgpu_write_fence(vgpu, i, 0); 174 + } 175 + 176 static void free_vgpu_fence(struct intel_vgpu *vgpu) 177 { 178 struct intel_gvt *gvt = vgpu->gvt; ··· 178 intel_runtime_pm_get(dev_priv); 179 180 mutex_lock(&dev_priv->drm.struct_mutex); 181 + _clear_vgpu_fence(vgpu); 182 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 183 reg = vgpu->fence.regs[i]; 184 list_add_tail(&reg->link, 185 &dev_priv->mm.fence_list); 186 } ··· 208 continue; 209 list_del(pos); 210 vgpu->fence.regs[i] = reg; 211 if (++i == vgpu_fence_sz(vgpu)) 212 break; 213 } 214 if (i != vgpu_fence_sz(vgpu)) 215 goto out_free_fence; 216 + 217 + _clear_vgpu_fence(vgpu); 218 219 mutex_unlock(&dev_priv->drm.struct_mutex); 220 intel_runtime_pm_put(dev_priv); ··· 311 free_vgpu_gm(vgpu); 312 free_vgpu_fence(vgpu); 313 free_resource(vgpu); 314 + } 315 + 316 + /** 317 + * intel_vgpu_reset_resource - reset resource state owned by a vGPU 318 + * @vgpu: a vGPU 319 + * 320 + * This function is used to reset resource state owned by a vGPU. 321 + * 322 + */ 323 + void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) 324 + { 325 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 326 + 327 + intel_runtime_pm_get(dev_priv); 328 + _clear_vgpu_fence(vgpu); 329 + intel_runtime_pm_put(dev_priv); 330 } 331 332 /**
+74
drivers/gpu/drm/i915/gvt/cfg_space.c
··· 282 } 283 return 0; 284 }
··· 282 } 283 return 0; 284 } 285 + 286 + /** 287 + * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU 288 + * 289 + * @vgpu: a vGPU 290 + * @primary: is the vGPU presented as primary 291 + * 292 + */ 293 + void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, 294 + bool primary) 295 + { 296 + struct intel_gvt *gvt = vgpu->gvt; 297 + const struct intel_gvt_device_info *info = &gvt->device_info; 298 + u16 *gmch_ctl; 299 + int i; 300 + 301 + memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, 302 + info->cfg_space_size); 303 + 304 + if (!primary) { 305 + vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = 306 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 307 + vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = 308 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 309 + } 310 + 311 + /* Show guest that there isn't any stolen memory.*/ 312 + gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); 313 + *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); 314 + 315 + intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, 316 + gvt_aperture_pa_base(gvt), true); 317 + 318 + vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO 319 + | PCI_COMMAND_MEMORY 320 + | PCI_COMMAND_MASTER); 321 + /* 322 + * Clear the bar upper 32bit and let guest to assign the new value 323 + */ 324 + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); 325 + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); 326 + memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); 327 + 328 + for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { 329 + vgpu->cfg_space.bar[i].size = pci_resource_len( 330 + gvt->dev_priv->drm.pdev, i * 2); 331 + vgpu->cfg_space.bar[i].tracked = false; 332 + } 333 + } 334 + 335 + /** 336 + * intel_vgpu_reset_cfg_space - reset vGPU configuration space 337 + * 338 + * @vgpu: a vGPU 339 + * 340 + */ 341 + void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) 342 + { 343 + u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; 344 + bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != 345 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 346 + 347 + if (cmd & PCI_COMMAND_MEMORY) { 348 + trap_gttmmio(vgpu, false); 349 + map_aperture(vgpu, false); 350 + } 351 + 352 + /** 353 + * Currently we only do such reset when vGPU is not 354 + * owned by any VM, so we simply restore entire cfg 355 + * space to default value. 356 + */ 357 + intel_vgpu_init_cfg_space(vgpu, primary); 358 + }
+44 -37
drivers/gpu/drm/i915/gvt/gtt.c
··· 240 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) 241 { 242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 243 - u64 pte; 244 245 - #ifdef readq 246 - pte = readq(addr); 247 - #else 248 - pte = ioread32(addr); 249 - pte |= (u64)ioread32(addr + 4) << 32; 250 - #endif 251 - return pte; 252 } 253 254 static void write_pte64(struct drm_i915_private *dev_priv, ··· 249 { 250 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 251 252 - #ifdef writeq 253 writeq(pte, addr); 254 - #else 255 - iowrite32((u32)pte, addr); 256 - iowrite32(pte >> 32, addr + 4); 257 - #endif 258 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 259 POSTING_READ(GFX_FLSH_CNTL_GEN6); 260 } ··· 1369 info->gtt_entry_size; 1370 mem = kzalloc(mm->has_shadow_page_table ? 1371 mm->page_table_entry_size * 2 1372 - : mm->page_table_entry_size, 1373 - GFP_ATOMIC); 1374 if (!mem) 1375 return -ENOMEM; 1376 mm->virtual_page_table = mem; ··· 1520 struct intel_vgpu_mm *mm; 1521 int ret; 1522 1523 - mm = kzalloc(sizeof(*mm), GFP_ATOMIC); 1524 if (!mm) { 1525 ret = -ENOMEM; 1526 goto fail; ··· 1874 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1875 int page_entry_num = GTT_PAGE_SIZE >> 1876 vgpu->gvt->device_info.gtt_entry_size_shift; 1877 - struct page *scratch_pt; 1878 unsigned long mfn; 1879 int i; 1880 - void *p; 1881 1882 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 1883 return -EINVAL; 1884 1885 - scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 1886 if (!scratch_pt) { 1887 gvt_err("fail to allocate scratch page\n"); 1888 return -ENOMEM; 1889 } 1890 1891 - p = kmap_atomic(scratch_pt); 1892 - mfn = intel_gvt_hypervisor_virt_to_mfn(p); 1893 if (mfn == INTEL_GVT_INVALID_ADDR) { 1894 - gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); 1895 - kunmap_atomic(p); 1896 - __free_page(scratch_pt); 1897 return -EFAULT; 1898 } 1899 gtt->scratch_pt[type].page_mfn = mfn; 1900 - gtt->scratch_pt[type].page = scratch_pt; 1901 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 1902 vgpu->id, type, mfn); 1903 ··· 1903 * scratch_pt[type] indicate the scratch pt/scratch page used by the 1904 * 'type' pt. 1905 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 1906 - * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self 1907 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 
1908 */ 1909 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { ··· 1921 se.val64 |= PPAT_CACHED_INDEX; 1922 1923 for (i = 0; i < page_entry_num; i++) 1924 - ops->set_entry(p, &se, i, false, 0, vgpu); 1925 } 1926 - 1927 - kunmap_atomic(p); 1928 1929 return 0; 1930 } ··· 2191 int intel_gvt_init_gtt(struct intel_gvt *gvt) 2192 { 2193 int ret; 2194 - void *page_addr; 2195 2196 gvt_dbg_core("init gtt\n"); 2197 ··· 2204 return -ENODEV; 2205 } 2206 2207 - gvt->gtt.scratch_ggtt_page = 2208 - alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 2209 - if (!gvt->gtt.scratch_ggtt_page) { 2210 gvt_err("fail to allocate scratch ggtt page\n"); 2211 return -ENOMEM; 2212 } 2213 2214 - page_addr = page_address(gvt->gtt.scratch_ggtt_page); 2215 - 2216 - gvt->gtt.scratch_ggtt_mfn = 2217 - intel_gvt_hypervisor_virt_to_mfn(page_addr); 2218 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { 2219 gvt_err("fail to translate scratch ggtt page\n"); 2220 __free_page(gvt->gtt.scratch_ggtt_page); ··· 2276 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2277 for (offset = 0; offset < num_entries; offset++) 2278 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2279 }
··· 240 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) 241 { 242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 243 244 + return readq(addr); 245 } 246 247 static void write_pte64(struct drm_i915_private *dev_priv, ··· 256 { 257 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 258 259 writeq(pte, addr); 260 + 261 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 262 POSTING_READ(GFX_FLSH_CNTL_GEN6); 263 } ··· 1380 info->gtt_entry_size; 1381 mem = kzalloc(mm->has_shadow_page_table ? 1382 mm->page_table_entry_size * 2 1383 + : mm->page_table_entry_size, GFP_KERNEL); 1384 if (!mem) 1385 return -ENOMEM; 1386 mm->virtual_page_table = mem; ··· 1532 struct intel_vgpu_mm *mm; 1533 int ret; 1534 1535 + mm = kzalloc(sizeof(*mm), GFP_KERNEL); 1536 if (!mm) { 1537 ret = -ENOMEM; 1538 goto fail; ··· 1886 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1887 int page_entry_num = GTT_PAGE_SIZE >> 1888 vgpu->gvt->device_info.gtt_entry_size_shift; 1889 + void *scratch_pt; 1890 unsigned long mfn; 1891 int i; 1892 1893 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 1894 return -EINVAL; 1895 1896 + scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 1897 if (!scratch_pt) { 1898 gvt_err("fail to allocate scratch page\n"); 1899 return -ENOMEM; 1900 } 1901 1902 + mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); 1903 if (mfn == INTEL_GVT_INVALID_ADDR) { 1904 + gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); 1905 + free_page((unsigned long)scratch_pt); 1906 return -EFAULT; 1907 } 1908 gtt->scratch_pt[type].page_mfn = mfn; 1909 + gtt->scratch_pt[type].page = virt_to_page(scratch_pt); 1910 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 1911 vgpu->id, type, mfn); 1912 ··· 1918 * scratch_pt[type] indicate the scratch pt/scratch page used by the 1919 * 'type' pt. 1920 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 1921 + * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self 1922 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 1923 */ 1924 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { ··· 1936 se.val64 |= PPAT_CACHED_INDEX; 1937 1938 for (i = 0; i < page_entry_num; i++) 1939 + ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); 1940 } 1941 1942 return 0; 1943 } ··· 2208 int intel_gvt_init_gtt(struct intel_gvt *gvt) 2209 { 2210 int ret; 2211 + void *page; 2212 2213 gvt_dbg_core("init gtt\n"); 2214 ··· 2221 return -ENODEV; 2222 } 2223 2224 + page = (void *)get_zeroed_page(GFP_KERNEL); 2225 + if (!page) { 2226 gvt_err("fail to allocate scratch ggtt page\n"); 2227 return -ENOMEM; 2228 } 2229 + gvt->gtt.scratch_ggtt_page = virt_to_page(page); 2230 2231 + gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); 2232 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { 2233 gvt_err("fail to translate scratch ggtt page\n"); 2234 __free_page(gvt->gtt.scratch_ggtt_page); ··· 2296 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2297 for (offset = 0; offset < num_entries; offset++) 2298 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2299 + } 2300 + 2301 + /** 2302 + * intel_vgpu_reset_gtt - reset the all GTT related status 2303 + * @vgpu: a vGPU 2304 + * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset 2305 + * 2306 + * This function is called from vfio core to reset reset all 2307 + * GTT related status, including GGTT, PPGTT, scratch page. 
2308 + * 2309 + */ 2310 + void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) 2311 + { 2312 + int i; 2313 + 2314 + ppgtt_free_all_shadow_page(vgpu); 2315 + if (!dmlr) 2316 + return; 2317 + 2318 + intel_vgpu_reset_ggtt(vgpu); 2319 + 2320 + /* clear scratch page for security */ 2321 + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { 2322 + if (vgpu->gtt.scratch_pt[i].page != NULL) 2323 + memset(page_address(vgpu->gtt.scratch_pt[i].page), 2324 + 0, PAGE_SIZE); 2325 + } 2326 }
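
The scratch page-table comment in the gtt.c hunk above is the subtle part: the scratch table that backs a given level is itself a table of the level below, with every entry pointing at the scratch object one level further down. A minimal stand-alone sketch of that chain, purely illustrative (the names, toy 4-entry tables and made-up frame numbers are not GVT code):

#include <stdio.h>

/* Levels as in the comment: a PDE-level scratch table is itself a
 * PTE-level table, fully filled with the scratch page mfn, and so on. */
enum level { SCRATCH_PAGE, PTE_PT, PDE_PT, PDP_PT, PML4_PT, LEVEL_MAX };

static const char *name[LEVEL_MAX] = {
	"scratch data page", "scratch PTE table", "scratch PDE table",
	"scratch PDP table", "scratch PML4 table",
};

int main(void)
{
	unsigned long mfn[LEVEL_MAX];
	unsigned long entry[LEVEL_MAX][4];	/* toy 4-entry tables */

	mfn[SCRATCH_PAGE] = 0x1000;		/* made-up frame numbers */
	for (int t = PTE_PT; t < LEVEL_MAX; t++) {
		mfn[t] = 0x1000ul * (t + 1);
		for (int i = 0; i < 4; i++)	/* mirrors the set_entry() fill loop */
			entry[t][i] = mfn[t - 1];
	}

	for (int t = PTE_PT; t < LEVEL_MAX; t++)
		printf("%s (mfn %#lx): every entry -> %s (mfn %#lx)\n",
		       name[t], mfn[t], name[t - 1], entry[t][0]);
	return 0;
}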
+1
drivers/gpu/drm/i915/gvt/gtt.h
··· 208 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 209 210 extern int intel_gvt_init_gtt(struct intel_gvt *gvt); 211 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); 212 213 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
··· 208 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 209 210 extern int intel_gvt_init_gtt(struct intel_gvt *gvt); 211 + extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr); 212 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); 213 214 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+7 -1
drivers/gpu/drm/i915/gvt/gvt.c
··· 201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); 202 intel_gvt_clean_vgpu_types(gvt); 203 204 kfree(dev_priv->gvt); 205 dev_priv->gvt = NULL; 206 } ··· 239 240 gvt_dbg_core("init gvt device\n"); 241 242 mutex_init(&gvt->lock); 243 gvt->dev_priv = dev_priv; 244 ··· 248 249 ret = intel_gvt_setup_mmio_info(gvt); 250 if (ret) 251 - return ret; 252 253 ret = intel_gvt_load_firmware(gvt); 254 if (ret) ··· 317 intel_gvt_free_firmware(gvt); 318 out_clean_mmio_info: 319 intel_gvt_clean_mmio_info(gvt); 320 kfree(gvt); 321 return ret; 322 }
··· 201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); 202 intel_gvt_clean_vgpu_types(gvt); 203 204 + idr_destroy(&gvt->vgpu_idr); 205 + 206 kfree(dev_priv->gvt); 207 dev_priv->gvt = NULL; 208 } ··· 237 238 gvt_dbg_core("init gvt device\n"); 239 240 + idr_init(&gvt->vgpu_idr); 241 + 242 mutex_init(&gvt->lock); 243 gvt->dev_priv = dev_priv; 244 ··· 244 245 ret = intel_gvt_setup_mmio_info(gvt); 246 if (ret) 247 + goto out_clean_idr; 248 249 ret = intel_gvt_load_firmware(gvt); 250 if (ret) ··· 313 intel_gvt_free_firmware(gvt); 314 out_clean_mmio_info: 315 intel_gvt_clean_mmio_info(gvt); 316 + out_clean_idr: 317 + idr_destroy(&gvt->vgpu_idr); 318 kfree(gvt); 319 return ret; 320 }
+7 -1
drivers/gpu/drm/i915/gvt/gvt.h
··· 323 324 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 325 struct intel_vgpu_creation_params *param); 326 void intel_vgpu_free_resource(struct intel_vgpu *vgpu); 327 void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 328 u32 fence, u64 value); ··· 376 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 377 struct intel_vgpu_type *type); 378 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 379 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 380 381 ··· 414 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 415 unsigned long *g_index); 416 417 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, 418 void *p_data, unsigned int bytes); 419 ··· 431 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); 432 433 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); 434 - int setup_vgpu_mmio(struct intel_vgpu *vgpu); 435 void populate_pvinfo_page(struct intel_vgpu *vgpu); 436 437 struct intel_gvt_ops {
··· 323 324 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 325 struct intel_vgpu_creation_params *param); 326 + void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); 327 void intel_vgpu_free_resource(struct intel_vgpu *vgpu); 328 void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 329 u32 fence, u64 value); ··· 375 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 376 struct intel_vgpu_type *type); 377 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 378 + void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 379 + unsigned int engine_mask); 380 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 381 382 ··· 411 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 412 unsigned long *g_index); 413 414 + void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, 415 + bool primary); 416 + void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); 417 + 418 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, 419 void *p_data, unsigned int bytes); 420 ··· 424 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); 425 426 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); 427 void populate_pvinfo_page(struct intel_vgpu *vgpu); 428 429 struct intel_gvt_ops {
+35 -68
drivers/gpu/drm/i915/gvt/handlers.c
··· 93 static int new_mmio_info(struct intel_gvt *gvt, 94 u32 offset, u32 flags, u32 size, 95 u32 addr_mask, u32 ro_mask, u32 device, 96 - void *read, void *write) 97 { 98 struct intel_gvt_mmio_info *info, *p; 99 u32 start, end, i; ··· 220 default: 221 /*should not hit here*/ 222 gvt_err("invalid forcewake offset 0x%x\n", offset); 223 - return 1; 224 } 225 } else { 226 ack_reg_offset = FORCEWAKE_ACK_HSW_REG; ··· 231 return 0; 232 } 233 234 - static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, 235 - void *p_data, unsigned int bytes, unsigned long bitmap) 236 - { 237 - struct intel_gvt_workload_scheduler *scheduler = 238 - &vgpu->gvt->scheduler; 239 - 240 - vgpu->resetting = true; 241 - 242 - intel_vgpu_stop_schedule(vgpu); 243 - /* 244 - * The current_vgpu will set to NULL after stopping the 245 - * scheduler when the reset is triggered by current vgpu. 246 - */ 247 - if (scheduler->current_vgpu == NULL) { 248 - mutex_unlock(&vgpu->gvt->lock); 249 - intel_gvt_wait_vgpu_idle(vgpu); 250 - mutex_lock(&vgpu->gvt->lock); 251 - } 252 - 253 - intel_vgpu_reset_execlist(vgpu, bitmap); 254 - 255 - /* full GPU reset */ 256 - if (bitmap == 0xff) { 257 - mutex_unlock(&vgpu->gvt->lock); 258 - intel_vgpu_clean_gtt(vgpu); 259 - mutex_lock(&vgpu->gvt->lock); 260 - setup_vgpu_mmio(vgpu); 261 - populate_pvinfo_page(vgpu); 262 - intel_vgpu_init_gtt(vgpu); 263 - } 264 - 265 - vgpu->resetting = false; 266 - 267 - return 0; 268 - } 269 - 270 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 271 - void *p_data, unsigned int bytes) 272 { 273 u32 data; 274 - u64 bitmap = 0; 275 276 write_vreg(vgpu, offset, p_data, bytes); 277 data = vgpu_vreg(vgpu, offset); 278 279 if (data & GEN6_GRDOM_FULL) { 280 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); 281 - bitmap = 0xff; 282 } 283 - if (data & GEN6_GRDOM_RENDER) { 284 - gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 285 - bitmap |= (1 << RCS); 286 - } 287 - if (data & GEN6_GRDOM_MEDIA) { 288 - gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 289 - bitmap |= (1 << VCS); 290 - } 291 - if (data & GEN6_GRDOM_BLT) { 292 - gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 293 - bitmap |= (1 << BCS); 294 - } 295 - if (data & GEN6_GRDOM_VECS) { 296 - gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 297 - bitmap |= (1 << VECS); 298 - } 299 - if (data & GEN8_GRDOM_MEDIA2) { 300 - gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 301 - if (HAS_BSD2(vgpu->gvt->dev_priv)) 302 - bitmap |= (1 << VCS2); 303 - } 304 - return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); 305 } 306 307 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, ··· 943 return 0; 944 } 945 946 - static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 947 void *p_data, unsigned int bytes) 948 { 949 u32 data; ··· 1335 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, 1336 unsigned int offset, void *p_data, unsigned int bytes) 1337 { 1338 - int rc = 0; 1339 unsigned int id = 0; 1340 1341 write_vreg(vgpu, offset, p_data, bytes); ··· 1357 id = VECS; 1358 break; 1359 default: 1360 - rc = -EINVAL; 1361 - break; 1362 } 1363 set_bit(id, (void *)vgpu->tlb_handle_pending); 1364 1365 - return rc; 1366 } 1367 1368 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
··· 93 static int new_mmio_info(struct intel_gvt *gvt, 94 u32 offset, u32 flags, u32 size, 95 u32 addr_mask, u32 ro_mask, u32 device, 96 + int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), 97 + int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) 98 { 99 struct intel_gvt_mmio_info *info, *p; 100 u32 start, end, i; ··· 219 default: 220 /*should not hit here*/ 221 gvt_err("invalid forcewake offset 0x%x\n", offset); 222 + return -EINVAL; 223 } 224 } else { 225 ack_reg_offset = FORCEWAKE_ACK_HSW_REG; ··· 230 return 0; 231 } 232 233 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 234 + void *p_data, unsigned int bytes) 235 { 236 + unsigned int engine_mask = 0; 237 u32 data; 238 239 write_vreg(vgpu, offset, p_data, bytes); 240 data = vgpu_vreg(vgpu, offset); 241 242 if (data & GEN6_GRDOM_FULL) { 243 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); 244 + engine_mask = ALL_ENGINES; 245 + } else { 246 + if (data & GEN6_GRDOM_RENDER) { 247 + gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 248 + engine_mask |= (1 << RCS); 249 + } 250 + if (data & GEN6_GRDOM_MEDIA) { 251 + gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 252 + engine_mask |= (1 << VCS); 253 + } 254 + if (data & GEN6_GRDOM_BLT) { 255 + gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 256 + engine_mask |= (1 << BCS); 257 + } 258 + if (data & GEN6_GRDOM_VECS) { 259 + gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 260 + engine_mask |= (1 << VECS); 261 + } 262 + if (data & GEN8_GRDOM_MEDIA2) { 263 + gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 264 + if (HAS_BSD2(vgpu->gvt->dev_priv)) 265 + engine_mask |= (1 << VCS2); 266 + } 267 } 268 + 269 + intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); 270 + 271 + return 0; 272 } 273 274 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, ··· 974 return 0; 975 } 976 977 + static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 978 void *p_data, unsigned int bytes) 979 { 980 u32 data; ··· 1366 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, 1367 unsigned int offset, void *p_data, unsigned int bytes) 1368 { 1369 unsigned int id = 0; 1370 1371 write_vreg(vgpu, offset, p_data, bytes); ··· 1389 id = VECS; 1390 break; 1391 default: 1392 + return -EINVAL; 1393 } 1394 set_bit(id, (void *)vgpu->tlb_handle_pending); 1395 1396 + return 0; 1397 } 1398 1399 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+10 -4
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 398 struct intel_vgpu_type *type; 399 struct device *pdev; 400 void *gvt; 401 402 pdev = mdev_parent_dev(mdev); 403 gvt = kdev_to_i915(pdev)->gvt; ··· 407 if (!type) { 408 gvt_err("failed to find type %s to create\n", 409 kobject_name(kobj)); 410 - return -EINVAL; 411 } 412 413 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 414 if (IS_ERR_OR_NULL(vgpu)) { 415 - gvt_err("create intel vgpu failed\n"); 416 - return -EINVAL; 417 } 418 419 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); ··· 425 426 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", 427 dev_name(mdev_dev(mdev))); 428 - return 0; 429 } 430 431 static int intel_vgpu_remove(struct mdev_device *mdev)
··· 398 struct intel_vgpu_type *type; 399 struct device *pdev; 400 void *gvt; 401 + int ret; 402 403 pdev = mdev_parent_dev(mdev); 404 gvt = kdev_to_i915(pdev)->gvt; ··· 406 if (!type) { 407 gvt_err("failed to find type %s to create\n", 408 kobject_name(kobj)); 409 + ret = -EINVAL; 410 + goto out; 411 } 412 413 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 414 if (IS_ERR_OR_NULL(vgpu)) { 415 + ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); 416 + gvt_err("failed to create intel vgpu: %d\n", ret); 417 + goto out; 418 } 419 420 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); ··· 422 423 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", 424 dev_name(mdev_dev(mdev))); 425 + ret = 0; 426 + 427 + out: 428 + return ret; 429 } 430 431 static int intel_vgpu_remove(struct mdev_device *mdev)
+69 -15
drivers/gpu/drm/i915/gvt/mmio.c
··· 125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) 126 goto err; 127 128 - mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 129 - if (!mmio && !vgpu->mmio.disable_warn_untrack) { 130 - gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", 131 - vgpu->id, offset, bytes, *(u32 *)p_data); 132 - 133 - if (offset == 0x206c) { 134 - gvt_err("------------------------------------------\n"); 135 - gvt_err("vgpu%d: likely triggers a gfx reset\n", 136 - vgpu->id); 137 - gvt_err("------------------------------------------\n"); 138 - vgpu->mmio.disable_warn_untrack = true; 139 - } 140 - } 141 - 142 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { 143 if (WARN_ON(!IS_ALIGNED(offset, bytes))) 144 goto err; 145 } 146 147 if (mmio) { 148 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { 149 if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) ··· 139 goto err; 140 } 141 ret = mmio->read(vgpu, offset, p_data, bytes); 142 - } else 143 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 144 145 if (ret) 146 goto err; ··· 302 vgpu->id, offset, bytes); 303 mutex_unlock(&gvt->lock); 304 return ret; 305 }
··· 125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) 126 goto err; 127 128 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { 129 if (WARN_ON(!IS_ALIGNED(offset, bytes))) 130 goto err; 131 } 132 133 + mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 134 if (mmio) { 135 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { 136 if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) ··· 152 goto err; 153 } 154 ret = mmio->read(vgpu, offset, p_data, bytes); 155 + } else { 156 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 157 + 158 + if (!vgpu->mmio.disable_warn_untrack) { 159 + gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", 160 + vgpu->id, offset, bytes, *(u32 *)p_data); 161 + 162 + if (offset == 0x206c) { 163 + gvt_err("------------------------------------------\n"); 164 + gvt_err("vgpu%d: likely triggers a gfx reset\n", 165 + vgpu->id); 166 + gvt_err("------------------------------------------\n"); 167 + vgpu->mmio.disable_warn_untrack = true; 168 + } 169 + } 170 + } 171 172 if (ret) 173 goto err; ··· 301 vgpu->id, offset, bytes); 302 mutex_unlock(&gvt->lock); 303 return ret; 304 + } 305 + 306 + 307 + /** 308 + * intel_vgpu_reset_mmio - reset virtual MMIO space 309 + * @vgpu: a vGPU 310 + * 311 + */ 312 + void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) 313 + { 314 + struct intel_gvt *gvt = vgpu->gvt; 315 + const struct intel_gvt_device_info *info = &gvt->device_info; 316 + 317 + memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); 318 + memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); 319 + 320 + vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; 321 + 322 + /* set the bit 0:2(Core C-State ) to C0 */ 323 + vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 324 + } 325 + 326 + /** 327 + * intel_vgpu_init_mmio - init MMIO space 328 + * @vgpu: a vGPU 329 + * 330 + * Returns: 331 + * Zero on success, negative error code if failed 332 + */ 333 + int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) 334 + { 335 + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 336 + 337 + vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); 338 + if (!vgpu->mmio.vreg) 339 + return -ENOMEM; 340 + 341 + vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; 342 + 343 + intel_vgpu_reset_mmio(vgpu); 344 + 345 + return 0; 346 + } 347 + 348 + /** 349 + * intel_vgpu_clean_mmio - clean MMIO space 350 + * @vgpu: a vGPU 351 + * 352 + */ 353 + void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) 354 + { 355 + vfree(vgpu->mmio.vreg); 356 + vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; 357 }
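
Taken together, the new mmio.c helpers replace the old setup_vgpu_mmio with an allocate-once / reset-many lifecycle: intel_vgpu_init_mmio does a single vzalloc of twice mmio_size and points sreg at the second half, intel_vgpu_reset_mmio re-copies the firmware MMIO snapshot into both halves (init itself calls reset), and intel_vgpu_clean_mmio frees the one allocation. A rough user-space model of that layout, with hypothetical names and plain libc calls standing in for the kernel allocators:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the vGPU MMIO bookkeeping in the hunk above. */
struct mmio_space {
	char *vreg;	/* guest-visible registers               */
	char *sreg;	/* shadow copy, second half of same buffer */
};

static int mmio_init(struct mmio_space *m, size_t sz)
{
	m->vreg = calloc(2, sz);	/* mirrors vzalloc(mmio_size * 2) */
	if (!m->vreg)
		return -1;
	m->sreg = m->vreg + sz;		/* sreg = vreg + mmio_size */
	return 0;
}

static void mmio_reset(struct mmio_space *m, const void *fw_snapshot, size_t sz)
{
	/* reset == reload the firmware-captured register image */
	memcpy(m->vreg, fw_snapshot, sz);
	memcpy(m->sreg, fw_snapshot, sz);
}

static void mmio_clean(struct mmio_space *m)
{
	free(m->vreg);			/* one allocation backs both halves */
	m->vreg = m->sreg = NULL;
}

int main(void)
{
	static const char fw[64] = "mmio snapshot";
	struct mmio_space m;

	if (mmio_init(&m, sizeof(fw)))
		return 1;
	mmio_reset(&m, fw, sizeof(fw));	/* init calls reset in the real code */
	mmio_clean(&m);
	return 0;
}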
+4
drivers/gpu/drm/i915/gvt/mmio.h
··· 86 *offset; \ 87 }) 88 89 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); 90 91 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
··· 86 *offset; \ 87 }) 88 89 + int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); 90 + void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); 91 + void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); 92 + 93 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); 94 95 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
+4 -4
drivers/gpu/drm/i915/gvt/opregion.c
··· 36 vgpu->id)) 37 return -EINVAL; 38 39 - vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | 40 - GFP_DMA32 | __GFP_ZERO, 41 - INTEL_GVT_OPREGION_PORDER); 42 43 if (!vgpu_opregion(vgpu)->va) 44 return -ENOMEM; ··· 97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { 98 map_vgpu_opregion(vgpu, false); 99 free_pages((unsigned long)vgpu_opregion(vgpu)->va, 100 - INTEL_GVT_OPREGION_PORDER); 101 102 vgpu_opregion(vgpu)->va = NULL; 103 }
··· 36 vgpu->id)) 37 return -EINVAL; 38 39 + vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | 40 + __GFP_ZERO, 41 + get_order(INTEL_GVT_OPREGION_SIZE)); 42 43 if (!vgpu_opregion(vgpu)->va) 44 return -ENOMEM; ··· 97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { 98 map_vgpu_opregion(vgpu, false); 99 free_pages((unsigned long)vgpu_opregion(vgpu)->va, 100 + get_order(INTEL_GVT_OPREGION_SIZE)); 101 102 vgpu_opregion(vgpu)->va = NULL; 103 }
+1 -2
drivers/gpu/drm/i915/gvt/reg.h
··· 50 #define INTEL_GVT_OPREGION_PARM 0x204 51 52 #define INTEL_GVT_OPREGION_PAGES 2 53 - #define INTEL_GVT_OPREGION_PORDER 1 54 - #define INTEL_GVT_OPREGION_SIZE (2 * 4096) 55 56 #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) 57
··· 50 #define INTEL_GVT_OPREGION_PARM 0x204 51 52 #define INTEL_GVT_OPREGION_PAGES 2 53 + #define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) 54 55 #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) 56
+7 -7
drivers/gpu/drm/i915/gvt/scheduler.c
··· 350 { 351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 352 struct intel_vgpu_workload *workload; 353 int event; 354 355 mutex_lock(&gvt->lock); 356 357 workload = scheduler->current_workload[ring_id]; 358 359 - if (!workload->status && !workload->vgpu->resetting) { 360 wait_event(workload->shadow_ctx_status_wq, 361 !atomic_read(&workload->shadow_ctx_active)); 362 ··· 366 367 for_each_set_bit(event, workload->pending_events, 368 INTEL_GVT_EVENT_MAX) 369 - intel_vgpu_trigger_virtual_event(workload->vgpu, 370 - event); 371 } 372 373 gvt_dbg_sched("ring id %d complete workload %p status %d\n", ··· 374 375 scheduler->current_workload[ring_id] = NULL; 376 377 - atomic_dec(&workload->vgpu->running_workload_num); 378 - 379 list_del_init(&workload->list); 380 workload->complete(workload); 381 382 wake_up(&scheduler->workload_complete_wq); 383 mutex_unlock(&gvt->lock); 384 } ··· 459 gvt_dbg_sched("will complete workload %p\n, status: %d\n", 460 workload, workload->status); 461 462 - complete_current_workload(gvt, ring_id); 463 - 464 if (workload->req) 465 i915_gem_request_put(fetch_and_zero(&workload->req)); 466 467 if (need_force_wake) 468 intel_uncore_forcewake_put(gvt->dev_priv,
··· 350 { 351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 352 struct intel_vgpu_workload *workload; 353 + struct intel_vgpu *vgpu; 354 int event; 355 356 mutex_lock(&gvt->lock); 357 358 workload = scheduler->current_workload[ring_id]; 359 + vgpu = workload->vgpu; 360 361 + if (!workload->status && !vgpu->resetting) { 362 wait_event(workload->shadow_ctx_status_wq, 363 !atomic_read(&workload->shadow_ctx_active)); 364 ··· 364 365 for_each_set_bit(event, workload->pending_events, 366 INTEL_GVT_EVENT_MAX) 367 + intel_vgpu_trigger_virtual_event(vgpu, event); 368 } 369 370 gvt_dbg_sched("ring id %d complete workload %p status %d\n", ··· 373 374 scheduler->current_workload[ring_id] = NULL; 375 376 list_del_init(&workload->list); 377 workload->complete(workload); 378 379 + atomic_dec(&vgpu->running_workload_num); 380 wake_up(&scheduler->workload_complete_wq); 381 mutex_unlock(&gvt->lock); 382 } ··· 459 gvt_dbg_sched("will complete workload %p\n, status: %d\n", 460 workload, workload->status); 461 462 if (workload->req) 463 i915_gem_request_put(fetch_and_zero(&workload->req)); 464 + 465 + complete_current_workload(gvt, ring_id); 466 467 if (need_force_wake) 468 intel_uncore_forcewake_put(gvt->dev_priv,
+81 -81
drivers/gpu/drm/i915/gvt/vgpu.c
··· 35 #include "gvt.h" 36 #include "i915_pvinfo.h" 37 38 - static void clean_vgpu_mmio(struct intel_vgpu *vgpu) 39 - { 40 - vfree(vgpu->mmio.vreg); 41 - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; 42 - } 43 - 44 - int setup_vgpu_mmio(struct intel_vgpu *vgpu) 45 - { 46 - struct intel_gvt *gvt = vgpu->gvt; 47 - const struct intel_gvt_device_info *info = &gvt->device_info; 48 - 49 - if (vgpu->mmio.vreg) 50 - memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); 51 - else { 52 - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); 53 - if (!vgpu->mmio.vreg) 54 - return -ENOMEM; 55 - } 56 - 57 - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; 58 - 59 - memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); 60 - memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); 61 - 62 - vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; 63 - 64 - /* set the bit 0:2(Core C-State ) to C0 */ 65 - vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 66 - return 0; 67 - } 68 - 69 - static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, 70 - struct intel_vgpu_creation_params *param) 71 - { 72 - struct intel_gvt *gvt = vgpu->gvt; 73 - const struct intel_gvt_device_info *info = &gvt->device_info; 74 - u16 *gmch_ctl; 75 - int i; 76 - 77 - memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, 78 - info->cfg_space_size); 79 - 80 - if (!param->primary) { 81 - vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = 82 - INTEL_GVT_PCI_CLASS_VGA_OTHER; 83 - vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = 84 - INTEL_GVT_PCI_CLASS_VGA_OTHER; 85 - } 86 - 87 - /* Show guest that there isn't any stolen memory.*/ 88 - gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); 89 - *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); 90 - 91 - intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, 92 - gvt_aperture_pa_base(gvt), true); 93 - 94 - vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO 95 - | PCI_COMMAND_MEMORY 96 - | PCI_COMMAND_MASTER); 97 - /* 98 - * Clear the bar upper 32bit and let guest to assign the new value 99 - */ 100 - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); 101 - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); 102 - memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); 103 - 104 - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { 105 - vgpu->cfg_space.bar[i].size = pci_resource_len( 106 - gvt->dev_priv->drm.pdev, i * 2); 107 - vgpu->cfg_space.bar[i].tracked = false; 108 - } 109 - } 110 - 111 void populate_pvinfo_page(struct intel_vgpu *vgpu) 112 { 113 /* setup the ballooning information */ ··· 104 if (low_avail / min_low == 0) 105 break; 106 gvt->types[i].low_gm_size = min_low; 107 - gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; 108 gvt->types[i].fence = 4; 109 gvt->types[i].max_instance = low_avail / min_low; 110 gvt->types[i].avail_instance = gvt->types[i].max_instance; ··· 144 */ 145 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - 146 gvt->gm.vgpu_allocated_low_gm_size; 147 - high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - 148 gvt->gm.vgpu_allocated_high_gm_size; 149 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - 150 gvt->fence.vgpu_allocated_fence_num; ··· 195 intel_vgpu_clean_gtt(vgpu); 196 intel_gvt_hypervisor_detach_vgpu(vgpu); 197 intel_vgpu_free_resource(vgpu); 198 - clean_vgpu_mmio(vgpu); 199 vfree(vgpu); 200 201 intel_gvt_update_vgpu_types(gvt); ··· 227 vgpu->gvt = gvt; 228 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); 229 230 - setup_vgpu_cfg_space(vgpu, param); 231 232 - ret = setup_vgpu_mmio(vgpu); 233 if (ret) 234 - goto 
out_free_vgpu; 235 236 ret = intel_vgpu_alloc_resource(vgpu, param); 237 if (ret) ··· 281 out_clean_vgpu_resource: 282 intel_vgpu_free_resource(vgpu); 283 out_clean_vgpu_mmio: 284 - clean_vgpu_mmio(vgpu); 285 out_free_vgpu: 286 vfree(vgpu); 287 mutex_unlock(&gvt->lock); ··· 327 } 328 329 /** 330 - * intel_gvt_reset_vgpu - reset a virtual GPU 331 * @vgpu: virtual GPU 332 * 333 * This function is called when user wants to reset a virtual GPU. ··· 403 */ 404 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) 405 { 406 }
··· 35 #include "gvt.h" 36 #include "i915_pvinfo.h" 37 38 void populate_pvinfo_page(struct intel_vgpu *vgpu) 39 { 40 /* setup the ballooning information */ ··· 177 if (low_avail / min_low == 0) 178 break; 179 gvt->types[i].low_gm_size = min_low; 180 + gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); 181 gvt->types[i].fence = 4; 182 gvt->types[i].max_instance = low_avail / min_low; 183 gvt->types[i].avail_instance = gvt->types[i].max_instance; ··· 217 */ 218 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - 219 gvt->gm.vgpu_allocated_low_gm_size; 220 + high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - 221 gvt->gm.vgpu_allocated_high_gm_size; 222 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - 223 gvt->fence.vgpu_allocated_fence_num; ··· 268 intel_vgpu_clean_gtt(vgpu); 269 intel_gvt_hypervisor_detach_vgpu(vgpu); 270 intel_vgpu_free_resource(vgpu); 271 + intel_vgpu_clean_mmio(vgpu); 272 vfree(vgpu); 273 274 intel_gvt_update_vgpu_types(gvt); ··· 300 vgpu->gvt = gvt; 301 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); 302 303 + intel_vgpu_init_cfg_space(vgpu, param->primary); 304 305 + ret = intel_vgpu_init_mmio(vgpu); 306 if (ret) 307 + goto out_clean_idr; 308 309 ret = intel_vgpu_alloc_resource(vgpu, param); 310 if (ret) ··· 354 out_clean_vgpu_resource: 355 intel_vgpu_free_resource(vgpu); 356 out_clean_vgpu_mmio: 357 + intel_vgpu_clean_mmio(vgpu); 358 + out_clean_idr: 359 + idr_remove(&gvt->vgpu_idr, vgpu->id); 360 out_free_vgpu: 361 vfree(vgpu); 362 mutex_unlock(&gvt->lock); ··· 398 } 399 400 /** 401 + * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset 402 + * @vgpu: virtual GPU 403 + * @dmlr: vGPU Device Model Level Reset or GT Reset 404 + * @engine_mask: engines to reset for GT reset 405 + * 406 + * This function is called when user wants to reset a virtual GPU through 407 + * device model reset or GT reset. The caller should hold the gvt lock. 408 + * 409 + * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset 410 + * the whole vGPU to its default state as when it is created. This vGPU function 411 + * is required both for functionality and security concerns. The ultimate goal 412 + * of vGPU FLR is that a vGPU instance can be reused by virtual machines. When we 413 + * assign a vGPU to a virtual machine we must issue such a reset first. 414 + * 415 + * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines 416 + * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU Spec. 417 + * Unlike the FLR, GT reset only resets particular resources of a vGPU per 418 + * the reset request. The guest driver can issue a GT reset by programming the 419 + * virtual GDRST register to reset a specific virtual GPU engine or all 420 + * engines. 421 + * 422 + * The parameter dmlr identifies whether we will do a DMLR or a GT reset. 423 + * The parameter engine_mask specifies the engines that need to be 424 + * reset. If ALL_ENGINES is given for engine_mask, it means 425 + * the caller requests a full GT reset and we will reset all virtual 426 + * GPU engines. For FLR, engine_mask is ignored.
427 + */ 428 + void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 429 + unsigned int engine_mask) 430 + { 431 + struct intel_gvt *gvt = vgpu->gvt; 432 + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 433 + 434 + gvt_dbg_core("------------------------------------------\n"); 435 + gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", 436 + vgpu->id, dmlr, engine_mask); 437 + vgpu->resetting = true; 438 + 439 + intel_vgpu_stop_schedule(vgpu); 440 + /* 441 + * The current_vgpu will be set to NULL after stopping the 442 + * scheduler when the reset is triggered by current vgpu. 443 + */ 444 + if (scheduler->current_vgpu == NULL) { 445 + mutex_unlock(&gvt->lock); 446 + intel_gvt_wait_vgpu_idle(vgpu); 447 + mutex_lock(&gvt->lock); 448 + } 449 + 450 + intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); 451 + 452 + /* full GPU reset or device model level reset */ 453 + if (engine_mask == ALL_ENGINES || dmlr) { 454 + intel_vgpu_reset_gtt(vgpu, dmlr); 455 + intel_vgpu_reset_resource(vgpu); 456 + intel_vgpu_reset_mmio(vgpu); 457 + populate_pvinfo_page(vgpu); 458 + 459 + if (dmlr) 460 + intel_vgpu_reset_cfg_space(vgpu); 461 + } 462 + 463 + vgpu->resetting = false; 464 + gvt_dbg_core("reset vgpu%d done\n", vgpu->id); 465 + gvt_dbg_core("------------------------------------------\n"); 466 + } 467 + 468 + /** 469 + * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) 470 * @vgpu: virtual GPU 471 * 472 * This function is called when user wants to reset a virtual GPU. ··· 406 */ 407 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) 408 { 409 + mutex_lock(&vgpu->gvt->lock); 410 + intel_gvt_reset_vgpu_locked(vgpu, true, 0); 411 + mutex_unlock(&vgpu->gvt->lock); 412 }
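
With the hunks above, every reset path now funnels through intel_gvt_reset_vgpu_locked: the GDRST handler passes dmlr=false plus the engine mask it decoded, while intel_gvt_reset_vgpu takes the gvt lock and asks for a full device-model-level reset. The sketch below is a small user-space model of just that decision table (stub functions and an illustrative ALL_ENGINES value, not GVT code) to spell out what each combination touches:

#include <stdbool.h>
#include <stdio.h>

#define ALL_ENGINES (~0u)	/* illustrative stand-in for the i915 macro */

/* Stubs for the per-subsystem resets called in the hunk above. */
static void reset_execlist(unsigned int mask) { printf("  execlists %#x\n", mask); }
static void reset_gtt(bool dmlr)	{ printf("  gtt (dmlr=%d)\n", dmlr); }
static void reset_resource(void)	{ puts("  resource"); }
static void reset_mmio_pvinfo(void)	{ puts("  mmio + pvinfo"); }
static void reset_cfg_space(void)	{ puts("  cfg space"); }

static void vgpu_reset_locked(bool dmlr, unsigned int engine_mask)
{
	printf("reset: dmlr=%d engine_mask=%#x\n", dmlr, engine_mask);

	reset_execlist(dmlr ? ALL_ENGINES : engine_mask);

	/* Full GT reset or device-model-level reset rebuilds vGPU state. */
	if (engine_mask == ALL_ENGINES || dmlr) {
		reset_gtt(dmlr);
		reset_resource();
		reset_mmio_pvinfo();
		if (dmlr)		/* only DMLR touches PCI config space */
			reset_cfg_space();
	}
}

int main(void)
{
	vgpu_reset_locked(false, 1u << 0);	/* guest GDRST of a single engine */
	vgpu_reset_locked(false, ALL_ENGINES);	/* guest full GT reset */
	vgpu_reset_locked(true, 0);		/* DMLR, as in intel_gvt_reset_vgpu */
	return 0;
}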
+4 -30
drivers/gpu/drm/i915/i915_gem.c
··· 595 struct drm_i915_gem_pwrite *args, 596 struct drm_file *file) 597 { 598 - struct drm_device *dev = obj->base.dev; 599 void *vaddr = obj->phys_handle->vaddr + args->offset; 600 char __user *user_data = u64_to_user_ptr(args->data_ptr); 601 - int ret; 602 603 /* We manually control the domain here and pretend that it 604 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 605 */ 606 - lockdep_assert_held(&obj->base.dev->struct_mutex); 607 - ret = i915_gem_object_wait(obj, 608 - I915_WAIT_INTERRUPTIBLE | 609 - I915_WAIT_LOCKED | 610 - I915_WAIT_ALL, 611 - MAX_SCHEDULE_TIMEOUT, 612 - to_rps_client(file)); 613 - if (ret) 614 - return ret; 615 - 616 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 617 - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 618 - unsigned long unwritten; 619 - 620 - /* The physical object once assigned is fixed for the lifetime 621 - * of the obj, so we can safely drop the lock and continue 622 - * to access vaddr. 623 - */ 624 - mutex_unlock(&dev->struct_mutex); 625 - unwritten = copy_from_user(vaddr, user_data, args->size); 626 - mutex_lock(&dev->struct_mutex); 627 - if (unwritten) { 628 - ret = -EFAULT; 629 - goto out; 630 - } 631 - } 632 633 drm_clflush_virt_range(vaddr, args->size); 634 - i915_gem_chipset_flush(to_i915(dev)); 635 636 - out: 637 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 638 - return ret; 639 } 640 641 void *i915_gem_object_alloc(struct drm_device *dev)
··· 595 struct drm_i915_gem_pwrite *args, 596 struct drm_file *file) 597 { 598 void *vaddr = obj->phys_handle->vaddr + args->offset; 599 char __user *user_data = u64_to_user_ptr(args->data_ptr); 600 601 /* We manually control the domain here and pretend that it 602 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 603 */ 604 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 605 + if (copy_from_user(vaddr, user_data, args->size)) 606 + return -EFAULT; 607 608 drm_clflush_virt_range(vaddr, args->size); 609 + i915_gem_chipset_flush(to_i915(obj->base.dev)); 610 611 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 612 + return 0; 613 } 614 615 void *i915_gem_object_alloc(struct drm_device *dev)
+1
drivers/gpu/drm/i915/i915_gem_evict.c
··· 199 } 200 201 /* Unbinding will emit any required flushes */ 202 while (!list_empty(&eviction_list)) { 203 vma = list_first_entry(&eviction_list, 204 struct i915_vma,
··· 199 } 200 201 /* Unbinding will emit any required flushes */ 202 + ret = 0; 203 while (!list_empty(&eviction_list)) { 204 vma = list_first_entry(&eviction_list, 205 struct i915_vma,
+3
drivers/gpu/drm/i915/intel_display.c
··· 2967 unsigned int rotation = plane_state->base.rotation; 2968 int ret; 2969 2970 /* Rotate src coordinates to match rotated GTT view */ 2971 if (drm_rotation_90_or_270(rotation)) 2972 drm_rect_rotate(&plane_state->base.src,
··· 2967 unsigned int rotation = plane_state->base.rotation; 2968 int ret; 2969 2970 + if (!plane_state->base.visible) 2971 + return 0; 2972 + 2973 /* Rotate src coordinates to match rotated GTT view */ 2974 if (drm_rotation_90_or_270(rotation)) 2975 drm_rect_rotate(&plane_state->base.src,
+2 -2
drivers/gpu/drm/i915/intel_hotplug.c
··· 180 181 /* Enable polling and queue hotplug re-enabling. */ 182 if (hpd_disabled) { 183 - drm_kms_helper_poll_enable_locked(dev); 184 mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, 185 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); 186 } ··· 511 } 512 513 if (enabled) 514 - drm_kms_helper_poll_enable_locked(dev); 515 516 mutex_unlock(&dev->mode_config.mutex); 517
··· 180 181 /* Enable polling and queue hotplug re-enabling. */ 182 if (hpd_disabled) { 183 + drm_kms_helper_poll_enable(dev); 184 mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, 185 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); 186 } ··· 511 } 512 513 if (enabled) 514 + drm_kms_helper_poll_enable(dev); 515 516 mutex_unlock(&dev->mode_config.mutex); 517
-10
drivers/gpu/drm/i915/intel_lrc.c
··· 979 uint32_t *batch, 980 uint32_t index) 981 { 982 - struct drm_i915_private *dev_priv = engine->i915; 983 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 984 - 985 - /* 986 - * WaDisableLSQCROPERFforOCL:kbl 987 - * This WA is implemented in skl_init_clock_gating() but since 988 - * this batch updates GEN8_L3SQCREG4 with default value we need to 989 - * set this bit here to retain the WA during flush. 990 - */ 991 - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) 992 - l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; 993 994 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 995 MI_SRM_LRM_GLOBAL_GTT));
··· 979 uint32_t *batch, 980 uint32_t index) 981 { 982 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 983 984 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 985 MI_SRM_LRM_GLOBAL_GTT));
-8
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 1095 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1096 HDC_FENCE_DEST_SLM_DISABLE); 1097 1098 - /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1099 - * involving this register should also be added to WA batch as required. 1100 - */ 1101 - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) 1102 - /* WaDisableLSQCROPERFforOCL:kbl */ 1103 - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1104 - GEN8_LQSC_RO_PERF_DIS); 1105 - 1106 /* WaToEnableHwFixForPushConstHWBug:kbl */ 1107 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) 1108 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
··· 1095 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1096 HDC_FENCE_DEST_SLM_DISABLE); 1097 1098 /* WaToEnableHwFixForPushConstHWBug:kbl */ 1099 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) 1100 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+2 -3
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 345 { 346 struct adreno_platform_config *config = pdev->dev.platform_data; 347 struct msm_gpu *gpu = &adreno_gpu->base; 348 - struct msm_mmu *mmu; 349 int ret; 350 351 adreno_gpu->funcs = funcs; ··· 384 return ret; 385 } 386 387 - mmu = gpu->aspace->mmu; 388 - if (mmu) { 389 ret = mmu->funcs->attach(mmu, iommu_ports, 390 ARRAY_SIZE(iommu_ports)); 391 if (ret)
··· 345 { 346 struct adreno_platform_config *config = pdev->dev.platform_data; 347 struct msm_gpu *gpu = &adreno_gpu->base; 348 int ret; 349 350 adreno_gpu->funcs = funcs; ··· 385 return ret; 386 } 387 388 + if (gpu->aspace && gpu->aspace->mmu) { 389 + struct msm_mmu *mmu = gpu->aspace->mmu; 390 ret = mmu->funcs->attach(mmu, iommu_ports, 391 ARRAY_SIZE(iommu_ports)); 392 if (ret)
-6
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
··· 119 120 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 121 { 122 - int i; 123 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 124 - struct drm_plane *plane; 125 - struct drm_plane_state *plane_state; 126 - 127 - for_each_plane_in_state(state, plane, plane_state, i) 128 - mdp5_plane_complete_commit(plane, plane_state); 129 130 if (mdp5_kms->smp) 131 mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
··· 119 120 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 121 { 122 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 123 124 if (mdp5_kms->smp) 125 mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
-4
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
··· 104 105 /* assigned by crtc blender */ 106 enum mdp_mixer_stage_id stage; 107 - 108 - bool pending : 1; 109 }; 110 #define to_mdp5_plane_state(x) \ 111 container_of(x, struct mdp5_plane_state, base) ··· 230 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); 231 232 uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 233 - void mdp5_plane_complete_commit(struct drm_plane *plane, 234 - struct drm_plane_state *state); 235 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 236 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); 237
··· 104 105 /* assigned by crtc blender */ 106 enum mdp_mixer_stage_id stage; 107 }; 108 #define to_mdp5_plane_state(x) \ 109 container_of(x, struct mdp5_plane_state, base) ··· 232 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); 233 234 uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 235 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 236 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); 237
-22
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 179 drm_printf(p, "\tzpos=%u\n", pstate->zpos); 180 drm_printf(p, "\talpha=%u\n", pstate->alpha); 181 drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); 182 - drm_printf(p, "\tpending=%u\n", pstate->pending); 183 } 184 185 static void mdp5_plane_reset(struct drm_plane *plane) ··· 218 219 if (mdp5_state && mdp5_state->base.fb) 220 drm_framebuffer_reference(mdp5_state->base.fb); 221 - 222 - mdp5_state->pending = false; 223 224 return &mdp5_state->base; 225 } ··· 284 285 DBG("%s: check (%d -> %d)", plane->name, 286 plane_enabled(old_state), plane_enabled(state)); 287 - 288 - /* We don't allow faster-than-vblank updates.. if we did add this 289 - * some day, we would need to disallow in cases where hwpipe 290 - * changes 291 - */ 292 - if (WARN_ON(to_mdp5_plane_state(old_state)->pending)) 293 - return -EBUSY; 294 295 max_width = config->hw->lm.max_width << 16; 296 max_height = config->hw->lm.max_height << 16; ··· 360 struct drm_plane_state *old_state) 361 { 362 struct drm_plane_state *state = plane->state; 363 - struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); 364 365 DBG("%s: update", plane->name); 366 - 367 - mdp5_state->pending = true; 368 369 if (plane_enabled(state)) { 370 int ret; ··· 836 return 0; 837 838 return pstate->hwpipe->flush_mask; 839 - } 840 - 841 - /* called after vsync in thread context */ 842 - void mdp5_plane_complete_commit(struct drm_plane *plane, 843 - struct drm_plane_state *state) 844 - { 845 - struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); 846 - 847 - pstate->pending = false; 848 } 849 850 /* initialize plane */
··· 179 drm_printf(p, "\tzpos=%u\n", pstate->zpos); 180 drm_printf(p, "\talpha=%u\n", pstate->alpha); 181 drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); 182 } 183 184 static void mdp5_plane_reset(struct drm_plane *plane) ··· 219 220 if (mdp5_state && mdp5_state->base.fb) 221 drm_framebuffer_reference(mdp5_state->base.fb); 222 223 return &mdp5_state->base; 224 } ··· 287 288 DBG("%s: check (%d -> %d)", plane->name, 289 plane_enabled(old_state), plane_enabled(state)); 290 291 max_width = config->hw->lm.max_width << 16; 292 max_height = config->hw->lm.max_height << 16; ··· 370 struct drm_plane_state *old_state) 371 { 372 struct drm_plane_state *state = plane->state; 373 374 DBG("%s: update", plane->name); 375 376 if (plane_enabled(state)) { 377 int ret; ··· 849 return 0; 850 851 return pstate->hwpipe->flush_mask; 852 } 853 854 /* initialize plane */
+2
drivers/gpu/drm/msm/msm_gem.c
··· 294 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 295 296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 297 msm_gem_unmap_vma(priv->aspace[id], 298 &msm_obj->domain[id], msm_obj->sgt); 299 }
··· 294 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 295 296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 297 + if (!priv->aspace[id]) 298 + continue; 299 msm_gem_unmap_vma(priv->aspace[id], 300 &msm_obj->domain[id], msm_obj->sgt); 301 }
+20 -5
drivers/gpu/drm/radeon/si.c
··· 114 MODULE_FIRMWARE("radeon/hainan_rlc.bin"); 115 MODULE_FIRMWARE("radeon/hainan_smc.bin"); 116 MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 117 118 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); 119 static void si_pcie_gen3_enable(struct radeon_device *rdev); ··· 1653 int err; 1654 int new_fw = 0; 1655 bool new_smc = false; 1656 1657 DRM_DEBUG("\n"); 1658 ··· 1732 ((rdev->pdev->device == 0x6660) || 1733 (rdev->pdev->device == 0x6663) || 1734 (rdev->pdev->device == 0x6665) || 1735 - (rdev->pdev->device == 0x6667))) || 1736 - ((rdev->pdev->revision == 0xc3) && 1737 - (rdev->pdev->device == 0x6665))) 1738 new_smc = true; 1739 new_chip_name = "hainan"; 1740 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1741 me_req_size = SI_PM4_UCODE_SIZE * 4; ··· 1747 break; 1748 default: BUG(); 1749 } 1750 1751 DRM_INFO("Loading %s Microcode\n", new_chip_name); 1752 ··· 1855 } 1856 } 1857 1858 - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); 1859 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1860 if (err) { 1861 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); ··· 1889 } 1890 } 1891 1892 - if (new_smc) 1893 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); 1894 else 1895 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
··· 114 MODULE_FIRMWARE("radeon/hainan_rlc.bin"); 115 MODULE_FIRMWARE("radeon/hainan_smc.bin"); 116 MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 117 + MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 118 + 119 + MODULE_FIRMWARE("radeon/si58_mc.bin"); 120 121 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); 122 static void si_pcie_gen3_enable(struct radeon_device *rdev); ··· 1650 int err; 1651 int new_fw = 0; 1652 bool new_smc = false; 1653 + bool si58_fw = false; 1654 + bool banks2_fw = false; 1655 1656 DRM_DEBUG("\n"); 1657 ··· 1727 ((rdev->pdev->device == 0x6660) || 1728 (rdev->pdev->device == 0x6663) || 1729 (rdev->pdev->device == 0x6665) || 1730 + (rdev->pdev->device == 0x6667)))) 1731 new_smc = true; 1732 + else if ((rdev->pdev->revision == 0xc3) && 1733 + (rdev->pdev->device == 0x6665)) 1734 + banks2_fw = true; 1735 new_chip_name = "hainan"; 1736 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1737 me_req_size = SI_PM4_UCODE_SIZE * 4; ··· 1741 break; 1742 default: BUG(); 1743 } 1744 + 1745 + /* this memory configuration requires special firmware */ 1746 + if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) 1747 + si58_fw = true; 1748 1749 DRM_INFO("Loading %s Microcode\n", new_chip_name); 1750 ··· 1845 } 1846 } 1847 1848 + if (si58_fw) 1849 + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); 1850 + else 1851 + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); 1852 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1853 if (err) { 1854 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); ··· 1876 } 1877 } 1878 1879 + if (banks2_fw) 1880 + snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin"); 1881 + else if (new_smc) 1882 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); 1883 else 1884 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
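
For reference, the firmware choice the si.c hunk ends up making is: banks_k_2_smc.bin beats the generic _k_smc image, which beats the plain SMC image, and the MC image switches to si58_mc.bin when the top byte of MC_SEQ_MISC0 reads 0x58. A small stand-alone sketch of just that precedence; the PCI-ID tests that set new_smc and banks2_fw are taken as booleans here rather than re-derived from the partly elided condition:

#include <stdbool.h>
#include <stdio.h>

/* User-space model of the firmware-name selection in the hunk above. */
static void pick_si_firmware(const char *chip, bool new_smc, bool banks2_fw,
			     unsigned int mc_seq_misc0)
{
	char fw_name[64];

	/* 0x58 in bits 31:24 marks the memory configuration that needs
	 * the dedicated si58 MC microcode. */
	if (((mc_seq_misc0 & 0xff000000u) >> 24) == 0x58)
		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip);
	printf("MC:  %s\n", fw_name);

	/* banks_k_2 boards take the dedicated image ahead of the _k_ one. */
	if (banks2_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
	else if (new_smc)
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", chip);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip);
	printf("SMC: %s\n", fw_name);
}

int main(void)
{
	pick_si_firmware("hainan", false, true, 0x58000000u);
	return 0;
}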
-12
drivers/gpu/drm/radeon/si_dpm.c
··· 3008 (rdev->pdev->device == 0x6817) || 3009 (rdev->pdev->device == 0x6806)) 3010 max_mclk = 120000; 3011 - } else if (rdev->family == CHIP_OLAND) { 3012 - if ((rdev->pdev->revision == 0xC7) || 3013 - (rdev->pdev->revision == 0x80) || 3014 - (rdev->pdev->revision == 0x81) || 3015 - (rdev->pdev->revision == 0x83) || 3016 - (rdev->pdev->revision == 0x87) || 3017 - (rdev->pdev->device == 0x6604) || 3018 - (rdev->pdev->device == 0x6605)) { 3019 - max_sclk = 75000; 3020 - max_mclk = 80000; 3021 - } 3022 } else if (rdev->family == CHIP_HAINAN) { 3023 if ((rdev->pdev->revision == 0x81) || 3024 (rdev->pdev->revision == 0x83) || ··· 3016 (rdev->pdev->device == 0x6665) || 3017 (rdev->pdev->device == 0x6667)) { 3018 max_sclk = 75000; 3019 - max_mclk = 80000; 3020 } 3021 } 3022 /* Apply dpm quirks */
··· 3008 (rdev->pdev->device == 0x6817) || 3009 (rdev->pdev->device == 0x6806)) 3010 max_mclk = 120000; 3011 } else if (rdev->family == CHIP_HAINAN) { 3012 if ((rdev->pdev->revision == 0x81) || 3013 (rdev->pdev->revision == 0x83) || ··· 3027 (rdev->pdev->device == 0x6665) || 3028 (rdev->pdev->device == 0x6667)) { 3029 max_sclk = 75000; 3030 } 3031 } 3032 /* Apply dpm quirks */
+1 -1
drivers/gpu/drm/virtio/virtgpu_fb.c
··· 331 info->fbops = &virtio_gpufb_ops; 332 info->pixmap.flags = FB_PIXMAP_SYSTEM; 333 334 - info->screen_base = obj->vmap; 335 info->screen_size = obj->gem_base.size; 336 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 337 drm_fb_helper_fill_var(info, &vfbdev->helper,
··· 331 info->fbops = &virtio_gpufb_ops; 332 info->pixmap.flags = FB_PIXMAP_SYSTEM; 333 334 + info->screen_buffer = obj->vmap; 335 info->screen_size = obj->gem_base.size; 336 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 337 drm_fb_helper_fill_var(info, &vfbdev->helper,
-1
include/drm/drm_crtc_helper.h
··· 73 74 extern void drm_kms_helper_poll_disable(struct drm_device *dev); 75 extern void drm_kms_helper_poll_enable(struct drm_device *dev); 76 - extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); 77 78 #endif
··· 73 74 extern void drm_kms_helper_poll_disable(struct drm_device *dev); 75 extern void drm_kms_helper_poll_enable(struct drm_device *dev); 76 77 #endif