Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-next-2023-11-07' of git://anongit.freedesktop.org/drm/drm

Pull more drm updates from Dave Airlie:
"Geert pointed out I missed the renesas reworks in my main pull, so
this pull contains the renesas next work for atomic conversion and DT
support.

It also contains a bunch of amdgpu and some small ssd13xx fixes.

renesas:
- atomic conversion
- DT support

ssd13xx:
- DT binding fix for ssd132x
- initialize ssd130x crtc_state to NULL

amdgpu:
- Fix RAS support check
- RAS fixes
- MES fixes
- SMU13 fixes
- Contiguous memory allocation fix
- BACO fixes
- GPU reset fixes
- Min power limit fixes
- GFX11 fixes
- USB4/TB hotplug fixes
- ARM regression fix
- GFX9.4.3 fixes
- KASAN/KCSAN stack size check fixes
- SR-IOV fixes
- SMU14 fixes
- PSP13 fixes
- Display blend fixes
- Flexible array size fixes

amdkfd:
- GPUVM fix

radeon:
- Flexible array size fixes"

* tag 'drm-next-2023-11-07' of git://anongit.freedesktop.org/drm/drm: (83 commits)
drm/amd/display: Enable fast update on blendTF change
drm/amd/display: Fix blend LUT programming
drm/amd/display: Program plane color setting correctly
drm/amdgpu: Query and report boot status
drm/amdgpu: Add psp v13 function to query boot status
drm/amd/swsmu: remove fw version check in sw_init.
drm/amd/swsmu: update smu v14_0_0 driver if and metrics table
drm/amdgpu: Add C2PMSG_109/126 reg field shift/masks
drm/amdgpu: Optimize the asic type fix code
drm/amdgpu: fix GRBM read timeout when do mes_self_test
drm/amdgpu: check recovery status of xgmi hive in ras_reset_error_count
drm/amd/pm: only check sriov vf flag once when creating hwmon sysfs
drm/amdgpu: Attach eviction fence on alloc
drm/amdkfd: Improve amdgpu_vm_handle_moved
drm/amd/display: Increase frame warning limit with KASAN or KCSAN in dml2
drm/amd/display: Avoid NULL dereference of timing generator
drm/amdkfd: Update cache info for GFX 9.4.3
drm/amdkfd: Populate cache info for GFX 9.4.3
drm/amdgpu: don't put MQDs in VRAM on ARM | ARM64
drm/amdgpu/smu13: drop compute workload workaround
...

+1730 -1357
+130
Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/renesas,shmobile-lcdc.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Renesas SH-Mobile LCD Controller (LCDC) 8 + 9 + maintainers: 10 + - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> 11 + - Geert Uytterhoeven <geert+renesas@glider.be> 12 + 13 + properties: 14 + compatible: 15 + enum: 16 + - renesas,r8a7740-lcdc # R-Mobile A1 17 + - renesas,sh73a0-lcdc # SH-Mobile AG5 18 + 19 + reg: 20 + maxItems: 1 21 + 22 + interrupts: 23 + maxItems: 1 24 + 25 + clocks: 26 + minItems: 1 27 + maxItems: 5 28 + description: 29 + Only the functional clock is mandatory. 30 + Some of the optional clocks are model-dependent (e.g. "video" (a.k.a. 31 + "vou" or "dv_clk") is available on R-Mobile A1 only). 32 + 33 + clock-names: 34 + minItems: 1 35 + items: 36 + - const: fck 37 + - enum: [ media, lclk, hdmi, video ] 38 + - enum: [ media, lclk, hdmi, video ] 39 + - enum: [ media, lclk, hdmi, video ] 40 + - enum: [ media, lclk, hdmi, video ] 41 + 42 + power-domains: 43 + maxItems: 1 44 + 45 + ports: 46 + $ref: /schemas/graph.yaml#/properties/ports 47 + 48 + properties: 49 + port@0: 50 + $ref: /schemas/graph.yaml#/properties/port 51 + description: LCD port (R-Mobile A1 and SH-Mobile AG5) 52 + unevaluatedProperties: false 53 + 54 + port@1: 55 + $ref: /schemas/graph.yaml#/properties/port 56 + description: HDMI port (R-Mobile A1 LCDC1 and SH-Mobile AG5) 57 + unevaluatedProperties: false 58 + 59 + port@2: 60 + $ref: /schemas/graph.yaml#/properties/port 61 + description: MIPI-DSI port (SH-Mobile AG5) 62 + unevaluatedProperties: false 63 + 64 + required: 65 + - port@0 66 + 67 + unevaluatedProperties: false 68 + 69 + required: 70 + - compatible 71 + - reg 72 + - interrupts 73 + - clocks 74 + - clock-names 75 + - power-domains 76 + - ports 77 + 78 + additionalProperties: false 79 + 80 + allOf: 81 + - if: 82 + properties: 
83 + compatible: 84 + contains: 85 + const: renesas,r8a7740-lcdc 86 + then: 87 + properties: 88 + ports: 89 + properties: 90 + port@2: false 91 + 92 + - if: 93 + properties: 94 + compatible: 95 + contains: 96 + const: renesas,sh73a0-lcdc 97 + then: 98 + properties: 99 + ports: 100 + required: 101 + - port@1 102 + - port@2 103 + 104 + examples: 105 + - | 106 + #include <dt-bindings/clock/r8a7740-clock.h> 107 + #include <dt-bindings/interrupt-controller/arm-gic.h> 108 + 109 + lcd-controller@fe940000 { 110 + compatible = "renesas,r8a7740-lcdc"; 111 + reg = <0xfe940000 0x4000>; 112 + interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; 113 + clocks = <&mstp1_clks R8A7740_CLK_LCDC0>, 114 + <&cpg_clocks R8A7740_CLK_M3>, <&lcdlclk0_clk>, 115 + <&vou_clk>; 116 + clock-names = "fck", "media", "lclk", "video"; 117 + power-domains = <&pd_a4lc>; 118 + 119 + ports { 120 + #address-cells = <1>; 121 + #size-cells = <0>; 122 + 123 + port@0 { 124 + reg = <0>; 125 + 126 + lcdc0_rgb: endpoint { 127 + }; 128 + }; 129 + }; 130 + };
+4 -4
Documentation/devicetree/bindings/display/solomon,ssd132x.yaml
··· 11 11 12 12 properties: 13 13 compatible: 14 - - enum: 15 - - solomon,ssd1322 16 - - solomon,ssd1325 17 - - solomon,ssd1327 14 + enum: 15 + - solomon,ssd1322 16 + - solomon,ssd1325 17 + - solomon,ssd1327 18 18 19 19 required: 20 20 - compatible
+72
Documentation/userspace-api/media/v4l/subdev-formats.rst
··· 949 949 - b\ :sub:`2` 950 950 - b\ :sub:`1` 951 951 - b\ :sub:`0` 952 + * .. _MEDIA-BUS-FMT-RGB666-2X9-BE: 953 + 954 + - MEDIA_BUS_FMT_RGB666_2X9_BE 955 + - 0x1025 956 + - 957 + - 958 + - 959 + - 960 + - 961 + - 962 + - 963 + - 964 + - 965 + - 966 + - 967 + - 968 + - 969 + - 970 + - 971 + - 972 + - 973 + - 974 + - 975 + - 976 + - 977 + - 978 + - 979 + - 980 + - r\ :sub:`5` 981 + - r\ :sub:`4` 982 + - r\ :sub:`3` 983 + - r\ :sub:`2` 984 + - r\ :sub:`1` 985 + - r\ :sub:`0` 986 + - g\ :sub:`5` 987 + - g\ :sub:`4` 988 + - g\ :sub:`3` 989 + * - 990 + - 991 + - 992 + - 993 + - 994 + - 995 + - 996 + - 997 + - 998 + - 999 + - 1000 + - 1001 + - 1002 + - 1003 + - 1004 + - 1005 + - 1006 + - 1007 + - 1008 + - 1009 + - 1010 + - 1011 + - 1012 + - 1013 + - 1014 + - 1015 + - g\ :sub:`2` 1016 + - g\ :sub:`1` 1017 + - g\ :sub:`0` 1018 + - b\ :sub:`5` 1019 + - b\ :sub:`4` 1020 + - b\ :sub:`3` 1021 + - b\ :sub:`2` 1022 + - b\ :sub:`1` 1023 + - b\ :sub:`0` 952 1024 * .. _MEDIA-BUS-FMT-BGR666-1X18: 953 1025 954 1026 - MEDIA_BUS_FMT_BGR666_1X18
+11 -2
MAINTAINERS
··· 7133 7133 F: include/linux/host1x.h 7134 7134 F: include/uapi/drm/tegra_drm.h 7135 7135 7136 - DRM DRIVERS FOR RENESAS 7136 + DRM DRIVERS FOR RENESAS R-CAR 7137 7137 M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 7138 7138 M: Kieran Bingham <kieran.bingham+renesas@ideasonboard.com> 7139 7139 L: dri-devel@lists.freedesktop.org ··· 7144 7144 F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml 7145 7145 F: Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml 7146 7146 F: Documentation/devicetree/bindings/display/renesas,du.yaml 7147 - F: drivers/gpu/drm/renesas/ 7147 + F: drivers/gpu/drm/renesas/rcar-du/ 7148 + 7149 + DRM DRIVERS FOR RENESAS SHMOBILE 7150 + M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 7151 + M: Geert Uytterhoeven <geert+renesas@glider.be> 7152 + L: dri-devel@lists.freedesktop.org 7153 + L: linux-renesas-soc@vger.kernel.org 7154 + S: Supported 7155 + F: Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml 7156 + F: drivers/gpu/drm/renesas/shmobile/ 7148 7157 F: include/linux/platform_data/shmob_drm.h 7149 7158 7150 7159 DRM DRIVERS FOR ROCKCHIP
-3
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 363 363 const struct amd_ip_funcs *funcs; 364 364 }; 365 365 366 - #define HW_REV(_Major, _Minor, _Rev) \ 367 - ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev))) 368 - 369 366 struct amdgpu_ip_block { 370 367 struct amdgpu_ip_block_status status; 371 368 const struct amdgpu_ip_block_version *version;
+48 -31
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 425 425 return ret; 426 426 } 427 427 428 + static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, 429 + uint32_t domain, 430 + struct dma_fence *fence) 431 + { 432 + int ret = amdgpu_bo_reserve(bo, false); 433 + 434 + if (ret) 435 + return ret; 436 + 437 + ret = amdgpu_amdkfd_bo_validate(bo, domain, true); 438 + if (ret) 439 + goto unreserve_out; 440 + 441 + ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1); 442 + if (ret) 443 + goto unreserve_out; 444 + 445 + dma_resv_add_fence(bo->tbo.base.resv, fence, 446 + DMA_RESV_USAGE_BOOKKEEP); 447 + 448 + unreserve_out: 449 + amdgpu_bo_unreserve(bo); 450 + 451 + return ret; 452 + } 453 + 428 454 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) 429 455 { 430 456 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); ··· 1810 1784 } 1811 1785 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; 1812 1786 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; 1787 + } else { 1788 + mutex_lock(&avm->process_info->lock); 1789 + if (avm->process_info->eviction_fence && 1790 + !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) 1791 + ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain, 1792 + &avm->process_info->eviction_fence->base); 1793 + mutex_unlock(&avm->process_info->lock); 1794 + if (ret) 1795 + goto err_validate_bo; 1813 1796 } 1814 1797 1815 1798 if (offset) ··· 1828 1793 1829 1794 allocate_init_user_pages_failed: 1830 1795 err_pin_bo: 1796 + err_validate_bo: 1831 1797 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); 1832 1798 drm_vma_node_revoke(&gobj->vma_node, drm_priv); 1833 1799 err_node_allow: ··· 1902 1866 if (unlikely(ret)) 1903 1867 return ret; 1904 1868 1905 - /* The eviction fence should be removed by the last unmap. 
1906 - * TODO: Log an error condition if the bo still has the eviction fence 1907 - * attached 1908 - */ 1909 1869 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1910 1870 process_info->eviction_fence); 1911 1871 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, ··· 2030 1998 if (unlikely(ret)) 2031 1999 goto out_unreserve; 2032 2000 2033 - if (mem->mapped_to_gpu_memory == 0 && 2034 - !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 2035 - /* Validate BO only once. The eviction fence gets added to BO 2036 - * the first time it is mapped. Validate will wait for all 2037 - * background evictions to complete. 2038 - */ 2039 - ret = amdgpu_amdkfd_bo_validate(bo, domain, true); 2040 - if (ret) { 2041 - pr_debug("Validate failed\n"); 2042 - goto out_unreserve; 2043 - } 2044 - } 2045 - 2046 2001 list_for_each_entry(entry, &mem->attachments, list) { 2047 2002 if (entry->bo_va->base.vm != avm || entry->is_mapped) 2048 2003 continue; ··· 2056 2037 mem->mapped_to_gpu_memory); 2057 2038 } 2058 2039 2059 - if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count) 2060 - dma_resv_add_fence(bo->tbo.base.resv, 2061 - &avm->process_info->eviction_fence->base, 2062 - DMA_RESV_USAGE_BOOKKEEP); 2063 2040 ret = unreserve_bo_and_vms(&ctx, false, false); 2064 2041 2065 2042 goto out; ··· 2089 2074 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) 2090 2075 { 2091 2076 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 2092 - struct amdkfd_process_info *process_info = avm->process_info; 2093 2077 unsigned long bo_size = mem->bo->tbo.base.size; 2094 2078 struct kfd_mem_attachment *entry; 2095 2079 struct bo_vm_reservation_context ctx; ··· 2128 2114 pr_debug("\t DEC mapping count %d\n", 2129 2115 mem->mapped_to_gpu_memory); 2130 2116 } 2131 - 2132 - /* If BO is unmapped from all VMs, unfence it. It can be evicted if 2133 - * required. 
2134 - */ 2135 - if (mem->mapped_to_gpu_memory == 0 && 2136 - !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && 2137 - !mem->bo->tbo.pin_count) 2138 - amdgpu_amdkfd_remove_eviction_fence(mem->bo, 2139 - process_info->eviction_fence); 2140 2117 2141 2118 unreserve_out: 2142 2119 unreserve_bo_and_vms(&ctx, false, false); ··· 2356 2351 amdgpu_sync_create(&(*mem)->sync); 2357 2352 (*mem)->is_imported = true; 2358 2353 2354 + mutex_lock(&avm->process_info->lock); 2355 + if (avm->process_info->eviction_fence && 2356 + !dma_fence_is_signaled(&avm->process_info->eviction_fence->base)) 2357 + ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain, 2358 + &avm->process_info->eviction_fence->base); 2359 + mutex_unlock(&avm->process_info->lock); 2360 + if (ret) 2361 + goto err_remove_mem; 2362 + 2359 2363 return 0; 2360 2364 2365 + err_remove_mem: 2366 + remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); 2367 + drm_vma_node_revoke(&obj->vma_node, drm_priv); 2361 2368 err_free_mem: 2362 2369 kfree(*mem); 2363 2370 err_put_obj:
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
··· 29 29 #include "amdgpu.h" 30 30 #include "atom.h" 31 31 32 + #include <linux/device.h> 32 33 #include <linux/pci.h> 33 34 #include <linux/slab.h> 34 35 #include <linux/acpi.h> ··· 286 285 287 286 /* ATRM is for the discrete card only */ 288 287 if (adev->flags & AMD_IS_APU) 288 + return false; 289 + 290 + /* ATRM is for on-platform devices only */ 291 + if (dev_is_removable(&adev->pdev->dev)) 289 292 return false; 290 293 291 294 while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
+6 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 1117 1117 return r; 1118 1118 } 1119 1119 1120 + /* FIXME: In theory this loop shouldn't be needed any more when 1121 + * amdgpu_vm_handle_moved handles all moved BOs that are reserved 1122 + * with p->ticket. But removing it caused test regressions, so I'm 1123 + * leaving it here for now. 1124 + */ 1120 1125 amdgpu_bo_list_for_each_entry(e, p->bo_list) { 1121 1126 bo_va = e->bo_va; 1122 1127 if (bo_va == NULL) ··· 1136 1131 return r; 1137 1132 } 1138 1133 1139 - r = amdgpu_vm_handle_moved(adev, vm); 1134 + r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket); 1140 1135 if (r) 1141 1136 return r; 1142 1137
+23 -15
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 41 41 #include <drm/drm_fb_helper.h> 42 42 #include <drm/drm_probe_helper.h> 43 43 #include <drm/amdgpu_drm.h> 44 + #include <linux/device.h> 44 45 #include <linux/vgaarb.h> 45 46 #include <linux/vga_switcheroo.h> 46 47 #include <linux/efi.h> ··· 1074 1073 amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) { 1075 1074 amdgpu_psp_wait_for_bootloader(adev); 1076 1075 ret = amdgpu_atomfirmware_asic_init(adev, true); 1076 + /* TODO: check the return val and stop device initialization if boot fails */ 1077 + amdgpu_psp_query_boot_status(adev); 1077 1078 return ret; 1078 1079 } else { 1079 1080 return amdgpu_atom_asic_init(adev->mode_info.atom_context); ··· 2226 2223 */ 2227 2224 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) 2228 2225 { 2229 - struct drm_device *dev = adev_to_drm(adev); 2230 2226 struct pci_dev *parent; 2231 2227 int i, r; 2232 2228 bool total; ··· 2296 2294 (amdgpu_is_atpx_hybrid() || 2297 2295 amdgpu_has_atpx_dgpu_power_cntl()) && 2298 2296 ((adev->flags & AMD_IS_APU) == 0) && 2299 - !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) 2297 + !dev_is_removable(&adev->pdev->dev)) 2300 2298 adev->flags |= AMD_IS_PX; 2301 2299 2302 2300 if (!(adev->flags & AMD_IS_APU)) { ··· 3964 3962 } 3965 3963 } 3966 3964 } else { 3967 - tmp = amdgpu_reset_method; 3968 - /* It should do a default reset when loading or reloading the driver, 3969 - * regardless of the module parameter reset_method. 3970 - */ 3971 - amdgpu_reset_method = AMD_RESET_METHOD_NONE; 3972 - r = amdgpu_asic_reset(adev); 3973 - amdgpu_reset_method = tmp; 3965 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 3966 + case IP_VERSION(13, 0, 0): 3967 + case IP_VERSION(13, 0, 7): 3968 + case IP_VERSION(13, 0, 10): 3969 + r = psp_gpu_reset(adev); 3970 + break; 3971 + default: 3972 + tmp = amdgpu_reset_method; 3973 + /* It should do a default reset when loading or reloading the driver, 3974 + * regardless of the module parameter reset_method. 
3975 + */ 3976 + amdgpu_reset_method = AMD_RESET_METHOD_NONE; 3977 + r = amdgpu_asic_reset(adev); 3978 + amdgpu_reset_method = tmp; 3979 + break; 3980 + } 3981 + 3974 3982 if (r) { 3975 3983 dev_err(adev->dev, "asic reset on init failed\n"); 3976 3984 goto failed; ··· 4144 4132 4145 4133 px = amdgpu_device_supports_px(ddev); 4146 4134 4147 - if (px || (!pci_is_thunderbolt_attached(adev->pdev) && 4135 + if (px || (!dev_is_removable(&adev->pdev->dev) && 4148 4136 apple_gmux_detect(NULL, NULL))) 4149 4137 vga_switcheroo_register_client(adev->pdev, 4150 4138 &amdgpu_switcheroo_ops, px); ··· 4294 4282 4295 4283 px = amdgpu_device_supports_px(adev_to_drm(adev)); 4296 4284 4297 - if (px || (!pci_is_thunderbolt_attached(adev->pdev) && 4285 + if (px || (!dev_is_removable(&adev->pdev->dev) && 4298 4286 apple_gmux_detect(NULL, NULL))) 4299 4287 vga_switcheroo_unregister_client(adev->pdev); 4300 4288 ··· 5577 5565 5578 5566 drm_sched_start(&ring->sched, true); 5579 5567 } 5580 - 5581 - if (adev->enable_mes && 5582 - amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)) 5583 - amdgpu_mes_self_test(tmp_adev); 5584 5568 5585 5569 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) 5586 5570 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
+24 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 99 99 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); 100 100 101 101 #define mmRCC_CONFIG_MEMSIZE 0xde3 102 + #define mmMP0_SMN_C2PMSG_33 0x16061 102 103 #define mmMM_INDEX 0x0 103 104 #define mmMM_INDEX_HI 0x6 104 105 #define mmMM_DATA 0x1 ··· 240 239 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, 241 240 uint8_t *binary) 242 241 { 243 - uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 244 - int ret = 0; 242 + uint64_t vram_size; 243 + u32 msg; 244 + int i, ret = 0; 245 + 246 + /* It can take up to a second for IFWI init to complete on some dGPUs, 247 + * but generally it should be in the 60-100ms range. Normally this starts 248 + * as soon as the device gets power so by the time the OS loads this has long 249 + * completed. However, when a card is hotplugged via e.g., USB4, we need to 250 + * wait for this to complete. Once the C2PMSG is updated, we can 251 + * continue. 252 + */ 253 + if (dev_is_removable(&adev->pdev->dev)) { 254 + for (i = 0; i < 1000; i++) { 255 + msg = RREG32(mmMP0_SMN_C2PMSG_33); 256 + if (msg & 0x80000000) 257 + break; 258 + msleep(1); 259 + } 260 + } 261 + vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 245 262 246 263 if (vram_size) { 247 264 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; ··· 2467 2448 2468 2449 if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0)) 2469 2450 adev->gmc.xgmi.supported = true; 2451 + 2452 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 2453 + adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0); 2470 2454 2471 2455 /* set NBIO version */ 2472 2456 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 409 409 if (!r) 410 410 r = amdgpu_vm_clear_freed(adev, vm, NULL); 411 411 if (!r) 412 - r = amdgpu_vm_handle_moved(adev, vm); 412 + r = amdgpu_vm_handle_moved(adev, vm, ticket); 413 413 414 414 if (r && r != -EBUSY) 415 415 DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+26 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2041 2041 2042 2042 MODULE_DEVICE_TABLE(pci, pciidlist); 2043 2043 2044 + static const struct amdgpu_asic_type_quirk asic_type_quirks[] = { 2045 + /* differentiate between P10 and P11 asics with the same DID */ 2046 + {0x67FF, 0xE3, CHIP_POLARIS10}, 2047 + {0x67FF, 0xE7, CHIP_POLARIS10}, 2048 + {0x67FF, 0xF3, CHIP_POLARIS10}, 2049 + {0x67FF, 0xF7, CHIP_POLARIS10}, 2050 + }; 2051 + 2044 2052 static const struct drm_driver amdgpu_kms_driver; 2045 2053 2046 2054 static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) ··· 2091 2083 } 2092 2084 } 2093 2085 2086 + static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) 2087 + { 2088 + int i; 2089 + 2090 + for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) { 2091 + if (pdev->device == asic_type_quirks[i].device && 2092 + pdev->revision == asic_type_quirks[i].revision) { 2093 + flags &= ~AMD_ASIC_MASK; 2094 + flags |= asic_type_quirks[i].type; 2095 + break; 2096 + } 2097 + } 2098 + 2099 + return flags; 2100 + } 2101 + 2094 2102 static int amdgpu_pci_probe(struct pci_dev *pdev, 2095 2103 const struct pci_device_id *ent) 2096 2104 { ··· 2134 2110 "See modparam exp_hw_support\n"); 2135 2111 return -ENODEV; 2136 2112 } 2137 - /* differentiate between P10 and P11 asics with the same DID */ 2138 - if (pdev->device == 0x67FF && 2139 - (pdev->revision == 0xE3 || 2140 - pdev->revision == 0xE7 || 2141 - pdev->revision == 0xF3 || 2142 - pdev->revision == 0xF7)) { 2143 - flags &= ~AMD_ASIC_MASK; 2144 - flags |= CHIP_POLARIS10; 2145 - } 2113 + 2114 + flags = amdgpu_fix_asic_type(pdev, flags); 2146 2115 2147 2116 /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, 2148 2117 * however, SME requires an indirect IOMMU mapping because the encryption
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 385 385 struct amdgpu_ring *ring = &kiq->ring; 386 386 u32 domain = AMDGPU_GEM_DOMAIN_GTT; 387 387 388 + #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) 388 389 /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */ 389 390 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0)) 390 391 domain |= AMDGPU_GEM_DOMAIN_VRAM; 392 + #endif 391 393 392 394 /* create MQD for KIQ */ 393 395 if (!adev->enable_mes_kiq && !ring->mqd_obj) {
+16
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
··· 557 557 mqd_prop.hqd_queue_priority = p->hqd_queue_priority; 558 558 mqd_prop.hqd_active = false; 559 559 560 + if (p->queue_type == AMDGPU_RING_TYPE_GFX || 561 + p->queue_type == AMDGPU_RING_TYPE_COMPUTE) { 562 + mutex_lock(&adev->srbm_mutex); 563 + amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0); 564 + } 565 + 560 566 mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop); 567 + 568 + if (p->queue_type == AMDGPU_RING_TYPE_GFX || 569 + p->queue_type == AMDGPU_RING_TYPE_COMPUTE) { 570 + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0); 571 + mutex_unlock(&adev->srbm_mutex); 572 + } 561 573 562 574 amdgpu_bo_unreserve(q->mqd_obj); 563 575 } ··· 1006 994 switch (queue_type) { 1007 995 case AMDGPU_RING_TYPE_GFX: 1008 996 ring->funcs = adev->gfx.gfx_ring[0].funcs; 997 + ring->me = adev->gfx.gfx_ring[0].me; 998 + ring->pipe = adev->gfx.gfx_ring[0].pipe; 1009 999 break; 1010 1000 case AMDGPU_RING_TYPE_COMPUTE: 1011 1001 ring->funcs = adev->gfx.compute_ring[0].funcs; 1002 + ring->me = adev->gfx.compute_ring[0].me; 1003 + ring->pipe = adev->gfx.compute_ring[0].pipe; 1012 1004 break; 1013 1005 case AMDGPU_RING_TYPE_SDMA: 1014 1006 ring->funcs = adev->sdma.instance[0].ring.funcs;
+15
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 2120 2120 return ret; 2121 2121 } 2122 2122 2123 + int amdgpu_psp_query_boot_status(struct amdgpu_device *adev) 2124 + { 2125 + struct psp_context *psp = &adev->psp; 2126 + int ret = 0; 2127 + 2128 + if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2129 + return 0; 2130 + 2131 + if (psp->funcs && 2132 + psp->funcs->query_boot_status) 2133 + ret = psp->funcs->query_boot_status(psp); 2134 + 2135 + return ret; 2136 + } 2137 + 2123 2138 static int psp_hw_start(struct psp_context *psp) 2124 2139 { 2125 2140 struct amdgpu_device *adev = psp->adev;
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
··· 134 134 int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr); 135 135 int (*vbflash_stat)(struct psp_context *psp); 136 136 int (*fatal_error_recovery_quirk)(struct psp_context *psp); 137 + int (*query_boot_status)(struct psp_context *psp); 137 138 }; 138 139 139 140 struct ta_funcs { ··· 537 536 int is_psp_fw_valid(struct psp_bin_desc bin); 538 537 539 538 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev); 539 + 540 + int amdgpu_psp_query_boot_status(struct amdgpu_device *adev); 540 541 541 542 #endif
+14 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 1222 1222 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); 1223 1223 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 1224 1224 const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; 1225 + struct amdgpu_hive_info *hive; 1226 + int hive_ras_recovery = 0; 1225 1227 1226 1228 if (!block_obj || !block_obj->hw_ops) { 1227 1229 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", ··· 1231 1229 return -EOPNOTSUPP; 1232 1230 } 1233 1231 1234 - /* skip ras error reset in gpu reset */ 1235 - if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery)) && 1236 - mca_funcs && mca_funcs->mca_set_debug_mode) 1237 - return -EOPNOTSUPP; 1238 - 1239 1232 if (!amdgpu_ras_is_supported(adev, block) || 1240 1233 !amdgpu_ras_get_mca_debug_mode(adev)) 1234 + return -EOPNOTSUPP; 1235 + 1236 + hive = amdgpu_get_xgmi_hive(adev); 1237 + if (hive) { 1238 + hive_ras_recovery = atomic_read(&hive->ras_recovery); 1239 + amdgpu_put_xgmi_hive(hive); 1240 + } 1241 + 1242 + /* skip ras error reset in gpu reset */ 1243 + if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) || 1244 + hive_ras_recovery) && 1245 + mca_funcs && mca_funcs->mca_set_debug_mode) 1241 1246 return -EOPNOTSUPP; 1242 1247 1243 1248 if (block_obj->hw_ops->reset_ras_error_count)
+5 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
··· 166 166 } 167 167 } 168 168 169 - if (reset) 169 + if (reset) { 170 + /* use mode-2 reset for poison consumption */ 171 + if (!entry) 172 + con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; 170 173 amdgpu_ras_reset_gpu(adev); 174 + } 171 175 } 172 176 173 177 kfree(err_data->err_addr);
+14 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1373 1373 * 1374 1374 * @adev: amdgpu_device pointer 1375 1375 * @vm: requested vm 1376 + * @ticket: optional reservation ticket used to reserve the VM 1376 1377 * 1377 1378 * Make sure all BOs which are moved are updated in the PTs. 1378 1379 * ··· 1383 1382 * PTs have to be reserved! 1384 1383 */ 1385 1384 int amdgpu_vm_handle_moved(struct amdgpu_device *adev, 1386 - struct amdgpu_vm *vm) 1385 + struct amdgpu_vm *vm, 1386 + struct ww_acquire_ctx *ticket) 1387 1387 { 1388 1388 struct amdgpu_bo_va *bo_va; 1389 1389 struct dma_resv *resv; 1390 - bool clear; 1390 + bool clear, unlock; 1391 1391 int r; 1392 1392 1393 1393 spin_lock(&vm->status_lock); ··· 1411 1409 spin_unlock(&vm->status_lock); 1412 1410 1413 1411 /* Try to reserve the BO to avoid clearing its ptes */ 1414 - if (!adev->debug_vm && dma_resv_trylock(resv)) 1412 + if (!adev->debug_vm && dma_resv_trylock(resv)) { 1415 1413 clear = false; 1414 + unlock = true; 1415 + /* The caller is already holding the reservation lock */ 1416 + } else if (ticket && dma_resv_locking_ctx(resv) == ticket) { 1417 + clear = false; 1418 + unlock = false; 1416 1419 /* Somebody else is using the BO right now */ 1417 - else 1420 + } else { 1418 1421 clear = true; 1422 + unlock = false; 1423 + } 1419 1424 1420 1425 r = amdgpu_vm_bo_update(adev, bo_va, clear); 1421 1426 if (r) 1422 1427 return r; 1423 1428 1424 - if (!clear) 1429 + if (unlock) 1425 1430 dma_resv_unlock(resv); 1426 1431 spin_lock(&vm->status_lock); 1427 1432 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 443 443 struct amdgpu_vm *vm, 444 444 struct dma_fence **fence); 445 445 int amdgpu_vm_handle_moved(struct amdgpu_device *adev, 446 - struct amdgpu_vm *vm); 446 + struct amdgpu_vm *vm, 447 + struct ww_acquire_ctx *ticket); 447 448 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, 448 449 struct amdgpu_vm *vm, struct amdgpu_bo *bo); 449 450 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+13 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 77 77 return true; 78 78 } 79 79 80 + static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head) 81 + { 82 + struct drm_buddy_block *block; 83 + u64 size = 0; 80 84 85 + list_for_each_entry(block, head, link) 86 + size += amdgpu_vram_mgr_block_size(block); 87 + 88 + return size; 89 + } 81 90 82 91 /** 83 92 * DOC: mem_info_vram_total ··· 525 516 mutex_unlock(&mgr->lock); 526 517 527 518 vres->base.start = 0; 519 + size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks), 520 + vres->base.size); 528 521 list_for_each_entry(block, &vres->blocks, link) { 529 522 unsigned long start; 530 523 ··· 534 523 amdgpu_vram_mgr_block_size(block); 535 524 start >>= PAGE_SHIFT; 536 525 537 - if (start > PFN_UP(vres->base.size)) 538 - start -= PFN_UP(vres->base.size); 526 + if (start > PFN_UP(size)) 527 + start -= PFN_UP(size); 539 528 else 540 529 start = 0; 541 530 vres->base.start = max(vres->base.start, start);
+22 -6
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 3498 3498 static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev, 3499 3499 unsigned int vmid); 3500 3500 3501 + static int gfx_v10_0_set_powergating_state(void *handle, 3502 + enum amd_powergating_state state); 3501 3503 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) 3502 3504 { 3503 3505 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); ··· 6467 6465 nv_grbm_select(adev, 0, 0, 0, 0); 6468 6466 mutex_unlock(&adev->srbm_mutex); 6469 6467 if (adev->gfx.me.mqd_backup[mqd_idx]) 6470 - memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 6468 + memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 6471 6469 } else { 6470 + mutex_lock(&adev->srbm_mutex); 6471 + nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 6472 + if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1) 6473 + gfx_v10_0_cp_gfx_set_doorbell(adev, ring); 6474 + 6475 + nv_grbm_select(adev, 0, 0, 0, 0); 6476 + mutex_unlock(&adev->srbm_mutex); 6472 6477 /* restore mqd with the backup copy */ 6473 6478 if (adev->gfx.me.mqd_backup[mqd_idx]) 6474 - memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 6479 + memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 6475 6480 /* reset the ring */ 6476 6481 ring->wptr = 0; 6477 6482 *ring->wptr_cpu_addr = 0; ··· 6752 6743 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 6753 6744 /* reset MQD to a clean status */ 6754 6745 if (adev->gfx.kiq[0].mqd_backup) 6755 - memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); 6746 + memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); 6756 6747 6757 6748 /* reset ring buffer */ 6758 6749 ring->wptr = 0; ··· 6775 6766 mutex_unlock(&adev->srbm_mutex); 6776 6767 6777 6768 if (adev->gfx.kiq[0].mqd_backup) 6778 - memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); 6769 + memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); 6779 6770 } 6780 6771 6781 6772 return 0; ··· 
6796 6787 mutex_unlock(&adev->srbm_mutex); 6797 6788 6798 6789 if (adev->gfx.mec.mqd_backup[mqd_idx]) 6799 - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 6790 + memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 6800 6791 } else { 6801 6792 /* restore MQD to a clean status */ 6802 6793 if (adev->gfx.mec.mqd_backup[mqd_idx]) 6803 - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 6794 + memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 6804 6795 /* reset ring buffer */ 6805 6796 ring->wptr = 0; 6806 6797 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); ··· 7180 7171 7181 7172 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 7182 7173 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 7174 + 7175 + /* WA added for Vangogh asic fixing the SMU suspend failure 7176 + * It needs to set power gating again during gfxoff control 7177 + * otherwise the gfxoff disallowing will be failed to set. 7178 + */ 7179 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1)) 7180 + gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE); 7183 7181 7184 7182 if (!adev->no_hw_access) { 7185 7183 if (amdgpu_async_gfx_ring) {
+7 -6
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 155 155 { 156 156 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 157 157 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 158 + PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */ 158 159 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 159 160 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 160 161 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ ··· 3715 3714 soc21_grbm_select(adev, 0, 0, 0, 0); 3716 3715 mutex_unlock(&adev->srbm_mutex); 3717 3716 if (adev->gfx.me.mqd_backup[mqd_idx]) 3718 - memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3717 + memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3719 3718 } else { 3720 3719 /* restore mqd with the backup copy */ 3721 3720 if (adev->gfx.me.mqd_backup[mqd_idx]) 3722 - memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 3721 + memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 3723 3722 /* reset the ring */ 3724 3723 ring->wptr = 0; 3725 3724 *ring->wptr_cpu_addr = 0; ··· 4008 4007 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4009 4008 /* reset MQD to a clean status */ 4010 4009 if (adev->gfx.kiq[0].mqd_backup) 4011 - memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); 4010 + memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd)); 4012 4011 4013 4012 /* reset ring buffer */ 4014 4013 ring->wptr = 0; ··· 4031 4030 mutex_unlock(&adev->srbm_mutex); 4032 4031 4033 4032 if (adev->gfx.kiq[0].mqd_backup) 4034 - memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); 4033 + memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd)); 4035 4034 } 4036 4035 4037 4036 return 0; ··· 4052 4051 mutex_unlock(&adev->srbm_mutex); 4053 4052 4054 4053 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4055 - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4054 + memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, 
sizeof(*mqd)); 4056 4055 } else { 4057 4056 /* restore MQD to a clean status */ 4058 4057 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4059 - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4058 + memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4060 4059 /* reset ring buffer */ 4061 4060 ring->wptr = 0; 4062 4061 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
+3 -2
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
··· 28 28 #include "nbio/nbio_2_3_offset.h" 29 29 #include "nbio/nbio_2_3_sh_mask.h" 30 30 #include <uapi/linux/kfd_ioctl.h> 31 + #include <linux/device.h> 31 32 #include <linux/pci.h> 32 33 33 34 #define smnPCIE_CONFIG_CNTL 0x11180044 ··· 362 361 363 362 data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; 364 363 365 - if (pci_is_thunderbolt_attached(adev->pdev)) 364 + if (dev_is_removable(&adev->pdev->dev)) 366 365 data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; 367 366 else 368 367 data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; ··· 481 480 482 481 def = data = RREG32_PCIE(smnPCIE_LC_CNTL); 483 482 data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; 484 - if (pci_is_thunderbolt_attached(adev->pdev)) 483 + if (dev_is_removable(&adev->pdev->dev)) 485 484 data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; 486 485 else 487 486 data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+78
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
··· 759 759 return 0; 760 760 } 761 761 762 + 763 + static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev, 764 + uint32_t inst, 765 + uint32_t boot_error) 766 + { 767 + uint32_t socket_id; 768 + uint32_t aid_id; 769 + uint32_t hbm_id; 770 + uint32_t reg_data; 771 + 772 + socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID); 773 + aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID); 774 + hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID); 775 + 776 + reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109); 777 + dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n", 778 + socket_id, aid_id, reg_data); 779 + 780 + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING)) 781 + dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n", 782 + socket_id, aid_id, hbm_id); 783 + 784 + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD)) 785 + dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n", 786 + socket_id, aid_id); 787 + 788 + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING)) 789 + dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n", 790 + socket_id, aid_id); 791 + 792 + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING)) 793 + dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n", 794 + socket_id, aid_id); 795 + 796 + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING)) 797 + dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n", 798 + socket_id, aid_id); 799 + 800 + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING)) 801 + dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n", 802 + socket_id, aid_id); 803 + 804 + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST)) 805 + dev_info(adev->dev, "socket: 
%d, aid: %d, hbm: %d, hbm memory test failed\n", 806 + socket_id, aid_id, hbm_id); 807 + 808 + if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST)) 809 + dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n", 810 + socket_id, aid_id, hbm_id); 811 + } 812 + 813 + static int psp_v13_0_query_boot_status(struct psp_context *psp) 814 + { 815 + struct amdgpu_device *adev = psp->adev; 816 + int inst_mask = adev->aid_mask; 817 + uint32_t reg_data; 818 + uint32_t i; 819 + int ret = 0; 820 + 821 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) 822 + return 0; 823 + 824 + if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10007) 825 + return 0; 826 + 827 + for_each_inst(i, inst_mask) { 828 + reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126); 829 + if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) { 830 + psp_v13_0_boot_error_reporting(adev, i, reg_data); 831 + ret = -EINVAL; 832 + break; 833 + } 834 + } 835 + 836 + return ret; 837 + } 838 + 762 839 static const struct psp_funcs psp_v13_0_funcs = { 763 840 .init_microcode = psp_v13_0_init_microcode, 764 841 .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, ··· 858 781 .update_spirom = psp_v13_0_update_spirom, 859 782 .vbflash_stat = psp_v13_0_vbflash_status, 860 783 .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, 784 + .query_boot_status = psp_v13_0_query_boot_status, 861 785 }; 862 786 863 787 void psp_v13_0_set_psp_funcs(struct psp_context *psp)
+1 -2
drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
··· 91 91 static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) 92 92 { 93 93 return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && 94 - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || 95 - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || 94 + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || 96 95 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || 97 96 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)); 98 97 }
+65 -1
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
··· 1404 1404 return i; 1405 1405 } 1406 1406 1407 + static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev, 1408 + struct kfd_gpu_cache_info *pcache_info) 1409 + { 1410 + struct amdgpu_device *adev = kdev->adev; 1411 + int i = 0; 1412 + 1413 + /* TCP L1 Cache per CU */ 1414 + if (adev->gfx.config.gc_tcp_size_per_cu) { 1415 + pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu; 1416 + pcache_info[i].cache_level = 1; 1417 + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | 1418 + CRAT_CACHE_FLAGS_DATA_CACHE | 1419 + CRAT_CACHE_FLAGS_SIMD_CACHE); 1420 + pcache_info[i].num_cu_shared = 1; 1421 + i++; 1422 + } 1423 + /* Scalar L1 Instruction Cache per SQC */ 1424 + if (adev->gfx.config.gc_l1_instruction_cache_size_per_sqc) { 1425 + pcache_info[i].cache_size = 1426 + adev->gfx.config.gc_l1_instruction_cache_size_per_sqc; 1427 + pcache_info[i].cache_level = 1; 1428 + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | 1429 + CRAT_CACHE_FLAGS_INST_CACHE | 1430 + CRAT_CACHE_FLAGS_SIMD_CACHE); 1431 + pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc; 1432 + i++; 1433 + } 1434 + /* Scalar L1 Data Cache per SQC */ 1435 + if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) { 1436 + pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc; 1437 + pcache_info[i].cache_level = 1; 1438 + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | 1439 + CRAT_CACHE_FLAGS_DATA_CACHE | 1440 + CRAT_CACHE_FLAGS_SIMD_CACHE); 1441 + pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc; 1442 + i++; 1443 + } 1444 + /* L2 Data Cache per GPU (Total Tex Cache) */ 1445 + if (adev->gfx.config.gc_tcc_size) { 1446 + pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size; 1447 + pcache_info[i].cache_level = 2; 1448 + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | 1449 + CRAT_CACHE_FLAGS_DATA_CACHE | 1450 + CRAT_CACHE_FLAGS_SIMD_CACHE); 1451 + pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; 1452 + i++; 1453 + 
} 1454 + /* L3 Data Cache per GPU */ 1455 + if (adev->gmc.mall_size) { 1456 + pcache_info[i].cache_size = adev->gmc.mall_size / 1024; 1457 + pcache_info[i].cache_level = 3; 1458 + pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED | 1459 + CRAT_CACHE_FLAGS_DATA_CACHE | 1460 + CRAT_CACHE_FLAGS_SIMD_CACHE); 1461 + pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; 1462 + i++; 1463 + } 1464 + return i; 1465 + } 1466 + 1407 1467 int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) 1408 1468 { 1409 1469 int num_of_cache_types = 0; ··· 1521 1461 num_of_cache_types = ARRAY_SIZE(vega20_cache_info); 1522 1462 break; 1523 1463 case IP_VERSION(9, 4, 2): 1524 - case IP_VERSION(9, 4, 3): 1525 1464 *pcache_info = aldebaran_cache_info; 1526 1465 num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); 1466 + break; 1467 + case IP_VERSION(9, 4, 3): 1468 + num_of_cache_types = 1469 + kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd, 1470 + *pcache_info); 1527 1471 break; 1528 1472 case IP_VERSION(9, 1, 0): 1529 1473 case IP_VERSION(9, 2, 2):
+16 -2
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 1602 1602 unsigned int cu_sibling_map_mask; 1603 1603 int first_active_cu; 1604 1604 int i, j, k, xcc, start, end; 1605 + int num_xcc = NUM_XCC(knode->xcc_mask); 1605 1606 struct kfd_cache_properties *pcache = NULL; 1607 + enum amdgpu_memory_partition mode; 1608 + struct amdgpu_device *adev = knode->adev; 1606 1609 1607 1610 start = ffs(knode->xcc_mask) - 1; 1608 - end = start + NUM_XCC(knode->xcc_mask); 1611 + end = start + num_xcc; 1609 1612 cu_sibling_map_mask = cu_info->bitmap[start][0][0]; 1610 1613 cu_sibling_map_mask &= 1611 1614 ((1 << pcache_info[cache_type].num_cu_shared) - 1); ··· 1627 1624 pcache->processor_id_low = cu_processor_id 1628 1625 + (first_active_cu - 1); 1629 1626 pcache->cache_level = pcache_info[cache_type].cache_level; 1630 - pcache->cache_size = pcache_info[cache_type].cache_size; 1627 + 1628 + if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3)) 1629 + mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev); 1630 + else 1631 + mode = UNKNOWN_MEMORY_PARTITION_MODE; 1632 + 1633 + if (pcache->cache_level == 2) 1634 + pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc; 1635 + else if (mode) 1636 + pcache->cache_size = pcache_info[cache_type].cache_size / mode; 1637 + else 1638 + pcache->cache_size = pcache_info[cache_type].cache_size; 1631 1639 1632 1640 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) 1633 1641 pcache->cache_type |= HSA_CACHE_TYPE_DATA;
-1
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 4348 4348 srf_updates[i].in_transfer_func || 4349 4349 srf_updates[i].func_shaper || 4350 4350 srf_updates[i].lut3d_func || 4351 - srf_updates[i].blend_tf || 4352 4351 srf_updates[i].surface->force_full_update || 4353 4352 (srf_updates[i].flip_addr && 4354 4353 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+2 -2
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 533 533 for (i = 0; i < MAX_PIPES; i++) { 534 534 struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; 535 535 536 - if (res_ctx->pipe_ctx[i].stream != stream) 536 + if (res_ctx->pipe_ctx[i].stream != stream || !tg) 537 537 continue; 538 538 539 539 return tg->funcs->get_frame_count(tg); ··· 592 592 for (i = 0; i < MAX_PIPES; i++) { 593 593 struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; 594 594 595 - if (res_ctx->pipe_ctx[i].stream != stream) 595 + if (res_ctx->pipe_ctx[i].stream != stream || !tg) 596 596 continue; 597 597 598 598 tg->funcs->get_scanoutpos(tg,
+3
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
··· 613 613 REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); 614 614 REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); 615 615 } else { 616 + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); 616 617 REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4); 617 618 for (i = 0 ; i < num; i++) 618 619 REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); 619 620 REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); 620 621 622 + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); 621 623 REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2); 622 624 for (i = 0 ; i < num; i++) 623 625 REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); 624 626 REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green); 625 627 628 + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); 626 629 REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1); 627 630 for (i = 0 ; i < num; i++) 628 631 REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg);
+1 -1
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
··· 316 316 return true; 317 317 } 318 318 319 - static void hubp3_program_tiling( 319 + void hubp3_program_tiling( 320 320 struct dcn20_hubp *hubp2, 321 321 const union dc_tiling_info *info, 322 322 const enum surface_pixel_format pixel_format)
+5
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
··· 278 278 struct _vcs_dpi_display_rq_regs_st *rq_regs, 279 279 struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); 280 280 281 + void hubp3_program_tiling( 282 + struct dcn20_hubp *hubp2, 283 + const union dc_tiling_info *info, 284 + const enum surface_pixel_format pixel_format); 285 + 281 286 void hubp3_dcc_control(struct hubp *hubp, bool enable, 282 287 enum hubp_ind_block_size blk_size); 283 288
+3
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
··· 237 237 REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg); 238 238 REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red); 239 239 } else { 240 + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); 240 241 REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 4); 241 242 for (i = 0 ; i < num; i++) 242 243 REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg); 243 244 REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red); 244 245 246 + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); 245 247 REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 2); 246 248 for (i = 0 ; i < num; i++) 247 249 REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].green_reg); 248 250 REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_green); 249 251 252 + REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0); 250 253 REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 1); 251 254 for (i = 0 ; i < num; i++) 252 255 REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].blue_reg);
+136 -1
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c
··· 53 53 54 54 /*do nothing for now for dcn3.5 or later*/ 55 55 } 56 + 57 + void hubp35_program_pixel_format( 58 + struct hubp *hubp, 59 + enum surface_pixel_format format) 60 + { 61 + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); 62 + uint32_t green_bar = 1; 63 + uint32_t red_bar = 3; 64 + uint32_t blue_bar = 2; 65 + 66 + /* swap for ABGR format */ 67 + if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 68 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 69 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS 70 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 71 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) { 72 + red_bar = 2; 73 + blue_bar = 3; 74 + } 75 + 76 + REG_UPDATE_3(HUBPRET_CONTROL, 77 + CROSSBAR_SRC_Y_G, green_bar, 78 + CROSSBAR_SRC_CB_B, blue_bar, 79 + CROSSBAR_SRC_CR_R, red_bar); 80 + 81 + /* Mapping is same as ipp programming (cnvc) */ 82 + 83 + switch (format) { 84 + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: 85 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 86 + SURFACE_PIXEL_FORMAT, 1); 87 + break; 88 + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: 89 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 90 + SURFACE_PIXEL_FORMAT, 3); 91 + break; 92 + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: 93 + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: 94 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 95 + SURFACE_PIXEL_FORMAT, 8); 96 + break; 97 + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: 98 + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: 99 + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: 100 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 101 + SURFACE_PIXEL_FORMAT, 10); 102 + break; 103 + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: 104 + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /* we use crossbar already */ 105 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 106 + SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */ 107 + break; 108 + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: 109 + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/ 110 + 
REG_UPDATE(DCSURF_SURFACE_CONFIG, 111 + SURFACE_PIXEL_FORMAT, 24); 112 + break; 113 + 114 + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: 115 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 116 + SURFACE_PIXEL_FORMAT, 65); 117 + break; 118 + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: 119 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 120 + SURFACE_PIXEL_FORMAT, 64); 121 + break; 122 + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: 123 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 124 + SURFACE_PIXEL_FORMAT, 67); 125 + break; 126 + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: 127 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 128 + SURFACE_PIXEL_FORMAT, 66); 129 + break; 130 + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: 131 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 132 + SURFACE_PIXEL_FORMAT, 12); 133 + break; 134 + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: 135 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 136 + SURFACE_PIXEL_FORMAT, 112); 137 + break; 138 + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: 139 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 140 + SURFACE_PIXEL_FORMAT, 113); 141 + break; 142 + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: 143 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 144 + SURFACE_PIXEL_FORMAT, 114); 145 + break; 146 + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: 147 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 148 + SURFACE_PIXEL_FORMAT, 118); 149 + break; 150 + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: 151 + REG_UPDATE(DCSURF_SURFACE_CONFIG, 152 + SURFACE_PIXEL_FORMAT, 119); 153 + break; 154 + case SURFACE_PIXEL_FORMAT_GRPH_RGBE: 155 + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, 156 + SURFACE_PIXEL_FORMAT, 116, 157 + ALPHA_PLANE_EN, 0); 158 + break; 159 + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: 160 + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, 161 + SURFACE_PIXEL_FORMAT, 116, 162 + ALPHA_PLANE_EN, 1); 163 + break; 164 + default: 165 + BREAK_TO_DEBUGGER(); 166 + break; 167 + } 168 + 169 + /* don't see the need of program the xbar in DCN 1.0 */ 170 + } 171 + 172 + void hubp35_program_surface_config( 173 + struct hubp 
*hubp, 174 + enum surface_pixel_format format, 175 + union dc_tiling_info *tiling_info, 176 + struct plane_size *plane_size, 177 + enum dc_rotation_angle rotation, 178 + struct dc_plane_dcc_param *dcc, 179 + bool horizontal_mirror, 180 + unsigned int compat_level) 181 + { 182 + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); 183 + 184 + hubp3_dcc_control_sienna_cichlid(hubp, dcc); 185 + hubp3_program_tiling(hubp2, tiling_info, format); 186 + hubp2_program_size(hubp, format, plane_size, dcc); 187 + hubp2_program_rotation(hubp, rotation, horizontal_mirror); 188 + hubp35_program_pixel_format(hubp, format); 189 + } 190 + 56 191 struct hubp_funcs dcn35_hubp_funcs = { 57 192 .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, 58 193 .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, 59 194 .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr, 60 - .hubp_program_surface_config = hubp3_program_surface_config, 195 + .hubp_program_surface_config = hubp35_program_surface_config, 61 196 .hubp_is_flip_pending = hubp2_is_flip_pending, 62 197 .hubp_setup = hubp3_setup, 63 198 .hubp_setup_interdependent = hubp2_setup_interdependent,
+14
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h
··· 58 58 59 59 void hubp35_set_fgcg(struct hubp *hubp, bool enable); 60 60 61 + void hubp35_program_pixel_format( 62 + struct hubp *hubp, 63 + enum surface_pixel_format format); 64 + 65 + void hubp35_program_surface_config( 66 + struct hubp *hubp, 67 + enum surface_pixel_format format, 68 + union dc_tiling_info *tiling_info, 69 + struct plane_size *plane_size, 70 + enum dc_rotation_angle rotation, 71 + struct dc_plane_dcc_param *dcc, 72 + bool horizontal_mirror, 73 + unsigned int compat_level); 74 + 61 75 #endif /* __DC_HUBP_DCN35_H__ */
+4
drivers/gpu/drm/amd/display/dc/dml2/Makefile
··· 60 60 endif 61 61 62 62 ifneq ($(CONFIG_FRAME_WARN),0) 63 + ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) 64 + frame_warn_flag := -Wframe-larger-than=3072 65 + else 63 66 frame_warn_flag := -Wframe-larger-than=2048 67 + endif 64 68 endif 65 69 66 70 CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
+28
drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h
··· 242 242 //MP0_SMN_C2PMSG_103 243 243 #define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 244 244 #define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL 245 + //MP0_SMN_C2PMSG_109 246 + #define MP0_SMN_C2PMSG_109__CONTENT__SHIFT 0x0 247 + #define MP0_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL 248 + //MP0_SMN_C2PMSG_126 249 + #define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING__SHIFT 0x0 250 + #define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD__SHIFT 0x1 251 + #define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING__SHIFT 0x2 252 + #define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING__SHIFT 0x3 253 + #define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING__SHIFT 0x4 254 + #define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING__SHIFT 0x5 255 + #define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST__SHIFT 0x6 256 + #define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST__SHIFT 0x7 257 + #define MP0_SMN_C2PMSG_126__SOCKET_ID__SHIFT 0x8 258 + #define MP0_SMN_C2PMSG_126__AID_ID__SHIFT 0xb 259 + #define MP0_SMN_C2PMSG_126__HBM_ID__SHIFT 0xd 260 + #define MP0_SMN_C2PMSG_126__BOOT_STATUS__SHIFT 0x1f 261 + #define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING_MASK 0x00000001L 262 + #define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD_MASK 0x00000002L 263 + #define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING_MASK 0x00000004L 264 + #define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING_MASK 0x00000008L 265 + #define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING_MASK 0x00000010L 266 + #define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING_MASK 0x00000020L 267 + #define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST_MASK 0x00000040L 268 + #define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST_MASK 0x00000080L 269 + #define MP0_SMN_C2PMSG_126__SOCKET_ID_MASK 0x00000700L 270 + #define MP0_SMN_C2PMSG_126__AID_ID_MASK 0x00001800L 271 + #define MP0_SMN_C2PMSG_126__HBM_ID_MASK 0x00002000L 272 + #define MP0_SMN_C2PMSG_126__BOOT_STATUS_MASK 0x80000000L 245 273 //MP0_SMN_IH_CREDIT 246 274 #define 
MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 247 275 #define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+16 -14
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 1080 1080 uint16_t average_ipu_activity[8]; 1081 1081 /* time filtered per-core C0 residency % [0-100]*/ 1082 1082 uint16_t average_core_c0_activity[16]; 1083 - /* time filtered DRAM read bandwidth [GB/sec] */ 1083 + /* time filtered DRAM read bandwidth [MB/sec] */ 1084 1084 uint16_t average_dram_reads; 1085 - /* time filtered DRAM write bandwidth [GB/sec] */ 1085 + /* time filtered DRAM write bandwidth [MB/sec] */ 1086 1086 uint16_t average_dram_writes; 1087 1087 1088 1088 /* Driver attached timestamp (in ns) */ 1089 1089 uint64_t system_clock_counter; 1090 1090 1091 1091 /* Power/Energy */ 1092 - /* average dGPU + APU power on A + A platform */ 1092 + /* time filtered power used for PPT/STAPM [APU+dGPU] [mW] */ 1093 1093 uint32_t average_socket_power; 1094 - /* average IPU power [W] */ 1094 + /* time filtered IPU power [mW] */ 1095 1095 uint16_t average_ipu_power; 1096 - /* average APU power [W] */ 1096 + /* time filtered APU power [mW] */ 1097 1097 uint32_t average_apu_power; 1098 - /* average dGPU power [W] */ 1098 + /* time filtered GFX power [mW] */ 1099 + uint32_t average_gfx_power; 1100 + /* time filtered dGPU power [mW] */ 1099 1101 uint32_t average_dgpu_power; 1100 - /* sum of core power across all cores in the socket [W] */ 1101 - uint32_t average_core_power; 1102 - /* calculated core power [W] */ 1103 - uint16_t core_power[16]; 1104 - /* maximum IRM defined STAPM power limit [W] */ 1102 + /* time filtered sum of core power across all cores in the socket [mW] */ 1103 + uint32_t average_all_core_power; 1104 + /* calculated core power [mW] */ 1105 + uint16_t average_core_power[16]; 1106 + /* maximum IRM defined STAPM power limit [mW] */ 1105 1107 uint16_t stapm_power_limit; 1106 - /* time filtered STAPM power limit [W] */ 1108 + /* time filtered STAPM power limit [mW] */ 1107 1109 uint16_t current_stapm_power_limit; 1108 1110 1109 - /* Average clocks */ 1111 + /* time filtered clocks [MHz] */ 1110 1112 uint16_t average_gfxclk_frequency; 1111 1113 
uint16_t average_socclk_frequency; 1112 1114 uint16_t average_vpeclk_frequency; ··· 1117 1115 uint16_t average_vclk_frequency; 1118 1116 1119 1117 /* Current clocks */ 1120 - /* target core frequency */ 1118 + /* target core frequency [MHz] */ 1121 1119 uint16_t current_coreclk[16]; 1122 1120 /* CCLK frequency limit enforced on classic cores [MHz] */ 1123 1121 uint16_t current_core_maxfreq;
+14 -13
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 3288 3288 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 3289 3289 uint32_t tmp; 3290 3290 3291 - /* under multi-vf mode, the hwmon attributes are all not supported */ 3292 - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 3293 - return 0; 3294 - 3295 3291 /* under pp one vf mode manage of hwmon attributes is not supported */ 3296 3292 if (amdgpu_sriov_is_pp_one_vf(adev)) 3297 3293 effective_mode &= ~S_IWUSR; ··· 4158 4162 4159 4163 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 4160 4164 { 4165 + enum amdgpu_sriov_vf_mode mode; 4161 4166 uint32_t mask = 0; 4162 4167 int ret; 4163 4168 ··· 4170 4173 if (adev->pm.dpm_enabled == 0) 4171 4174 return 0; 4172 4175 4173 - adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 4174 - DRIVER_NAME, adev, 4175 - hwmon_groups); 4176 - if (IS_ERR(adev->pm.int_hwmon_dev)) { 4177 - ret = PTR_ERR(adev->pm.int_hwmon_dev); 4178 - dev_err(adev->dev, 4179 - "Unable to register hwmon device: %d\n", ret); 4180 - return ret; 4176 + mode = amdgpu_virt_get_sriov_vf_mode(adev); 4177 + 4178 + /* under multi-vf mode, the hwmon attributes are all not supported */ 4179 + if (mode != SRIOV_VF_MODE_MULTI_VF) { 4180 + adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 4181 + DRIVER_NAME, adev, 4182 + hwmon_groups); 4183 + if (IS_ERR(adev->pm.int_hwmon_dev)) { 4184 + ret = PTR_ERR(adev->pm.int_hwmon_dev); 4185 + dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret); 4186 + return ret; 4187 + } 4181 4188 } 4182 4189 4183 - switch (amdgpu_virt_get_sriov_vf_mode(adev)) { 4190 + switch (mode) { 4184 4191 case SRIOV_VF_MODE_ONE_VF: 4185 4192 mask = ATTR_FLAG_ONEVF; 4186 4193 break;
+3
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
··· 1022 1022 *limit /= 100; 1023 1023 } 1024 1024 break; 1025 + case PP_PWR_LIMIT_MIN: 1026 + *limit = 0; 1027 + break; 1025 1028 default: 1026 1029 ret = -EOPNOTSUPP; 1027 1030 break;
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
··· 367 367 typedef struct _ATOM_Tonga_VCE_State_Table { 368 368 UCHAR ucRevId; 369 369 UCHAR ucNumEntries; 370 - ATOM_Tonga_VCE_State_Record entries[1]; 370 + ATOM_Tonga_VCE_State_Record entries[]; 371 371 } ATOM_Tonga_VCE_State_Table; 372 372 373 373 typedef struct _ATOM_Tonga_PowerTune_Table { ··· 481 481 typedef struct _ATOM_Tonga_Hard_Limit_Table { 482 482 UCHAR ucRevId; 483 483 UCHAR ucNumEntries; 484 - ATOM_Tonga_Hard_Limit_Record entries[1]; 484 + ATOM_Tonga_Hard_Limit_Record entries[]; 485 485 } ATOM_Tonga_Hard_Limit_Table; 486 486 487 487 typedef struct _ATOM_Tonga_GPIO_Table {
+12 -12
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
··· 129 129 typedef struct _ATOM_Vega10_State_Array { 130 130 UCHAR ucRevId; 131 131 UCHAR ucNumEntries; /* Number of entries. */ 132 - ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */ 132 + ATOM_Vega10_State states[]; /* Dynamically allocate entries. */ 133 133 } ATOM_Vega10_State_Array; 134 134 135 135 typedef struct _ATOM_Vega10_CLK_Dependency_Record { ··· 169 169 typedef struct _ATOM_Vega10_MCLK_Dependency_Table { 170 170 UCHAR ucRevId; 171 171 UCHAR ucNumEntries; /* Number of entries. */ 172 - ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 172 + ATOM_Vega10_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ 173 173 } ATOM_Vega10_MCLK_Dependency_Table; 174 174 175 175 typedef struct _ATOM_Vega10_SOCCLK_Dependency_Table { 176 176 UCHAR ucRevId; 177 177 UCHAR ucNumEntries; /* Number of entries. */ 178 - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 178 + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ 179 179 } ATOM_Vega10_SOCCLK_Dependency_Table; 180 180 181 181 typedef struct _ATOM_Vega10_DCEFCLK_Dependency_Table { 182 182 UCHAR ucRevId; 183 183 UCHAR ucNumEntries; /* Number of entries. */ 184 - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 184 + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ 185 185 } ATOM_Vega10_DCEFCLK_Dependency_Table; 186 186 187 187 typedef struct _ATOM_Vega10_PIXCLK_Dependency_Table { 188 188 UCHAR ucRevId; 189 189 UCHAR ucNumEntries; /* Number of entries. */ 190 - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 190 + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. 
*/ 191 191 } ATOM_Vega10_PIXCLK_Dependency_Table; 192 192 193 193 typedef struct _ATOM_Vega10_DISPCLK_Dependency_Table { 194 194 UCHAR ucRevId; 195 195 UCHAR ucNumEntries; /* Number of entries.*/ 196 - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 196 + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ 197 197 } ATOM_Vega10_DISPCLK_Dependency_Table; 198 198 199 199 typedef struct _ATOM_Vega10_PHYCLK_Dependency_Table { 200 200 UCHAR ucRevId; 201 201 UCHAR ucNumEntries; /* Number of entries. */ 202 - ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ 202 + ATOM_Vega10_CLK_Dependency_Record entries[]; /* Dynamically allocate entries. */ 203 203 } ATOM_Vega10_PHYCLK_Dependency_Table; 204 204 205 205 typedef struct _ATOM_Vega10_MM_Dependency_Record { ··· 213 213 typedef struct _ATOM_Vega10_MM_Dependency_Table { 214 214 UCHAR ucRevId; 215 215 UCHAR ucNumEntries; /* Number of entries */ 216 - ATOM_Vega10_MM_Dependency_Record entries[1]; /* Dynamically allocate entries */ 216 + ATOM_Vega10_MM_Dependency_Record entries[]; /* Dynamically allocate entries */ 217 217 } ATOM_Vega10_MM_Dependency_Table; 218 218 219 219 typedef struct _ATOM_Vega10_PCIE_Record { ··· 225 225 typedef struct _ATOM_Vega10_PCIE_Table { 226 226 UCHAR ucRevId; 227 227 UCHAR ucNumEntries; /* Number of entries */ 228 - ATOM_Vega10_PCIE_Record entries[1]; /* Dynamically allocate entries. */ 228 + ATOM_Vega10_PCIE_Record entries[]; /* Dynamically allocate entries. 
*/ 229 229 } ATOM_Vega10_PCIE_Table; 230 230 231 231 typedef struct _ATOM_Vega10_Voltage_Lookup_Record { ··· 235 235 typedef struct _ATOM_Vega10_Voltage_Lookup_Table { 236 236 UCHAR ucRevId; 237 237 UCHAR ucNumEntries; /* Number of entries */ 238 - ATOM_Vega10_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries */ 238 + ATOM_Vega10_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries */ 239 239 } ATOM_Vega10_Voltage_Lookup_Table; 240 240 241 241 typedef struct _ATOM_Vega10_Fan_Table { ··· 327 327 typedef struct _ATOM_Vega10_VCE_State_Table { 328 328 UCHAR ucRevId; 329 329 UCHAR ucNumEntries; 330 - ATOM_Vega10_VCE_State_Record entries[1]; 330 + ATOM_Vega10_VCE_State_Record entries[]; 331 331 } ATOM_Vega10_VCE_State_Table; 332 332 333 333 typedef struct _ATOM_Vega10_PowerTune_Table { ··· 427 427 typedef struct _ATOM_Vega10_Hard_Limit_Table { 428 428 UCHAR ucRevId; 429 429 UCHAR ucNumEntries; 430 - ATOM_Vega10_Hard_Limit_Record entries[1]; 430 + ATOM_Vega10_Hard_Limit_Record entries[]; 431 431 } ATOM_Vega10_Hard_Limit_Table; 432 432 433 433 typedef struct _Vega10_PPTable_Generic_SubTable_Header {
+31 -2
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 733 733 smu->adev = adev; 734 734 smu->pm_enabled = !!amdgpu_dpm; 735 735 smu->is_apu = false; 736 - smu->smu_baco.state = SMU_BACO_STATE_EXIT; 736 + smu->smu_baco.state = SMU_BACO_STATE_NONE; 737 737 smu->smu_baco.platform_support = false; 738 738 smu->user_dpm_profile.fan_mode = -1; 739 739 ··· 1742 1742 return 0; 1743 1743 } 1744 1744 1745 + static int smu_reset_mp1_state(struct smu_context *smu) 1746 + { 1747 + struct amdgpu_device *adev = smu->adev; 1748 + int ret = 0; 1749 + 1750 + if ((!adev->in_runpm) && (!adev->in_suspend) && 1751 + (!amdgpu_in_reset(adev))) 1752 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1753 + case IP_VERSION(13, 0, 0): 1754 + case IP_VERSION(13, 0, 7): 1755 + case IP_VERSION(13, 0, 10): 1756 + ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); 1757 + break; 1758 + default: 1759 + break; 1760 + } 1761 + 1762 + return ret; 1763 + } 1764 + 1745 1765 static int smu_hw_fini(void *handle) 1746 1766 { 1747 1767 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1748 1768 struct smu_context *smu = adev->powerplay.pp_handle; 1769 + int ret; 1749 1770 1750 1771 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1751 1772 return 0; ··· 1784 1763 1785 1764 adev->pm.dpm_enabled = false; 1786 1765 1787 - return smu_smc_hw_cleanup(smu); 1766 + ret = smu_smc_hw_cleanup(smu); 1767 + if (ret) 1768 + return ret; 1769 + 1770 + ret = smu_reset_mp1_state(smu); 1771 + if (ret) 1772 + return ret; 1773 + 1774 + return 0; 1788 1775 } 1789 1776 1790 1777 static void smu_late_fini(void *handle)
+1
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
··· 419 419 enum smu_baco_state { 420 420 SMU_BACO_STATE_ENTER = 0, 421 421 SMU_BACO_STATE_EXIT, 422 + SMU_BACO_STATE_NONE, 422 423 }; 423 424 424 425 struct smu_baco_context {
+31 -89
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
··· 150 150 } DpmClocks_t; 151 151 152 152 typedef struct { 153 - uint16_t CoreFrequency[16]; //Target core frequency [MHz] 154 - uint16_t CorePower[16]; //CAC calculated core power [W] [Q8.8] 155 - uint16_t CoreTemperature[16]; //TSEN measured core temperature [C] [Q8.8] 156 - uint16_t GfxTemperature; //TSEN measured GFX temperature [C] [Q8.8] 157 - uint16_t SocTemperature; //TSEN measured SOC temperature [C] [Q8.8] 158 - uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [W] [Q8.8] 159 - uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [W] [Q8.8] 160 - uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz] 161 - uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz] 162 - uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [C] [Q8.8] 163 - uint16_t AverageGfxclkFrequency; //Time filtered target GFXCLK frequency [MHz] 164 - uint16_t AverageFclkFrequency; //Time filtered target FCLK frequency [MHz] 165 - uint16_t AverageGfxActivity; //Time filtered GFX busy % [0-100] [Q8.8] 166 - uint16_t AverageSocclkFrequency; //Time filtered target SOCCLK frequency [MHz] 167 - uint16_t AverageVclkFrequency; //Time filtered target VCLK frequency [MHz] 168 - uint16_t AverageVcnActivity; //Time filtered VCN busy % [0-100] [Q8.8] 169 - uint16_t AverageVpeclkFrequency; //Time filtered target VPECLK frequency [MHz] 170 - uint16_t AverageIpuclkFrequency; //Time filtered target IPUCLK frequency [MHz] 171 - uint16_t AverageIpuBusy[8]; //Time filtered IPU per-column busy % [0-100] [Q8.8] 172 - uint16_t AverageDRAMReads; //Time filtered DRAM read bandwidth [GB/sec] [Q8.8] 173 - uint16_t AverageDRAMWrites; //Time filtered DRAM write bandwidth [GB/sec] [Q8.8] 174 - uint16_t AverageCoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100] [Q8.8] 175 - uint16_t IpuPower; //Time filtered IPU power [W] [Q8.8] 176 - uint32_t ApuPower; //Time filtered APU power [W] 
[Q24.8] 177 - uint32_t dGpuPower; //Time filtered dGPU power [W] [Q24.8] 178 - uint32_t AverageSocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [W] [Q24.8] 179 - uint32_t AverageCorePower; //Time filtered sum of core power across all cores in the socket [W] [Q24.8] 180 - uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us] 181 - uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles] 153 + uint16_t CoreFrequency[16]; //Target core frequency [MHz] 154 + uint16_t CorePower[16]; //CAC calculated core power [mW] 155 + uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C] 156 + uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C] 157 + uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C] 158 + uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW] 159 + uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW] 160 + uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz] 161 + uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz] 162 + uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C] 163 + uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz] 164 + uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz] 165 + uint16_t GfxActivity; //Time filtered GFX busy % [0-100] 166 + uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz] 167 + uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz] 168 + uint16_t VcnActivity; //Time filtered VCN busy % [0-100] 169 + uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz] 170 + uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz] 171 + uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100] 172 + uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec] 173 + 
uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec] 174 + uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100] 175 + uint16_t IpuPower; //Time filtered IPU power [mW] 176 + uint32_t ApuPower; //Time filtered APU power [mW] 177 + uint32_t GfxPower; //Time filtered GFX power [mW] 178 + uint32_t dGpuPower; //Time filtered dGPU power [mW] 179 + uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW] 180 + uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW] 181 + uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us] 182 + uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles] 183 + uint32_t spare[16]; 182 184 } SmuMetrics_t; 183 - 184 - typedef struct { 185 - uint16_t GfxclkFrequency; //[MHz] 186 - uint16_t SocclkFrequency; //[MHz] 187 - uint16_t VclkFrequency; //[MHz] 188 - uint16_t DclkFrequency; //[MHz] 189 - uint16_t MemclkFrequency; //[MHz] 190 - uint16_t spare; 191 - uint16_t UvdActivity; //[centi] 192 - uint16_t GfxActivity; //[centi] 193 - 194 - uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC 195 - uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC 196 - uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC 197 - 198 - uint16_t CoreFrequency[8]; //[MHz] 199 - uint16_t CorePower[8]; //[mW] 200 - uint16_t CoreTemperature[8]; //[centi-Celsius] 201 - uint16_t L3Frequency[2]; //[MHz] 202 - uint16_t L3Temperature[2]; //[centi-Celsius] 203 - 204 - uint16_t spare2[24]; 205 - 206 - uint16_t GfxTemperature; //[centi-Celsius] 207 - uint16_t SocTemperature; //[centi-Celsius] 208 - uint16_t ThrottlerStatus; 209 - 210 - uint16_t CurrentSocketPower; //[mW] 211 - uint16_t StapmOpnLimit; //[W] 212 - uint16_t StapmCurrentLimit; //[W] 213 - uint32_t ApuPower; //[mW] 214 - uint32_t dGpuPower; //[mW] 215 - 216 - uint16_t VddTdcValue; //[mA] 217 - uint16_t SocTdcValue; //[mA] 218 - uint16_t 
VddEdcValue; //[mA] 219 - uint16_t SocEdcValue; //[mA] 220 - 221 - uint16_t InfrastructureCpuMaxFreq; //[MHz] 222 - uint16_t InfrastructureGfxMaxFreq; //[MHz] 223 - 224 - uint16_t SkinTemp; 225 - uint16_t DeviceState; 226 - uint16_t CurTemp; //[centi-Celsius] 227 - uint16_t FilterAlphaValue; //[m] 228 - 229 - uint16_t AverageGfxclkFrequency; 230 - uint16_t AverageFclkFrequency; 231 - uint16_t AverageGfxActivity; 232 - uint16_t AverageSocclkFrequency; 233 - uint16_t AverageVclkFrequency; 234 - uint16_t AverageVcnActivity; 235 - uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads 236 - uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes 237 - uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower 238 - uint16_t AverageCorePower[2]; //Filtered of [sum of CorePower[8] per ccx]) 239 - uint16_t AverageCoreC0Residency[16]; //Filtered of [average C0 residency % per core] 240 - uint16_t spare1; 241 - uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing. 242 - } SmuMetrics_legacy_t; 243 185 244 186 //ISP tile definitions 245 187 typedef enum {
+2
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
··· 299 299 uint8_t pcie_gen_cap, 300 300 uint8_t pcie_width_cap); 301 301 302 + int smu_v13_0_disable_pmfw_state(struct smu_context *smu); 303 + 302 304 #endif 303 305 #endif
+4 -13
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 234 234 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 235 235 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 236 236 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 237 + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), 238 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 237 239 238 - if (smu->smc_fw_if_version < 0x3) { 239 - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), 240 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 241 - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); 242 - } else { 243 - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 244 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 245 - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 246 - } 240 + smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL); 247 241 if (!smu_table->metrics_table) 248 242 goto err0_out; 249 243 smu_table->metrics_time = 0; 250 244 251 - if (smu->smc_fw_version >= 0x043F3E00) 252 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3); 253 - else 254 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 245 + smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2)); 255 246 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 256 247 if (!smu_table->gpu_metrics_table) 257 248 goto err1_out;
+13
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 2477 2477 2478 2478 return 0; 2479 2479 } 2480 + 2481 + int smu_v13_0_disable_pmfw_state(struct smu_context *smu) 2482 + { 2483 + int ret; 2484 + struct amdgpu_device *adev = smu->adev; 2485 + 2486 + WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0); 2487 + 2488 + ret = RREG32_PCIE(MP1_Public | 2489 + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 2490 + 2491 + return ret == 0 ? 0 : -EINVAL; 2492 + }
+21 -37
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 354 354 if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC) 355 355 smu->dc_controlled_by_gpio = true; 356 356 357 - if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO || 358 - powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) 357 + if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) { 359 358 smu_baco->platform_support = true; 360 359 361 - if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) 362 - smu_baco->maco_support = true; 360 + if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) 361 + smu_baco->maco_support = true; 362 + } 363 363 364 364 if (!overdrive_lowerlimits->FeatureCtrlMask || 365 365 !overdrive_upperlimits->FeatureCtrlMask) ··· 2530 2530 } 2531 2531 } 2532 2532 2533 - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && 2534 - (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) || 2535 - ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) { 2536 - ret = smu_cmn_update_table(smu, 2537 - SMU_TABLE_ACTIVITY_MONITOR_COEFF, 2538 - WORKLOAD_PPLIB_COMPUTE_BIT, 2539 - (void *)(&activity_monitor_external), 2540 - false); 2541 - if (ret) { 2542 - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 2543 - return ret; 2544 - } 2545 - 2546 - ret = smu_cmn_update_table(smu, 2547 - SMU_TABLE_ACTIVITY_MONITOR_COEFF, 2548 - WORKLOAD_PPLIB_CUSTOM_BIT, 2549 - (void *)(&activity_monitor_external), 2550 - true); 2551 - if (ret) { 2552 - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 2553 - return ret; 2554 - } 2555 - 2556 - workload_type = smu_cmn_to_asic_specific_index(smu, 2557 - CMN2ASIC_MAPPING_WORKLOAD, 2558 - PP_SMC_POWER_PROFILE_CUSTOM); 2559 - } else { 2560 - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 2561 - workload_type = smu_cmn_to_asic_specific_index(smu, 2533 + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 
2534 + workload_type = smu_cmn_to_asic_specific_index(smu, 2562 2535 CMN2ASIC_MAPPING_WORKLOAD, 2563 2536 smu->power_profile_mode); 2564 - } 2565 2537 2566 2538 if (workload_type < 0) 2567 2539 return -EINVAL; ··· 2574 2602 static int smu_v13_0_0_baco_exit(struct smu_context *smu) 2575 2603 { 2576 2604 struct amdgpu_device *adev = smu->adev; 2605 + int ret; 2577 2606 2578 2607 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2579 2608 /* Wait for PMFW handling for the Dstate change */ 2580 2609 usleep_range(10000, 11000); 2581 - return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2610 + ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2582 2611 } else { 2583 - return smu_v13_0_baco_exit(smu); 2612 + ret = smu_v13_0_baco_exit(smu); 2584 2613 } 2614 + 2615 + if (!ret) 2616 + adev->gfx.is_poweron = false; 2617 + 2618 + return ret; 2585 2619 } 2586 2620 2587 2621 static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) ··· 2772 2794 2773 2795 switch (mp1_state) { 2774 2796 case PP_MP1_STATE_UNLOAD: 2775 - ret = smu_cmn_set_mp1_state(smu, mp1_state); 2797 + ret = smu_cmn_send_smc_msg_with_param(smu, 2798 + SMU_MSG_PrepareMp1ForUnload, 2799 + 0x55, NULL); 2800 + 2801 + if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) 2802 + ret = smu_v13_0_disable_pmfw_state(smu); 2803 + 2776 2804 break; 2777 2805 default: 2778 2806 /* Ignore others */
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 270 270 struct amdgpu_device *adev = smu->adev; 271 271 uint32_t p2s_table_id = P2S_TABLE_ID_A; 272 272 int ret = 0, i, p2stable_count; 273 - char ucode_prefix[30]; 273 + char ucode_prefix[15]; 274 274 char fw_name[30]; 275 275 276 276 /* No need to load P2S tables in IOV mode */
+20 -7
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 346 346 if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC) 347 347 smu->dc_controlled_by_gpio = true; 348 348 349 - if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO || 350 - powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO) 349 + if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) { 351 350 smu_baco->platform_support = true; 352 351 353 - if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled)) 354 - smu_baco->maco_support = true; 352 + if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO) 353 + && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled)) 354 + smu_baco->maco_support = true; 355 + } 355 356 356 357 if (!overdrive_lowerlimits->FeatureCtrlMask || 357 358 !overdrive_upperlimits->FeatureCtrlMask) ··· 2499 2498 2500 2499 switch (mp1_state) { 2501 2500 case PP_MP1_STATE_UNLOAD: 2502 - ret = smu_cmn_set_mp1_state(smu, mp1_state); 2501 + ret = smu_cmn_send_smc_msg_with_param(smu, 2502 + SMU_MSG_PrepareMp1ForUnload, 2503 + 0x55, NULL); 2504 + 2505 + if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) 2506 + ret = smu_v13_0_disable_pmfw_state(smu); 2507 + 2503 2508 break; 2504 2509 default: 2505 2510 /* Ignore others */ ··· 2531 2524 static int smu_v13_0_7_baco_exit(struct smu_context *smu) 2532 2525 { 2533 2526 struct amdgpu_device *adev = smu->adev; 2527 + int ret; 2534 2528 2535 2529 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2536 2530 /* Wait for PMFW handling for the Dstate change */ 2537 2531 usleep_range(10000, 11000); 2538 - return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2532 + ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2539 2533 } else { 2540 - return smu_v13_0_baco_exit(smu); 2534 + ret = smu_v13_0_baco_exit(smu); 2541 2535 } 2536 + 2537 + if (!ret) 2538 + adev->gfx.is_poweron = false; 2539 + 2540 + return ret; 2542 2541 } 2543 2542 2544 2543 static bool 
smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+3 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
··· 57 57 { 58 58 struct amdgpu_device *adev = smu->adev; 59 59 char fw_name[30]; 60 - char ucode_prefix[30]; 60 + char ucode_prefix[15]; 61 61 int err = 0; 62 62 const struct smc_firmware_header_v1_0 *hdr; 63 63 const struct common_firmware_header *header; ··· 229 229 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; 230 230 break; 231 231 case IP_VERSION(14, 0, 0): 232 + if ((smu->smc_fw_version < 0x5d3a00)) 233 + dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version); 232 234 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; 233 235 break; 234 236 default:
+45 -215
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
··· 156 156 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 157 157 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 158 158 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 159 - if (smu->smc_fw_version > 0x5d3500) { 160 - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 161 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 162 - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 163 - } else { 164 - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), 165 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 166 - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); 167 - } 159 + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 160 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 161 + 162 + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 168 163 if (!smu_table->metrics_table) 169 164 goto err0_out; 170 165 smu_table->metrics_time = 0; ··· 172 177 if (!smu_table->watermarks_table) 173 178 goto err2_out; 174 179 175 - if (smu->smc_fw_version > 0x5d3500) 176 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0); 177 - else 178 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1); 180 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0); 179 181 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 180 182 if (!smu_table->gpu_metrics_table) 181 183 goto err3_out; ··· 234 242 235 243 switch (member) { 236 244 case METRICS_AVERAGE_GFXCLK: 237 - *value = metrics->AverageGfxclkFrequency; 245 + *value = metrics->GfxclkFrequency; 238 246 break; 239 247 case METRICS_AVERAGE_SOCCLK: 240 - *value = metrics->AverageSocclkFrequency; 248 + *value = metrics->SocclkFrequency; 241 249 break; 242 250 case METRICS_AVERAGE_VCLK: 243 - *value = metrics->AverageVclkFrequency; 251 + *value = metrics->VclkFrequency; 244 252 break; 245 253 case METRICS_AVERAGE_DCLK: 246 254 *value = 0; ··· 249 257 *value = 0; 250 258 break; 251 259 
case METRICS_AVERAGE_FCLK: 252 - *value = metrics->AverageFclkFrequency; 260 + *value = metrics->FclkFrequency; 253 261 break; 254 262 case METRICS_AVERAGE_GFXACTIVITY: 255 - *value = metrics->AverageGfxActivity >> 8; 263 + *value = metrics->GfxActivity / 100; 256 264 break; 257 265 case METRICS_AVERAGE_VCNACTIVITY: 258 - *value = metrics->AverageVcnActivity >> 8; 266 + *value = metrics->VcnActivity / 100; 259 267 break; 260 268 case METRICS_AVERAGE_SOCKETPOWER: 261 269 case METRICS_CURR_SOCKETPOWER: 262 - *value = (metrics->AverageSocketPower & 0xff00) + 263 - ((metrics->AverageSocketPower & 0xff) * 100 >> 8); 270 + *value = (metrics->SocketPower / 1000 << 8) + 271 + (metrics->SocketPower % 1000 / 10); 264 272 break; 265 273 case METRICS_TEMPERATURE_EDGE: 266 - *value = (metrics->GfxTemperature >> 8) * 274 + *value = metrics->GfxTemperature / 100 * 267 275 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 268 276 break; 269 277 case METRICS_TEMPERATURE_HOTSPOT: 270 - *value = (metrics->SocTemperature >> 8) * 278 + *value = metrics->SocTemperature / 100 * 271 279 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 272 280 break; 273 281 case METRICS_THROTTLER_STATUS: ··· 309 317 return ret; 310 318 } 311 319 312 - static int smu_v14_0_0_legacy_get_smu_metrics_data(struct smu_context *smu, 313 - MetricsMember_t member, 314 - uint32_t *value) 315 - { 316 - struct smu_table_context *smu_table = &smu->smu_table; 317 - 318 - SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; 319 - int ret = 0; 320 - 321 - ret = smu_cmn_get_metrics_table(smu, NULL, false); 322 - if (ret) 323 - return ret; 324 - 325 - switch (member) { 326 - case METRICS_AVERAGE_GFXCLK: 327 - *value = metrics->GfxclkFrequency; 328 - break; 329 - case METRICS_AVERAGE_SOCCLK: 330 - *value = metrics->SocclkFrequency; 331 - break; 332 - case METRICS_AVERAGE_VCLK: 333 - *value = metrics->VclkFrequency; 334 - break; 335 - case METRICS_AVERAGE_DCLK: 336 - *value = metrics->DclkFrequency; 337 - break; 338 - 
case METRICS_AVERAGE_UCLK: 339 - *value = metrics->MemclkFrequency; 340 - break; 341 - case METRICS_AVERAGE_GFXACTIVITY: 342 - *value = metrics->GfxActivity / 100; 343 - break; 344 - case METRICS_AVERAGE_FCLK: 345 - *value = metrics->AverageFclkFrequency; 346 - break; 347 - case METRICS_AVERAGE_VCNACTIVITY: 348 - *value = metrics->UvdActivity; 349 - break; 350 - case METRICS_AVERAGE_SOCKETPOWER: 351 - *value = (metrics->AverageSocketPower << 8) / 1000; 352 - break; 353 - case METRICS_CURR_SOCKETPOWER: 354 - *value = (metrics->CurrentSocketPower << 8) / 1000; 355 - break; 356 - case METRICS_TEMPERATURE_EDGE: 357 - *value = metrics->GfxTemperature / 100 * 358 - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 359 - break; 360 - case METRICS_TEMPERATURE_HOTSPOT: 361 - *value = metrics->SocTemperature / 100 * 362 - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 363 - break; 364 - case METRICS_THROTTLER_STATUS: 365 - *value = metrics->ThrottlerStatus; 366 - break; 367 - case METRICS_VOLTAGE_VDDGFX: 368 - *value = metrics->Voltage[0]; 369 - break; 370 - case METRICS_VOLTAGE_VDDSOC: 371 - *value = metrics->Voltage[1]; 372 - break; 373 - case METRICS_SS_APU_SHARE: 374 - /* return the percentage of APU power with respect to APU's power limit. 375 - * percentage is reported, this isn't boost value. Smartshift power 376 - * boost/shift is only when the percentage is more than 100. 377 - */ 378 - if (metrics->StapmOpnLimit > 0) 379 - *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit; 380 - else 381 - *value = 0; 382 - break; 383 - case METRICS_SS_DGPU_SHARE: 384 - /* return the percentage of dGPU power with respect to dGPU's power limit. 385 - * percentage is reported, this isn't boost value. Smartshift power 386 - * boost/shift is only when the percentage is more than 100. 
387 - */ 388 - if ((metrics->dGpuPower > 0) && 389 - (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)) 390 - *value = (metrics->dGpuPower * 100) / 391 - (metrics->StapmCurrentLimit - metrics->StapmOpnLimit); 392 - else 393 - *value = 0; 394 - break; 395 - default: 396 - *value = UINT_MAX; 397 - break; 398 - } 399 - 400 - return ret; 401 - } 402 - 403 - static int smu_v14_0_0_common_get_smu_metrics_data(struct smu_context *smu, 404 - MetricsMember_t member, 405 - uint32_t *value) 406 - { 407 - if (smu->smc_fw_version > 0x5d3500) 408 - return smu_v14_0_0_get_smu_metrics_data(smu, member, value); 409 - else 410 - return smu_v14_0_0_legacy_get_smu_metrics_data(smu, member, value); 411 - } 412 - 413 320 static int smu_v14_0_0_read_sensor(struct smu_context *smu, 414 321 enum amd_pp_sensors sensor, 415 322 void *data, uint32_t *size) ··· 320 429 321 430 switch (sensor) { 322 431 case AMDGPU_PP_SENSOR_GPU_LOAD: 323 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 432 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 324 433 METRICS_AVERAGE_GFXACTIVITY, 325 434 (uint32_t *)data); 326 435 *size = 4; 327 436 break; 328 437 case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 329 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 438 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 330 439 METRICS_AVERAGE_SOCKETPOWER, 331 440 (uint32_t *)data); 332 441 *size = 4; 333 442 break; 334 443 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: 335 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 444 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 336 445 METRICS_CURR_SOCKETPOWER, 337 446 (uint32_t *)data); 338 447 *size = 4; 339 448 break; 340 449 case AMDGPU_PP_SENSOR_EDGE_TEMP: 341 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 450 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 342 451 METRICS_TEMPERATURE_EDGE, 343 452 (uint32_t *)data); 344 453 *size = 4; 345 454 break; 346 455 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 347 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 456 + ret = 
smu_v14_0_0_get_smu_metrics_data(smu, 348 457 METRICS_TEMPERATURE_HOTSPOT, 349 458 (uint32_t *)data); 350 459 *size = 4; 351 460 break; 352 461 case AMDGPU_PP_SENSOR_GFX_MCLK: 353 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 462 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 354 463 METRICS_AVERAGE_UCLK, 355 464 (uint32_t *)data); 356 465 *(uint32_t *)data *= 100; 357 466 *size = 4; 358 467 break; 359 468 case AMDGPU_PP_SENSOR_GFX_SCLK: 360 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 469 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 361 470 METRICS_AVERAGE_GFXCLK, 362 471 (uint32_t *)data); 363 472 *(uint32_t *)data *= 100; 364 473 *size = 4; 365 474 break; 366 475 case AMDGPU_PP_SENSOR_VDDGFX: 367 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 476 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 368 477 METRICS_VOLTAGE_VDDGFX, 369 478 (uint32_t *)data); 370 479 *size = 4; 371 480 break; 372 481 case AMDGPU_PP_SENSOR_VDDNB: 373 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 482 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 374 483 METRICS_VOLTAGE_VDDSOC, 375 484 (uint32_t *)data); 376 485 *size = 4; 377 486 break; 378 487 case AMDGPU_PP_SENSOR_SS_APU_SHARE: 379 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 488 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 380 489 METRICS_SS_APU_SHARE, 381 490 (uint32_t *)data); 382 491 *size = 4; 383 492 break; 384 493 case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 385 - ret = smu_v14_0_0_common_get_smu_metrics_data(smu, 494 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 386 495 METRICS_SS_DGPU_SHARE, 387 496 (uint32_t *)data); 388 497 *size = 4; ··· 479 588 if (ret) 480 589 return ret; 481 590 482 - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 591 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 3, 0); 483 592 484 593 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 485 594 gpu_metrics->temperature_soc = metrics.SocTemperature; ··· 488 597 sizeof(uint16_t) * 16); 489 598 gpu_metrics->temperature_skin = 
metrics.SkinTemp; 490 599 491 - gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; 492 - gpu_metrics->average_vcn_activity = metrics.AverageVcnActivity; 600 + gpu_metrics->average_gfx_activity = metrics.GfxActivity; 601 + gpu_metrics->average_vcn_activity = metrics.VcnActivity; 493 602 memcpy(&gpu_metrics->average_ipu_activity[0], 494 - &metrics.AverageIpuBusy[0], 603 + &metrics.IpuBusy[0], 495 604 sizeof(uint16_t) * 8); 496 605 memcpy(&gpu_metrics->average_core_c0_activity[0], 497 - &metrics.AverageCoreC0Residency[0], 606 + &metrics.CoreC0Residency[0], 498 607 sizeof(uint16_t) * 16); 499 - gpu_metrics->average_dram_reads = metrics.AverageDRAMReads; 500 - gpu_metrics->average_dram_writes = metrics.AverageDRAMWrites; 608 + gpu_metrics->average_dram_reads = metrics.DRAMReads; 609 + gpu_metrics->average_dram_writes = metrics.DRAMWrites; 501 610 502 - gpu_metrics->average_socket_power = metrics.AverageSocketPower; 611 + gpu_metrics->average_socket_power = metrics.SocketPower; 503 612 gpu_metrics->average_ipu_power = metrics.IpuPower; 504 613 gpu_metrics->average_apu_power = metrics.ApuPower; 614 + gpu_metrics->average_gfx_power = metrics.GfxPower; 505 615 gpu_metrics->average_dgpu_power = metrics.dGpuPower; 506 - gpu_metrics->average_core_power = metrics.AverageCorePower; 507 - memcpy(&gpu_metrics->core_power[0], 616 + gpu_metrics->average_all_core_power = metrics.AllCorePower; 617 + memcpy(&gpu_metrics->average_core_power[0], 508 618 &metrics.CorePower[0], 509 619 sizeof(uint16_t) * 16); 510 620 511 - gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; 512 - gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; 513 - gpu_metrics->average_vpeclk_frequency = metrics.AverageVpeclkFrequency; 514 - gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency; 515 - gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency; 516 - gpu_metrics->average_ipuclk_frequency = metrics.AverageIpuclkFrequency; 621 + 
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 622 + gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 623 + gpu_metrics->average_vpeclk_frequency = metrics.VpeclkFrequency; 624 + gpu_metrics->average_fclk_frequency = metrics.FclkFrequency; 625 + gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 626 + gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency; 517 627 518 628 memcpy(&gpu_metrics->current_coreclk[0], 519 629 &metrics.CoreFrequency[0], ··· 528 636 *table = (void *)gpu_metrics; 529 637 530 638 return sizeof(struct gpu_metrics_v3_0); 531 - } 532 - 533 - static ssize_t smu_v14_0_0_get_legacy_gpu_metrics(struct smu_context *smu, 534 - void **table) 535 - { 536 - struct smu_table_context *smu_table = &smu->smu_table; 537 - struct gpu_metrics_v2_1 *gpu_metrics = 538 - (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 539 - SmuMetrics_legacy_t metrics; 540 - int ret = 0; 541 - 542 - ret = smu_cmn_get_metrics_table(smu, &metrics, true); 543 - if (ret) 544 - return ret; 545 - 546 - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 547 - 548 - gpu_metrics->temperature_gfx = metrics.GfxTemperature; 549 - gpu_metrics->temperature_soc = metrics.SocTemperature; 550 - memcpy(&gpu_metrics->temperature_core[0], 551 - &metrics.CoreTemperature[0], 552 - sizeof(uint16_t) * 8); 553 - gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 554 - gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1]; 555 - 556 - gpu_metrics->average_gfx_activity = metrics.GfxActivity; 557 - gpu_metrics->average_mm_activity = metrics.UvdActivity; 558 - 559 - gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 560 - gpu_metrics->average_gfx_power = metrics.Power[0]; 561 - gpu_metrics->average_soc_power = metrics.Power[1]; 562 - memcpy(&gpu_metrics->average_core_power[0], 563 - &metrics.CorePower[0], 564 - sizeof(uint16_t) * 8); 565 - 566 - gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 567 - 
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 568 - gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 569 - gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 570 - gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 571 - gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 572 - 573 - memcpy(&gpu_metrics->current_coreclk[0], 574 - &metrics.CoreFrequency[0], 575 - sizeof(uint16_t) * 8); 576 - 577 - gpu_metrics->throttle_status = metrics.ThrottlerStatus; 578 - gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 579 - 580 - *table = (void *)gpu_metrics; 581 - 582 - return sizeof(struct gpu_metrics_v2_1); 583 - } 584 - 585 - static ssize_t smu_v14_0_0_common_get_gpu_metrics(struct smu_context *smu, 586 - void **table) 587 - { 588 - 589 - if (smu->smc_fw_version > 0x5d3500) 590 - return smu_v14_0_0_get_gpu_metrics(smu, table); 591 - else 592 - return smu_v14_0_0_get_legacy_gpu_metrics(smu, table); 593 639 } 594 640 595 641 static int smu_v14_0_0_mode2_reset(struct smu_context *smu) ··· 758 928 return -EINVAL; 759 929 } 760 930 761 - return smu_v14_0_0_common_get_smu_metrics_data(smu, member_type, value); 931 + return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value); 762 932 } 763 933 764 934 static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu, ··· 1060 1230 .read_sensor = smu_v14_0_0_read_sensor, 1061 1231 .is_dpm_running = smu_v14_0_0_is_dpm_running, 1062 1232 .set_watermarks_table = smu_v14_0_0_set_watermarks_table, 1063 - .get_gpu_metrics = smu_v14_0_0_common_get_gpu_metrics, 1233 + .get_gpu_metrics = smu_v14_0_0_get_gpu_metrics, 1064 1234 .get_enabled_mask = smu_cmn_get_enabled_mask, 1065 1235 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1066 1236 .set_driver_table_location = smu_v14_0_set_driver_table_location,
+3
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 1004 1004 case METRICS_VERSION(2, 4): 1005 1005 structure_size = sizeof(struct gpu_metrics_v2_4); 1006 1006 break; 1007 + case METRICS_VERSION(3, 0): 1008 + structure_size = sizeof(struct gpu_metrics_v3_0); 1009 + break; 1007 1010 default: 1008 1011 return; 1009 1012 }
+21 -21
drivers/gpu/drm/radeon/atombios.h
··· 3893 3893 typedef struct _ATOM_GPIO_PIN_LUT 3894 3894 { 3895 3895 ATOM_COMMON_TABLE_HEADER sHeader; 3896 - ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; 3896 + ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[]; 3897 3897 }ATOM_GPIO_PIN_LUT; 3898 3898 3899 3899 /****************************************************************************/ ··· 4061 4061 UCHAR ucNumberOfSrc; 4062 4062 USHORT usSrcObjectID[1]; 4063 4063 UCHAR ucNumberOfDst; 4064 - USHORT usDstObjectID[1]; 4064 + USHORT usDstObjectID[]; 4065 4065 }ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; 4066 4066 4067 4067 ··· 4233 4233 ATOM_COMMON_RECORD_HEADER sheader; 4234 4234 UCHAR ucNumberOfDevice; 4235 4235 UCHAR ucReserved; 4236 - ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation 4236 + ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation 4237 4237 }ATOM_CONNECTOR_DEVICE_TAG_RECORD; 4238 4238 4239 4239 ··· 4293 4293 ATOM_COMMON_RECORD_HEADER sheader; 4294 4294 UCHAR ucFlags; // Future expnadibility 4295 4295 UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object 4296 - ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins 4296 + ATOM_GPIO_PIN_CONTROL_PAIR asGpio[]; // the real gpio pin pair determined by number of pins ucNumberOfPins 4297 4297 }ATOM_OBJECT_GPIO_CNTL_RECORD; 4298 4298 4299 4299 //Definitions for GPIO pin state ··· 4444 4444 UCHAR ucWidth; 4445 4445 UCHAR ucConnNum; 4446 4446 UCHAR ucReserved; 4447 - ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1]; 4447 + ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[]; 4448 4448 }ATOM_BRACKET_LAYOUT_RECORD; 4449 4449 4450 4450 /****************************************************************************/ ··· 4600 4600 UCHAR ucVoltageControlAddress; 4601 4601 UCHAR ucVoltageControlOffset; 4602 4602 ULONG ulReserved; 4603 - VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff 4603 + VOLTAGE_LUT_ENTRY 
asVolI2cLut[]; // end with 0xff 4604 4604 }ATOM_I2C_VOLTAGE_OBJECT_V3; 4605 4605 4606 4606 // ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag ··· 4625 4625 UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table 4626 4626 UCHAR ucReserved[2]; 4627 4627 ULONG ulMaxVoltageLevel; 4628 - LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1]; 4628 + LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[]; 4629 4629 }ATOM_LEAKAGE_VOLTAGE_OBJECT_V3; 4630 4630 4631 4631 ··· 4753 4753 { 4754 4754 ATOM_COMMON_TABLE_HEADER asHeader; 4755 4755 UCHAR asPwrbehave[16]; 4756 - ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; 4756 + ATOM_POWER_SOURCE_OBJECT asPwrObj[]; 4757 4757 }ATOM_POWER_SOURCE_INFO; 4758 4758 4759 4759 ··· 5440 5440 typedef struct _ATOM_I2C_DATA_RECORD 5441 5441 { 5442 5442 UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" 5443 - UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually 5443 + UCHAR ucI2CData[]; //I2C data in bytes, should be less than 16 bytes usually 5444 5444 }ATOM_I2C_DATA_RECORD; 5445 5445 5446 5446 ··· 5451 5451 UCHAR ucSSChipID; //SS chip being used 5452 5452 UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip 5453 5453 UCHAR ucNumOfI2CDataRecords; //number of data block 5454 - ATOM_I2C_DATA_RECORD asI2CData[1]; 5454 + ATOM_I2C_DATA_RECORD asI2CData[]; 5455 5455 }ATOM_I2C_DEVICE_SETUP_INFO; 5456 5456 5457 5457 //========================================================================================== 5458 5458 typedef struct _ATOM_ASIC_MVDD_INFO 5459 5459 { 5460 5460 ATOM_COMMON_TABLE_HEADER sHeader; 5461 - ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; 5461 + ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[]; 5462 5462 }ATOM_ASIC_MVDD_INFO; 5463 5463 5464 5464 //========================================================================================== ··· 5520 5520 typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 5521 5521 { 5522 5522 
ATOM_COMMON_TABLE_HEADER sHeader; 5523 - ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is point only. 5523 + ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[]; //this is point only. 5524 5524 }ATOM_ASIC_INTERNAL_SS_INFO_V2; 5525 5525 5526 5526 typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3 ··· 5542 5542 typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 5543 5543 { 5544 5544 ATOM_COMMON_TABLE_HEADER sHeader; 5545 - ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only. 5545 + ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[]; //this is pointer only. 5546 5546 }ATOM_ASIC_INTERNAL_SS_INFO_V3; 5547 5547 5548 5548 ··· 6282 6282 6283 6283 typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{ 6284 6284 ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; 6285 - ULONG aulMemData[1]; 6285 + ULONG aulMemData[]; 6286 6286 }ATOM_MEMORY_SETTING_DATA_BLOCK; 6287 6287 6288 6288 ··· 7092 7092 UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE 7093 7093 UCHAR ucDispCaps; 7094 7094 UCHAR ucReserved[2]; 7095 - ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only 7095 + ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[]; // for alligment only 7096 7096 }ATOM_DISP_OUT_INFO_V3; 7097 7097 7098 7098 //ucDispCaps ··· 7324 7324 USHORT usMaxClockFreq; 7325 7325 UCHAR ucEncodeMode; 7326 7326 UCHAR ucPhySel; 7327 - ULONG ulAnalogSetting[1]; 7327 + ULONG ulAnalogSetting[]; 7328 7328 }CLOCK_CONDITION_SETTING_ENTRY; 7329 7329 7330 7330 typedef struct _CLOCK_CONDITION_SETTING_INFO{ 7331 7331 USHORT usEntrySize; 7332 - CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1]; 7332 + CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[]; 7333 7333 }CLOCK_CONDITION_SETTING_INFO; 7334 7334 7335 7335 typedef struct _PHY_CONDITION_REG_VAL{ ··· 7346 7346 typedef struct _PHY_CONDITION_REG_INFO{ 7347 7347 USHORT usRegIndex; 7348 7348 USHORT usSize; 7349 - PHY_CONDITION_REG_VAL asRegVal[1]; 7349 + PHY_CONDITION_REG_VAL asRegVal[]; 7350 7350 }PHY_CONDITION_REG_INFO; 7351 7351 7352 7352 typedef 
struct _PHY_CONDITION_REG_INFO_V2{ 7353 7353 USHORT usRegIndex; 7354 7354 USHORT usSize; 7355 - PHY_CONDITION_REG_VAL_V2 asRegVal[1]; 7355 + PHY_CONDITION_REG_VAL_V2 asRegVal[]; 7356 7356 }PHY_CONDITION_REG_INFO_V2; 7357 7357 7358 7358 typedef struct _PHY_ANALOG_SETTING_INFO{ 7359 7359 UCHAR ucEncodeMode; 7360 7360 UCHAR ucPhySel; 7361 7361 USHORT usSize; 7362 - PHY_CONDITION_REG_INFO asAnalogSetting[1]; 7362 + PHY_CONDITION_REG_INFO asAnalogSetting[]; 7363 7363 }PHY_ANALOG_SETTING_INFO; 7364 7364 7365 7365 typedef struct _PHY_ANALOG_SETTING_INFO_V2{ 7366 7366 UCHAR ucEncodeMode; 7367 7367 UCHAR ucPhySel; 7368 7368 USHORT usSize; 7369 - PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1]; 7369 + PHY_CONDITION_REG_INFO_V2 asAnalogSetting[]; 7370 7370 }PHY_ANALOG_SETTING_INFO_V2; 7371 7371 7372 7372 typedef struct _GFX_HAVESTING_PARAMETERS {
+2 -1
drivers/gpu/drm/renesas/shmobile/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 config DRM_SHMOBILE 3 3 tristate "DRM Support for SH Mobile" 4 - depends on DRM 4 + depends on DRM && PM 5 5 depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST 6 6 select BACKLIGHT_CLASS_DEVICE 7 7 select DRM_KMS_HELPER 8 8 select DRM_GEM_DMA_HELPER 9 + select VIDEOMODE_HELPERS 9 10 help 10 11 Choose this option if you have an SH Mobile chipset. 11 12 If M is selected the module will be called shmob-drm.
+1 -2
drivers/gpu/drm/renesas/shmobile/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - shmob-drm-y := shmob_drm_backlight.o \ 3 - shmob_drm_crtc.o \ 2 + shmob-drm-y := shmob_drm_crtc.o \ 4 3 shmob_drm_drv.o \ 5 4 shmob_drm_kms.o \ 6 5 shmob_drm_plane.o
-82
drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c
··· 1 - // SPDX-License-Identifier: GPL-2.0+ 2 - /* 3 - * shmob_drm_backlight.c -- SH Mobile DRM Backlight 4 - * 5 - * Copyright (C) 2012 Renesas Electronics Corporation 6 - * 7 - * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 - */ 9 - 10 - #include <linux/backlight.h> 11 - 12 - #include "shmob_drm_backlight.h" 13 - #include "shmob_drm_crtc.h" 14 - #include "shmob_drm_drv.h" 15 - 16 - static int shmob_drm_backlight_update(struct backlight_device *bdev) 17 - { 18 - struct shmob_drm_connector *scon = bl_get_data(bdev); 19 - struct shmob_drm_device *sdev = scon->connector.dev->dev_private; 20 - const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; 21 - int brightness = backlight_get_brightness(bdev); 22 - 23 - return bdata->set_brightness(brightness); 24 - } 25 - 26 - static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev) 27 - { 28 - struct shmob_drm_connector *scon = bl_get_data(bdev); 29 - struct shmob_drm_device *sdev = scon->connector.dev->dev_private; 30 - const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; 31 - 32 - return bdata->get_brightness(); 33 - } 34 - 35 - static const struct backlight_ops shmob_drm_backlight_ops = { 36 - .options = BL_CORE_SUSPENDRESUME, 37 - .update_status = shmob_drm_backlight_update, 38 - .get_brightness = shmob_drm_backlight_get_brightness, 39 - }; 40 - 41 - void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode) 42 - { 43 - if (scon->backlight == NULL) 44 - return; 45 - 46 - scon->backlight->props.power = mode == DRM_MODE_DPMS_ON 47 - ? 
FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; 48 - backlight_update_status(scon->backlight); 49 - } 50 - 51 - int shmob_drm_backlight_init(struct shmob_drm_connector *scon) 52 - { 53 - struct shmob_drm_device *sdev = scon->connector.dev->dev_private; 54 - const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; 55 - struct drm_connector *connector = &scon->connector; 56 - struct drm_device *dev = connector->dev; 57 - struct backlight_device *backlight; 58 - 59 - if (!bdata->max_brightness) 60 - return 0; 61 - 62 - backlight = backlight_device_register(bdata->name, dev->dev, scon, 63 - &shmob_drm_backlight_ops, NULL); 64 - if (IS_ERR(backlight)) { 65 - dev_err(dev->dev, "unable to register backlight device: %ld\n", 66 - PTR_ERR(backlight)); 67 - return PTR_ERR(backlight); 68 - } 69 - 70 - backlight->props.max_brightness = bdata->max_brightness; 71 - backlight->props.brightness = bdata->max_brightness; 72 - backlight->props.power = FB_BLANK_POWERDOWN; 73 - backlight_update_status(backlight); 74 - 75 - scon->backlight = backlight; 76 - return 0; 77 - } 78 - 79 - void shmob_drm_backlight_exit(struct shmob_drm_connector *scon) 80 - { 81 - backlight_device_unregister(scon->backlight); 82 - }
-19
drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0+ */ 2 - /* 3 - * shmob_drm_backlight.h -- SH Mobile DRM Backlight 4 - * 5 - * Copyright (C) 2012 Renesas Electronics Corporation 6 - * 7 - * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 - */ 9 - 10 - #ifndef __SHMOB_DRM_BACKLIGHT_H__ 11 - #define __SHMOB_DRM_BACKLIGHT_H__ 12 - 13 - struct shmob_drm_connector; 14 - 15 - void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode); 16 - int shmob_drm_backlight_init(struct shmob_drm_connector *scon); 17 - void shmob_drm_backlight_exit(struct shmob_drm_connector *scon); 18 - 19 - #endif /* __SHMOB_DRM_BACKLIGHT_H__ */
+283 -373
drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c
··· 7 7 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 8 */ 9 9 10 - #include <linux/backlight.h> 11 10 #include <linux/clk.h> 11 + #include <linux/media-bus-format.h> 12 + #include <linux/of.h> 13 + #include <linux/of_graph.h> 14 + #include <linux/pm_runtime.h> 12 15 16 + #include <drm/drm_atomic.h> 17 + #include <drm/drm_atomic_helper.h> 18 + #include <drm/drm_atomic_state_helper.h> 19 + #include <drm/drm_atomic_uapi.h> 20 + #include <drm/drm_bridge.h> 21 + #include <drm/drm_bridge_connector.h> 13 22 #include <drm/drm_crtc.h> 14 23 #include <drm/drm_crtc_helper.h> 15 24 #include <drm/drm_fb_dma_helper.h> ··· 27 18 #include <drm/drm_gem_dma_helper.h> 28 19 #include <drm/drm_modeset_helper.h> 29 20 #include <drm/drm_modeset_helper_vtables.h> 30 - #include <drm/drm_plane_helper.h> 21 + #include <drm/drm_panel.h> 31 22 #include <drm/drm_probe_helper.h> 32 23 #include <drm/drm_simple_kms_helper.h> 33 24 #include <drm/drm_vblank.h> 34 25 35 - #include "shmob_drm_backlight.h" 26 + #include <video/videomode.h> 27 + 36 28 #include "shmob_drm_crtc.h" 37 29 #include "shmob_drm_drv.h" 38 30 #include "shmob_drm_kms.h" 39 31 #include "shmob_drm_plane.h" 40 32 #include "shmob_drm_regs.h" 41 33 42 - /* 43 - * TODO: panel support 44 - */ 45 - 46 34 /* ----------------------------------------------------------------------------- 47 - * Clock management 35 + * Page Flip 48 36 */ 49 37 50 - static int shmob_drm_clk_on(struct shmob_drm_device *sdev) 38 + void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) 51 39 { 52 - int ret; 40 + struct drm_pending_vblank_event *event; 41 + struct drm_device *dev = scrtc->base.dev; 42 + unsigned long flags; 53 43 54 - if (sdev->clock) { 55 - ret = clk_prepare_enable(sdev->clock); 56 - if (ret < 0) 57 - return ret; 44 + spin_lock_irqsave(&dev->event_lock, flags); 45 + event = scrtc->event; 46 + scrtc->event = NULL; 47 + if (event) { 48 + drm_crtc_send_vblank_event(&scrtc->base, event); 49 + wake_up(&scrtc->flip_wait); 50 + 
drm_crtc_vblank_put(&scrtc->base); 58 51 } 59 - 60 - return 0; 52 + spin_unlock_irqrestore(&dev->event_lock, flags); 61 53 } 62 54 63 - static void shmob_drm_clk_off(struct shmob_drm_device *sdev) 55 + static bool shmob_drm_crtc_page_flip_pending(struct shmob_drm_crtc *scrtc) 64 56 { 65 - if (sdev->clock) 66 - clk_disable_unprepare(sdev->clock); 57 + struct drm_device *dev = scrtc->base.dev; 58 + unsigned long flags; 59 + bool pending; 60 + 61 + spin_lock_irqsave(&dev->event_lock, flags); 62 + pending = scrtc->event != NULL; 63 + spin_unlock_irqrestore(&dev->event_lock, flags); 64 + 65 + return pending; 66 + } 67 + 68 + static void shmob_drm_crtc_wait_page_flip(struct shmob_drm_crtc *scrtc) 69 + { 70 + struct drm_crtc *crtc = &scrtc->base; 71 + struct shmob_drm_device *sdev = to_shmob_device(crtc->dev); 72 + 73 + if (wait_event_timeout(scrtc->flip_wait, 74 + !shmob_drm_crtc_page_flip_pending(scrtc), 75 + msecs_to_jiffies(50))) 76 + return; 77 + 78 + dev_warn(sdev->dev, "page flip timeout\n"); 79 + 80 + shmob_drm_crtc_finish_page_flip(scrtc); 67 81 } 68 82 69 83 /* ----------------------------------------------------------------------------- 70 84 * CRTC 71 85 */ 72 86 87 + static const struct { 88 + u32 fmt; 89 + u32 ldmt1r; 90 + } shmob_drm_bus_fmts[] = { 91 + { MEDIA_BUS_FMT_RGB888_3X8, LDMT1R_MIFTYP_RGB8 }, 92 + { MEDIA_BUS_FMT_RGB666_2X9_BE, LDMT1R_MIFTYP_RGB9 }, 93 + { MEDIA_BUS_FMT_RGB888_2X12_BE, LDMT1R_MIFTYP_RGB12A }, 94 + { MEDIA_BUS_FMT_RGB444_1X12, LDMT1R_MIFTYP_RGB12B }, 95 + { MEDIA_BUS_FMT_RGB565_1X16, LDMT1R_MIFTYP_RGB16 }, 96 + { MEDIA_BUS_FMT_RGB666_1X18, LDMT1R_MIFTYP_RGB18 }, 97 + { MEDIA_BUS_FMT_RGB888_1X24, LDMT1R_MIFTYP_RGB24 }, 98 + { MEDIA_BUS_FMT_UYVY8_1X16, LDMT1R_MIFTYP_YCBCR }, 99 + }; 100 + 73 101 static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc) 74 102 { 75 - struct drm_crtc *crtc = &scrtc->crtc; 76 - struct shmob_drm_device *sdev = crtc->dev->dev_private; 77 - const struct shmob_drm_interface_data *idata = 
&sdev->pdata->iface; 103 + struct drm_crtc *crtc = &scrtc->base; 104 + struct shmob_drm_device *sdev = to_shmob_device(crtc->dev); 105 + const struct drm_display_info *info = &sdev->connector->display_info; 78 106 const struct drm_display_mode *mode = &crtc->mode; 107 + unsigned int i; 79 108 u32 value; 80 109 81 - value = sdev->ldmt1r 82 - | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL) 83 - | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL) 84 - | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0) 85 - | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0) 86 - | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0) 87 - | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0) 88 - | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0); 89 - lcdc_write(sdev, LDMT1R, value); 90 - 91 - if (idata->interface >= SHMOB_DRM_IFACE_SYS8A && 92 - idata->interface <= SHMOB_DRM_IFACE_SYS24) { 93 - /* Setup SYS bus. */ 94 - value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT) 95 - | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0) 96 - | (idata->sys.vsync_dir_input ? 
LDMT2R_VSEL : 0) 97 - | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT) 98 - | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT) 99 - | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT); 100 - lcdc_write(sdev, LDMT2R, value); 101 - 102 - value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT) 103 - | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT) 104 - | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT) 105 - | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT); 106 - lcdc_write(sdev, LDMT3R, value); 110 + if (!info->num_bus_formats || !info->bus_formats) { 111 + dev_warn(sdev->dev, "No bus format reported, using RGB888\n"); 112 + value = LDMT1R_MIFTYP_RGB24; 113 + } else { 114 + for (i = 0; i < ARRAY_SIZE(shmob_drm_bus_fmts); i++) { 115 + if (shmob_drm_bus_fmts[i].fmt == info->bus_formats[0]) 116 + break; 117 + } 118 + if (i < ARRAY_SIZE(shmob_drm_bus_fmts)) { 119 + value = shmob_drm_bus_fmts[i].ldmt1r; 120 + } else { 121 + dev_warn(sdev->dev, 122 + "unsupported bus format 0x%x, using RGB888\n", 123 + info->bus_formats[0]); 124 + value = LDMT1R_MIFTYP_RGB24; 125 + } 107 126 } 127 + 128 + if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) 129 + value |= LDMT1R_DWPOL; 130 + if (info->bus_flags & DRM_BUS_FLAG_DE_LOW) 131 + value |= LDMT1R_DIPOL; 132 + if (mode->flags & DRM_MODE_FLAG_NVSYNC) 133 + value |= LDMT1R_VPOL; 134 + if (mode->flags & DRM_MODE_FLAG_NHSYNC) 135 + value |= LDMT1R_HPOL; 136 + lcdc_write(sdev, LDMT1R, value); 108 137 109 138 value = ((mode->hdisplay / 8) << 16) /* HDCN */ 110 139 | (mode->htotal / 8); /* HTCN */ ··· 168 121 169 122 static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start) 170 123 { 171 - struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private; 124 + struct shmob_drm_device *sdev = to_shmob_device(scrtc->base.dev); 172 125 u32 value; 173 126 174 127 value = lcdc_read(sdev, LDCNT2R); ··· 192 145 } 193 146 } 194 147 195 - /* 196 - * shmob_drm_crtc_start - Configure and start the LCDC 197 - * @scrtc: the SH Mobile CRTC 198 - 
* 199 - * Configure and start the LCDC device. External devices (clocks, MERAM, panels, 200 - * ...) are not touched by this function. 201 - */ 202 - static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc) 148 + static inline struct shmob_drm_crtc *to_shmob_crtc(struct drm_crtc *crtc) 203 149 { 204 - struct drm_crtc *crtc = &scrtc->crtc; 205 - struct shmob_drm_device *sdev = crtc->dev->dev_private; 206 - const struct shmob_drm_interface_data *idata = &sdev->pdata->iface; 207 - const struct shmob_drm_format_info *format; 208 - struct drm_device *dev = sdev->ddev; 209 - struct drm_plane *plane; 150 + return container_of(crtc, struct shmob_drm_crtc, base); 151 + } 152 + 153 + static void shmob_drm_crtc_atomic_enable(struct drm_crtc *crtc, 154 + struct drm_atomic_state *state) 155 + { 156 + struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); 157 + struct shmob_drm_device *sdev = to_shmob_device(crtc->dev); 158 + unsigned int clk_div = sdev->config.clk_div; 159 + struct device *dev = sdev->dev; 210 160 u32 value; 211 161 int ret; 212 162 213 - if (scrtc->started) 214 - return; 215 - 216 - format = shmob_drm_format_info(crtc->primary->fb->format->format); 217 - if (WARN_ON(format == NULL)) 218 - return; 219 - 220 - /* Enable clocks before accessing the hardware. */ 221 - ret = shmob_drm_clk_on(sdev); 222 - if (ret < 0) 163 + ret = pm_runtime_resume_and_get(dev); 164 + if (ret) 223 165 return; 224 166 225 167 /* Reset and enable the LCDC. */ ··· 224 188 lcdc_write(sdev, LDPMR, 0); 225 189 226 190 value = sdev->lddckr; 227 - if (idata->clk_div) { 191 + if (clk_div) { 228 192 /* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider 229 193 * denominator. 
230 194 */ 231 195 lcdc_write(sdev, LDDCKPAT1R, 0); 232 - lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1); 196 + lcdc_write(sdev, LDDCKPAT2R, (1 << (clk_div / 2)) - 1); 233 197 234 - if (idata->clk_div == 1) 198 + if (clk_div == 1) 235 199 value |= LDDCKR_MOSEL; 236 200 else 237 - value |= idata->clk_div; 201 + value |= clk_div; 238 202 } 239 203 240 204 lcdc_write(sdev, LDDCKR, value); 241 205 lcdc_write(sdev, LDDCKSTPR, 0); 242 206 lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0); 243 207 244 - /* TODO: Setup SYS panel */ 245 - 246 208 /* Setup geometry, format, frame buffer memory and operation mode. */ 247 209 shmob_drm_crtc_setup_geometry(scrtc); 248 210 249 - /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */ 250 - lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1); 251 - lcdc_write(sdev, LDMLSR, scrtc->line_size); 252 - lcdc_write(sdev, LDSA1R, scrtc->dma[0]); 253 - if (format->yuv) 254 - lcdc_write(sdev, LDSA2R, scrtc->dma[1]); 255 211 lcdc_write(sdev, LDSM1R, 0); 256 - 257 - /* Word and long word swap. */ 258 - switch (format->fourcc) { 259 - case DRM_FORMAT_RGB565: 260 - case DRM_FORMAT_NV21: 261 - case DRM_FORMAT_NV61: 262 - case DRM_FORMAT_NV42: 263 - value = LDDDSR_LS | LDDDSR_WS; 264 - break; 265 - case DRM_FORMAT_RGB888: 266 - case DRM_FORMAT_NV12: 267 - case DRM_FORMAT_NV16: 268 - case DRM_FORMAT_NV24: 269 - value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS; 270 - break; 271 - case DRM_FORMAT_ARGB8888: 272 - case DRM_FORMAT_XRGB8888: 273 - default: 274 - value = LDDDSR_LS; 275 - break; 276 - } 277 - lcdc_write(sdev, LDDDSR, value); 278 - 279 - /* Setup planes. */ 280 - drm_for_each_legacy_plane(plane, dev) { 281 - if (plane->crtc == crtc) 282 - shmob_drm_plane_setup(plane); 283 - } 284 212 285 213 /* Enable the display output. */ 286 214 lcdc_write(sdev, LDCNT1R, LDCNT1R_DE); 287 215 288 216 shmob_drm_crtc_start_stop(scrtc, true); 289 217 290 - scrtc->started = true; 218 + /* Turn vertical blank interrupt reporting back on. 
*/ 219 + drm_crtc_vblank_on(crtc); 291 220 } 292 221 293 - static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc) 222 + static void shmob_drm_crtc_atomic_disable(struct drm_crtc *crtc, 223 + struct drm_atomic_state *state) 294 224 { 295 - struct drm_crtc *crtc = &scrtc->crtc; 296 - struct shmob_drm_device *sdev = crtc->dev->dev_private; 225 + struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); 226 + struct shmob_drm_device *sdev = to_shmob_device(crtc->dev); 297 227 298 - if (!scrtc->started) 299 - return; 228 + /* 229 + * Disable vertical blank interrupt reporting. We first need to wait 230 + * for page flip completion before stopping the CRTC as userspace 231 + * expects page flips to eventually complete. 232 + */ 233 + shmob_drm_crtc_wait_page_flip(scrtc); 234 + drm_crtc_vblank_off(crtc); 300 235 301 236 /* Stop the LCDC. */ 302 237 shmob_drm_crtc_start_stop(scrtc, false); ··· 275 268 /* Disable the display output. */ 276 269 lcdc_write(sdev, LDCNT1R, 0); 277 270 278 - /* Stop clocks. */ 279 - shmob_drm_clk_off(sdev); 280 - 281 - scrtc->started = false; 271 + pm_runtime_put(sdev->dev); 282 272 } 283 273 284 - void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc) 274 + static void shmob_drm_crtc_atomic_flush(struct drm_crtc *crtc, 275 + struct drm_atomic_state *state) 285 276 { 286 - shmob_drm_crtc_stop(scrtc); 287 - } 277 + struct drm_pending_vblank_event *event; 278 + struct drm_device *dev = crtc->dev; 279 + unsigned long flags; 288 280 289 - void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc) 290 - { 291 - if (scrtc->dpms != DRM_MODE_DPMS_ON) 292 - return; 293 - 294 - shmob_drm_crtc_start(scrtc); 295 - } 296 - 297 - static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc, 298 - int x, int y) 299 - { 300 - struct drm_crtc *crtc = &scrtc->crtc; 301 - struct drm_framebuffer *fb = crtc->primary->fb; 302 - struct drm_gem_dma_object *gem; 303 - unsigned int bpp; 304 - 305 - bpp = scrtc->format->yuv ? 
8 : scrtc->format->bpp; 306 - gem = drm_fb_dma_get_gem_obj(fb, 0); 307 - scrtc->dma[0] = gem->dma_addr + fb->offsets[0] 308 - + y * fb->pitches[0] + x * bpp / 8; 309 - 310 - if (scrtc->format->yuv) { 311 - bpp = scrtc->format->bpp - 8; 312 - gem = drm_fb_dma_get_gem_obj(fb, 1); 313 - scrtc->dma[1] = gem->dma_addr + fb->offsets[1] 314 - + y / (bpp == 4 ? 2 : 1) * fb->pitches[1] 315 - + x * (bpp == 16 ? 2 : 1); 281 + if (crtc->state->event) { 282 + spin_lock_irqsave(&dev->event_lock, flags); 283 + event = crtc->state->event; 284 + crtc->state->event = NULL; 285 + drm_crtc_send_vblank_event(crtc, event); 286 + spin_unlock_irqrestore(&dev->event_lock, flags); 316 287 } 317 - } 318 - 319 - static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc) 320 - { 321 - struct drm_crtc *crtc = &scrtc->crtc; 322 - struct shmob_drm_device *sdev = crtc->dev->dev_private; 323 - 324 - shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y); 325 - 326 - lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]); 327 - if (scrtc->format->yuv) 328 - lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]); 329 - 330 - lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS); 331 - } 332 - 333 - #define to_shmob_crtc(c) container_of(c, struct shmob_drm_crtc, crtc) 334 - 335 - static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 336 - { 337 - struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); 338 - 339 - if (scrtc->dpms == mode) 340 - return; 341 - 342 - if (mode == DRM_MODE_DPMS_ON) 343 - shmob_drm_crtc_start(scrtc); 344 - else 345 - shmob_drm_crtc_stop(scrtc); 346 - 347 - scrtc->dpms = mode; 348 - } 349 - 350 - static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc) 351 - { 352 - shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 353 - } 354 - 355 - static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc, 356 - struct drm_display_mode *mode, 357 - struct drm_display_mode *adjusted_mode, 358 - int x, int y, 359 - struct drm_framebuffer *old_fb) 360 - { 361 - struct 
shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); 362 - struct shmob_drm_device *sdev = crtc->dev->dev_private; 363 - const struct shmob_drm_format_info *format; 364 - 365 - format = shmob_drm_format_info(crtc->primary->fb->format->format); 366 - if (format == NULL) { 367 - dev_dbg(sdev->dev, "mode_set: unsupported format %p4cc\n", 368 - &crtc->primary->fb->format->format); 369 - return -EINVAL; 370 - } 371 - 372 - scrtc->format = format; 373 - scrtc->line_size = crtc->primary->fb->pitches[0]; 374 - 375 - shmob_drm_crtc_compute_base(scrtc, x, y); 376 - 377 - return 0; 378 - } 379 - 380 - static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc) 381 - { 382 - shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 383 - } 384 - 385 - static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 386 - struct drm_framebuffer *old_fb) 387 - { 388 - shmob_drm_crtc_update_base(to_shmob_crtc(crtc)); 389 - 390 - return 0; 391 288 } 392 289 393 290 static const struct drm_crtc_helper_funcs crtc_helper_funcs = { 394 - .dpms = shmob_drm_crtc_dpms, 395 - .prepare = shmob_drm_crtc_mode_prepare, 396 - .commit = shmob_drm_crtc_mode_commit, 397 - .mode_set = shmob_drm_crtc_mode_set, 398 - .mode_set_base = shmob_drm_crtc_mode_set_base, 291 + .atomic_flush = shmob_drm_crtc_atomic_flush, 292 + .atomic_enable = shmob_drm_crtc_atomic_enable, 293 + .atomic_disable = shmob_drm_crtc_atomic_disable, 399 294 }; 400 - 401 - void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) 402 - { 403 - struct drm_pending_vblank_event *event; 404 - struct drm_device *dev = scrtc->crtc.dev; 405 - unsigned long flags; 406 - 407 - spin_lock_irqsave(&dev->event_lock, flags); 408 - event = scrtc->event; 409 - scrtc->event = NULL; 410 - if (event) { 411 - drm_crtc_send_vblank_event(&scrtc->crtc, event); 412 - drm_crtc_vblank_put(&scrtc->crtc); 413 - } 414 - spin_unlock_irqrestore(&dev->event_lock, flags); 415 - } 416 295 417 296 static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, 418 297 
struct drm_framebuffer *fb, ··· 307 414 struct drm_modeset_acquire_ctx *ctx) 308 415 { 309 416 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc); 310 - struct drm_device *dev = scrtc->crtc.dev; 417 + struct drm_device *dev = scrtc->base.dev; 311 418 unsigned long flags; 312 419 313 420 spin_lock_irqsave(&dev->event_lock, flags); ··· 317 424 } 318 425 spin_unlock_irqrestore(&dev->event_lock, flags); 319 426 320 - crtc->primary->fb = fb; 321 - shmob_drm_crtc_update_base(scrtc); 427 + drm_atomic_set_fb_for_plane(crtc->primary->state, fb); 322 428 323 429 if (event) { 324 430 event->pipe = 0; 325 - drm_crtc_vblank_get(&scrtc->crtc); 431 + drm_crtc_vblank_get(&scrtc->base); 326 432 spin_lock_irqsave(&dev->event_lock, flags); 327 433 scrtc->event = event; 328 434 spin_unlock_irqrestore(&dev->event_lock, flags); ··· 349 457 350 458 static int shmob_drm_enable_vblank(struct drm_crtc *crtc) 351 459 { 352 - struct shmob_drm_device *sdev = crtc->dev->dev_private; 460 + struct shmob_drm_device *sdev = to_shmob_device(crtc->dev); 353 461 354 462 shmob_drm_crtc_enable_vblank(sdev, true); 355 463 ··· 358 466 359 467 static void shmob_drm_disable_vblank(struct drm_crtc *crtc) 360 468 { 361 - struct shmob_drm_device *sdev = crtc->dev->dev_private; 469 + struct shmob_drm_device *sdev = to_shmob_device(crtc->dev); 362 470 363 471 shmob_drm_crtc_enable_vblank(sdev, false); 364 472 } 365 473 366 474 static const struct drm_crtc_funcs crtc_funcs = { 475 + .reset = drm_atomic_helper_crtc_reset, 367 476 .destroy = drm_crtc_cleanup, 368 - .set_config = drm_crtc_helper_set_config, 477 + .set_config = drm_atomic_helper_set_config, 369 478 .page_flip = shmob_drm_crtc_page_flip, 479 + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 480 + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 370 481 .enable_vblank = shmob_drm_enable_vblank, 371 482 .disable_vblank = shmob_drm_disable_vblank, 372 483 }; 373 484 374 - static const uint32_t modeset_formats[] = { 375 - 
DRM_FORMAT_RGB565, 376 - DRM_FORMAT_RGB888, 377 - DRM_FORMAT_ARGB8888, 378 - DRM_FORMAT_XRGB8888, 379 - }; 380 - 381 - static const struct drm_plane_funcs primary_plane_funcs = { 382 - DRM_PLANE_NON_ATOMIC_FUNCS, 383 - }; 384 - 385 485 int shmob_drm_crtc_create(struct shmob_drm_device *sdev) 386 486 { 387 - struct drm_crtc *crtc = &sdev->crtc.crtc; 388 - struct drm_plane *primary; 487 + struct drm_crtc *crtc = &sdev->crtc.base; 488 + struct drm_plane *primary, *plane; 489 + unsigned int i; 389 490 int ret; 390 491 391 - sdev->crtc.dpms = DRM_MODE_DPMS_OFF; 492 + init_waitqueue_head(&sdev->crtc.flip_wait); 392 493 393 - primary = __drm_universal_plane_alloc(sdev->ddev, sizeof(*primary), 0, 394 - 0, &primary_plane_funcs, 395 - modeset_formats, 396 - ARRAY_SIZE(modeset_formats), 397 - NULL, DRM_PLANE_TYPE_PRIMARY, 398 - NULL); 494 + primary = shmob_drm_plane_create(sdev, DRM_PLANE_TYPE_PRIMARY, 0); 399 495 if (IS_ERR(primary)) 400 496 return PTR_ERR(primary); 401 497 402 - ret = drm_crtc_init_with_planes(sdev->ddev, crtc, primary, NULL, 403 - &crtc_funcs, NULL); 404 - if (ret < 0) { 405 - drm_plane_cleanup(primary); 406 - kfree(primary); 407 - return ret; 498 + for (i = 1; i < 5; ++i) { 499 + plane = shmob_drm_plane_create(sdev, DRM_PLANE_TYPE_OVERLAY, i); 500 + if (IS_ERR(plane)) 501 + return PTR_ERR(plane); 408 502 } 409 503 504 + ret = drm_crtc_init_with_planes(&sdev->ddev, crtc, primary, NULL, 505 + &crtc_funcs, NULL); 506 + if (ret < 0) 507 + return ret; 508 + 410 509 drm_crtc_helper_add(crtc, &crtc_helper_funcs); 510 + 511 + /* Start with vertical blank interrupt reporting disabled. 
*/ 512 + drm_crtc_vblank_off(crtc); 411 513 412 514 return 0; 413 515 } 414 516 415 517 /* ----------------------------------------------------------------------------- 416 - * Encoder 518 + * Legacy Encoder 417 519 */ 418 - 419 - #define to_shmob_encoder(e) \ 420 - container_of(e, struct shmob_drm_encoder, encoder) 421 - 422 - static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode) 423 - { 424 - struct shmob_drm_encoder *senc = to_shmob_encoder(encoder); 425 - struct shmob_drm_device *sdev = encoder->dev->dev_private; 426 - struct shmob_drm_connector *scon = &sdev->connector; 427 - 428 - if (senc->dpms == mode) 429 - return; 430 - 431 - shmob_drm_backlight_dpms(scon, mode); 432 - 433 - senc->dpms = mode; 434 - } 435 520 436 521 static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder, 437 522 const struct drm_display_mode *mode, 438 523 struct drm_display_mode *adjusted_mode) 439 524 { 440 525 struct drm_device *dev = encoder->dev; 441 - struct shmob_drm_device *sdev = dev->dev_private; 442 - struct drm_connector *connector = &sdev->connector.connector; 526 + struct shmob_drm_device *sdev = to_shmob_device(dev); 527 + struct drm_connector *connector = sdev->connector; 443 528 const struct drm_display_mode *panel_mode; 444 529 445 530 if (list_empty(&connector->modes)) { ··· 432 563 return true; 433 564 } 434 565 435 - static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder) 436 - { 437 - /* No-op, everything is handled in the CRTC code. */ 438 - } 439 - 440 - static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder, 441 - struct drm_display_mode *mode, 442 - struct drm_display_mode *adjusted_mode) 443 - { 444 - /* No-op, everything is handled in the CRTC code. */ 445 - } 446 - 447 - static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder) 448 - { 449 - /* No-op, everything is handled in the CRTC code. 
*/ 450 - } 451 - 452 566 static const struct drm_encoder_helper_funcs encoder_helper_funcs = { 453 - .dpms = shmob_drm_encoder_dpms, 454 567 .mode_fixup = shmob_drm_encoder_mode_fixup, 455 - .prepare = shmob_drm_encoder_mode_prepare, 456 - .commit = shmob_drm_encoder_mode_commit, 457 - .mode_set = shmob_drm_encoder_mode_set, 458 568 }; 569 + 570 + /* ----------------------------------------------------------------------------- 571 + * Encoder 572 + */ 459 573 460 574 int shmob_drm_encoder_create(struct shmob_drm_device *sdev) 461 575 { 462 - struct drm_encoder *encoder = &sdev->encoder.encoder; 576 + struct drm_encoder *encoder = &sdev->encoder; 577 + struct drm_bridge *bridge; 463 578 int ret; 464 - 465 - sdev->encoder.dpms = DRM_MODE_DPMS_OFF; 466 579 467 580 encoder->possible_crtcs = 1; 468 581 469 - ret = drm_simple_encoder_init(sdev->ddev, encoder, 470 - DRM_MODE_ENCODER_LVDS); 582 + ret = drm_simple_encoder_init(&sdev->ddev, encoder, 583 + DRM_MODE_ENCODER_DPI); 471 584 if (ret < 0) 472 585 return ret; 473 586 474 - drm_encoder_helper_add(encoder, &encoder_helper_funcs); 587 + if (sdev->pdata) { 588 + drm_encoder_helper_add(encoder, &encoder_helper_funcs); 589 + return 0; 590 + } 591 + 592 + /* Create a panel bridge */ 593 + bridge = devm_drm_of_get_bridge(sdev->dev, sdev->dev->of_node, 0, 0); 594 + if (IS_ERR(bridge)) 595 + return PTR_ERR(bridge); 596 + 597 + /* Attach the bridge to the encoder */ 598 + ret = drm_bridge_attach(encoder, bridge, NULL, 599 + DRM_BRIDGE_ATTACH_NO_CONNECTOR); 600 + if (ret) { 601 + dev_err(sdev->dev, "failed to attach bridge: %pe\n", 602 + ERR_PTR(ret)); 603 + return ret; 604 + } 475 605 476 606 return 0; 477 607 } 478 608 479 609 /* ----------------------------------------------------------------------------- 480 - * Connector 610 + * Legacy Connector 481 611 */ 482 612 483 - #define to_shmob_connector(c) \ 484 - container_of(c, struct shmob_drm_connector, connector) 613 + static inline struct shmob_drm_connector 
*to_shmob_connector(struct drm_connector *connector) 614 + { 615 + return container_of(connector, struct shmob_drm_connector, base); 616 + } 485 617 486 618 static int shmob_drm_connector_get_modes(struct drm_connector *connector) 487 619 { 488 - struct shmob_drm_device *sdev = connector->dev->dev_private; 620 + struct shmob_drm_connector *scon = to_shmob_connector(connector); 489 621 struct drm_display_mode *mode; 490 622 491 623 mode = drm_mode_create(connector->dev); ··· 494 624 return 0; 495 625 496 626 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; 497 - mode->clock = sdev->pdata->panel.mode.clock; 498 - mode->hdisplay = sdev->pdata->panel.mode.hdisplay; 499 - mode->hsync_start = sdev->pdata->panel.mode.hsync_start; 500 - mode->hsync_end = sdev->pdata->panel.mode.hsync_end; 501 - mode->htotal = sdev->pdata->panel.mode.htotal; 502 - mode->vdisplay = sdev->pdata->panel.mode.vdisplay; 503 - mode->vsync_start = sdev->pdata->panel.mode.vsync_start; 504 - mode->vsync_end = sdev->pdata->panel.mode.vsync_end; 505 - mode->vtotal = sdev->pdata->panel.mode.vtotal; 506 - mode->flags = sdev->pdata->panel.mode.flags; 507 627 508 - drm_mode_set_name(mode); 628 + drm_display_mode_from_videomode(scon->mode, mode); 629 + 509 630 drm_mode_probed_add(connector, mode); 510 - 511 - connector->display_info.width_mm = sdev->pdata->panel.width_mm; 512 - connector->display_info.height_mm = sdev->pdata->panel.height_mm; 513 631 514 632 return 1; 515 633 } ··· 517 659 518 660 static void shmob_drm_connector_destroy(struct drm_connector *connector) 519 661 { 520 - struct shmob_drm_connector *scon = to_shmob_connector(connector); 521 - 522 - shmob_drm_backlight_exit(scon); 523 662 drm_connector_unregister(connector); 524 663 drm_connector_cleanup(connector); 664 + 665 + kfree(connector); 525 666 } 526 667 527 668 static const struct drm_connector_funcs connector_funcs = { 528 - .dpms = drm_helper_connector_dpms, 669 + .reset = drm_atomic_helper_connector_reset, 529 670 
.fill_modes = drm_helper_probe_single_connector_modes, 530 671 .destroy = shmob_drm_connector_destroy, 672 + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 673 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 531 674 }; 675 + 676 + static struct drm_connector * 677 + shmob_drm_connector_init(struct shmob_drm_device *sdev, 678 + struct drm_encoder *encoder) 679 + { 680 + u32 bus_fmt = sdev->pdata->iface.bus_fmt; 681 + struct shmob_drm_connector *scon; 682 + struct drm_connector *connector; 683 + struct drm_display_info *info; 684 + unsigned int i; 685 + int ret; 686 + 687 + for (i = 0; i < ARRAY_SIZE(shmob_drm_bus_fmts); i++) { 688 + if (shmob_drm_bus_fmts[i].fmt == bus_fmt) 689 + break; 690 + } 691 + if (i == ARRAY_SIZE(shmob_drm_bus_fmts)) { 692 + dev_err(sdev->dev, "unsupported bus format 0x%x\n", bus_fmt); 693 + return ERR_PTR(-EINVAL); 694 + } 695 + 696 + scon = kzalloc(sizeof(*scon), GFP_KERNEL); 697 + if (!scon) 698 + return ERR_PTR(-ENOMEM); 699 + 700 + connector = &scon->base; 701 + scon->encoder = encoder; 702 + scon->mode = &sdev->pdata->panel.mode; 703 + 704 + info = &connector->display_info; 705 + info->width_mm = sdev->pdata->panel.width_mm; 706 + info->height_mm = sdev->pdata->panel.height_mm; 707 + 708 + if (scon->mode->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE) 709 + info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; 710 + if (scon->mode->flags & DISPLAY_FLAGS_DE_LOW) 711 + info->bus_flags |= DRM_BUS_FLAG_DE_LOW; 712 + 713 + ret = drm_display_info_set_bus_formats(info, &bus_fmt, 1); 714 + if (ret < 0) { 715 + kfree(scon); 716 + return ERR_PTR(ret); 717 + } 718 + 719 + ret = drm_connector_init(&sdev->ddev, connector, &connector_funcs, 720 + DRM_MODE_CONNECTOR_DPI); 721 + if (ret < 0) { 722 + kfree(scon); 723 + return ERR_PTR(ret); 724 + } 725 + 726 + drm_connector_helper_add(connector, &connector_helper_funcs); 727 + 728 + return connector; 729 + } 730 + 731 + /* 
----------------------------------------------------------------------------- 732 + * Connector 733 + */ 532 734 533 735 int shmob_drm_connector_create(struct shmob_drm_device *sdev, 534 736 struct drm_encoder *encoder) 535 737 { 536 - struct drm_connector *connector = &sdev->connector.connector; 738 + struct drm_connector *connector; 537 739 int ret; 538 740 539 - sdev->connector.encoder = encoder; 540 - 541 - connector->display_info.width_mm = sdev->pdata->panel.width_mm; 542 - connector->display_info.height_mm = sdev->pdata->panel.height_mm; 543 - 544 - ret = drm_connector_init(sdev->ddev, connector, &connector_funcs, 545 - DRM_MODE_CONNECTOR_LVDS); 546 - if (ret < 0) 547 - return ret; 548 - 549 - drm_connector_helper_add(connector, &connector_helper_funcs); 550 - 551 - ret = shmob_drm_backlight_init(&sdev->connector); 552 - if (ret < 0) 553 - goto err_cleanup; 741 + if (sdev->pdata) 742 + connector = shmob_drm_connector_init(sdev, encoder); 743 + else 744 + connector = drm_bridge_connector_init(&sdev->ddev, encoder); 745 + if (IS_ERR(connector)) { 746 + dev_err(sdev->dev, "failed to created connector: %pe\n", 747 + connector); 748 + return PTR_ERR(connector); 749 + } 554 750 555 751 ret = drm_connector_attach_encoder(connector, encoder); 556 752 if (ret < 0) 557 - goto err_backlight; 753 + goto error; 558 754 559 - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 560 - drm_object_property_set_value(&connector->base, 561 - sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); 755 + connector->dpms = DRM_MODE_DPMS_OFF; 756 + 757 + sdev->connector = connector; 562 758 563 759 return 0; 564 760 565 - err_backlight: 566 - shmob_drm_backlight_exit(&sdev->connector); 567 - err_cleanup: 761 + error: 568 762 drm_connector_cleanup(connector); 569 763 return ret; 570 764 }
+9 -18
drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.h
··· 14 14 #include <drm/drm_connector.h> 15 15 #include <drm/drm_encoder.h> 16 16 17 - struct backlight_device; 17 + #include <linux/wait.h> 18 + 19 + #include <video/videomode.h> 20 + 18 21 struct drm_pending_vblank_event; 19 22 struct shmob_drm_device; 20 23 struct shmob_drm_format_info; 21 24 22 25 struct shmob_drm_crtc { 23 - struct drm_crtc crtc; 26 + struct drm_crtc base; 24 27 25 28 struct drm_pending_vblank_event *event; 26 - int dpms; 27 - 28 - const struct shmob_drm_format_info *format; 29 - unsigned long dma[2]; 30 - unsigned int line_size; 31 - bool started; 29 + wait_queue_head_t flip_wait; 32 30 }; 33 31 34 - struct shmob_drm_encoder { 35 - struct drm_encoder encoder; 36 - int dpms; 37 - }; 38 - 32 + /* Legacy connector */ 39 33 struct shmob_drm_connector { 40 - struct drm_connector connector; 34 + struct drm_connector base; 41 35 struct drm_encoder *encoder; 42 - 43 - struct backlight_device *backlight; 36 + const struct videomode *mode; 44 37 }; 45 38 46 39 int shmob_drm_crtc_create(struct shmob_drm_device *sdev); 47 40 void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc); 48 - void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc); 49 - void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc); 50 41 51 42 int shmob_drm_encoder_create(struct shmob_drm_device *sdev); 52 43 int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+84 -97
drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
··· 11 11 #include <linux/io.h> 12 12 #include <linux/mm.h> 13 13 #include <linux/module.h> 14 + #include <linux/of.h> 14 15 #include <linux/platform_device.h> 15 16 #include <linux/pm.h> 17 + #include <linux/pm_runtime.h> 16 18 #include <linux/slab.h> 17 19 20 + #include <drm/drm_atomic_helper.h> 18 21 #include <drm/drm_drv.h> 19 22 #include <drm/drm_fbdev_generic.h> 20 23 #include <drm/drm_gem_dma_helper.h> 24 + #include <drm/drm_modeset_helper.h> 21 25 #include <drm/drm_module.h> 22 26 #include <drm/drm_probe_helper.h> 23 27 #include <drm/drm_vblank.h> ··· 35 31 * Hardware initialization 36 32 */ 37 33 38 - static int shmob_drm_init_interface(struct shmob_drm_device *sdev) 39 - { 40 - static const u32 ldmt1r[] = { 41 - [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8, 42 - [SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9, 43 - [SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A, 44 - [SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B, 45 - [SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16, 46 - [SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18, 47 - [SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24, 48 - [SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR, 49 - [SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A, 50 - [SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B, 51 - [SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C, 52 - [SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D, 53 - [SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9, 54 - [SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12, 55 - [SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A, 56 - [SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B, 57 - [SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C, 58 - [SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18, 59 - [SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24, 60 - }; 61 - 62 - if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) { 63 - dev_err(sdev->dev, "invalid interface type %u\n", 64 - 
sdev->pdata->iface.interface); 65 - return -EINVAL; 66 - } 67 - 68 - sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface]; 69 - return 0; 70 - } 71 - 72 34 static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev, 73 - enum shmob_drm_clk_source clksrc) 35 + enum shmob_drm_clk_source clksrc) 74 36 { 75 37 struct clk *clk; 76 38 char *clkname; 77 39 78 40 switch (clksrc) { 79 41 case SHMOB_DRM_CLK_BUS: 80 - clkname = "bus_clk"; 42 + clkname = "fck"; 81 43 sdev->lddckr = LDDCKR_ICKSEL_BUS; 82 44 break; 83 45 case SHMOB_DRM_CLK_PERIPHERAL: 84 - clkname = "peripheral_clk"; 46 + clkname = "media"; 85 47 sdev->lddckr = LDDCKR_ICKSEL_MIPI; 86 48 break; 87 49 case SHMOB_DRM_CLK_EXTERNAL: 88 - clkname = NULL; 50 + clkname = "lclk"; 89 51 sdev->lddckr = LDDCKR_ICKSEL_HDMI; 90 52 break; 91 53 default: ··· 75 105 static irqreturn_t shmob_drm_irq(int irq, void *arg) 76 106 { 77 107 struct drm_device *dev = arg; 78 - struct shmob_drm_device *sdev = dev->dev_private; 108 + struct shmob_drm_device *sdev = to_shmob_device(dev); 79 109 unsigned long flags; 80 110 u32 status; 81 111 ··· 89 119 spin_unlock_irqrestore(&sdev->irq_lock, flags); 90 120 91 121 if (status & LDINTR_VES) { 92 - drm_handle_vblank(dev, 0); 122 + drm_crtc_handle_vblank(&sdev->crtc.base); 93 123 shmob_drm_crtc_finish_page_flip(&sdev->crtc); 94 124 } 95 125 ··· 99 129 DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops); 100 130 101 131 static const struct drm_driver shmob_drm_driver = { 102 - .driver_features = DRIVER_GEM | DRIVER_MODESET, 132 + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 103 133 DRM_GEM_DMA_DRIVER_OPS, 104 134 .fops = &shmob_drm_fops, 105 135 .name = "shmob-drm", ··· 117 147 { 118 148 struct shmob_drm_device *sdev = dev_get_drvdata(dev); 119 149 120 - drm_kms_helper_poll_disable(sdev->ddev); 121 - shmob_drm_crtc_suspend(&sdev->crtc); 122 - 123 - return 0; 150 + return drm_mode_config_helper_suspend(&sdev->ddev); 124 151 } 125 152 126 153 static int shmob_drm_pm_resume(struct device 
*dev) 127 154 { 128 155 struct shmob_drm_device *sdev = dev_get_drvdata(dev); 129 156 130 - drm_modeset_lock_all(sdev->ddev); 131 - shmob_drm_crtc_resume(&sdev->crtc); 132 - drm_modeset_unlock_all(sdev->ddev); 157 + return drm_mode_config_helper_resume(&sdev->ddev); 158 + } 133 159 134 - drm_kms_helper_poll_enable(sdev->ddev); 160 + static int shmob_drm_pm_runtime_suspend(struct device *dev) 161 + { 162 + struct shmob_drm_device *sdev = dev_get_drvdata(dev); 163 + 164 + if (sdev->clock) 165 + clk_disable_unprepare(sdev->clock); 166 + 135 167 return 0; 136 168 } 137 169 138 - static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops, 139 - shmob_drm_pm_suspend, shmob_drm_pm_resume); 170 + static int shmob_drm_pm_runtime_resume(struct device *dev) 171 + { 172 + struct shmob_drm_device *sdev = dev_get_drvdata(dev); 173 + int ret; 174 + 175 + if (sdev->clock) { 176 + ret = clk_prepare_enable(sdev->clock); 177 + if (ret < 0) 178 + return ret; 179 + } 180 + 181 + return 0; 182 + } 183 + 184 + static const struct dev_pm_ops shmob_drm_pm_ops = { 185 + SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume) 186 + RUNTIME_PM_OPS(shmob_drm_pm_runtime_suspend, 187 + shmob_drm_pm_runtime_resume, NULL) 188 + }; 140 189 141 190 /* ----------------------------------------------------------------------------- 142 191 * Platform driver ··· 164 175 static void shmob_drm_remove(struct platform_device *pdev) 165 176 { 166 177 struct shmob_drm_device *sdev = platform_get_drvdata(pdev); 167 - struct drm_device *ddev = sdev->ddev; 178 + struct drm_device *ddev = &sdev->ddev; 168 179 169 180 drm_dev_unregister(ddev); 181 + drm_atomic_helper_shutdown(ddev); 170 182 drm_kms_helper_poll_fini(ddev); 171 - free_irq(sdev->irq, ddev); 172 - drm_dev_put(ddev); 173 183 } 174 184 175 185 static int shmob_drm_probe(struct platform_device *pdev) 176 186 { 177 187 struct shmob_drm_platform_data *pdata = pdev->dev.platform_data; 188 + const struct shmob_drm_config *config; 178 189 struct shmob_drm_device 
*sdev; 179 190 struct drm_device *ddev; 180 - unsigned int i; 181 191 int ret; 182 192 183 - if (pdata == NULL) { 193 + config = of_device_get_match_data(&pdev->dev); 194 + if (!config && !pdata) { 184 195 dev_err(&pdev->dev, "no platform data\n"); 185 196 return -EINVAL; 186 197 } 187 198 188 199 /* 189 - * Allocate and initialize the driver private data, I/O resources and 190 - * clocks. 200 + * Allocate and initialize the DRM device, driver private data, I/O 201 + * resources and clocks. 191 202 */ 192 - sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL); 193 - if (sdev == NULL) 194 - return -ENOMEM; 203 + sdev = devm_drm_dev_alloc(&pdev->dev, &shmob_drm_driver, 204 + struct shmob_drm_device, ddev); 205 + if (IS_ERR(sdev)) 206 + return PTR_ERR(sdev); 195 207 208 + ddev = &sdev->ddev; 196 209 sdev->dev = &pdev->dev; 197 - sdev->pdata = pdata; 210 + if (config) { 211 + sdev->config = *config; 212 + } else { 213 + sdev->pdata = pdata; 214 + sdev->config.clk_source = pdata->clk_source; 215 + sdev->config.clk_div = pdata->iface.clk_div; 216 + } 198 217 spin_lock_init(&sdev->irq_lock); 199 218 200 219 platform_set_drvdata(pdev, sdev); ··· 211 214 if (IS_ERR(sdev->mmio)) 212 215 return PTR_ERR(sdev->mmio); 213 216 214 - ret = shmob_drm_setup_clocks(sdev, pdata->clk_source); 217 + ret = shmob_drm_setup_clocks(sdev, sdev->config.clk_source); 215 218 if (ret < 0) 216 219 return ret; 217 220 218 - ret = shmob_drm_init_interface(sdev); 219 - if (ret < 0) 221 + ret = devm_pm_runtime_enable(&pdev->dev); 222 + if (ret) 220 223 return ret; 221 - 222 - /* Allocate and initialize the DRM device. 
*/ 223 - ddev = drm_dev_alloc(&shmob_drm_driver, &pdev->dev); 224 - if (IS_ERR(ddev)) 225 - return PTR_ERR(ddev); 226 - 227 - sdev->ddev = ddev; 228 - ddev->dev_private = sdev; 229 - 230 - ret = shmob_drm_modeset_init(sdev); 231 - if (ret < 0) { 232 - dev_err(&pdev->dev, "failed to initialize mode setting\n"); 233 - goto err_free_drm_dev; 234 - } 235 - 236 - for (i = 0; i < 4; ++i) { 237 - ret = shmob_drm_plane_create(sdev, i); 238 - if (ret < 0) { 239 - dev_err(&pdev->dev, "failed to create plane %u\n", i); 240 - goto err_modeset_cleanup; 241 - } 242 - } 243 224 244 225 ret = drm_vblank_init(ddev, 1); 245 226 if (ret < 0) { 246 227 dev_err(&pdev->dev, "failed to initialize vblank\n"); 247 - goto err_modeset_cleanup; 228 + return ret; 248 229 } 230 + 231 + ret = shmob_drm_modeset_init(sdev); 232 + if (ret < 0) 233 + return dev_err_probe(&pdev->dev, ret, 234 + "failed to initialize mode setting\n"); 249 235 250 236 ret = platform_get_irq(pdev, 0); 251 237 if (ret < 0) 252 238 goto err_modeset_cleanup; 253 239 sdev->irq = ret; 254 240 255 - ret = request_irq(sdev->irq, shmob_drm_irq, 0, ddev->driver->name, 256 - ddev); 241 + ret = devm_request_irq(&pdev->dev, sdev->irq, shmob_drm_irq, 0, 242 + ddev->driver->name, ddev); 257 243 if (ret < 0) { 258 244 dev_err(&pdev->dev, "failed to install IRQ handler\n"); 259 245 goto err_modeset_cleanup; ··· 248 268 */ 249 269 ret = drm_dev_register(ddev, 0); 250 270 if (ret < 0) 251 - goto err_irq_uninstall; 271 + goto err_modeset_cleanup; 252 272 253 273 drm_fbdev_generic_setup(ddev, 16); 254 274 255 275 return 0; 256 276 257 - err_irq_uninstall: 258 - free_irq(sdev->irq, ddev); 259 277 err_modeset_cleanup: 260 278 drm_kms_helper_poll_fini(ddev); 261 - err_free_drm_dev: 262 - drm_dev_put(ddev); 263 - 264 279 return ret; 265 280 } 281 + 282 + static const struct shmob_drm_config shmob_arm_config = { 283 + .clk_source = SHMOB_DRM_CLK_BUS, 284 + .clk_div = 5, 285 + }; 286 + 287 + static const struct of_device_id shmob_drm_of_table[] 
__maybe_unused = { 288 + { .compatible = "renesas,r8a7740-lcdc", .data = &shmob_arm_config, }, 289 + { .compatible = "renesas,sh73a0-lcdc", .data = &shmob_arm_config, }, 290 + { /* sentinel */ } 291 + }; 266 292 267 293 static struct platform_driver shmob_drm_platform_driver = { 268 294 .probe = shmob_drm_probe, 269 295 .remove_new = shmob_drm_remove, 270 296 .driver = { 271 297 .name = "shmob-drm", 272 - .pm = pm_sleep_ptr(&shmob_drm_pm_ops), 298 + .of_match_table = of_match_ptr(shmob_drm_of_table), 299 + .pm = &shmob_drm_pm_ops, 273 300 }, 274 301 }; 275 302
+14 -4
drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.h
··· 20 20 struct device; 21 21 struct drm_device; 22 22 23 + struct shmob_drm_config { 24 + enum shmob_drm_clk_source clk_source; 25 + unsigned int clk_div; 26 + }; 27 + 23 28 struct shmob_drm_device { 24 29 struct device *dev; 25 30 const struct shmob_drm_platform_data *pdata; 31 + struct shmob_drm_config config; 26 32 27 33 void __iomem *mmio; 28 34 struct clk *clock; 29 35 u32 lddckr; 30 - u32 ldmt1r; 31 36 32 37 unsigned int irq; 33 38 spinlock_t irq_lock; /* Protects hardware LDINTR register */ 34 39 35 - struct drm_device *ddev; 40 + struct drm_device ddev; 36 41 37 42 struct shmob_drm_crtc crtc; 38 - struct shmob_drm_encoder encoder; 39 - struct shmob_drm_connector connector; 43 + struct drm_encoder encoder; 44 + struct drm_connector *connector; 40 45 }; 46 + 47 + static inline struct shmob_drm_device *to_shmob_device(struct drm_device *dev) 48 + { 49 + return container_of(dev, struct shmob_drm_device, ddev); 50 + } 41 51 42 52 #endif /* __SHMOB_DRM_DRV_H__ */
+55 -22
drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
··· 7 7 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 8 */ 9 9 10 + #include <drm/drm_atomic_helper.h> 10 11 #include <drm/drm_crtc.h> 11 12 #include <drm/drm_crtc_helper.h> 12 13 #include <drm/drm_fourcc.h> ··· 18 17 #include "shmob_drm_crtc.h" 19 18 #include "shmob_drm_drv.h" 20 19 #include "shmob_drm_kms.h" 20 + #include "shmob_drm_plane.h" 21 21 #include "shmob_drm_regs.h" 22 22 23 23 /* ----------------------------------------------------------------------------- ··· 29 27 { 30 28 .fourcc = DRM_FORMAT_RGB565, 31 29 .bpp = 16, 32 - .yuv = false, 33 30 .lddfr = LDDFR_PKF_RGB16, 31 + .ldddsr = LDDDSR_LS | LDDDSR_WS, 32 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW | 33 + LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16, 34 34 }, { 35 35 .fourcc = DRM_FORMAT_RGB888, 36 36 .bpp = 24, 37 - .yuv = false, 38 37 .lddfr = LDDFR_PKF_RGB24, 38 + .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS, 39 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW | 40 + LDBBSIFR_SWPB | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24, 39 41 }, { 40 42 .fourcc = DRM_FORMAT_ARGB8888, 41 43 .bpp = 32, 42 - .yuv = false, 43 44 .lddfr = LDDFR_PKF_ARGB32, 45 + .ldddsr = LDDDSR_LS, 46 + .ldbbsifr = LDBBSIFR_AL_PK | LDBBSIFR_SWPL | LDBBSIFR_RY | 47 + LDBBSIFR_RPKF_ARGB32, 44 48 }, { 45 49 .fourcc = DRM_FORMAT_XRGB8888, 46 50 .bpp = 32, 47 - .yuv = false, 48 51 .lddfr = LDDFR_PKF_ARGB32, 52 + .ldddsr = LDDDSR_LS, 53 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_RY | 54 + LDBBSIFR_RPKF_ARGB32, 49 55 }, { 50 56 .fourcc = DRM_FORMAT_NV12, 51 57 .bpp = 12, 52 - .yuv = true, 53 58 .lddfr = LDDFR_CC | LDDFR_YF_420, 59 + .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS, 60 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW | 61 + LDBBSIFR_SWPB | LDBBSIFR_CHRR_420, 54 62 }, { 55 63 .fourcc = DRM_FORMAT_NV21, 56 64 .bpp = 12, 57 - .yuv = true, 58 65 .lddfr = LDDFR_CC | LDDFR_YF_420, 66 + .ldddsr = LDDDSR_LS | LDDDSR_WS, 67 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW | 68 + 
LDBBSIFR_CHRR_420, 59 69 }, { 60 70 .fourcc = DRM_FORMAT_NV16, 61 71 .bpp = 16, 62 - .yuv = true, 63 72 .lddfr = LDDFR_CC | LDDFR_YF_422, 73 + .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS, 74 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW | 75 + LDBBSIFR_SWPB | LDBBSIFR_CHRR_422, 64 76 }, { 65 77 .fourcc = DRM_FORMAT_NV61, 66 78 .bpp = 16, 67 - .yuv = true, 68 79 .lddfr = LDDFR_CC | LDDFR_YF_422, 80 + .ldddsr = LDDDSR_LS | LDDDSR_WS, 81 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW | 82 + LDBBSIFR_CHRR_422, 69 83 }, { 70 84 .fourcc = DRM_FORMAT_NV24, 71 85 .bpp = 24, 72 - .yuv = true, 73 86 .lddfr = LDDFR_CC | LDDFR_YF_444, 87 + .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS, 88 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW | 89 + LDBBSIFR_SWPB | LDBBSIFR_CHRR_444, 74 90 }, { 75 91 .fourcc = DRM_FORMAT_NV42, 76 92 .bpp = 24, 77 - .yuv = true, 78 93 .lddfr = LDDFR_CC | LDDFR_YF_444, 94 + .ldddsr = LDDDSR_LS | LDDDSR_WS, 95 + .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW | 96 + LDBBSIFR_CHRR_444, 79 97 }, 80 98 }; 81 99 ··· 134 112 return ERR_PTR(-EINVAL); 135 113 } 136 114 137 - if (format->yuv) { 115 + if (shmob_drm_format_is_yuv(format)) { 138 116 unsigned int chroma_cpp = format->bpp == 24 ? 
2 : 1; 139 117 140 118 if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) { ··· 149 127 150 128 static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = { 151 129 .fb_create = shmob_drm_fb_create, 130 + .atomic_check = drm_atomic_helper_check, 131 + .atomic_commit = drm_atomic_helper_commit, 152 132 }; 153 133 154 134 int shmob_drm_modeset_init(struct shmob_drm_device *sdev) 155 135 { 136 + struct drm_device *dev = &sdev->ddev; 156 137 int ret; 157 138 158 - ret = drmm_mode_config_init(sdev->ddev); 139 + ret = drmm_mode_config_init(dev); 159 140 if (ret) 160 141 return ret; 161 142 162 - shmob_drm_crtc_create(sdev); 163 - shmob_drm_encoder_create(sdev); 164 - shmob_drm_connector_create(sdev, &sdev->encoder.encoder); 143 + ret = shmob_drm_crtc_create(sdev); 144 + if (ret < 0) 145 + return ret; 165 146 166 - drm_kms_helper_poll_init(sdev->ddev); 147 + ret = shmob_drm_encoder_create(sdev); 148 + if (ret < 0) 149 + return ret; 167 150 168 - sdev->ddev->mode_config.min_width = 0; 169 - sdev->ddev->mode_config.min_height = 0; 170 - sdev->ddev->mode_config.max_width = 4095; 171 - sdev->ddev->mode_config.max_height = 4095; 172 - sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs; 151 + ret = shmob_drm_connector_create(sdev, &sdev->encoder); 152 + if (ret < 0) 153 + return ret; 173 154 174 - drm_helper_disable_unused_functions(sdev->ddev); 155 + drm_mode_config_reset(dev); 156 + 157 + drm_kms_helper_poll_init(dev); 158 + 159 + sdev->ddev.mode_config.min_width = 0; 160 + sdev->ddev.mode_config.min_height = 0; 161 + sdev->ddev.mode_config.max_width = 4095; 162 + sdev->ddev.mode_config.max_height = 4095; 163 + sdev->ddev.mode_config.funcs = &shmob_drm_mode_config_funcs; 175 164 176 165 return 0; 177 166 }
+6 -3
drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.h
··· 17 17 18 18 struct shmob_drm_format_info { 19 19 u32 fourcc; 20 - unsigned int bpp; 21 - bool yuv; 22 - u32 lddfr; 20 + u32 lddfr; /* LCD Data Format Register */ 21 + u16 ldbbsifr; /* CHn Source Image Format Register low bits */ 22 + u8 ldddsr; /* LCDC Input Image Data Swap Register low bits */ 23 + u8 bpp; 23 24 }; 25 + 26 + #define shmob_drm_format_is_yuv(format) ((format)->lddfr & LDDFR_CC) 24 27 25 28 const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc); 26 29
+197 -145
drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
··· 7 7 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 8 8 */ 9 9 10 + #include <drm/drm_atomic.h> 11 + #include <drm/drm_atomic_helper.h> 10 12 #include <drm/drm_crtc.h> 11 13 #include <drm/drm_fb_dma_helper.h> 12 14 #include <drm/drm_fourcc.h> 13 15 #include <drm/drm_framebuffer.h> 14 16 #include <drm/drm_gem_dma_helper.h> 17 + #include <drm/drm_plane_helper.h> 15 18 16 19 #include "shmob_drm_drv.h" 17 20 #include "shmob_drm_kms.h" ··· 22 19 #include "shmob_drm_regs.h" 23 20 24 21 struct shmob_drm_plane { 25 - struct drm_plane plane; 22 + struct drm_plane base; 26 23 unsigned int index; 27 - unsigned int alpha; 28 - 29 - const struct shmob_drm_format_info *format; 30 - unsigned long dma[2]; 31 - 32 - unsigned int src_x; 33 - unsigned int src_y; 34 - unsigned int crtc_x; 35 - unsigned int crtc_y; 36 - unsigned int crtc_w; 37 - unsigned int crtc_h; 38 24 }; 39 25 40 - #define to_shmob_plane(p) container_of(p, struct shmob_drm_plane, plane) 26 + struct shmob_drm_plane_state { 27 + struct drm_plane_state base; 41 28 42 - static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane, 43 - struct drm_framebuffer *fb, 44 - int x, int y) 29 + const struct shmob_drm_format_info *format; 30 + u32 dma[2]; 31 + }; 32 + 33 + static inline struct shmob_drm_plane *to_shmob_plane(struct drm_plane *plane) 45 34 { 35 + return container_of(plane, struct shmob_drm_plane, base); 36 + } 37 + 38 + static inline struct shmob_drm_plane_state *to_shmob_plane_state(struct drm_plane_state *state) 39 + { 40 + return container_of(state, struct shmob_drm_plane_state, base); 41 + } 42 + 43 + static void shmob_drm_plane_compute_base(struct shmob_drm_plane_state *sstate) 44 + { 45 + struct drm_framebuffer *fb = sstate->base.fb; 46 + unsigned int x = sstate->base.src_x >> 16; 47 + unsigned int y = sstate->base.src_y >> 16; 46 48 struct drm_gem_dma_object *gem; 47 49 unsigned int bpp; 48 50 49 - bpp = splane->format->yuv ? 
8 : splane->format->bpp; 51 + bpp = shmob_drm_format_is_yuv(sstate->format) ? 8 : sstate->format->bpp; 50 52 gem = drm_fb_dma_get_gem_obj(fb, 0); 51 - splane->dma[0] = gem->dma_addr + fb->offsets[0] 53 + sstate->dma[0] = gem->dma_addr + fb->offsets[0] 52 54 + y * fb->pitches[0] + x * bpp / 8; 53 55 54 - if (splane->format->yuv) { 55 - bpp = splane->format->bpp - 8; 56 + if (shmob_drm_format_is_yuv(sstate->format)) { 57 + bpp = sstate->format->bpp - 8; 56 58 gem = drm_fb_dma_get_gem_obj(fb, 1); 57 - splane->dma[1] = gem->dma_addr + fb->offsets[1] 59 + sstate->dma[1] = gem->dma_addr + fb->offsets[1] 58 60 + y / (bpp == 4 ? 2 : 1) * fb->pitches[1] 59 61 + x * (bpp == 16 ? 2 : 1); 60 62 } 61 63 } 62 64 63 - static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane, 64 - struct drm_framebuffer *fb) 65 + static void shmob_drm_primary_plane_setup(struct shmob_drm_plane *splane, 66 + struct drm_plane_state *state) 65 67 { 66 - struct shmob_drm_device *sdev = splane->plane.dev->dev_private; 68 + struct shmob_drm_plane_state *sstate = to_shmob_plane_state(state); 69 + struct shmob_drm_device *sdev = to_shmob_device(splane->base.dev); 70 + struct drm_framebuffer *fb = state->fb; 71 + 72 + /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */ 73 + lcdc_write(sdev, LDDFR, sstate->format->lddfr | LDDFR_CF1); 74 + lcdc_write(sdev, LDMLSR, fb->pitches[0]); 75 + 76 + /* Word and long word swap. 
*/ 77 + lcdc_write(sdev, LDDDSR, sstate->format->ldddsr); 78 + 79 + lcdc_write_mirror(sdev, LDSA1R, sstate->dma[0]); 80 + if (shmob_drm_format_is_yuv(sstate->format)) 81 + lcdc_write_mirror(sdev, LDSA2R, sstate->dma[1]); 82 + 83 + lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS); 84 + } 85 + 86 + static void shmob_drm_overlay_plane_setup(struct shmob_drm_plane *splane, 87 + struct drm_plane_state *state) 88 + { 89 + struct shmob_drm_plane_state *sstate = to_shmob_plane_state(state); 90 + struct shmob_drm_device *sdev = to_shmob_device(splane->base.dev); 91 + struct drm_framebuffer *fb = state->fb; 67 92 u32 format; 68 93 69 94 /* TODO: Support ROP3 mode */ 70 - format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT); 71 - 72 - switch (splane->format->fourcc) { 73 - case DRM_FORMAT_RGB565: 74 - case DRM_FORMAT_NV21: 75 - case DRM_FORMAT_NV61: 76 - case DRM_FORMAT_NV42: 77 - format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW; 78 - break; 79 - case DRM_FORMAT_RGB888: 80 - case DRM_FORMAT_NV12: 81 - case DRM_FORMAT_NV16: 82 - case DRM_FORMAT_NV24: 83 - format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB; 84 - break; 85 - case DRM_FORMAT_ARGB8888: 86 - case DRM_FORMAT_XRGB8888: 87 - default: 88 - format |= LDBBSIFR_SWPL; 89 - break; 90 - } 91 - 92 - switch (splane->format->fourcc) { 93 - case DRM_FORMAT_RGB565: 94 - format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16; 95 - break; 96 - case DRM_FORMAT_RGB888: 97 - format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24; 98 - break; 99 - case DRM_FORMAT_ARGB8888: 100 - format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32; 101 - break; 102 - case DRM_FORMAT_XRGB8888: 103 - format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDDFR_PKF_ARGB32; 104 - break; 105 - case DRM_FORMAT_NV12: 106 - case DRM_FORMAT_NV21: 107 - format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420; 108 - break; 109 - case DRM_FORMAT_NV16: 110 - case DRM_FORMAT_NV61: 111 - format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422; 112 - break; 113 - case 
DRM_FORMAT_NV24: 114 - case DRM_FORMAT_NV42: 115 - format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444; 116 - break; 117 - } 95 + format = LDBBSIFR_EN | ((state->alpha >> 8) << LDBBSIFR_LAY_SHIFT) | 96 + sstate->format->ldbbsifr; 118 97 119 98 #define plane_reg_dump(sdev, splane, reg) \ 120 - dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \ 99 + dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \ 121 100 splane->index, #reg, \ 122 101 lcdc_read(sdev, reg(splane->index)), \ 123 102 lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET)) ··· 112 127 plane_reg_dump(sdev, splane, LDBnBSACR); 113 128 114 129 lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index)); 115 - dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index, 130 + dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x\n", __func__, splane->index, 116 131 "LDBCR", lcdc_read(sdev, LDBCR)); 117 132 118 133 lcdc_write(sdev, LDBnBSIFR(splane->index), format); 119 134 120 135 lcdc_write(sdev, LDBnBSSZR(splane->index), 121 - (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) | 122 - (splane->crtc_w << LDBBSSZR_BHSS_SHIFT)); 136 + (state->crtc_h << LDBBSSZR_BVSS_SHIFT) | 137 + (state->crtc_w << LDBBSSZR_BHSS_SHIFT)); 123 138 lcdc_write(sdev, LDBnBLOCR(splane->index), 124 - (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) | 125 - (splane->crtc_x << LDBBLOCR_CHLC_SHIFT)); 139 + (state->crtc_y << LDBBLOCR_CVLC_SHIFT) | 140 + (state->crtc_x << LDBBLOCR_CHLC_SHIFT)); 126 141 lcdc_write(sdev, LDBnBSMWR(splane->index), 127 142 fb->pitches[0] << LDBBSMWR_BSMW_SHIFT); 128 143 129 - shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y); 130 - 131 - lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]); 132 - if (splane->format->yuv) 133 - lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]); 144 + lcdc_write(sdev, LDBnBSAYR(splane->index), sstate->dma[0]); 145 + if (shmob_drm_format_is_yuv(sstate->format)) 146 + lcdc_write(sdev, LDBnBSACR(splane->index), sstate->dma[1]); 134 147 135 148 
lcdc_write(sdev, LDBCR, 136 149 LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index)); 137 - dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index, 150 + dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x\n", __func__, splane->index, 138 151 "LDBCR", lcdc_read(sdev, LDBCR)); 139 152 140 153 plane_reg_dump(sdev, splane, LDBnBSIFR); ··· 143 160 plane_reg_dump(sdev, splane, LDBnBSACR); 144 161 } 145 162 146 - void shmob_drm_plane_setup(struct drm_plane *plane) 163 + static int shmob_drm_plane_atomic_check(struct drm_plane *plane, 164 + struct drm_atomic_state *state) 147 165 { 166 + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); 167 + struct shmob_drm_plane_state *sstate = to_shmob_plane_state(new_plane_state); 168 + struct drm_crtc_state *crtc_state; 169 + bool is_primary = plane->type == DRM_PLANE_TYPE_PRIMARY; 170 + int ret; 171 + 172 + if (!new_plane_state->crtc) { 173 + /* 174 + * The visible field is not reset by the DRM core but only 175 + * updated by drm_atomic_helper_check_plane_state(), set it 176 + * manually. 
177 + */ 178 + new_plane_state->visible = false; 179 + sstate->format = NULL; 180 + return 0; 181 + } 182 + 183 + crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc); 184 + if (IS_ERR(crtc_state)) 185 + return PTR_ERR(crtc_state); 186 + 187 + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 188 + DRM_PLANE_NO_SCALING, 189 + DRM_PLANE_NO_SCALING, 190 + !is_primary, true); 191 + if (ret < 0) 192 + return ret; 193 + 194 + if (!new_plane_state->visible) { 195 + sstate->format = NULL; 196 + return 0; 197 + } 198 + 199 + sstate->format = shmob_drm_format_info(new_plane_state->fb->format->format); 200 + if (!sstate->format) { 201 + dev_dbg(plane->dev->dev, 202 + "plane_atomic_check: unsupported format %p4cc\n", 203 + &new_plane_state->fb->format->format); 204 + return -EINVAL; 205 + } 206 + 207 + shmob_drm_plane_compute_base(sstate); 208 + 209 + return 0; 210 + } 211 + 212 + static void shmob_drm_plane_atomic_update(struct drm_plane *plane, 213 + struct drm_atomic_state *state) 214 + { 215 + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); 148 216 struct shmob_drm_plane *splane = to_shmob_plane(plane); 149 217 150 - if (plane->fb == NULL) 218 + if (!new_plane_state->visible) 151 219 return; 152 220 153 - __shmob_drm_plane_setup(splane, plane->fb); 221 + if (plane->type == DRM_PLANE_TYPE_PRIMARY) 222 + shmob_drm_primary_plane_setup(splane, new_plane_state); 223 + else 224 + shmob_drm_overlay_plane_setup(splane, new_plane_state); 154 225 } 155 226 156 - static int 157 - shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, 158 - struct drm_framebuffer *fb, int crtc_x, int crtc_y, 159 - unsigned int crtc_w, unsigned int crtc_h, 160 - uint32_t src_x, uint32_t src_y, 161 - uint32_t src_w, uint32_t src_h, 162 - struct drm_modeset_acquire_ctx *ctx) 227 + static void shmob_drm_plane_atomic_disable(struct drm_plane *plane, 228 + struct drm_atomic_state *state) 163 229 { 230 + struct 
drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); 231 + struct shmob_drm_device *sdev = to_shmob_device(plane->dev); 164 232 struct shmob_drm_plane *splane = to_shmob_plane(plane); 165 - struct shmob_drm_device *sdev = plane->dev->dev_private; 166 - const struct shmob_drm_format_info *format; 167 233 168 - format = shmob_drm_format_info(fb->format->format); 169 - if (format == NULL) { 170 - dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n", 171 - fb->format->format); 172 - return -EINVAL; 173 - } 234 + if (!old_state->crtc) 235 + return; 174 236 175 - if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) { 176 - dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__); 177 - return -EINVAL; 178 - } 237 + if (plane->type != DRM_PLANE_TYPE_OVERLAY) 238 + return; 179 239 180 - splane->format = format; 181 - 182 - splane->src_x = src_x >> 16; 183 - splane->src_y = src_y >> 16; 184 - splane->crtc_x = crtc_x; 185 - splane->crtc_y = crtc_y; 186 - splane->crtc_w = crtc_w; 187 - splane->crtc_h = crtc_h; 188 - 189 - __shmob_drm_plane_setup(splane, fb); 190 - return 0; 191 - } 192 - 193 - static int shmob_drm_plane_disable(struct drm_plane *plane, 194 - struct drm_modeset_acquire_ctx *ctx) 195 - { 196 - struct shmob_drm_plane *splane = to_shmob_plane(plane); 197 - struct shmob_drm_device *sdev = plane->dev->dev_private; 198 - 199 - splane->format = NULL; 200 - 240 + lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index)); 201 241 lcdc_write(sdev, LDBnBSIFR(splane->index), 0); 202 - return 0; 242 + lcdc_write(sdev, LDBCR, 243 + LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index)); 203 244 } 204 245 205 - static void shmob_drm_plane_destroy(struct drm_plane *plane) 246 + static struct drm_plane_state * 247 + shmob_drm_plane_atomic_duplicate_state(struct drm_plane *plane) 206 248 { 207 - drm_plane_force_disable(plane); 208 - drm_plane_cleanup(plane); 249 + struct shmob_drm_plane_state *state; 250 + struct shmob_drm_plane_state *copy; 251 + 252 + if 
(WARN_ON(!plane->state)) 253 + return NULL; 254 + 255 + state = to_shmob_plane_state(plane->state); 256 + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); 257 + if (copy == NULL) 258 + return NULL; 259 + 260 + __drm_atomic_helper_plane_duplicate_state(plane, &copy->base); 261 + 262 + return &copy->base; 209 263 } 264 + 265 + static void shmob_drm_plane_atomic_destroy_state(struct drm_plane *plane, 266 + struct drm_plane_state *state) 267 + { 268 + __drm_atomic_helper_plane_destroy_state(state); 269 + kfree(to_shmob_plane_state(state)); 270 + } 271 + 272 + static void shmob_drm_plane_reset(struct drm_plane *plane) 273 + { 274 + struct shmob_drm_plane_state *state; 275 + 276 + if (plane->state) { 277 + shmob_drm_plane_atomic_destroy_state(plane, plane->state); 278 + plane->state = NULL; 279 + } 280 + 281 + state = kzalloc(sizeof(*state), GFP_KERNEL); 282 + if (state == NULL) 283 + return; 284 + 285 + __drm_atomic_helper_plane_reset(plane, &state->base); 286 + } 287 + 288 + static const struct drm_plane_helper_funcs shmob_drm_plane_helper_funcs = { 289 + .atomic_check = shmob_drm_plane_atomic_check, 290 + .atomic_update = shmob_drm_plane_atomic_update, 291 + .atomic_disable = shmob_drm_plane_atomic_disable, 292 + }; 210 293 211 294 static const struct drm_plane_funcs shmob_drm_plane_funcs = { 212 - .update_plane = shmob_drm_plane_update, 213 - .disable_plane = shmob_drm_plane_disable, 214 - .destroy = shmob_drm_plane_destroy, 295 + .update_plane = drm_atomic_helper_update_plane, 296 + .disable_plane = drm_atomic_helper_disable_plane, 297 + .reset = shmob_drm_plane_reset, 298 + .atomic_duplicate_state = shmob_drm_plane_atomic_duplicate_state, 299 + .atomic_destroy_state = shmob_drm_plane_atomic_destroy_state, 215 300 }; 216 301 217 302 static const uint32_t formats[] = { ··· 295 244 DRM_FORMAT_NV42, 296 245 }; 297 246 298 - int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index) 247 + struct drm_plane *shmob_drm_plane_create(struct 
shmob_drm_device *sdev, 248 + enum drm_plane_type type, 249 + unsigned int index) 299 250 { 300 251 struct shmob_drm_plane *splane; 301 - int ret; 302 252 303 - splane = devm_kzalloc(sdev->dev, sizeof(*splane), GFP_KERNEL); 304 - if (splane == NULL) 305 - return -ENOMEM; 253 + splane = drmm_universal_plane_alloc(&sdev->ddev, 254 + struct shmob_drm_plane, base, 1, 255 + &shmob_drm_plane_funcs, formats, 256 + ARRAY_SIZE(formats), NULL, type, 257 + NULL); 258 + if (IS_ERR(splane)) 259 + return ERR_CAST(splane); 306 260 307 261 splane->index = index; 308 - splane->alpha = 255; 309 262 310 - ret = drm_universal_plane_init(sdev->ddev, &splane->plane, 1, 311 - &shmob_drm_plane_funcs, 312 - formats, ARRAY_SIZE(formats), NULL, 313 - DRM_PLANE_TYPE_OVERLAY, NULL); 263 + drm_plane_helper_add(&splane->base, &shmob_drm_plane_helper_funcs); 314 264 315 - return ret; 265 + return &splane->base; 316 266 }
+3 -2
drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.h
··· 13 13 struct drm_plane; 14 14 struct shmob_drm_device; 15 15 16 - int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index); 17 - void shmob_drm_plane_setup(struct drm_plane *plane); 16 + struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev, 17 + enum drm_plane_type type, 18 + unsigned int index); 18 19 19 20 #endif /* __SHMOB_DRM_PLANE_H__ */
+1 -1
drivers/gpu/drm/solomon/ssd130x.c
··· 910 910 struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); 911 911 struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state); 912 912 struct drm_crtc *crtc = plane_state->crtc; 913 - struct drm_crtc_state *crtc_state; 913 + struct drm_crtc_state *crtc_state = NULL; 914 914 const struct drm_format_info *fi; 915 915 unsigned int pitch; 916 916 int ret;
+5
include/drm/amd_asic_type.h
··· 68 68 69 69 extern const char *amdgpu_asic_name[]; 70 70 71 + struct amdgpu_asic_type_quirk { 72 + unsigned short device; /* PCI device ID */ 73 + u8 revision; /* revision ID */ 74 + unsigned short type; /* real ASIC type */ 75 + }; 71 76 #endif /*__AMD_ASIC_TYPE_H__ */
+3 -54
include/linux/platform_data/shmob_drm.h
··· 10 10 #ifndef __SHMOB_DRM_H__ 11 11 #define __SHMOB_DRM_H__ 12 12 13 - #include <drm/drm_mode.h> 13 + #include <video/videomode.h> 14 14 15 15 enum shmob_drm_clk_source { 16 16 SHMOB_DRM_CLK_BUS, ··· 18 18 SHMOB_DRM_CLK_EXTERNAL, 19 19 }; 20 20 21 - enum shmob_drm_interface { 22 - SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */ 23 - SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */ 24 - SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */ 25 - SHMOB_DRM_IFACE_RGB12B, /* 12bpp */ 26 - SHMOB_DRM_IFACE_RGB16, /* 16bpp */ 27 - SHMOB_DRM_IFACE_RGB18, /* 18bpp */ 28 - SHMOB_DRM_IFACE_RGB24, /* 24bpp */ 29 - SHMOB_DRM_IFACE_YUV422, /* 16bpp */ 30 - SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */ 31 - SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */ 32 - SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */ 33 - SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */ 34 - SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */ 35 - SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */ 36 - SHMOB_DRM_IFACE_SYS16A, /* 16bpp */ 37 - SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */ 38 - SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */ 39 - SHMOB_DRM_IFACE_SYS18, /* 18bpp */ 40 - SHMOB_DRM_IFACE_SYS24, /* 24bpp */ 41 - }; 42 - 43 - struct shmob_drm_backlight_data { 44 - const char *name; 45 - int max_brightness; 46 - int (*get_brightness)(void); 47 - int (*set_brightness)(int brightness); 48 - }; 49 - 50 21 struct shmob_drm_panel_data { 51 22 unsigned int width_mm; /* Panel width in mm */ 52 23 unsigned int height_mm; /* Panel height in mm */ 53 - struct drm_mode_modeinfo mode; 24 + struct videomode mode; 54 25 }; 55 - 56 - struct shmob_drm_sys_interface_data { 57 - unsigned int read_latch:6; 58 - unsigned int read_setup:8; 59 - unsigned int read_cycle:8; 60 - unsigned int read_strobe:8; 61 - unsigned int write_setup:8; 62 - unsigned int write_cycle:8; 63 - unsigned int write_strobe:8; 64 - unsigned int cs_setup:3; 65 - unsigned int vsync_active_high:1; 66 - unsigned int vsync_dir_input:1; 67 - }; 68 - 69 - #define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch 
*/ 70 - #define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */ 71 - #define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */ 72 - #define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */ 73 - #define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */ 74 26 75 27 struct shmob_drm_interface_data { 76 - enum shmob_drm_interface interface; 77 - struct shmob_drm_sys_interface_data sys; 28 + unsigned int bus_fmt; /* MEDIA_BUS_FMT_* */ 78 29 unsigned int clk_div; 79 - unsigned int flags; 80 30 }; 81 31 82 32 struct shmob_drm_platform_data { 83 33 enum shmob_drm_clk_source clk_source; 84 34 struct shmob_drm_interface_data iface; 85 35 struct shmob_drm_panel_data panel; 86 - struct shmob_drm_backlight_data backlight; 87 36 }; 88 37 89 38 #endif /* __SHMOB_DRM_H__ */
+2 -1
include/uapi/linux/media-bus-format.h
··· 34 34 35 35 #define MEDIA_BUS_FMT_FIXED 0x0001 36 36 37 - /* RGB - next is 0x1025 */ 37 + /* RGB - next is 0x1026 */ 38 38 #define MEDIA_BUS_FMT_RGB444_1X12 0x1016 39 39 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 40 40 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 ··· 46 46 #define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007 47 47 #define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008 48 48 #define MEDIA_BUS_FMT_RGB666_1X18 0x1009 49 + #define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025 49 50 #define MEDIA_BUS_FMT_BGR666_1X18 0x1023 50 51 #define MEDIA_BUS_FMT_RBG888_1X24 0x100e 51 52 #define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015