Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: Create generic DF struct in adev

The only data fabric information the adev struct currently
contains is a function pointer table. In the near future,
we will be adding some cached DF information into adev. As
such, this patch creates a new amdgpu_df struct for adev.
Right now, it only contains the old function pointer table,
but new stuff will be added soon.

Signed-off-by: Joseph Greathouse <Joseph.Greathouse@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Joseph Greathouse and committed by
Alex Deucher
bdf84a80 61e50646

+90 -49
+4 -25
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 90 90 #include "amdgpu_mes.h" 91 91 #include "amdgpu_umc.h" 92 92 #include "amdgpu_mmhub.h" 93 + #include "amdgpu_df.h" 93 94 94 95 #define MAX_GPU_INSTANCE 16 95 96 ··· 665 664 resource_size_t bus_addr; 666 665 }; 667 666 668 - struct amdgpu_df_funcs { 669 - void (*sw_init)(struct amdgpu_device *adev); 670 - void (*sw_fini)(struct amdgpu_device *adev); 671 - void (*enable_broadcast_mode)(struct amdgpu_device *adev, 672 - bool enable); 673 - u32 (*get_fb_channel_number)(struct amdgpu_device *adev); 674 - u32 (*get_hbm_channel_number)(struct amdgpu_device *adev); 675 - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, 676 - bool enable); 677 - void (*get_clockgating_state)(struct amdgpu_device *adev, 678 - u32 *flags); 679 - void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, 680 - bool enable); 681 - int (*pmc_start)(struct amdgpu_device *adev, uint64_t config, 682 - int is_enable); 683 - int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config, 684 - int is_disable); 685 - void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config, 686 - uint64_t *count); 687 - uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); 688 - void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, 689 - uint32_t ficadl_val, uint32_t ficadh_val); 690 - }; 691 667 /* Define the HW IP blocks will be used in driver , add more if necessary */ 692 668 enum amd_hw_ip_block_type { 693 669 GC_HWIP = 1, ··· 908 930 bool enable_mes; 909 931 struct amdgpu_mes mes; 910 932 933 + /* df */ 934 + struct amdgpu_df df; 935 + 911 936 struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; 912 937 int num_ip_blocks; 913 938 struct mutex mn_lock; ··· 923 942 924 943 /* soc15 register offset based on ip, instance and segment */ 925 944 uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; 926 - 927 - const struct amdgpu_df_funcs *df_funcs; 928 945 929 946 /* delayed work_func for deferring clockgating during resume */ 930 947 struct delayed_work 
delayed_init_work;
+62
drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
··· 1 + /* 2 + * Copyright 2020 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #ifndef __AMDGPU_DF_H__ 25 + #define __AMDGPU_DF_H__ 26 + 27 + struct amdgpu_df_hash_status { 28 + bool hash_64k; 29 + bool hash_2m; 30 + bool hash_1g; 31 + }; 32 + 33 + struct amdgpu_df_funcs { 34 + void (*sw_init)(struct amdgpu_device *adev); 35 + void (*sw_fini)(struct amdgpu_device *adev); 36 + void (*enable_broadcast_mode)(struct amdgpu_device *adev, 37 + bool enable); 38 + u32 (*get_fb_channel_number)(struct amdgpu_device *adev); 39 + u32 (*get_hbm_channel_number)(struct amdgpu_device *adev); 40 + void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, 41 + bool enable); 42 + void (*get_clockgating_state)(struct amdgpu_device *adev, 43 + u32 *flags); 44 + void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, 45 + bool enable); 46 + int (*pmc_start)(struct amdgpu_device *adev, uint64_t config, 47 + int is_enable); 48 + int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config, 49 + int is_disable); 50 + void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config, 51 + uint64_t *count); 52 + uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); 53 + void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, 54 + uint32_t ficadl_val, uint32_t ficadh_val); 55 + }; 56 + 57 + struct amdgpu_df { 58 + struct amdgpu_df_hash_status hash_status; 59 + const struct amdgpu_df_funcs *funcs; 60 + }; 61 + 62 + #endif /* __AMDGPU_DF_H__ */
+6 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
··· 74 74 switch (pe->pmu_perf_type) { 75 75 case PERF_TYPE_AMDGPU_DF: 76 76 if (!(flags & PERF_EF_RELOAD)) 77 - pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1); 77 + pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1); 78 78 79 - pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0); 79 + pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0); 80 80 break; 81 81 default: 82 82 break; ··· 101 101 102 102 switch (pe->pmu_perf_type) { 103 103 case PERF_TYPE_AMDGPU_DF: 104 - pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf, 104 + pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf, 105 105 &count); 106 106 break; 107 107 default: ··· 126 126 127 127 switch (pe->pmu_perf_type) { 128 128 case PERF_TYPE_AMDGPU_DF: 129 - pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0); 129 + pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0); 130 130 break; 131 131 default: 132 132 break; ··· 156 156 157 157 switch (pe->pmu_perf_type) { 158 158 case PERF_TYPE_AMDGPU_DF: 159 - retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1); 159 + retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1); 160 160 break; 161 161 default: 162 162 return 0; ··· 184 184 185 185 switch (pe->pmu_perf_type) { 186 186 case PERF_TYPE_AMDGPU_DF: 187 - pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1); 187 + pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1); 188 188 break; 189 189 default: 190 190 break;
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
··· 146 146 ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200); 147 147 ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208); 148 148 149 - fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in); 149 + fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in); 150 150 if (fica_out != 0x1f) 151 151 pr_err("xGMI error counters not enabled!\n"); 152 152 153 - fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in); 153 + fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in); 154 154 155 155 if ((fica_out & 0xffff) == 2) 156 156 error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63); 157 157 158 - adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0); 158 + adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0); 159 159 160 160 return snprintf(buf, PAGE_SIZE, "%d\n", error_count); 161 161 }
+3 -3
drivers/gpu/drm/amd/amdgpu/df_v1_7.c
··· 66 66 { 67 67 int fb_channel_number; 68 68 69 - fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); 69 + fb_channel_number = adev->df.funcs->get_fb_channel_number(adev); 70 70 71 71 return df_v1_7_channel_number[fb_channel_number]; 72 72 } ··· 77 77 u32 tmp; 78 78 79 79 /* Put DF on broadcast mode */ 80 - adev->df_funcs->enable_broadcast_mode(adev, true); 80 + adev->df.funcs->enable_broadcast_mode(adev, true); 81 81 82 82 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { 83 83 tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); ··· 92 92 } 93 93 94 94 /* Exit boradcast mode */ 95 - adev->df_funcs->enable_broadcast_mode(adev, false); 95 + adev->df.funcs->enable_broadcast_mode(adev, false); 96 96 } 97 97 98 98 static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
+3 -3
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
··· 311 311 { 312 312 int fb_channel_number; 313 313 314 - fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); 314 + fb_channel_number = adev->df.funcs->get_fb_channel_number(adev); 315 315 if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number)) 316 316 fb_channel_number = 0; 317 317 ··· 325 325 326 326 if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) { 327 327 /* Put DF on broadcast mode */ 328 - adev->df_funcs->enable_broadcast_mode(adev, true); 328 + adev->df.funcs->enable_broadcast_mode(adev, true); 329 329 330 330 if (enable) { 331 331 tmp = RREG32_SOC15(DF, 0, ··· 344 344 } 345 345 346 346 /* Exit broadcast mode */ 347 - adev->df_funcs->enable_broadcast_mode(adev, false); 347 + adev->df.funcs->enable_broadcast_mode(adev, false); 348 348 } 349 349 } 350 350
+3 -3
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 817 817 r = amdgpu_atomfirmware_mem_ecc_supported(adev); 818 818 if (!r) { 819 819 DRM_INFO("ECC is not present.\n"); 820 - if (adev->df_funcs->enable_ecc_force_par_wr_rmw) 821 - adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false); 820 + if (adev->df.funcs->enable_ecc_force_par_wr_rmw) 821 + adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); 822 822 } else { 823 823 DRM_INFO("ECC is active.\n"); 824 824 } ··· 1023 1023 else 1024 1024 chansize = 128; 1025 1025 1026 - numchan = adev->df_funcs->get_hbm_channel_number(adev); 1026 + numchan = adev->df.funcs->get_hbm_channel_number(adev); 1027 1027 adev->gmc.vram_width = numchan * chansize; 1028 1028 } 1029 1029
+6 -6
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 677 677 } 678 678 679 679 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) 680 - adev->df_funcs = &df_v3_6_funcs; 680 + adev->df.funcs = &df_v3_6_funcs; 681 681 else 682 - adev->df_funcs = &df_v1_7_funcs; 682 + adev->df.funcs = &df_v1_7_funcs; 683 683 684 684 adev->rev_id = soc15_get_rev_id(adev); 685 685 adev->nbio.funcs->detect_hw_virt(adev); ··· 1247 1247 if (amdgpu_sriov_vf(adev)) 1248 1248 xgpu_ai_mailbox_add_irq_id(adev); 1249 1249 1250 - adev->df_funcs->sw_init(adev); 1250 + adev->df.funcs->sw_init(adev); 1251 1251 1252 1252 return 0; 1253 1253 } ··· 1257 1257 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1258 1258 1259 1259 amdgpu_nbio_ras_fini(adev); 1260 - adev->df_funcs->sw_fini(adev); 1260 + adev->df.funcs->sw_fini(adev); 1261 1261 return 0; 1262 1262 } 1263 1263 ··· 1478 1478 state == AMD_CG_STATE_GATE ? true : false); 1479 1479 soc15_update_rom_medium_grain_clock_gating(adev, 1480 1480 state == AMD_CG_STATE_GATE ? true : false); 1481 - adev->df_funcs->update_medium_grain_clock_gating(adev, 1481 + adev->df.funcs->update_medium_grain_clock_gating(adev, 1482 1482 state == AMD_CG_STATE_GATE ? true : false); 1483 1483 break; 1484 1484 case CHIP_RAVEN: ··· 1536 1536 if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) 1537 1537 *flags |= AMD_CG_SUPPORT_ROM_MGCG; 1538 1538 1539 - adev->df_funcs->get_clockgating_state(adev, flags); 1539 + adev->df.funcs->get_clockgating_state(adev, flags); 1540 1540 } 1541 1541 1542 1542 static int soc15_common_set_powergating_state(void *handle,