Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: Modify gfx block to fit for the unified ras block data and ops

1. Modify the gfx block to fit the unified ras block data and ops.
2. Change amdgpu_gfx_ras_funcs to amdgpu_gfx_ras, and remove the _funcs suffix from the corresponding variable name.
3. Remove the const flag from the gfx ras variable so that the gfx ras block can be inserted into the amdgpu device ras block linked list.
4. Invoke the amdgpu_ras_register_ras_block function to register the gfx ras block into the amdgpu device ras block linked list.
5. Remove the redundant gfx code in amdgpu_ras.c after switching to the unified ras block.
6. Fill in the unified ras block fields .name, .block, .ras_late_init and .ras_fini for all gfx versions. If .ras_late_init and .ras_fini have been defined by the selected gfx version, the defined functions take effect; if not, they default to amdgpu_gfx_ras_late_init and amdgpu_gfx_ras_fini.

Signed-off-by: yipechai <YiPeng.Chai@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: John Clements <john.clements@amd.com>
Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

yipechai and committed by
Alex Deucher
8b0fb0e9 7cab2124

+123 -83
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 622 622 return r; 623 623 } 624 624 625 - int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) 625 + int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info) 626 626 { 627 627 int r; 628 628 struct ras_fs_if fs_info = { ··· 695 695 */ 696 696 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { 697 697 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 698 - if (adev->gfx.ras_funcs && 699 - adev->gfx.ras_funcs->query_ras_error_count) 700 - adev->gfx.ras_funcs->query_ras_error_count(adev, err_data); 698 + if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops && 699 + adev->gfx.ras->ras_block.hw_ops->query_ras_error_count) 700 + adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); 701 701 amdgpu_ras_reset_gpu(adev); 702 702 } 703 703 return AMDGPU_RAS_SUCCESS;
+5 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
··· 31 31 #include "amdgpu_ring.h" 32 32 #include "amdgpu_rlc.h" 33 33 #include "soc15.h" 34 + #include "amdgpu_ras.h" 34 35 35 36 /* GFX current status */ 36 37 #define AMDGPU_GFX_NORMAL_MODE 0x00000000L ··· 199 198 uint32_t bitmap[4][4]; 200 199 }; 201 200 202 - struct amdgpu_gfx_ras_funcs { 203 - int (*ras_late_init)(struct amdgpu_device *adev); 204 - void (*ras_fini)(struct amdgpu_device *adev); 205 - int (*ras_error_inject)(struct amdgpu_device *adev, 206 - void *inject_if); 207 - int (*query_ras_error_count)(struct amdgpu_device *adev, 208 - void *ras_error_status); 209 - void (*reset_ras_error_count)(struct amdgpu_device *adev); 210 - void (*query_ras_error_status)(struct amdgpu_device *adev); 211 - void (*reset_ras_error_status)(struct amdgpu_device *adev); 201 + struct amdgpu_gfx_ras { 202 + struct amdgpu_ras_block_object ras_block; 212 203 void (*enable_watchdog_timer)(struct amdgpu_device *adev); 213 204 }; 214 205 ··· 324 331 325 332 /*ras */ 326 333 struct ras_common_if *ras_if; 327 - const struct amdgpu_gfx_ras_funcs *ras_funcs; 334 + struct amdgpu_gfx_ras *ras; 328 335 }; 329 336 330 337 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) ··· 386 393 int pipe, int queue); 387 394 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); 388 395 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); 389 - int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev); 396 + int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info); 390 397 void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); 391 398 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, 392 399 void *err_data,
+42 -21
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 89 89 return ras_block_string[ras_block->block]; 90 90 } 91 91 92 + #define ras_block_str(_BLOCK_) (((_BLOCK_) < (sizeof(*ras_block_string)/sizeof(const char*))) ? ras_block_string[_BLOCK_] : "Out Of Range") 93 + 92 94 #define ras_err_str(i) (ras_error_string[ffs(i)]) 93 95 94 96 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) ··· 964 962 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, 965 963 struct ras_query_if *info) 966 964 { 965 + struct amdgpu_ras_block_object* block_obj = NULL; 967 966 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 968 967 struct ras_err_data err_data = {0, 0, 0, NULL}; 969 968 int i; 970 969 971 970 if (!obj) 972 971 return -EINVAL; 972 + 973 + block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); 973 974 974 975 switch (info->head.block) { 975 976 case AMDGPU_RAS_BLOCK__UMC: ··· 986 981 } 987 982 break; 988 983 case AMDGPU_RAS_BLOCK__GFX: 989 - if (adev->gfx.ras_funcs && 990 - adev->gfx.ras_funcs->query_ras_error_count) 991 - adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data); 984 + if (!block_obj || !block_obj->hw_ops) { 985 + dev_info(adev->dev, "%s doesn't config ras function \n", 986 + get_ras_block_str(&info->head)); 987 + return -EINVAL; 988 + } 989 + if (block_obj->hw_ops->query_ras_error_count) 990 + block_obj->hw_ops->query_ras_error_count(adev, &err_data); 992 991 993 - if (adev->gfx.ras_funcs && 994 - adev->gfx.ras_funcs->query_ras_error_status) 995 - adev->gfx.ras_funcs->query_ras_error_status(adev); 992 + if (block_obj->hw_ops->query_ras_error_status) 993 + block_obj->hw_ops->query_ras_error_status(adev); 996 994 break; 997 995 case AMDGPU_RAS_BLOCK__MMHUB: 998 996 if (adev->mmhub.ras_funcs && ··· 1082 1074 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, 1083 1075 enum amdgpu_ras_block block) 1084 1076 { 1077 + struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, block, 0); 1078 + 1085 1079 if (!amdgpu_ras_is_supported(adev, 
block)) 1086 1080 return -EINVAL; 1087 1081 1088 1082 switch (block) { 1089 1083 case AMDGPU_RAS_BLOCK__GFX: 1090 - if (adev->gfx.ras_funcs && 1091 - adev->gfx.ras_funcs->reset_ras_error_count) 1092 - adev->gfx.ras_funcs->reset_ras_error_count(adev); 1084 + if (!block_obj || !block_obj->hw_ops) { 1085 + dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); 1086 + return -EINVAL; 1087 + } 1093 1088 1094 - if (adev->gfx.ras_funcs && 1095 - adev->gfx.ras_funcs->reset_ras_error_status) 1096 - adev->gfx.ras_funcs->reset_ras_error_status(adev); 1089 + if (block_obj->hw_ops->reset_ras_error_count) 1090 + block_obj->hw_ops->reset_ras_error_count(adev); 1091 + 1092 + if (block_obj->hw_ops->reset_ras_error_status) 1093 + block_obj->hw_ops->reset_ras_error_status(adev); 1097 1094 break; 1098 1095 case AMDGPU_RAS_BLOCK__MMHUB: 1099 1096 if (adev->mmhub.ras_funcs && ··· 1163 1150 .address = info->address, 1164 1151 .value = info->value, 1165 1152 }; 1166 - int ret = 0; 1153 + int ret = -EINVAL; 1154 + struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); 1167 1155 1168 1156 if (!obj) 1169 1157 return -EINVAL; ··· 1178 1164 1179 1165 switch (info->head.block) { 1180 1166 case AMDGPU_RAS_BLOCK__GFX: 1181 - if (adev->gfx.ras_funcs && 1182 - adev->gfx.ras_funcs->ras_error_inject) 1183 - ret = adev->gfx.ras_funcs->ras_error_inject(adev, info); 1184 - else 1185 - ret = -EINVAL; 1167 + if (!block_obj || !block_obj->hw_ops) { 1168 + dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); 1169 + return -EINVAL; 1170 + } 1171 + 1172 + if (block_obj->hw_ops->ras_error_inject) 1173 + ret = block_obj->hw_ops->ras_error_inject(adev, info); 1186 1174 break; 1187 1175 case AMDGPU_RAS_BLOCK__UMC: 1188 1176 case AMDGPU_RAS_BLOCK__SDMA: ··· 1816 1800 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, 1817 1801 struct ras_query_if *info) 1818 1802 { 
1803 + struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); 1819 1804 /* 1820 1805 * Only two block need to query read/write 1821 1806 * RspStatus at current state 1822 1807 */ 1823 1808 switch (info->head.block) { 1824 1809 case AMDGPU_RAS_BLOCK__GFX: 1825 - if (adev->gfx.ras_funcs && 1826 - adev->gfx.ras_funcs->query_ras_error_status) 1827 - adev->gfx.ras_funcs->query_ras_error_status(adev); 1810 + if (!block_obj || !block_obj->hw_ops) { 1811 + dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); 1812 + return ; 1813 + } 1814 + 1815 + if (block_obj->hw_ops->query_ras_error_status) 1816 + block_obj->hw_ops->query_ras_error_status(adev); 1828 1817 break; 1829 1818 case AMDGPU_RAS_BLOCK__MMHUB: 1830 1819 if (adev->mmhub.ras_funcs &&
+42 -23
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 882 882 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); 883 883 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); 884 884 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); 885 - static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, 885 + static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, 886 886 void *ras_error_status); 887 887 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, 888 888 void *inject_if); ··· 2197 2197 .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, 2198 2198 }; 2199 2199 2200 - static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = { 2201 - .ras_late_init = amdgpu_gfx_ras_late_init, 2202 - .ras_fini = amdgpu_gfx_ras_fini, 2203 - .ras_error_inject = &gfx_v9_0_ras_error_inject, 2204 - .query_ras_error_count = &gfx_v9_0_query_ras_error_count, 2205 - .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, 2200 + const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = { 2201 + .ras_error_inject = &gfx_v9_0_ras_error_inject, 2202 + .query_ras_error_count = &gfx_v9_0_query_ras_error_count, 2203 + .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, 2204 + }; 2205 + 2206 + static struct amdgpu_gfx_ras gfx_v9_0_ras = { 2207 + .ras_block = { 2208 + .hw_ops = &gfx_v9_0_ras_ops, 2209 + }, 2206 2210 }; 2207 2211 2208 2212 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) ··· 2235 2231 DRM_INFO("fix gfx.config for vega12\n"); 2236 2232 break; 2237 2233 case IP_VERSION(9, 4, 0): 2238 - adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs; 2234 + adev->gfx.ras = &gfx_v9_0_ras; 2239 2235 adev->gfx.config.max_hw_contexts = 8; 2240 2236 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 2241 2237 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; ··· 2262 2258 gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; 2263 2259 break; 2264 2260 case IP_VERSION(9, 4, 1): 2265 - adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs; 2261 + adev->gfx.ras = 
&gfx_v9_4_ras; 2266 2262 adev->gfx.config.max_hw_contexts = 8; 2267 2263 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 2268 2264 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; ··· 2283 2279 gb_addr_config |= 0x22010042; 2284 2280 break; 2285 2281 case IP_VERSION(9, 4, 2): 2286 - adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs; 2282 + adev->gfx.ras = &gfx_v9_4_2_ras; 2287 2283 adev->gfx.config.max_hw_contexts = 8; 2288 2284 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 2289 2285 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; ··· 2300 2296 default: 2301 2297 BUG(); 2302 2298 break; 2299 + } 2300 + 2301 + if (adev->gfx.ras) { 2302 + err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block); 2303 + if (err) { 2304 + DRM_ERROR("Failed to register gfx ras block!\n"); 2305 + return err; 2306 + } 2307 + 2308 + strcpy(adev->gfx.ras->ras_block.name,"gfx"); 2309 + adev->gfx.ras->ras_block.block = AMDGPU_RAS_BLOCK__GFX; 2310 + 2311 + /* If not define special ras_late_init function, use gfx default ras_late_init */ 2312 + if (!adev->gfx.ras->ras_block.ras_late_init) 2313 + adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init; 2314 + 2315 + /* If not define special ras_fini function, use gfx default ras_fini */ 2316 + if (!adev->gfx.ras->ras_block.ras_fini) 2317 + adev->gfx.ras->ras_block.ras_fini = amdgpu_gfx_ras_fini; 2303 2318 } 2304 2319 2305 2320 adev->gfx.config.gb_addr_config = gb_addr_config; ··· 2536 2513 int i; 2537 2514 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2538 2515 2539 - if (adev->gfx.ras_funcs && 2540 - adev->gfx.ras_funcs->ras_fini) 2541 - adev->gfx.ras_funcs->ras_fini(adev); 2516 + if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_fini) 2517 + adev->gfx.ras->ras_block.ras_fini(adev); 2542 2518 2543 2519 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 2544 2520 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); ··· 4892 4870 if (r) 4893 4871 return r; 4894 4872 4895 - if (adev->gfx.ras_funcs && 4896 - 
adev->gfx.ras_funcs->ras_late_init) { 4897 - r = adev->gfx.ras_funcs->ras_late_init(adev); 4873 + if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_late_init) { 4874 + r = adev->gfx.ras->ras_block.ras_late_init(adev, NULL); 4898 4875 if (r) 4899 4876 return r; 4900 4877 } 4901 4878 4902 - if (adev->gfx.ras_funcs && 4903 - adev->gfx.ras_funcs->enable_watchdog_timer) 4904 - adev->gfx.ras_funcs->enable_watchdog_timer(adev); 4879 + if (adev->gfx.ras && 4880 + adev->gfx.ras->enable_watchdog_timer) 4881 + adev->gfx.ras->enable_watchdog_timer(adev); 4905 4882 4906 4883 return 0; 4907 4884 } ··· 6840 6819 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); 6841 6820 } 6842 6821 6843 - static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, 6822 + static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, 6844 6823 void *ras_error_status) 6845 6824 { 6846 6825 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; ··· 6849 6828 uint32_t reg_value; 6850 6829 6851 6830 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 6852 - return -EINVAL; 6831 + return; 6853 6832 6854 6833 err_data->ue_count = 0; 6855 6834 err_data->ce_count = 0; ··· 6878 6857 mutex_unlock(&adev->grbm_idx_mutex); 6879 6858 6880 6859 gfx_v9_0_query_utc_edc_status(adev, err_data); 6881 - 6882 - return 0; 6883 6860 } 6884 6861 6885 6862 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+14 -10
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
··· 863 863 return 0; 864 864 } 865 865 866 - static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, 866 + static void gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, 867 867 void *ras_error_status) 868 868 { 869 869 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; ··· 872 872 uint32_t reg_value; 873 873 874 874 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 875 - return -EINVAL; 875 + return; 876 876 877 877 err_data->ue_count = 0; 878 878 err_data->ce_count = 0; ··· 903 903 904 904 gfx_v9_4_query_utc_edc_status(adev, err_data); 905 905 906 - return 0; 907 906 } 908 907 909 908 static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) ··· 1028 1029 mutex_unlock(&adev->grbm_idx_mutex); 1029 1030 } 1030 1031 1031 - const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = { 1032 - .ras_late_init = amdgpu_gfx_ras_late_init, 1033 - .ras_fini = amdgpu_gfx_ras_fini, 1034 - .ras_error_inject = &gfx_v9_4_ras_error_inject, 1035 - .query_ras_error_count = &gfx_v9_4_query_ras_error_count, 1036 - .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, 1037 - .query_ras_error_status = &gfx_v9_4_query_ras_error_status, 1032 + 1033 + const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = { 1034 + .ras_error_inject = &gfx_v9_4_ras_error_inject, 1035 + .query_ras_error_count = &gfx_v9_4_query_ras_error_count, 1036 + .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, 1037 + .query_ras_error_status = &gfx_v9_4_query_ras_error_status, 1038 + }; 1039 + 1040 + struct amdgpu_gfx_ras gfx_v9_4_ras = { 1041 + .ras_block = { 1042 + .hw_ops = &gfx_v9_4_ras_ops, 1043 + }, 1038 1044 };
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
··· 24 24 #ifndef __GFX_V9_4_H__ 25 25 #define __GFX_V9_4_H__ 26 26 27 - extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs; 27 + extern struct amdgpu_gfx_ras gfx_v9_4_ras; 28 28 29 29 #endif /* __GFX_V9_4_H__ */
+14 -11
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
··· 1641 1641 return 0; 1642 1642 } 1643 1643 1644 - static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, 1644 + static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, 1645 1645 void *ras_error_status) 1646 1646 { 1647 1647 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; 1648 1648 uint32_t sec_count = 0, ded_count = 0; 1649 1649 1650 1650 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 1651 - return -EINVAL; 1651 + return; 1652 1652 1653 1653 err_data->ue_count = 0; 1654 1654 err_data->ce_count = 0; ··· 1661 1661 err_data->ce_count += sec_count; 1662 1662 err_data->ue_count += ded_count; 1663 1663 1664 - return 0; 1665 1664 } 1666 1665 1667 1666 static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev) ··· 1930 1931 mutex_unlock(&adev->grbm_idx_mutex); 1931 1932 } 1932 1933 1933 - const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = { 1934 - .ras_late_init = amdgpu_gfx_ras_late_init, 1935 - .ras_fini = amdgpu_gfx_ras_fini, 1936 - .ras_error_inject = &gfx_v9_4_2_ras_error_inject, 1937 - .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, 1938 - .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, 1939 - .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, 1940 - .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status, 1934 + struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops ={ 1935 + .ras_error_inject = &gfx_v9_4_2_ras_error_inject, 1936 + .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, 1937 + .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, 1938 + .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, 1939 + .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status, 1940 + }; 1941 + 1942 + struct amdgpu_gfx_ras gfx_v9_4_2_ras = { 1943 + .ras_block = { 1944 + .hw_ops = &gfx_v9_4_2_ras_ops, 1945 + }, 1941 1946 .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer, 1942 1947 };
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
··· 31 31 void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev); 32 32 int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev); 33 33 34 - extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs; 34 + extern struct amdgpu_gfx_ras gfx_v9_4_2_ras; 35 35 36 36 #endif /* __GFX_V9_4_2_H__ */