Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: qat - dynamically generate arbiter mappings

The thread-to-arbiter mapping describes which arbiter can assign jobs
to an acceleration engine thread.
The existing mappings are functionally correct, but hardcoded and not
optimized.

Replace the static mappings with an algorithm that generates optimal
mappings based on the loaded configuration.

The logic has been made common so that it can be shared between all
QAT GEN4 devices.

Signed-off-by: Damian Muszynski <damian.muszynski@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Damian Muszynski and committed by Herbert Xu
(commit 5da6a2d5, parent eb527077).

+235 -112
+52 -79
drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
··· 25 25 #define ADF_AE_GROUP_3 GENMASK(15, 12) 26 26 #define ADF_AE_GROUP_4 BIT(16) 27 27 28 + #define ENA_THD_MASK_ASYM GENMASK(1, 0) 29 + #define ENA_THD_MASK_SYM GENMASK(3, 0) 30 + #define ENA_THD_MASK_DC GENMASK(1, 0) 31 + 28 32 static const char * const adf_420xx_fw_objs[] = { 29 33 [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ, 30 34 [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ, ··· 87 83 {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, 88 84 }; 89 85 90 - /* Worker thread to service arbiter mappings */ 91 - static const u32 default_thrd_to_arb_map[ADF_420XX_MAX_ACCELENGINES] = { 92 - 0x00000055, 0x00000055, 0x00000055, 0x00000055, 93 - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 94 - 0x00000055, 0x00000055, 0x00000055, 0x00000055, 95 - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 96 - 0x0 97 - }; 98 - 99 - static const u32 thrd_to_arb_map_asym[ADF_420XX_MAX_ACCELENGINES] = { 100 - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 101 - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 102 - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 103 - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 104 - 0x0 105 - }; 106 - 107 - static const u32 thrd_to_arb_map_sym[ADF_420XX_MAX_ACCELENGINES] = { 108 - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 109 - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 110 - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 111 - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 112 - 0x0 113 - }; 114 - 115 - static const u32 thrd_to_arb_map_asym_dc[ADF_420XX_MAX_ACCELENGINES] = { 116 - 0x00000055, 0x00000055, 0x00000055, 0x00000055, 117 - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, 118 - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, 119 - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, 120 - 0x0 121 - }; 122 - 123 - static const u32 thrd_to_arb_map_sym_dc[ADF_420XX_MAX_ACCELENGINES] = { 124 - 0x00000055, 0x00000055, 0x00000055, 0x00000055, 125 - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 126 - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 127 - 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 128 - 0x0 129 - }; 130 - 131 - static const u32 thrd_to_arb_map_dc[ADF_420XX_MAX_ACCELENGINES] = { 132 - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 133 - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 134 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 135 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 136 - 0x0 137 - }; 138 - 139 - static const u32 thrd_to_arb_map_dcc[ADF_420XX_MAX_ACCELENGINES] = { 140 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 141 - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 142 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 143 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 144 - 0x0 145 - }; 146 86 147 87 static struct adf_hw_device_class adf_420xx_class = { 148 88 .name = ADF_420XX_DEVICE_NAME, ··· 294 346 295 347 static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) 296 348 { 297 - switch (adf_get_service_enabled(accel_dev)) { 298 - case SVC_ASYM: 299 - return thrd_to_arb_map_asym; 300 - case SVC_SYM: 301 - return thrd_to_arb_map_sym; 302 - case SVC_DC: 303 - return thrd_to_arb_map_dc; 304 - case SVC_DCC: 305 - return thrd_to_arb_map_dcc; 306 - case SVC_ASYM_DC: 307 - case SVC_DC_ASYM: 308 - return thrd_to_arb_map_asym_dc; 309 - case SVC_DC_SYM: 310 - case SVC_SYM_DC: 311 - return thrd_to_arb_map_sym_dc; 312 - default: 313 - return default_thrd_to_arb_map; 314 - } 349 + if (adf_gen4_init_thd2arb_map(accel_dev)) 350 + dev_warn(&GET_DEV(accel_dev), 351 + "Generate of the thread to arbiter map failed"); 352 + 353 + return GET_HW_DATA(accel_dev)->thd_to_arb_map; 315 354 } 316 355 317 356 static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) ··· 319 384 rl_data->scale_ref = ADF_420XX_RL_SLICE_REF; 320 385 } 321 386 322 - enum adf_rp_groups { 323 - RP_GROUP_0 = 0, 324 - RP_GROUP_1, 325 - RP_GROUP_COUNT 326 - }; 387 + static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) 388 + { 389 + switch (ae_mask) { 390 + case ADF_AE_GROUP_0: 
391 + return RP_GROUP_0; 392 + case ADF_AE_GROUP_1: 393 + case ADF_AE_GROUP_3: 394 + return RP_GROUP_1; 395 + case ADF_AE_GROUP_2: 396 + if (get_fw_config(accel_dev) == adf_fw_cy_config) 397 + return RP_GROUP_0; 398 + else 399 + return RP_GROUP_1; 400 + default: 401 + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); 402 + return -EINVAL; 403 + } 404 + } 405 + 406 + static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) 407 + { 408 + const struct adf_fw_config *fw_config; 409 + 410 + if (obj_num >= uof_get_num_objs(accel_dev)) 411 + return ADF_GEN4_ENA_THD_MASK_ERROR; 412 + 413 + fw_config = get_fw_config(accel_dev); 414 + if (!fw_config) 415 + return ADF_GEN4_ENA_THD_MASK_ERROR; 416 + 417 + switch (fw_config[obj_num].obj) { 418 + case ADF_FW_ASYM_OBJ: 419 + return ENA_THD_MASK_ASYM; 420 + case ADF_FW_SYM_OBJ: 421 + return ENA_THD_MASK_SYM; 422 + case ADF_FW_DC_OBJ: 423 + return ENA_THD_MASK_DC; 424 + default: 425 + return ADF_GEN4_ENA_THD_MASK_ERROR; 426 + } 427 + } 327 428 328 429 static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) 329 430 { ··· 497 526 hw_data->uof_get_name = uof_get_name_420xx; 498 527 hw_data->uof_get_num_objs = uof_get_num_objs; 499 528 hw_data->uof_get_ae_mask = uof_get_ae_mask; 529 + hw_data->get_rp_group = get_rp_group; 530 + hw_data->get_ena_thd_mask = get_ena_thd_mask; 500 531 hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; 501 532 hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; 502 533 hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+77 -33
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
··· 23 23 #define ADF_AE_GROUP_1 GENMASK(7, 4) 24 24 #define ADF_AE_GROUP_2 BIT(8) 25 25 26 + #define ENA_THD_MASK_ASYM GENMASK(1, 0) 27 + #define ENA_THD_MASK_ASYM_401XX GENMASK(5, 0) 28 + #define ENA_THD_MASK_SYM GENMASK(6, 0) 29 + #define ENA_THD_MASK_DC GENMASK(1, 0) 30 + 26 31 static const char * const adf_4xxx_fw_objs[] = { 27 32 [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ, 28 33 [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ, ··· 90 85 static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config)); 91 86 static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config)); 92 87 static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config)); 93 - 94 - /* Worker thread to service arbiter mappings */ 95 - static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { 96 - 0x5555555, 0x5555555, 0x5555555, 0x5555555, 97 - 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 98 - 0x0 99 - }; 100 - 101 - static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = { 102 - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 103 - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 104 - 0x0 105 - }; 106 - 107 - static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = { 108 - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 109 - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 110 - 0x0 111 - }; 112 88 113 89 static struct adf_hw_device_class adf_4xxx_class = { 114 90 .name = ADF_4XXX_DEVICE_NAME, ··· 206 220 207 221 static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) 208 222 { 209 - switch (adf_get_service_enabled(accel_dev)) { 210 - case SVC_DC: 211 - return thrd_to_arb_map_dc; 212 - case SVC_DCC: 213 - return thrd_to_arb_map_dcc; 214 - default: 215 - return default_thrd_to_arb_map; 216 - } 223 + if (adf_gen4_init_thd2arb_map(accel_dev)) 224 + dev_warn(&GET_DEV(accel_dev), 225 + "Generate of the thread to arbiter map failed"); 226 + 227 + return GET_HW_DATA(accel_dev)->thd_to_arb_map; 217 228 } 218 229 219 230 
static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) ··· 261 278 } 262 279 } 263 280 264 - enum adf_rp_groups { 265 - RP_GROUP_0 = 0, 266 - RP_GROUP_1, 267 - RP_GROUP_COUNT 268 - }; 281 + static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) 282 + { 283 + switch (ae_mask) { 284 + case ADF_AE_GROUP_0: 285 + return RP_GROUP_0; 286 + case ADF_AE_GROUP_1: 287 + return RP_GROUP_1; 288 + default: 289 + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); 290 + return -EINVAL; 291 + } 292 + } 293 + 294 + static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) 295 + { 296 + const struct adf_fw_config *fw_config; 297 + 298 + if (obj_num >= uof_get_num_objs(accel_dev)) 299 + return ADF_GEN4_ENA_THD_MASK_ERROR; 300 + 301 + fw_config = get_fw_config(accel_dev); 302 + if (!fw_config) 303 + return ADF_GEN4_ENA_THD_MASK_ERROR; 304 + 305 + switch (fw_config[obj_num].obj) { 306 + case ADF_FW_ASYM_OBJ: 307 + return ENA_THD_MASK_ASYM; 308 + case ADF_FW_SYM_OBJ: 309 + return ENA_THD_MASK_SYM; 310 + case ADF_FW_DC_OBJ: 311 + return ENA_THD_MASK_DC; 312 + default: 313 + return ADF_GEN4_ENA_THD_MASK_ERROR; 314 + } 315 + } 316 + 317 + static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num) 318 + { 319 + const struct adf_fw_config *fw_config; 320 + 321 + if (obj_num >= uof_get_num_objs(accel_dev)) 322 + return ADF_GEN4_ENA_THD_MASK_ERROR; 323 + 324 + fw_config = get_fw_config(accel_dev); 325 + if (!fw_config) 326 + return ADF_GEN4_ENA_THD_MASK_ERROR; 327 + 328 + switch (fw_config[obj_num].obj) { 329 + case ADF_FW_ASYM_OBJ: 330 + return ENA_THD_MASK_ASYM_401XX; 331 + case ADF_FW_SYM_OBJ: 332 + return ENA_THD_MASK_SYM; 333 + case ADF_FW_DC_OBJ: 334 + return ENA_THD_MASK_DC; 335 + default: 336 + return ADF_GEN4_ENA_THD_MASK_ERROR; 337 + } 338 + } 269 339 270 340 static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) 271 341 { ··· 464 428 hw_data->fw_mmp_name = ADF_402XX_MMP; 465 429 hw_data->uof_get_name = 
uof_get_name_402xx; 466 430 break; 467 - 431 + case ADF_401XX_PCI_DEVICE_ID: 432 + hw_data->fw_name = ADF_4XXX_FW; 433 + hw_data->fw_mmp_name = ADF_4XXX_MMP; 434 + hw_data->uof_get_name = uof_get_name_4xxx; 435 + hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx; 436 + break; 468 437 default: 469 438 hw_data->fw_name = ADF_4XXX_FW; 470 439 hw_data->fw_mmp_name = ADF_4XXX_MMP; 471 440 hw_data->uof_get_name = uof_get_name_4xxx; 441 + hw_data->get_ena_thd_mask = get_ena_thd_mask; 442 + break; 472 443 } 473 444 hw_data->uof_get_num_objs = uof_get_num_objs; 474 445 hw_data->uof_get_ae_mask = uof_get_ae_mask; 446 + hw_data->get_rp_group = get_rp_group; 475 447 hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; 476 448 hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; 477 449 hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+4
drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
··· 13 13 #include "adf_rl.h" 14 14 #include "adf_telemetry.h" 15 15 #include "adf_pfvf_msg.h" 16 + #include "icp_qat_hw.h" 16 17 17 18 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" 18 19 #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" ··· 249 248 const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); 250 249 u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); 251 250 u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); 251 + int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); 252 + u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); 252 253 int (*dev_config)(struct adf_accel_dev *accel_dev); 253 254 struct adf_pfvf_ops pfvf_ops; 254 255 struct adf_hw_csr_ops csr_ops; ··· 273 270 u32 admin_ae_mask; 274 271 u16 tx_rings_mask; 275 272 u16 ring_to_svc_map; 273 + u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER]; 276 274 u8 tx_rx_gap; 277 275 u8 num_banks; 278 276 u16 num_banks_per_vf;
+90
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
··· 2 2 /* Copyright(c) 2020 Intel Corporation */ 3 3 #include <linux/iopoll.h> 4 4 #include "adf_accel_devices.h" 5 + #include "adf_cfg_services.h" 5 6 #include "adf_common_drv.h" 6 7 #include "adf_gen4_hw_data.h" 7 8 #include "adf_gen4_pm.h" ··· 341 340 return ret; 342 341 } 343 342 EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset); 343 + 344 + static const u32 thrd_to_arb_map_dcc[] = { 345 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 346 + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 347 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 348 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 349 + 0x0 350 + }; 351 + 352 + static const u16 rp_group_to_arb_mask[] = { 353 + [RP_GROUP_0] = 0x5, 354 + [RP_GROUP_1] = 0xA, 355 + }; 356 + 357 + static bool is_single_service(int service_id) 358 + { 359 + switch (service_id) { 360 + case SVC_DC: 361 + case SVC_SYM: 362 + case SVC_ASYM: 363 + return true; 364 + case SVC_CY: 365 + case SVC_CY2: 366 + case SVC_DCC: 367 + case SVC_ASYM_DC: 368 + case SVC_DC_ASYM: 369 + case SVC_SYM_DC: 370 + case SVC_DC_SYM: 371 + default: 372 + return false; 373 + } 374 + } 375 + 376 + int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) 377 + { 378 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 379 + u32 *thd2arb_map = hw_data->thd_to_arb_map; 380 + unsigned int ae_cnt, worker_obj_cnt, i, j; 381 + unsigned long ae_mask, thds_mask; 382 + int srv_id, rp_group; 383 + u32 thd2arb_map_base; 384 + u16 arb_mask; 385 + 386 + if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask || 387 + !hw_data->get_num_aes || !hw_data->uof_get_num_objs || 388 + !hw_data->uof_get_ae_mask) 389 + return -EFAULT; 390 + 391 + srv_id = adf_get_service_enabled(accel_dev); 392 + if (srv_id < 0) 393 + return srv_id; 394 + 395 + ae_cnt = hw_data->get_num_aes(hw_data); 396 + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - 397 + ADF_GEN4_ADMIN_ACCELENGINES; 398 + 399 + if (srv_id == SVC_DCC) { 400 + memcpy(thd2arb_map, 
thrd_to_arb_map_dcc, 401 + array_size(sizeof(*thd2arb_map), ae_cnt)); 402 + return 0; 403 + } 404 + 405 + for (i = 0; i < worker_obj_cnt; i++) { 406 + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); 407 + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); 408 + thds_mask = hw_data->get_ena_thd_mask(accel_dev, i); 409 + thd2arb_map_base = 0; 410 + 411 + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) 412 + return -EINVAL; 413 + 414 + if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR) 415 + return -EINVAL; 416 + 417 + if (is_single_service(srv_id)) 418 + arb_mask = rp_group_to_arb_mask[RP_GROUP_0] | 419 + rp_group_to_arb_mask[RP_GROUP_1]; 420 + else 421 + arb_mask = rp_group_to_arb_mask[rp_group]; 422 + 423 + for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE) 424 + thd2arb_map_base |= arb_mask << (j * 4); 425 + 426 + for_each_set_bit(j, &ae_mask, ae_cnt) 427 + thd2arb_map[j] = thd2arb_map_base; 428 + } 429 + return 0; 430 + } 431 + EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);
+12
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
··· 28 28 /* Accelerators */ 29 29 #define ADF_GEN4_ACCELERATORS_MASK 0x1 30 30 #define ADF_GEN4_MAX_ACCELERATORS 1 31 + #define ADF_GEN4_ADMIN_ACCELENGINES 1 31 32 32 33 /* MSIX interrupt */ 33 34 #define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 ··· 194 193 #define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800 195 194 #define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 196 195 196 + /* Arbiter threads mask with error value */ 197 + #define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0) 198 + 197 199 void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); 198 200 199 201 enum icp_qat_gen4_slice_mask { ··· 209 205 ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7), 210 206 ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = BIT(8), 211 207 ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9), 208 + }; 209 + 210 + enum adf_gen4_rp_groups { 211 + RP_GROUP_0, 212 + RP_GROUP_1, 213 + RP_GROUP_COUNT 212 214 }; 213 215 214 216 void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev); ··· 234 224 int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); 235 225 void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); 236 226 void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); 227 + int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); 228 + 237 229 #endif