Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: qat - add rate limiting feature to qat_4xxx

The Rate Limiting (RL) feature allows controlling the rate of requests
that can be submitted on a ring pair (RP). This allows sharing a QAT
device among multiple users while ensuring a guaranteed throughput.

The driver provides a mechanism that allows users to set policies that
are programmed to the device. The device then enforces those policies.

Configuration of RL is accomplished through entities called SLAs
(Service Level Agreement). Each SLA object gets a unique identifier
and defines the limitations for a single service across up to four
ring pairs (the number of RPs allocated to a single VF).

The rate is determined using two fields:
* CIR (Committed Information Rate), i.e., the guaranteed rate.
* PIR (Peak Information Rate), i.e., the maximum rate achievable
when the device has available resources.
The rate values are expressed in permille scale i.e. 0-1000.
Ring pair selection is achieved by providing a 64-bit mask, where
each bit corresponds to one of the ring pairs.

This adds an interface and logic that allow users to add, update,
retrieve and remove an SLA.

Signed-off-by: Damian Muszynski <damian.muszynski@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Tero Kristo <tero.kristo@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Damian Muszynski and committed by
Herbert Xu
d9fb8408 c7fd5379

+1590 -1
+20
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
··· 343 343 return ADF_4XXX_KPT_COUNTER_FREQ; 344 344 } 345 345 346 + static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) 347 + { 348 + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; 349 + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; 350 + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; 351 + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; 352 + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; 353 + 354 + rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV; 355 + rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL; 356 + rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION; 357 + rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM; 358 + rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM; 359 + rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC; 360 + rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC; 361 + rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF; 362 + } 363 + 346 364 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) 347 365 { 348 366 struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; ··· 612 594 hw_data->stop_timer = adf_gen4_timer_stop; 613 595 hw_data->get_hb_clock = get_heartbeat_clock; 614 596 hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; 597 + hw_data->clock_frequency = ADF_4XXX_AE_FREQ; 615 598 616 599 adf_gen4_set_err_mask(&hw_data->dev_err_mask); 617 600 adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); 618 601 adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); 619 602 adf_gen4_init_dc_ops(&hw_data->dc_ops); 620 603 adf_gen4_init_ras_ops(&hw_data->ras_ops); 604 + adf_init_rl_data(&hw_data->rl_data); 621 605 } 622 606 623 607 void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
+12 -1
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
··· 82 82 #define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin" 83 83 #define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin" 84 84 85 + /* RL constants */ 86 + #define ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV 100 87 + #define ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL 102 88 + #define ADF_4XXX_RL_DCPR_CORRECTION 1 89 + #define ADF_4XXX_RL_SCANS_PER_SEC 954 90 + #define ADF_4XXX_RL_MAX_TP_ASYM 173750UL 91 + #define ADF_4XXX_RL_MAX_TP_SYM 95000UL 92 + #define ADF_4XXX_RL_MAX_TP_DC 45000UL 93 + #define ADF_4XXX_RL_SLICE_REF 1000UL 94 + 85 95 /* Clocks frequency */ 86 - #define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) 96 + #define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) 97 + #define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ) 87 98 88 99 /* qat_4xxx fuse bits are different from old GENs, redefine them */ 89 100 enum icp_qat_4xxx_slice_mask {
+2
drivers/crypto/intel/qat/qat_common/Makefile
··· 28 28 qat_algs.o \ 29 29 qat_asym_algs.o \ 30 30 qat_algs_send.o \ 31 + adf_rl.o \ 32 + adf_rl_admin.o \ 31 33 qat_uclo.o \ 32 34 qat_hal.o \ 33 35 qat_bl.o
+3
drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
··· 9 9 #include <linux/ratelimit.h> 10 10 #include <linux/types.h> 11 11 #include "adf_cfg_common.h" 12 + #include "adf_rl.h" 12 13 #include "adf_pfvf_msg.h" 13 14 14 15 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" ··· 248 247 struct adf_dc_ops dc_ops; 249 248 struct adf_ras_ops ras_ops; 250 249 struct adf_dev_err_mask dev_err_mask; 250 + struct adf_rl_hw_data rl_data; 251 251 const char *fw_name; 252 252 const char *fw_mmp_name; 253 253 u32 fuses; ··· 360 358 struct adf_accel_pci accel_pci_dev; 361 359 struct adf_timer *timer; 362 360 struct adf_heartbeat *heartbeat; 361 + struct adf_rl *rate_limiting; 363 362 union { 364 363 struct { 365 364 /* protects VF2PF interrupts access */
+47
drivers/crypto/intel/qat/qat_common/adf_admin.c
··· 330 330 return 0; 331 331 } 332 332 333 + int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, 334 + struct icp_qat_fw_init_admin_slice_cnt *slices) 335 + { 336 + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; 337 + struct icp_qat_fw_init_admin_resp resp = { }; 338 + struct icp_qat_fw_init_admin_req req = { }; 339 + int ret; 340 + 341 + req.cmd_id = ICP_QAT_FW_RL_INIT; 342 + 343 + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); 344 + if (ret) 345 + return ret; 346 + 347 + memcpy(slices, &resp.slices, sizeof(*slices)); 348 + 349 + return 0; 350 + } 351 + 352 + int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, 353 + struct icp_qat_fw_init_admin_req *req) 354 + { 355 + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; 356 + struct icp_qat_fw_init_admin_resp resp = { }; 357 + 358 + /* 359 + * req struct filled in rl implementation. Used commands 360 + * ICP_QAT_FW_RL_ADD for a new SLA 361 + * ICP_QAT_FW_RL_UPDATE for update SLA 362 + */ 363 + return adf_send_admin(accel_dev, req, &resp, ae_mask); 364 + } 365 + 366 + int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, 367 + u8 node_type) 368 + { 369 + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; 370 + struct icp_qat_fw_init_admin_resp resp = { }; 371 + struct icp_qat_fw_init_admin_req req = { }; 372 + 373 + req.cmd_id = ICP_QAT_FW_RL_REMOVE; 374 + req.node_id = node_id; 375 + req.node_type = node_type; 376 + 377 + return adf_send_admin(accel_dev, &req, &resp, ae_mask); 378 + } 379 + 333 380 /** 334 381 * adf_send_admin_init() - Function sends init message to FW 335 382 * @accel_dev: Pointer to acceleration device.
+8
drivers/crypto/intel/qat/qat_common/adf_admin.h
··· 3 3 #ifndef ADF_ADMIN 4 4 #define ADF_ADMIN 5 5 6 + #include "icp_qat_fw_init_admin.h" 7 + 6 8 struct adf_accel_dev; 7 9 8 10 int adf_init_admin_comms(struct adf_accel_dev *accel_dev); ··· 14 12 int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); 15 13 int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); 16 14 int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); 15 + int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, 16 + struct icp_qat_fw_init_admin_slice_cnt *slices); 17 + int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, 18 + struct icp_qat_fw_init_admin_req *req); 19 + int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, 20 + u8 node_type); 17 21 int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); 18 22 int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); 19 23 int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err);
+7
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
··· 139 139 /* Number of heartbeat counter pairs */ 140 140 #define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE 141 141 142 + /* Rate Limiting */ 143 + #define ADF_GEN4_RL_R2L_OFFSET 0x508000 144 + #define ADF_GEN4_RL_L2C_OFFSET 0x509000 145 + #define ADF_GEN4_RL_C2S_OFFSET 0x508818 146 + #define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800 147 + #define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 148 + 142 149 void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); 143 150 void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); 144 151 int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
+10
drivers/crypto/intel/qat/qat_common/adf_init.c
··· 9 9 #include "adf_common_drv.h" 10 10 #include "adf_dbgfs.h" 11 11 #include "adf_heartbeat.h" 12 + #include "adf_rl.h" 12 13 #include "adf_sysfs_ras_counters.h" 13 14 14 15 static LIST_HEAD(service_table); ··· 138 137 } 139 138 140 139 adf_heartbeat_init(accel_dev); 140 + ret = adf_rl_init(accel_dev); 141 + if (ret && ret != -EOPNOTSUPP) 142 + return ret; 141 143 142 144 /* 143 145 * Subservice initialisation is divided into two stages: init and start. ··· 216 212 } 217 213 218 214 adf_heartbeat_start(accel_dev); 215 + ret = adf_rl_start(accel_dev); 216 + if (ret && ret != -EOPNOTSUPP) 217 + return ret; 219 218 220 219 list_for_each_entry(service, &service_table, list) { 221 220 if (service->event_hld(accel_dev, ADF_EVENT_START)) { ··· 279 272 !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) 280 273 return; 281 274 275 + adf_rl_stop(accel_dev); 282 276 adf_dbgfs_rm(accel_dev); 283 277 adf_sysfs_stop_ras(accel_dev); 284 278 ··· 366 358 else 367 359 clear_bit(accel_dev->accel_id, service->init_status); 368 360 } 361 + 362 + adf_rl_exit(accel_dev); 369 363 370 364 if (hw_data->ras_ops.disable_ras_errors) 371 365 hw_data->ras_ops.disable_ras_errors(accel_dev);
+1159
drivers/crypto/intel/qat/qat_common/adf_rl.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright(c) 2023 Intel Corporation */ 3 + 4 + #define dev_fmt(fmt) "RateLimiting: " fmt 5 + 6 + #include <asm/errno.h> 7 + #include <asm/div64.h> 8 + 9 + #include <linux/dev_printk.h> 10 + #include <linux/kernel.h> 11 + #include <linux/pci.h> 12 + #include <linux/slab.h> 13 + #include <linux/units.h> 14 + 15 + #include "adf_accel_devices.h" 16 + #include "adf_common_drv.h" 17 + #include "adf_rl_admin.h" 18 + #include "adf_rl.h" 19 + 20 + #define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET 0U 21 + #define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET 0U 22 + #define RL_TOKEN_PCIE_SIZE 64 23 + #define RL_TOKEN_ASYM_SIZE 1024 24 + #define RL_CSR_SIZE 4U 25 + #define RL_CAPABILITY_MASK GENMASK(6, 4) 26 + #define RL_CAPABILITY_VALUE 0x70 27 + #define RL_VALIDATE_NON_ZERO(input) ((input) == 0) 28 + #define ROOT_MASK GENMASK(1, 0) 29 + #define CLUSTER_MASK GENMASK(3, 0) 30 + #define LEAF_MASK GENMASK(5, 0) 31 + 32 + static int validate_user_input(struct adf_accel_dev *accel_dev, 33 + struct adf_rl_sla_input_data *sla_in, 34 + bool is_update) 35 + { 36 + const unsigned long rp_mask = sla_in->rp_mask; 37 + size_t rp_mask_size; 38 + int i, cnt; 39 + 40 + if (sla_in->pir < sla_in->cir) { 41 + dev_notice(&GET_DEV(accel_dev), 42 + "PIR must be >= CIR, setting PIR to CIR\n"); 43 + sla_in->pir = sla_in->cir; 44 + } 45 + 46 + if (!is_update) { 47 + cnt = 0; 48 + rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE; 49 + for_each_set_bit(i, &rp_mask, rp_mask_size) { 50 + if (++cnt > RL_RP_CNT_PER_LEAF_MAX) { 51 + dev_notice(&GET_DEV(accel_dev), 52 + "Too many ring pairs selected for this SLA\n"); 53 + return -EINVAL; 54 + } 55 + } 56 + 57 + if (sla_in->srv >= ADF_SVC_NONE) { 58 + dev_notice(&GET_DEV(accel_dev), 59 + "Wrong service type\n"); 60 + return -EINVAL; 61 + } 62 + 63 + if (sla_in->type > RL_LEAF) { 64 + dev_notice(&GET_DEV(accel_dev), 65 + "Wrong node type\n"); 66 + return -EINVAL; 67 + } 68 + 69 + if (sla_in->parent_id < 
RL_PARENT_DEFAULT_ID || 70 + sla_in->parent_id >= RL_NODES_CNT_MAX) { 71 + dev_notice(&GET_DEV(accel_dev), 72 + "Wrong parent ID\n"); 73 + return -EINVAL; 74 + } 75 + } 76 + 77 + return 0; 78 + } 79 + 80 + static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id) 81 + { 82 + struct rl_sla *sla; 83 + 84 + if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) { 85 + dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n"); 86 + return -EINVAL; 87 + } 88 + 89 + sla = accel_dev->rate_limiting->sla[sla_id]; 90 + 91 + if (!sla) { 92 + dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n"); 93 + return -EINVAL; 94 + } 95 + 96 + if (sla->type != RL_LEAF) { 97 + dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n"); 98 + return -EINVAL; 99 + } 100 + 101 + return 0; 102 + } 103 + 104 + /** 105 + * find_parent() - Find the parent for a new SLA 106 + * @rl_data: pointer to ratelimiting data 107 + * @sla_in: pointer to user input data for a new SLA 108 + * 109 + * Function returns a pointer to the parent SLA. If the parent ID is provided 110 + * as input in the user data, then such ID is validated and the parent SLA 111 + * is returned. 112 + * Otherwise, it returns the default parent SLA (root or cluster) for 113 + * the new object. 
114 + * 115 + * Return: 116 + * * Pointer to the parent SLA object 117 + * * NULL - when parent cannot be found 118 + */ 119 + static struct rl_sla *find_parent(struct adf_rl *rl_data, 120 + struct adf_rl_sla_input_data *sla_in) 121 + { 122 + int input_parent_id = sla_in->parent_id; 123 + struct rl_sla *root = NULL; 124 + struct rl_sla *parent_sla; 125 + int i; 126 + 127 + if (sla_in->type == RL_ROOT) 128 + return NULL; 129 + 130 + if (input_parent_id > RL_PARENT_DEFAULT_ID) { 131 + parent_sla = rl_data->sla[input_parent_id]; 132 + /* 133 + * SLA can be a parent if it has the same service as the child 134 + * and its type is higher in the hierarchy, 135 + * for example the parent type of a LEAF must be a CLUSTER. 136 + */ 137 + if (parent_sla && parent_sla->srv == sla_in->srv && 138 + parent_sla->type == sla_in->type - 1) 139 + return parent_sla; 140 + 141 + return NULL; 142 + } 143 + 144 + /* If input_parent_id is not valid, get root for this service type. */ 145 + for (i = 0; i < RL_ROOT_MAX; i++) { 146 + if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) { 147 + root = rl_data->root[i]; 148 + break; 149 + } 150 + } 151 + 152 + if (!root) 153 + return NULL; 154 + 155 + /* 156 + * If the type of this SLA is cluster, then return the root. 157 + * Otherwise, find the default (i.e. first) cluster for this service. 
158 + */ 159 + if (sla_in->type == RL_CLUSTER) 160 + return root; 161 + 162 + for (i = 0; i < RL_CLUSTER_MAX; i++) { 163 + if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root) 164 + return rl_data->cluster[i]; 165 + } 166 + 167 + return NULL; 168 + } 169 + 170 + static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv) 171 + { 172 + switch (rl_srv) { 173 + case ADF_SVC_ASYM: 174 + return ASYM; 175 + case ADF_SVC_SYM: 176 + return SYM; 177 + case ADF_SVC_DC: 178 + return COMP; 179 + default: 180 + return UNUSED; 181 + } 182 + } 183 + 184 + /** 185 + * get_sla_arr_of_type() - Returns a pointer to SLA type specific array 186 + * @rl_data: pointer to ratelimiting data 187 + * @type: SLA type 188 + * @sla_arr: pointer to variable where requested pointer will be stored 189 + * 190 + * Return: Max number of elements allowed for the returned array 191 + */ 192 + static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, 193 + struct rl_sla ***sla_arr) 194 + { 195 + switch (type) { 196 + case RL_LEAF: 197 + *sla_arr = rl_data->leaf; 198 + return RL_LEAF_MAX; 199 + case RL_CLUSTER: 200 + *sla_arr = rl_data->cluster; 201 + return RL_CLUSTER_MAX; 202 + case RL_ROOT: 203 + *sla_arr = rl_data->root; 204 + return RL_ROOT_MAX; 205 + default: 206 + *sla_arr = NULL; 207 + return 0; 208 + } 209 + } 210 + 211 + static bool is_service_enabled(struct adf_accel_dev *accel_dev, 212 + enum adf_base_services rl_srv) 213 + { 214 + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv); 215 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 216 + u8 rps_per_bundle = hw_data->num_banks_per_vf; 217 + int i; 218 + 219 + for (i = 0; i < rps_per_bundle; i++) { 220 + if (GET_SRV_TYPE(accel_dev, i) == arb_srv) 221 + return true; 222 + } 223 + 224 + return false; 225 + } 226 + 227 + /** 228 + * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask 229 + * @accel_dev: pointer to acceleration device structure 230 
+ * @sla: SLA object data where result will be written 231 + * @rp_mask: bitmask of ring pair IDs 232 + * 233 + * Function tries to convert provided bitmap to an array of IDs. It checks if 234 + * RPs aren't in use, are assigned to SLA service or if a number of provided 235 + * IDs is not too big. If successful, writes the result into the field 236 + * sla->ring_pairs_cnt. 237 + * 238 + * Return: 239 + * * 0 - ok 240 + * * -EINVAL - ring pairs array cannot be created from provided mask 241 + */ 242 + static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla, 243 + const unsigned long rp_mask) 244 + { 245 + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv); 246 + u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf; 247 + bool *rp_in_use = accel_dev->rate_limiting->rp_in_use; 248 + size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids); 249 + u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks; 250 + u16 cnt = 0; 251 + u16 rp_id; 252 + 253 + for_each_set_bit(rp_id, &rp_mask, rp_id_max) { 254 + if (cnt >= rp_cnt_max) { 255 + dev_notice(&GET_DEV(accel_dev), 256 + "Assigned more ring pairs than supported"); 257 + return -EINVAL; 258 + } 259 + 260 + if (rp_in_use[rp_id]) { 261 + dev_notice(&GET_DEV(accel_dev), 262 + "RP %u already assigned to other SLA", rp_id); 263 + return -EINVAL; 264 + } 265 + 266 + if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) { 267 + dev_notice(&GET_DEV(accel_dev), 268 + "RP %u does not support SLA service", rp_id); 269 + return -EINVAL; 270 + } 271 + 272 + sla->ring_pairs_ids[cnt++] = rp_id; 273 + } 274 + 275 + sla->ring_pairs_cnt = cnt; 276 + 277 + return 0; 278 + } 279 + 280 + static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used) 281 + { 282 + u16 rp_id; 283 + int i; 284 + 285 + for (i = 0; i < sla->ring_pairs_cnt; i++) { 286 + rp_id = sla->ring_pairs_ids[i]; 287 + rp_in_use[rp_id] = used; 288 + } 289 + } 290 + 291 + static void assign_rps_to_leaf(struct 
adf_accel_dev *accel_dev, 292 + struct rl_sla *sla, bool clear) 293 + { 294 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 295 + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); 296 + u32 base_offset = hw_data->rl_data.r2l_offset; 297 + u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK); 298 + u32 offset; 299 + int i; 300 + 301 + for (i = 0; i < sla->ring_pairs_cnt; i++) { 302 + offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]); 303 + ADF_CSR_WR(pmisc_addr, offset, node_id); 304 + } 305 + } 306 + 307 + static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev, 308 + struct rl_sla *sla, bool clear) 309 + { 310 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 311 + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); 312 + u32 base_offset = hw_data->rl_data.l2c_offset; 313 + u32 node_id = sla->node_id & LEAF_MASK; 314 + u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK); 315 + u32 offset; 316 + 317 + offset = base_offset + (RL_CSR_SIZE * node_id); 318 + ADF_CSR_WR(pmisc_addr, offset, parent_id); 319 + } 320 + 321 + static void assign_cluster_to_root(struct adf_accel_dev *accel_dev, 322 + struct rl_sla *sla, bool clear) 323 + { 324 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 325 + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); 326 + u32 base_offset = hw_data->rl_data.c2s_offset; 327 + u32 node_id = sla->node_id & CLUSTER_MASK; 328 + u32 parent_id = clear ? 
0U : (sla->parent->node_id & ROOT_MASK); 329 + u32 offset; 330 + 331 + offset = base_offset + (RL_CSR_SIZE * node_id); 332 + ADF_CSR_WR(pmisc_addr, offset, parent_id); 333 + } 334 + 335 + static void assign_node_to_parent(struct adf_accel_dev *accel_dev, 336 + struct rl_sla *sla, bool clear_assignment) 337 + { 338 + switch (sla->type) { 339 + case RL_LEAF: 340 + assign_rps_to_leaf(accel_dev, sla, clear_assignment); 341 + assign_leaf_to_cluster(accel_dev, sla, clear_assignment); 342 + break; 343 + case RL_CLUSTER: 344 + assign_cluster_to_root(accel_dev, sla, clear_assignment); 345 + break; 346 + default: 347 + break; 348 + } 349 + } 350 + 351 + /** 352 + * can_parent_afford_sla() - Verifies if parent allows to create an SLA 353 + * @sla_in: pointer to user input data for a new SLA 354 + * @sla_parent: pointer to parent SLA object 355 + * @sla_cir: current child CIR value (only for update) 356 + * @is_update: request is a update 357 + * 358 + * Algorithm verifies if parent has enough remaining budget to take assignment 359 + * of a child with provided parameters. In update case current CIR value must be 360 + * returned to budget first. 361 + * PIR value cannot exceed the PIR assigned to parent. 
362 + * 363 + * Return: 364 + * * true - SLA can be created 365 + * * false - SLA cannot be created 366 + */ 367 + static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in, 368 + struct rl_sla *sla_parent, u32 sla_cir, 369 + bool is_update) 370 + { 371 + u32 rem_cir = sla_parent->rem_cir; 372 + 373 + if (is_update) 374 + rem_cir += sla_cir; 375 + 376 + if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir) 377 + return false; 378 + 379 + return true; 380 + } 381 + 382 + /** 383 + * can_node_afford_update() - Verifies if SLA can be updated with input data 384 + * @sla_in: pointer to user input data for a new SLA 385 + * @sla: pointer to SLA object selected for update 386 + * 387 + * Algorithm verifies if a new CIR value is big enough to satisfy currently 388 + * assigned child SLAs and if PIR can be updated 389 + * 390 + * Return: 391 + * * true - SLA can be updated 392 + * * false - SLA cannot be updated 393 + */ 394 + static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in, 395 + struct rl_sla *sla) 396 + { 397 + u32 cir_in_use = sla->cir - sla->rem_cir; 398 + 399 + /* new CIR cannot be smaller then currently consumed value */ 400 + if (cir_in_use > sla_in->cir) 401 + return false; 402 + 403 + /* PIR of root/cluster cannot be reduced in node with assigned children */ 404 + if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0) 405 + return false; 406 + 407 + return true; 408 + } 409 + 410 + static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla, 411 + struct adf_rl_sla_input_data *sla_in, 412 + bool is_update) 413 + { 414 + u32 max_val = rl_data->device_data->scale_ref; 415 + struct rl_sla *parent = sla->parent; 416 + bool ret = true; 417 + 418 + if (sla_in->cir > max_val || sla_in->pir > max_val) 419 + ret = false; 420 + 421 + switch (sla->type) { 422 + case RL_LEAF: 423 + ret &= can_parent_afford_sla(sla_in, parent, sla->cir, 424 + is_update); 425 + break; 426 + case RL_CLUSTER: 427 + ret &= 
can_parent_afford_sla(sla_in, parent, sla->cir, 428 + is_update); 429 + 430 + if (is_update) 431 + ret &= can_node_afford_update(sla_in, sla); 432 + 433 + break; 434 + case RL_ROOT: 435 + if (is_update) 436 + ret &= can_node_afford_update(sla_in, sla); 437 + 438 + break; 439 + default: 440 + ret = false; 441 + break; 442 + } 443 + 444 + return ret; 445 + } 446 + 447 + static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update) 448 + { 449 + switch (sla->type) { 450 + case RL_LEAF: 451 + if (is_update) 452 + sla->parent->rem_cir += old_cir; 453 + 454 + sla->parent->rem_cir -= sla->cir; 455 + sla->rem_cir = 0; 456 + break; 457 + case RL_CLUSTER: 458 + if (is_update) { 459 + sla->parent->rem_cir += old_cir; 460 + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); 461 + } else { 462 + sla->rem_cir = sla->cir; 463 + } 464 + 465 + sla->parent->rem_cir -= sla->cir; 466 + break; 467 + case RL_ROOT: 468 + if (is_update) 469 + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); 470 + else 471 + sla->rem_cir = sla->cir; 472 + break; 473 + default: 474 + break; 475 + } 476 + } 477 + 478 + /** 479 + * get_next_free_sla_id() - finds next free ID in the SLA array 480 + * @rl_data: Pointer to ratelimiting data structure 481 + * 482 + * Return: 483 + * * 0 : RL_NODES_CNT_MAX - correct ID 484 + * * -ENOSPC - all SLA slots are in use 485 + */ 486 + static int get_next_free_sla_id(struct adf_rl *rl_data) 487 + { 488 + int i = 0; 489 + 490 + while (i < RL_NODES_CNT_MAX && rl_data->sla[i++]) 491 + ; 492 + 493 + if (i == RL_NODES_CNT_MAX) 494 + return -ENOSPC; 495 + 496 + return i - 1; 497 + } 498 + 499 + /** 500 + * get_next_free_node_id() - finds next free ID in the array of that node type 501 + * @rl_data: Pointer to ratelimiting data structure 502 + * @sla: Pointer to SLA object for which the ID is searched 503 + * 504 + * Return: 505 + * * 0 : RL_[NODE_TYPE]_MAX - correct ID 506 + * * -ENOSPC - all slots of that type are in use 507 + */ 508 + static int 
get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla) 509 + { 510 + struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev); 511 + int max_id, i, step, rp_per_leaf; 512 + struct rl_sla **sla_list; 513 + 514 + rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf; 515 + 516 + /* 517 + * Static nodes mapping: 518 + * root0 - cluster[0,4,8,12] - leaf[0-15] 519 + * root1 - cluster[1,5,9,13] - leaf[16-31] 520 + * root2 - cluster[2,6,10,14] - leaf[32-47] 521 + */ 522 + switch (sla->type) { 523 + case RL_LEAF: 524 + i = sla->srv * rp_per_leaf; 525 + step = 1; 526 + max_id = i + rp_per_leaf; 527 + sla_list = rl_data->leaf; 528 + break; 529 + case RL_CLUSTER: 530 + i = sla->srv; 531 + step = 4; 532 + max_id = RL_CLUSTER_MAX; 533 + sla_list = rl_data->cluster; 534 + break; 535 + case RL_ROOT: 536 + return sla->srv; 537 + default: 538 + return -EINVAL; 539 + } 540 + 541 + while (i < max_id && sla_list[i]) 542 + i += step; 543 + 544 + if (i >= max_id) 545 + return -ENOSPC; 546 + 547 + return i; 548 + } 549 + 550 + u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, 551 + enum adf_base_services svc_type) 552 + { 553 + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; 554 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 555 + u64 avail_slice_cycles, allocated_tokens; 556 + 557 + if (!sla_val) 558 + return 0; 559 + 560 + avail_slice_cycles = hw_data->clock_frequency; 561 + 562 + switch (svc_type) { 563 + case ADF_SVC_ASYM: 564 + avail_slice_cycles *= device_data->slices.pke_cnt; 565 + break; 566 + case ADF_SVC_SYM: 567 + avail_slice_cycles *= device_data->slices.cph_cnt; 568 + break; 569 + case ADF_SVC_DC: 570 + avail_slice_cycles *= device_data->slices.dcpr_cnt; 571 + break; 572 + default: 573 + break; 574 + } 575 + 576 + do_div(avail_slice_cycles, device_data->scan_interval); 577 + allocated_tokens = avail_slice_cycles * sla_val; 578 + do_div(allocated_tokens, 
device_data->scale_ref); 579 + 580 + return allocated_tokens; 581 + } 582 + 583 + u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, 584 + enum adf_base_services svc_type) 585 + { 586 + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; 587 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 588 + u64 allocated_ae_cycles, avail_ae_cycles; 589 + 590 + if (!sla_val) 591 + return 0; 592 + 593 + avail_ae_cycles = hw_data->clock_frequency; 594 + avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1; 595 + do_div(avail_ae_cycles, device_data->scan_interval); 596 + 597 + sla_val *= device_data->max_tp[svc_type]; 598 + sla_val /= device_data->scale_ref; 599 + 600 + allocated_ae_cycles = (sla_val * avail_ae_cycles); 601 + do_div(allocated_ae_cycles, device_data->max_tp[svc_type]); 602 + 603 + return allocated_ae_cycles; 604 + } 605 + 606 + u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, 607 + enum adf_base_services svc_type, bool is_bw_out) 608 + { 609 + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; 610 + u64 sla_to_bytes, allocated_bw, sla_scaled; 611 + 612 + if (!sla_val) 613 + return 0; 614 + 615 + sla_to_bytes = sla_val; 616 + sla_to_bytes *= device_data->max_tp[svc_type]; 617 + do_div(sla_to_bytes, device_data->scale_ref); 618 + 619 + sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? 
RL_TOKEN_ASYM_SIZE : 620 + BYTES_PER_MBIT; 621 + if (svc_type == ADF_SVC_DC && is_bw_out) 622 + sla_to_bytes *= device_data->slices.dcpr_cnt - 623 + device_data->dcpr_correction; 624 + 625 + sla_scaled = sla_to_bytes * device_data->pcie_scale_mul; 626 + do_div(sla_scaled, device_data->pcie_scale_div); 627 + allocated_bw = sla_scaled; 628 + do_div(allocated_bw, RL_TOKEN_PCIE_SIZE); 629 + do_div(allocated_bw, device_data->scan_interval); 630 + 631 + return allocated_bw; 632 + } 633 + 634 + /** 635 + * add_new_sla_entry() - creates a new SLA object and fills it with user data 636 + * @accel_dev: pointer to acceleration device structure 637 + * @sla_in: pointer to user input data for a new SLA 638 + * @sla_out: Pointer to variable that will contain the address of a new 639 + * SLA object if the operation succeeds 640 + * 641 + * Return: 642 + * * 0 - ok 643 + * * -ENOMEM - memory allocation failed 644 + * * -EINVAL - invalid user input 645 + * * -ENOSPC - all available SLAs are in use 646 + */ 647 + static int add_new_sla_entry(struct adf_accel_dev *accel_dev, 648 + struct adf_rl_sla_input_data *sla_in, 649 + struct rl_sla **sla_out) 650 + { 651 + struct adf_rl *rl_data = accel_dev->rate_limiting; 652 + struct rl_sla *sla; 653 + int ret = 0; 654 + 655 + sla = kzalloc(sizeof(*sla), GFP_KERNEL); 656 + if (!sla) { 657 + ret = -ENOMEM; 658 + goto ret_err; 659 + } 660 + *sla_out = sla; 661 + 662 + if (!is_service_enabled(accel_dev, sla_in->srv)) { 663 + dev_notice(&GET_DEV(accel_dev), 664 + "Provided service is not enabled\n"); 665 + ret = -EINVAL; 666 + goto ret_err; 667 + } 668 + 669 + sla->srv = sla_in->srv; 670 + sla->type = sla_in->type; 671 + ret = get_next_free_node_id(rl_data, sla); 672 + if (ret < 0) { 673 + dev_notice(&GET_DEV(accel_dev), 674 + "Exceeded number of available nodes for that service\n"); 675 + goto ret_err; 676 + } 677 + sla->node_id = ret; 678 + 679 + ret = get_next_free_sla_id(rl_data); 680 + if (ret < 0) { 681 + dev_notice(&GET_DEV(accel_dev), 682 
+ "Allocated maximum SLAs number\n"); 683 + goto ret_err; 684 + } 685 + sla->sla_id = ret; 686 + 687 + sla->parent = find_parent(rl_data, sla_in); 688 + if (!sla->parent && sla->type != RL_ROOT) { 689 + if (sla_in->parent_id != RL_PARENT_DEFAULT_ID) 690 + dev_notice(&GET_DEV(accel_dev), 691 + "Provided parent ID does not exist or cannot be parent for this SLA."); 692 + else 693 + dev_notice(&GET_DEV(accel_dev), 694 + "Unable to find parent node for this service. Is service enabled?"); 695 + ret = -EINVAL; 696 + goto ret_err; 697 + } 698 + 699 + if (sla->type == RL_LEAF) { 700 + ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask); 701 + if (!sla->ring_pairs_cnt || ret) { 702 + dev_notice(&GET_DEV(accel_dev), 703 + "Unable to find ring pairs to assign to the leaf"); 704 + if (!ret) 705 + ret = -EINVAL; 706 + 707 + goto ret_err; 708 + } 709 + } 710 + 711 + return 0; 712 + 713 + ret_err: 714 + kfree(sla); 715 + *sla_out = NULL; 716 + 717 + return ret; 718 + } 719 + 720 + static int initialize_default_nodes(struct adf_accel_dev *accel_dev) 721 + { 722 + struct adf_rl *rl_data = accel_dev->rate_limiting; 723 + struct adf_rl_hw_data *device_data = rl_data->device_data; 724 + struct adf_rl_sla_input_data sla_in = { }; 725 + int ret = 0; 726 + int i; 727 + 728 + /* Init root for each enabled service */ 729 + sla_in.type = RL_ROOT; 730 + sla_in.parent_id = RL_PARENT_DEFAULT_ID; 731 + 732 + for (i = 0; i < ADF_SVC_NONE; i++) { 733 + if (!is_service_enabled(accel_dev, i)) 734 + continue; 735 + 736 + sla_in.cir = device_data->scale_ref; 737 + sla_in.pir = sla_in.cir; 738 + sla_in.srv = i; 739 + 740 + ret = adf_rl_add_sla(accel_dev, &sla_in); 741 + if (ret) 742 + return ret; 743 + } 744 + 745 + /* Init default cluster for each root */ 746 + sla_in.type = RL_CLUSTER; 747 + for (i = 0; i < ADF_SVC_NONE; i++) { 748 + if (!rl_data->root[i]) 749 + continue; 750 + 751 + sla_in.cir = rl_data->root[i]->cir; 752 + sla_in.pir = sla_in.cir; 753 + sla_in.srv = rl_data->root[i]->srv; 754 + 
755 + ret = adf_rl_add_sla(accel_dev, &sla_in); 756 + if (ret) 757 + return ret; 758 + } 759 + 760 + return 0; 761 + } 762 + 763 + static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) 764 + { 765 + bool *rp_in_use = rl_data->rp_in_use; 766 + struct rl_sla **sla_type_arr = NULL; 767 + int i, sla_id, node_id; 768 + u32 old_cir; 769 + 770 + sla_id = sla->sla_id; 771 + node_id = sla->node_id; 772 + old_cir = sla->cir; 773 + sla->cir = 0; 774 + sla->pir = 0; 775 + 776 + for (i = 0; i < sla->ring_pairs_cnt; i++) 777 + rp_in_use[sla->ring_pairs_ids[i]] = false; 778 + 779 + update_budget(sla, old_cir, true); 780 + get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); 781 + assign_node_to_parent(rl_data->accel_dev, sla, true); 782 + adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); 783 + mark_rps_usage(sla, rl_data->rp_in_use, false); 784 + 785 + kfree(sla); 786 + rl_data->sla[sla_id] = NULL; 787 + sla_type_arr[node_id] = NULL; 788 + } 789 + 790 + /** 791 + * add_update_sla() - handles the creation and the update of an SLA 792 + * @accel_dev: pointer to acceleration device structure 793 + * @sla_in: pointer to user input data for a new/updated SLA 794 + * @is_update: flag to indicate if this is an update or an add operation 795 + * 796 + * Return: 797 + * * 0 - ok 798 + * * -ENOMEM - memory allocation failed 799 + * * -EINVAL - user input data cannot be used to create SLA 800 + * * -ENOSPC - all available SLAs are in use 801 + */ 802 + static int add_update_sla(struct adf_accel_dev *accel_dev, 803 + struct adf_rl_sla_input_data *sla_in, bool is_update) 804 + { 805 + struct adf_rl *rl_data = accel_dev->rate_limiting; 806 + struct rl_sla **sla_type_arr = NULL; 807 + struct rl_sla *sla = NULL; 808 + u32 old_cir = 0; 809 + int ret; 810 + 811 + if (!sla_in) { 812 + dev_warn(&GET_DEV(accel_dev), 813 + "SLA input data pointer is missing\n"); 814 + ret = -EFAULT; 815 + goto ret_err; 816 + } 817 + 818 + /* Input validation */ 819 + ret = 
validate_user_input(accel_dev, sla_in, is_update); 820 + if (ret) 821 + goto ret_err; 822 + 823 + mutex_lock(&rl_data->rl_lock); 824 + 825 + if (is_update) { 826 + ret = validate_sla_id(accel_dev, sla_in->sla_id); 827 + if (ret) 828 + goto ret_err; 829 + 830 + sla = rl_data->sla[sla_in->sla_id]; 831 + old_cir = sla->cir; 832 + } else { 833 + ret = add_new_sla_entry(accel_dev, sla_in, &sla); 834 + if (ret) 835 + goto ret_err; 836 + } 837 + 838 + if (!is_enough_budget(rl_data, sla, sla_in, is_update)) { 839 + dev_notice(&GET_DEV(accel_dev), 840 + "Input value exceeds the remaining budget%s\n", 841 + is_update ? " or more budget is already in use" : ""); 842 + ret = -EINVAL; 843 + goto ret_err; 844 + } 845 + sla->cir = sla_in->cir; 846 + sla->pir = sla_in->pir; 847 + 848 + /* Apply SLA */ 849 + assign_node_to_parent(accel_dev, sla, false); 850 + ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update); 851 + if (ret) { 852 + dev_notice(&GET_DEV(accel_dev), 853 + "Failed to apply an SLA\n"); 854 + goto ret_err; 855 + } 856 + update_budget(sla, old_cir, is_update); 857 + 858 + if (!is_update) { 859 + mark_rps_usage(sla, rl_data->rp_in_use, true); 860 + get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); 861 + sla_type_arr[sla->node_id] = sla; 862 + rl_data->sla[sla->sla_id] = sla; 863 + } 864 + 865 + sla_in->sla_id = sla->sla_id; 866 + goto ret_ok; 867 + 868 + ret_err: 869 + if (!is_update) { 870 + sla_in->sla_id = -1; 871 + kfree(sla); 872 + } 873 + ret_ok: 874 + mutex_unlock(&rl_data->rl_lock); 875 + return ret; 876 + } 877 + 878 + /** 879 + * adf_rl_add_sla() - handles the creation of an SLA 880 + * @accel_dev: pointer to acceleration device structure 881 + * @sla_in: pointer to user input data required to add an SLA 882 + * 883 + * Return: 884 + * * 0 - ok 885 + * * -ENOMEM - memory allocation failed 886 + * * -EINVAL - invalid user input 887 + * * -ENOSPC - all available SLAs are in use 888 + */ 889 + int adf_rl_add_sla(struct adf_accel_dev *accel_dev, 
890 + struct adf_rl_sla_input_data *sla_in) 891 + { 892 + return add_update_sla(accel_dev, sla_in, false); 893 + } 894 + 895 + /** 896 + * adf_rl_update_sla() - handles the update of an SLA 897 + * @accel_dev: pointer to acceleration device structure 898 + * @sla_in: pointer to user input data required to update an SLA 899 + * 900 + * Return: 901 + * * 0 - ok 902 + * * -EINVAL - user input data cannot be used to update SLA 903 + */ 904 + int adf_rl_update_sla(struct adf_accel_dev *accel_dev, 905 + struct adf_rl_sla_input_data *sla_in) 906 + { 907 + return add_update_sla(accel_dev, sla_in, true); 908 + } 909 + 910 + /** 911 + * adf_rl_get_sla() - returns an existing SLA data 912 + * @accel_dev: pointer to acceleration device structure 913 + * @sla_in: pointer to user data where SLA info will be stored 914 + * 915 + * The sla_id for which data are requested should be set in sla_id structure 916 + * 917 + * Return: 918 + * * 0 - ok 919 + * * -EINVAL - provided sla_id does not exist 920 + */ 921 + int adf_rl_get_sla(struct adf_accel_dev *accel_dev, 922 + struct adf_rl_sla_input_data *sla_in) 923 + { 924 + struct rl_sla *sla; 925 + int ret, i; 926 + 927 + ret = validate_sla_id(accel_dev, sla_in->sla_id); 928 + if (ret) 929 + return ret; 930 + 931 + sla = accel_dev->rate_limiting->sla[sla_in->sla_id]; 932 + sla_in->type = sla->type; 933 + sla_in->srv = sla->srv; 934 + sla_in->cir = sla->cir; 935 + sla_in->pir = sla->pir; 936 + sla_in->rp_mask = 0U; 937 + if (sla->parent) 938 + sla_in->parent_id = sla->parent->sla_id; 939 + else 940 + sla_in->parent_id = RL_PARENT_DEFAULT_ID; 941 + 942 + for (i = 0; i < sla->ring_pairs_cnt; i++) 943 + sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]); 944 + 945 + return 0; 946 + } 947 + 948 + /** 949 + * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR) for 950 + * selected service or provided sla_id 951 + * @accel_dev: pointer to acceleration device structure 952 + * @srv: service ID for which capability is requested 
953 + * @sla_id: ID of the cluster or root to which we want assign a new SLA 954 + * 955 + * Check if the provided SLA id is valid. If it is and the service matches 956 + * the requested service and the type is cluster or root, return the remaining 957 + * capability. 958 + * If the provided ID does not match the service or type, return the remaining 959 + * capacity of the default cluster for that service. 960 + * 961 + * Return: 962 + * * Positive value - correct remaining value 963 + * * -EINVAL - algorithm cannot find a remaining value for provided data 964 + */ 965 + int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, 966 + enum adf_base_services srv, int sla_id) 967 + { 968 + struct adf_rl *rl_data = accel_dev->rate_limiting; 969 + struct rl_sla *sla = NULL; 970 + int i; 971 + 972 + if (srv >= ADF_SVC_NONE) 973 + return -EINVAL; 974 + 975 + if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) { 976 + sla = rl_data->sla[sla_id]; 977 + 978 + if (sla->srv == srv && sla->type <= RL_CLUSTER) 979 + goto ret_ok; 980 + } 981 + 982 + for (i = 0; i < RL_CLUSTER_MAX; i++) { 983 + if (!rl_data->cluster[i]) 984 + continue; 985 + 986 + if (rl_data->cluster[i]->srv == srv) { 987 + sla = rl_data->cluster[i]; 988 + goto ret_ok; 989 + } 990 + } 991 + 992 + return -EINVAL; 993 + ret_ok: 994 + return sla->rem_cir; 995 + } 996 + 997 + /** 998 + * adf_rl_remove_sla() - removes provided sla_id 999 + * @accel_dev: pointer to acceleration device structure 1000 + * @sla_id: ID of the cluster or root to which we want assign an new SLA 1001 + * 1002 + * Return: 1003 + * * 0 - ok 1004 + * * -EINVAL - wrong sla_id or it still have assigned children 1005 + */ 1006 + int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id) 1007 + { 1008 + struct adf_rl *rl_data = accel_dev->rate_limiting; 1009 + struct rl_sla *sla; 1010 + int ret = 0; 1011 + 1012 + mutex_lock(&rl_data->rl_lock); 1013 + ret = validate_sla_id(accel_dev, sla_id); 1014 + if (ret) 1015 
+ goto err_ret; 1016 + 1017 + sla = rl_data->sla[sla_id]; 1018 + 1019 + if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) { 1020 + dev_notice(&GET_DEV(accel_dev), 1021 + "To remove parent SLA all its children must be removed first"); 1022 + ret = -EINVAL; 1023 + goto err_ret; 1024 + } 1025 + 1026 + clear_sla(rl_data, sla); 1027 + 1028 + err_ret: 1029 + mutex_unlock(&rl_data->rl_lock); 1030 + return ret; 1031 + } 1032 + 1033 + /** 1034 + * adf_rl_remove_sla_all() - removes all SLAs from device 1035 + * @accel_dev: pointer to acceleration device structure 1036 + * @incl_default: set to true if default SLAs also should be removed 1037 + */ 1038 + void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default) 1039 + { 1040 + struct adf_rl *rl_data = accel_dev->rate_limiting; 1041 + int end_type = incl_default ? RL_ROOT : RL_LEAF; 1042 + struct rl_sla **sla_type_arr = NULL; 1043 + u32 max_id; 1044 + int i, j; 1045 + 1046 + mutex_lock(&rl_data->rl_lock); 1047 + 1048 + /* Unregister and remove all SLAs */ 1049 + for (j = RL_LEAF; j >= end_type; j--) { 1050 + max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr); 1051 + 1052 + for (i = 0; i < max_id; i++) { 1053 + if (!sla_type_arr[i]) 1054 + continue; 1055 + 1056 + clear_sla(rl_data, sla_type_arr[i]); 1057 + } 1058 + } 1059 + 1060 + mutex_unlock(&rl_data->rl_lock); 1061 + } 1062 + 1063 + int adf_rl_init(struct adf_accel_dev *accel_dev) 1064 + { 1065 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 1066 + struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data; 1067 + struct adf_rl *rl; 1068 + int ret = 0; 1069 + 1070 + /* Validate device parameters */ 1071 + if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) || 1072 + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) || 1073 + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) || 1074 + RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) || 1075 + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) || 1076 + 
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) || 1077 + RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) { 1078 + ret = -EOPNOTSUPP; 1079 + goto err_ret; 1080 + } 1081 + 1082 + rl = kzalloc(sizeof(*rl), GFP_KERNEL); 1083 + if (!rl) { 1084 + ret = -ENOMEM; 1085 + goto err_ret; 1086 + } 1087 + 1088 + mutex_init(&rl->rl_lock); 1089 + rl->device_data = &accel_dev->hw_device->rl_data; 1090 + rl->accel_dev = accel_dev; 1091 + accel_dev->rate_limiting = rl; 1092 + 1093 + err_ret: 1094 + return ret; 1095 + } 1096 + 1097 + int adf_rl_start(struct adf_accel_dev *accel_dev) 1098 + { 1099 + struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data; 1100 + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); 1101 + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; 1102 + int ret; 1103 + 1104 + if (!accel_dev->rate_limiting) { 1105 + ret = -EOPNOTSUPP; 1106 + goto ret_err; 1107 + } 1108 + 1109 + if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) { 1110 + dev_info(&GET_DEV(accel_dev), "not supported\n"); 1111 + ret = -EOPNOTSUPP; 1112 + goto ret_free; 1113 + } 1114 + 1115 + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset, 1116 + RL_TOKEN_GRANULARITY_PCIEIN_BUCKET); 1117 + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset, 1118 + RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET); 1119 + 1120 + ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices); 1121 + if (ret) { 1122 + dev_err(&GET_DEV(accel_dev), "initialization failed\n"); 1123 + goto ret_free; 1124 + } 1125 + 1126 + ret = initialize_default_nodes(accel_dev); 1127 + if (ret) { 1128 + dev_err(&GET_DEV(accel_dev), 1129 + "failed to initialize default SLAs\n"); 1130 + goto ret_sla_rm; 1131 + } 1132 + 1133 + return 0; 1134 + 1135 + ret_sla_rm: 1136 + adf_rl_remove_sla_all(accel_dev, true); 1137 + ret_free: 1138 + kfree(accel_dev->rate_limiting); 1139 + accel_dev->rate_limiting = NULL; 1140 + ret_err: 1141 + return ret; 1142 + } 1143 + 1144 + void adf_rl_stop(struct adf_accel_dev *accel_dev) 1145 + { 
1146 + if (!accel_dev->rate_limiting) 1147 + return; 1148 + 1149 + adf_rl_remove_sla_all(accel_dev, true); 1150 + } 1151 + 1152 + void adf_rl_exit(struct adf_accel_dev *accel_dev) 1153 + { 1154 + if (!accel_dev->rate_limiting) 1155 + return; 1156 + 1157 + kfree(accel_dev->rate_limiting); 1158 + accel_dev->rate_limiting = NULL; 1159 + }
+169
drivers/crypto/intel/qat/qat_common/adf_rl.h
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */

#ifndef ADF_RL_H_
#define ADF_RL_H_

#include <linux/mutex.h>
#include <linux/types.h>

struct adf_accel_dev;

/* Maximum number of SLA nodes per node type */
#define RL_ROOT_MAX 4
#define RL_CLUSTER_MAX 16
#define RL_LEAF_MAX 64
/* Total capacity of the sla_id space */
#define RL_NODES_CNT_MAX (RL_ROOT_MAX + RL_CLUSTER_MAX + RL_LEAF_MAX)
/* Maximum ring pairs assignable to one leaf (RPs per VF) */
#define RL_RP_CNT_PER_LEAF_MAX 4U
/* Width of the ring pair bitmask in adf_rl_sla_input_data.rp_mask */
#define RL_RP_CNT_MAX 64
/* Sentinel values for "no SLA" / "default parent" */
#define RL_SLA_EMPTY_ID -1
#define RL_PARENT_DEFAULT_ID -1

/* SLA tree levels, ordered root -> leaf */
enum rl_node_type {
	RL_ROOT,
	RL_CLUSTER,
	RL_LEAF,
};

/* Services an SLA may be bound to; ADF_SVC_NONE doubles as the count */
enum adf_base_services {
	ADF_SVC_ASYM = 0,
	ADF_SVC_SYM,
	ADF_SVC_DC,
	ADF_SVC_NONE,
};

/**
 * struct adf_rl_sla_input_data - ratelimiting user input data structure
 * @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to SLA.
 *	     E.g. 0x5 -> RP0 and RP2 assigned; 0xA005 -> RP0,2,13,15 assigned.
 * @sla_id: ID of current SLA for operations update, rm, get. For the add
 *	    operation, this field will be updated with the ID of the newly
 *	    added SLA
 * @parent_id: ID of the SLA to which the current one should be assigned.
 *	       Set to -1 to refer to the default parent.
 * @cir: Committed information rate. Rate guaranteed to be achieved. Input value
 *	 is expressed in permille scale, i.e. 1000 refers to the maximum
 *	 device throughput for a selected service.
 * @pir: Peak information rate. Maximum rate available that the SLA can achieve.
 *	 Input value is expressed in permille scale, i.e. 1000 refers to
 *	 the maximum device throughput for a selected service.
 * @type: SLA type: root, cluster, leaf
 * @srv: Service associated to the SLA: asym, sym, dc.
 *
 * This structure is used to perform operations on an SLA.
 * Depending on the operation, some of the parameters are ignored.
 * The following list reports which parameters should be set for each operation.
 * - add: all except sla_id
 * - update: cir, pir, sla_id
 * - rm: sla_id
 * - rm_all: -
 * - get: sla_id
 * - get_capability_rem: srv, sla_id
 */
struct adf_rl_sla_input_data {
	u64 rp_mask;
	int sla_id;
	int parent_id;
	unsigned int cir;
	unsigned int pir;
	enum rl_node_type type;
	enum adf_base_services srv;
};

/* Per-device accelerator slice counts reported by the firmware at RL init */
struct rl_slice_cnt {
	u8 dcpr_cnt;
	u8 pke_cnt;
	u8 cph_cnt;
};

/* Device-specific (per generation/revision) rate limiting parameters */
struct adf_rl_hw_data {
	u32 scale_ref;
	u32 scan_interval;
	u32 r2l_offset;
	u32 l2c_offset;
	u32 c2s_offset;
	u32 pciin_tb_offset;
	u32 pciout_tb_offset;
	u32 pcie_scale_mul;
	u32 pcie_scale_div;
	u32 dcpr_correction;
	u32 max_tp[RL_ROOT_MAX];
	struct rl_slice_cnt slices;
};

/**
 * struct adf_rl - ratelimiting data structure
 * @accel_dev: pointer to acceleration device data
 * @device_data: pointer to rate limiting data specific to a device type (or revision)
 * @sla: array of pointers to SLA objects
 * @root: array of pointers to root type SLAs, element number reflects node_id
 * @cluster: array of pointers to cluster type SLAs, element number reflects node_id
 * @leaf: array of pointers to leaf type SLAs, element number reflects node_id
 * @rp_in_use: array of ring pair IDs already used in one of SLAs
 * @rl_lock: mutex object which is protecting data in this structure
 */
struct adf_rl {
	struct adf_accel_dev *accel_dev;
	struct adf_rl_hw_data *device_data;
	/* mapping sla_id to SLA objects */
	struct rl_sla *sla[RL_NODES_CNT_MAX];
	struct rl_sla *root[RL_ROOT_MAX];
	struct rl_sla *cluster[RL_CLUSTER_MAX];
	struct rl_sla *leaf[RL_LEAF_MAX];
	bool rp_in_use[RL_RP_CNT_MAX];
	/* Mutex protecting writing to SLAs lists */
	struct mutex rl_lock;
};

/**
 * struct rl_sla - SLA object data structure
 * @parent: pointer to the parent SLA (root/cluster)
 * @type: SLA type
 * @srv: service associated with this SLA
 * @sla_id: ID of the SLA, used as element number in SLA array and as identifier
 *	    shared with the user
 * @node_id: ID of node, each SLA type has a separate ID list
 * @cir: committed information rate
 * @pir: peak information rate (PIR >= CIR)
 * @rem_cir: if this SLA is a parent then this field represents a remaining
 *	     value to be used by child SLAs.
 * @ring_pairs_ids: array with numeric ring pairs IDs assigned to this SLA
 * @ring_pairs_cnt: number of assigned ring pairs listed in the array above
 */
struct rl_sla {
	struct rl_sla *parent;
	enum rl_node_type type;
	enum adf_base_services srv;
	u32 sla_id;
	u32 node_id;
	u32 cir;
	u32 pir;
	u32 rem_cir;
	u16 ring_pairs_ids[RL_RP_CNT_PER_LEAF_MAX];
	u16 ring_pairs_cnt;
};

int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in);
int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
		      struct adf_rl_sla_input_data *sla_in);
int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in);
int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
				    enum adf_base_services srv, int sla_id);
int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id);
void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default);

int adf_rl_init(struct adf_accel_dev *accel_dev);
int adf_rl_start(struct adf_accel_dev *accel_dev);
void adf_rl_stop(struct adf_accel_dev *accel_dev);
void adf_rl_exit(struct adf_accel_dev *accel_dev);

u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
			    enum adf_base_services svc_type, bool is_bw_out);
u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
			       enum adf_base_services svc_type);
u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
				  enum adf_base_services svc_type);

#endif /* ADF_RL_H_ */
+97
drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright(c) 2023 Intel Corporation */ 3 + 4 + #include <linux/dma-mapping.h> 5 + #include <linux/pci.h> 6 + 7 + #include "adf_admin.h" 8 + #include "adf_accel_devices.h" 9 + #include "adf_rl_admin.h" 10 + 11 + static void 12 + prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr, 13 + struct icp_qat_fw_init_admin_sla_config_params *fw_params, 14 + struct icp_qat_fw_init_admin_req *req, bool is_update) 15 + { 16 + req->cmd_id = is_update ? ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD; 17 + req->init_cfg_ptr = dma_addr; 18 + req->init_cfg_sz = sizeof(*fw_params); 19 + req->node_id = sla->node_id; 20 + req->node_type = sla->type; 21 + req->rp_count = sla->ring_pairs_cnt; 22 + req->svc_type = sla->srv; 23 + } 24 + 25 + static void 26 + prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla, 27 + struct icp_qat_fw_init_admin_sla_config_params *fw_params) 28 + { 29 + fw_params->pcie_in_cir = 30 + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, false); 31 + fw_params->pcie_in_pir = 32 + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, false); 33 + fw_params->pcie_out_cir = 34 + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, true); 35 + fw_params->pcie_out_pir = 36 + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, true); 37 + 38 + fw_params->slice_util_cir = 39 + adf_rl_calculate_slice_tokens(accel_dev, sla->cir, sla->srv); 40 + fw_params->slice_util_pir = 41 + adf_rl_calculate_slice_tokens(accel_dev, sla->pir, sla->srv); 42 + 43 + fw_params->ae_util_cir = 44 + adf_rl_calculate_ae_cycles(accel_dev, sla->cir, sla->srv); 45 + fw_params->ae_util_pir = 46 + adf_rl_calculate_ae_cycles(accel_dev, sla->pir, sla->srv); 47 + 48 + memcpy(fw_params->rp_ids, sla->ring_pairs_ids, 49 + sizeof(sla->ring_pairs_ids)); 50 + } 51 + 52 + int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, 53 + struct rl_slice_cnt *slices_int) 54 + { 55 + struct icp_qat_fw_init_admin_slice_cnt 
slices_resp = { }; 56 + int ret; 57 + 58 + ret = adf_send_admin_rl_init(accel_dev, &slices_resp); 59 + if (ret) 60 + return ret; 61 + 62 + slices_int->dcpr_cnt = slices_resp.dcpr_cnt; 63 + slices_int->pke_cnt = slices_resp.pke_cnt; 64 + /* For symmetric crypto, slice tokens are relative to the UCS slice */ 65 + slices_int->cph_cnt = slices_resp.ucs_cnt; 66 + 67 + return 0; 68 + } 69 + 70 + int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, 71 + struct rl_sla *sla, bool is_update) 72 + { 73 + struct icp_qat_fw_init_admin_sla_config_params *fw_params; 74 + struct icp_qat_fw_init_admin_req req = { }; 75 + dma_addr_t dma_addr; 76 + int ret; 77 + 78 + fw_params = dma_alloc_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), 79 + &dma_addr, GFP_KERNEL); 80 + if (!fw_params) 81 + return -ENOMEM; 82 + 83 + prep_admin_req_params(accel_dev, sla, fw_params); 84 + prep_admin_req_msg(sla, dma_addr, fw_params, &req, is_update); 85 + ret = adf_send_admin_rl_add_update(accel_dev, &req); 86 + 87 + dma_free_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), fw_params, 88 + dma_addr); 89 + 90 + return ret; 91 + } 92 + 93 + int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, 94 + u8 node_type) 95 + { 96 + return adf_send_admin_rl_delete(accel_dev, node_id, node_type); 97 + }
+18
drivers/crypto/intel/qat/qat_common/adf_rl_admin.h
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */

#ifndef ADF_RL_ADMIN_H_
#define ADF_RL_ADMIN_H_

#include <linux/types.h>

#include "adf_rl.h"

/*
 * Send the rate limiting init message to the firmware and store the
 * reported accelerator slice counts in @slices_int.
 */
int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
			       struct rl_slice_cnt *slices_int);
/* Program a new SLA (or update an existing one) into the firmware. */
int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev,
				     struct rl_sla *sla, bool is_update);
/* Remove the firmware node identified by @node_id/@node_type. */
int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id,
				 u8 node_type);

#endif /* ADF_RL_ADMIN_H_ */
+38
drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
··· 5 5 6 6 #include "icp_qat_fw.h" 7 7 8 + #define RL_MAX_RP_IDS 16 9 + 8 10 enum icp_qat_fw_init_admin_cmd_id { 9 11 ICP_QAT_FW_INIT_AE = 0, 10 12 ICP_QAT_FW_TRNG_ENABLE = 1, ··· 21 19 ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10, 22 20 ICP_QAT_FW_DC_CHAIN_INIT = 11, 23 21 ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, 22 + ICP_QAT_FW_RL_INIT = 15, 24 23 ICP_QAT_FW_TIMER_GET = 19, 25 24 ICP_QAT_FW_CNV_STATS_GET = 20, 26 25 ICP_QAT_FW_PM_STATE_CONFIG = 128, 27 26 ICP_QAT_FW_PM_INFO = 129, 27 + ICP_QAT_FW_RL_ADD = 134, 28 + ICP_QAT_FW_RL_UPDATE = 135, 29 + ICP_QAT_FW_RL_REMOVE = 136, 28 30 }; 29 31 30 32 enum icp_qat_fw_init_admin_resp_status { 31 33 ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0, 32 34 ICP_QAT_FW_INIT_RESP_STATUS_FAIL 35 + }; 36 + 37 + struct icp_qat_fw_init_admin_slice_cnt { 38 + __u8 cpr_cnt; 39 + __u8 xlt_cnt; 40 + __u8 dcpr_cnt; 41 + __u8 pke_cnt; 42 + __u8 wat_cnt; 43 + __u8 wcp_cnt; 44 + __u8 ucs_cnt; 45 + __u8 cph_cnt; 46 + __u8 ath_cnt; 47 + }; 48 + 49 + struct icp_qat_fw_init_admin_sla_config_params { 50 + __u32 pcie_in_cir; 51 + __u32 pcie_in_pir; 52 + __u32 pcie_out_cir; 53 + __u32 pcie_out_pir; 54 + __u32 slice_util_cir; 55 + __u32 slice_util_pir; 56 + __u32 ae_util_cir; 57 + __u32 ae_util_pir; 58 + __u16 rp_ids[RL_MAX_RP_IDS]; 33 59 }; 34 60 35 61 struct icp_qat_fw_init_admin_req { ··· 78 48 }; 79 49 struct { 80 50 __u32 heartbeat_ticks; 51 + }; 52 + struct { 53 + __u16 node_id; 54 + __u8 node_type; 55 + __u8 svc_type; 56 + __u8 resrvd5[3]; 57 + __u8 rp_count; 81 58 }; 82 59 __u32 idle_filter; 83 60 }; ··· 147 110 __u32 unsuccessful_count; 148 111 __u64 resrvd8; 149 112 }; 113 + struct icp_qat_fw_init_admin_slice_cnt slices; 150 114 __u16 fw_capabilities; 151 115 }; 152 116 } __packed;