Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: cavium - Add support for CNN55XX adapters.

Add Physical Function driver support for CNN55XX crypto adapters.
CNN55XX adapters belong to the Cavium NITROX family series,
which accelerate both Symmetric and Asymmetric crypto workloads.

These adapters have crypto engines that need firmware
to become operational.

Signed-off-by: Srikanth Jampala <Jampala.Srikanth@cavium.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Srikanth Jampala; committed by Herbert Xu
14fa93cd 3bca64c1

+4002
+1
drivers/crypto/Kconfig
··· 541 541 542 542 source "drivers/crypto/qat/Kconfig" 543 543 source "drivers/crypto/cavium/cpt/Kconfig" 544 + source "drivers/crypto/cavium/nitrox/Kconfig" 544 545 545 546 config CRYPTO_DEV_CAVIUM_ZIP 546 547 tristate "Cavium ZIP driver"
+1
drivers/crypto/Makefile
··· 6 6 obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ 7 7 obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ 8 8 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ 9 + obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/ 9 10 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o 10 11 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ 11 12 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+21
drivers/crypto/cavium/nitrox/Kconfig
··· 1 + # 2 + # Cavium NITROX Crypto Device configuration 3 + # 4 + config CRYPTO_DEV_NITROX 5 + tristate 6 + select CRYPTO_BLKCIPHER 7 + select CRYPTO_AES 8 + select CRYPTO_DES 9 + select FW_LOADER 10 + 11 + config CRYPTO_DEV_NITROX_CNN55XX 12 + tristate "Support for Cavium CNN55XX driver" 13 + depends on PCI_MSI && 64BIT 14 + select CRYPTO_DEV_NITROX 15 + default m 16 + help 17 + Support for Cavium NITROX family CNN55XX driver 18 + for accelerating crypto workloads. 19 + 20 + To compile this as a module, choose M here: the module 21 + will be called n5pf.
+7
drivers/crypto/cavium/nitrox/Makefile
··· 1 + obj-$(CONFIG_CRYPTO_DEV_NITROX_CNN55XX) += n5pf.o 2 + 3 + n5pf-objs := nitrox_main.o \ 4 + nitrox_isr.o \ 5 + nitrox_lib.o \ 6 + nitrox_hal.o \ 7 + nitrox_reqmgr.o
+35
drivers/crypto/cavium/nitrox/nitrox_common.h
··· 1 + #ifndef __NITROX_COMMON_H 2 + #define __NITROX_COMMON_H 3 + 4 + #include "nitrox_dev.h" 5 + #include "nitrox_req.h" 6 + 7 + void nitrox_pf_cleanup_isr(struct nitrox_device *ndev); 8 + int nitrox_pf_init_isr(struct nitrox_device *ndev); 9 + 10 + int nitrox_common_sw_init(struct nitrox_device *ndev); 11 + void nitrox_common_sw_cleanup(struct nitrox_device *ndev); 12 + 13 + void pkt_slc_resp_handler(unsigned long data); 14 + int nitrox_process_se_request(struct nitrox_device *ndev, 15 + struct se_crypto_request *req, 16 + completion_t cb, 17 + struct skcipher_request *skreq); 18 + void backlog_qflush_work(struct work_struct *work); 19 + 20 + void nitrox_config_emu_unit(struct nitrox_device *ndev); 21 + void nitrox_config_pkt_input_rings(struct nitrox_device *ndev); 22 + void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev); 23 + void nitrox_config_vfmode(struct nitrox_device *ndev, int mode); 24 + void nitrox_config_nps_unit(struct nitrox_device *ndev); 25 + void nitrox_config_pom_unit(struct nitrox_device *ndev); 26 + void nitrox_config_rand_unit(struct nitrox_device *ndev); 27 + void nitrox_config_efl_unit(struct nitrox_device *ndev); 28 + void nitrox_config_bmi_unit(struct nitrox_device *ndev); 29 + void nitrox_config_bmo_unit(struct nitrox_device *ndev); 30 + void nitrox_config_lbc_unit(struct nitrox_device *ndev); 31 + void invalidate_lbc(struct nitrox_device *ndev); 32 + void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); 33 + void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); 34 + 35 + #endif /* __NITROX_COMMON_H */
+1080
drivers/crypto/cavium/nitrox/nitrox_csr.h
··· 1 + #ifndef __NITROX_CSR_H 2 + #define __NITROX_CSR_H 3 + 4 + #include <asm/byteorder.h> 5 + #include <linux/types.h> 6 + 7 + /* EMU clusters */ 8 + #define NR_CLUSTERS 4 9 + #define AE_CORES_PER_CLUSTER 20 10 + #define SE_CORES_PER_CLUSTER 16 11 + 12 + /* BIST registers */ 13 + #define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000)) 14 + #define UCD_BIST_STATUS 0x12C0070 15 + #define NPS_CORE_BIST_REG 0x10000E8 16 + #define NPS_CORE_NPC_BIST_REG 0x1000128 17 + #define NPS_PKT_SLC_BIST_REG 0x1040088 18 + #define NPS_PKT_IN_BIST_REG 0x1040100 19 + #define POM_BIST_REG 0x11C0100 20 + #define BMI_BIST_REG 0x1140080 21 + #define EFL_CORE_BIST_REGX(_i) (0x1240100 + ((_i) * 0x400)) 22 + #define EFL_TOP_BIST_STAT 0x1241090 23 + #define BMO_BIST_REG 0x1180080 24 + #define LBC_BIST_STATUS 0x1200020 25 + #define PEM_BIST_STATUSX(_i) (0x1080468 | ((_i) << 18)) 26 + 27 + /* EMU registers */ 28 + #define EMU_SE_ENABLEX(_i) (0x1400000 + ((_i) * 0x40000)) 29 + #define EMU_AE_ENABLEX(_i) (0x1400008 + ((_i) * 0x40000)) 30 + #define EMU_WD_INT_ENA_W1SX(_i) (0x1402318 + ((_i) * 0x40000)) 31 + #define EMU_GE_INT_ENA_W1SX(_i) (0x1402518 + ((_i) * 0x40000)) 32 + #define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000)) 33 + 34 + /* UCD registers */ 35 + #define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010 36 + #define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20)) 37 + #define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000)) 38 + 39 + /* NPS core registers */ 40 + #define NPS_CORE_GBL_VFCFG 0x1000000 41 + #define NPS_CORE_CONTROL 0x1000008 42 + #define NPS_CORE_INT_ACTIVE 0x1000080 43 + #define NPS_CORE_INT 0x10000A0 44 + #define NPS_CORE_INT_ENA_W1S 0x10000B8 45 + 46 + /* NPS packet registers */ 47 + #define NPS_PKT_INT 0x1040018 48 + #define NPS_PKT_IN_RERR_HI 0x1040108 49 + #define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120 50 + #define NPS_PKT_IN_RERR_LO 0x1040128 51 + #define NPS_PKT_IN_RERR_LO_ENA_W1S 0x1040140 52 + #define NPS_PKT_IN_ERR_TYPE 0x1040148 53 + #define 
NPS_PKT_IN_ERR_TYPE_ENA_W1S 0x1040160 54 + #define NPS_PKT_IN_INSTR_CTLX(_i) (0x10060 + ((_i) * 0x40000)) 55 + #define NPS_PKT_IN_INSTR_BADDRX(_i) (0x10068 + ((_i) * 0x40000)) 56 + #define NPS_PKT_IN_INSTR_RSIZEX(_i) (0x10070 + ((_i) * 0x40000)) 57 + #define NPS_PKT_IN_DONE_CNTSX(_i) (0x10080 + ((_i) * 0x40000)) 58 + #define NPS_PKT_IN_INSTR_BAOFF_DBELLX(_i) (0x10078 + ((_i) * 0x40000)) 59 + #define NPS_PKT_IN_INT_LEVELSX(_i) (0x10088 + ((_i) * 0x40000)) 60 + 61 + #define NPS_PKT_SLC_RERR_HI 0x1040208 62 + #define NPS_PKT_SLC_RERR_HI_ENA_W1S 0x1040220 63 + #define NPS_PKT_SLC_RERR_LO 0x1040228 64 + #define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240 65 + #define NPS_PKT_SLC_ERR_TYPE 0x1040248 66 + #define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260 67 + #define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000)) 68 + #define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000)) 69 + #define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000)) 70 + 71 + /* POM registers */ 72 + #define POM_INT_ENA_W1S 0x11C0018 73 + #define POM_GRP_EXECMASKX(_i) (0x11C1100 | ((_i) * 8)) 74 + #define POM_INT 0x11C0000 75 + #define POM_PERF_CTL 0x11CC400 76 + 77 + /* BMI registers */ 78 + #define BMI_INT 0x1140000 79 + #define BMI_CTL 0x1140020 80 + #define BMI_INT_ENA_W1S 0x1140018 81 + 82 + /* EFL registers */ 83 + #define EFL_CORE_INT_ENA_W1SX(_i) (0x1240018 + ((_i) * 0x400)) 84 + #define EFL_CORE_VF_ERR_INT0X(_i) (0x1240050 + ((_i) * 0x400)) 85 + #define EFL_CORE_VF_ERR_INT0_ENA_W1SX(_i) (0x1240068 + ((_i) * 0x400)) 86 + #define EFL_CORE_VF_ERR_INT1X(_i) (0x1240070 + ((_i) * 0x400)) 87 + #define EFL_CORE_VF_ERR_INT1_ENA_W1SX(_i) (0x1240088 + ((_i) * 0x400)) 88 + #define EFL_CORE_SE_ERR_INTX(_i) (0x12400A0 + ((_i) * 0x400)) 89 + #define EFL_RNM_CTL_STATUS 0x1241800 90 + #define EFL_CORE_INTX(_i) (0x1240000 + ((_i) * 0x400)) 91 + 92 + /* BMO registers */ 93 + #define BMO_CTL2 0x1180028 94 + 95 + /* LBC registers */ 96 + #define LBC_INT 0x1200000 97 + #define LBC_INVAL_CTL 0x1201010 98 + #define 
LBC_PLM_VF1_64_INT 0x1202008 99 + #define LBC_INVAL_STATUS 0x1202010 100 + #define LBC_INT_ENA_W1S 0x1203000 101 + #define LBC_PLM_VF1_64_INT_ENA_W1S 0x1205008 102 + #define LBC_PLM_VF65_128_INT 0x1206008 103 + #define LBC_ELM_VF1_64_INT 0x1208000 104 + #define LBC_PLM_VF65_128_INT_ENA_W1S 0x1209008 105 + #define LBC_ELM_VF1_64_INT_ENA_W1S 0x120B000 106 + #define LBC_ELM_VF65_128_INT 0x120C000 107 + #define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000 108 + 109 + /* PEM registers */ 110 + #define PEM0_INT 0x1080428 111 + 112 + /** 113 + * struct emu_fuse_map - EMU Fuse Map Registers 114 + * @ae_fuse: Fuse settings for AE 19..0 115 + * @se_fuse: Fuse settings for SE 15..0 116 + * 117 + * A set bit indicates the unit is fuse disabled. 118 + */ 119 + union emu_fuse_map { 120 + u64 value; 121 + struct { 122 + #if (defined(__BIG_ENDIAN_BITFIELD)) 123 + u64 valid : 1; 124 + u64 raz_52_62 : 11; 125 + u64 ae_fuse : 20; 126 + u64 raz_16_31 : 16; 127 + u64 se_fuse : 16; 128 + #else 129 + u64 se_fuse : 16; 130 + u64 raz_16_31 : 16; 131 + u64 ae_fuse : 20; 132 + u64 raz_52_62 : 11; 133 + u64 valid : 1; 134 + #endif 135 + } s; 136 + }; 137 + 138 + /** 139 + * struct emu_se_enable - Symmetric Engine Enable Registers 140 + * @enable: Individual enables for each of the clusters 141 + * 16 symmetric engines. 142 + */ 143 + union emu_se_enable { 144 + u64 value; 145 + struct { 146 + #if (defined(__BIG_ENDIAN_BITFIELD)) 147 + u64 raz : 48; 148 + u64 enable : 16; 149 + #else 150 + u64 enable : 16; 151 + u64 raz : 48; 152 + #endif 153 + } s; 154 + }; 155 + 156 + /** 157 + * struct emu_ae_enable - EMU Asymmetric engines. 158 + * @enable: Individual enables for each of the cluster's 159 + * 20 Asymmetric Engines. 
160 + */ 161 + union emu_ae_enable { 162 + u64 value; 163 + struct { 164 + #if (defined(__BIG_ENDIAN_BITFIELD)) 165 + u64 raz : 44; 166 + u64 enable : 20; 167 + #else 168 + u64 enable : 20; 169 + u64 raz : 44; 170 + #endif 171 + } s; 172 + }; 173 + 174 + /** 175 + * struct emu_wd_int_ena_w1s - EMU Interrupt Enable Registers 176 + * @ae_wd: Reads or sets enable for EMU(0..3)_WD_INT[AE_WD] 177 + * @se_wd: Reads or sets enable for EMU(0..3)_WD_INT[SE_WD] 178 + */ 179 + union emu_wd_int_ena_w1s { 180 + u64 value; 181 + struct { 182 + #if (defined(__BIG_ENDIAN_BITFIELD)) 183 + u64 raz2 : 12; 184 + u64 ae_wd : 20; 185 + u64 raz1 : 16; 186 + u64 se_wd : 16; 187 + #else 188 + u64 se_wd : 16; 189 + u64 raz1 : 16; 190 + u64 ae_wd : 20; 191 + u64 raz2 : 12; 192 + #endif 193 + } s; 194 + }; 195 + 196 + /** 197 + * struct emu_ge_int_ena_w1s - EMU Interrupt Enable set registers 198 + * @ae_ge: Reads or sets enable for EMU(0..3)_GE_INT[AE_GE] 199 + * @se_ge: Reads or sets enable for EMU(0..3)_GE_INT[SE_GE] 200 + */ 201 + union emu_ge_int_ena_w1s { 202 + u64 value; 203 + struct { 204 + #if (defined(__BIG_ENDIAN_BITFIELD)) 205 + u64 raz_52_63 : 12; 206 + u64 ae_ge : 20; 207 + u64 raz_16_31: 16; 208 + u64 se_ge : 16; 209 + #else 210 + u64 se_ge : 16; 211 + u64 raz_16_31: 16; 212 + u64 ae_ge : 20; 213 + u64 raz_52_63 : 12; 214 + #endif 215 + } s; 216 + }; 217 + 218 + /** 219 + * struct nps_pkt_slc_ctl - Solicited Packet Out Control Registers 220 + * @rh: Indicates whether to remove or include the response header 221 + * 1 = Include, 0 = Remove 222 + * @z: If set, 8 trailing 0x00 bytes will be added to the end of the 223 + * outgoing packet. 224 + * @enb: Enable for this port. 
225 + */ 226 + union nps_pkt_slc_ctl { 227 + u64 value; 228 + struct { 229 + #if defined(__BIG_ENDIAN_BITFIELD) 230 + u64 raz : 61; 231 + u64 rh : 1; 232 + u64 z : 1; 233 + u64 enb : 1; 234 + #else 235 + u64 enb : 1; 236 + u64 z : 1; 237 + u64 rh : 1; 238 + u64 raz : 61; 239 + #endif 240 + } s; 241 + }; 242 + 243 + /** 244 + * struct nps_pkt_slc_cnts - Solicited Packet Out Count Registers 245 + * @slc_int: Returns a 1 when: 246 + * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or 247 + * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET]. 248 + * To clear the bit, the CNTS register must be written to clear. 249 + * @in_int: Returns a 1 when: 250 + * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]. 251 + * To clear the bit, the DONE_CNTS register must be written to clear. 252 + * @mbox_int: Returns a 1 when: 253 + * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. To clear the bit, 254 + * write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] with 1. 255 + * @timer: Timer, incremented every 2048 coprocessor clock cycles 256 + * when [CNT] is not zero. The hardware clears both [TIMER] and 257 + * [INT] when [CNT] goes to 0. 258 + * @cnt: Packet counter. Hardware adds to [CNT] as it sends packets out. 259 + * On a write to this CSR, hardware subtracts the amount written to the 260 + * [CNT] field from [CNT]. 261 + */ 262 + union nps_pkt_slc_cnts { 263 + u64 value; 264 + struct { 265 + #if defined(__BIG_ENDIAN_BITFIELD) 266 + u64 slc_int : 1; 267 + u64 uns_int : 1; 268 + u64 in_int : 1; 269 + u64 mbox_int : 1; 270 + u64 resend : 1; 271 + u64 raz : 5; 272 + u64 timer : 22; 273 + u64 cnt : 32; 274 + #else 275 + u64 cnt : 32; 276 + u64 timer : 22; 277 + u64 raz : 5; 278 + u64 resend : 1; 279 + u64 mbox_int : 1; 280 + u64 in_int : 1; 281 + u64 uns_int : 1; 282 + u64 slc_int : 1; 283 + #endif 284 + } s; 285 + }; 286 + 287 + /** 288 + * struct nps_pkt_slc_int_levels - Solicited Packet Out Interrupt Levels 289 + * Registers. 
290 + * @bmode: Determines whether NPS_PKT_SLC_CNTS[CNT] is a byte or 291 + * packet counter. 292 + * @timet: Output port counter time interrupt threshold. 293 + * @cnt: Output port counter interrupt threshold. 294 + */ 295 + union nps_pkt_slc_int_levels { 296 + u64 value; 297 + struct { 298 + #if defined(__BIG_ENDIAN_BITFIELD) 299 + u64 bmode : 1; 300 + u64 raz : 9; 301 + u64 timet : 22; 302 + u64 cnt : 32; 303 + #else 304 + u64 cnt : 32; 305 + u64 timet : 22; 306 + u64 raz : 9; 307 + u64 bmode : 1; 308 + #endif 309 + } s; 310 + }; 311 + 312 + /** 313 + * struct nps_pkt_inst - NPS Packet Interrupt Register 314 + * @in_err: Set when any NPS_PKT_IN_RERR_HI/LO bit and 315 + * corresponding NPS_PKT_IN_RERR_*_ENA_* bit are bot set. 316 + * @uns_err: Set when any NSP_PKT_UNS_RERR_HI/LO bit and 317 + * corresponding NPS_PKT_UNS_RERR_*_ENA_* bit are both set. 318 + * @slc_er: Set when any NSP_PKT_SLC_RERR_HI/LO bit and 319 + * corresponding NPS_PKT_SLC_RERR_*_ENA_* bit are both set. 320 + */ 321 + union nps_pkt_int { 322 + u64 value; 323 + struct { 324 + #if defined(__BIG_ENDIAN_BITFIELD) 325 + u64 raz : 54; 326 + u64 uns_wto : 1; 327 + u64 in_err : 1; 328 + u64 uns_err : 1; 329 + u64 slc_err : 1; 330 + u64 in_dbe : 1; 331 + u64 in_sbe : 1; 332 + u64 uns_dbe : 1; 333 + u64 uns_sbe : 1; 334 + u64 slc_dbe : 1; 335 + u64 slc_sbe : 1; 336 + #else 337 + u64 slc_sbe : 1; 338 + u64 slc_dbe : 1; 339 + u64 uns_sbe : 1; 340 + u64 uns_dbe : 1; 341 + u64 in_sbe : 1; 342 + u64 in_dbe : 1; 343 + u64 slc_err : 1; 344 + u64 uns_err : 1; 345 + u64 in_err : 1; 346 + u64 uns_wto : 1; 347 + u64 raz : 54; 348 + #endif 349 + } s; 350 + }; 351 + 352 + /** 353 + * struct nps_pkt_in_done_cnts - Input instruction ring counts registers 354 + * @slc_cnt: Returns a 1 when: 355 + * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or 356 + * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SCL(i)_INT_LEVELS[TIMET] 357 + * To clear the bit, the CNTS register must be 358 + * written to clear the underlying 
condition 359 + * @uns_int: Return a 1 when: 360 + * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT], or 361 + * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET] 362 + * To clear the bit, the CNTS register must be 363 + * written to clear the underlying condition 364 + * @in_int: Returns a 1 when: 365 + * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT] 366 + * To clear the bit, the DONE_CNTS register 367 + * must be written to clear the underlying condition 368 + * @mbox_int: Returns a 1 when: 369 + * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. 370 + * To clear the bit, write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] 371 + * with 1. 372 + * @resend: A write of 1 will resend an MSI-X interrupt message if any 373 + * of the following conditions are true for this ring "i". 374 + * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT] 375 + * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET] 376 + * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT] 377 + * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET] 378 + * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT] 379 + * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set 380 + * @cnt: Packet counter. Hardware adds to [CNT] as it reads 381 + * packets. On a write to this CSR, hardware substracts the 382 + * amount written to the [CNT] field from [CNT], which will 383 + * clear PKT_IN(i)_INT_STATUS[INTR] if [CNT] becomes <= 384 + * NPS_PKT_IN(i)_INT_LEVELS[CNT]. This register should be 385 + * cleared before enabling a ring by reading the current 386 + * value and writing it back. 
387 + */ 388 + union nps_pkt_in_done_cnts { 389 + u64 value; 390 + struct { 391 + #if defined(__BIG_ENDIAN_BITFIELD) 392 + u64 slc_int : 1; 393 + u64 uns_int : 1; 394 + u64 in_int : 1; 395 + u64 mbox_int : 1; 396 + u64 resend : 1; 397 + u64 raz : 27; 398 + u64 cnt : 32; 399 + #else 400 + u64 cnt : 32; 401 + u64 raz : 27; 402 + u64 resend : 1; 403 + u64 mbox_int : 1; 404 + u64 in_int : 1; 405 + u64 uns_int : 1; 406 + u64 slc_int : 1; 407 + #endif 408 + } s; 409 + }; 410 + 411 + /** 412 + * struct nps_pkt_in_instr_ctl - Input Instruction Ring Control Registers. 413 + * @is64b: If 1, the ring uses 64-byte instructions. If 0, the 414 + * ring uses 32-byte instructions. 415 + * @enb: Enable for the input ring. 416 + */ 417 + union nps_pkt_in_instr_ctl { 418 + u64 value; 419 + struct { 420 + #if (defined(__BIG_ENDIAN_BITFIELD)) 421 + u64 raz : 62; 422 + u64 is64b : 1; 423 + u64 enb : 1; 424 + #else 425 + u64 enb : 1; 426 + u64 is64b : 1; 427 + u64 raz : 62; 428 + #endif 429 + } s; 430 + }; 431 + 432 + /** 433 + * struct nps_pkt_in_instr_rsize - Input instruction ring size registers 434 + * @rsize: Ring size (number of instructions) 435 + */ 436 + union nps_pkt_in_instr_rsize { 437 + u64 value; 438 + struct { 439 + #if (defined(__BIG_ENDIAN_BITFIELD)) 440 + u64 raz : 32; 441 + u64 rsize : 32; 442 + #else 443 + u64 rsize : 32; 444 + u64 raz : 32; 445 + #endif 446 + } s; 447 + }; 448 + 449 + /** 450 + * struct nps_pkt_in_instr_baoff_dbell - Input instruction ring 451 + * base address offset and doorbell registers 452 + * @aoff: Address offset. The offset from the NPS_PKT_IN_INSTR_BADDR 453 + * where the next pointer is read. 454 + * @dbell: Pointer list doorbell count. Write operations to this field 455 + * increments the present value here. Read operations return the 456 + * present value. 
457 + */ 458 + union nps_pkt_in_instr_baoff_dbell { 459 + u64 value; 460 + struct { 461 + #if (defined(__BIG_ENDIAN_BITFIELD)) 462 + u64 aoff : 32; 463 + u64 dbell : 32; 464 + #else 465 + u64 dbell : 32; 466 + u64 aoff : 32; 467 + #endif 468 + } s; 469 + }; 470 + 471 + /** 472 + * struct nps_core_int_ena_w1s - NPS core interrupt enable set register 473 + * @host_nps_wr_err: Reads or sets enable for 474 + * NPS_CORE_INT[HOST_NPS_WR_ERR]. 475 + * @npco_dma_malform: Reads or sets enable for 476 + * NPS_CORE_INT[NPCO_DMA_MALFORM]. 477 + * @exec_wr_timeout: Reads or sets enable for 478 + * NPS_CORE_INT[EXEC_WR_TIMEOUT]. 479 + * @host_wr_timeout: Reads or sets enable for 480 + * NPS_CORE_INT[HOST_WR_TIMEOUT]. 481 + * @host_wr_err: Reads or sets enable for 482 + * NPS_CORE_INT[HOST_WR_ERR] 483 + */ 484 + union nps_core_int_ena_w1s { 485 + u64 value; 486 + struct { 487 + #if (defined(__BIG_ENDIAN_BITFIELD)) 488 + u64 raz4 : 55; 489 + u64 host_nps_wr_err : 1; 490 + u64 npco_dma_malform : 1; 491 + u64 exec_wr_timeout : 1; 492 + u64 host_wr_timeout : 1; 493 + u64 host_wr_err : 1; 494 + u64 raz3 : 1; 495 + u64 raz2 : 1; 496 + u64 raz1 : 1; 497 + u64 raz0 : 1; 498 + #else 499 + u64 raz0 : 1; 500 + u64 raz1 : 1; 501 + u64 raz2 : 1; 502 + u64 raz3 : 1; 503 + u64 host_wr_err : 1; 504 + u64 host_wr_timeout : 1; 505 + u64 exec_wr_timeout : 1; 506 + u64 npco_dma_malform : 1; 507 + u64 host_nps_wr_err : 1; 508 + u64 raz4 : 55; 509 + #endif 510 + } s; 511 + }; 512 + 513 + /** 514 + * struct nps_core_gbl_vfcfg - Global VF Configuration Register. 515 + * @ilk_disable: When set, this bit indicates that the ILK interface has 516 + * been disabled. 
517 + * @obaf: BMO allocation control 518 + * 0 = allocate per queue 519 + * 1 = allocate per VF 520 + * @ibaf: BMI allocation control 521 + * 0 = allocate per queue 522 + * 1 = allocate per VF 523 + * @zaf: ZIP allocation control 524 + * 0 = allocate per queue 525 + * 1 = allocate per VF 526 + * @aeaf: AE allocation control 527 + * 0 = allocate per queue 528 + * 1 = allocate per VF 529 + * @seaf: SE allocation control 530 + * 0 = allocation per queue 531 + * 1 = allocate per VF 532 + * @cfg: VF/PF mode. 533 + */ 534 + union nps_core_gbl_vfcfg { 535 + u64 value; 536 + struct { 537 + #if (defined(__BIG_ENDIAN_BITFIELD)) 538 + u64 raz :55; 539 + u64 ilk_disable :1; 540 + u64 obaf :1; 541 + u64 ibaf :1; 542 + u64 zaf :1; 543 + u64 aeaf :1; 544 + u64 seaf :1; 545 + u64 cfg :3; 546 + #else 547 + u64 cfg :3; 548 + u64 seaf :1; 549 + u64 aeaf :1; 550 + u64 zaf :1; 551 + u64 ibaf :1; 552 + u64 obaf :1; 553 + u64 ilk_disable :1; 554 + u64 raz :55; 555 + #endif 556 + } s; 557 + }; 558 + 559 + /** 560 + * struct nps_core_int_active - NPS Core Interrupt Active Register 561 + * @resend: Resend MSI-X interrupt if needs to handle interrupts 562 + * Sofware can set this bit and then exit the ISR. 
563 + * @ocla: Set when any OCLA(0)_INT and corresponding OCLA(0_INT_ENA_W1C 564 + * bit are set 565 + * @mbox: Set when any NPS_PKT_MBOX_INT_LO/HI and corresponding 566 + * NPS_PKT_MBOX_INT_LO_ENA_W1C/HI_ENA_W1C bits are set 567 + * @emu: bit i is set in [EMU] when any EMU(i)_INT bit is set 568 + * @bmo: Set when any BMO_INT bit is set 569 + * @bmi: Set when any BMI_INT bit is set or when any non-RO 570 + * BMI_INT and corresponding BMI_INT_ENA_W1C bits are both set 571 + * @aqm: Set when any AQM_INT bit is set 572 + * @zqm: Set when any ZQM_INT bit is set 573 + * @efl: Set when any EFL_INT RO bit is set or when any non-RO EFL_INT 574 + * and corresponding EFL_INT_ENA_W1C bits are both set 575 + * @ilk: Set when any ILK_INT bit is set 576 + * @lbc: Set when any LBC_INT RO bit is set or when any non-RO LBC_INT 577 + * and corresponding LBC_INT_ENA_W1C bits are bot set 578 + * @pem: Set when any PEM(0)_INT RO bit is set or when any non-RO 579 + * PEM(0)_INT and corresponding PEM(0)_INT_ENA_W1C bit are both set 580 + * @ucd: Set when any UCD_INT bit is set 581 + * @zctl: Set when any ZIP_INT RO bit is set or when any non-RO ZIP_INT 582 + * and corresponding ZIP_INT_ENA_W1C bits are both set 583 + * @lbm: Set when any LBM_INT bit is set 584 + * @nps_pkt: Set when any NPS_PKT_INT bit is set 585 + * @nps_core: Set when any NPS_CORE_INT RO bit is set or when non-RO 586 + * NPS_CORE_INT and corresponding NSP_CORE_INT_ENA_W1C bits are both set 587 + */ 588 + union nps_core_int_active { 589 + u64 value; 590 + struct { 591 + #if (defined(__BIG_ENDIAN_BITFIELD)) 592 + u64 resend : 1; 593 + u64 raz : 43; 594 + u64 ocla : 1; 595 + u64 mbox : 1; 596 + u64 emu : 4; 597 + u64 bmo : 1; 598 + u64 bmi : 1; 599 + u64 aqm : 1; 600 + u64 zqm : 1; 601 + u64 efl : 1; 602 + u64 ilk : 1; 603 + u64 lbc : 1; 604 + u64 pem : 1; 605 + u64 pom : 1; 606 + u64 ucd : 1; 607 + u64 zctl : 1; 608 + u64 lbm : 1; 609 + u64 nps_pkt : 1; 610 + u64 nps_core : 1; 611 + #else 612 + u64 nps_core : 1; 613 + 
u64 nps_pkt : 1; 614 + u64 lbm : 1; 615 + u64 zctl: 1; 616 + u64 ucd : 1; 617 + u64 pom : 1; 618 + u64 pem : 1; 619 + u64 lbc : 1; 620 + u64 ilk : 1; 621 + u64 efl : 1; 622 + u64 zqm : 1; 623 + u64 aqm : 1; 624 + u64 bmi : 1; 625 + u64 bmo : 1; 626 + u64 emu : 4; 627 + u64 mbox : 1; 628 + u64 ocla : 1; 629 + u64 raz : 43; 630 + u64 resend : 1; 631 + #endif 632 + } s; 633 + }; 634 + 635 + /** 636 + * struct efl_core_int - EFL Interrupt Registers 637 + * @epci_decode_err: EPCI decoded a transacation that was unknown 638 + * This error should only occurred when there is a micrcode/SE error 639 + * and should be considered fatal 640 + * @ae_err: An AE uncorrectable error occurred. 641 + * See EFL_CORE(0..3)_AE_ERR_INT 642 + * @se_err: An SE uncorrectable error occurred. 643 + * See EFL_CORE(0..3)_SE_ERR_INT 644 + * @dbe: Double-bit error occurred in EFL 645 + * @sbe: Single-bit error occurred in EFL 646 + * @d_left: Asserted when new POM-Header-BMI-data is 647 + * being sent to an Exec, and that Exec has Not read all BMI 648 + * data associated with the previous POM header 649 + * @len_ovr: Asserted when an Exec-Read is issued that is more than 650 + * 14 greater in length that the BMI data left to be read 651 + */ 652 + union efl_core_int { 653 + u64 value; 654 + struct { 655 + #if (defined(__BIG_ENDIAN_BITFIELD)) 656 + u64 raz : 57; 657 + u64 epci_decode_err : 1; 658 + u64 ae_err : 1; 659 + u64 se_err : 1; 660 + u64 dbe : 1; 661 + u64 sbe : 1; 662 + u64 d_left : 1; 663 + u64 len_ovr : 1; 664 + #else 665 + u64 len_ovr : 1; 666 + u64 d_left : 1; 667 + u64 sbe : 1; 668 + u64 dbe : 1; 669 + u64 se_err : 1; 670 + u64 ae_err : 1; 671 + u64 epci_decode_err : 1; 672 + u64 raz : 57; 673 + #endif 674 + } s; 675 + }; 676 + 677 + /** 678 + * struct efl_core_int_ena_w1s - EFL core interrupt enable set register 679 + * @epci_decode_err: Reads or sets enable for 680 + * EFL_CORE(0..3)_INT[EPCI_DECODE_ERR]. 
681 + * @d_left: Reads or sets enable for 682 + * EFL_CORE(0..3)_INT[D_LEFT]. 683 + * @len_ovr: Reads or sets enable for 684 + * EFL_CORE(0..3)_INT[LEN_OVR]. 685 + */ 686 + union efl_core_int_ena_w1s { 687 + u64 value; 688 + struct { 689 + #if (defined(__BIG_ENDIAN_BITFIELD)) 690 + u64 raz_7_63 : 57; 691 + u64 epci_decode_err : 1; 692 + u64 raz_2_5 : 4; 693 + u64 d_left : 1; 694 + u64 len_ovr : 1; 695 + #else 696 + u64 len_ovr : 1; 697 + u64 d_left : 1; 698 + u64 raz_2_5 : 4; 699 + u64 epci_decode_err : 1; 700 + u64 raz_7_63 : 57; 701 + #endif 702 + } s; 703 + }; 704 + 705 + /** 706 + * struct efl_rnm_ctl_status - RNM Control and Status Register 707 + * @ent_sel: Select input to RNM FIFO 708 + * @exp_ent: Exported entropy enable for random number generator 709 + * @rng_rst: Reset to RNG. Setting this bit to 1 cancels the generation 710 + * of the current random number. 711 + * @rnm_rst: Reset the RNM. Setting this bit to 1 clears all sorted numbers 712 + * in the random number memory. 713 + * @rng_en: Enabled the output of the RNG. 714 + * @ent_en: Entropy enable for random number generator. 715 + */ 716 + union efl_rnm_ctl_status { 717 + u64 value; 718 + struct { 719 + #if (defined(__BIG_ENDIAN_BITFIELD)) 720 + u64 raz_9_63 : 55; 721 + u64 ent_sel : 4; 722 + u64 exp_ent : 1; 723 + u64 rng_rst : 1; 724 + u64 rnm_rst : 1; 725 + u64 rng_en : 1; 726 + u64 ent_en : 1; 727 + #else 728 + u64 ent_en : 1; 729 + u64 rng_en : 1; 730 + u64 rnm_rst : 1; 731 + u64 rng_rst : 1; 732 + u64 exp_ent : 1; 733 + u64 ent_sel : 4; 734 + u64 raz_9_63 : 55; 735 + #endif 736 + } s; 737 + }; 738 + 739 + /** 740 + * struct bmi_ctl - BMI control register 741 + * @ilk_hdrq_thrsh: Maximum number of header queue locations 742 + * that ILK packets may consume. When the threshold is 743 + * exceeded ILK_XOFF is sent to the BMI_X2P_ARB. 744 + * @nps_hdrq_thrsh: Maximum number of header queue locations 745 + * that NPS packets may consume. 
When the threshold is 746 + * exceeded NPS_XOFF is sent to the BMI_X2P_ARB. 747 + * @totl_hdrq_thrsh: Maximum number of header queue locations 748 + * that the sum of ILK and NPS packets may consume. 749 + * @ilk_free_thrsh: Maximum number of buffers that ILK packet 750 + * flows may consume before ILK_XOFF is sent to the BMI_X2P_ARB. 751 + * @nps_free_thrsh: Maximum number of buffers that NPS packet 752 + * flows may consume before NPS XOFF is sent to the BMI_X2p_ARB. 753 + * @totl_free_thrsh: Maximum number of buffers that bot ILK and NPS 754 + * packet flows may consume before both NPS_XOFF and ILK_XOFF 755 + * are asserted to the BMI_X2P_ARB. 756 + * @max_pkt_len: Maximum packet length, integral number of 256B 757 + * buffers. 758 + */ 759 + union bmi_ctl { 760 + u64 value; 761 + struct { 762 + #if (defined(__BIG_ENDIAN_BITFIELD)) 763 + u64 raz_56_63 : 8; 764 + u64 ilk_hdrq_thrsh : 8; 765 + u64 nps_hdrq_thrsh : 8; 766 + u64 totl_hdrq_thrsh : 8; 767 + u64 ilk_free_thrsh : 8; 768 + u64 nps_free_thrsh : 8; 769 + u64 totl_free_thrsh : 8; 770 + u64 max_pkt_len : 8; 771 + #else 772 + u64 max_pkt_len : 8; 773 + u64 totl_free_thrsh : 8; 774 + u64 nps_free_thrsh : 8; 775 + u64 ilk_free_thrsh : 8; 776 + u64 totl_hdrq_thrsh : 8; 777 + u64 nps_hdrq_thrsh : 8; 778 + u64 ilk_hdrq_thrsh : 8; 779 + u64 raz_56_63 : 8; 780 + #endif 781 + } s; 782 + }; 783 + 784 + /** 785 + * struct bmi_int_ena_w1s - BMI interrupt enable set register 786 + * @ilk_req_oflw: Reads or sets enable for 787 + * BMI_INT[ILK_REQ_OFLW]. 788 + * @nps_req_oflw: Reads or sets enable for 789 + * BMI_INT[NPS_REQ_OFLW]. 790 + * @fpf_undrrn: Reads or sets enable for 791 + * BMI_INT[FPF_UNDRRN]. 792 + * @eop_err_ilk: Reads or sets enable for 793 + * BMI_INT[EOP_ERR_ILK]. 794 + * @eop_err_nps: Reads or sets enable for 795 + * BMI_INT[EOP_ERR_NPS]. 796 + * @sop_err_ilk: Reads or sets enable for 797 + * BMI_INT[SOP_ERR_ILK]. 798 + * @sop_err_nps: Reads or sets enable for 799 + * BMI_INT[SOP_ERR_NPS]. 
800 + * @pkt_rcv_err_ilk: Reads or sets enable for 801 + * BMI_INT[PKT_RCV_ERR_ILK]. 802 + * @pkt_rcv_err_nps: Reads or sets enable for 803 + * BMI_INT[PKT_RCV_ERR_NPS]. 804 + * @max_len_err_ilk: Reads or sets enable for 805 + * BMI_INT[MAX_LEN_ERR_ILK]. 806 + * @max_len_err_nps: Reads or sets enable for 807 + * BMI_INT[MAX_LEN_ERR_NPS]. 808 + */ 809 + union bmi_int_ena_w1s { 810 + u64 value; 811 + struct { 812 + #if (defined(__BIG_ENDIAN_BITFIELD)) 813 + u64 raz_13_63 : 51; 814 + u64 ilk_req_oflw : 1; 815 + u64 nps_req_oflw : 1; 816 + u64 raz_10 : 1; 817 + u64 raz_9 : 1; 818 + u64 fpf_undrrn : 1; 819 + u64 eop_err_ilk : 1; 820 + u64 eop_err_nps : 1; 821 + u64 sop_err_ilk : 1; 822 + u64 sop_err_nps : 1; 823 + u64 pkt_rcv_err_ilk : 1; 824 + u64 pkt_rcv_err_nps : 1; 825 + u64 max_len_err_ilk : 1; 826 + u64 max_len_err_nps : 1; 827 + #else 828 + u64 max_len_err_nps : 1; 829 + u64 max_len_err_ilk : 1; 830 + u64 pkt_rcv_err_nps : 1; 831 + u64 pkt_rcv_err_ilk : 1; 832 + u64 sop_err_nps : 1; 833 + u64 sop_err_ilk : 1; 834 + u64 eop_err_nps : 1; 835 + u64 eop_err_ilk : 1; 836 + u64 fpf_undrrn : 1; 837 + u64 raz_9 : 1; 838 + u64 raz_10 : 1; 839 + u64 nps_req_oflw : 1; 840 + u64 ilk_req_oflw : 1; 841 + u64 raz_13_63 : 51; 842 + #endif 843 + } s; 844 + }; 845 + 846 + /** 847 + * struct bmo_ctl2 - BMO Control2 Register 848 + * @arb_sel: Determines P2X Arbitration 849 + * @ilk_buf_thrsh: Maximum number of buffers that the 850 + * ILK packet flows may consume before ILK XOFF is 851 + * asserted to the POM. 852 + * @nps_slc_buf_thrsh: Maximum number of buffers that the 853 + * NPS_SLC packet flow may consume before NPS_SLC XOFF is 854 + * asserted to the POM. 855 + * @nps_uns_buf_thrsh: Maximum number of buffers that the 856 + * NPS_UNS packet flow may consume before NPS_UNS XOFF is 857 + * asserted to the POM. 
858 + * @totl_buf_thrsh: Maximum number of buffers that ILK, NPS_UNS and 859 + * NPS_SLC packet flows may consume before NPS_UNS XOFF, NSP_SLC and 860 + * ILK_XOFF are all asserted POM. 861 + */ 862 + union bmo_ctl2 { 863 + u64 value; 864 + struct { 865 + #if (defined(__BIG_ENDIAN_BITFIELD)) 866 + u64 arb_sel : 1; 867 + u64 raz_32_62 : 31; 868 + u64 ilk_buf_thrsh : 8; 869 + u64 nps_slc_buf_thrsh : 8; 870 + u64 nps_uns_buf_thrsh : 8; 871 + u64 totl_buf_thrsh : 8; 872 + #else 873 + u64 totl_buf_thrsh : 8; 874 + u64 nps_uns_buf_thrsh : 8; 875 + u64 nps_slc_buf_thrsh : 8; 876 + u64 ilk_buf_thrsh : 8; 877 + u64 raz_32_62 : 31; 878 + u64 arb_sel : 1; 879 + #endif 880 + } s; 881 + }; 882 + 883 + /** 884 + * struct pom_int_ena_w1s - POM interrupt enable set register 885 + * @illegal_intf: Reads or sets enable for POM_INT[ILLEGAL_INTF]. 886 + * @illegal_dport: Reads or sets enable for POM_INT[ILLEGAL_DPORT]. 887 + */ 888 + union pom_int_ena_w1s { 889 + u64 value; 890 + struct { 891 + #if (defined(__BIG_ENDIAN_BITFIELD)) 892 + u64 raz2 : 60; 893 + u64 illegal_intf : 1; 894 + u64 illegal_dport : 1; 895 + u64 raz1 : 1; 896 + u64 raz0 : 1; 897 + #else 898 + u64 raz0 : 1; 899 + u64 raz1 : 1; 900 + u64 illegal_dport : 1; 901 + u64 illegal_intf : 1; 902 + u64 raz2 : 60; 903 + #endif 904 + } s; 905 + }; 906 + 907 + /** 908 + * struct lbc_inval_ctl - LBC invalidation control register 909 + * @wait_timer: Wait timer for wait state. [WAIT_TIMER] must 910 + * always be written with its reset value. 911 + * @cam_inval_start: Software should write [CAM_INVAL_START]=1 912 + * to initiate an LBC cache invalidation. After this, software 913 + * should read LBC_INVAL_STATUS until LBC_INVAL_STATUS[DONE] is set. 
914 + * LBC hardware clears [CAVM_INVAL_START] before software can 915 + * observed LBC_INVAL_STATUS[DONE] to be set 916 + */ 917 + union lbc_inval_ctl { 918 + u64 value; 919 + struct { 920 + #if (defined(__BIG_ENDIAN_BITFIELD)) 921 + u64 raz2 : 48; 922 + u64 wait_timer : 8; 923 + u64 raz1 : 6; 924 + u64 cam_inval_start : 1; 925 + u64 raz0 : 1; 926 + #else 927 + u64 raz0 : 1; 928 + u64 cam_inval_start : 1; 929 + u64 raz1 : 6; 930 + u64 wait_timer : 8; 931 + u64 raz2 : 48; 932 + #endif 933 + } s; 934 + }; 935 + 936 + /** 937 + * struct lbc_int_ena_w1s - LBC interrupt enable set register 938 + * @cam_hard_err: Reads or sets enable for LBC_INT[CAM_HARD_ERR]. 939 + * @cam_inval_abort: Reads or sets enable for LBC_INT[CAM_INVAL_ABORT]. 940 + * @over_fetch_err: Reads or sets enable for LBC_INT[OVER_FETCH_ERR]. 941 + * @cache_line_to_err: Reads or sets enable for 942 + * LBC_INT[CACHE_LINE_TO_ERR]. 943 + * @cam_soft_err: Reads or sets enable for 944 + * LBC_INT[CAM_SOFT_ERR]. 945 + * @dma_rd_err: Reads or sets enable for 946 + * LBC_INT[DMA_RD_ERR]. 947 + */ 948 + union lbc_int_ena_w1s { 949 + u64 value; 950 + struct { 951 + #if (defined(__BIG_ENDIAN_BITFIELD)) 952 + u64 raz_10_63 : 54; 953 + u64 cam_hard_err : 1; 954 + u64 cam_inval_abort : 1; 955 + u64 over_fetch_err : 1; 956 + u64 cache_line_to_err : 1; 957 + u64 raz_2_5 : 4; 958 + u64 cam_soft_err : 1; 959 + u64 dma_rd_err : 1; 960 + #else 961 + u64 dma_rd_err : 1; 962 + u64 cam_soft_err : 1; 963 + u64 raz_2_5 : 4; 964 + u64 cache_line_to_err : 1; 965 + u64 over_fetch_err : 1; 966 + u64 cam_inval_abort : 1; 967 + u64 cam_hard_err : 1; 968 + u64 raz_10_63 : 54; 969 + #endif 970 + } s; 971 + }; 972 + 973 + /** 974 + * struct lbc_int - LBC interrupt summary register 975 + * @cam_hard_err: indicates a fatal hardware error. 976 + * It requires system reset. 
977 + * When [CAM_HARD_ERR] is set, LBC stops logging any new information in 978 + * LBC_POM_MISS_INFO_LOG, 979 + * LBC_POM_MISS_ADDR_LOG, 980 + * LBC_EFL_MISS_INFO_LOG, and 981 + * LBC_EFL_MISS_ADDR_LOG. 982 + * Software should sample them. 983 + * @cam_inval_abort: indicates a fatal hardware error. 984 + * System reset is required. 985 + * @over_fetch_err: indicates a fatal hardware error 986 + * System reset is required 987 + * @cache_line_to_err: is a debug feature. 988 + * This timeout interrupt bit tells the software that 989 + * a cacheline in LBC has non-zero usage and the context 990 + * has not been used for greater than the 991 + * LBC_TO_CNT[TO_CNT] time interval. 992 + * @sbe: Memory SBE error. This is recoverable via ECC. 993 + * See LBC_ECC_INT for more details. 994 + * @dbe: Memory DBE error. This is a fatal and requires a 995 + * system reset. 996 + * @pref_dat_len_mismatch_err: Summary bit for context length 997 + * mismatch errors. 998 + * @rd_dat_len_mismatch_err: Summary bit for SE read data length 999 + * greater than data prefect length errors. 1000 + * @cam_soft_err: is recoverable. Software must complete a 1001 + * LBC_INVAL_CTL[CAM_INVAL_START] invalidation sequence and 1002 + * then clear [CAM_SOFT_ERR]. 1003 + * @dma_rd_err: A context prefect read of host memory returned with 1004 + * a read error. 
1005 + */ 1006 + union lbc_int { 1007 + u64 value; 1008 + struct { 1009 + #if (defined(__BIG_ENDIAN_BITFIELD)) 1010 + u64 raz_10_63 : 54; 1011 + u64 cam_hard_err : 1; 1012 + u64 cam_inval_abort : 1; 1013 + u64 over_fetch_err : 1; 1014 + u64 cache_line_to_err : 1; 1015 + u64 sbe : 1; 1016 + u64 dbe : 1; 1017 + u64 pref_dat_len_mismatch_err : 1; 1018 + u64 rd_dat_len_mismatch_err : 1; 1019 + u64 cam_soft_err : 1; 1020 + u64 dma_rd_err : 1; 1021 + #else 1022 + u64 dma_rd_err : 1; 1023 + u64 cam_soft_err : 1; 1024 + u64 rd_dat_len_mismatch_err : 1; 1025 + u64 pref_dat_len_mismatch_err : 1; 1026 + u64 dbe : 1; 1027 + u64 sbe : 1; 1028 + u64 cache_line_to_err : 1; 1029 + u64 over_fetch_err : 1; 1030 + u64 cam_inval_abort : 1; 1031 + u64 cam_hard_err : 1; 1032 + u64 raz_10_63 : 54; 1033 + #endif 1034 + } s; 1035 + }; 1036 + 1037 + /** 1038 + * struct lbc_inval_status: LBC Invalidation status register 1039 + * @cam_clean_entry_complete_cnt: The number of entries that are 1040 + * cleaned up successfully. 1041 + * @cam_clean_entry_cnt: The number of entries that have the CAM 1042 + * inval command issued. 1043 + * @cam_inval_state: cam invalidation FSM state 1044 + * @cam_inval_abort: cam invalidation abort 1045 + * @cam_rst_rdy: lbc_cam reset ready 1046 + * @done: LBC clears [DONE] when 1047 + * LBC_INVAL_CTL[CAM_INVAL_START] is written with a one, 1048 + * and sets [DONE] when it completes the invalidation 1049 + * sequence. 
1050 + */ 1051 + union lbc_inval_status { 1052 + u64 value; 1053 + struct { 1054 + #if (defined(__BIG_ENDIAN_BITFIELD)) 1055 + u64 raz3 : 23; 1056 + u64 cam_clean_entry_complete_cnt : 9; 1057 + u64 raz2 : 7; 1058 + u64 cam_clean_entry_cnt : 9; 1059 + u64 raz1 : 5; 1060 + u64 cam_inval_state : 3; 1061 + u64 raz0 : 5; 1062 + u64 cam_inval_abort : 1; 1063 + u64 cam_rst_rdy : 1; 1064 + u64 done : 1; 1065 + #else 1066 + u64 done : 1; 1067 + u64 cam_rst_rdy : 1; 1068 + u64 cam_inval_abort : 1; 1069 + u64 raz0 : 5; 1070 + u64 cam_inval_state : 3; 1071 + u64 raz1 : 5; 1072 + u64 cam_clean_entry_cnt : 9; 1073 + u64 raz2 : 7; 1074 + u64 cam_clean_entry_complete_cnt : 9; 1075 + u64 raz3 : 23; 1076 + #endif 1077 + } s; 1078 + }; 1079 + 1080 + #endif /* __NITROX_CSR_H */
+175
drivers/crypto/cavium/nitrox/nitrox_dev.h
··· 1 + #ifndef __NITROX_DEV_H 2 + #define __NITROX_DEV_H 3 + 4 + #include <linux/dma-mapping.h> 5 + #include <linux/interrupt.h> 6 + #include <linux/pci.h> 7 + 8 + #define VERSION_LEN 32 9 + 10 + struct nitrox_cmdq { 11 + /* command queue lock */ 12 + spinlock_t cmdq_lock; 13 + /* response list lock */ 14 + spinlock_t response_lock; 15 + /* backlog list lock */ 16 + spinlock_t backlog_lock; 17 + 18 + /* request submitted to chip, in progress */ 19 + struct list_head response_head; 20 + /* hw queue full, hold in backlog list */ 21 + struct list_head backlog_head; 22 + 23 + /* doorbell address */ 24 + u8 __iomem *dbell_csr_addr; 25 + /* base address of the queue */ 26 + u8 *head; 27 + 28 + struct nitrox_device *ndev; 29 + /* flush pending backlog commands */ 30 + struct work_struct backlog_qflush; 31 + 32 + /* requests posted waiting for completion */ 33 + atomic_t pending_count; 34 + /* requests in backlog queues */ 35 + atomic_t backlog_count; 36 + 37 + /* command size 32B/64B */ 38 + u8 instr_size; 39 + u8 qno; 40 + u32 qsize; 41 + 42 + /* unaligned addresses */ 43 + u8 *head_unaligned; 44 + dma_addr_t dma_unaligned; 45 + /* dma address of the base */ 46 + dma_addr_t dma; 47 + }; 48 + 49 + struct nitrox_hw { 50 + /* firmware version */ 51 + char fw_name[VERSION_LEN]; 52 + 53 + u16 vendor_id; 54 + u16 device_id; 55 + u8 revision_id; 56 + 57 + /* CNN55XX cores */ 58 + u8 se_cores; 59 + u8 ae_cores; 60 + u8 zip_cores; 61 + }; 62 + 63 + #define MAX_MSIX_VECTOR_NAME 20 64 + /** 65 + * vectors for queues (64 AE, 64 SE and 64 ZIP) and 66 + * error condition/mailbox. 
67 + */ 68 + #define MAX_MSIX_VECTORS 192 69 + 70 + struct nitrox_msix { 71 + struct msix_entry *entries; 72 + char **names; 73 + DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS); 74 + u32 nr_entries; 75 + }; 76 + 77 + struct bh_data { 78 + /* slc port completion count address */ 79 + u8 __iomem *completion_cnt_csr_addr; 80 + 81 + struct nitrox_cmdq *cmdq; 82 + struct tasklet_struct resp_handler; 83 + }; 84 + 85 + struct nitrox_bh { 86 + struct bh_data *slc; 87 + }; 88 + 89 + /* NITROX-5 driver state */ 90 + #define NITROX_UCODE_LOADED 0 91 + #define NITROX_READY 1 92 + 93 + /* command queue size */ 94 + #define DEFAULT_CMD_QLEN 2048 95 + /* command timeout in milliseconds */ 96 + #define CMD_TIMEOUT 2000 97 + 98 + #define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev)) 99 + #define PF_MODE 0 100 + 101 + #define NITROX_CSR_ADDR(ndev, offset) \ 102 + ((ndev)->bar_addr + (offset)) 103 + 104 + /** 105 + * struct nitrox_device - NITROX Device Information. 106 + * @list: pointer to linked list of devices 107 + * @bar_addr: iomap address 108 + * @pdev: PCI device information 109 + * @status: NITROX status 110 + * @timeout: Request timeout in jiffies 111 + * @refcnt: Device usage count 112 + * @idx: device index (0..N) 113 + * @node: NUMA node id attached 114 + * @qlen: Command queue length 115 + * @nr_queues: Number of command queues 116 + * @ctx_pool: DMA pool for crypto context 117 + * @pkt_cmdqs: SE Command queues 118 + * @msix: MSI-X information 119 + * @bh: post processing work 120 + * @hw: hardware information 121 + */ 122 + struct nitrox_device { 123 + struct list_head list; 124 + 125 + u8 __iomem *bar_addr; 126 + struct pci_dev *pdev; 127 + 128 + unsigned long status; 129 + unsigned long timeout; 130 + refcount_t refcnt; 131 + 132 + u8 idx; 133 + int node; 134 + u16 qlen; 135 + u16 nr_queues; 136 + 137 + struct dma_pool *ctx_pool; 138 + struct nitrox_cmdq *pkt_cmdqs; 139 + 140 + struct nitrox_msix msix; 141 + struct nitrox_bh bh; 142 + 143 + struct nitrox_hw hw; 144 + }; 
145 + 146 + /** 147 + * nitrox_read_csr - Read from device register 148 + * @ndev: NITROX device 149 + * @offset: offset of the register to read 150 + * 151 + * Returns: value read 152 + */ 153 + static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset) 154 + { 155 + return readq(ndev->bar_addr + offset); 156 + } 157 + 158 + /** 159 + * nitrox_write_csr - Write to device register 160 + * @ndev: NITROX device 161 + * @offset: offset of the register to write 162 + * @value: value to write 163 + */ 164 + static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset, 165 + u64 value) 166 + { 167 + writeq(value, (ndev->bar_addr + offset)); 168 + } 169 + 170 + static inline int nitrox_ready(struct nitrox_device *ndev) 171 + { 172 + return test_bit(NITROX_READY, &ndev->status); 173 + } 174 + 175 + #endif /* __NITROX_DEV_H */
+401
drivers/crypto/cavium/nitrox/nitrox_hal.c
··· 1 + #include <linux/delay.h> 2 + 3 + #include "nitrox_dev.h" 4 + #include "nitrox_csr.h" 5 + 6 + /** 7 + * emu_enable_cores - Enable EMU cluster cores. 8 + * @ndev: N5 device 9 + */ 10 + static void emu_enable_cores(struct nitrox_device *ndev) 11 + { 12 + union emu_se_enable emu_se; 13 + union emu_ae_enable emu_ae; 14 + int i; 15 + 16 + /* AE cores 20 per cluster */ 17 + emu_ae.value = 0; 18 + emu_ae.s.enable = 0xfffff; 19 + 20 + /* SE cores 16 per cluster */ 21 + emu_se.value = 0; 22 + emu_se.s.enable = 0xffff; 23 + 24 + /* enable per cluster cores */ 25 + for (i = 0; i < NR_CLUSTERS; i++) { 26 + nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value); 27 + nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value); 28 + } 29 + } 30 + 31 + /** 32 + * nitrox_config_emu_unit - configure EMU unit. 33 + * @ndev: N5 device 34 + */ 35 + void nitrox_config_emu_unit(struct nitrox_device *ndev) 36 + { 37 + union emu_wd_int_ena_w1s emu_wd_int; 38 + union emu_ge_int_ena_w1s emu_ge_int; 39 + u64 offset; 40 + int i; 41 + 42 + /* enable cores */ 43 + emu_enable_cores(ndev); 44 + 45 + /* enable general error and watch dog interrupts */ 46 + emu_ge_int.value = 0; 47 + emu_ge_int.s.se_ge = 0xffff; 48 + emu_ge_int.s.ae_ge = 0xfffff; 49 + emu_wd_int.value = 0; 50 + emu_wd_int.s.se_wd = 1; 51 + 52 + for (i = 0; i < NR_CLUSTERS; i++) { 53 + offset = EMU_WD_INT_ENA_W1SX(i); 54 + nitrox_write_csr(ndev, offset, emu_wd_int.value); 55 + offset = EMU_GE_INT_ENA_W1SX(i); 56 + nitrox_write_csr(ndev, offset, emu_ge_int.value); 57 + } 58 + } 59 + 60 + static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring) 61 + { 62 + union nps_pkt_in_instr_ctl pkt_in_ctl; 63 + union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; 64 + union nps_pkt_in_done_cnts pkt_in_cnts; 65 + u64 offset; 66 + 67 + offset = NPS_PKT_IN_INSTR_CTLX(ring); 68 + /* disable the ring */ 69 + pkt_in_ctl.value = nitrox_read_csr(ndev, offset); 70 + pkt_in_ctl.s.enb = 0; 71 + nitrox_write_csr(ndev, offset, pkt_in_ctl.value); 
72 + usleep_range(100, 150); 73 + 74 + /* wait to clear [ENB] */ 75 + do { 76 + pkt_in_ctl.value = nitrox_read_csr(ndev, offset); 77 + } while (pkt_in_ctl.s.enb); 78 + 79 + /* clear off door bell counts */ 80 + offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring); 81 + pkt_in_dbell.value = 0; 82 + pkt_in_dbell.s.dbell = 0xffffffff; 83 + nitrox_write_csr(ndev, offset, pkt_in_dbell.value); 84 + 85 + /* clear done counts */ 86 + offset = NPS_PKT_IN_DONE_CNTSX(ring); 87 + pkt_in_cnts.value = nitrox_read_csr(ndev, offset); 88 + nitrox_write_csr(ndev, offset, pkt_in_cnts.value); 89 + usleep_range(50, 100); 90 + } 91 + 92 + void enable_pkt_input_ring(struct nitrox_device *ndev, int ring) 93 + { 94 + union nps_pkt_in_instr_ctl pkt_in_ctl; 95 + u64 offset; 96 + 97 + /* 64-byte instruction size */ 98 + offset = NPS_PKT_IN_INSTR_CTLX(ring); 99 + pkt_in_ctl.value = nitrox_read_csr(ndev, offset); 100 + pkt_in_ctl.s.is64b = 1; 101 + pkt_in_ctl.s.enb = 1; 102 + nitrox_write_csr(ndev, offset, pkt_in_ctl.value); 103 + 104 + /* wait for set [ENB] */ 105 + do { 106 + pkt_in_ctl.value = nitrox_read_csr(ndev, offset); 107 + } while (!pkt_in_ctl.s.enb); 108 + } 109 + 110 + /** 111 + * nitrox_config_pkt_input_rings - configure Packet Input Rings 112 + * @ndev: N5 device 113 + */ 114 + void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) 115 + { 116 + int i; 117 + 118 + for (i = 0; i < ndev->nr_queues; i++) { 119 + struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i]; 120 + union nps_pkt_in_instr_rsize pkt_in_rsize; 121 + u64 offset; 122 + 123 + reset_pkt_input_ring(ndev, i); 124 + 125 + /* configure ring base address 16-byte aligned, 126 + * size and interrupt threshold. 
127 + */ 128 + offset = NPS_PKT_IN_INSTR_BADDRX(i); 129 + nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BADDRX(i), cmdq->dma); 130 + 131 + /* configure ring size */ 132 + offset = NPS_PKT_IN_INSTR_RSIZEX(i); 133 + pkt_in_rsize.value = 0; 134 + pkt_in_rsize.s.rsize = ndev->qlen; 135 + nitrox_write_csr(ndev, offset, pkt_in_rsize.value); 136 + 137 + /* set high threshold for pkt input ring interrupts */ 138 + offset = NPS_PKT_IN_INT_LEVELSX(i); 139 + nitrox_write_csr(ndev, offset, 0xffffffff); 140 + 141 + enable_pkt_input_ring(ndev, i); 142 + } 143 + } 144 + 145 + static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port) 146 + { 147 + union nps_pkt_slc_ctl pkt_slc_ctl; 148 + union nps_pkt_slc_cnts pkt_slc_cnts; 149 + u64 offset; 150 + 151 + /* disable slc port */ 152 + offset = NPS_PKT_SLC_CTLX(port); 153 + pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); 154 + pkt_slc_ctl.s.enb = 0; 155 + nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); 156 + usleep_range(100, 150); 157 + 158 + /* wait to clear [ENB] */ 159 + do { 160 + pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); 161 + } while (pkt_slc_ctl.s.enb); 162 + 163 + /* clear slc counters */ 164 + offset = NPS_PKT_SLC_CNTSX(port); 165 + pkt_slc_cnts.value = nitrox_read_csr(ndev, offset); 166 + nitrox_write_csr(ndev, offset, pkt_slc_cnts.value); 167 + usleep_range(50, 100); 168 + } 169 + 170 + void enable_pkt_solicit_port(struct nitrox_device *ndev, int port) 171 + { 172 + union nps_pkt_slc_ctl pkt_slc_ctl; 173 + u64 offset; 174 + 175 + offset = NPS_PKT_SLC_CTLX(port); 176 + pkt_slc_ctl.value = 0; 177 + pkt_slc_ctl.s.enb = 1; 178 + 179 + /* 180 + * 8 trailing 0x00 bytes will be added 181 + * to the end of the outgoing packet. 
182 + */ 183 + pkt_slc_ctl.s.z = 1; 184 + /* enable response header */ 185 + pkt_slc_ctl.s.rh = 1; 186 + nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); 187 + 188 + /* wait to set [ENB] */ 189 + do { 190 + pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); 191 + } while (!pkt_slc_ctl.s.enb); 192 + } 193 + 194 + static void config_single_pkt_solicit_port(struct nitrox_device *ndev, 195 + int port) 196 + { 197 + union nps_pkt_slc_int_levels pkt_slc_int; 198 + u64 offset; 199 + 200 + reset_pkt_solicit_port(ndev, port); 201 + 202 + offset = NPS_PKT_SLC_INT_LEVELSX(port); 203 + pkt_slc_int.value = 0; 204 + /* time interrupt threshold */ 205 + pkt_slc_int.s.timet = 0x3fffff; 206 + nitrox_write_csr(ndev, offset, pkt_slc_int.value); 207 + 208 + enable_pkt_solicit_port(ndev, port); 209 + } 210 + 211 + void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev) 212 + { 213 + int i; 214 + 215 + for (i = 0; i < ndev->nr_queues; i++) 216 + config_single_pkt_solicit_port(ndev, i); 217 + } 218 + 219 + /** 220 + * enable_nps_interrupts - enable NPS interrutps 221 + * @ndev: N5 device. 222 + * 223 + * This includes NPS core, packet in and slc interrupts. 
224 + */ 225 + static void enable_nps_interrupts(struct nitrox_device *ndev) 226 + { 227 + union nps_core_int_ena_w1s core_int; 228 + 229 + /* NPS core interrutps */ 230 + core_int.value = 0; 231 + core_int.s.host_wr_err = 1; 232 + core_int.s.host_wr_timeout = 1; 233 + core_int.s.exec_wr_timeout = 1; 234 + core_int.s.npco_dma_malform = 1; 235 + core_int.s.host_nps_wr_err = 1; 236 + nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value); 237 + 238 + /* NPS packet in ring interrupts */ 239 + nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); 240 + nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); 241 + nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); 242 + /* NPS packet slc port interrupts */ 243 + nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); 244 + nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); 245 + nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL)); 246 + } 247 + 248 + void nitrox_config_nps_unit(struct nitrox_device *ndev) 249 + { 250 + union nps_core_gbl_vfcfg core_gbl_vfcfg; 251 + 252 + /* endian control information */ 253 + nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL); 254 + 255 + /* disable ILK interface */ 256 + core_gbl_vfcfg.value = 0; 257 + core_gbl_vfcfg.s.ilk_disable = 1; 258 + core_gbl_vfcfg.s.cfg = PF_MODE; 259 + nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); 260 + /* config input and solicit ports */ 261 + nitrox_config_pkt_input_rings(ndev); 262 + nitrox_config_pkt_solicit_ports(ndev); 263 + 264 + /* enable interrupts */ 265 + enable_nps_interrupts(ndev); 266 + } 267 + 268 + void nitrox_config_pom_unit(struct nitrox_device *ndev) 269 + { 270 + union pom_int_ena_w1s pom_int; 271 + int i; 272 + 273 + /* enable pom interrupts */ 274 + pom_int.value = 0; 275 + pom_int.s.illegal_dport = 1; 276 + nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value); 277 + 278 + /* enable perf counters */ 279 + for (i = 0; i < ndev->hw.se_cores; i++) 280 + 
nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i)); 281 + } 282 + 283 + /** 284 + * nitrox_config_rand_unit - enable N5 random number unit 285 + * @ndev: N5 device 286 + */ 287 + void nitrox_config_rand_unit(struct nitrox_device *ndev) 288 + { 289 + union efl_rnm_ctl_status efl_rnm_ctl; 290 + u64 offset; 291 + 292 + offset = EFL_RNM_CTL_STATUS; 293 + efl_rnm_ctl.value = nitrox_read_csr(ndev, offset); 294 + efl_rnm_ctl.s.ent_en = 1; 295 + efl_rnm_ctl.s.rng_en = 1; 296 + nitrox_write_csr(ndev, offset, efl_rnm_ctl.value); 297 + } 298 + 299 + void nitrox_config_efl_unit(struct nitrox_device *ndev) 300 + { 301 + int i; 302 + 303 + for (i = 0; i < NR_CLUSTERS; i++) { 304 + union efl_core_int_ena_w1s efl_core_int; 305 + u64 offset; 306 + 307 + /* EFL core interrupts */ 308 + offset = EFL_CORE_INT_ENA_W1SX(i); 309 + efl_core_int.value = 0; 310 + efl_core_int.s.len_ovr = 1; 311 + efl_core_int.s.d_left = 1; 312 + efl_core_int.s.epci_decode_err = 1; 313 + nitrox_write_csr(ndev, offset, efl_core_int.value); 314 + 315 + offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i); 316 + nitrox_write_csr(ndev, offset, (~0ULL)); 317 + offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i); 318 + nitrox_write_csr(ndev, offset, (~0ULL)); 319 + } 320 + } 321 + 322 + void nitrox_config_bmi_unit(struct nitrox_device *ndev) 323 + { 324 + union bmi_ctl bmi_ctl; 325 + union bmi_int_ena_w1s bmi_int_ena; 326 + u64 offset; 327 + 328 + /* no threshold limits for PCIe */ 329 + offset = BMI_CTL; 330 + bmi_ctl.value = nitrox_read_csr(ndev, offset); 331 + bmi_ctl.s.max_pkt_len = 0xff; 332 + bmi_ctl.s.nps_free_thrsh = 0xff; 333 + bmi_ctl.s.nps_hdrq_thrsh = 0x7a; 334 + nitrox_write_csr(ndev, offset, bmi_ctl.value); 335 + 336 + /* enable interrupts */ 337 + offset = BMI_INT_ENA_W1S; 338 + bmi_int_ena.value = 0; 339 + bmi_int_ena.s.max_len_err_nps = 1; 340 + bmi_int_ena.s.pkt_rcv_err_nps = 1; 341 + bmi_int_ena.s.fpf_undrrn = 1; 342 + nitrox_write_csr(ndev, offset, bmi_int_ena.value); 343 + } 344 + 345 + void 
nitrox_config_bmo_unit(struct nitrox_device *ndev) 346 + { 347 + union bmo_ctl2 bmo_ctl2; 348 + u64 offset; 349 + 350 + /* no threshold limits for PCIe */ 351 + offset = BMO_CTL2; 352 + bmo_ctl2.value = nitrox_read_csr(ndev, offset); 353 + bmo_ctl2.s.nps_slc_buf_thrsh = 0xff; 354 + nitrox_write_csr(ndev, offset, bmo_ctl2.value); 355 + } 356 + 357 + void invalidate_lbc(struct nitrox_device *ndev) 358 + { 359 + union lbc_inval_ctl lbc_ctl; 360 + union lbc_inval_status lbc_stat; 361 + u64 offset; 362 + 363 + /* invalidate LBC */ 364 + offset = LBC_INVAL_CTL; 365 + lbc_ctl.value = nitrox_read_csr(ndev, offset); 366 + lbc_ctl.s.cam_inval_start = 1; 367 + nitrox_write_csr(ndev, offset, lbc_ctl.value); 368 + 369 + offset = LBC_INVAL_STATUS; 370 + 371 + do { 372 + lbc_stat.value = nitrox_read_csr(ndev, offset); 373 + } while (!lbc_stat.s.done); 374 + } 375 + 376 + void nitrox_config_lbc_unit(struct nitrox_device *ndev) 377 + { 378 + union lbc_int_ena_w1s lbc_int_ena; 379 + u64 offset; 380 + 381 + invalidate_lbc(ndev); 382 + 383 + /* enable interrupts */ 384 + offset = LBC_INT_ENA_W1S; 385 + lbc_int_ena.value = 0; 386 + lbc_int_ena.s.dma_rd_err = 1; 387 + lbc_int_ena.s.over_fetch_err = 1; 388 + lbc_int_ena.s.cam_inval_abort = 1; 389 + lbc_int_ena.s.cam_hard_err = 1; 390 + nitrox_write_csr(ndev, offset, lbc_int_ena.value); 391 + 392 + offset = LBC_PLM_VF1_64_INT_ENA_W1S; 393 + nitrox_write_csr(ndev, offset, (~0ULL)); 394 + offset = LBC_PLM_VF65_128_INT_ENA_W1S; 395 + nitrox_write_csr(ndev, offset, (~0ULL)); 396 + 397 + offset = LBC_ELM_VF1_64_INT_ENA_W1S; 398 + nitrox_write_csr(ndev, offset, (~0ULL)); 399 + offset = LBC_ELM_VF65_128_INT_ENA_W1S; 400 + nitrox_write_csr(ndev, offset, (~0ULL)); 401 + }
+467
drivers/crypto/cavium/nitrox/nitrox_isr.c
··· 1 + #include <linux/pci.h> 2 + #include <linux/printk.h> 3 + #include <linux/slab.h> 4 + 5 + #include "nitrox_dev.h" 6 + #include "nitrox_csr.h" 7 + #include "nitrox_common.h" 8 + 9 + #define NR_RING_VECTORS 3 10 + #define NPS_CORE_INT_ACTIVE_ENTRY 192 11 + 12 + /** 13 + * nps_pkt_slc_isr - IRQ handler for NPS solicit port 14 + * @irq: irq number 15 + * @data: argument 16 + */ 17 + static irqreturn_t nps_pkt_slc_isr(int irq, void *data) 18 + { 19 + struct bh_data *slc = data; 20 + union nps_pkt_slc_cnts pkt_slc_cnts; 21 + 22 + pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr); 23 + /* New packet on SLC output port */ 24 + if (pkt_slc_cnts.s.slc_int) 25 + tasklet_hi_schedule(&slc->resp_handler); 26 + 27 + return IRQ_HANDLED; 28 + } 29 + 30 + static void clear_nps_core_err_intr(struct nitrox_device *ndev) 31 + { 32 + u64 value; 33 + 34 + /* Write 1 to clear */ 35 + value = nitrox_read_csr(ndev, NPS_CORE_INT); 36 + nitrox_write_csr(ndev, NPS_CORE_INT, value); 37 + 38 + dev_err_ratelimited(DEV(ndev), "NSP_CORE_INT 0x%016llx\n", value); 39 + } 40 + 41 + static void clear_nps_pkt_err_intr(struct nitrox_device *ndev) 42 + { 43 + union nps_pkt_int pkt_int; 44 + unsigned long value, offset; 45 + int i; 46 + 47 + pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT); 48 + dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n", 49 + pkt_int.value); 50 + 51 + if (pkt_int.s.slc_err) { 52 + offset = NPS_PKT_SLC_ERR_TYPE; 53 + value = nitrox_read_csr(ndev, offset); 54 + nitrox_write_csr(ndev, offset, value); 55 + dev_err_ratelimited(DEV(ndev), 56 + "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value); 57 + 58 + offset = NPS_PKT_SLC_RERR_LO; 59 + value = nitrox_read_csr(ndev, offset); 60 + nitrox_write_csr(ndev, offset, value); 61 + /* enable the solicit ports */ 62 + for_each_set_bit(i, &value, BITS_PER_LONG) 63 + enable_pkt_solicit_port(ndev, i); 64 + 65 + dev_err_ratelimited(DEV(ndev), 66 + "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value); 67 + 68 + offset = NPS_PKT_SLC_RERR_HI; 69 + 
value = nitrox_read_csr(ndev, offset); 70 + nitrox_write_csr(ndev, offset, value); 71 + dev_err_ratelimited(DEV(ndev), 72 + "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value); 73 + } 74 + 75 + if (pkt_int.s.in_err) { 76 + offset = NPS_PKT_IN_ERR_TYPE; 77 + value = nitrox_read_csr(ndev, offset); 78 + nitrox_write_csr(ndev, offset, value); 79 + dev_err_ratelimited(DEV(ndev), 80 + "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value); 81 + offset = NPS_PKT_IN_RERR_LO; 82 + value = nitrox_read_csr(ndev, offset); 83 + nitrox_write_csr(ndev, offset, value); 84 + /* enable the input ring */ 85 + for_each_set_bit(i, &value, BITS_PER_LONG) 86 + enable_pkt_input_ring(ndev, i); 87 + 88 + dev_err_ratelimited(DEV(ndev), 89 + "NPS_PKT_IN_RERR_LO 0x%016lx\n", value); 90 + 91 + offset = NPS_PKT_IN_RERR_HI; 92 + value = nitrox_read_csr(ndev, offset); 93 + nitrox_write_csr(ndev, offset, value); 94 + dev_err_ratelimited(DEV(ndev), 95 + "NPS_PKT_IN_RERR_HI 0x%016lx\n", value); 96 + } 97 + } 98 + 99 + static void clear_pom_err_intr(struct nitrox_device *ndev) 100 + { 101 + u64 value; 102 + 103 + value = nitrox_read_csr(ndev, POM_INT); 104 + nitrox_write_csr(ndev, POM_INT, value); 105 + dev_err_ratelimited(DEV(ndev), "POM_INT 0x%016llx\n", value); 106 + } 107 + 108 + static void clear_pem_err_intr(struct nitrox_device *ndev) 109 + { 110 + u64 value; 111 + 112 + value = nitrox_read_csr(ndev, PEM0_INT); 113 + nitrox_write_csr(ndev, PEM0_INT, value); 114 + dev_err_ratelimited(DEV(ndev), "PEM(0)_INT 0x%016llx\n", value); 115 + } 116 + 117 + static void clear_lbc_err_intr(struct nitrox_device *ndev) 118 + { 119 + union lbc_int lbc_int; 120 + u64 value, offset; 121 + int i; 122 + 123 + lbc_int.value = nitrox_read_csr(ndev, LBC_INT); 124 + dev_err_ratelimited(DEV(ndev), "LBC_INT 0x%016llx\n", lbc_int.value); 125 + 126 + if (lbc_int.s.dma_rd_err) { 127 + for (i = 0; i < NR_CLUSTERS; i++) { 128 + offset = EFL_CORE_VF_ERR_INT0X(i); 129 + value = nitrox_read_csr(ndev, offset); 130 + nitrox_write_csr(ndev, offset, 
value); 131 + offset = EFL_CORE_VF_ERR_INT1X(i); 132 + value = nitrox_read_csr(ndev, offset); 133 + nitrox_write_csr(ndev, offset, value); 134 + } 135 + } 136 + 137 + if (lbc_int.s.cam_soft_err) { 138 + dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n"); 139 + invalidate_lbc(ndev); 140 + } 141 + 142 + if (lbc_int.s.pref_dat_len_mismatch_err) { 143 + offset = LBC_PLM_VF1_64_INT; 144 + value = nitrox_read_csr(ndev, offset); 145 + nitrox_write_csr(ndev, offset, value); 146 + offset = LBC_PLM_VF65_128_INT; 147 + value = nitrox_read_csr(ndev, offset); 148 + nitrox_write_csr(ndev, offset, value); 149 + } 150 + 151 + if (lbc_int.s.rd_dat_len_mismatch_err) { 152 + offset = LBC_ELM_VF1_64_INT; 153 + value = nitrox_read_csr(ndev, offset); 154 + nitrox_write_csr(ndev, offset, value); 155 + offset = LBC_ELM_VF65_128_INT; 156 + value = nitrox_read_csr(ndev, offset); 157 + nitrox_write_csr(ndev, offset, value); 158 + } 159 + nitrox_write_csr(ndev, LBC_INT, lbc_int.value); 160 + } 161 + 162 + static void clear_efl_err_intr(struct nitrox_device *ndev) 163 + { 164 + int i; 165 + 166 + for (i = 0; i < NR_CLUSTERS; i++) { 167 + union efl_core_int core_int; 168 + u64 value, offset; 169 + 170 + offset = EFL_CORE_INTX(i); 171 + core_int.value = nitrox_read_csr(ndev, offset); 172 + nitrox_write_csr(ndev, offset, core_int.value); 173 + dev_err_ratelimited(DEV(ndev), "ELF_CORE(%d)_INT 0x%016llx\n", 174 + i, core_int.value); 175 + if (core_int.s.se_err) { 176 + offset = EFL_CORE_SE_ERR_INTX(i); 177 + value = nitrox_read_csr(ndev, offset); 178 + nitrox_write_csr(ndev, offset, value); 179 + } 180 + } 181 + } 182 + 183 + static void clear_bmi_err_intr(struct nitrox_device *ndev) 184 + { 185 + u64 value; 186 + 187 + value = nitrox_read_csr(ndev, BMI_INT); 188 + nitrox_write_csr(ndev, BMI_INT, value); 189 + dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value); 190 + } 191 + 192 + /** 193 + * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts 194 + * @ndev: 
NITROX device 195 + */ 196 + static void clear_nps_core_int_active(struct nitrox_device *ndev) 197 + { 198 + union nps_core_int_active core_int_active; 199 + 200 + core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE); 201 + 202 + if (core_int_active.s.nps_core) 203 + clear_nps_core_err_intr(ndev); 204 + 205 + if (core_int_active.s.nps_pkt) 206 + clear_nps_pkt_err_intr(ndev); 207 + 208 + if (core_int_active.s.pom) 209 + clear_pom_err_intr(ndev); 210 + 211 + if (core_int_active.s.pem) 212 + clear_pem_err_intr(ndev); 213 + 214 + if (core_int_active.s.lbc) 215 + clear_lbc_err_intr(ndev); 216 + 217 + if (core_int_active.s.efl) 218 + clear_efl_err_intr(ndev); 219 + 220 + if (core_int_active.s.bmi) 221 + clear_bmi_err_intr(ndev); 222 + 223 + /* If more work callback the ISR, set resend */ 224 + core_int_active.s.resend = 1; 225 + nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value); 226 + } 227 + 228 + static irqreturn_t nps_core_int_isr(int irq, void *data) 229 + { 230 + struct nitrox_device *ndev = data; 231 + 232 + clear_nps_core_int_active(ndev); 233 + 234 + return IRQ_HANDLED; 235 + } 236 + 237 + static int nitrox_enable_msix(struct nitrox_device *ndev) 238 + { 239 + struct msix_entry *entries; 240 + char **names; 241 + int i, nr_entries, ret; 242 + 243 + /* 244 + * PF MSI-X vectors 245 + * 246 + * Entry 0: NPS PKT ring 0 247 + * Entry 1: AQMQ ring 0 248 + * Entry 2: ZQM ring 0 249 + * Entry 3: NPS PKT ring 1 250 + * Entry 4: AQMQ ring 1 251 + * Entry 5: ZQM ring 1 252 + * .... 
253 + * Entry 192: NPS_CORE_INT_ACTIVE 254 + */ 255 + nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1; 256 + entries = kzalloc_node(nr_entries * sizeof(struct msix_entry), 257 + GFP_KERNEL, ndev->node); 258 + if (!entries) 259 + return -ENOMEM; 260 + 261 + names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL); 262 + if (!names) { 263 + kfree(entries); 264 + return -ENOMEM; 265 + } 266 + 267 + /* fill entires */ 268 + for (i = 0; i < (nr_entries - 1); i++) 269 + entries[i].entry = i; 270 + 271 + entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY; 272 + 273 + for (i = 0; i < nr_entries; i++) { 274 + *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL); 275 + if (!(*(names + i))) { 276 + ret = -ENOMEM; 277 + goto msix_fail; 278 + } 279 + } 280 + ndev->msix.entries = entries; 281 + ndev->msix.names = names; 282 + ndev->msix.nr_entries = nr_entries; 283 + 284 + ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries, 285 + ndev->msix.nr_entries); 286 + if (ret) { 287 + dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n", 288 + ret); 289 + goto msix_fail; 290 + } 291 + return 0; 292 + 293 + msix_fail: 294 + for (i = 0; i < nr_entries; i++) 295 + kfree(*(names + i)); 296 + 297 + kfree(entries); 298 + kfree(names); 299 + return ret; 300 + } 301 + 302 + static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev) 303 + { 304 + int i; 305 + 306 + if (!ndev->bh.slc) 307 + return; 308 + 309 + for (i = 0; i < ndev->nr_queues; i++) { 310 + struct bh_data *bh = &ndev->bh.slc[i]; 311 + 312 + tasklet_disable(&bh->resp_handler); 313 + tasklet_kill(&bh->resp_handler); 314 + } 315 + kfree(ndev->bh.slc); 316 + ndev->bh.slc = NULL; 317 + } 318 + 319 + static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev) 320 + { 321 + u32 size; 322 + int i; 323 + 324 + size = ndev->nr_queues * sizeof(struct bh_data); 325 + ndev->bh.slc = kzalloc(size, GFP_KERNEL); 326 + if (!ndev->bh.slc) 327 + return -ENOMEM; 328 + 329 + for (i = 0; i < ndev->nr_queues; i++) { 330 + 
struct bh_data *bh = &ndev->bh.slc[i]; 331 + u64 offset; 332 + 333 + offset = NPS_PKT_SLC_CNTSX(i); 334 + /* pre calculate completion count address */ 335 + bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); 336 + bh->cmdq = &ndev->pkt_cmdqs[i]; 337 + 338 + tasklet_init(&bh->resp_handler, pkt_slc_resp_handler, 339 + (unsigned long)bh); 340 + } 341 + 342 + return 0; 343 + } 344 + 345 + static int nitrox_request_irqs(struct nitrox_device *ndev) 346 + { 347 + struct pci_dev *pdev = ndev->pdev; 348 + struct msix_entry *msix_ent = ndev->msix.entries; 349 + int nr_ring_vectors, i = 0, ring, cpu, ret; 350 + char *name; 351 + 352 + /* 353 + * PF MSI-X vectors 354 + * 355 + * Entry 0: NPS PKT ring 0 356 + * Entry 1: AQMQ ring 0 357 + * Entry 2: ZQM ring 0 358 + * Entry 3: NPS PKT ring 1 359 + * .... 360 + * Entry 192: NPS_CORE_INT_ACTIVE 361 + */ 362 + nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS; 363 + 364 + /* request irq for pkt ring/ports only */ 365 + while (i < nr_ring_vectors) { 366 + name = *(ndev->msix.names + i); 367 + ring = (i / NR_RING_VECTORS); 368 + snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d", 369 + ndev->idx, ring); 370 + 371 + ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0, 372 + name, &ndev->bh.slc[ring]); 373 + if (ret) { 374 + dev_err(&pdev->dev, "failed to get irq %d for %s\n", 375 + msix_ent[i].vector, name); 376 + return ret; 377 + } 378 + cpu = ring % num_online_cpus(); 379 + irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu)); 380 + 381 + set_bit(i, ndev->msix.irqs); 382 + i += NR_RING_VECTORS; 383 + } 384 + 385 + /* Request IRQ for NPS_CORE_INT_ACTIVE */ 386 + name = *(ndev->msix.names + i); 387 + snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx); 388 + ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev); 389 + if (ret) { 390 + dev_err(&pdev->dev, "failed to get irq %d for %s\n", 391 + msix_ent[i].vector, name); 392 + return ret; 393 + } 394 + set_bit(i, 
ndev->msix.irqs); 395 + 396 + return 0; 397 + } 398 + 399 + static void nitrox_disable_msix(struct nitrox_device *ndev) 400 + { 401 + struct msix_entry *msix_ent = ndev->msix.entries; 402 + char **names = ndev->msix.names; 403 + int i = 0, ring, nr_ring_vectors; 404 + 405 + nr_ring_vectors = ndev->msix.nr_entries - 1; 406 + 407 + /* clear pkt ring irqs */ 408 + while (i < nr_ring_vectors) { 409 + if (test_and_clear_bit(i, ndev->msix.irqs)) { 410 + ring = (i / NR_RING_VECTORS); 411 + irq_set_affinity_hint(msix_ent[i].vector, NULL); 412 + free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]); 413 + } 414 + i += NR_RING_VECTORS; 415 + } 416 + irq_set_affinity_hint(msix_ent[i].vector, NULL); 417 + free_irq(msix_ent[i].vector, ndev); 418 + clear_bit(i, ndev->msix.irqs); 419 + 420 + kfree(ndev->msix.entries); 421 + for (i = 0; i < ndev->msix.nr_entries; i++) 422 + kfree(*(names + i)); 423 + 424 + kfree(names); 425 + pci_disable_msix(ndev->pdev); 426 + } 427 + 428 + /** 429 + * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ 430 + * @ndev: NITROX device 431 + */ 432 + void nitrox_pf_cleanup_isr(struct nitrox_device *ndev) 433 + { 434 + nitrox_disable_msix(ndev); 435 + nitrox_cleanup_pkt_slc_bh(ndev); 436 + } 437 + 438 + /** 439 + * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ 440 + * @ndev: NITROX device 441 + * 442 + * Return: 0 on success, a negative value on failure. 443 + */ 444 + int nitrox_pf_init_isr(struct nitrox_device *ndev) 445 + { 446 + int err; 447 + 448 + err = nitrox_setup_pkt_slc_bh(ndev); 449 + if (err) 450 + return err; 451 + 452 + err = nitrox_enable_msix(ndev); 453 + if (err) 454 + goto msix_fail; 455 + 456 + err = nitrox_request_irqs(ndev); 457 + if (err) 458 + goto irq_fail; 459 + 460 + return 0; 461 + 462 + irq_fail: 463 + nitrox_disable_msix(ndev); 464 + msix_fail: 465 + nitrox_cleanup_pkt_slc_bh(ndev); 466 + return err; 467 + }
+172
drivers/crypto/cavium/nitrox/nitrox_lib.c
··· 1 + #include <linux/cpumask.h> 2 + #include <linux/dma-mapping.h> 3 + #include <linux/dmapool.h> 4 + #include <linux/delay.h> 5 + #include <linux/gfp.h> 6 + #include <linux/kernel.h> 7 + #include <linux/module.h> 8 + #include <linux/pci_regs.h> 9 + #include <linux/vmalloc.h> 10 + #include <linux/pci.h> 11 + 12 + #include "nitrox_dev.h" 13 + #include "nitrox_common.h" 14 + #include "nitrox_req.h" 15 + #include "nitrox_csr.h" 16 + 17 + #define CRYPTO_CTX_SIZE 256 18 + 19 + /* command queue alignments */ 20 + #define PKT_IN_ALIGN 16 21 + 22 + static int cmdq_common_init(struct nitrox_cmdq *cmdq) 23 + { 24 + struct nitrox_device *ndev = cmdq->ndev; 25 + u32 qsize; 26 + 27 + qsize = (ndev->qlen) * cmdq->instr_size; 28 + cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev), 29 + (qsize + PKT_IN_ALIGN), 30 + &cmdq->dma_unaligned, 31 + GFP_KERNEL); 32 + if (!cmdq->head_unaligned) 33 + return -ENOMEM; 34 + 35 + cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); 36 + cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); 37 + cmdq->qsize = (qsize + PKT_IN_ALIGN); 38 + 39 + spin_lock_init(&cmdq->response_lock); 40 + spin_lock_init(&cmdq->cmdq_lock); 41 + spin_lock_init(&cmdq->backlog_lock); 42 + 43 + INIT_LIST_HEAD(&cmdq->response_head); 44 + INIT_LIST_HEAD(&cmdq->backlog_head); 45 + INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work); 46 + 47 + atomic_set(&cmdq->pending_count, 0); 48 + atomic_set(&cmdq->backlog_count, 0); 49 + return 0; 50 + } 51 + 52 + static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq) 53 + { 54 + struct nitrox_device *ndev = cmdq->ndev; 55 + 56 + cancel_work_sync(&cmdq->backlog_qflush); 57 + 58 + dma_free_coherent(DEV(ndev), cmdq->qsize, 59 + cmdq->head_unaligned, cmdq->dma_unaligned); 60 + 61 + atomic_set(&cmdq->pending_count, 0); 62 + atomic_set(&cmdq->backlog_count, 0); 63 + 64 + cmdq->dbell_csr_addr = NULL; 65 + cmdq->head = NULL; 66 + cmdq->dma = 0; 67 + cmdq->qsize = 0; 68 + cmdq->instr_size = 0; 69 + } 70 + 71 + static void 
nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev) 72 + { 73 + int i; 74 + 75 + for (i = 0; i < ndev->nr_queues; i++) { 76 + struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i]; 77 + 78 + cmdq_common_cleanup(cmdq); 79 + } 80 + kfree(ndev->pkt_cmdqs); 81 + ndev->pkt_cmdqs = NULL; 82 + } 83 + 84 + static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev) 85 + { 86 + int i, err, size; 87 + 88 + size = ndev->nr_queues * sizeof(struct nitrox_cmdq); 89 + ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL); 90 + if (!ndev->pkt_cmdqs) 91 + return -ENOMEM; 92 + 93 + for (i = 0; i < ndev->nr_queues; i++) { 94 + struct nitrox_cmdq *cmdq; 95 + u64 offset; 96 + 97 + cmdq = &ndev->pkt_cmdqs[i]; 98 + cmdq->ndev = ndev; 99 + cmdq->qno = i; 100 + cmdq->instr_size = sizeof(struct nps_pkt_instr); 101 + 102 + offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); 103 + /* SE ring doorbell address for this queue */ 104 + cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); 105 + 106 + err = cmdq_common_init(cmdq); 107 + if (err) 108 + goto pkt_cmdq_fail; 109 + } 110 + return 0; 111 + 112 + pkt_cmdq_fail: 113 + nitrox_cleanup_pkt_cmdqs(ndev); 114 + return err; 115 + } 116 + 117 + static int create_crypto_dma_pool(struct nitrox_device *ndev) 118 + { 119 + size_t size; 120 + 121 + /* Crypto context pool, 16 byte aligned */ 122 + size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr); 123 + ndev->ctx_pool = dma_pool_create("crypto-context", 124 + DEV(ndev), size, 16, 0); 125 + if (!ndev->ctx_pool) 126 + return -ENOMEM; 127 + 128 + return 0; 129 + } 130 + 131 + static void destroy_crypto_dma_pool(struct nitrox_device *ndev) 132 + { 133 + if (!ndev->ctx_pool) 134 + return; 135 + 136 + dma_pool_destroy(ndev->ctx_pool); 137 + ndev->ctx_pool = NULL; 138 + } 139 + 140 + /** 141 + * nitrox_common_sw_init - allocate software resources. 142 + * @ndev: NITROX device 143 + * 144 + * Allocates crypto context pools and command queues etc. 145 + * 146 + * Return: 0 on success, or a negative error code on error. 
147 + */ 148 + int nitrox_common_sw_init(struct nitrox_device *ndev) 149 + { 150 + int err = 0; 151 + 152 + /* per device crypto context pool */ 153 + err = create_crypto_dma_pool(ndev); 154 + if (err) 155 + return err; 156 + 157 + err = nitrox_init_pkt_cmdqs(ndev); 158 + if (err) 159 + destroy_crypto_dma_pool(ndev); 160 + 161 + return err; 162 + } 163 + 164 + /** 165 + * nitrox_common_sw_cleanup - free software resources. 166 + * @ndev: NITROX device 167 + */ 168 + void nitrox_common_sw_cleanup(struct nitrox_device *ndev) 169 + { 170 + nitrox_cleanup_pkt_cmdqs(ndev); 171 + destroy_crypto_dma_pool(ndev); 172 + }
+465
drivers/crypto/cavium/nitrox/nitrox_main.c
··· 1 + #include <linux/aer.h> 2 + #include <linux/delay.h> 3 + #include <linux/firmware.h> 4 + #include <linux/list.h> 5 + #include <linux/module.h> 6 + #include <linux/mutex.h> 7 + #include <linux/pci.h> 8 + #include <linux/pci_ids.h> 9 + 10 + #include "nitrox_dev.h" 11 + #include "nitrox_common.h" 12 + #include "nitrox_csr.h" 13 + 14 + #define CNN55XX_DEV_ID 0x12 15 + #define MAX_PF_QUEUES 64 16 + #define UCODE_HLEN 48 17 + #define SE_GROUP 0 18 + 19 + #define DRIVER_VERSION "1.0" 20 + /* SE microcode */ 21 + #define SE_FW "cnn55xx_se.fw" 22 + 23 + static const char nitrox_driver_name[] = "CNN55XX"; 24 + 25 + static LIST_HEAD(ndevlist); 26 + static DEFINE_MUTEX(devlist_lock); 27 + static unsigned int num_devices; 28 + 29 + /** 30 + * nitrox_pci_tbl - PCI Device ID Table 31 + */ 32 + static const struct pci_device_id nitrox_pci_tbl[] = { 33 + {PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0}, 34 + /* required last entry */ 35 + {0, } 36 + }; 37 + MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl); 38 + 39 + static unsigned int qlen = DEFAULT_CMD_QLEN; 40 + module_param(qlen, uint, 0644); 41 + MODULE_PARM_DESC(qlen, "Command queue length - default 2048"); 42 + 43 + /** 44 + * struct ucode - Firmware Header 45 + * @id: microcode ID 46 + * @version: firmware version 47 + * @code_size: code section size 48 + * @raz: alignment 49 + * @code: code section 50 + */ 51 + struct ucode { 52 + u8 id; 53 + char version[VERSION_LEN - 1]; 54 + __be32 code_size; 55 + u8 raz[12]; 56 + u64 code[0]; 57 + }; 58 + 59 + /** 60 + * write_to_ucd_unit - Write Firmware to NITROX UCD unit 61 + */ 62 + static void write_to_ucd_unit(struct nitrox_device *ndev, 63 + struct ucode *ucode) 64 + { 65 + u32 code_size = be32_to_cpu(ucode->code_size) * 2; 66 + u64 offset, data; 67 + int i = 0; 68 + 69 + /* 70 + * UCD structure 71 + * 72 + * ------------- 73 + * | BLK 7 | 74 + * ------------- 75 + * | BLK 6 | 76 + * ------------- 77 + * | ... 
| 78 + * ------------- 79 + * | BLK 0 | 80 + * ------------- 81 + * Total of 8 blocks, each size 32KB 82 + */ 83 + 84 + /* set the block number */ 85 + offset = UCD_UCODE_LOAD_BLOCK_NUM; 86 + nitrox_write_csr(ndev, offset, 0); 87 + 88 + code_size = roundup(code_size, 8); 89 + while (code_size) { 90 + data = ucode->code[i]; 91 + /* write 8 bytes at a time */ 92 + offset = UCD_UCODE_LOAD_IDX_DATAX(i); 93 + nitrox_write_csr(ndev, offset, data); 94 + code_size -= 8; 95 + i++; 96 + } 97 + 98 + /* put all SE cores in group 0 */ 99 + offset = POM_GRP_EXECMASKX(SE_GROUP); 100 + nitrox_write_csr(ndev, offset, (~0ULL)); 101 + 102 + for (i = 0; i < ndev->hw.se_cores; i++) { 103 + /* 104 + * write block number and firware length 105 + * bit:<2:0> block number 106 + * bit:3 is set SE uses 32KB microcode 107 + * bit:3 is clear SE uses 64KB microcode 108 + */ 109 + offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i); 110 + nitrox_write_csr(ndev, offset, 0x8); 111 + } 112 + usleep_range(300, 400); 113 + } 114 + 115 + static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name) 116 + { 117 + const struct firmware *fw; 118 + struct ucode *ucode; 119 + int ret; 120 + 121 + dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); 122 + 123 + ret = request_firmware(&fw, fw_name, DEV(ndev)); 124 + if (ret < 0) { 125 + dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name); 126 + return ret; 127 + } 128 + 129 + ucode = (struct ucode *)fw->data; 130 + /* copy the firmware version */ 131 + memcpy(ndev->hw.fw_name, ucode->version, (VERSION_LEN - 2)); 132 + ndev->hw.fw_name[VERSION_LEN - 1] = '\0'; 133 + 134 + write_to_ucd_unit(ndev, ucode); 135 + release_firmware(fw); 136 + 137 + set_bit(NITROX_UCODE_LOADED, &ndev->status); 138 + /* barrier to sync with other cpus */ 139 + smp_mb__after_atomic(); 140 + return 0; 141 + } 142 + 143 + /** 144 + * nitrox_add_to_devlist - add NITROX device to global device list 145 + * @ndev: NITROX device 146 + */ 147 + static int 
nitrox_add_to_devlist(struct nitrox_device *ndev) 148 + { 149 + struct nitrox_device *dev; 150 + int ret = 0; 151 + 152 + INIT_LIST_HEAD(&ndev->list); 153 + refcount_set(&ndev->refcnt, 1); 154 + 155 + mutex_lock(&devlist_lock); 156 + list_for_each_entry(dev, &ndevlist, list) { 157 + if (dev == ndev) { 158 + ret = -EEXIST; 159 + goto unlock; 160 + } 161 + } 162 + ndev->idx = num_devices++; 163 + list_add_tail(&ndev->list, &ndevlist); 164 + unlock: 165 + mutex_unlock(&devlist_lock); 166 + return ret; 167 + } 168 + 169 + /** 170 + * nitrox_remove_from_devlist - remove NITROX device from 171 + * global device list 172 + * @ndev: NITROX device 173 + */ 174 + static void nitrox_remove_from_devlist(struct nitrox_device *ndev) 175 + { 176 + mutex_lock(&devlist_lock); 177 + list_del(&ndev->list); 178 + num_devices--; 179 + mutex_unlock(&devlist_lock); 180 + } 181 + 182 + static int nitrox_reset_device(struct pci_dev *pdev) 183 + { 184 + int pos = 0; 185 + 186 + pos = pci_save_state(pdev); 187 + if (pos) { 188 + dev_err(&pdev->dev, "Failed to save pci state\n"); 189 + return -ENOMEM; 190 + } 191 + 192 + pos = pci_pcie_cap(pdev); 193 + if (!pos) 194 + return -ENOTTY; 195 + 196 + if (!pci_wait_for_pending_transaction(pdev)) 197 + dev_err(&pdev->dev, "waiting for pending transaction\n"); 198 + 199 + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 200 + msleep(100); 201 + pci_restore_state(pdev); 202 + 203 + return 0; 204 + } 205 + 206 + static int nitrox_pf_sw_init(struct nitrox_device *ndev) 207 + { 208 + int err; 209 + 210 + err = nitrox_common_sw_init(ndev); 211 + if (err) 212 + return err; 213 + 214 + err = nitrox_pf_init_isr(ndev); 215 + if (err) 216 + nitrox_common_sw_cleanup(ndev); 217 + 218 + return err; 219 + } 220 + 221 + static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev) 222 + { 223 + nitrox_pf_cleanup_isr(ndev); 224 + nitrox_common_sw_cleanup(ndev); 225 + } 226 + 227 + /** 228 + * nitrox_bist_check - Check NITORX BIST registers 
status 229 + * @ndev: NITROX device 230 + */ 231 + static int nitrox_bist_check(struct nitrox_device *ndev) 232 + { 233 + u64 value = 0; 234 + int i; 235 + 236 + for (i = 0; i < NR_CLUSTERS; i++) { 237 + value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i)); 238 + value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i)); 239 + } 240 + value += nitrox_read_csr(ndev, UCD_BIST_STATUS); 241 + value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG); 242 + value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG); 243 + value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG); 244 + value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG); 245 + value += nitrox_read_csr(ndev, POM_BIST_REG); 246 + value += nitrox_read_csr(ndev, BMI_BIST_REG); 247 + value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT); 248 + value += nitrox_read_csr(ndev, BMO_BIST_REG); 249 + value += nitrox_read_csr(ndev, LBC_BIST_STATUS); 250 + value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0)); 251 + if (value) 252 + return -EIO; 253 + return 0; 254 + } 255 + 256 + static void nitrox_get_hwinfo(struct nitrox_device *ndev) 257 + { 258 + union emu_fuse_map emu_fuse; 259 + u64 offset; 260 + int i; 261 + 262 + for (i = 0; i < NR_CLUSTERS; i++) { 263 + u8 dead_cores; 264 + 265 + offset = EMU_FUSE_MAPX(i); 266 + emu_fuse.value = nitrox_read_csr(ndev, offset); 267 + if (emu_fuse.s.valid) { 268 + dead_cores = hweight32(emu_fuse.s.ae_fuse); 269 + ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores; 270 + dead_cores = hweight16(emu_fuse.s.se_fuse); 271 + ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores; 272 + } 273 + } 274 + } 275 + 276 + static int nitrox_pf_hw_init(struct nitrox_device *ndev) 277 + { 278 + int err; 279 + 280 + err = nitrox_bist_check(ndev); 281 + if (err) { 282 + dev_err(&ndev->pdev->dev, "BIST check failed\n"); 283 + return err; 284 + } 285 + /* get cores information */ 286 + nitrox_get_hwinfo(ndev); 287 + 288 + nitrox_config_nps_unit(ndev); 289 + nitrox_config_pom_unit(ndev); 290 + 
nitrox_config_efl_unit(ndev); 291 + /* configure IO units */ 292 + nitrox_config_bmi_unit(ndev); 293 + nitrox_config_bmo_unit(ndev); 294 + /* configure Local Buffer Cache */ 295 + nitrox_config_lbc_unit(ndev); 296 + nitrox_config_rand_unit(ndev); 297 + 298 + /* load firmware on SE cores */ 299 + err = nitrox_load_fw(ndev, SE_FW); 300 + if (err) 301 + return err; 302 + 303 + nitrox_config_emu_unit(ndev); 304 + 305 + return 0; 306 + } 307 + 308 + /** 309 + * nitrox_probe - NITROX Initialization function. 310 + * @pdev: PCI device information struct 311 + * @id: entry in nitrox_pci_tbl 312 + * 313 + * Return: 0, if the driver is bound to the device, or 314 + * a negative error if there is failure. 315 + */ 316 + static int nitrox_probe(struct pci_dev *pdev, 317 + const struct pci_device_id *id) 318 + { 319 + struct nitrox_device *ndev; 320 + int err; 321 + 322 + dev_info_once(&pdev->dev, "%s driver version %s\n", 323 + nitrox_driver_name, DRIVER_VERSION); 324 + 325 + err = pci_enable_device_mem(pdev); 326 + if (err) 327 + return err; 328 + 329 + /* do FLR */ 330 + err = nitrox_reset_device(pdev); 331 + if (err) { 332 + dev_err(&pdev->dev, "FLR failed\n"); 333 + pci_disable_device(pdev); 334 + return err; 335 + } 336 + 337 + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 338 + dev_dbg(&pdev->dev, "DMA to 64-BIT address\n"); 339 + } else { 340 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 341 + if (err) { 342 + dev_err(&pdev->dev, "DMA configuration failed\n"); 343 + pci_disable_device(pdev); 344 + return err; 345 + } 346 + } 347 + 348 + err = pci_request_mem_regions(pdev, nitrox_driver_name); 349 + if (err) { 350 + pci_disable_device(pdev); 351 + return err; 352 + } 353 + pci_set_master(pdev); 354 + 355 + ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); 356 + if (!ndev) 357 + goto ndev_fail; 358 + 359 + pci_set_drvdata(pdev, ndev); 360 + ndev->pdev = pdev; 361 + 362 + /* add to device list */ 363 + nitrox_add_to_devlist(ndev); 364 + 365 + 
ndev->hw.vendor_id = pdev->vendor; 366 + ndev->hw.device_id = pdev->device; 367 + ndev->hw.revision_id = pdev->revision; 368 + /* command timeout in jiffies */ 369 + ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT); 370 + ndev->node = dev_to_node(&pdev->dev); 371 + if (ndev->node == NUMA_NO_NODE) 372 + ndev->node = 0; 373 + 374 + ndev->bar_addr = ioremap(pci_resource_start(pdev, 0), 375 + pci_resource_len(pdev, 0)); 376 + if (!ndev->bar_addr) { 377 + err = -EIO; 378 + goto ioremap_err; 379 + } 380 + /* allocate command queus based on cpus, max queues are 64 */ 381 + ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus()); 382 + ndev->qlen = qlen; 383 + 384 + err = nitrox_pf_sw_init(ndev); 385 + if (err) 386 + goto ioremap_err; 387 + 388 + err = nitrox_pf_hw_init(ndev); 389 + if (err) 390 + goto pf_hw_fail; 391 + 392 + set_bit(NITROX_READY, &ndev->status); 393 + /* barrier to sync with other cpus */ 394 + smp_mb__after_atomic(); 395 + return 0; 396 + 397 + pf_hw_fail: 398 + nitrox_pf_sw_cleanup(ndev); 399 + ioremap_err: 400 + nitrox_remove_from_devlist(ndev); 401 + kfree(ndev); 402 + pci_set_drvdata(pdev, NULL); 403 + ndev_fail: 404 + pci_release_mem_regions(pdev); 405 + pci_disable_device(pdev); 406 + return err; 407 + } 408 + 409 + /** 410 + * nitrox_remove - Unbind the driver from the device. 
411 + * @pdev: PCI device information struct 412 + */ 413 + static void nitrox_remove(struct pci_dev *pdev) 414 + { 415 + struct nitrox_device *ndev = pci_get_drvdata(pdev); 416 + 417 + if (!ndev) 418 + return; 419 + 420 + if (!refcount_dec_and_test(&ndev->refcnt)) { 421 + dev_err(DEV(ndev), "Device refcnt not zero (%d)\n", 422 + refcount_read(&ndev->refcnt)); 423 + return; 424 + } 425 + 426 + dev_info(DEV(ndev), "Removing Device %x:%x\n", 427 + ndev->hw.vendor_id, ndev->hw.device_id); 428 + 429 + clear_bit(NITROX_READY, &ndev->status); 430 + /* barrier to sync with other cpus */ 431 + smp_mb__after_atomic(); 432 + 433 + nitrox_remove_from_devlist(ndev); 434 + nitrox_pf_sw_cleanup(ndev); 435 + 436 + iounmap(ndev->bar_addr); 437 + kfree(ndev); 438 + 439 + pci_set_drvdata(pdev, NULL); 440 + pci_release_mem_regions(pdev); 441 + pci_disable_device(pdev); 442 + } 443 + 444 + static void nitrox_shutdown(struct pci_dev *pdev) 445 + { 446 + pci_set_drvdata(pdev, NULL); 447 + pci_release_mem_regions(pdev); 448 + pci_disable_device(pdev); 449 + } 450 + 451 + static struct pci_driver nitrox_driver = { 452 + .name = nitrox_driver_name, 453 + .id_table = nitrox_pci_tbl, 454 + .probe = nitrox_probe, 455 + .remove = nitrox_remove, 456 + .shutdown = nitrox_shutdown, 457 + }; 458 + 459 + module_pci_driver(nitrox_driver); 460 + 461 + MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>"); 462 + MODULE_DESCRIPTION("Cavium CNN55XX PF Driver" DRIVER_VERSION " "); 463 + MODULE_LICENSE("GPL"); 464 + MODULE_VERSION(DRIVER_VERSION); 465 + MODULE_FIRMWARE(SE_FW);
+445
drivers/crypto/cavium/nitrox/nitrox_req.h
#ifndef __NITROX_REQ_H
#define __NITROX_REQ_H

#include <linux/dma-mapping.h>
#include <crypto/aes.h>

#include "nitrox_dev.h"

/**
 * struct gphdr - General purpose Header
 * @param0: first parameter.
 * @param1: second parameter.
 * @param2: third parameter.
 * @param3: fourth parameter.
 *
 * Params tell the iv and enc/dec data offsets.
 */
struct gphdr {
	__be16 param0;
	__be16 param1;
	__be16 param2;
	__be16 param3;
};

/**
 * union se_req_ctrl - SE request information.
 * @arg: Minor number of the opcode
 * @ctxc: Context control.
 * @unca: Uncertainty enabled.
 * @info: Additional information for SE cores.
 * @unc: Uncertainty count.
 * @ctxl: Context length in bytes.
 * @uddl: User defined data length
 */
union se_req_ctrl {
	u64 value;
	struct {
		u64 raz : 22;
		u64 arg : 8;
		u64 ctxc : 2;
		u64 unca : 1;
		u64 info : 3;
		u64 unc : 8;
		u64 ctxl : 12;
		u64 uddl : 8;
	} s;
};

/* one entry of a hardware scatter/gather list: length + DMA address,
 * with reserved (raz) padding to match the device's expected layout
 */
struct nitrox_sglist {
	u16 len;
	u16 raz0;
	u32 raz1;
	dma_addr_t dma;
};

#define MAX_IV_LEN 16

/**
 * struct se_crypto_request - SE crypto request structure.
 * @opcode: Request opcode (enc/dec)
 * @gfp: allocation flags for this request's internal allocations
 * @flags: flags from crypto subsystem
 * @ctx_handle: Crypto context handle.
 * @gph: GP Header
 * @ctrl: Request Information.
 * @iv: initialization vector copy for this request
 * @ivsize: length of @iv in bytes
 * @src: Input sglist
 * @dst: Output sglist
 */
struct se_crypto_request {
	u8 opcode;
	gfp_t gfp;
	u32 flags;
	u64 ctx_handle;

	struct gphdr gph;
	union se_req_ctrl ctrl;

	u8 iv[MAX_IV_LEN];
	u16 ivsize;

	struct scatterlist *src;
	struct scatterlist *dst;
};

/* Crypto opcodes */
#define FLEXI_CRYPTO_ENCRYPT_HMAC	0x33
#define ENCRYPT	0
#define DECRYPT	1

/* IV from context */
#define IV_FROM_CTX	0
/* IV from Input data */
#define IV_FROM_DPTR	1

/**
 * cipher opcodes for firmware
 */
enum flexi_cipher {
	CIPHER_NULL = 0,
	CIPHER_3DES_CBC,
	CIPHER_3DES_ECB,
	CIPHER_AES_CBC,
	CIPHER_AES_ECB,
	CIPHER_AES_CFB,
	CIPHER_AES_CTR,
	CIPHER_AES_GCM,
	CIPHER_AES_XTS,
	CIPHER_AES_CCM,
	CIPHER_AES_CBC_CTS,
	CIPHER_AES_ECB_CTS,
	CIPHER_INVALID
};

/**
 * struct crypto_keys - Crypto keys
 * @u.key: Encryption key or KEY1 for AES-XTS
 * @iv: Encryption IV or Tweak for AES-XTS
 */
struct crypto_keys {
	union {
		u8 key[AES_MAX_KEY_SIZE];
		u8 key1[AES_MAX_KEY_SIZE];
	} u;
	u8 iv[AES_BLOCK_SIZE];
};

/**
 * struct auth_keys - Authentication keys
 * @u.ipad: IPAD or KEY2 for AES-XTS
 * @opad: OPAD or AUTH KEY if auth_input_type = 1
 */
struct auth_keys {
	union {
		u8 ipad[64];
		u8 key2[64];
	} u;
	u8 opad[64];
};

/**
 * struct flexi_crypto_context - Crypto context
 * @cipher_type: Encryption cipher type
 * @aes_keylen: AES key length
 * @iv_source: Encryption IV source
 * @hash_type: Authentication type
 * @auth_input_type: Authentication input type
 *   1 - Authentication IV and KEY, microcode calculates OPAD/IPAD
 *   0 - Authentication OPAD/IPAD
 * @mac_len: mac length
 * @crypto: Crypto keys
 * @auth: Authentication keys
 *
 * NOTE: the bitfield layout mirrors the device's context format;
 * field order must not change.
 */
struct flexi_crypto_context {
	union {
		__be64 flags;
		struct {
#if defined(__BIG_ENDIAN_BITFIELD)
			u64 cipher_type : 4;
			u64 reserved_59 : 1;
			u64 aes_keylen : 2;
			u64 iv_source : 1;
			u64 hash_type : 4;
			u64 reserved_49_51 : 3;
			u64 auth_input_type: 1;
			u64 mac_len : 8;
			u64 reserved_0_39 : 40;
#else
			u64 reserved_0_39 : 40;
			u64 mac_len : 8;
			u64 auth_input_type: 1;
			u64 reserved_49_51 : 3;
			u64 hash_type : 4;
			u64 iv_source : 1;
			u64 aes_keylen : 2;
			u64 reserved_59 : 1;
			u64 cipher_type : 4;
#endif
		} w0;
	};

	struct crypto_keys crypto;
	struct auth_keys auth;
};

/* per-tfm driver context: owning device plus the DMA-visible
 * flexi context (handle and kernel pointer share storage)
 */
struct nitrox_crypto_ctx {
	struct nitrox_device *ndev;
	union {
		u64 ctx_handle;
		struct flexi_crypto_context *fctx;
	} u;
};

/* per-request wrapper tying the SE request to the crypto API request */
struct nitrox_kcrypt_request {
	struct se_crypto_request creq;
	struct nitrox_crypto_ctx *nctx;
	struct skcipher_request *skreq;
};

/**
 * union pkt_instr_hdr - Packet Instruction Header
 * @g: Gather used
 *   When [G] is set and [GSZ] != 0, the instruction is
 *   indirect gather instruction.
 *   When [G] is set and [GSZ] = 0, the instruction is
 *   direct gather instruction.
 * @gsz: Number of pointers in the indirect gather list
 * @ihi: When set hardware duplicates the 1st 8 bytes of pkt_instr_hdr
 *   and adds them to the packet after the pkt_instr_hdr but before any UDD
 * @ssz: Not used by the input hardware. But can become slc_store_int[SSZ]
 *   when [IHI] is set.
 * @fsz: The number of front data bytes directly included in the
 *   PCIe instruction.
 * @tlen: The length of the input packet in bytes, include:
 *   - 16B pkt_hdr
 *   - Inline context bytes if any,
 *   - UDD if any,
 *   - packet payload bytes
 */
union pkt_instr_hdr {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 raz_48_63 : 16;
		u64 g : 1;
		u64 gsz : 7;
		u64 ihi : 1;
		u64 ssz : 7;
		u64 raz_30_31 : 2;
		u64 fsz : 6;
		u64 raz_16_23 : 8;
		u64 tlen : 16;
#else
		u64 tlen : 16;
		u64 raz_16_23 : 8;
		u64 fsz : 6;
		u64 raz_30_31 : 2;
		u64 ssz : 7;
		u64 ihi : 1;
		u64 gsz : 7;
		u64 g : 1;
		u64 raz_48_63 : 16;
#endif
	} s;
};

/**
 * union pkt_hdr - Packet Input Header
 * @opcode: Request opcode (Major)
 * @arg: Request opcode (Minor)
 * @ctxc: Context control.
 * @unca: When set [UNC] is the uncertainty count for an input packet.
 *   The hardware uses uncertainty counts to predict
 *   output buffer use and avoid deadlock.
 * @info: Not used by input hardware. Available for use
 *   during SE processing.
 * @destport: The expected destination port/ring/channel for the packet.
 * @unc: Uncertainty count for an input packet.
 * @grp: SE group that will process the input packet.
 * @ctxl: Context Length in 64-bit words.
 * @uddl: User-defined data (UDD) length in bytes.
 * @ctxp: Context pointer. CTXP<63,2:0> must be zero in all cases.
 */
union pkt_hdr {
	u64 value[2];
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 opcode : 8;
		u64 arg : 8;
		u64 ctxc : 2;
		u64 unca : 1;
		u64 raz_44 : 1;
		u64 info : 3;
		u64 destport : 9;
		u64 unc : 8;
		u64 raz_19_23 : 5;
		u64 grp : 3;
		u64 raz_15 : 1;
		u64 ctxl : 7;
		u64 uddl : 8;
#else
		u64 uddl : 8;
		u64 ctxl : 7;
		u64 raz_15 : 1;
		u64 grp : 3;
		u64 raz_19_23 : 5;
		u64 unc : 8;
		u64 destport : 9;
		u64 info : 3;
		u64 raz_44 : 1;
		u64 unca : 1;
		u64 ctxc : 2;
		u64 arg : 8;
		u64 opcode : 8;
#endif
		__be64 ctxp;
	} s;
};

/**
 * union slc_store_info - Solicited Packet Output Store Information.
 * @ssz: The number of scatterlist pointers for the solicited output port
 *   packet.
 * @rptr: The result pointer for the solicited output port packet.
 *   If [SSZ]=0, [RPTR] must point directly to a buffer on the remote
 *   host that is large enough to hold the entire output packet.
 *   If [SSZ]!=0, [RPTR] must point to an array of ([SSZ]+3)/4
 *   sglist components at [RPTR] on the remote host.
 */
union slc_store_info {
	u64 value[2];
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 raz_39_63 : 25;
		u64 ssz : 7;
		u64 raz_0_31 : 32;
#else
		u64 raz_0_31 : 32;
		u64 ssz : 7;
		u64 raz_39_63 : 25;
#endif
		__be64 rptr;
	} s;
};

/**
 * struct nps_pkt_instr - NPS Packet Instruction of SE cores.
 * @dptr0 : Input pointer points to buffer in remote host.
 * @ih: Packet Instruction Header (8 bytes)
 * @irh: Packet Input Header (16 bytes)
 * @slc: Solicited Packet Output Store Information (16 bytes)
 * @fdata: Front data
 *
 * 64-Byte Instruction Format
 */
struct nps_pkt_instr {
	__be64 dptr0;
	union pkt_instr_hdr ih;
	union pkt_hdr irh;
	union slc_store_info slc;
	u64 fdata[2];
};

/**
 * struct ctx_hdr - Book keeping data about the crypto context
 * @pool: Pool used to allocate crypto context
 * @dma: Base DMA address of the crypto context
 * @ctx_dma: Actual usable crypto context for NITROX
 */
struct ctx_hdr {
	struct dma_pool *pool;
	dma_addr_t dma;
	dma_addr_t ctx_dma;
};

/*
 * struct nitrox_sgcomp - SG list component format
 * @len: lengths of the (up to four) buffers on the remote host,
 *   one per entry of @dma.
 * @dma: up to four pointers to buffers on the remote host.
 */
struct nitrox_sgcomp {
	__be16 len[4];
	__be64 dma[4];
};

/*
 * struct nitrox_sgtable - SG list information
 * @map_bufs_cnt: Number of buffers mapped
 * @nr_sgcomp: Number of sglist components
 * @total_bytes: Total bytes in sglist.
 * @len: Total sglist components length.
 * @dma: DMA address of sglist component.
 * @dir: DMA direction.
 * @buf: crypto request buffer.
 * @sglist: SG list of input/output buffers.
 * @sgcomp: sglist component for NITROX.
 */
struct nitrox_sgtable {
	u8 map_bufs_cnt;
	u8 nr_sgcomp;
	u16 total_bytes;
	u32 len;
	dma_addr_t dma;
	enum dma_data_direction dir;

	struct scatterlist *buf;
	struct nitrox_sglist *sglist;
	struct nitrox_sgcomp *sgcomp;
};

/* Response Header Length */
#define ORH_HLEN	8
/* Completion bytes Length */
#define COMP_HLEN	8

/* response header and completion word, plus their DMA addresses */
struct resp_hdr {
	u64 orh;
	dma_addr_t orh_dma;
	u64 completion;
	dma_addr_t completion_dma;
};

typedef void (*completion_t)(struct skcipher_request *skreq, int err);

/**
 * struct nitrox_softreq - Represents the NITROX Request.
 * @response: response list entry
 * @backlog: Backlog list entry
 * @flags: request flags from the crypto layer
 * @gfp: allocation flags for this request
 * @status: current request state (posted/backlog/...)
 * @inplace: true when input and output use the same buffer
 * @ndev: Device used to submit the request
 * @cmdq: Command queue for submission
 * @instr: 64B instruction
 * @resp: Response headers
 * @in: SG table for input
 * @out: SG table for output
 * @tstamp: Request submitted time in jiffies
 * @callback: callback after request completion/timeout
 * @skreq: originating skcipher request (callback argument)
 */
struct nitrox_softreq {
	struct list_head response;
	struct list_head backlog;

	u32 flags;
	gfp_t gfp;
	atomic_t status;
	bool inplace;

	struct nitrox_device *ndev;
	struct nitrox_cmdq *cmdq;

	struct nps_pkt_instr instr;
	struct resp_hdr resp;
	struct nitrox_sgtable in;
	struct nitrox_sgtable out;

	unsigned long tstamp;

	completion_t callback;
	struct skcipher_request *skreq;
};

#endif /* __NITROX_REQ_H */
+732
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
··· 1 + #include <linux/gfp.h> 2 + #include <linux/workqueue.h> 3 + #include <crypto/internal/skcipher.h> 4 + 5 + #include "nitrox_dev.h" 6 + #include "nitrox_req.h" 7 + #include "nitrox_csr.h" 8 + #include "nitrox_req.h" 9 + 10 + /* SLC_STORE_INFO */ 11 + #define MIN_UDD_LEN 16 12 + /* PKT_IN_HDR + SLC_STORE_INFO */ 13 + #define FDATA_SIZE 32 14 + /* Base destination port for the solicited requests */ 15 + #define SOLICIT_BASE_DPORT 256 16 + #define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL 17 + 18 + #define REQ_NOT_POSTED 1 19 + #define REQ_BACKLOG 2 20 + #define REQ_POSTED 3 21 + 22 + /** 23 + * Response codes from SE microcode 24 + * 0x00 - Success 25 + * Completion with no error 26 + * 0x43 - ERR_GC_DATA_LEN_INVALID 27 + * Invalid Data length if Encryption Data length is 28 + * less than 16 bytes for AES-XTS and AES-CTS. 29 + * 0x45 - ERR_GC_CTX_LEN_INVALID 30 + * Invalid context length: CTXL != 23 words. 31 + * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID 32 + * DOCSIS support is enabled with other than 33 + * AES/DES-CBC mode encryption. 34 + * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID 35 + * Authentication offset is other than 0 with 36 + * Encryption IV source = 0. 37 + * Authentication offset is other than 8 (DES)/16 (AES) 38 + * with Encryption IV source = 1 39 + * 0x51 - ERR_GC_CRC32_INVALID_SELECTION 40 + * CRC32 is enabled for other than DOCSIS encryption. 41 + * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID 42 + * Invalid flag options in AES-CCM IV. 43 + */ 44 + 45 + /** 46 + * dma_free_sglist - unmap and free the sg lists. 
47 + * @ndev: N5 device 48 + * @sgtbl: SG table 49 + */ 50 + static void softreq_unmap_sgbufs(struct nitrox_softreq *sr) 51 + { 52 + struct nitrox_device *ndev = sr->ndev; 53 + struct device *dev = DEV(ndev); 54 + struct nitrox_sglist *sglist; 55 + 56 + /* unmap in sgbuf */ 57 + sglist = sr->in.sglist; 58 + if (!sglist) 59 + goto out_unmap; 60 + 61 + /* unmap iv */ 62 + dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL); 63 + /* unmpa src sglist */ 64 + dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir); 65 + /* unamp gather component */ 66 + dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE); 67 + kfree(sr->in.sglist); 68 + kfree(sr->in.sgcomp); 69 + sr->in.sglist = NULL; 70 + sr->in.buf = NULL; 71 + sr->in.map_bufs_cnt = 0; 72 + 73 + out_unmap: 74 + /* unmap out sgbuf */ 75 + sglist = sr->out.sglist; 76 + if (!sglist) 77 + return; 78 + 79 + /* unmap orh */ 80 + dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); 81 + 82 + /* unmap dst sglist */ 83 + if (!sr->inplace) { 84 + dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3), 85 + sr->out.dir); 86 + } 87 + /* unmap completion */ 88 + dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); 89 + 90 + /* unmap scatter component */ 91 + dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE); 92 + kfree(sr->out.sglist); 93 + kfree(sr->out.sgcomp); 94 + sr->out.sglist = NULL; 95 + sr->out.buf = NULL; 96 + sr->out.map_bufs_cnt = 0; 97 + } 98 + 99 + static void softreq_destroy(struct nitrox_softreq *sr) 100 + { 101 + softreq_unmap_sgbufs(sr); 102 + kfree(sr); 103 + } 104 + 105 + /** 106 + * create_sg_component - create SG componets for N5 device. 
107 + * @sr: Request structure 108 + * @sgtbl: SG table 109 + * @nr_comp: total number of components required 110 + * 111 + * Component structure 112 + * 113 + * 63 48 47 32 31 16 15 0 114 + * -------------------------------------- 115 + * | LEN0 | LEN1 | LEN2 | LEN3 | 116 + * |------------------------------------- 117 + * | PTR0 | 118 + * -------------------------------------- 119 + * | PTR1 | 120 + * -------------------------------------- 121 + * | PTR2 | 122 + * -------------------------------------- 123 + * | PTR3 | 124 + * -------------------------------------- 125 + * 126 + * Returns 0 if success or a negative errno code on error. 127 + */ 128 + static int create_sg_component(struct nitrox_softreq *sr, 129 + struct nitrox_sgtable *sgtbl, int map_nents) 130 + { 131 + struct nitrox_device *ndev = sr->ndev; 132 + struct nitrox_sgcomp *sgcomp; 133 + struct nitrox_sglist *sglist; 134 + dma_addr_t dma; 135 + size_t sz_comp; 136 + int i, j, nr_sgcomp; 137 + 138 + nr_sgcomp = roundup(map_nents, 4) / 4; 139 + 140 + /* each component holds 4 dma pointers */ 141 + sz_comp = nr_sgcomp * sizeof(*sgcomp); 142 + sgcomp = kzalloc(sz_comp, sr->gfp); 143 + if (!sgcomp) 144 + return -ENOMEM; 145 + 146 + sgtbl->sgcomp = sgcomp; 147 + sgtbl->nr_sgcomp = nr_sgcomp; 148 + 149 + sglist = sgtbl->sglist; 150 + /* populate device sg component */ 151 + for (i = 0; i < nr_sgcomp; i++) { 152 + for (j = 0; j < 4; j++) { 153 + sgcomp->len[j] = cpu_to_be16(sglist->len); 154 + sgcomp->dma[j] = cpu_to_be64(sglist->dma); 155 + sglist++; 156 + } 157 + sgcomp++; 158 + } 159 + /* map the device sg component */ 160 + dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); 161 + if (dma_mapping_error(DEV(ndev), dma)) { 162 + kfree(sgtbl->sgcomp); 163 + sgtbl->sgcomp = NULL; 164 + return -ENOMEM; 165 + } 166 + 167 + sgtbl->dma = dma; 168 + sgtbl->len = sz_comp; 169 + 170 + return 0; 171 + } 172 + 173 + /** 174 + * dma_map_inbufs - DMA map input sglist and creates sglist component 175 
 * for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Maps the IV plus every src scatterlist entry, records the mappings in
 * sr->in.sglist and builds the NITROX gather component.
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	struct nitrox_sglist *glist;
	int i, nents, ret = 0;
	dma_addr_t dma;
	size_t sz;

	nents = sg_nents(req->src);

	/*
	 * create gather list for IV and src entries; the count is rounded
	 * up to a multiple of 4 so create_sg_component() can safely read
	 * the zero-filled padding entries.
	 */
	sz = roundup((1 + nents), 4) * sizeof(*glist);
	glist = kzalloc(sz, sr->gfp);
	if (!glist)
		return -ENOMEM;

	sr->in.sglist = glist;
	/* map IV */
	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
	/*
	 * NOTE(review): dma_mapping_error() reports failure as a non-zero
	 * (not necessarily negative-errno) value which is propagated as
	 * this function's return — confirm callers only test for non-zero.
	 */
	ret = dma_mapping_error(dev, dma);
	if (ret)
		goto iv_map_err;

	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	/* map src entries */
	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
	if (!nents) {
		ret = -EINVAL;
		goto src_map_err;
	}
	sr->in.buf = req->src;

	/* store the mappings: entry 0 is the IV */
	glist->len = req->ivsize;
	glist->dma = dma;
	glist++;
	sr->in.total_bytes += req->ivsize;

	for_each_sg(req->src, sg, nents, i) {
		glist->len = sg_dma_len(sg);
		glist->dma = sg_dma_address(sg);
		sr->in.total_bytes += glist->len;
		glist++;
	}
	/* mapped entries: IV + src entries (aligns with sg component) */
	sr->in.map_bufs_cnt = (1 + nents);

	/* create NITROX gather component */
	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
	sr->in.map_bufs_cnt = 0;
src_map_err:
	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
iv_map_err:
	kfree(sr->in.sglist);
	sr->in.sglist = NULL;
	return ret;
}

/**
 * dma_map_outbufs - DMA map the output side (ORH, dst entries, completion)
 * and build the NITROX scatter component.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct nitrox_sglist *glist = sr->in.sglist;
	struct nitrox_sglist *slist;
	struct scatterlist *sg;
	int i, nents, map_bufs_cnt, ret = 0;
	size_t sz;

	nents = sg_nents(req->dst);

	/* create scatter list for ORH, IV, dst entries and completion header */
	sz = roundup((3 + nents), 4) * sizeof(*slist);
	slist = kzalloc(sz, sr->gfp);
	if (!slist)
		return -ENOMEM;

	sr->out.sglist = slist;
	sr->out.dir = DMA_BIDIRECTIONAL;
	/* map ORH */
	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
					  sr->out.dir);
	ret = dma_mapping_error(dev, sr->resp.orh_dma);
	if (ret)
		goto orh_map_err;

	/* map completion */
	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
						 COMP_HLEN, sr->out.dir);
	ret = dma_mapping_error(dev, sr->resp.completion_dma);
	if (ret)
		goto compl_map_err;

	sr->inplace = (req->src == req->dst) ? true : false;
	/* out of place: dst needs its own mapping */
	if (!sr->inplace) {
		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
		if (!nents) {
			ret = -EINVAL;
			goto dst_map_err;
		}
	}
	sr->out.buf = req->dst;

	/* store the mappings */
	/* orh */
	slist->len = ORH_HLEN;
	slist->dma = sr->resp.orh_dma;
	slist++;

	/* copy the glist mappings */
	if (sr->inplace) {
		/* in place: reuse the input (IV + src) mappings verbatim */
		nents = sr->in.map_bufs_cnt - 1;
		map_bufs_cnt = sr->in.map_bufs_cnt;
		while (map_bufs_cnt--) {
			slist->len = glist->len;
			slist->dma = glist->dma;
			slist++;
			glist++;
		}
	} else {
		/* copy iv mapping */
		slist->len = glist->len;
		slist->dma = glist->dma;
		slist++;
		/* copy remaining maps */
		for_each_sg(req->dst, sg, nents, i) {
			slist->len = sg_dma_len(sg);
			slist->dma = sg_dma_address(sg);
			slist++;
		}
	}

	/* completion */
	slist->len = COMP_HLEN;
	slist->dma = sr->resp.completion_dma;

	/* ORH + (IV + dst entries) + completion */
	sr->out.map_bufs_cnt = (3 + nents);

	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	if (!sr->inplace)
		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
	sr->out.map_bufs_cnt = 0;
	sr->out.buf = NULL;
dst_map_err:
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
	sr->resp.completion_dma = 0;
compl_map_err:
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
	sr->resp.orh_dma = 0;
orh_map_err:
	kfree(sr->out.sglist);
	sr->out.sglist = NULL;
	return ret;
}

/*
 * softreq_map_iobuf - map both directions; unwinds the input mappings
 * when the output side fails.
 */
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}

/* queue the request on the command queue's backlog list */
static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_lock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_lock);
}

/* add the request to the in-flight response list */
static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->response_lock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->response_lock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->response_lock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->response_lock);
}

/*
 * NOTE(review): peeks the response list without response_lock; appears to
 * rely on a single-consumer completion context — confirm.
 */
static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}

/* reserve a ring slot; on overflow undo the reservation and report full */
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	return false;
}

/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue to post on
 *
 * Copies the 64B instruction into the ring, adds @sr to the response
 * list and rings the doorbell. Cannot fail; the caller reserves ring
 * space beforehand via cmdq_full().
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
	u64 offset;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);

	/* get the next write offset */
	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
	/* copy the instruction */
	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
	memcpy(ent, &sr->instr, cmdq->instr_size);
	/* flush the command queue updates */
	dma_wmb();

	sr->tstamp = jiffies;
	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);
	/* orders the doorbell rings */
	mmiowb();

	spin_unlock_bh(&cmdq->cmdq_lock);
}

/*
 * post_backlog_cmds - drain the backlog list into the ring.
 * @cmdq: command queue to drain
 *
 * Posts backlogged requests until the ring is full. Each posted request
 * is completed to its caller with -EINPROGRESS. Returns 0 when the
 * backlog was fully drained, -EBUSY when the ring filled up first.
 */
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	spin_lock_bh(&cmdq->backlog_lock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		struct skcipher_request *skreq;

		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -EBUSY;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		skreq = sr->skreq;
		/* post the command */
		post_se_instr(sr, cmdq);

		/* backlog requests are posted, wakeup with -EINPROGRESS */
		skcipher_request_complete(skreq, -EINPROGRESS);
	}
	spin_unlock_bh(&cmdq->backlog_lock);

	return ret;
}

/*
 * nitrox_enqueue_request - post the request, or backlog it when the ring
 * is full and the caller allows backlogging.
 *
 * Returns -EINPROGRESS when posted, -EBUSY when backlogged, -EAGAIN when
 * the ring is full and CRYPTO_TFM_REQ_MAY_BACKLOG is not set.
 */
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;
	int ret = -EBUSY;

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EAGAIN;

		backlog_list_add(sr, cmdq);
	} else {
		/* drain older backlogged requests first to keep ordering */
		ret = post_backlog_cmds(cmdq);
		if (ret) {
			backlog_list_add(sr, cmdq);
			return ret;
		}
		post_se_instr(sr, cmdq);
		ret = -EINPROGRESS;
	}
	return ret;
}

/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: completion callback
 * @skreq: skcipher request passed back to @callback
 *
 * Returns -EINPROGRESS when posted, -EBUSY when backlogged, or a
 * negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      struct skcipher_request *skreq)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->skreq = skreq;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	/* pre-fill the response words; hardware overwrites them on completion */
	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
	WRITE_ONCE(sr->resp.completion, PENDING_SIG);

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/*
	 * get the context handle; the struct ctx_hdr bookkeeping block is
	 * laid out immediately before the usable context pointed to by
	 * req->ctx_handle.
	 */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_cmdqs[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |     SLC_INFO       | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);

	/*
	 * No conversion for front data,
	 * It goes into payload
	 * put GP Header in front data
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;
	/* flush the soft_req changes before posting the cmd */
	wmb();

	ret = nitrox_enqueue_request(sr);
	if (ret == -EAGAIN)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}

/* true when @timeout jiffies have elapsed since @tstamp */
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}

/* work handler that retries posting backlogged commands */
void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}

/**
 * process_response_list - process completed requests
 * @cmdq: command queue to operate on
 *
 * Walks the response list in submission order, completing requests the
 * hardware has finished (or timed out) and invoking their callbacks.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	struct skcipher_request *skreq;
	completion_t callback;
	int req_completed = 0, err = 0, budget;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/*
		 * check orh and completion bytes updates; both stay at
		 * PENDING_SIG (hence equal) until the device writes them
		 */
		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);

		callback = sr->callback;
		skreq = sr->skreq;

		/* ORH error code (low byte); 0x00 means success */
		err = READ_ONCE(sr->resp.orh) & 0xff;
		softreq_destroy(sr);

		if (callback)
			callback(skreq, err);

		req_completed++;
	}
}

/**
 * pkt_slc_resp_handler - post processing of SE responses
 * @data: pointer to the per-queue struct bh_data
 *
 * NOTE(review): unsigned long data signature suggests this runs as a
 * tasklet — confirm against the registration site in nitrox_isr.c.
 */
void pkt_slc_resp_handler(unsigned long data)
{
	struct bh_data *bh = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = bh->cmdq;
	union nps_pkt_slc_cnts pkt_slc_cnts;

	/* read completion count */
	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	pkt_slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
	/* order the writes */
	mmiowb();

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}