Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: cavium/nitrox - Enabled Mailbox support

Enabled the PF->VF Mailbox support. Mailbox messages are interpreted
as {type, opcode, data}. Supported message types are REQ, ACK and NACK.

Signed-off-by: Srikanth Jampala <Jampala.Srikanth@cavium.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Srikanth Jampala and committed by
Herbert Xu
cf718eaa 19c11c97

+441 -54
+2 -1
drivers/crypto/cavium/nitrox/Makefile
··· 6 6 nitrox_lib.o \ 7 7 nitrox_hal.o \ 8 8 nitrox_reqmgr.o \ 9 - nitrox_algs.o 9 + nitrox_algs.o \ 10 + nitrox_mbx.o 10 11 11 12 n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o 12 13 n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
+11 -1
drivers/crypto/cavium/nitrox/nitrox_csr.h
··· 54 54 #define NPS_STATS_PKT_DMA_WR_CNT 0x1000190 55 55 56 56 /* NPS packet registers */ 57 - #define NPS_PKT_INT 0x1040018 57 + #define NPS_PKT_INT 0x1040018 58 + #define NPS_PKT_MBOX_INT_LO 0x1040020 59 + #define NPS_PKT_MBOX_INT_LO_ENA_W1C 0x1040030 60 + #define NPS_PKT_MBOX_INT_LO_ENA_W1S 0x1040038 61 + #define NPS_PKT_MBOX_INT_HI 0x1040040 62 + #define NPS_PKT_MBOX_INT_HI_ENA_W1C 0x1040050 63 + #define NPS_PKT_MBOX_INT_HI_ENA_W1S 0x1040058 58 64 #define NPS_PKT_IN_RERR_HI 0x1040108 59 65 #define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120 60 66 #define NPS_PKT_IN_RERR_LO 0x1040128 ··· 80 74 #define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240 81 75 #define NPS_PKT_SLC_ERR_TYPE 0x1040248 82 76 #define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260 77 + /* Mailbox PF->VF PF Accessible Data registers */ 78 + #define NPS_PKT_MBOX_PF_VF_PFDATAX(_i) (0x1040800 + ((_i) * 0x8)) 79 + #define NPS_PKT_MBOX_VF_PF_PFDATAX(_i) (0x1040C00 + ((_i) * 0x8)) 80 + 83 81 #define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000)) 84 82 #define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000)) 85 83 #define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000))
+22
drivers/crypto/cavium/nitrox/nitrox_debugfs.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #ifndef __NITROX_DEBUGFS_H 3 + #define __NITROX_DEBUGFS_H 4 + 5 + #include "nitrox_dev.h" 6 + 7 + #ifdef CONFIG_DEBUG_FS 8 + int nitrox_debugfs_init(struct nitrox_device *ndev); 9 + void nitrox_debugfs_exit(struct nitrox_device *ndev); 10 + #else 11 + static inline int nitrox_debugfs_init(struct nitrox_device *ndev) 12 + { 13 + return 0; 14 + } 15 + 16 + static inline int nitrox_sriov_debugfs_init(struct nitrox_device *ndev) 17 + { 18 + return 0; 19 + } 20 + #endif /* !CONFIG_DEBUG_FS */ 21 + 22 + #endif /* __NITROX_DEBUGFS_H */
+50 -11
drivers/crypto/cavium/nitrox/nitrox_dev.h
··· 8 8 #include <linux/if.h> 9 9 10 10 #define VERSION_LEN 32 11 + /* Maximum queues in PF mode */ 12 + #define MAX_PF_QUEUES 64 11 13 12 14 /** 13 15 * struct nitrox_cmdq - NITROX command queue ··· 106 104 }; 107 105 108 106 /** 107 + * mbox_msg - Mailbox message data 108 + * @type: message type 109 + * @opcode: message opcode 110 + * @data: message data 111 + */ 112 + union mbox_msg { 113 + u64 value; 114 + struct { 115 + u64 type: 2; 116 + u64 opcode: 6; 117 + u64 data: 58; 118 + }; 119 + struct { 120 + u64 type: 2; 121 + u64 opcode: 6; 122 + u64 chipid: 8; 123 + u64 vfid: 8; 124 + } id; 125 + }; 126 + 127 + /** 128 + * nitrox_vfdev - NITROX VF device instance in PF 129 + * @state: VF device state 130 + * @vfno: VF number 131 + * @nr_queues: number of queues enabled in VF 132 + * @ring: ring to communicate with VF 133 + * @msg: Mailbox message data from VF 134 + * @mbx_resp: Mailbox counters 135 + */ 136 + struct nitrox_vfdev { 137 + atomic_t state; 138 + int vfno; 139 + int nr_queues; 140 + int ring; 141 + union mbox_msg msg; 142 + atomic64_t mbx_resp; 143 + }; 144 + 145 + /** 109 146 * struct nitrox_iov - SR-IOV information 110 147 * @num_vfs: number of VF(s) enabled 111 - * @msix: MSI-X for PF in SR-IOV case 148 + * @max_vf_queues: Maximum number of queues allowed for VF 149 + * @vfdev: VF(s) devices 150 + * @pf2vf_wq: workqueue for PF2VF communication 151 + * @msix: MSI-X entry for PF in SR-IOV case 112 152 */ 113 153 struct nitrox_iov { 114 154 int num_vfs; 155 + int max_vf_queues; 156 + struct nitrox_vfdev *vfdev; 157 + struct workqueue_struct *pf2vf_wq; 115 158 struct msix_entry msix; 116 159 }; 117 160 ··· 273 226 return atomic_read(&ndev->state) == __NDEV_READY; 274 227 } 275 228 276 - #ifdef CONFIG_DEBUG_FS 277 - int nitrox_debugfs_init(struct nitrox_device *ndev); 278 - void nitrox_debugfs_exit(struct nitrox_device *ndev); 279 - #else 280 - static inline int nitrox_debugfs_init(struct nitrox_device *ndev) 229 + static inline bool 
nitrox_vfdev_ready(struct nitrox_vfdev *vfdev) 281 230 { 282 - return 0; 231 + return atomic_read(&vfdev->state) == __NDEV_READY; 283 232 } 284 - 285 - static inline void nitrox_debugfs_exit(struct nitrox_device *ndev) 286 - { } 287 - #endif 288 233 289 234 #endif /* __NITROX_DEV_H */
+83 -31
drivers/crypto/cavium/nitrox/nitrox_hal.c
··· 5 5 #include "nitrox_csr.h" 6 6 7 7 #define PLL_REF_CLK 50 8 + #define MAX_CSR_RETRIES 10 8 9 9 10 /** 10 11 * emu_enable_cores - Enable EMU cluster cores. 11 - * @ndev: N5 device 12 + * @ndev: NITROX device 12 13 */ 13 14 static void emu_enable_cores(struct nitrox_device *ndev) 14 15 { ··· 34 33 35 34 /** 36 35 * nitrox_config_emu_unit - configure EMU unit. 37 - * @ndev: N5 device 36 + * @ndev: NITROX device 38 37 */ 39 38 void nitrox_config_emu_unit(struct nitrox_device *ndev) 40 39 { ··· 64 63 static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring) 65 64 { 66 65 union nps_pkt_in_instr_ctl pkt_in_ctl; 67 - union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; 68 66 union nps_pkt_in_done_cnts pkt_in_cnts; 67 + int max_retries = MAX_CSR_RETRIES; 69 68 u64 offset; 70 69 70 + /* step 1: disable the ring, clear enable bit */ 71 71 offset = NPS_PKT_IN_INSTR_CTLX(ring); 72 - /* disable the ring */ 73 72 pkt_in_ctl.value = nitrox_read_csr(ndev, offset); 74 73 pkt_in_ctl.s.enb = 0; 75 74 nitrox_write_csr(ndev, offset, pkt_in_ctl.value); 76 - usleep_range(100, 150); 77 75 78 - /* wait to clear [ENB] */ 76 + /* step 2: wait to clear [ENB] */ 77 + usleep_range(100, 150); 79 78 do { 80 79 pkt_in_ctl.value = nitrox_read_csr(ndev, offset); 81 - } while (pkt_in_ctl.s.enb); 80 + if (!pkt_in_ctl.s.enb) 81 + break; 82 + udelay(50); 83 + } while (max_retries--); 82 84 83 - /* clear off door bell counts */ 84 - offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring); 85 - pkt_in_dbell.value = 0; 86 - pkt_in_dbell.s.dbell = 0xffffffff; 87 - nitrox_write_csr(ndev, offset, pkt_in_dbell.value); 88 - 89 - /* clear done counts */ 85 + /* step 3: clear done counts */ 90 86 offset = NPS_PKT_IN_DONE_CNTSX(ring); 91 87 pkt_in_cnts.value = nitrox_read_csr(ndev, offset); 92 88 nitrox_write_csr(ndev, offset, pkt_in_cnts.value); ··· 93 95 void enable_pkt_input_ring(struct nitrox_device *ndev, int ring) 94 96 { 95 97 union nps_pkt_in_instr_ctl pkt_in_ctl; 98 + int max_retries = MAX_CSR_RETRIES; 
96 99 u64 offset; 97 100 98 101 /* 64-byte instruction size */ ··· 106 107 /* wait for set [ENB] */ 107 108 do { 108 109 pkt_in_ctl.value = nitrox_read_csr(ndev, offset); 109 - } while (!pkt_in_ctl.s.enb); 110 + if (pkt_in_ctl.s.enb) 111 + break; 112 + udelay(50); 113 + } while (max_retries--); 110 114 } 111 115 112 116 /** 113 117 * nitrox_config_pkt_input_rings - configure Packet Input Rings 114 - * @ndev: N5 device 118 + * @ndev: NITROX device 115 119 */ 116 120 void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) 117 121 { ··· 123 121 for (i = 0; i < ndev->nr_queues; i++) { 124 122 struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i]; 125 123 union nps_pkt_in_instr_rsize pkt_in_rsize; 124 + union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; 126 125 u64 offset; 127 126 128 127 reset_pkt_input_ring(ndev, i); 129 128 130 - /* configure ring base address 16-byte aligned, 129 + /** 130 + * step 4: 131 + * configure ring base address 16-byte aligned, 131 132 * size and interrupt threshold. 
132 133 */ 133 134 offset = NPS_PKT_IN_INSTR_BADDRX(i); ··· 146 141 offset = NPS_PKT_IN_INT_LEVELSX(i); 147 142 nitrox_write_csr(ndev, offset, 0xffffffff); 148 143 144 + /* step 5: clear off door bell counts */ 145 + offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); 146 + pkt_in_dbell.value = 0; 147 + pkt_in_dbell.s.dbell = 0xffffffff; 148 + nitrox_write_csr(ndev, offset, pkt_in_dbell.value); 149 + 150 + /* enable the ring */ 149 151 enable_pkt_input_ring(ndev, i); 150 152 } 151 153 } ··· 161 149 { 162 150 union nps_pkt_slc_ctl pkt_slc_ctl; 163 151 union nps_pkt_slc_cnts pkt_slc_cnts; 152 + int max_retries = MAX_CSR_RETRIES; 164 153 u64 offset; 165 154 166 - /* disable slc port */ 155 + /* step 1: disable slc port */ 167 156 offset = NPS_PKT_SLC_CTLX(port); 168 157 pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); 169 158 pkt_slc_ctl.s.enb = 0; 170 159 nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); 171 - usleep_range(100, 150); 172 160 161 + /* step 2 */ 162 + usleep_range(100, 150); 173 163 /* wait to clear [ENB] */ 174 164 do { 175 165 pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); 176 - } while (pkt_slc_ctl.s.enb); 166 + if (!pkt_slc_ctl.s.enb) 167 + break; 168 + udelay(50); 169 + } while (max_retries--); 177 170 178 - /* clear slc counters */ 171 + /* step 3: clear slc counters */ 179 172 offset = NPS_PKT_SLC_CNTSX(port); 180 173 pkt_slc_cnts.value = nitrox_read_csr(ndev, offset); 181 174 nitrox_write_csr(ndev, offset, pkt_slc_cnts.value); ··· 190 173 void enable_pkt_solicit_port(struct nitrox_device *ndev, int port) 191 174 { 192 175 union nps_pkt_slc_ctl pkt_slc_ctl; 176 + int max_retries = MAX_CSR_RETRIES; 193 177 u64 offset; 194 178 195 179 offset = NPS_PKT_SLC_CTLX(port); 196 180 pkt_slc_ctl.value = 0; 197 181 pkt_slc_ctl.s.enb = 1; 198 - 199 182 /* 200 183 * 8 trailing 0x00 bytes will be added 201 184 * to the end of the outgoing packet. 
··· 208 191 /* wait to set [ENB] */ 209 192 do { 210 193 pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); 211 - } while (!pkt_slc_ctl.s.enb); 194 + if (pkt_slc_ctl.s.enb) 195 + break; 196 + udelay(50); 197 + } while (max_retries--); 212 198 } 213 199 214 - static void config_single_pkt_solicit_port(struct nitrox_device *ndev, 215 - int port) 200 + static void config_pkt_solicit_port(struct nitrox_device *ndev, int port) 216 201 { 217 202 union nps_pkt_slc_int_levels pkt_slc_int; 218 203 u64 offset; 219 204 220 205 reset_pkt_solicit_port(ndev, port); 221 206 207 + /* step 4: configure interrupt levels */ 222 208 offset = NPS_PKT_SLC_INT_LEVELSX(port); 223 209 pkt_slc_int.value = 0; 224 210 /* time interrupt threshold */ 225 211 pkt_slc_int.s.timet = 0x3fffff; 226 212 nitrox_write_csr(ndev, offset, pkt_slc_int.value); 227 213 214 + /* enable the solicit port */ 228 215 enable_pkt_solicit_port(ndev, port); 229 216 } 230 217 ··· 237 216 int i; 238 217 239 218 for (i = 0; i < ndev->nr_queues; i++) 240 - config_single_pkt_solicit_port(ndev, i); 219 + config_pkt_solicit_port(ndev, i); 241 220 } 242 221 243 222 /** 244 223 * enable_nps_interrupts - enable NPS interrutps 245 - * @ndev: N5 device. 224 + * @ndev: NITROX device. 246 225 * 247 226 * This includes NPS core, packet in and slc interrupts. 
248 227 */ ··· 305 284 } 306 285 307 286 /** 308 - * nitrox_config_rand_unit - enable N5 random number unit 309 - * @ndev: N5 device 287 + * nitrox_config_rand_unit - enable NITROX random number unit 288 + * @ndev: NITROX device 310 289 */ 311 290 void nitrox_config_rand_unit(struct nitrox_device *ndev) 312 291 { ··· 382 361 { 383 362 union lbc_inval_ctl lbc_ctl; 384 363 union lbc_inval_status lbc_stat; 364 + int max_retries = MAX_CSR_RETRIES; 385 365 u64 offset; 386 366 387 367 /* invalidate LBC */ ··· 392 370 nitrox_write_csr(ndev, offset, lbc_ctl.value); 393 371 394 372 offset = LBC_INVAL_STATUS; 395 - 396 373 do { 397 374 lbc_stat.value = nitrox_read_csr(ndev, offset); 398 - } while (!lbc_stat.s.done); 375 + if (lbc_stat.s.done) 376 + break; 377 + udelay(50); 378 + } while (max_retries--); 399 379 } 400 380 401 381 void nitrox_config_lbc_unit(struct nitrox_device *ndev) ··· 490 466 491 467 /* copy partname */ 492 468 strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname)); 469 + } 470 + 471 + void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev) 472 + { 473 + u64 value = ~0ULL; 474 + u64 reg_addr; 475 + 476 + /* Mailbox interrupt low enable set register */ 477 + reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S; 478 + nitrox_write_csr(ndev, reg_addr, value); 479 + 480 + /* Mailbox interrupt high enable set register */ 481 + reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S; 482 + nitrox_write_csr(ndev, reg_addr, value); 483 + } 484 + 485 + void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev) 486 + { 487 + u64 value = ~0ULL; 488 + u64 reg_addr; 489 + 490 + /* Mailbox interrupt low enable clear register */ 491 + reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C; 492 + nitrox_write_csr(ndev, reg_addr, value); 493 + 494 + /* Mailbox interrupt high enable clear register */ 495 + reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C; 496 + nitrox_write_csr(ndev, reg_addr, value); 493 497 }
+2
drivers/crypto/cavium/nitrox/nitrox_hal.h
··· 19 19 void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); 20 20 void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode); 21 21 void nitrox_get_hwinfo(struct nitrox_device *ndev); 22 + void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev); 23 + void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev); 22 24 23 25 #endif /* __NITROX_HAL_H */
+7 -1
drivers/crypto/cavium/nitrox/nitrox_isr.c
··· 7 7 #include "nitrox_csr.h" 8 8 #include "nitrox_common.h" 9 9 #include "nitrox_hal.h" 10 + #include "nitrox_mbx.h" 10 11 11 12 /** 12 13 * One vector for each type of ring ··· 221 220 */ 222 221 static irqreturn_t nps_core_int_isr(int irq, void *data) 223 222 { 224 - struct nitrox_device *ndev = data; 223 + struct nitrox_q_vector *qvec = data; 224 + struct nitrox_device *ndev = qvec->ndev; 225 225 union nps_core_int_active core_int; 226 226 227 227 core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE); ··· 247 245 248 246 if (core_int.s.bmi) 249 247 clear_bmi_err_intr(ndev); 248 + 249 + /* Mailbox interrupt */ 250 + if (core_int.s.mbox) 251 + nitrox_pf2vf_mbox_handler(ndev); 250 252 251 253 /* If more work callback the ISR, set resend */ 252 254 core_int.s.resend = 1;
+1 -2
drivers/crypto/cavium/nitrox/nitrox_main.c
··· 1 1 #include <linux/aer.h> 2 2 #include <linux/delay.h> 3 - #include <linux/debugfs.h> 4 3 #include <linux/firmware.h> 5 4 #include <linux/list.h> 6 5 #include <linux/module.h> ··· 12 13 #include "nitrox_csr.h" 13 14 #include "nitrox_hal.h" 14 15 #include "nitrox_isr.h" 16 + #include "nitrox_debugfs.h" 15 17 16 18 #define CNN55XX_DEV_ID 0x12 17 - #define MAX_PF_QUEUES 64 18 19 #define UCODE_HLEN 48 19 20 #define SE_GROUP 0 20 21
+204
drivers/crypto/cavium/nitrox/nitrox_mbx.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/workqueue.h> 3 + 4 + #include "nitrox_csr.h" 5 + #include "nitrox_hal.h" 6 + #include "nitrox_dev.h" 7 + 8 + #define RING_TO_VFNO(_x, _y) ((_x) / (_y)) 9 + 10 + /** 11 + * mbx_msg_type - Mailbox message types 12 + */ 13 + enum mbx_msg_type { 14 + MBX_MSG_TYPE_NOP, 15 + MBX_MSG_TYPE_REQ, 16 + MBX_MSG_TYPE_ACK, 17 + MBX_MSG_TYPE_NACK, 18 + }; 19 + 20 + /** 21 + * mbx_msg_opcode - Mailbox message opcodes 22 + */ 23 + enum mbx_msg_opcode { 24 + MSG_OP_VF_MODE = 1, 25 + MSG_OP_VF_UP, 26 + MSG_OP_VF_DOWN, 27 + MSG_OP_CHIPID_VFID, 28 + }; 29 + 30 + struct pf2vf_work { 31 + struct nitrox_vfdev *vfdev; 32 + struct nitrox_device *ndev; 33 + struct work_struct pf2vf_resp; 34 + }; 35 + 36 + static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring) 37 + { 38 + u64 reg_addr; 39 + 40 + reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring); 41 + return nitrox_read_csr(ndev, reg_addr); 42 + } 43 + 44 + static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value, 45 + int ring) 46 + { 47 + u64 reg_addr; 48 + 49 + reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring); 50 + nitrox_write_csr(ndev, reg_addr, value); 51 + } 52 + 53 + static void pf2vf_send_response(struct nitrox_device *ndev, 54 + struct nitrox_vfdev *vfdev) 55 + { 56 + union mbox_msg msg; 57 + 58 + msg.value = vfdev->msg.value; 59 + 60 + switch (vfdev->msg.opcode) { 61 + case MSG_OP_VF_MODE: 62 + msg.data = ndev->mode; 63 + break; 64 + case MSG_OP_VF_UP: 65 + vfdev->nr_queues = vfdev->msg.data; 66 + atomic_set(&vfdev->state, __NDEV_READY); 67 + break; 68 + case MSG_OP_CHIPID_VFID: 69 + msg.id.chipid = ndev->idx; 70 + msg.id.vfid = vfdev->vfno; 71 + break; 72 + case MSG_OP_VF_DOWN: 73 + vfdev->nr_queues = 0; 74 + atomic_set(&vfdev->state, __NDEV_NOT_READY); 75 + break; 76 + default: 77 + msg.type = MBX_MSG_TYPE_NOP; 78 + break; 79 + } 80 + 81 + if (msg.type == MBX_MSG_TYPE_NOP) 82 + return; 83 + 84 + /* send ACK to VF */ 85 + msg.type = 
MBX_MSG_TYPE_ACK; 86 + pf2vf_write_mbox(ndev, msg.value, vfdev->ring); 87 + 88 + vfdev->msg.value = 0; 89 + atomic64_inc(&vfdev->mbx_resp); 90 + } 91 + 92 + static void pf2vf_resp_handler(struct work_struct *work) 93 + { 94 + struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work, 95 + pf2vf_resp); 96 + struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev; 97 + struct nitrox_device *ndev = pf2vf_resp->ndev; 98 + 99 + switch (vfdev->msg.type) { 100 + case MBX_MSG_TYPE_REQ: 101 + /* process the request from VF */ 102 + pf2vf_send_response(ndev, vfdev); 103 + break; 104 + case MBX_MSG_TYPE_ACK: 105 + case MBX_MSG_TYPE_NACK: 106 + break; 107 + }; 108 + 109 + kfree(pf2vf_resp); 110 + } 111 + 112 + void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev) 113 + { 114 + struct nitrox_vfdev *vfdev; 115 + struct pf2vf_work *pfwork; 116 + u64 value, reg_addr; 117 + u32 i; 118 + int vfno; 119 + 120 + /* loop for VF(0..63) */ 121 + reg_addr = NPS_PKT_MBOX_INT_LO; 122 + value = nitrox_read_csr(ndev, reg_addr); 123 + for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) { 124 + /* get the vfno from ring */ 125 + vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues); 126 + vfdev = ndev->iov.vfdev + vfno; 127 + vfdev->ring = i; 128 + /* fill the vf mailbox data */ 129 + vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring); 130 + pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC); 131 + if (!pfwork) 132 + continue; 133 + 134 + pfwork->vfdev = vfdev; 135 + pfwork->ndev = ndev; 136 + INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler); 137 + queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); 138 + /* clear the corresponding vf bit */ 139 + nitrox_write_csr(ndev, reg_addr, BIT_ULL(i)); 140 + } 141 + 142 + /* loop for VF(64..127) */ 143 + reg_addr = NPS_PKT_MBOX_INT_HI; 144 + value = nitrox_read_csr(ndev, reg_addr); 145 + for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) { 146 + /* get the vfno from ring */ 147 + vfno = RING_TO_VFNO(i + 64, 
ndev->iov.max_vf_queues); 148 + vfdev = ndev->iov.vfdev + vfno; 149 + vfdev->ring = (i + 64); 150 + /* fill the vf mailbox data */ 151 + vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring); 152 + 153 + pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC); 154 + if (!pfwork) 155 + continue; 156 + 157 + pfwork->vfdev = vfdev; 158 + pfwork->ndev = ndev; 159 + INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler); 160 + queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); 161 + /* clear the corresponding vf bit */ 162 + nitrox_write_csr(ndev, reg_addr, BIT_ULL(i)); 163 + } 164 + } 165 + 166 + int nitrox_mbox_init(struct nitrox_device *ndev) 167 + { 168 + struct nitrox_vfdev *vfdev; 169 + int i; 170 + 171 + ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs, 172 + sizeof(struct nitrox_vfdev), GFP_KERNEL); 173 + if (!ndev->iov.vfdev) 174 + return -ENOMEM; 175 + 176 + for (i = 0; i < ndev->iov.num_vfs; i++) { 177 + vfdev = ndev->iov.vfdev + i; 178 + vfdev->vfno = i; 179 + } 180 + 181 + /* allocate pf2vf response workqueue */ 182 + ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); 183 + if (!ndev->iov.pf2vf_wq) { 184 + kfree(ndev->iov.vfdev); 185 + return -ENOMEM; 186 + } 187 + /* enable pf2vf mailbox interrupts */ 188 + enable_pf2vf_mbox_interrupts(ndev); 189 + 190 + return 0; 191 + } 192 + 193 + void nitrox_mbox_cleanup(struct nitrox_device *ndev) 194 + { 195 + /* disable pf2vf mailbox interrupts */ 196 + disable_pf2vf_mbox_interrupts(ndev); 197 + /* destroy workqueue */ 198 + if (ndev->iov.pf2vf_wq) 199 + destroy_workqueue(ndev->iov.pf2vf_wq); 200 + 201 + kfree(ndev->iov.vfdev); 202 + ndev->iov.pf2vf_wq = NULL; 203 + ndev->iov.vfdev = NULL; 204 + }
+9
drivers/crypto/cavium/nitrox/nitrox_mbx.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #ifndef __NITROX_MBX_H 3 + #define __NITROX_MBX_H 4 + 5 + int nitrox_mbox_init(struct nitrox_device *ndev); 6 + void nitrox_mbox_cleanup(struct nitrox_device *ndev); 7 + void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev); 8 + 9 + #endif /* __NITROX_MBX_H */
+50 -7
drivers/crypto/cavium/nitrox/nitrox_sriov.c
··· 6 6 #include "nitrox_hal.h" 7 7 #include "nitrox_common.h" 8 8 #include "nitrox_isr.h" 9 + #include "nitrox_mbx.h" 9 10 10 11 /** 11 12 * num_vfs_valid - validate VF count ··· 53 52 return mode; 54 53 } 55 54 55 + static inline int vf_mode_to_nr_queues(enum vf_mode mode) 56 + { 57 + int nr_queues = 0; 58 + 59 + switch (mode) { 60 + case __NDEV_MODE_PF: 61 + nr_queues = MAX_PF_QUEUES; 62 + break; 63 + case __NDEV_MODE_VF16: 64 + nr_queues = 8; 65 + break; 66 + case __NDEV_MODE_VF32: 67 + nr_queues = 4; 68 + break; 69 + case __NDEV_MODE_VF64: 70 + nr_queues = 2; 71 + break; 72 + case __NDEV_MODE_VF128: 73 + nr_queues = 1; 74 + break; 75 + } 76 + 77 + return nr_queues; 78 + } 79 + 56 80 static void nitrox_pf_cleanup(struct nitrox_device *ndev) 57 81 { 58 82 /* PF has no queues in SR-IOV mode */ ··· 120 94 return nitrox_crypto_register(); 121 95 } 122 96 123 - static int nitrox_sriov_init(struct nitrox_device *ndev) 124 - { 125 - /* register interrupts for PF in SR-IOV */ 126 - return nitrox_sriov_register_interupts(ndev); 127 - } 128 - 129 97 static void nitrox_sriov_cleanup(struct nitrox_device *ndev) 130 98 { 131 99 /* unregister interrupts for PF in SR-IOV */ 132 100 nitrox_sriov_unregister_interrupts(ndev); 101 + nitrox_mbox_cleanup(ndev); 102 + } 103 + 104 + static int nitrox_sriov_init(struct nitrox_device *ndev) 105 + { 106 + int ret; 107 + 108 + /* register interrupts for PF in SR-IOV */ 109 + ret = nitrox_sriov_register_interupts(ndev); 110 + if (ret) 111 + return ret; 112 + 113 + ret = nitrox_mbox_init(ndev); 114 + if (ret) 115 + goto sriov_init_fail; 116 + 117 + return 0; 118 + 119 + sriov_init_fail: 120 + nitrox_sriov_cleanup(ndev); 121 + return ret; 133 122 } 134 123 135 124 static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs) ··· 167 126 } 168 127 dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs); 169 128 170 - ndev->iov.num_vfs = num_vfs; 171 129 ndev->mode = num_vfs_to_mode(num_vfs); 130 + ndev->iov.num_vfs = num_vfs; 131 + 
ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode); 172 132 /* set bit in flags */ 173 133 set_bit(__NDEV_SRIOV_BIT, &ndev->flags); 174 134 ··· 211 169 clear_bit(__NDEV_SRIOV_BIT, &ndev->flags); 212 170 213 171 ndev->iov.num_vfs = 0; 172 + ndev->iov.max_vf_queues = 0; 214 173 ndev->mode = __NDEV_MODE_PF; 215 174 216 175 /* cleanup PF SR-IOV resources */