Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: cavium/nitrox - Configure asymmetric queue manager Hardware unit

This patch configures and initializes CNN55XX device AQM hardware unit.

Signed-off-by: Phani Kiran Hemadri <phemadri@marvell.com>
Reviewed-by: Srikanth Jampala <jsrikanth@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Phani Kiran Hemadri and committed by Herbert Xu.
Commit 5f05cdca (parent b31c17c8).

+265 -17 (total across 5 files)
+111
drivers/crypto/cavium/nitrox/nitrox_csr.h
··· 257 257 }; 258 258 259 259 /** 260 + * struct aqmq_drbl - AQM Queue Doorbell Counter Registers 261 + * @dbell_count: Doorbell Counter 262 + */ 263 + union aqmq_drbl { 264 + u64 value; 265 + struct { 266 + #if (defined(__BIG_ENDIAN_BITFIELD)) 267 + u64 raz_32_63 : 32; 268 + u64 dbell_count : 32; 269 + #else 270 + u64 dbell_count : 32; 271 + u64 raz_32_63 : 32; 272 + #endif 273 + }; 274 + }; 275 + 276 + /** 277 + * struct aqmq_qsz - AQM Queue Host Queue Size Registers 278 + * @host_queue_size: Size, in numbers of 'aqmq_command_s' command 279 + * of the Host Ring. 280 + */ 281 + union aqmq_qsz { 282 + u64 value; 283 + struct { 284 + #if (defined(__BIG_ENDIAN_BITFIELD)) 285 + u64 raz_32_63 : 32; 286 + u64 host_queue_size : 32; 287 + #else 288 + u64 host_queue_size : 32; 289 + u64 raz_32_63 : 32; 290 + #endif 291 + }; 292 + }; 293 + 294 + /** 295 + * struct aqmq_cmp_thr - AQM Queue Commands Completed Threshold Registers 296 + * @commands_completed_threshold: Count of 'aqmq_command_s' commands executed 297 + * by AE engines for which completion interrupt is asserted. 298 + */ 299 + union aqmq_cmp_thr { 300 + u64 value; 301 + struct { 302 + #if (defined(__BIG_ENDIAN_BITFIELD)) 303 + u64 raz_32_63 : 32; 304 + u64 commands_completed_threshold : 32; 305 + #else 306 + u64 commands_completed_threshold : 32; 307 + u64 raz_32_63 : 32; 308 + #endif 309 + }; 310 + }; 311 + 312 + /** 313 + * struct aqmq_cmp_cnt - AQM Queue Commands Completed Count Registers 314 + * @resend: Bit to request completion interrupt Resend. 315 + * @completion_status: Command completion status of the ring. 316 + * @commands_completed_count: Count of 'aqmq_command_s' commands executed by 317 + * AE engines. 
318 + */ 319 + union aqmq_cmp_cnt { 320 + u64 value; 321 + struct { 322 + #if (defined(__BIG_ENDIAN_BITFIELD)) 323 + u64 raz_34_63 : 30; 324 + u64 resend : 1; 325 + u64 completion_status : 1; 326 + u64 commands_completed_count : 32; 327 + #else 328 + u64 commands_completed_count : 32; 329 + u64 completion_status : 1; 330 + u64 resend : 1; 331 + u64 raz_34_63 : 30; 332 + #endif 333 + }; 334 + }; 335 + 336 + /** 337 + * struct aqmq_en - AQM Queue Enable Registers 338 + * @queue_status: 1 = AQMQ is enabled, 0 = AQMQ is disabled 339 + */ 340 + union aqmq_en { 341 + u64 value; 342 + struct { 343 + #if (defined(__BIG_ENDIAN_BITFIELD)) 344 + u64 raz_1_63 : 63; 345 + u64 queue_enable : 1; 346 + #else 347 + u64 queue_enable : 1; 348 + u64 raz_1_63 : 63; 349 + #endif 350 + }; 351 + }; 352 + 353 + /** 354 + * struct aqmq_activity_stat - AQM Queue Activity Status Registers 355 + * @queue_active: 1 = AQMQ is active, 0 = AQMQ is quiescent 356 + */ 357 + union aqmq_activity_stat { 358 + u64 value; 359 + struct { 360 + #if (defined(__BIG_ENDIAN_BITFIELD)) 361 + u64 raz_1_63 : 63; 362 + u64 queue_active : 1; 363 + #else 364 + u64 queue_active : 1; 365 + u64 raz_1_63 : 63; 366 + #endif 367 + }; 368 + }; 369 + 370 + /** 260 371 * struct emu_fuse_map - EMU Fuse Map Registers 261 372 * @ae_fuse: Fuse settings for AE 19..0 262 373 * @se_fuse: Fuse settings for SE 15..0
+143 -15
drivers/crypto/cavium/nitrox/nitrox_hal.c
··· 241 241 } 242 242 243 243 /** 244 - * enable_nps_interrupts - enable NPS interrutps 244 + * enable_nps_core_interrupts - enable NPS core interrutps 245 245 * @ndev: NITROX device. 246 246 * 247 - * This includes NPS core, packet in and slc interrupts. 247 + * This includes NPS core interrupts. 248 248 */ 249 - static void enable_nps_interrupts(struct nitrox_device *ndev) 249 + static void enable_nps_core_interrupts(struct nitrox_device *ndev) 250 250 { 251 251 union nps_core_int_ena_w1s core_int; 252 252 ··· 258 258 core_int.s.npco_dma_malform = 1; 259 259 core_int.s.host_nps_wr_err = 1; 260 260 nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value); 261 - 262 - /* NPS packet in ring interrupts */ 263 - nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); 264 - nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); 265 - nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); 266 - /* NPS packet slc port interrupts */ 267 - nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); 268 - nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); 269 - nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL)); 270 261 } 271 262 272 - void nitrox_config_nps_unit(struct nitrox_device *ndev) 263 + void nitrox_config_nps_core_unit(struct nitrox_device *ndev) 273 264 { 274 265 union nps_core_gbl_vfcfg core_gbl_vfcfg; 275 266 ··· 272 281 core_gbl_vfcfg.s.ilk_disable = 1; 273 282 core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF; 274 283 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); 284 + 285 + /* enable nps core interrupts */ 286 + enable_nps_core_interrupts(ndev); 287 + } 288 + 289 + /** 290 + * enable_nps_pkt_interrupts - enable NPS packet interrutps 291 + * @ndev: NITROX device. 292 + * 293 + * This includes NPS packet in and slc interrupts. 
294 + */ 295 + static void enable_nps_pkt_interrupts(struct nitrox_device *ndev) 296 + { 297 + /* NPS packet in ring interrupts */ 298 + nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); 299 + nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); 300 + nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); 301 + /* NPS packet slc port interrupts */ 302 + nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); 303 + nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); 304 + nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL)); 305 + } 306 + 307 + void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev) 308 + { 275 309 /* config input and solicit ports */ 276 310 nitrox_config_pkt_input_rings(ndev); 277 311 nitrox_config_pkt_solicit_ports(ndev); 278 312 279 - /* enable interrupts */ 280 - enable_nps_interrupts(ndev); 313 + /* enable nps packet interrupts */ 314 + enable_nps_pkt_interrupts(ndev); 315 + } 316 + 317 + static void reset_aqm_ring(struct nitrox_device *ndev, int ring) 318 + { 319 + union aqmq_en aqmq_en_reg; 320 + union aqmq_activity_stat activity_stat; 321 + union aqmq_cmp_cnt cmp_cnt; 322 + int max_retries = MAX_CSR_RETRIES; 323 + u64 offset; 324 + 325 + /* step 1: disable the queue */ 326 + offset = AQMQ_ENX(ring); 327 + aqmq_en_reg.value = 0; 328 + aqmq_en_reg.queue_enable = 0; 329 + nitrox_write_csr(ndev, offset, aqmq_en_reg.value); 330 + 331 + /* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */ 332 + usleep_range(100, 150); 333 + offset = AQMQ_ACTIVITY_STATX(ring); 334 + do { 335 + activity_stat.value = nitrox_read_csr(ndev, offset); 336 + if (!activity_stat.queue_active) 337 + break; 338 + udelay(50); 339 + } while (max_retries--); 340 + 341 + /* step 3: clear commands completed count */ 342 + offset = AQMQ_CMP_CNTX(ring); 343 + cmp_cnt.value = nitrox_read_csr(ndev, offset); 344 + nitrox_write_csr(ndev, offset, cmp_cnt.value); 345 + usleep_range(50, 100); 346 + } 347 + 348 + void 
enable_aqm_ring(struct nitrox_device *ndev, int ring) 349 + { 350 + union aqmq_en aqmq_en_reg; 351 + u64 offset; 352 + 353 + offset = AQMQ_ENX(ring); 354 + aqmq_en_reg.value = 0; 355 + aqmq_en_reg.queue_enable = 1; 356 + nitrox_write_csr(ndev, offset, aqmq_en_reg.value); 357 + usleep_range(50, 100); 358 + } 359 + 360 + void nitrox_config_aqm_rings(struct nitrox_device *ndev) 361 + { 362 + int ring; 363 + 364 + for (ring = 0; ring < ndev->nr_queues; ring++) { 365 + struct nitrox_cmdq *cmdq = ndev->aqmq[ring]; 366 + union aqmq_drbl drbl; 367 + union aqmq_qsz qsize; 368 + union aqmq_cmp_thr cmp_thr; 369 + u64 offset; 370 + 371 + /* steps 1 - 3 */ 372 + reset_aqm_ring(ndev, ring); 373 + 374 + /* step 4: clear doorbell count of ring */ 375 + offset = AQMQ_DRBLX(ring); 376 + drbl.value = 0; 377 + drbl.dbell_count = 0xFFFFFFFF; 378 + nitrox_write_csr(ndev, offset, drbl.value); 379 + 380 + /* step 5: configure host ring details */ 381 + 382 + /* set host address for next command of ring */ 383 + offset = AQMQ_NXT_CMDX(ring); 384 + nitrox_write_csr(ndev, offset, 0ULL); 385 + 386 + /* set host address of ring base */ 387 + offset = AQMQ_BADRX(ring); 388 + nitrox_write_csr(ndev, offset, cmdq->dma); 389 + 390 + /* set ring size */ 391 + offset = AQMQ_QSZX(ring); 392 + qsize.value = 0; 393 + qsize.host_queue_size = ndev->qlen; 394 + nitrox_write_csr(ndev, offset, qsize.value); 395 + 396 + /* set command completion threshold */ 397 + offset = AQMQ_CMP_THRX(ring); 398 + cmp_thr.value = 0; 399 + cmp_thr.commands_completed_threshold = 1; 400 + nitrox_write_csr(ndev, offset, cmp_thr.value); 401 + 402 + /* step 6: enable the queue */ 403 + enable_aqm_ring(ndev, ring); 404 + } 405 + } 406 + 407 + static void enable_aqm_interrupts(struct nitrox_device *ndev) 408 + { 409 + /* clear interrupt enable bits */ 410 + nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL)); 411 + nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL)); 412 + nitrox_write_csr(ndev, 
AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL)); 413 + nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL)); 414 + nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL)); 415 + nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL)); 416 + nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL)); 417 + nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL)); 418 + } 419 + 420 + void nitrox_config_aqm_unit(struct nitrox_device *ndev) 421 + { 422 + /* config aqm command queues */ 423 + nitrox_config_aqm_rings(ndev); 424 + 425 + /* enable aqm interrupts */ 426 + enable_aqm_interrupts(ndev); 281 427 } 282 428 283 429 void nitrox_config_pom_unit(struct nitrox_device *ndev)
+5 -1
drivers/crypto/cavium/nitrox/nitrox_hal.h
··· 4 4 5 5 #include "nitrox_dev.h" 6 6 7 + void nitrox_config_aqm_rings(struct nitrox_device *ndev); 8 + void nitrox_config_aqm_unit(struct nitrox_device *ndev); 7 9 void nitrox_config_emu_unit(struct nitrox_device *ndev); 8 10 void nitrox_config_pkt_input_rings(struct nitrox_device *ndev); 9 11 void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev); 10 - void nitrox_config_nps_unit(struct nitrox_device *ndev); 12 + void nitrox_config_nps_core_unit(struct nitrox_device *ndev); 13 + void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev); 11 14 void nitrox_config_pom_unit(struct nitrox_device *ndev); 12 15 void nitrox_config_rand_unit(struct nitrox_device *ndev); 13 16 void nitrox_config_efl_unit(struct nitrox_device *ndev); ··· 18 15 void nitrox_config_bmo_unit(struct nitrox_device *ndev); 19 16 void nitrox_config_lbc_unit(struct nitrox_device *ndev); 20 17 void invalidate_lbc(struct nitrox_device *ndev); 18 + void enable_aqm_ring(struct nitrox_device *ndev, int qno); 21 19 void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); 22 20 void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); 23 21 void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
+3 -1
drivers/crypto/cavium/nitrox/nitrox_main.c
··· 387 387 /* get cores information */ 388 388 nitrox_get_hwinfo(ndev); 389 389 390 - nitrox_config_nps_unit(ndev); 390 + nitrox_config_nps_core_unit(ndev); 391 + nitrox_config_aqm_unit(ndev); 392 + nitrox_config_nps_pkt_unit(ndev); 391 393 nitrox_config_pom_unit(ndev); 392 394 nitrox_config_efl_unit(ndev); 393 395 /* configure IO units */
+3
drivers/crypto/cavium/nitrox/nitrox_sriov.c
··· 109 109 return err; 110 110 } 111 111 112 + /* configure the AQM queues */ 113 + nitrox_config_aqm_rings(ndev); 114 + 112 115 /* configure the packet queues */ 113 116 nitrox_config_pkt_input_rings(ndev); 114 117 nitrox_config_pkt_solicit_ports(ndev);