Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: cn23xx: fix typos

This patch fixes a few typos, spelling mistakes, and a bit of grammar,
increasing the comments' readability.

Signed-off-by: Janik Haag <janik@aq0.de>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250307145648.1679912-2-janik@aq0.de
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

authored by

Janik Haag and committed by
Paolo Abeni
676cc91e 9e328504

+38 -38
+38 -38
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
··· 49 49 lio_pci_readq(oct, CN23XX_RST_SOFT_RST); 50 50 lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST); 51 51 52 - /* Wait for 100ms as Octeon resets. */ 52 + /* Wait for 100ms as Octeon resets */ 53 53 mdelay(100); 54 54 55 55 if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) { ··· 61 61 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n", 62 62 oct->octeon_id); 63 63 64 - /* restore the reset value*/ 64 + /* Restore the reset value */ 65 65 octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF); 66 66 67 67 return 0; ··· 121 121 oqticks_per_us /= 1024; 122 122 123 123 /* time_intr is in microseconds. The next 2 steps gives the oq ticks 124 - * corressponding to time_intr. 124 + * corresponding to time_intr. 125 125 */ 126 126 oqticks_per_us *= time_intr_in_us; 127 127 oqticks_per_us /= 1000; ··· 136 136 u64 reg_val; 137 137 u64 temp; 138 138 139 - /* programming SRN and TRS for each MAC(0..3) */ 139 + /* Programming SRN and TRS for each MAC(0..3) */ 140 140 141 141 dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n", 142 142 __func__, mac_no); 143 - /* By default, mapping all 64 IOQs to a single MACs */ 143 + /* By default, map all 64 IOQs to a single MAC */ 144 144 145 145 reg_val = 146 146 octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)); ··· 164 164 temp = oct->sriov_info.max_vfs & 0xff; 165 165 reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS); 166 166 167 - /* write these settings to MAC register */ 167 + /* Write these settings to MAC register */ 168 168 octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num), 169 169 reg_val); 170 170 ··· 183 183 srn = oct->sriov_info.pf_srn; 184 184 ern = srn + oct->sriov_info.num_pf_rings; 185 185 186 - /*As per HRM reg description, s/w cant write 0 to ENB. */ 187 - /*to make the queue off, need to set the RST bit. */ 186 + /* As per HRM reg description, s/w can't write 0 to ENB. */ 187 + /* We need to set the RST bit, to turn the queue off. 
*/ 188 188 189 - /* Reset the Enable bit for all the 64 IQs. */ 189 + /* Reset the enable bit for all the 64 IQs. */ 190 190 for (q_no = srn; q_no < ern; q_no++) { 191 191 /* set RST bit to 1. This bit applies to both IQ and OQ */ 192 192 d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); ··· 194 194 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64); 195 195 } 196 196 197 - /*wait until the RST bit is clear or the RST and quite bits are set*/ 197 + /* Wait until the RST bit is clear or the RST and quiet bits are set */ 198 198 for (q_no = srn; q_no < ern; q_no++) { 199 199 u64 reg_val = octeon_read_csr64(oct, 200 200 CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); ··· 245 245 if (cn23xx_reset_io_queues(oct)) 246 246 return -1; 247 247 248 - /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg 249 - * for all queues.Only PF can set these bits. 248 + /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg 249 + * for all queues. Only PF can set these bits. 250 250 * bits 29:30 indicate the MAC num. 251 251 * bits 32:47 indicate the PVF num. 252 252 */ 253 253 for (q_no = 0; q_no < ern; q_no++) { 254 254 reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; 255 255 256 - /* for VF assigned queues. */ 256 + /* For VF assigned queues. */ 257 257 if (q_no < oct->sriov_info.pf_srn) { 258 258 vf_num = q_no / oct->sriov_info.rings_per_vf; 259 259 vf_num += 1; /* VF1, VF2,........ 
*/ ··· 268 268 reg_val); 269 269 } 270 270 271 - /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for 271 + /* Select ES, RO, NS, RDSIZE,DPTR Format#0 for 272 272 * pf queues 273 273 */ 274 274 for (q_no = srn; q_no < ern; q_no++) { ··· 289 289 octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), 290 290 reg_val); 291 291 292 - /* Set WMARK level for triggering PI_INT */ 292 + /* Set WMARK level to trigger PI_INT */ 293 293 /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */ 294 294 intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) & 295 295 CN23XX_PKT_IN_DONE_WMARK_MASK; ··· 354 354 /* set the ES bit */ 355 355 reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES); 356 356 357 - /* write all the selected settings */ 357 + /* Write all the selected settings */ 358 358 octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val); 359 359 360 360 /* Enabling these interrupt in oct->fn_list.enable_interrupt() ··· 373 373 /** Setting the water mark level for pko back pressure **/ 374 374 writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK); 375 375 376 - /** Disabling setting OQs in reset when ring has no dorebells 376 + /* Disabling setting OQs in reset when ring has no doorbells 377 377 * enabling this will cause of head of line blocking 378 378 */ 379 379 /* Do it only for pass1.1. 
and pass1.2 */ ··· 383 383 CN23XX_SLI_GBL_CONTROL) | 0x2, 384 384 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL); 385 385 386 - /** Enable channel-level backpressure */ 386 + /** Enable channel-level backpressure **/ 387 387 if (oct->pf_num) 388 388 writeq(0xffffffffffffffffULL, 389 389 (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S); ··· 396 396 { 397 397 cn23xx_enable_error_reporting(oct); 398 398 399 - /* program the MAC(0..3)_RINFO before setting up input/output regs */ 399 + /* Program the MAC(0..3)_RINFO before setting up input/output regs */ 400 400 cn23xx_setup_global_mac_regs(oct); 401 401 402 402 if (cn23xx_pf_setup_global_input_regs(oct)) ··· 410 410 octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL, 411 411 CN23XX_SLI_WINDOW_CTL_DEFAULT); 412 412 413 - /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */ 413 + /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */ 414 414 octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER); 415 415 return 0; 416 416 } ··· 574 574 mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr + 575 575 CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1); 576 576 577 - /*Mail Box Thread creation*/ 577 + /* Mail Box Thread creation */ 578 578 INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, 579 579 cn23xx_pf_mbox_thread); 580 580 mbox->mbox_poll_wk.ctxptr = (void *)mbox; ··· 626 626 ern = srn + oct->num_iqs; 627 627 628 628 for (q_no = srn; q_no < ern; q_no++) { 629 - /* set the corresponding IQ IS_64B bit */ 629 + /* Set the corresponding IQ IS_64B bit */ 630 630 if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) { 631 631 reg_val = octeon_read_csr64( 632 632 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); ··· 635 635 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); 636 636 } 637 637 638 - /* set the corresponding IQ ENB bit */ 638 + /* Set the corresponding IQ ENB bit */ 639 639 if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) { 640 640 /* IOQs are in reset by default in PEM2 mode, 641 641 * clearing reset bit ··· 681 681 } 682 
682 for (q_no = srn; q_no < ern; q_no++) { 683 683 u32 reg_val; 684 - /* set the corresponding OQ ENB bit */ 684 + /* Set the corresponding OQ ENB bit */ 685 685 if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) { 686 686 reg_val = octeon_read_csr( 687 687 oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); ··· 707 707 for (q_no = srn; q_no < ern; q_no++) { 708 708 loop = HZ; 709 709 710 - /* start the Reset for a particular ring */ 710 + /* Start the Reset for a particular ring */ 711 711 WRITE_ONCE(d64, octeon_read_csr64( 712 712 oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); 713 713 WRITE_ONCE(d64, READ_ONCE(d64) & ··· 740 740 loop = HZ; 741 741 742 742 /* Wait until hardware indicates that the particular IQ 743 - * is out of reset.It given that SLI_PKT_RING_RST is 743 + * is out of reset. Given that SLI_PKT_RING_RST is 744 744 * common for both IQs and OQs 745 745 */ 746 746 WRITE_ONCE(d64, octeon_read_csr64( ··· 760 760 schedule_timeout_uninterruptible(1); 761 761 } 762 762 763 - /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */ 763 + /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */ 764 764 WRITE_ONCE(d32, octeon_read_csr( 765 765 oct, CN23XX_SLI_OQ_PKTS_SENT(q_no))); 766 766 octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no), ··· 793 793 if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL)) 794 794 return ret; 795 795 796 - /* Write count reg in sli_pkt_cnts to clear these int.*/ 796 + /* Write count reg in sli_pkt_cnts to clear these int. */ 797 797 if ((pkts_sent & CN23XX_INTR_PO_INT) || 798 798 (pkts_sent & CN23XX_INTR_PI_INT)) { 799 799 if (pkts_sent & CN23XX_INTR_PO_INT) ··· 908 908 oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); 909 909 } 910 910 911 - /* always call with lock held */ 911 + /* Always call with lock held */ 912 912 static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq) 913 913 { 914 914 u32 new_idx; ··· 919 919 iq->pkt_in_done = pkt_in_done; 920 920 921 921 /* Modulo of the new index with the IQ size will give us 922 - * the new index. 
The iq->reset_instr_cnt is always zero for 922 + * the new index. The iq->reset_instr_cnt is always zero for 923 923 * cn23xx, so no extra adjustments are needed. 924 924 */ 925 925 new_idx = (iq->octeon_read_index + ··· 934 934 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; 935 935 u64 intr_val = 0; 936 936 937 - /* Divide the single write to multiple writes based on the flag. */ 938 - /* Enable Interrupt */ 937 + /* Divide the single write to multiple writes based on the flag. */ 938 + /* Enable Interrupts */ 939 939 if (intr_flag == OCTEON_ALL_INTR) { 940 940 writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64); 941 941 } else if (intr_flag & OCTEON_OUTPUT_INTR) { ··· 990 990 991 991 ret = 0; 992 992 993 - /** Read Function Dependency Link reg to get the function number */ 993 + /* Read Function Dependency Link reg to get the function number */ 994 994 if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, 995 995 &fdl_bit) == 0) { 996 996 oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & ··· 1003 1003 * In this case, read the PF number from the 1004 1004 * SLI_PKT0_INPUT_CONTROL reg (written by f/w) 1005 1005 */ 1006 - pkt0_in_ctl = octeon_read_csr64(oct, 1007 - CN23XX_SLI_IQ_PKT_CONTROL64(0)); 1006 + pkt0_in_ctl = 1007 + octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0)); 1008 1008 pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) & 1009 1009 CN23XX_PKT_INPUT_CTL_PF_NUM_MASK; 1010 1010 mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff; 1011 1011 1012 - /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/ 1012 + /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */ 1013 1013 d64 = octeon_read_csr64(oct, 1014 1014 CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum)); 1015 1015 trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff; ··· 1252 1252 u64 val; 1253 1253 1254 1254 /* If there's more than one active PF on this NIC, then that 1255 - * implies that the NIC firmware is loaded 
and running. This check 1255 + * implies that the NIC firmware is loaded and running. This check 1256 1256 * prevents a rare false negative that might occur if we only relied 1257 - * on checking the SCR2_BIT_FW_LOADED flag. The false negative would 1257 + * on checking the SCR2_BIT_FW_LOADED flag. The false negative would 1258 1258 * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even 1259 1259 * though the firmware was already loaded but still booting and has yet 1260 1260 * to set SCR2_BIT_FW_LOADED.