Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'add-cn20k-nix-and-npa-contexts'

Subbaraya Sundeep says:

====================
Add CN20K NIX and NPA contexts

The hardware contexts of blocks NIX and NPA in CN20K silicon are
different from those of the previous silicons CN10K and CN9K. This
patchset adds the new contexts of CN20K in AF and PF drivers.
A new mailbox for enqueuing contexts to hardware is added.

Patch 1 simplifies context writing and reading by using max context
size supported by hardware instead of using each context size.
Patches 2 and 3 add NIX block contexts in the AF driver and extend
debugfs to display those new contexts.
Patches 4 and 5 add NPA block contexts in the AF driver and extend
debugfs to display those new contexts.
Patch 6 omits NDC configuration since CN20K NPA does not use NDC
for caching its contexts.
Patches 7 and 8 use the new NIX and NPA contexts in the PF/VF driver.
Patches 9, 10 and 11 add support for the additional bandwidth profiles
present in CN20K for RX rate limiting and display the new profiles in debugfs.

v3: https://lore.kernel.org/all/1752772063-6160-1-git-send-email-sbhatta@marvell.com/
====================

Link: https://patch.msgid.link/1761388367-16579-1-git-send-email-sbhatta@marvell.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+1092 -55
+2 -1
drivers/net/ethernet/marvell/octeontx2/af/Makefile
··· 12 12 rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ 13 13 rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ 14 14 rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ 15 - rvu_rep.o cn20k/mbox_init.o 15 + rvu_rep.o cn20k/mbox_init.o cn20k/nix.o cn20k/debugfs.o \ 16 + cn20k/npa.o
+218
drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2024 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/fs.h> 9 + #include <linux/debugfs.h> 10 + #include <linux/module.h> 11 + #include <linux/pci.h> 12 + 13 + #include "struct.h" 14 + #include "debugfs.h" 15 + 16 + void print_nix_cn20k_sq_ctx(struct seq_file *m, 17 + struct nix_cn20k_sq_ctx_s *sq_ctx) 18 + { 19 + seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n", 20 + sq_ctx->ena, sq_ctx->qint_idx); 21 + seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n", 22 + sq_ctx->substream, sq_ctx->sdp_mcast); 23 + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n", 24 + sq_ctx->cq, sq_ctx->sqe_way_mask); 25 + 26 + seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n", 27 + sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff); 28 + seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n", 29 + sq_ctx->sso_ena, sq_ctx->smq_rr_weight); 30 + seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n", 31 + sq_ctx->default_chan, sq_ctx->sqb_count); 32 + 33 + seq_printf(m, "W1: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb); 34 + seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub); 35 + seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n", 36 + sq_ctx->sqb_aura, sq_ctx->sq_int); 37 + seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n", 38 + sq_ctx->sq_int_ena, sq_ctx->sqe_stype); 39 + 40 + seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n", 41 + sq_ctx->max_sqe_size, sq_ctx->cq_limit); 42 + seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n", 43 + sq_ctx->lmt_dis, sq_ctx->mnq_dis); 44 + seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n", 45 + sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum); 46 + seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n", 47 + sq_ctx->tail_offset, sq_ctx->smenq_offset); 48 + seq_printf(m, "W3: 
head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n", 49 + sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld); 50 + 51 + seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", 52 + sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); 53 + seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); 54 + seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); 55 + seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); 56 + seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", 57 + sq_ctx->smenq_next_sqb); 58 + 59 + seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); 60 + 61 + seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total); 62 + seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n", 63 + sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb); 64 + seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n", 65 + sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena); 66 + seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n", 67 + sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); 68 + 69 + seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", 70 + (u64)sq_ctx->scm_lso_rem); 71 + seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); 72 + seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); 73 + seq_printf(m, "W13: aged_drop_octs \t\t\t%llu\n\n", 74 + (u64)sq_ctx->aged_drop_octs); 75 + seq_printf(m, "W13: aged_drop_pkts \t\t\t%llu\n\n", 76 + (u64)sq_ctx->aged_drop_pkts); 77 + seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", 78 + (u64)sq_ctx->dropped_octs); 79 + seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", 80 + (u64)sq_ctx->dropped_pkts); 81 + } 82 + 83 + void print_nix_cn20k_cq_ctx(struct seq_file *m, 84 + struct nix_cn20k_aq_enq_rsp *rsp) 85 + { 86 + struct nix_cn20k_cq_ctx_s *cq_ctx = &rsp->cq; 87 + 88 + seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); 89 + 90 + seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); 91 + seq_printf(m, "W1: avg_con 
\t\t\t%d\nW1: cint_idx \t\t\t%d\n", 92 + cq_ctx->avg_con, cq_ctx->cint_idx); 93 + seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n", 94 + cq_ctx->cq_err, cq_ctx->qint_idx); 95 + seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", 96 + cq_ctx->bpid, cq_ctx->bp_ena); 97 + 98 + seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high); 99 + seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med); 100 + seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low); 101 + seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n", 102 + cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 | 103 + cq_ctx->lbpid_low); 104 + seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena); 105 + 106 + seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n", 107 + cq_ctx->update_time, cq_ctx->avg_level); 108 + seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n", 109 + cq_ctx->head, cq_ctx->tail); 110 + 111 + seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n", 112 + cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); 113 + seq_printf(m, "W3: qsize \t\t\t%d\nW3:stashing \t\t\t%d\n", 114 + cq_ctx->qsize, cq_ctx->stashing); 115 + 116 + seq_printf(m, "W3: caching \t\t\t%d\n", cq_ctx->caching); 117 + seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac); 118 + seq_printf(m, "W3: stash_thresh \t\t\t%d\n", 119 + cq_ctx->stash_thresh); 120 + 121 + seq_printf(m, "W3: msh_valid \t\t\t%d\nW3:msh_dst \t\t\t%d\n", 122 + cq_ctx->msh_valid, cq_ctx->msh_dst); 123 + 124 + seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n", 125 + cq_ctx->cpt_drop_err_en); 126 + seq_printf(m, "W3: ena \t\t\t%d\n", 127 + cq_ctx->ena); 128 + seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", 129 + cq_ctx->drop_ena, cq_ctx->drop); 130 + seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); 131 + 132 + seq_printf(m, "W4: lbpid_ext \t\t\t\t%d\n\n", cq_ctx->lbpid_ext); 133 + seq_printf(m, "W4: bpid_ext \t\t\t\t%d\n\n", cq_ctx->bpid_ext); 134 + } 135 + 136 + void 
print_npa_cn20k_aura_ctx(struct seq_file *m, 137 + struct npa_cn20k_aq_enq_rsp *rsp) 138 + { 139 + struct npa_cn20k_aura_s *aura = &rsp->aura; 140 + 141 + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); 142 + 143 + seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", 144 + aura->ena, aura->pool_caching); 145 + seq_printf(m, "W1: avg con\t\t%d\n", aura->avg_con); 146 + seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n", 147 + aura->pool_drop_ena, aura->aura_drop_ena); 148 + seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n", 149 + aura->bp_ena, aura->aura_drop); 150 + seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n", 151 + aura->shift, aura->avg_level); 152 + 153 + seq_printf(m, "W2: count\t\t%llu\nW2: nix_bpid\t\t%d\n", 154 + (u64)aura->count, aura->bpid); 155 + 156 + seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", 157 + (u64)aura->limit, aura->bp, aura->fc_ena); 158 + 159 + seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", 160 + aura->fc_up_crossing, aura->fc_stype); 161 + seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); 162 + 163 + seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr); 164 + 165 + seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n", 166 + aura->pool_drop, aura->update_time); 167 + seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n", 168 + aura->err_int, aura->err_int_ena); 169 + seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n", 170 + aura->thresh_int, aura->thresh_int_ena); 171 + seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n", 172 + aura->thresh_up, aura->thresh_qint_idx); 173 + seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); 174 + 175 + seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); 176 + seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst); 177 + } 178 + 179 + void print_npa_cn20k_pool_ctx(struct seq_file *m, 180 + struct npa_cn20k_aq_enq_rsp *rsp) 181 + { 182 + struct 
npa_cn20k_pool_s *pool = &rsp->pool; 183 + 184 + seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); 185 + 186 + seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", 187 + pool->ena, pool->nat_align); 188 + seq_printf(m, "W1: stack_caching\t%d\n", 189 + pool->stack_caching); 190 + seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n", 191 + pool->buf_offset, pool->buf_size); 192 + 193 + seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n", 194 + pool->stack_max_pages, pool->stack_pages); 195 + 196 + seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n", 197 + pool->stack_offset, pool->shift, pool->avg_level); 198 + seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n", 199 + pool->avg_con, pool->fc_ena, pool->fc_stype); 200 + seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", 201 + pool->fc_hyst_bits, pool->fc_up_crossing); 202 + seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); 203 + 204 + seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); 205 + 206 + seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start); 207 + 208 + seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end); 209 + 210 + seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n", 211 + pool->err_int, pool->err_int_ena); 212 + seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); 213 + seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", 214 + pool->thresh_int_ena, pool->thresh_up); 215 + seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n", 216 + pool->thresh_qint_idx, pool->err_qint_idx); 217 + seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst); 218 + }
+28
drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell OcteonTx2 CGX driver 3 + * 4 + * Copyright (C) 2024 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef DEBUFS_H 9 + #define DEBUFS_H 10 + 11 + #include <linux/fs.h> 12 + #include <linux/debugfs.h> 13 + #include <linux/module.h> 14 + #include <linux/pci.h> 15 + 16 + #include "struct.h" 17 + #include "../mbox.h" 18 + 19 + void print_nix_cn20k_sq_ctx(struct seq_file *m, 20 + struct nix_cn20k_sq_ctx_s *sq_ctx); 21 + void print_nix_cn20k_cq_ctx(struct seq_file *m, 22 + struct nix_cn20k_aq_enq_rsp *rsp); 23 + void print_npa_cn20k_aura_ctx(struct seq_file *m, 24 + struct npa_cn20k_aq_enq_rsp *rsp); 25 + void print_npa_cn20k_pool_ctx(struct seq_file *m, 26 + struct npa_cn20k_aq_enq_rsp *rsp); 27 + 28 + #endif
+20
drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2024 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/module.h> 9 + #include <linux/pci.h> 10 + 11 + #include "struct.h" 12 + #include "../rvu.h" 13 + 14 + int rvu_mbox_handler_nix_cn20k_aq_enq(struct rvu *rvu, 15 + struct nix_cn20k_aq_enq_req *req, 16 + struct nix_cn20k_aq_enq_rsp *rsp) 17 + { 18 + return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 19 + (struct nix_aq_enq_rsp *)rsp); 20 + }
+21
drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell RVU Admin Function driver 3 + * 4 + * Copyright (C) 2024 Marvell. 5 + * 6 + */ 7 + 8 + #include <linux/module.h> 9 + #include <linux/pci.h> 10 + 11 + #include "struct.h" 12 + #include "../rvu.h" 13 + 14 + int rvu_mbox_handler_npa_cn20k_aq_enq(struct rvu *rvu, 15 + struct npa_cn20k_aq_enq_req *req, 16 + struct npa_cn20k_aq_enq_rsp *rsp) 17 + { 18 + return rvu_npa_aq_enq_inst(rvu, (struct npa_aq_enq_req *)req, 19 + (struct npa_aq_enq_rsp *)rsp); 20 + } 21 + EXPORT_SYMBOL(rvu_mbox_handler_npa_cn20k_aq_enq);
+340
drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
··· 8 8 #ifndef STRUCT_H 9 9 #define STRUCT_H 10 10 11 + #define NIX_MAX_CTX_SIZE 128 12 + 11 13 /* 12 14 * CN20k RVU PF MBOX Interrupt Vector Enumeration 13 15 * ··· 39 37 RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9, 40 38 RVU_AF_CN20K_INT_VEC_CNT = 0xa, 41 39 }; 40 + 41 + struct nix_cn20k_sq_ctx_s { 42 + u64 ena : 1; /* W0 */ 43 + u64 qint_idx : 6; 44 + u64 substream : 20; 45 + u64 sdp_mcast : 1; 46 + u64 cq : 20; 47 + u64 sqe_way_mask : 16; 48 + u64 smq : 11; /* W1 */ 49 + u64 cq_ena : 1; 50 + u64 xoff : 1; 51 + u64 sso_ena : 1; 52 + u64 smq_rr_weight : 14; 53 + u64 default_chan : 12; 54 + u64 sqb_count : 16; 55 + u64 reserved_120_120 : 1; 56 + u64 smq_rr_count_lb : 7; 57 + u64 smq_rr_count_ub : 25; /* W2 */ 58 + u64 sqb_aura : 20; 59 + u64 sq_int : 8; 60 + u64 sq_int_ena : 8; 61 + u64 sqe_stype : 2; 62 + u64 reserved_191_191 : 1; 63 + u64 max_sqe_size : 2; /* W3 */ 64 + u64 cq_limit : 8; 65 + u64 lmt_dis : 1; 66 + u64 mnq_dis : 1; 67 + u64 smq_next_sq : 20; 68 + u64 smq_lso_segnum : 8; 69 + u64 tail_offset : 6; 70 + u64 smenq_offset : 6; 71 + u64 head_offset : 6; 72 + u64 smenq_next_sqb_vld : 1; 73 + u64 smq_pend : 1; 74 + u64 smq_next_sq_vld : 1; 75 + u64 reserved_253_255 : 3; 76 + u64 next_sqb : 64; /* W4 */ 77 + u64 tail_sqb : 64; /* W5 */ 78 + u64 smenq_sqb : 64; /* W6 */ 79 + u64 smenq_next_sqb : 64; /* W7 */ 80 + u64 head_sqb : 64; /* W8 */ 81 + u64 reserved_576_583 : 8; /* W9 */ 82 + u64 vfi_lso_total : 18; 83 + u64 vfi_lso_sizem1 : 3; 84 + u64 vfi_lso_sb : 8; 85 + u64 vfi_lso_mps : 14; 86 + u64 vfi_lso_vlan0_ins_ena : 1; 87 + u64 vfi_lso_vlan1_ins_ena : 1; 88 + u64 vfi_lso_vld : 1; 89 + u64 reserved_630_639 : 10; 90 + u64 scm_lso_rem : 18; /* W10 */ 91 + u64 reserved_658_703 : 46; 92 + u64 octs : 48; /* W11 */ 93 + u64 reserved_752_767 : 16; 94 + u64 pkts : 48; /* W12 */ 95 + u64 reserved_816_831 : 16; 96 + u64 aged_drop_octs : 32; /* W13 */ 97 + u64 aged_drop_pkts : 32; 98 + u64 dropped_octs : 48; /* W14 */ 99 + u64 reserved_944_959 : 16; 100 + u64 
dropped_pkts : 48; /* W15 */ 101 + u64 reserved_1008_1023 : 16; 102 + }; 103 + 104 + static_assert(sizeof(struct nix_cn20k_sq_ctx_s) == NIX_MAX_CTX_SIZE); 105 + 106 + struct nix_cn20k_cq_ctx_s { 107 + u64 base : 64; /* W0 */ 108 + u64 lbp_ena : 1; /* W1 */ 109 + u64 lbpid_low : 3; 110 + u64 bp_ena : 1; 111 + u64 lbpid_med : 3; 112 + u64 bpid : 9; 113 + u64 lbpid_high : 3; 114 + u64 qint_idx : 7; 115 + u64 cq_err : 1; 116 + u64 cint_idx : 7; 117 + u64 avg_con : 9; 118 + u64 wrptr : 20; 119 + u64 tail : 20; /* W2 */ 120 + u64 head : 20; 121 + u64 avg_level : 8; 122 + u64 update_time : 16; 123 + u64 bp : 8; /* W3 */ 124 + u64 drop : 8; 125 + u64 drop_ena : 1; 126 + u64 ena : 1; 127 + u64 cpt_drop_err_en : 1; 128 + u64 reserved_211_211 : 1; 129 + u64 msh_dst : 11; 130 + u64 msh_valid : 1; 131 + u64 stash_thresh : 4; 132 + u64 lbp_frac : 4; 133 + u64 caching : 1; 134 + u64 stashing : 1; 135 + u64 reserved_234_235 : 2; 136 + u64 qsize : 4; 137 + u64 cq_err_int : 8; 138 + u64 cq_err_int_ena : 8; 139 + u64 bpid_ext : 2; /* W4 */ 140 + u64 reserved_258_259 : 2; 141 + u64 lbpid_ext : 2; 142 + u64 reserved_262_319 : 58; 143 + u64 reserved_320_383 : 64; /* W5 */ 144 + u64 reserved_384_447 : 64; /* W6 */ 145 + u64 reserved_448_511 : 64; /* W7 */ 146 + u64 padding[8]; 147 + }; 148 + 149 + static_assert(sizeof(struct nix_cn20k_sq_ctx_s) == NIX_MAX_CTX_SIZE); 150 + 151 + struct nix_cn20k_rq_ctx_s { 152 + u64 ena : 1; 153 + u64 sso_ena : 1; 154 + u64 ipsech_ena : 1; 155 + u64 ena_wqwd : 1; 156 + u64 cq : 20; 157 + u64 reserved_24_34 : 11; 158 + u64 port_il4_dis : 1; 159 + u64 port_ol4_dis : 1; 160 + u64 lenerr_dis : 1; 161 + u64 csum_il4_dis : 1; 162 + u64 csum_ol4_dis : 1; 163 + u64 len_il4_dis : 1; 164 + u64 len_il3_dis : 1; 165 + u64 len_ol4_dis : 1; 166 + u64 len_ol3_dis : 1; 167 + u64 wqe_aura : 20; 168 + u64 spb_aura : 20; 169 + u64 lpb_aura : 20; 170 + u64 sso_grp : 10; 171 + u64 sso_tt : 2; 172 + u64 pb_caching : 2; 173 + u64 wqe_caching : 1; 174 + u64 xqe_drop_ena : 1; 175 
+ u64 spb_drop_ena : 1; 176 + u64 lpb_drop_ena : 1; 177 + u64 pb_stashing : 1; 178 + u64 ipsecd_drop_en : 1; 179 + u64 chi_ena : 1; 180 + u64 reserved_125_127 : 3; 181 + u64 band_prof_id_l : 10; 182 + u64 sso_fc_ena : 1; 183 + u64 policer_ena : 1; 184 + u64 spb_sizem1 : 6; 185 + u64 wqe_skip : 2; 186 + u64 spb_high_sizem1 : 3; 187 + u64 spb_ena : 1; 188 + u64 lpb_sizem1 : 12; 189 + u64 first_skip : 7; 190 + u64 reserved_171_171 : 1; 191 + u64 later_skip : 6; 192 + u64 xqe_imm_size : 6; 193 + u64 band_prof_id_h : 4; 194 + u64 reserved_188_189 : 2; 195 + u64 xqe_imm_copy : 1; 196 + u64 xqe_hdr_split : 1; 197 + u64 xqe_drop : 8; 198 + u64 xqe_pass : 8; 199 + u64 wqe_pool_drop : 8; 200 + u64 wqe_pool_pass : 8; 201 + u64 spb_aura_drop : 8; 202 + u64 spb_aura_pass : 8; 203 + u64 spb_pool_drop : 8; 204 + u64 spb_pool_pass : 8; 205 + u64 lpb_aura_drop : 8; 206 + u64 lpb_aura_pass : 8; 207 + u64 lpb_pool_drop : 8; 208 + u64 lpb_pool_pass : 8; 209 + u64 reserved_288_291 : 4; 210 + u64 rq_int : 8; 211 + u64 rq_int_ena : 8; 212 + u64 qint_idx : 7; 213 + u64 reserved_315_319 : 5; 214 + u64 ltag : 24; 215 + u64 good_utag : 8; 216 + u64 bad_utag : 8; 217 + u64 flow_tagw : 6; 218 + u64 ipsec_vwqe : 1; 219 + u64 vwqe_ena : 1; 220 + u64 vtime_wait : 8; 221 + u64 max_vsize_exp : 4; 222 + u64 vwqe_skip : 2; 223 + u64 reserved_382_383 : 2; 224 + u64 octs : 48; 225 + u64 reserved_432_447 : 16; 226 + u64 pkts : 48; 227 + u64 reserved_496_511 : 16; 228 + u64 drop_octs : 48; 229 + u64 reserved_560_575 : 16; 230 + u64 drop_pkts : 48; 231 + u64 reserved_624_639 : 16; 232 + u64 re_pkts : 48; 233 + u64 reserved_688_703 : 16; 234 + u64 reserved_704_767 : 64; 235 + u64 reserved_768_831 : 64; 236 + u64 reserved_832_895 : 64; 237 + u64 reserved_896_959 : 64; 238 + u64 reserved_960_1023 : 64; 239 + }; 240 + 241 + static_assert(sizeof(struct nix_cn20k_rq_ctx_s) == NIX_MAX_CTX_SIZE); 242 + 243 + struct npa_cn20k_aura_s { 244 + u64 pool_addr; /* W0 */ 245 + u64 ena : 1; /* W1 */ 246 + u64 reserved_65 
: 2; 247 + u64 pool_caching : 1; 248 + u64 reserved_68 : 16; 249 + u64 avg_con : 9; 250 + u64 reserved_93 : 1; 251 + u64 pool_drop_ena : 1; 252 + u64 aura_drop_ena : 1; 253 + u64 bp_ena : 1; 254 + u64 reserved_97_103 : 7; 255 + u64 aura_drop : 8; 256 + u64 shift : 6; 257 + u64 reserved_118_119 : 2; 258 + u64 avg_level : 8; 259 + u64 count : 36; /* W2 */ 260 + u64 reserved_164_167 : 4; 261 + u64 bpid : 12; 262 + u64 reserved_180_191 : 12; 263 + u64 limit : 36; /* W3 */ 264 + u64 reserved_228_231 : 4; 265 + u64 bp : 7; 266 + u64 reserved_239_243 : 5; 267 + u64 fc_ena : 1; 268 + u64 fc_up_crossing : 1; 269 + u64 fc_stype : 2; 270 + u64 fc_hyst_bits : 4; 271 + u64 reserved_252_255 : 4; 272 + u64 fc_addr; /* W4 */ 273 + u64 pool_drop : 8; /* W5 */ 274 + u64 update_time : 16; 275 + u64 err_int : 8; 276 + u64 err_int_ena : 8; 277 + u64 thresh_int : 1; 278 + u64 thresh_int_ena : 1; 279 + u64 thresh_up : 1; 280 + u64 reserved_363 : 1; 281 + u64 thresh_qint_idx : 7; 282 + u64 reserved_371 : 1; 283 + u64 err_qint_idx : 7; 284 + u64 reserved_379_383 : 5; 285 + u64 thresh : 36; /* W6*/ 286 + u64 rsvd_423_420 : 4; 287 + u64 fc_msh_dst : 11; 288 + u64 reserved_435_438 : 4; 289 + u64 op_dpc_ena : 1; 290 + u64 op_dpc_set : 5; 291 + u64 reserved_445_445 : 1; 292 + u64 stream_ctx : 1; 293 + u64 unified_ctx : 1; 294 + u64 reserved_448_511; /* W7 */ 295 + u64 padding[8]; 296 + }; 297 + 298 + static_assert(sizeof(struct npa_cn20k_aura_s) == NIX_MAX_CTX_SIZE); 299 + 300 + struct npa_cn20k_pool_s { 301 + u64 stack_base; /* W0 */ 302 + u64 ena : 1; 303 + u64 nat_align : 1; 304 + u64 reserved_66_67 : 2; 305 + u64 stack_caching : 1; 306 + u64 reserved_69_87 : 19; 307 + u64 buf_offset : 12; 308 + u64 reserved_100_103 : 4; 309 + u64 buf_size : 12; 310 + u64 reserved_116_119 : 4; 311 + u64 ref_cnt_prof : 3; 312 + u64 reserved_123_127 : 5; 313 + u64 stack_max_pages : 32; 314 + u64 stack_pages : 32; 315 + uint64_t bp_0 : 7; 316 + uint64_t bp_1 : 7; 317 + uint64_t bp_2 : 7; 318 + uint64_t bp_3 : 
7; 319 + uint64_t bp_4 : 7; 320 + uint64_t bp_5 : 7; 321 + uint64_t bp_6 : 7; 322 + uint64_t bp_7 : 7; 323 + uint64_t bp_ena_0 : 1; 324 + uint64_t bp_ena_1 : 1; 325 + uint64_t bp_ena_2 : 1; 326 + uint64_t bp_ena_3 : 1; 327 + uint64_t bp_ena_4 : 1; 328 + uint64_t bp_ena_5 : 1; 329 + uint64_t bp_ena_6 : 1; 330 + uint64_t bp_ena_7 : 1; 331 + u64 stack_offset : 4; 332 + u64 reserved_260_263 : 4; 333 + u64 shift : 6; 334 + u64 reserved_270_271 : 2; 335 + u64 avg_level : 8; 336 + u64 avg_con : 9; 337 + u64 fc_ena : 1; 338 + u64 fc_stype : 2; 339 + u64 fc_hyst_bits : 4; 340 + u64 fc_up_crossing : 1; 341 + u64 reserved_297_299 : 3; 342 + u64 update_time : 16; 343 + u64 reserved_316_319 : 4; 344 + u64 fc_addr; /* W5 */ 345 + u64 ptr_start; /* W6 */ 346 + u64 ptr_end; /* W7 */ 347 + u64 bpid_0 : 12; 348 + u64 reserved_524_535 : 12; 349 + u64 err_int : 8; 350 + u64 err_int_ena : 8; 351 + u64 thresh_int : 1; 352 + u64 thresh_int_ena : 1; 353 + u64 thresh_up : 1; 354 + u64 reserved_555 : 1; 355 + u64 thresh_qint_idx : 7; 356 + u64 reserved_563 : 1; 357 + u64 err_qint_idx : 7; 358 + u64 reserved_571_575 : 5; 359 + u64 thresh : 36; 360 + u64 rsvd_612_615 : 4; 361 + u64 fc_msh_dst : 11; 362 + u64 reserved_627_630 : 4; 363 + u64 op_dpc_ena : 1; 364 + u64 op_dpc_set : 5; 365 + u64 reserved_637_637 : 1; 366 + u64 stream_ctx : 1; 367 + u64 reserved_639 : 1; 368 + u64 reserved_640_703; /* W10 */ 369 + u64 reserved_704_767; /* W11 */ 370 + u64 reserved_768_831; /* W12 */ 371 + u64 reserved_832_895; /* W13 */ 372 + u64 reserved_896_959; /* W14 */ 373 + u64 reserved_960_1023; /* W15 */ 374 + }; 375 + 376 + static_assert(sizeof(struct npa_cn20k_pool_s) == NIX_MAX_CTX_SIZE); 377 + 42 378 #endif
+73
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
··· 203 203 M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \ 204 204 M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \ 205 205 M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\ 206 + M(NPA_CN20K_AQ_ENQ, 0x404, npa_cn20k_aq_enq, npa_cn20k_aq_enq_req, \ 207 + npa_cn20k_aq_enq_rsp) \ 206 208 /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \ 207 209 /* TIM mbox IDs (range 0x800 - 0x9FF) */ \ 208 210 /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \ ··· 338 336 nix_mcast_grp_update_req, \ 339 337 nix_mcast_grp_update_rsp) \ 340 338 M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \ 339 + M(NIX_CN20K_AQ_ENQ, 0x802f, nix_cn20k_aq_enq, nix_cn20k_aq_enq_req, \ 340 + nix_cn20k_aq_enq_rsp) \ 341 341 /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ 342 342 M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ 343 343 mcs_alloc_rsrc_rsp) \ ··· 836 832 }; 837 833 }; 838 834 835 + struct npa_cn20k_aq_enq_req { 836 + struct mbox_msghdr hdr; 837 + u32 aura_id; 838 + u8 ctype; 839 + u8 op; 840 + union { 841 + /* Valid when op == WRITE/INIT and ctype == AURA. 842 + * LF fills the pool_id in aura.pool_addr. AF will translate 843 + * the pool_id to pool context pointer. 
844 + */ 845 + struct npa_cn20k_aura_s aura; 846 + /* Valid when op == WRITE/INIT and ctype == POOL */ 847 + struct npa_cn20k_pool_s pool; 848 + }; 849 + /* Mask data when op == WRITE (1=write, 0=don't write) */ 850 + union { 851 + /* Valid when op == WRITE and ctype == AURA */ 852 + struct npa_cn20k_aura_s aura_mask; 853 + /* Valid when op == WRITE and ctype == POOL */ 854 + struct npa_cn20k_pool_s pool_mask; 855 + }; 856 + }; 857 + 858 + struct npa_cn20k_aq_enq_rsp { 859 + struct mbox_msghdr hdr; 860 + union { 861 + /* Valid when op == READ and ctype == AURA */ 862 + struct npa_cn20k_aura_s aura; 863 + /* Valid when op == READ and ctype == POOL */ 864 + struct npa_cn20k_pool_s pool; 865 + }; 866 + }; 867 + 839 868 /* Disable all contexts of type 'ctype' */ 840 869 struct hwctx_disable_req { 841 870 struct mbox_msghdr hdr; ··· 975 938 #define NIX_LF_DISABLE_FLOWS BIT_ULL(0) 976 939 #define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1) 977 940 u64 flags; 941 + }; 942 + 943 + /* CN20K NIX AQ enqueue msg */ 944 + struct nix_cn20k_aq_enq_req { 945 + struct mbox_msghdr hdr; 946 + u32 qidx; 947 + u8 ctype; 948 + u8 op; 949 + union { 950 + struct nix_cn20k_rq_ctx_s rq; 951 + struct nix_cn20k_sq_ctx_s sq; 952 + struct nix_cn20k_cq_ctx_s cq; 953 + struct nix_rsse_s rss; 954 + struct nix_rx_mce_s mce; 955 + struct nix_bandprof_s prof; 956 + }; 957 + union { 958 + struct nix_cn20k_rq_ctx_s rq_mask; 959 + struct nix_cn20k_sq_ctx_s sq_mask; 960 + struct nix_cn20k_cq_ctx_s cq_mask; 961 + struct nix_rsse_s rss_mask; 962 + struct nix_rx_mce_s mce_mask; 963 + struct nix_bandprof_s prof_mask; 964 + }; 965 + }; 966 + 967 + struct nix_cn20k_aq_enq_rsp { 968 + struct mbox_msghdr hdr; 969 + union { 970 + struct nix_cn20k_rq_ctx_s rq; 971 + struct nix_cn20k_sq_ctx_s sq; 972 + struct nix_cn20k_cq_ctx_s cq; 973 + struct nix_rsse_s rss; 974 + struct nix_rx_mce_s mce; 975 + struct nix_bandprof_s prof; 976 + }; 978 977 }; 979 978 980 979 /* CN10K NIX AQ enqueue msg */
+14 -1
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
··· 498 498 u8 reserved[RVU_CHANL_INFO_RESERVED]; 499 499 }; 500 500 501 + struct altaf_intr_notify { 502 + unsigned long flr_pf_bmap[2]; 503 + unsigned long flr_vf_bmap[2]; 504 + unsigned long gint_paddr; 505 + unsigned long gint_iova_addr; 506 + unsigned long reserved[6]; 507 + }; 508 + 501 509 struct rvu_fwdata { 502 510 #define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/ 503 511 #define RVU_FWDATA_VERSION 0x0001 ··· 525 517 u32 ptp_ext_clk_rate; 526 518 u32 ptp_ext_tstamp; 527 519 struct channel_fwdata channel_data; 528 - #define FWDATA_RESERVED_MEM 958 520 + struct altaf_intr_notify altaf_intr_info; 521 + #define FWDATA_RESERVED_MEM 946 529 522 u64 reserved[FWDATA_RESERVED_MEM]; 530 523 #define CGX_MAX 9 531 524 #define CGX_LMACS_MAX 4 ··· 657 648 658 649 struct mutex mbox_lock; /* Serialize mbox up and down msgs */ 659 650 u16 rep_pcifunc; 651 + bool altaf_ready; 660 652 int rep_cnt; 661 653 u16 *rep2pfvf_map; 662 654 u8 rep_mode; ··· 1042 1032 int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, 1043 1033 int blkaddr, int nixlf); 1044 1034 void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr); 1035 + int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 1036 + struct nix_aq_enq_rsp *rsp); 1037 + 1045 1038 /* NPC APIs */ 1046 1039 void rvu_npc_freemem(struct rvu *rvu); 1047 1040 int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
+37 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
··· 21 21 #include "rvu_npc_hash.h" 22 22 #include "mcs.h" 23 23 24 + #include "cn20k/debugfs.h" 25 + 24 26 #define DEBUGFS_DIR_NAME "octeontx2" 25 27 26 28 enum { ··· 1103 1101 struct npa_aura_s *aura = &rsp->aura; 1104 1102 struct rvu *rvu = m->private; 1105 1103 1104 + if (is_cn20k(rvu->pdev)) { 1105 + print_npa_cn20k_aura_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp); 1106 + return; 1107 + } 1108 + 1106 1109 seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); 1107 1110 1108 1111 seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", ··· 1155 1148 { 1156 1149 struct npa_pool_s *pool = &rsp->pool; 1157 1150 struct rvu *rvu = m->private; 1151 + 1152 + if (is_cn20k(rvu->pdev)) { 1153 + print_npa_cn20k_pool_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp); 1154 + return; 1155 + } 1158 1156 1159 1157 seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); 1160 1158 ··· 2021 2009 struct nix_hw *nix_hw = m->private; 2022 2010 struct rvu *rvu = nix_hw->rvu; 2023 2011 2012 + if (is_cn20k(rvu->pdev)) { 2013 + print_nix_cn20k_sq_ctx(m, (struct nix_cn20k_sq_ctx_s *)sq_ctx); 2014 + return; 2015 + } 2016 + 2024 2017 if (!is_rvu_otx2(rvu)) { 2025 2018 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); 2026 2019 return; 2027 2020 } 2021 + 2028 2022 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", 2029 2023 sq_ctx->sqe_way_mask, sq_ctx->cq); 2030 2024 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", ··· 2121 2103 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n", 2122 2104 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena); 2123 2105 2124 - seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id); 2106 + seq_printf(m, "W2: band_prof_id \t\t%d\n", 2107 + (u16)rq_ctx->band_prof_id_h << 10 | rq_ctx->band_prof_id); 2108 + 2125 2109 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena); 2126 2110 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1); 2127 2111 seq_printf(m, "W2: wqe_skip 
\t\t\t%d\nW2: sqb_ena \t\t\t%d\n", ··· 2245 2225 struct nix_hw *nix_hw = m->private; 2246 2226 struct rvu *rvu = nix_hw->rvu; 2247 2227 2228 + if (is_cn20k(rvu->pdev)) { 2229 + print_nix_cn20k_cq_ctx(m, (struct nix_cn20k_aq_enq_rsp *)rsp); 2230 + return; 2231 + } 2232 + 2248 2233 seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); 2249 2234 2250 2235 seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); ··· 2279 2254 cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); 2280 2255 seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n", 2281 2256 cq_ctx->qsize, cq_ctx->caching); 2257 + 2282 2258 seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", 2283 2259 cq_ctx->substream, cq_ctx->ena); 2284 2260 if (!is_rvu_otx2(rvu)) { ··· 2641 2615 (prof->rc_action == 1) ? "DROP" : "RED"; 2642 2616 seq_printf(m, "W1: rc_action\t\t%s\n", str); 2643 2617 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); 2644 - seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); 2618 + 2619 + seq_printf(m, "W1: band_prof_id\t%d\n", 2620 + (u16)prof->band_prof_id_h << 7 | prof->band_prof_id); 2621 + 2645 2622 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); 2646 2623 2647 2624 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); ··· 2813 2784 &rvu_dbg_npa_aura_ctx_fops); 2814 2785 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, 2815 2786 &rvu_dbg_npa_pool_ctx_fops); 2787 + 2788 + if (is_cn20k(rvu->pdev)) /* NDC not appliable for cn20k */ 2789 + return; 2816 2790 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, 2817 2791 &rvu_dbg_npa_ndc_cache_fops); 2818 2792 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, ··· 3982 3950 3983 3951 static const char *rvu_get_dbg_dir_name(struct rvu *rvu) 3984 3952 { 3953 + if (is_cn20k(rvu->pdev)) 3954 + return "cn20k"; 3955 + 3985 3956 if (!is_rvu_otx2(rvu)) 3986 3957 return "cn10k"; 3987 3958 else
+47 -29
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 1019 1019 { 1020 1020 struct nix_cn10k_aq_enq_req *aq_req; 1021 1021 1022 + if (is_cn20k(rvu->pdev)) { 1023 + *smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq; 1024 + *smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq; 1025 + return; 1026 + } 1027 + 1022 1028 if (!is_rvu_otx2(rvu)) { 1023 1029 aq_req = (struct nix_cn10k_aq_enq_req *)req; 1024 1030 *smq = aq_req->sq.smq; ··· 1155 1149 case NIX_AQ_INSTOP_WRITE: 1156 1150 if (req->ctype == NIX_AQ_CTYPE_RQ) 1157 1151 memcpy(mask, &req->rq_mask, 1158 - sizeof(struct nix_rq_ctx_s)); 1152 + NIX_MAX_CTX_SIZE); 1159 1153 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1160 1154 memcpy(mask, &req->sq_mask, 1161 - sizeof(struct nix_sq_ctx_s)); 1155 + NIX_MAX_CTX_SIZE); 1162 1156 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1163 1157 memcpy(mask, &req->cq_mask, 1164 - sizeof(struct nix_cq_ctx_s)); 1158 + NIX_MAX_CTX_SIZE); 1165 1159 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1166 1160 memcpy(mask, &req->rss_mask, 1167 - sizeof(struct nix_rsse_s)); 1161 + NIX_MAX_CTX_SIZE); 1168 1162 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1169 1163 memcpy(mask, &req->mce_mask, 1170 - sizeof(struct nix_rx_mce_s)); 1164 + NIX_MAX_CTX_SIZE); 1171 1165 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1172 1166 memcpy(mask, &req->prof_mask, 1173 - sizeof(struct nix_bandprof_s)); 1167 + NIX_MAX_CTX_SIZE); 1174 1168 fallthrough; 1175 1169 case NIX_AQ_INSTOP_INIT: 1176 1170 if (req->ctype == NIX_AQ_CTYPE_RQ) 1177 - memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); 1171 + memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE); 1178 1172 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1179 - memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); 1173 + memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE); 1180 1174 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1181 - memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); 1175 + memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE); 1182 1176 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1183 - memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); 1177 + memcpy(ctx, 
&req->rss, NIX_MAX_CTX_SIZE); 1184 1178 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1185 - memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); 1179 + memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE); 1186 1180 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1187 - memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); 1181 + memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE); 1188 1182 break; 1189 1183 case NIX_AQ_INSTOP_NOP: 1190 1184 case NIX_AQ_INSTOP_READ: ··· 1249 1243 if (req->op == NIX_AQ_INSTOP_READ) { 1250 1244 if (req->ctype == NIX_AQ_CTYPE_RQ) 1251 1245 memcpy(&rsp->rq, ctx, 1252 - sizeof(struct nix_rq_ctx_s)); 1246 + NIX_MAX_CTX_SIZE); 1253 1247 else if (req->ctype == NIX_AQ_CTYPE_SQ) 1254 1248 memcpy(&rsp->sq, ctx, 1255 - sizeof(struct nix_sq_ctx_s)); 1249 + NIX_MAX_CTX_SIZE); 1256 1250 else if (req->ctype == NIX_AQ_CTYPE_CQ) 1257 1251 memcpy(&rsp->cq, ctx, 1258 - sizeof(struct nix_cq_ctx_s)); 1252 + NIX_MAX_CTX_SIZE); 1259 1253 else if (req->ctype == NIX_AQ_CTYPE_RSS) 1260 1254 memcpy(&rsp->rss, ctx, 1261 - sizeof(struct nix_rsse_s)); 1255 + NIX_MAX_CTX_SIZE); 1262 1256 else if (req->ctype == NIX_AQ_CTYPE_MCE) 1263 1257 memcpy(&rsp->mce, ctx, 1264 - sizeof(struct nix_rx_mce_s)); 1258 + NIX_MAX_CTX_SIZE); 1265 1259 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) 1266 1260 memcpy(&rsp->prof, ctx, 1267 - sizeof(struct nix_bandprof_s)); 1261 + NIX_MAX_CTX_SIZE); 1268 1262 } 1269 1263 } 1270 1264 ··· 1295 1289 /* Make copy of original context & mask which are required 1296 1290 * for resubmission 1297 1291 */ 1298 - memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); 1299 - memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); 1292 + memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE); 1293 + memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE); 1300 1294 1301 1295 /* exclude fields which HW can update */ 1302 1296 aq_req.cq_mask.cq_err = 0; ··· 1315 1309 * updated fields are masked out for request and response 1316 1310 * comparison 1317 1311 */ 1318 - for 
(word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); 1312 + for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64); 1319 1313 word++) { 1320 1314 *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= 1321 1315 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); ··· 1323 1317 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); 1324 1318 } 1325 1319 1326 - if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) 1320 + if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE)) 1327 1321 return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; 1328 1322 1329 1323 return 0; 1330 1324 } 1331 1325 1332 - static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 1333 - struct nix_aq_enq_rsp *rsp) 1326 + int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, 1327 + struct nix_aq_enq_rsp *rsp) 1334 1328 { 1335 1329 struct nix_hw *nix_hw; 1336 1330 int err, retries = 5; ··· 5818 5812 } 5819 5813 } 5820 5814 5815 + #define NIX_BW_PROF_HI_MASK GENMASK(10, 7) 5816 + 5821 5817 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, 5822 5818 struct nix_hw *nix_hw, u16 pcifunc) 5823 5819 { ··· 5858 5850 return -EINVAL; 5859 5851 5860 5852 ipolicer = &nix_hw->ipolicer[hi_layer]; 5861 - prof_idx = req->prof.band_prof_id; 5853 + prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, req->prof.band_prof_id_h); 5854 + prof_idx |= req->prof.band_prof_id; 5862 5855 if (prof_idx >= ipolicer->band_prof.max || 5863 5856 ipolicer->pfvf_map[prof_idx] != pcifunc) 5864 5857 return -EINVAL; ··· 6024 6015 aq_req->op = NIX_AQ_INSTOP_WRITE; 6025 6016 aq_req->qidx = leaf_prof; 6026 6017 6027 - aq_req->prof.band_prof_id = mid_prof; 6018 + aq_req->prof.band_prof_id = mid_prof & 0x7F; 6028 6019 aq_req->prof_mask.band_prof_id = GENMASK(6, 0); 6020 + aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof); 6021 + aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0); 6029 6022 aq_req->prof.hl_en = 1; 6030 6023 aq_req->prof_mask.hl_en = 1; 6031 6024 ··· 6035 6024 (struct nix_aq_enq_req *)aq_req, 6036 
6025 (struct nix_aq_enq_rsp *)aq_rsp); 6037 6026 } 6027 + 6028 + #define NIX_RQ_PROF_HI_MASK GENMASK(13, 10) 6038 6029 6039 6030 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, 6040 6031 u16 rq_idx, u16 match_id) ··· 6069 6056 return 0; 6070 6057 6071 6058 /* Get the bandwidth profile ID mapped to this RQ */ 6072 - leaf_prof = aq_rsp.rq.band_prof_id; 6059 + leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h); 6060 + leaf_prof |= aq_rsp.rq.band_prof_id; 6073 6061 6074 6062 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; 6075 6063 ipolicer->match_id[leaf_prof] = match_id; ··· 6108 6094 * to different RQs and marked with same match_id 6109 6095 * are rate limited in a aggregate fashion 6110 6096 */ 6111 - mid_prof = aq_rsp.prof.band_prof_id; 6097 + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, 6098 + aq_rsp.prof.band_prof_id_h); 6099 + mid_prof |= aq_rsp.prof.band_prof_id; 6100 + 6112 6101 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, 6113 6102 &aq_req, &aq_rsp, 6114 6103 leaf_prof, mid_prof); ··· 6233 6216 if (!aq_rsp.prof.hl_en) 6234 6217 return; 6235 6218 6236 - mid_prof = aq_rsp.prof.band_prof_id; 6219 + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h); 6220 + mid_prof |= aq_rsp.prof.band_prof_id; 6237 6221 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; 6238 6222 ipolicer->ref_count[mid_prof]--; 6239 6223 /* If ref_count is zero, free mid layer profile */
+21 -8
drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
··· 464 464 return 0; 465 465 } 466 466 467 + static void npa_aq_ndc_config(struct rvu *rvu, struct rvu_block *block) 468 + { 469 + u64 cfg; 470 + 471 + if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */ 472 + return; 473 + 474 + /* Do not bypass NDC cache */ 475 + cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); 476 + cfg &= ~0x03DULL; 477 + #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 478 + /* Disable caching of stack pages */ 479 + cfg |= 0x10ULL; 480 + #endif 481 + rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); 482 + } 483 + 467 484 static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) 468 485 { 469 486 u64 cfg; ··· 496 479 rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); 497 480 #endif 498 481 499 - /* Do not bypass NDC cache */ 500 - cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); 501 - cfg &= ~0x03DULL; 502 - #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING 503 - /* Disable caching of stack pages */ 504 - cfg |= 0x10ULL; 505 - #endif 506 - rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); 482 + npa_aq_ndc_config(rvu, block); 507 483 508 484 /* For CN10K NPA BATCH DMA set 35 cache lines */ 509 485 if (!is_rvu_otx2(rvu)) { ··· 576 566 { 577 567 int bank, max_bank, line, max_line, err; 578 568 u64 reg, ndc_af_const; 569 + 570 + if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */ 571 + return 0; 579 572 580 573 /* Set the ENABLE bit(63) to '0' */ 581 574 reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
+28 -3
drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
··· 13 13 14 14 #define RVU_MULTI_BLK_VER 0x7ULL 15 15 16 + #define NIX_MAX_CTX_SIZE 128 17 + 16 18 /* RVU Block Address Enumeration */ 17 19 enum rvu_block_addr_e { 18 20 BLKADDR_RVUM = 0x0ULL, ··· 372 370 u64 qsize : 4; 373 371 u64 cq_err_int : 8; 374 372 u64 cq_err_int_ena : 8; 373 + /* Ensure all context sizes are 128 bytes */ 374 + u64 padding[12]; 375 375 }; 376 + 377 + static_assert(sizeof(struct nix_cq_ctx_s) == NIX_MAX_CTX_SIZE); 376 378 377 379 /* CN10K NIX Receive queue context structure */ 378 380 struct nix_cn10k_rq_ctx_s { ··· 419 413 u64 rsvd_171 : 1; 420 414 u64 later_skip : 6; 421 415 u64 xqe_imm_size : 6; 422 - u64 rsvd_189_184 : 6; 416 + u64 band_prof_id_h : 4; 417 + u64 rsvd_189_188 : 2; 423 418 u64 xqe_imm_copy : 1; 424 419 u64 xqe_hdr_split : 1; 425 420 u64 xqe_drop : 8; /* W3 */ ··· 466 459 u64 rsvd_959_896; /* W14 */ 467 460 u64 rsvd_1023_960; /* W15 */ 468 461 }; 462 + 463 + static_assert(sizeof(struct nix_cn10k_rq_ctx_s) == NIX_MAX_CTX_SIZE); 469 464 470 465 /* CN10K NIX Send queue context structure */ 471 466 struct nix_cn10k_sq_ctx_s { ··· 531 522 u64 dropped_pkts : 48; 532 523 u64 rsvd_1023_1008 : 16; 533 524 }; 525 + 526 + static_assert(sizeof(struct nix_cn10k_sq_ctx_s) == NIX_MAX_CTX_SIZE); 534 527 535 528 /* NIX Receive queue context structure */ 536 529 struct nix_rq_ctx_s { ··· 604 593 u64 rsvd_959_896; /* W14 */ 605 594 u64 rsvd_1023_960; /* W15 */ 606 595 }; 596 + 597 + static_assert(sizeof(struct nix_rq_ctx_s) == NIX_MAX_CTX_SIZE); 607 598 608 599 /* NIX sqe sizes */ 609 600 enum nix_maxsqesz { ··· 681 668 u64 rsvd_1023_1008 : 16; 682 669 }; 683 670 671 + static_assert(sizeof(struct nix_sq_ctx_s) == NIX_MAX_CTX_SIZE); 672 + 684 673 /* NIX Receive side scaling entry structure*/ 685 674 struct nix_rsse_s { 686 675 uint32_t rq : 20; 687 676 uint32_t reserved_20_31 : 12; 688 - 677 + /* Ensure all context sizes are minimum 128 bytes */ 678 + u64 padding[15]; 689 679 }; 680 + 681 + static_assert(sizeof(struct nix_rsse_s) == 
NIX_MAX_CTX_SIZE); 690 682 691 683 /* NIX receive multicast/mirror entry structure */ 692 684 struct nix_rx_mce_s { ··· 702 684 uint64_t rsvd_31_24 : 8; 703 685 uint64_t pf_func : 16; 704 686 uint64_t next : 16; 687 + /* Ensure all context sizes are minimum 128 bytes */ 688 + u64 padding[15]; 705 689 }; 690 + 691 + static_assert(sizeof(struct nix_rx_mce_s) == NIX_MAX_CTX_SIZE); 706 692 707 693 enum nix_band_prof_layers { 708 694 BAND_PROF_LEAF_LAYER = 0, ··· 758 736 uint64_t rc_action : 2; 759 737 uint64_t meter_algo : 2; 760 738 uint64_t band_prof_id : 7; 761 - uint64_t reserved_111_118 : 8; 739 + uint64_t band_prof_id_h : 4; 740 + uint64_t reserved_115_118 : 4; 762 741 uint64_t hl_en : 1; 763 742 uint64_t reserved_120_127 : 8; 764 743 uint64_t ts : 48; /* W2 */ ··· 791 768 uint64_t red_octs_drop : 48; /* W15 */ 792 769 uint64_t reserved_1008_1023 : 16; 793 770 }; 771 + 772 + static_assert(sizeof(struct nix_bandprof_s) == NIX_MAX_CTX_SIZE); 794 773 795 774 enum nix_lsoalg { 796 775 NIX_LSOALG_NOP,
+10
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
··· 15 15 .aura_freeptr = otx2_aura_freeptr, 16 16 .refill_pool_ptrs = otx2_refill_pool_ptrs, 17 17 .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, 18 + .aura_aq_init = otx2_aura_aq_init, 19 + .pool_aq_init = otx2_pool_aq_init, 18 20 }; 19 21 20 22 static struct dev_hw_ops cn10k_hw_ops = { ··· 25 23 .aura_freeptr = cn10k_aura_freeptr, 26 24 .refill_pool_ptrs = cn10k_refill_pool_ptrs, 27 25 .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, 26 + .aura_aq_init = otx2_aura_aq_init, 27 + .pool_aq_init = otx2_pool_aq_init, 28 28 }; 29 29 30 30 void otx2_init_hw_ops(struct otx2_nic *pfvf) ··· 340 336 341 337 aq->rq.band_prof_id = policer; 342 338 aq->rq_mask.band_prof_id = GENMASK(9, 0); 339 + 340 + /* If policer id is greater than 1023 then it implies hardware supports 341 + * more leaf profiles. In that case use band_prof_id_h for 4 MSBs. 342 + */ 343 + aq->rq.band_prof_id_h = policer >> 10; 344 + aq->rq_mask.band_prof_id_h = GENMASK(3, 0); 343 345 344 346 /* Fill AQ info */ 345 347 aq->qidx = rq_idx;
+209 -11
drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
··· 10 10 #include "otx2_struct.h" 11 11 #include "cn10k.h" 12 12 13 - static struct dev_hw_ops cn20k_hw_ops = { 14 - .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, 15 - .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, 16 - .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, 17 - }; 18 - 19 - void cn20k_init(struct otx2_nic *pfvf) 20 - { 21 - pfvf->hw_ops = &cn20k_hw_ops; 22 - } 23 - EXPORT_SYMBOL(cn20k_init); 24 13 /* CN20K mbox AF => PFx irq handler */ 25 14 irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq) 26 15 { ··· 239 250 240 251 return 0; 241 252 } 253 + 254 + #define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */ 255 + 256 + static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id) 257 + { 258 + #ifdef CONFIG_DCB 259 + return pfvf->queue_to_pfc_map[aura_id]; 260 + #else 261 + return 0; 262 + #endif 263 + } 264 + 265 + static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id, 266 + int pool_id, int numptrs) 267 + { 268 + struct npa_cn20k_aq_enq_req *aq; 269 + struct otx2_pool *pool; 270 + u8 bpid_idx; 271 + int err; 272 + 273 + pool = &pfvf->qset.pool[pool_id]; 274 + 275 + /* Allocate memory for HW to update Aura count. 276 + * Alloc one cache line, so that it fits all FC_STYPE modes. 
277 + */ 278 + if (!pool->fc_addr) { 279 + err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); 280 + if (err) 281 + return err; 282 + } 283 + 284 + /* Initialize this aura's context via AF */ 285 + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); 286 + if (!aq) { 287 + /* Shared mbox memory buffer is full, flush it and retry */ 288 + err = otx2_sync_mbox_msg(&pfvf->mbox); 289 + if (err) 290 + return err; 291 + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); 292 + if (!aq) 293 + return -ENOMEM; 294 + } 295 + 296 + aq->aura_id = aura_id; 297 + 298 + /* Will be filled by AF with correct pool context address */ 299 + aq->aura.pool_addr = pool_id; 300 + aq->aura.pool_caching = 1; 301 + aq->aura.shift = ilog2(numptrs) - 8; 302 + aq->aura.count = numptrs; 303 + aq->aura.limit = numptrs; 304 + aq->aura.avg_level = 255; 305 + aq->aura.ena = 1; 306 + aq->aura.fc_ena = 1; 307 + aq->aura.fc_addr = pool->fc_addr->iova; 308 + aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ 309 + 310 + /* Enable backpressure for RQ aura */ 311 + if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { 312 + aq->aura.bp_ena = 0; 313 + /* If NIX1 LF is attached then specify NIX1_RX. 314 + * 315 + * Below NPA_AURA_S[BP_ENA] is set according to the 316 + * NPA_BPINTF_E enumeration given as: 317 + * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so 318 + * NIX0_RX is 0x0 + 0*0x1 = 0 319 + * NIX1_RX is 0x0 + 1*0x1 = 1 320 + * But in HRM it is given that 321 + * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to 322 + * NIX-RX based on [BP] level. One bit per NIX-RX; index 323 + * enumerated by NPA_BPINTF_E." 
324 + */ 325 + if (pfvf->nix_blkaddr == BLKADDR_NIX1) 326 + aq->aura.bp_ena = 1; 327 + 328 + bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id); 329 + aq->aura.bpid = pfvf->bpid[bpid_idx]; 330 + 331 + /* Set backpressure level for RQ's Aura */ 332 + aq->aura.bp = RQ_BP_LVL_AURA; 333 + } 334 + 335 + /* Fill AQ info */ 336 + aq->ctype = NPA_AQ_CTYPE_AURA; 337 + aq->op = NPA_AQ_INSTOP_INIT; 338 + 339 + return 0; 340 + } 341 + 342 + static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, 343 + int stack_pages, int numptrs, int buf_size, 344 + int type) 345 + { 346 + struct page_pool_params pp_params = { 0 }; 347 + struct npa_cn20k_aq_enq_req *aq; 348 + struct otx2_pool *pool; 349 + int err, sz; 350 + 351 + pool = &pfvf->qset.pool[pool_id]; 352 + /* Alloc memory for stack which is used to store buffer pointers */ 353 + err = qmem_alloc(pfvf->dev, &pool->stack, 354 + stack_pages, pfvf->hw.stack_pg_bytes); 355 + if (err) 356 + return err; 357 + 358 + pool->rbsize = buf_size; 359 + 360 + /* Initialize this pool's context via AF */ 361 + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); 362 + if (!aq) { 363 + /* Shared mbox memory buffer is full, flush it and retry */ 364 + err = otx2_sync_mbox_msg(&pfvf->mbox); 365 + if (err) { 366 + qmem_free(pfvf->dev, pool->stack); 367 + return err; 368 + } 369 + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); 370 + if (!aq) { 371 + qmem_free(pfvf->dev, pool->stack); 372 + return -ENOMEM; 373 + } 374 + } 375 + 376 + aq->aura_id = pool_id; 377 + aq->pool.stack_base = pool->stack->iova; 378 + aq->pool.stack_caching = 1; 379 + aq->pool.ena = 1; 380 + aq->pool.buf_size = buf_size / 128; 381 + aq->pool.stack_max_pages = stack_pages; 382 + aq->pool.shift = ilog2(numptrs) - 8; 383 + aq->pool.ptr_start = 0; 384 + aq->pool.ptr_end = ~0ULL; 385 + 386 + /* Fill AQ info */ 387 + aq->ctype = NPA_AQ_CTYPE_POOL; 388 + aq->op = NPA_AQ_INSTOP_INIT; 389 + 390 + if (type != AURA_NIX_RQ) { 391 + pool->page_pool = NULL; 392 + return 0; 
393 + } 394 + 395 + sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE); 396 + pp_params.order = get_order(sz); 397 + pp_params.flags = PP_FLAG_DMA_MAP; 398 + pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); 399 + pp_params.nid = NUMA_NO_NODE; 400 + pp_params.dev = pfvf->dev; 401 + pp_params.dma_dir = DMA_FROM_DEVICE; 402 + pool->page_pool = page_pool_create(&pp_params); 403 + if (IS_ERR(pool->page_pool)) { 404 + netdev_err(pfvf->netdev, "Creation of page pool failed\n"); 405 + return PTR_ERR(pool->page_pool); 406 + } 407 + 408 + return 0; 409 + } 410 + 411 + static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) 412 + { 413 + struct nix_cn20k_aq_enq_req *aq; 414 + struct otx2_nic *pfvf = dev; 415 + 416 + /* Get memory to put this msg */ 417 + aq = otx2_mbox_alloc_msg_nix_cn20k_aq_enq(&pfvf->mbox); 418 + if (!aq) 419 + return -ENOMEM; 420 + 421 + aq->sq.cq = pfvf->hw.rx_queues + qidx; 422 + aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ 423 + aq->sq.cq_ena = 1; 424 + aq->sq.ena = 1; 425 + aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); 426 + aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); 427 + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; 428 + aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ 429 + aq->sq.sqb_aura = sqb_aura; 430 + aq->sq.sq_int_ena = NIX_SQINT_BITS; 431 + aq->sq.qint_idx = 0; 432 + /* Due pipelining impact minimum 2000 unused SQ CQE's 433 + * need to maintain to avoid CQ overflow. 
434 + */ 435 + aq->sq.cq_limit = (SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt); 436 + 437 + /* Fill AQ info */ 438 + aq->qidx = qidx; 439 + aq->ctype = NIX_AQ_CTYPE_SQ; 440 + aq->op = NIX_AQ_INSTOP_INIT; 441 + 442 + return otx2_sync_mbox_msg(&pfvf->mbox); 443 + } 444 + 445 + static struct dev_hw_ops cn20k_hw_ops = { 446 + .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, 447 + .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, 448 + .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, 449 + .sq_aq_init = cn20k_sq_aq_init, 450 + .sqe_flush = cn10k_sqe_flush, 451 + .aura_freeptr = cn10k_aura_freeptr, 452 + .refill_pool_ptrs = cn10k_refill_pool_ptrs, 453 + .aura_aq_init = cn20k_aura_aq_init, 454 + .pool_aq_init = cn20k_pool_aq_init, 455 + }; 456 + 457 + void cn20k_init(struct otx2_nic *pfvf) 458 + { 459 + pfvf->hw_ops = &cn20k_hw_ops; 460 + } 461 + EXPORT_SYMBOL(cn20k_init);
+14
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
··· 1369 1369 int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, 1370 1370 int pool_id, int numptrs) 1371 1371 { 1372 + return pfvf->hw_ops->aura_aq_init(pfvf, aura_id, pool_id, 1373 + numptrs); 1374 + } 1375 + 1376 + int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id, 1377 + int pool_id, int numptrs) 1378 + { 1372 1379 struct npa_aq_enq_req *aq; 1373 1380 struct otx2_pool *pool; 1374 1381 int err; ··· 1452 1445 1453 1446 int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, 1454 1447 int stack_pages, int numptrs, int buf_size, int type) 1448 + { 1449 + return pfvf->hw_ops->pool_aq_init(pfvf, pool_id, stack_pages, numptrs, 1450 + buf_size, type); 1451 + } 1452 + 1453 + int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, 1454 + int stack_pages, int numptrs, int buf_size, int type) 1455 1455 { 1456 1456 struct page_pool_params pp_params = { 0 }; 1457 1457 struct xsk_buff_pool *xsk_pool;
+10
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 14 14 #include <linux/net_tstamp.h> 15 15 #include <linux/ptp_clock_kernel.h> 16 16 #include <linux/timecounter.h> 17 + #include <linux/soc/marvell/silicons.h> 17 18 #include <linux/soc/marvell/octeontx2/asm.h> 18 19 #include <net/macsec.h> 19 20 #include <net/pkt_cls.h> ··· 376 375 irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq); 377 376 irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq); 378 377 irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq); 378 + int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id, 379 + int pool_id, int numptrs); 380 + int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id, 381 + int stack_pages, int numptrs, int buf_size, 382 + int type); 379 383 }; 380 384 381 385 #define CN10K_MCS_SA_PER_SC 4 ··· 1065 1059 int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); 1066 1060 int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); 1067 1061 int otx2_set_hw_capabilities(struct otx2_nic *pfvf); 1062 + int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id, 1063 + int pool_id, int numptrs); 1064 + int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, 1065 + int stack_pages, int numptrs, int buf_size, int type); 1068 1066 1069 1067 /* RSS configuration APIs*/ 1070 1068 int otx2_rss_init(struct otx2_nic *pfvf);