Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'marvell-cn10k'

Geetha sowjanya says:

====================
Add Marvell CN10K support

The current admin function (AF) driver and the netdev driver support
OcteonTx2 silicon variants. OcteonTx2's
Resource Virtualization Unit (RVU) is carried forward to the next-gen
silicon, i.e. OcteonTx3, with some changes and feature enhancements.

This patch set adds support for OcteonTx3 (CN10K) silicon and gets
the drivers to the same level as OcteonTx2. No new OcteonTx3 specific
features are added.

Changes cover below HW level differences
- PCIe BAR address changes wrt shared mailbox memory region
- Receive buffer freeing to HW
- Transmit packet's descriptor submission to HW
- Programmable HW interface identifiers (channels)
- Increased MTU support
- A Serdes MAC block (RPM) configuration

v5-v6
Rebased on top of latest net-next branch.

v4-v5
Fixed sparse warnings.

v3-v4
Fixed compiler warnings.

v2-v3
Reposting as a single thread.
Rebased on top of latest net-next branch.

v1-v2
Fixed checkpatch reported issues.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+2613 -739
+2
MAINTAINERS
··· 10719 10719 M: Linu Cherian <lcherian@marvell.com> 10720 10720 M: Geetha sowjanya <gakula@marvell.com> 10721 10721 M: Jerin Jacob <jerinj@marvell.com> 10722 + M: hariprasad <hkelam@marvell.com> 10723 + M: Subbaraya Sundeep <sbhatta@marvell.com> 10722 10724 L: netdev@vger.kernel.org 10723 10725 S: Supported 10724 10726 F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
+5 -5
drivers/net/ethernet/marvell/octeontx2/af/Makefile
··· 4 4 # 5 5 6 6 ccflags-y += -I$(src) 7 - obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o 8 - obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o 7 + obj-$(CONFIG_OCTEONTX2_MBOX) += rvu_mbox.o 8 + obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o 9 9 10 - octeontx2_mbox-y := mbox.o rvu_trace.o 11 - octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ 10 + rvu_mbox-y := mbox.o rvu_trace.o 11 + rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ 12 12 rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ 13 - rvu_cpt.o rvu_devlink.o 13 + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o
+226 -87
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
··· 21 21 #include <linux/of_net.h> 22 22 23 23 #include "cgx.h" 24 + #include "rvu.h" 25 + #include "lmac_common.h" 24 26 25 - #define DRV_NAME "octeontx2-cgx" 26 - #define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver" 27 - 28 - /** 29 - * struct lmac 30 - * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion 31 - * @cmd_lock: Lock to serialize the command interface 32 - * @resp: command response 33 - * @link_info: link related information 34 - * @event_cb: callback for linkchange events 35 - * @event_cb_lock: lock for serializing callback with unregister 36 - * @cmd_pend: flag set before new command is started 37 - * flag cleared after command response is received 38 - * @cgx: parent cgx port 39 - * @lmac_id: lmac port id 40 - * @name: lmac port name 41 - */ 42 - struct lmac { 43 - wait_queue_head_t wq_cmd_cmplt; 44 - struct mutex cmd_lock; 45 - u64 resp; 46 - struct cgx_link_user_info link_info; 47 - struct cgx_event_cb event_cb; 48 - spinlock_t event_cb_lock; 49 - bool cmd_pend; 50 - struct cgx *cgx; 51 - u8 lmac_id; 52 - char *name; 53 - }; 54 - 55 - struct cgx { 56 - void __iomem *reg_base; 57 - struct pci_dev *pdev; 58 - u8 cgx_id; 59 - u8 lmac_count; 60 - struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; 61 - struct work_struct cgx_cmd_work; 62 - struct workqueue_struct *cgx_cmd_workq; 63 - struct list_head cgx_list; 64 - }; 27 + #define DRV_NAME "Marvell-CGX/RPM" 28 + #define DRV_STRING "Marvell CGX/RPM Driver" 65 29 66 30 static LIST_HEAD(cgx_list); 67 31 ··· 41 77 /* Supported devices */ 42 78 static const struct pci_device_id cgx_id_table[] = { 43 79 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, 80 + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) }, 44 81 { 0, } /* end of table */ 45 82 }; 46 83 47 84 MODULE_DEVICE_TABLE(pci, cgx_id_table); 48 85 49 - static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) 86 + static bool is_dev_rpm(void *cgxd) 50 87 { 51 - writeq(val, cgx->reg_base + (lmac << 18) + 
offset); 88 + struct cgx *cgx = cgxd; 89 + 90 + return (cgx->pdev->device == PCI_DEVID_CN10K_RPM); 52 91 } 53 92 54 - static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) 93 + bool is_lmac_valid(struct cgx *cgx, int lmac_id) 55 94 { 56 - return readq(cgx->reg_base + (lmac << 18) + offset); 95 + return cgx && test_bit(lmac_id, &cgx->lmac_bmap); 57 96 } 58 97 59 - static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) 98 + struct mac_ops *get_mac_ops(void *cgxd) 99 + { 100 + if (!cgxd) 101 + return cgxd; 102 + 103 + return ((struct cgx *)cgxd)->mac_ops; 104 + } 105 + 106 + void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) 107 + { 108 + writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + 109 + offset); 110 + } 111 + 112 + u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) 113 + { 114 + return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + 115 + offset); 116 + } 117 + 118 + struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) 60 119 { 61 120 if (!cgx || lmac_id >= MAX_LMAC_PER_CGX) 62 121 return NULL; ··· 121 134 return cgx_dev; 122 135 } 123 136 return NULL; 137 + } 138 + 139 + void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val) 140 + { 141 + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 142 + 143 + cgx_write(cgx_dev, lmac_id, offset, val); 144 + } 145 + 146 + u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset) 147 + { 148 + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 149 + 150 + return cgx_read(cgx_dev, lmac_id, offset); 124 151 } 125 152 126 153 int cgx_get_cgxid(void *cgxd) ··· 187 186 int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) 188 187 { 189 188 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 189 + struct mac_ops *mac_ops; 190 190 u64 cfg; 191 191 192 + mac_ops = cgx_dev->mac_ops; 192 193 /* copy 6bytes from macaddr */ 193 194 /* memcpy(&cfg, mac_addr, 6); */ 194 195 ··· 209 206 u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) 210 207 { 211 208 struct cgx *cgx_dev = 
cgx_get_pdata(cgx_id); 209 + struct mac_ops *mac_ops; 212 210 u64 cfg; 211 + 212 + mac_ops = cgx_dev->mac_ops; 213 213 214 214 cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); 215 215 return cfg & CGX_RX_DMAC_ADR_MASK; ··· 222 216 { 223 217 struct cgx *cgx = cgxd; 224 218 225 - if (!cgx || lmac_id >= cgx->lmac_count) 219 + if (!is_lmac_valid(cgx, lmac_id)) 226 220 return -ENODEV; 227 221 228 222 cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F)); 229 223 return 0; 230 224 } 231 225 232 - static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id) 226 + static u8 cgx_get_lmac_type(void *cgxd, int lmac_id) 233 227 { 228 + struct cgx *cgx = cgxd; 234 229 u64 cfg; 235 230 236 231 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); ··· 245 238 u8 lmac_type; 246 239 u64 cfg; 247 240 248 - if (!cgx || lmac_id >= cgx->lmac_count) 241 + if (!is_lmac_valid(cgx, lmac_id)) 249 242 return -ENODEV; 250 243 251 - lmac_type = cgx_get_lmac_type(cgx, lmac_id); 244 + lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id); 252 245 if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) { 253 246 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL); 254 247 if (enable) ··· 270 263 void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) 271 264 { 272 265 struct cgx *cgx = cgx_get_pdata(cgx_id); 266 + struct mac_ops *mac_ops; 273 267 u64 cfg = 0; 274 268 275 269 if (!cgx) 276 270 return; 277 271 272 + mac_ops = cgx->mac_ops; 278 273 if (enable) { 279 274 /* Enable promiscuous mode on LMAC */ 280 275 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); ··· 334 325 { 335 326 struct cgx *cgx = cgxd; 336 327 337 - if (!cgx || lmac_id >= cgx->lmac_count) 328 + if (!is_lmac_valid(cgx, lmac_id)) 338 329 return -ENODEV; 339 330 *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); 340 331 return 0; ··· 344 335 { 345 336 struct cgx *cgx = cgxd; 346 337 347 - if (!cgx || lmac_id >= cgx->lmac_count) 338 + if (!is_lmac_valid(cgx, lmac_id)) 
348 339 return -ENODEV; 349 340 *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8)); 350 341 return 0; 342 + } 343 + 344 + u64 cgx_features_get(void *cgxd) 345 + { 346 + return ((struct cgx *)cgxd)->hw_features; 351 347 } 352 348 353 349 static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo) ··· 414 400 struct cgx *cgx = cgxd; 415 401 u64 cfg; 416 402 417 - if (!cgx || lmac_id >= cgx->lmac_count) 403 + if (!is_lmac_valid(cgx, lmac_id)) 418 404 return -ENODEV; 419 405 420 406 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); ··· 431 417 struct cgx *cgx = cgxd; 432 418 u64 cfg, last; 433 419 434 - if (!cgx || lmac_id >= cgx->lmac_count) 420 + if (!is_lmac_valid(cgx, lmac_id)) 435 421 return -ENODEV; 436 422 437 423 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); ··· 446 432 return !!(last & DATA_PKT_TX_EN); 447 433 } 448 434 449 - int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id, 450 - u8 *tx_pause, u8 *rx_pause) 435 + static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id, 436 + u8 *tx_pause, u8 *rx_pause) 451 437 { 452 438 struct cgx *cgx = cgxd; 453 439 u64 cfg; 454 440 455 - if (!cgx || lmac_id >= cgx->lmac_count) 441 + if (is_dev_rpm(cgx)) 442 + return 0; 443 + 444 + if (!is_lmac_valid(cgx, lmac_id)) 456 445 return -ENODEV; 457 446 458 447 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); ··· 466 449 return 0; 467 450 } 468 451 469 - int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id, 470 - u8 tx_pause, u8 rx_pause) 452 + static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id, 453 + u8 tx_pause, u8 rx_pause) 471 454 { 472 455 struct cgx *cgx = cgxd; 473 456 u64 cfg; 474 457 475 - if (!cgx || lmac_id >= cgx->lmac_count) 458 + if (is_dev_rpm(cgx)) 459 + return 0; 460 + 461 + if (!is_lmac_valid(cgx, lmac_id)) 476 462 return -ENODEV; 477 463 478 464 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); ··· 499 479 return 0; 500 480 } 501 481 502 - static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable) 
482 + static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable) 503 483 { 484 + struct cgx *cgx = cgxd; 504 485 u64 cfg; 505 486 506 - if (!cgx || lmac_id >= cgx->lmac_count) 487 + if (!is_lmac_valid(cgx, lmac_id)) 507 488 return; 508 489 if (enable) { 509 490 /* Enable receive pause frames */ ··· 562 541 if (!cgx) 563 542 return; 564 543 544 + if (is_dev_rpm(cgx)) 545 + return; 546 + 565 547 if (enable) { 566 548 /* Enable inbound PTP timestamping */ 567 549 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); ··· 587 563 } 588 564 589 565 /* CGX Firmware interface low level support */ 590 - static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) 566 + int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) 591 567 { 592 568 struct cgx *cgx = lmac->cgx; 593 569 struct device *dev; ··· 635 611 return err; 636 612 } 637 613 638 - static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp, 639 - struct cgx *cgx, int lmac_id) 614 + int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id) 640 615 { 641 616 struct lmac *lmac; 642 617 int err; ··· 908 885 909 886 static irqreturn_t cgx_fwi_event_handler(int irq, void *data) 910 887 { 888 + u64 event, offset, clear_bit; 911 889 struct lmac *lmac = data; 912 890 struct cgx *cgx; 913 - u64 event; 914 891 915 892 cgx = lmac->cgx; 893 + 894 + /* Clear SW_INT for RPM and CMR_INT for CGX */ 895 + offset = cgx->mac_ops->int_register; 896 + clear_bit = cgx->mac_ops->int_ena_bit; 916 897 917 898 event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); 918 899 ··· 953 926 * Ack the interrupt register as well. 
954 927 */ 955 928 cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); 956 - cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT); 929 + cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit); 957 930 958 931 return IRQ_HANDLED; 959 932 } ··· 997 970 { 998 971 u64 req = 0, resp; 999 972 struct cgx *cgx; 973 + int first_lmac; 1000 974 int err; 1001 975 1002 976 cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list); 1003 977 if (!cgx) 1004 978 return -ENXIO; 1005 979 980 + first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX); 1006 981 req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req); 1007 - err = cgx_fwi_cmd_generic(req, &resp, cgx, 0); 982 + err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac); 1008 983 if (!err) 1009 984 *base = FIELD_GET(RESP_FWD_BASE, resp); 1010 985 ··· 1085 1056 1086 1057 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) 1087 1058 { 1059 + int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX); 1088 1060 u64 req = 0; 1089 1061 1090 1062 req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); 1091 - return cgx_fwi_cmd_generic(req, resp, cgx, 0); 1063 + return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac); 1092 1064 } 1093 1065 1094 1066 static int cgx_lmac_verify_fwi_version(struct cgx *cgx) ··· 1122 1092 struct device *dev = &cgx->pdev->dev; 1123 1093 int i, err; 1124 1094 1125 - /* Do Link up for all the lmacs */ 1126 - for (i = 0; i < cgx->lmac_count; i++) { 1095 + /* Do Link up for all the enabled lmacs */ 1096 + for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { 1127 1097 err = cgx_fwi_link_change(cgx, i, true); 1128 1098 if (err) 1129 1099 dev_info(dev, "cgx port %d:%d Link up command failed\n", ··· 1143 1113 return 0; 1144 1114 } 1145 1115 1116 + static void cgx_lmac_get_fifolen(struct cgx *cgx) 1117 + { 1118 + u64 cfg; 1119 + 1120 + cfg = cgx_read(cgx, 0, CGX_CONST); 1121 + cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); 1122 + } 1123 + 1124 + 
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac, 1125 + int cnt, bool req_free) 1126 + { 1127 + struct mac_ops *mac_ops = cgx->mac_ops; 1128 + u64 offset, ena_bit; 1129 + unsigned int irq; 1130 + int err; 1131 + 1132 + irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi + 1133 + cnt * mac_ops->irq_offset); 1134 + offset = mac_ops->int_set_reg; 1135 + ena_bit = mac_ops->int_ena_bit; 1136 + 1137 + if (req_free) { 1138 + free_irq(irq, lmac); 1139 + return 0; 1140 + } 1141 + 1142 + err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac); 1143 + if (err) 1144 + return err; 1145 + 1146 + /* Enable interrupt */ 1147 + cgx_write(cgx, lmac->lmac_id, offset, ena_bit); 1148 + return 0; 1149 + } 1150 + 1151 + int cgx_get_nr_lmacs(void *cgxd) 1152 + { 1153 + struct cgx *cgx = cgxd; 1154 + 1155 + return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL; 1156 + } 1157 + 1158 + u8 cgx_get_lmacid(void *cgxd, u8 lmac_index) 1159 + { 1160 + struct cgx *cgx = cgxd; 1161 + 1162 + return cgx->lmac_idmap[lmac_index]->lmac_id; 1163 + } 1164 + 1165 + unsigned long cgx_get_lmac_bmap(void *cgxd) 1166 + { 1167 + struct cgx *cgx = cgxd; 1168 + 1169 + return cgx->lmac_bmap; 1170 + } 1171 + 1146 1172 static int cgx_lmac_init(struct cgx *cgx) 1147 1173 { 1148 1174 struct lmac *lmac; 1175 + u64 lmac_list; 1149 1176 int i, err; 1150 1177 1151 - cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7; 1178 + cgx_lmac_get_fifolen(cgx); 1179 + 1180 + cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx); 1181 + /* lmac_list specifies which lmacs are enabled 1182 + * when bit n is set to 1, LMAC[n] is enabled 1183 + */ 1184 + if (cgx->mac_ops->non_contiguous_serdes_lane) 1185 + lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL; 1186 + 1152 1187 if (cgx->lmac_count > MAX_LMAC_PER_CGX) 1153 1188 cgx->lmac_count = MAX_LMAC_PER_CGX; 1154 1189 ··· 1227 1132 goto err_lmac_free; 1228 1133 } 1229 1134 sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); 1230 - lmac->lmac_id 
= i; 1135 + if (cgx->mac_ops->non_contiguous_serdes_lane) { 1136 + lmac->lmac_id = __ffs64(lmac_list); 1137 + lmac_list &= ~BIT_ULL(lmac->lmac_id); 1138 + } else { 1139 + lmac->lmac_id = i; 1140 + } 1141 + 1231 1142 lmac->cgx = cgx; 1232 1143 init_waitqueue_head(&lmac->wq_cmd_cmplt); 1233 1144 mutex_init(&lmac->cmd_lock); 1234 1145 spin_lock_init(&lmac->event_cb_lock); 1235 - err = request_irq(pci_irq_vector(cgx->pdev, 1236 - CGX_LMAC_FWI + i * 9), 1237 - cgx_fwi_event_handler, 0, lmac->name, lmac); 1146 + err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false); 1238 1147 if (err) 1239 1148 goto err_irq; 1240 1149 1241 - /* Enable interrupt */ 1242 - cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, 1243 - FW_CGX_INT); 1244 - 1245 1150 /* Add reference */ 1246 - cgx->lmac_idmap[i] = lmac; 1247 - cgx_lmac_pause_frm_config(cgx, i, true); 1151 + cgx->lmac_idmap[lmac->lmac_id] = lmac; 1152 + cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); 1153 + set_bit(lmac->lmac_id, &cgx->lmac_bmap); 1248 1154 } 1249 1155 1250 1156 return cgx_lmac_verify_fwi_version(cgx); ··· 1269 1173 } 1270 1174 1271 1175 /* Free all lmac related resources */ 1272 - for (i = 0; i < cgx->lmac_count; i++) { 1273 - cgx_lmac_pause_frm_config(cgx, i, false); 1176 + for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { 1274 1177 lmac = cgx->lmac_idmap[i]; 1275 1178 if (!lmac) 1276 1179 continue; 1277 - free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac); 1180 + cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); 1181 + cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); 1278 1182 kfree(lmac->name); 1279 1183 kfree(lmac); 1280 1184 } 1281 1185 1282 1186 return 0; 1283 1187 } 1188 + 1189 + static void cgx_populate_features(struct cgx *cgx) 1190 + { 1191 + if (is_dev_rpm(cgx)) 1192 + cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC); 1193 + else 1194 + cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); 1195 + } 1196 + 1197 + static struct 
mac_ops cgx_mac_ops = { 1198 + .name = "cgx", 1199 + .csr_offset = 0, 1200 + .lmac_offset = 18, 1201 + .int_register = CGXX_CMRX_INT, 1202 + .int_set_reg = CGXX_CMRX_INT_ENA_W1S, 1203 + .irq_offset = 9, 1204 + .int_ena_bit = FW_CGX_INT, 1205 + .lmac_fwi = CGX_LMAC_FWI, 1206 + .non_contiguous_serdes_lane = false, 1207 + .rx_stats_cnt = 9, 1208 + .tx_stats_cnt = 18, 1209 + .get_nr_lmacs = cgx_get_nr_lmacs, 1210 + .get_lmac_type = cgx_get_lmac_type, 1211 + .mac_lmac_intl_lbk = cgx_lmac_internal_loopback, 1212 + .mac_get_rx_stats = cgx_get_rx_stats, 1213 + .mac_get_tx_stats = cgx_get_tx_stats, 1214 + .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding, 1215 + .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status, 1216 + .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm, 1217 + .mac_pause_frm_config = cgx_lmac_pause_frm_config, 1218 + }; 1284 1219 1285 1220 static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1286 1221 { ··· 1325 1198 cgx->pdev = pdev; 1326 1199 1327 1200 pci_set_drvdata(pdev, cgx); 1201 + 1202 + /* Use mac_ops to get MAC specific features */ 1203 + if (pdev->device == PCI_DEVID_CN10K_RPM) 1204 + cgx->mac_ops = rpm_get_mac_ops(); 1205 + else 1206 + cgx->mac_ops = &cgx_mac_ops; 1328 1207 1329 1208 err = pci_enable_device(pdev); 1330 1209 if (err) { ··· 1353 1220 goto err_release_regions; 1354 1221 } 1355 1222 1356 - nvec = CGX_NVEC; 1223 + nvec = pci_msix_vec_count(cgx->pdev); 1357 1224 err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); 1358 1225 if (err < 0 || err != nvec) { 1359 1226 dev_err(dev, "Request for %d msix vectors failed, err %d\n", ··· 1376 1243 list_add(&cgx->cgx_list, &cgx_list); 1377 1244 1378 1245 cgx_link_usertable_init(); 1246 + 1247 + cgx_populate_features(cgx); 1248 + 1249 + mutex_init(&cgx->lock); 1379 1250 1380 1251 err = cgx_lmac_init(cgx); 1381 1252 if (err) ··· 1404 1267 { 1405 1268 struct cgx *cgx = pci_get_drvdata(pdev); 1406 1269 1407 - cgx_lmac_exit(cgx); 1408 - 
list_del(&cgx->cgx_list); 1270 + if (cgx) { 1271 + cgx_lmac_exit(cgx); 1272 + list_del(&cgx->cgx_list); 1273 + } 1409 1274 pci_free_irq_vectors(pdev); 1410 1275 pci_release_regions(pdev); 1411 1276 pci_disable_device(pdev);
+11 -4
drivers/net/ethernet/marvell/octeontx2/af/cgx.h
··· 13 13 14 14 #include "mbox.h" 15 15 #include "cgx_fw_if.h" 16 + #include "rpm.h" 16 17 17 18 /* PCI device IDs */ 18 19 #define PCI_DEVID_OCTEONTX2_CGX 0xA059 ··· 43 42 #define CGXX_CMRX_RX_ID_MAP 0x060 44 43 #define CGXX_CMRX_RX_STAT0 0x070 45 44 #define CGXX_CMRX_RX_LMACS 0x128 46 - #define CGXX_CMRX_RX_DMAC_CTL0 0x1F8 45 + #define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset) 47 46 #define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) 48 47 #define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) 49 48 #define CGX_DMAC_MCAST_MODE BIT_ULL(1) 50 49 #define CGX_DMAC_BCAST_MODE BIT_ULL(0) 51 - #define CGXX_CMRX_RX_DMAC_CAM0 0x200 50 + #define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset) 52 51 #define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) 53 52 #define CGXX_CMRX_RX_DMAC_CAM1 0x400 54 53 #define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) ··· 56 55 #define CGXX_SCRATCH0_REG 0x1050 57 56 #define CGXX_SCRATCH1_REG 0x1058 58 57 #define CGX_CONST 0x2000 58 + #define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0) 59 59 #define CGXX_SPUX_CONTROL1 0x10000 60 60 #define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700 61 61 #define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800 ··· 88 86 #define CGX_CMD_TIMEOUT 2200 /* msecs */ 89 87 #define DEFAULT_PAUSE_TIME 0x7FF 90 88 91 - #define CGX_NVEC 37 92 89 #define CGX_LMAC_FWI 0 93 90 94 91 enum cgx_nix_stat_type { ··· 158 157 int cgx_get_phy_fec_stats(void *cgxd, int lmac_id); 159 158 int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, 160 159 int cgx_id, int lmac_id); 161 - 160 + u64 cgx_features_get(void *cgxd); 161 + struct mac_ops *get_mac_ops(void *cgxd); 162 + int cgx_get_nr_lmacs(void *cgxd); 163 + u8 cgx_get_lmacid(void *cgxd, u8 lmac_index); 164 + unsigned long cgx_get_lmac_bmap(void *cgxd); 165 + void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val); 166 + u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset); 162 167 #endif /* CGX_H */
+1
drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
··· 204 204 * CGX_STAT_SUCCESS 205 205 */ 206 206 #define RESP_FWD_BASE GENMASK_ULL(56, 9) 207 + #define RESP_LINKSTAT_LMAC_TYPE GENMASK_ULL(35, 28) 207 208 208 209 /* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE 209 210 * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+5
drivers/net/ethernet/marvell/octeontx2/af/common.h
··· 155 155 #define NIC_HW_MIN_FRS 40 156 156 #define NIC_HW_MAX_FRS 9212 157 157 #define SDP_HW_MAX_FRS 65535 158 + #define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */ 159 + #define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */ 158 160 159 161 /* NIX RX action operation*/ 160 162 #define NIX_RX_ACTIONOP_DROP (0x0ull) ··· 193 191 #define NIX_LINK_LBK(a) (12 + (a)) 194 192 #define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) 195 193 #define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b)) 194 + #define NIX_CHAN_SDP_CH_START (0x700ull) 195 + 196 + #define SDP_CHANNELS 256 196 197 197 198 /* NIX LSO format indices. 198 199 * As of now TSO is the only one using, so statically assigning indices.
+131
drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell OcteonTx2 RPM driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + */ 6 + 7 + #ifndef LMAC_COMMON_H 8 + #define LMAC_COMMON_H 9 + 10 + #include "rvu.h" 11 + #include "cgx.h" 12 + /** 13 + * struct lmac 14 + * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion 15 + * @cmd_lock: Lock to serialize the command interface 16 + * @resp: command response 17 + * @link_info: link related information 18 + * @event_cb: callback for linkchange events 19 + * @event_cb_lock: lock for serializing callback with unregister 20 + * @cmd_pend: flag set before new command is started 21 + * flag cleared after command response is received 22 + * @cgx: parent cgx port 23 + * @lmac_id: lmac port id 24 + * @name: lmac port name 25 + */ 26 + struct lmac { 27 + wait_queue_head_t wq_cmd_cmplt; 28 + /* Lock to serialize the command interface */ 29 + struct mutex cmd_lock; 30 + u64 resp; 31 + struct cgx_link_user_info link_info; 32 + struct cgx_event_cb event_cb; 33 + /* lock for serializing callback with unregister */ 34 + spinlock_t event_cb_lock; 35 + bool cmd_pend; 36 + struct cgx *cgx; 37 + u8 lmac_id; 38 + char *name; 39 + }; 40 + 41 + /* CGX & RPM has different feature set 42 + * update the structure fields with different one 43 + */ 44 + struct mac_ops { 45 + char *name; 46 + /* Features like RXSTAT, TXSTAT, DMAC FILTER csrs differs by fixed 47 + * bar offset for example 48 + * CGX DMAC_CTL0 0x1f8 49 + * RPM DMAC_CTL0 0x4ff8 50 + */ 51 + u64 csr_offset; 52 + /* For ATF to send events to kernel, there is no dedicated interrupt 53 + * defined hence CGX uses OVERFLOW bit in CMR_INT. 
RPM block supports 54 + * SW_INT so that ATF triggers this interrupt after processing of 55 + * requested command 56 + */ 57 + u64 int_register; 58 + u64 int_set_reg; 59 + /* lmac offset is different is RPM */ 60 + u8 lmac_offset; 61 + u8 irq_offset; 62 + u8 int_ena_bit; 63 + u8 lmac_fwi; 64 + u32 fifo_len; 65 + bool non_contiguous_serdes_lane; 66 + /* RPM & CGX differs in number of Receive/transmit stats */ 67 + u8 rx_stats_cnt; 68 + u8 tx_stats_cnt; 69 + /* Incase of RPM get number of lmacs from RPMX_CMR_RX_LMACS[LMAC_EXIST] 70 + * number of setbits in lmac_exist tells number of lmacs 71 + */ 72 + int (*get_nr_lmacs)(void *cgx); 73 + u8 (*get_lmac_type)(void *cgx, int lmac_id); 74 + int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id, 75 + bool enable); 76 + /* Register Stats related functions */ 77 + int (*mac_get_rx_stats)(void *cgx, int lmac_id, 78 + int idx, u64 *rx_stat); 79 + int (*mac_get_tx_stats)(void *cgx, int lmac_id, 80 + int idx, u64 *tx_stat); 81 + 82 + /* Enable LMAC Pause Frame Configuration */ 83 + void (*mac_enadis_rx_pause_fwding)(void *cgxd, 84 + int lmac_id, 85 + bool enable); 86 + 87 + int (*mac_get_pause_frm_status)(void *cgxd, 88 + int lmac_id, 89 + u8 *tx_pause, 90 + u8 *rx_pause); 91 + 92 + int (*mac_enadis_pause_frm)(void *cgxd, 93 + int lmac_id, 94 + u8 tx_pause, 95 + u8 rx_pause); 96 + 97 + void (*mac_pause_frm_config)(void *cgxd, 98 + int lmac_id, 99 + bool enable); 100 + }; 101 + 102 + struct cgx { 103 + void __iomem *reg_base; 104 + struct pci_dev *pdev; 105 + u8 cgx_id; 106 + u8 lmac_count; 107 + struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; 108 + struct work_struct cgx_cmd_work; 109 + struct workqueue_struct *cgx_cmd_workq; 110 + struct list_head cgx_list; 111 + u64 hw_features; 112 + struct mac_ops *mac_ops; 113 + unsigned long lmac_bmap; /* bitmap of enabled lmacs */ 114 + /* Lock to serialize read/write of global csrs like 115 + * RPMX_MTI_STAT_DATA_HI_CDC etc 116 + */ 117 + struct mutex lock; 118 + }; 119 + 120 + typedef struct cgx 
rpm_t; 121 + 122 + /* Function Declarations */ 123 + void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val); 124 + u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset); 125 + struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx); 126 + int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac); 127 + int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id); 128 + bool is_lmac_valid(struct cgx *cgx, int lmac_id); 129 + struct mac_ops *rpm_get_mac_ops(void); 130 + 131 + #endif /* LMAC_COMMON_H */
+50 -9
drivers/net/ethernet/marvell/octeontx2/af/mbox.c
··· 20 20 21 21 void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid) 22 22 { 23 - void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); 24 23 struct otx2_mbox_dev *mdev = &mbox->dev[devid]; 25 24 struct mbox_hdr *tx_hdr, *rx_hdr; 25 + void *hw_mbase = mdev->hwbase; 26 26 27 27 tx_hdr = hw_mbase + mbox->tx_start; 28 28 rx_hdr = hw_mbase + mbox->rx_start; ··· 56 56 } 57 57 EXPORT_SYMBOL(otx2_mbox_destroy); 58 58 59 - int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, 60 - void *reg_base, int direction, int ndevs) 59 + static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, 60 + void *reg_base, int direction, int ndevs) 61 61 { 62 - struct otx2_mbox_dev *mdev; 63 - int devid; 64 - 65 62 switch (direction) { 66 63 case MBOX_DIR_AFPF: 67 64 case MBOX_DIR_PFVF: ··· 118 121 } 119 122 120 123 mbox->reg_base = reg_base; 121 - mbox->hwbase = hwbase; 122 124 mbox->pdev = pdev; 123 125 124 126 mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); ··· 125 129 otx2_mbox_destroy(mbox); 126 130 return -ENOMEM; 127 131 } 128 - 129 132 mbox->ndevs = ndevs; 133 + 134 + return 0; 135 + } 136 + 137 + int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, 138 + void *reg_base, int direction, int ndevs) 139 + { 140 + struct otx2_mbox_dev *mdev; 141 + int devid, err; 142 + 143 + err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs); 144 + if (err) 145 + return err; 146 + 147 + mbox->hwbase = hwbase; 148 + 130 149 for (devid = 0; devid < ndevs; devid++) { 131 150 mdev = &mbox->dev[devid]; 132 151 mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE); 152 + mdev->hwbase = mdev->mbase; 133 153 spin_lock_init(&mdev->mbox_lock); 134 154 /* Init header to reset value */ 135 155 otx2_mbox_reset(mbox, devid); ··· 154 142 return 0; 155 143 } 156 144 EXPORT_SYMBOL(otx2_mbox_init); 145 + 146 + /* Initialize mailbox with the set of mailbox region addresses 147 + * in the array hwbase. 
148 + */ 149 + int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase, 150 + struct pci_dev *pdev, void *reg_base, 151 + int direction, int ndevs) 152 + { 153 + struct otx2_mbox_dev *mdev; 154 + int devid, err; 155 + 156 + err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs); 157 + if (err) 158 + return err; 159 + 160 + mbox->hwbase = hwbase[0]; 161 + 162 + for (devid = 0; devid < ndevs; devid++) { 163 + mdev = &mbox->dev[devid]; 164 + mdev->mbase = hwbase[devid]; 165 + mdev->hwbase = hwbase[devid]; 166 + spin_lock_init(&mdev->mbox_lock); 167 + /* Init header to reset value */ 168 + otx2_mbox_reset(mbox, devid); 169 + } 170 + 171 + return 0; 172 + } 173 + EXPORT_SYMBOL(otx2_mbox_regions_init); 157 174 158 175 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) 159 176 { ··· 216 175 217 176 void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) 218 177 { 219 - void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); 220 178 struct otx2_mbox_dev *mdev = &mbox->dev[devid]; 221 179 struct mbox_hdr *tx_hdr, *rx_hdr; 180 + void *hw_mbase = mdev->hwbase; 222 181 223 182 tx_hdr = hw_mbase + mbox->tx_start; 224 183 rx_hdr = hw_mbase + mbox->rx_start;
+69 -1
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
··· 52 52 53 53 struct otx2_mbox_dev { 54 54 void *mbase; /* This dev's mbox region */ 55 + void *hwbase; 55 56 spinlock_t mbox_lock; 56 57 u16 msg_size; /* Total msg size to be sent */ 57 58 u16 rsp_size; /* Total rsp size to be sure the reply is ok */ ··· 99 98 int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase, 100 99 struct pci_dev *pdev, void __force *reg_base, 101 100 int direction, int ndevs); 101 + int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase, 102 + struct pci_dev *pdev, void __force *reg_base, 103 + int direction, int ndevs); 102 104 void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid); 103 105 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid); 104 106 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid); ··· 159 155 M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ 160 156 M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ 161 157 cgx_set_link_mode_rsp) \ 158 + M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ 159 + cgx_features_info_msg) \ 160 + M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ 162 161 /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ 163 162 /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ 164 163 M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ ··· 251 244 nix_bp_cfg_rsp) \ 252 245 M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ 253 246 M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ 247 + M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ 248 + nix_cn10k_aq_enq_rsp) \ 249 + M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info) 254 250 255 251 /* Messages initiated by AF (range 0xC00 - 0xDFF) */ 256 252 #define MBOX_UP_CGX_MESSAGES \ ··· 371 361 372 362 struct cgx_stats_rsp { 373 363 struct mbox_msghdr hdr; 374 - #define CGX_RX_STATS_COUNT 13 364 + #define CGX_RX_STATS_COUNT 9 375 365 #define CGX_TX_STATS_COUNT 18 376 366 u64 
rx_stats[CGX_RX_STATS_COUNT]; 377 367 u64 tx_stats[CGX_TX_STATS_COUNT]; ··· 486 476 struct cgx_set_link_mode_rsp { 487 477 struct mbox_msghdr hdr; 488 478 int status; 479 + }; 480 + 481 + #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ 482 + #define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precison time protocol */ 483 + #define RVU_MAC_VERSION BIT_ULL(2) 484 + #define RVU_MAC_CGX BIT_ULL(3) 485 + #define RVU_MAC_RPM BIT_ULL(4) 486 + 487 + struct cgx_features_info_msg { 488 + struct mbox_msghdr hdr; 489 + u64 lmac_features; 490 + }; 491 + 492 + struct rpm_stats_rsp { 493 + struct mbox_msghdr hdr; 494 + #define RPM_RX_STATS_COUNT 43 495 + #define RPM_TX_STATS_COUNT 34 496 + u64 rx_stats[RPM_RX_STATS_COUNT]; 497 + u64 tx_stats[RPM_TX_STATS_COUNT]; 489 498 }; 490 499 491 500 /* NPA mbox message formats */ ··· 659 630 #define NIX_LF_DISABLE_FLOWS BIT_ULL(0) 660 631 #define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1) 661 632 u64 flags; 633 + }; 634 + 635 + /* CN10K NIX AQ enqueue msg */ 636 + struct nix_cn10k_aq_enq_req { 637 + struct mbox_msghdr hdr; 638 + u32 qidx; 639 + u8 ctype; 640 + u8 op; 641 + union { 642 + struct nix_cn10k_rq_ctx_s rq; 643 + struct nix_cn10k_sq_ctx_s sq; 644 + struct nix_cq_ctx_s cq; 645 + struct nix_rsse_s rss; 646 + struct nix_rx_mce_s mce; 647 + }; 648 + union { 649 + struct nix_cn10k_rq_ctx_s rq_mask; 650 + struct nix_cn10k_sq_ctx_s sq_mask; 651 + struct nix_cq_ctx_s cq_mask; 652 + struct nix_rsse_s rss_mask; 653 + struct nix_rx_mce_s mce_mask; 654 + }; 655 + }; 656 + 657 + struct nix_cn10k_aq_enq_rsp { 658 + struct mbox_msghdr hdr; 659 + union { 660 + struct nix_cn10k_rq_ctx_s rq; 661 + struct nix_cn10k_sq_ctx_s sq; 662 + struct nix_cq_ctx_s cq; 663 + struct nix_rsse_s rss; 664 + struct nix_rx_mce_s mce; 665 + }; 662 666 }; 663 667 664 668 /* NIX AQ enqueue msg */ ··· 956 894 struct mbox_msghdr hdr; 957 895 u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */ 958 896 u8 chan_cnt; /* Number of channel for which bpids are assigned */ 897 + 
}; 898 + 899 + struct nix_hw_info { 900 + struct mbox_msghdr hdr; 901 + u16 max_mtu; 902 + u16 min_mtu; 959 903 }; 960 904 961 905 /* NPC mbox message structs */
+12
drivers/net/ethernet/marvell/octeontx2/af/ptp.c
··· 21 21 #define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300 22 22 #define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400 23 23 #define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500 24 + #define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900 25 + #define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00 26 + #define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00 24 27 #define PCI_DEVID_OCTEONTX2_RST 0xA085 25 28 26 29 #define PCI_PTP_BAR_NO 0 ··· 237 234 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, 238 235 PCI_VENDOR_ID_CAVIUM, 239 236 PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) }, 237 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, 238 + PCI_VENDOR_ID_CAVIUM, 239 + PCI_SUBSYS_DEVID_CN10K_A_PTP) }, 240 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, 241 + PCI_VENDOR_ID_CAVIUM, 242 + PCI_SUBSYS_DEVID_CNF10K_A_PTP) }, 243 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, 244 + PCI_VENDOR_ID_CAVIUM, 245 + PCI_SUBSYS_DEVID_CNF10K_B_PTP) }, 240 246 { 0, } 241 247 }; 242 248
+272
drivers/net/ethernet/marvell/octeontx2/af/rpm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell OcteonTx2 RPM driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #include "cgx.h" 9 + #include "lmac_common.h" 10 + 11 + static struct mac_ops rpm_mac_ops = { 12 + .name = "rpm", 13 + .csr_offset = 0x4e00, 14 + .lmac_offset = 20, 15 + .int_register = RPMX_CMRX_SW_INT, 16 + .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S, 17 + .irq_offset = 1, 18 + .int_ena_bit = BIT_ULL(0), 19 + .lmac_fwi = RPM_LMAC_FWI, 20 + .non_contiguous_serdes_lane = true, 21 + .rx_stats_cnt = 43, 22 + .tx_stats_cnt = 34, 23 + .get_nr_lmacs = rpm_get_nr_lmacs, 24 + .get_lmac_type = rpm_get_lmac_type, 25 + .mac_lmac_intl_lbk = rpm_lmac_internal_loopback, 26 + .mac_get_rx_stats = rpm_get_rx_stats, 27 + .mac_get_tx_stats = rpm_get_tx_stats, 28 + .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding, 29 + .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status, 30 + .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm, 31 + .mac_pause_frm_config = rpm_lmac_pause_frm_config, 32 + }; 33 + 34 + struct mac_ops *rpm_get_mac_ops(void) 35 + { 36 + return &rpm_mac_ops; 37 + } 38 + 39 + static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val) 40 + { 41 + cgx_write(rpm, lmac, offset, val); 42 + } 43 + 44 + static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset) 45 + { 46 + return cgx_read(rpm, lmac, offset); 47 + } 48 + 49 + int rpm_get_nr_lmacs(void *rpmd) 50 + { 51 + rpm_t *rpm = rpmd; 52 + 53 + return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL); 54 + } 55 + 56 + void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable) 57 + { 58 + rpm_t *rpm = rpmd; 59 + u64 cfg; 60 + 61 + if (!rpm) 62 + return; 63 + 64 + if (enable) { 65 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 66 + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; 67 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 68 + } else { 69 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 70 + 
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; 71 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 72 + } 73 + } 74 + 75 + int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id, 76 + u8 *tx_pause, u8 *rx_pause) 77 + { 78 + rpm_t *rpm = rpmd; 79 + u64 cfg; 80 + 81 + if (!is_lmac_valid(rpm, lmac_id)) 82 + return -ENODEV; 83 + 84 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 85 + *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE); 86 + 87 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 88 + *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE); 89 + return 0; 90 + } 91 + 92 + int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, 93 + u8 rx_pause) 94 + { 95 + rpm_t *rpm = rpmd; 96 + u64 cfg; 97 + 98 + if (!is_lmac_valid(rpm, lmac_id)) 99 + return -ENODEV; 100 + 101 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 102 + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; 103 + cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; 104 + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; 105 + cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; 106 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 107 + 108 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 109 + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; 110 + cfg |= tx_pause ? 
0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; 111 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 112 + 113 + cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP); 114 + if (tx_pause) { 115 + cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id); 116 + } else { 117 + cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id); 118 + cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id); 119 + } 120 + rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg); 121 + return 0; 122 + } 123 + 124 + void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable) 125 + { 126 + rpm_t *rpm = rpmd; 127 + u64 cfg; 128 + 129 + if (enable) { 130 + /* Enable 802.3 pause frame mode */ 131 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 132 + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE; 133 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 134 + 135 + /* Enable receive pause frames */ 136 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 137 + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; 138 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 139 + 140 + /* Enable forward pause to TX block */ 141 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 142 + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; 143 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 144 + 145 + /* Enable pause frames transmission */ 146 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 147 + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; 148 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 149 + 150 + /* Set pause time and interval */ 151 + cfg = rpm_read(rpm, lmac_id, 152 + RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA); 153 + cfg &= ~0xFFFFULL; 154 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA, 155 + cfg | RPM_DEFAULT_PAUSE_TIME); 156 + /* Set pause interval as the hardware default is too short */ 157 + cfg = rpm_read(rpm, lmac_id, 158 + RPMX_MTI_MAC100X_CL01_QUANTA_THRESH); 159 + cfg &= ~0xFFFFULL; 160 + 
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH, 161 + cfg | (RPM_DEFAULT_PAUSE_TIME / 2)); 162 + 163 + } else { 164 + /* ALL pause frames received are completely ignored */ 165 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 166 + cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; 167 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 168 + 169 + /* Disable forward pause to TX block */ 170 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 171 + cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; 172 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 173 + 174 + /* Disable pause frames transmission */ 175 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); 176 + cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; 177 + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); 178 + } 179 + } 180 + 181 + int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat) 182 + { 183 + rpm_t *rpm = rpmd; 184 + u64 val_lo, val_hi; 185 + 186 + if (!rpm || lmac_id >= rpm->lmac_count) 187 + return -ENODEV; 188 + 189 + mutex_lock(&rpm->lock); 190 + 191 + /* Update idx to point per lmac Rx statistics page */ 192 + idx += lmac_id * rpm->mac_ops->rx_stats_cnt; 193 + 194 + /* Read lower 32 bits of counter */ 195 + val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX + 196 + (idx * 8)); 197 + 198 + /* upon read of lower 32 bits, higher 32 bits are written 199 + * to RPMX_MTI_STAT_DATA_HI_CDC 200 + */ 201 + val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); 202 + 203 + *rx_stat = (val_hi << 32 | val_lo); 204 + 205 + mutex_unlock(&rpm->lock); 206 + return 0; 207 + } 208 + 209 + int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat) 210 + { 211 + rpm_t *rpm = rpmd; 212 + u64 val_lo, val_hi; 213 + 214 + if (!rpm || lmac_id >= rpm->lmac_count) 215 + return -ENODEV; 216 + 217 + mutex_lock(&rpm->lock); 218 + 219 + /* Update idx to point per lmac Tx statistics page 
*/ 220 + idx += lmac_id * rpm->mac_ops->tx_stats_cnt; 221 + 222 + val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX + 223 + (idx * 8)); 224 + val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); 225 + 226 + *tx_stat = (val_hi << 32 | val_lo); 227 + 228 + mutex_unlock(&rpm->lock); 229 + return 0; 230 + } 231 + 232 + u8 rpm_get_lmac_type(void *rpmd, int lmac_id) 233 + { 234 + rpm_t *rpm = rpmd; 235 + u64 req = 0, resp; 236 + int err; 237 + 238 + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req); 239 + err = cgx_fwi_cmd_generic(req, &resp, rpm, 0); 240 + if (!err) 241 + return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp); 242 + return err; 243 + } 244 + 245 + int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable) 246 + { 247 + rpm_t *rpm = rpmd; 248 + u8 lmac_type; 249 + u64 cfg; 250 + 251 + if (!rpm || lmac_id >= rpm->lmac_count) 252 + return -ENODEV; 253 + lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id); 254 + if (lmac_type == LMAC_MODE_100G_R) { 255 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1); 256 + 257 + if (enable) 258 + cfg |= RPMX_MTI_PCS_LBK; 259 + else 260 + cfg &= ~RPMX_MTI_PCS_LBK; 261 + rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg); 262 + } else { 263 + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1); 264 + if (enable) 265 + cfg |= RPMX_MTI_PCS_LBK; 266 + else 267 + cfg &= ~RPMX_MTI_PCS_LBK; 268 + rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg); 269 + } 270 + 271 + return 0; 272 + }
+57
drivers/net/ethernet/marvell/octeontx2/af/rpm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Marvell OcteonTx2 RPM driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + * 6 + */ 7 + 8 + #ifndef RPM_H 9 + #define RPM_H 10 + 11 + #include <linux/bits.h> 12 + 13 + /* PCI device IDs */ 14 + #define PCI_DEVID_CN10K_RPM 0xA060 15 + 16 + /* Registers */ 17 + #define RPMX_CMRX_SW_INT 0x180 18 + #define RPMX_CMRX_SW_INT_W1S 0x188 19 + #define RPMX_CMRX_SW_INT_ENA_W1S 0x198 20 + #define RPMX_CMRX_LINK_CFG 0x1070 21 + #define RPMX_MTI_PCS100X_CONTROL1 0x20000 22 + #define RPMX_MTI_LPCSX_CONTROL1 0x30000 23 + #define RPMX_MTI_PCS_LBK BIT_ULL(14) 24 + #define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100)) 25 + 26 + #define RPMX_CMRX_LINK_RANGE_MASK GENMASK_ULL(19, 16) 27 + #define RPMX_CMRX_LINK_BASE_MASK GENMASK_ULL(11, 0) 28 + #define RPMX_MTI_MAC100X_COMMAND_CONFIG 0x8010 29 + #define RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE BIT_ULL(29) 30 + #define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE BIT_ULL(28) 31 + #define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8) 32 + #define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19) 33 + #define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8 34 + #define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8 35 + #define RPM_DEFAULT_PAUSE_TIME 0xFFFF 36 + #define RPMX_CMR_RX_OVR_BP 0x4120 37 + #define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8) 38 + #define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4) 39 + #define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000 40 + #define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000 41 + #define RPMX_MTI_STAT_DATA_HI_CDC 0x10038 42 + 43 + #define RPM_LMAC_FWI 0xa 44 + 45 + /* Function Declarations */ 46 + int rpm_get_nr_lmacs(void *rpmd); 47 + u8 rpm_get_lmac_type(void *rpmd, int lmac_id); 48 + int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable); 49 + void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable); 50 + int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause, 51 + u8 *rx_pause); 
52 + void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable); 53 + int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, 54 + u8 rx_pause); 55 + int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat); 56 + int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat); 57 + #endif /* RPM_H */
+129 -30
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
··· 22 22 23 23 #include "rvu_trace.h" 24 24 25 - #define DRV_NAME "octeontx2-af" 25 + #define DRV_NAME "rvu_af" 26 26 #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" 27 27 28 28 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); ··· 78 78 if (is_rvu_96xx_A0(rvu)) 79 79 hw->cap.nix_rx_multicast = false; 80 80 } 81 + 82 + if (!is_rvu_otx2(rvu)) 83 + hw->cap.per_pf_mbox_regs = true; 81 84 } 82 85 83 86 /* Poll a RVU block's register 'offset', for a 'zero' ··· 855 852 return rvu_alloc_bitmap(&block->lf); 856 853 } 857 854 855 + static void rvu_get_lbk_bufsize(struct rvu *rvu) 856 + { 857 + struct pci_dev *pdev = NULL; 858 + void __iomem *base; 859 + u64 lbk_const; 860 + 861 + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 862 + PCI_DEVID_OCTEONTX2_LBK, pdev); 863 + if (!pdev) 864 + return; 865 + 866 + base = pci_ioremap_bar(pdev, 0); 867 + if (!base) 868 + goto err_put; 869 + 870 + lbk_const = readq(base + LBK_CONST); 871 + 872 + /* cache fifo size */ 873 + rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const); 874 + 875 + iounmap(base); 876 + err_put: 877 + pci_dev_put(pdev); 878 + } 879 + 858 880 static int rvu_setup_hw_resources(struct rvu *rvu) 859 881 { 860 882 struct rvu_hwinfo *hw = rvu->hw; ··· 1031 1003 rvu_scan_block(rvu, block); 1032 1004 } 1033 1005 1006 + err = rvu_set_channels_base(rvu); 1007 + if (err) 1008 + goto msix_err; 1009 + 1034 1010 err = rvu_npc_init(rvu); 1035 1011 if (err) 1036 1012 goto npc_err; ··· 1050 1018 if (err) 1051 1019 goto npa_err; 1052 1020 1021 + rvu_get_lbk_bufsize(rvu); 1022 + 1053 1023 err = rvu_nix_init(rvu); 1054 1024 if (err) 1055 1025 goto nix_err; 1026 + 1027 + rvu_program_channels(rvu); 1056 1028 1057 1029 return 0; 1058 1030 ··· 1972 1936 __rvu_mbox_up_handler(mwork, TYPE_AFVF); 1973 1937 } 1974 1938 1939 + static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, 1940 + int num, int type) 1941 + { 1942 + struct rvu_hwinfo *hw = rvu->hw; 1943 + int region; 1944 + u64 bar4; 1945 + 
1946 + /* For cn10k platform VF mailbox regions of a PF follows after the 1947 + * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from 1948 + * RVU_PF_VF_BAR4_ADDR register. 1949 + */ 1950 + if (type == TYPE_AFVF) { 1951 + for (region = 0; region < num; region++) { 1952 + if (hw->cap.per_pf_mbox_regs) { 1953 + bar4 = rvu_read64(rvu, BLKADDR_RVUM, 1954 + RVU_AF_PFX_BAR4_ADDR(0)) + 1955 + MBOX_SIZE; 1956 + bar4 += region * MBOX_SIZE; 1957 + } else { 1958 + bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); 1959 + bar4 += region * MBOX_SIZE; 1960 + } 1961 + mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); 1962 + if (!mbox_addr[region]) 1963 + goto error; 1964 + } 1965 + return 0; 1966 + } 1967 + 1968 + /* For cn10k platform AF <-> PF mailbox region of a PF is read from per 1969 + * PF registers. Whereas for Octeontx2 it is read from 1970 + * RVU_AF_PF_BAR4_ADDR register. 1971 + */ 1972 + for (region = 0; region < num; region++) { 1973 + if (hw->cap.per_pf_mbox_regs) { 1974 + bar4 = rvu_read64(rvu, BLKADDR_RVUM, 1975 + RVU_AF_PFX_BAR4_ADDR(region)); 1976 + } else { 1977 + bar4 = rvu_read64(rvu, BLKADDR_RVUM, 1978 + RVU_AF_PF_BAR4_ADDR); 1979 + bar4 += region * MBOX_SIZE; 1980 + } 1981 + mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); 1982 + if (!mbox_addr[region]) 1983 + goto error; 1984 + } 1985 + return 0; 1986 + 1987 + error: 1988 + while (region--) 1989 + iounmap((void __iomem *)mbox_addr[region]); 1990 + return -ENOMEM; 1991 + } 1992 + 1975 1993 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, 1976 1994 int type, int num, 1977 1995 void (mbox_handler)(struct work_struct *), 1978 1996 void (mbox_up_handler)(struct work_struct *)) 1979 1997 { 1980 - void __iomem *hwbase = NULL, *reg_base; 1981 - int err, i, dir, dir_up; 1998 + int err = -EINVAL, i, dir, dir_up; 1999 + void __iomem *reg_base; 1982 2000 struct rvu_work *mwork; 2001 + void **mbox_regions; 1983 2002 const char *name; 1984 - u64 bar4_addr; 2003 + 2004 + 
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); 2005 + if (!mbox_regions) 2006 + return -ENOMEM; 1985 2007 1986 2008 switch (type) { 1987 2009 case TYPE_AFPF: 1988 2010 name = "rvu_afpf_mailbox"; 1989 - bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); 1990 2011 dir = MBOX_DIR_AFPF; 1991 2012 dir_up = MBOX_DIR_AFPF_UP; 1992 2013 reg_base = rvu->afreg_base; 2014 + err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF); 2015 + if (err) 2016 + goto free_regions; 1993 2017 break; 1994 2018 case TYPE_AFVF: 1995 2019 name = "rvu_afvf_mailbox"; 1996 - bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); 1997 2020 dir = MBOX_DIR_PFVF; 1998 2021 dir_up = MBOX_DIR_PFVF_UP; 1999 2022 reg_base = rvu->pfreg_base; 2023 + err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF); 2024 + if (err) 2025 + goto free_regions; 2000 2026 break; 2001 2027 default: 2002 - return -EINVAL; 2028 + return err; 2003 2029 } 2004 2030 2005 2031 mw->mbox_wq = alloc_workqueue(name, 2006 2032 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 2007 2033 num); 2008 - if (!mw->mbox_wq) 2009 - return -ENOMEM; 2034 + if (!mw->mbox_wq) { 2035 + err = -ENOMEM; 2036 + goto unmap_regions; 2037 + } 2010 2038 2011 2039 mw->mbox_wrk = devm_kcalloc(rvu->dev, num, 2012 2040 sizeof(struct rvu_work), GFP_KERNEL); ··· 2086 1986 goto exit; 2087 1987 } 2088 1988 2089 - /* Mailbox is a reserved memory (in RAM) region shared between 2090 - * RVU devices, shouldn't be mapped as device memory to allow 2091 - * unaligned accesses. 
2092 - */ 2093 - hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num); 2094 - if (!hwbase) { 2095 - dev_err(rvu->dev, "Unable to map mailbox region\n"); 2096 - err = -ENOMEM; 2097 - goto exit; 2098 - } 2099 - 2100 - err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num); 1989 + err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev, 1990 + reg_base, dir, num); 2101 1991 if (err) 2102 1992 goto exit; 2103 1993 2104 - err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev, 2105 - reg_base, dir_up, num); 1994 + err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev, 1995 + reg_base, dir_up, num); 2106 1996 if (err) 2107 1997 goto exit; 2108 1998 ··· 2105 2015 mwork->rvu = rvu; 2106 2016 INIT_WORK(&mwork->work, mbox_up_handler); 2107 2017 } 2108 - 2018 + kfree(mbox_regions); 2109 2019 return 0; 2020 + 2110 2021 exit: 2111 - if (hwbase) 2112 - iounmap((void __iomem *)hwbase); 2113 2022 destroy_workqueue(mw->mbox_wq); 2023 + unmap_regions: 2024 + while (num--) 2025 + iounmap((void __iomem *)mbox_regions[num]); 2026 + free_regions: 2027 + kfree(mbox_regions); 2114 2028 return err; 2115 2029 } 2116 2030 2117 2031 static void rvu_mbox_destroy(struct mbox_wq_info *mw) 2118 2032 { 2033 + struct otx2_mbox *mbox = &mw->mbox; 2034 + struct otx2_mbox_dev *mdev; 2035 + int devid; 2036 + 2119 2037 if (mw->mbox_wq) { 2120 2038 flush_workqueue(mw->mbox_wq); 2121 2039 destroy_workqueue(mw->mbox_wq); 2122 2040 mw->mbox_wq = NULL; 2123 2041 } 2124 2042 2125 - if (mw->mbox.hwbase) 2126 - iounmap((void __iomem *)mw->mbox.hwbase); 2043 + for (devid = 0; devid < mbox->ndevs; devid++) { 2044 + mdev = &mbox->dev[devid]; 2045 + if (mdev->hwbase) 2046 + iounmap((void __iomem *)mdev->hwbase); 2047 + } 2127 2048 2128 2049 otx2_mbox_destroy(&mw->mbox); 2129 2050 otx2_mbox_destroy(&mw->mbox_up); ··· 2753 2652 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); 2754 2653 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); 2755 2654 } 
2756 - 2757 - #define PCI_DEVID_OCTEONTX2_LBK 0xA061 2758 2655 2759 2656 int rvu_get_num_lbk_chans(void) 2760 2657 {
+71
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
··· 19 19 #include "common.h" 20 20 #include "mbox.h" 21 21 #include "npc.h" 22 + #include "rvu_reg.h" 22 23 23 24 /* PCI device IDs */ 24 25 #define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 26 + #define PCI_DEVID_OCTEONTX2_LBK 0xA061 25 27 26 28 /* Subsystem Device ID */ 27 29 #define PCI_SUBSYS_DEVID_96XX 0xB200 30 + #define PCI_SUBSYS_DEVID_CN10K_A 0xB900 28 31 29 32 /* PCI BAR nos */ 30 33 #define PCI_AF_REG_BAR_NUM 0 ··· 306 303 bool nix_shaping; /* Is shaping and coloring supported */ 307 304 bool nix_tx_link_bp; /* Can link backpressure TL queues ? */ 308 305 bool nix_rx_multicast; /* Rx packet replication support */ 306 + bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */ 307 + bool programmable_chans; /* Channels programmable ? */ 309 308 }; 310 309 311 310 struct rvu_hwinfo { ··· 316 311 u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */ 317 312 u8 cgx; 318 313 u8 lmac_per_cgx; 314 + u16 cgx_chan_base; /* CGX base channel number */ 315 + u16 lbk_chan_base; /* LBK base channel number */ 316 + u16 sdp_chan_base; /* SDP base channel number */ 317 + u16 cpt_chan_base; /* CPT base channel number */ 319 318 u8 cgx_links; 320 319 u8 lbk_links; 321 320 u8 sdp_links; 321 + u8 cpt_links; /* Number of CPT links */ 322 322 u8 npc_kpus; /* No of parser units */ 323 323 u8 npc_pkinds; /* No of port kinds */ 324 324 u8 npc_intfs; /* No of interfaces */ 325 325 u8 npc_kpu_entries; /* No of KPU entries */ 326 326 u16 npc_counters; /* No of match stats counters */ 327 + u32 lbk_bufsize; /* FIFO size supported by LBK */ 327 328 bool npc_ext_set; /* Extended register set */ 328 329 329 330 struct hw_cap cap; ··· 487 476 (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); 488 477 } 489 478 479 + /* REVID for PCIe devices. 
480 + * Bits 0..1: minor pass, bit 3..2: major pass 481 + * bits 7..4: midr id 482 + */ 483 + #define PCI_REVISION_ID_96XX 0x00 484 + #define PCI_REVISION_ID_95XX 0x10 485 + #define PCI_REVISION_ID_LOKI 0x20 486 + #define PCI_REVISION_ID_98XX 0x30 487 + #define PCI_REVISION_ID_95XXMM 0x40 488 + 489 + static inline bool is_rvu_otx2(struct rvu *rvu) 490 + { 491 + struct pci_dev *pdev = rvu->pdev; 492 + 493 + u8 midr = pdev->revision & 0xF0; 494 + 495 + return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || 496 + midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX || 497 + midr == PCI_REVISION_ID_95XXMM); 498 + } 499 + 500 + static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid, 501 + u8 lmacid, u8 chan) 502 + { 503 + u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST); 504 + u16 cgx_chans = nix_const & 0xFFULL; 505 + struct rvu_hwinfo *hw = rvu->hw; 506 + 507 + if (!hw->cap.programmable_chans) 508 + return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan); 509 + 510 + return rvu->hw->cgx_chan_base + 511 + (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan; 512 + } 513 + 514 + static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid, 515 + u8 chan) 516 + { 517 + u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST); 518 + u16 lbk_chans = (nix_const >> 16) & 0xFFULL; 519 + struct rvu_hwinfo *hw = rvu->hw; 520 + 521 + if (!hw->cap.programmable_chans) 522 + return NIX_CHAN_LBK_CHX(lbkid, chan); 523 + 524 + return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan; 525 + } 526 + 527 + static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan) 528 + { 529 + return rvu->hw->cpt_chan_base + chan; 530 + } 531 + 490 532 /* Function Prototypes 491 533 * RVU 492 534 */ ··· 676 612 void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, 677 613 int blkaddr, u16 src, struct mcam_entry *entry, 678 614 u8 *intf, u8 *ena); 615 + bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); 616 + u32 
rvu_cgx_get_fifolen(struct rvu *rvu); 617 + 679 618 /* CPT APIs */ 680 619 int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); 620 + 621 + /* CN10K RVU */ 622 + int rvu_set_channels_base(struct rvu *rvu); 623 + void rvu_program_channels(struct rvu *rvu); 681 624 682 625 #ifdef CONFIG_DEBUG_FS 683 626 void rvu_dbg_init(struct rvu *rvu);
+111 -23
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
··· 14 14 15 15 #include "rvu.h" 16 16 #include "cgx.h" 17 + #include "lmac_common.h" 17 18 #include "rvu_reg.h" 18 19 #include "rvu_trace.h" 19 20 ··· 42 41 43 42 MBOX_UP_CGX_MESSAGES 44 43 #undef M 44 + 45 + bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature) 46 + { 47 + u8 cgx_id, lmac_id; 48 + void *cgxd; 49 + 50 + if (!is_pf_cgxmapped(rvu, pf)) 51 + return 0; 52 + 53 + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 54 + cgxd = rvu_cgx_pdata(cgx_id, rvu); 55 + 56 + return (cgx_features_get(cgxd) & feature); 57 + } 45 58 46 59 /* Returns bitmap of mapped PFs */ 47 60 static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) ··· 107 92 { 108 93 struct npc_pkind *pkind = &rvu->hw->pkind; 109 94 int cgx_cnt_max = rvu->cgx_cnt_max; 110 - int cgx, lmac_cnt, lmac; 111 95 int pf = PF_CGXMAP_BASE; 96 + unsigned long lmac_bmap; 112 97 int size, free_pkind; 98 + int cgx, lmac, iter; 113 99 114 100 if (!cgx_cnt_max) 115 101 return 0; ··· 141 125 for (cgx = 0; cgx < cgx_cnt_max; cgx++) { 142 126 if (!rvu_cgx_pdata(cgx, rvu)) 143 127 continue; 144 - lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 145 - for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) { 128 + lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); 129 + for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) { 130 + lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), 131 + iter); 146 132 rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); 147 133 rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; 148 134 free_pkind = rvu_alloc_rsrc(&pkind->rsrc); 149 135 pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; 150 136 rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); 151 137 rvu->cgx_mapped_pfs++; 138 + pf++; 152 139 } 153 140 } 154 141 return 0; ··· 173 154 &qentry->link_event.link_uinfo); 174 155 qentry->link_event.cgx_id = cgx_id; 175 156 qentry->link_event.lmac_id = lmac_id; 176 - if (err) 157 + if (err) { 158 + kfree(qentry); 177 159 goto skip_add; 160 + } 178 161 
list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); 179 162 skip_add: 180 163 spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); ··· 272 251 273 252 static int cgx_lmac_event_handler_init(struct rvu *rvu) 274 253 { 254 + unsigned long lmac_bmap; 275 255 struct cgx_event_cb cb; 276 256 int cgx, lmac, err; 277 257 void *cgxd; ··· 293 271 cgxd = rvu_cgx_pdata(cgx, rvu); 294 272 if (!cgxd) 295 273 continue; 296 - for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) { 274 + lmac_bmap = cgx_get_lmac_bmap(cgxd); 275 + for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) { 297 276 err = cgx_lmac_evh_register(&cb, cgxd, lmac); 298 277 if (err) 299 278 dev_err(rvu->dev, ··· 372 349 373 350 int rvu_cgx_exit(struct rvu *rvu) 374 351 { 352 + unsigned long lmac_bmap; 375 353 int cgx, lmac; 376 354 void *cgxd; 377 355 ··· 380 356 cgxd = rvu_cgx_pdata(cgx, rvu); 381 357 if (!cgxd) 382 358 continue; 383 - for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) 359 + lmac_bmap = cgx_get_lmac_bmap(cgxd); 360 + for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) 384 361 cgx_lmac_evh_unregister(cgxd, lmac); 385 362 } 386 363 ··· 406 381 407 382 void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) 408 383 { 384 + struct mac_ops *mac_ops; 409 385 u8 cgx_id, lmac_id; 410 386 void *cgxd; 411 387 ··· 416 390 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 417 391 cgxd = rvu_cgx_pdata(cgx_id, rvu); 418 392 393 + mac_ops = get_mac_ops(cgxd); 419 394 /* Set / clear CTL_BCK to control pause frame forwarding to NIX */ 420 395 if (enable) 421 - cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true); 396 + mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true); 422 397 else 423 - cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false); 398 + mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false); 424 399 } 425 400 426 401 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) ··· 453 426 return 0; 454 427 } 455 428 456 - int 
rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, 457 - struct cgx_stats_rsp *rsp) 429 + static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, 430 + void *rsp) 458 431 { 459 432 int pf = rvu_get_pf(req->hdr.pcifunc); 433 + struct mac_ops *mac_ops; 460 434 int stat = 0, err = 0; 461 435 u64 tx_stat, rx_stat; 462 436 u8 cgx_idx, lmac; ··· 468 440 469 441 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); 470 442 cgxd = rvu_cgx_pdata(cgx_idx, rvu); 443 + mac_ops = get_mac_ops(cgxd); 471 444 472 445 /* Rx stats */ 473 - while (stat < CGX_RX_STATS_COUNT) { 474 - err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat); 446 + while (stat < mac_ops->rx_stats_cnt) { 447 + err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat); 475 448 if (err) 476 449 return err; 477 - rsp->rx_stats[stat] = rx_stat; 450 + if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT) 451 + ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat; 452 + else 453 + ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat; 478 454 stat++; 479 455 } 480 456 481 457 /* Tx stats */ 482 458 stat = 0; 483 - while (stat < CGX_TX_STATS_COUNT) { 484 - err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat); 459 + while (stat < mac_ops->tx_stats_cnt) { 460 + err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat); 485 461 if (err) 486 462 return err; 487 - rsp->tx_stats[stat] = tx_stat; 463 + if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT) 464 + ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat; 465 + else 466 + ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat; 488 467 stat++; 489 468 } 490 469 return 0; 470 + } 471 + 472 + int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, 473 + struct cgx_stats_rsp *rsp) 474 + { 475 + return rvu_lmac_get_stats(rvu, req, (void *)rsp); 476 + } 477 + 478 + int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, 479 + struct rpm_stats_rsp *rsp) 480 + { 481 + return rvu_lmac_get_stats(rvu, req, (void 
*)rsp); 491 482 } 492 483 493 484 int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, ··· 601 554 u8 cgx_id, lmac_id; 602 555 void *cgxd; 603 556 557 + if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 558 + return 0; 559 + 604 560 /* This msg is expected only from PFs that are mapped to CGX LMACs, 605 561 * if received from other PF/VF simply ACK, nothing to do. 606 562 */ ··· 690 640 return err; 691 641 } 692 642 643 + int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, 644 + struct msg_req *req, 645 + struct cgx_features_info_msg *rsp) 646 + { 647 + int pf = rvu_get_pf(req->hdr.pcifunc); 648 + u8 cgx_idx, lmac; 649 + void *cgxd; 650 + 651 + if (!is_pf_cgxmapped(rvu, pf)) 652 + return 0; 653 + 654 + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); 655 + cgxd = rvu_cgx_pdata(cgx_idx, rvu); 656 + rsp->lmac_features = cgx_features_get(cgxd); 657 + 658 + return 0; 659 + } 660 + 661 + u32 rvu_cgx_get_fifolen(struct rvu *rvu) 662 + { 663 + struct mac_ops *mac_ops; 664 + int rvu_def_cgx_id = 0; 665 + u32 fifo_len; 666 + 667 + mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); 668 + fifo_len = mac_ops ? 
mac_ops->fifo_len : 0; 669 + 670 + return fifo_len; 671 + } 672 + 693 673 static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) 694 674 { 695 - int pf = rvu_get_pf(pcifunc); 675 + struct mac_ops *mac_ops; 696 676 u8 cgx_id, lmac_id; 697 677 698 678 if (!is_cgx_config_permitted(rvu, pcifunc)) 699 679 return -EPERM; 700 680 701 - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 681 + mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); 702 682 703 - return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu), 683 + return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu), 704 684 lmac_id, en); 705 685 } 706 686 ··· 753 673 struct cgx_pause_frm_cfg *rsp) 754 674 { 755 675 int pf = rvu_get_pf(req->hdr.pcifunc); 676 + struct mac_ops *mac_ops; 756 677 u8 cgx_id, lmac_id; 678 + void *cgxd; 679 + 680 + if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC)) 681 + return 0; 757 682 758 683 /* This msg is expected only from PF/VFs that are mapped to CGX LMACs, 759 684 * if received from other PF/VF simply ACK, nothing to do. ··· 767 682 return -ENODEV; 768 683 769 684 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 685 + cgxd = rvu_cgx_pdata(cgx_id, rvu); 686 + mac_ops = get_mac_ops(cgxd); 770 687 771 688 if (req->set) 772 - cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id, 773 - req->tx_pause, req->rx_pause); 689 + mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, 690 + req->tx_pause, req->rx_pause); 774 691 else 775 - cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id, 776 - &rsp->tx_pause, &rsp->rx_pause); 692 + mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, 693 + &rsp->tx_pause, 694 + &rsp->rx_pause); 777 695 return 0; 778 696 } 779 697
+261
drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell RPM CN10K driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + */ 6 + 7 + #include <linux/bitfield.h> 8 + #include <linux/pci.h> 9 + #include "rvu.h" 10 + #include "cgx.h" 11 + #include "rvu_reg.h" 12 + 13 + int rvu_set_channels_base(struct rvu *rvu) 14 + { 15 + struct rvu_hwinfo *hw = rvu->hw; 16 + u16 cpt_chan_base; 17 + u64 nix_const; 18 + int blkaddr; 19 + 20 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); 21 + if (blkaddr < 0) 22 + return blkaddr; 23 + 24 + nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 25 + 26 + hw->cgx = (nix_const >> 12) & 0xFULL; 27 + hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL; 28 + hw->cgx_links = hw->cgx * hw->lmac_per_cgx; 29 + hw->lbk_links = (nix_const >> 24) & 0xFULL; 30 + hw->cpt_links = (nix_const >> 44) & 0xFULL; 31 + hw->sdp_links = 1; 32 + 33 + hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0); 34 + hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0); 35 + hw->sdp_chan_base = NIX_CHAN_SDP_CH_START; 36 + 37 + /* No Programmable channels */ 38 + if (!(nix_const & BIT_ULL(60))) 39 + return 0; 40 + 41 + hw->cap.programmable_chans = true; 42 + 43 + /* If programmable channels are present then configure 44 + * channels such that all channel numbers are contiguous 45 + * leaving no holes. This way the new CPT channels can be 46 + * accommodated. The order of channel numbers assigned is 47 + * LBK, SDP, CGX and CPT. 
48 + */ 49 + hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * 50 + ((nix_const >> 16) & 0xFFULL); 51 + hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS; 52 + 53 + cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * 54 + (nix_const & 0xFFULL); 55 + 56 + /* Out of 4096 channels start CPT from 2048 so 57 + * that MSB for CPT channels is always set 58 + */ 59 + if (cpt_chan_base <= 0x800) { 60 + hw->cpt_chan_base = 0x800; 61 + } else { 62 + dev_err(rvu->dev, 63 + "CPT channels could not fit in the range 2048-4095\n"); 64 + return -EINVAL; 65 + } 66 + 67 + return 0; 68 + } 69 + 70 + #define LBK_CONNECT_NIXX(a) (0x0 + (a)) 71 + 72 + static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base, 73 + u64 offset, int lbkid, u16 chans) 74 + { 75 + struct rvu_hwinfo *hw = rvu->hw; 76 + u64 cfg; 77 + 78 + cfg = readq(base + offset); 79 + cfg &= ~(LBK_LINK_CFG_RANGE_MASK | 80 + LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK); 81 + cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans)); 82 + cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid); 83 + cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base); 84 + 85 + writeq(cfg, base + offset); 86 + } 87 + 88 + static void rvu_lbk_set_channels(struct rvu *rvu) 89 + { 90 + struct pci_dev *pdev = NULL; 91 + void __iomem *base; 92 + u64 lbk_const; 93 + u8 src, dst; 94 + u16 chans; 95 + 96 + /* To loopback packets between multiple NIX blocks 97 + * multiple LBK blocks are needed. 
With two NIX blocks, 98 + * four LBK blocks are needed and each LBK block 99 + * source and destination are as follows: 100 + * LBK0 - source NIX0 and destination NIX1 101 + * LBK1 - source NIX0 and destination NIX1 102 + * LBK2 - source NIX1 and destination NIX0 103 + * LBK3 - source NIX1 and destination NIX1 104 + * As per the HRM channel numbers should be programmed as: 105 + * P2X and X2P of LBK0 as same 106 + * P2X and X2P of LBK3 as same 107 + * P2X of LBK1 and X2P of LBK2 as same 108 + * P2X of LBK2 and X2P of LBK1 as same 109 + */ 110 + while (true) { 111 + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 112 + PCI_DEVID_OCTEONTX2_LBK, pdev); 113 + if (!pdev) 114 + return; 115 + 116 + base = pci_ioremap_bar(pdev, 0); 117 + if (!base) 118 + goto err_put; 119 + 120 + lbk_const = readq(base + LBK_CONST); 121 + chans = FIELD_GET(LBK_CONST_CHANS, lbk_const); 122 + dst = FIELD_GET(LBK_CONST_DST, lbk_const); 123 + src = FIELD_GET(LBK_CONST_SRC, lbk_const); 124 + 125 + if (src == dst) { 126 + if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */ 127 + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, 128 + 0, chans); 129 + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, 130 + 0, chans); 131 + } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */ 132 + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, 133 + 1, chans); 134 + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, 135 + 1, chans); 136 + } 137 + } else { 138 + if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */ 139 + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, 140 + 0, chans); 141 + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, 142 + 1, chans); 143 + } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */ 144 + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, 145 + 1, chans); 146 + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, 147 + 0, chans); 148 + } 149 + } 150 + iounmap(base); 151 + } 152 + err_put: 153 + pci_dev_put(pdev); 154 + } 155 + 156 + static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr) 157 + { 158 + 
u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 159 + u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans; 160 + struct rvu_hwinfo *hw = rvu->hw; 161 + int link, nix_link = 0; 162 + u16 start; 163 + u64 cfg; 164 + 165 + cgx_chans = nix_const & 0xFFULL; 166 + lbk_chans = (nix_const >> 16) & 0xFFULL; 167 + sdp_chans = SDP_CHANNELS; 168 + cpt_chans = (nix_const >> 32) & 0xFFFULL; 169 + 170 + start = hw->cgx_chan_base; 171 + for (link = 0; link < hw->cgx_links; link++, nix_link++) { 172 + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); 173 + cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); 174 + cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans)); 175 + cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); 176 + rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); 177 + start += cgx_chans; 178 + } 179 + 180 + start = hw->lbk_chan_base; 181 + for (link = 0; link < hw->lbk_links; link++, nix_link++) { 182 + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); 183 + cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); 184 + cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans)); 185 + cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); 186 + rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); 187 + start += lbk_chans; 188 + } 189 + 190 + start = hw->sdp_chan_base; 191 + for (link = 0; link < hw->sdp_links; link++, nix_link++) { 192 + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); 193 + cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); 194 + cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans)); 195 + cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); 196 + rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); 197 + start += sdp_chans; 198 + } 199 + 200 + start = hw->cpt_chan_base; 201 + for (link = 0; link < hw->cpt_links; link++, nix_link++) { 202 + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); 203 + cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); 
204 + cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans)); 205 + cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); 206 + rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); 207 + start += cpt_chans; 208 + } 209 + } 210 + 211 + static void rvu_nix_set_channels(struct rvu *rvu) 212 + { 213 + int blkaddr = 0; 214 + 215 + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 216 + while (blkaddr) { 217 + __rvu_nix_set_channels(rvu, blkaddr); 218 + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); 219 + } 220 + } 221 + 222 + static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base) 223 + { 224 + u64 cfg; 225 + 226 + cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG); 227 + cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK); 228 + 229 + /* There is no read-only constant register to read 230 + * the number of channels for LMAC and it is always 16. 231 + */ 232 + cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16)); 233 + cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base); 234 + cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg); 235 + } 236 + 237 + static void rvu_rpm_set_channels(struct rvu *rvu) 238 + { 239 + struct rvu_hwinfo *hw = rvu->hw; 240 + u16 base = hw->cgx_chan_base; 241 + int cgx, lmac; 242 + 243 + for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) { 244 + for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) { 245 + __rvu_rpm_set_channels(cgx, lmac, base); 246 + base += 16; 247 + } 248 + } 249 + } 250 + 251 + void rvu_program_channels(struct rvu *rvu) 252 + { 253 + struct rvu_hwinfo *hw = rvu->hw; 254 + 255 + if (!hw->cap.programmable_chans) 256 + return; 257 + 258 + rvu_nix_set_channels(rvu); 259 + rvu_lbk_set_channels(rvu); 260 + rvu_rpm_set_channels(rvu); 261 + }
+319 -20
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
··· 19 19 #include "rvu_reg.h" 20 20 #include "rvu.h" 21 21 #include "cgx.h" 22 + #include "lmac_common.h" 22 23 #include "npc.h" 23 24 24 25 #define DEBUGFS_DIR_NAME "octeontx2" ··· 108 107 [CGX_STAT15] = "Packets sent to the multicast DMAC", 109 108 [CGX_STAT16] = "Transmit underflow and were truncated", 110 109 [CGX_STAT17] = "Control/PAUSE packets sent", 110 + }; 111 + 112 + static char *rpm_rx_stats_fields[] = { 113 + "Octets of received packets", 114 + "Octets of received packets with out error", 115 + "Received packets with alignment errors", 116 + "Control/PAUSE packets received", 117 + "Packets received with Frame too long Errors", 118 + "Packets received with a1nrange length Errors", 119 + "Received packets", 120 + "Packets received with FrameCheckSequenceErrors", 121 + "Packets received with VLAN header", 122 + "Error packets", 123 + "Packets recievd with unicast DMAC", 124 + "Packets received with multicast DMAC", 125 + "Packets received with broadcast DMAC", 126 + "Dropped packets", 127 + "Total frames received on interface", 128 + "Packets received with an octet count < 64", 129 + "Packets received with an octet count == 64", 130 + "Packets received with an octet count of 65–127", 131 + "Packets received with an octet count of 128-255", 132 + "Packets received with an octet count of 256-511", 133 + "Packets received with an octet count of 512-1023", 134 + "Packets received with an octet count of 1024-1518", 135 + "Packets received with an octet count of > 1518", 136 + "Oversized Packets", 137 + "Jabber Packets", 138 + "Fragmented Packets", 139 + "CBFC(class based flow control) pause frames received for class 0", 140 + "CBFC pause frames received for class 1", 141 + "CBFC pause frames received for class 2", 142 + "CBFC pause frames received for class 3", 143 + "CBFC pause frames received for class 4", 144 + "CBFC pause frames received for class 5", 145 + "CBFC pause frames received for class 6", 146 + "CBFC pause frames received for class 7", 147 + 
"CBFC pause frames received for class 8", 148 + "CBFC pause frames received for class 9", 149 + "CBFC pause frames received for class 10", 150 + "CBFC pause frames received for class 11", 151 + "CBFC pause frames received for class 12", 152 + "CBFC pause frames received for class 13", 153 + "CBFC pause frames received for class 14", 154 + "CBFC pause frames received for class 15", 155 + "MAC control packets received", 156 + }; 157 + 158 + static char *rpm_tx_stats_fields[] = { 159 + "Total octets sent on the interface", 160 + "Total octets transmitted OK", 161 + "Control/Pause frames sent", 162 + "Total frames transmitted OK", 163 + "Total frames sent with VLAN header", 164 + "Error Packets", 165 + "Packets sent to unicast DMAC", 166 + "Packets sent to the multicast DMAC", 167 + "Packets sent to a broadcast DMAC", 168 + "Packets sent with an octet count == 64", 169 + "Packets sent with an octet count of 65–127", 170 + "Packets sent with an octet count of 128-255", 171 + "Packets sent with an octet count of 256-511", 172 + "Packets sent with an octet count of 512-1023", 173 + "Packets sent with an octet count of 1024-1518", 174 + "Packets sent with an octet count of > 1518", 175 + "CBFC(class based flow control) pause frames transmitted for class 0", 176 + "CBFC pause frames transmitted for class 1", 177 + "CBFC pause frames transmitted for class 2", 178 + "CBFC pause frames transmitted for class 3", 179 + "CBFC pause frames transmitted for class 4", 180 + "CBFC pause frames transmitted for class 5", 181 + "CBFC pause frames transmitted for class 6", 182 + "CBFC pause frames transmitted for class 7", 183 + "CBFC pause frames transmitted for class 8", 184 + "CBFC pause frames transmitted for class 9", 185 + "CBFC pause frames transmitted for class 10", 186 + "CBFC pause frames transmitted for class 11", 187 + "CBFC pause frames transmitted for class 12", 188 + "CBFC pause frames transmitted for class 13", 189 + "CBFC pause frames transmitted for class 14", 190 + 
"CBFC pause frames transmitted for class 15", 191 + "MAC control packets sent", 192 + "Total frames sent on the interface" 111 193 }; 112 194 113 195 enum cpt_eng_type { ··· 318 234 { 319 235 struct rvu *rvu = filp->private; 320 236 struct pci_dev *pdev = NULL; 237 + struct mac_ops *mac_ops; 238 + int rvu_def_cgx_id = 0; 321 239 char cgx[10], lmac[10]; 322 240 struct rvu_pfvf *pfvf; 323 241 int pf, domain, blkid; ··· 327 241 u16 pcifunc; 328 242 329 243 domain = 2; 330 - seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n"); 244 + mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); 245 + seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", 246 + mac_ops->name); 331 247 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 332 248 if (!is_pf_cgxmapped(rvu, pf)) 333 249 continue; ··· 350 262 351 263 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, 352 264 &lmac_id); 353 - sprintf(cgx, "CGX%d", cgx_id); 265 + sprintf(cgx, "%s%d", mac_ops->name, cgx_id); 354 266 sprintf(lmac, "LMAC%d", lmac_id); 355 267 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n", 356 268 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); ··· 537 449 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) 538 450 { 539 451 struct npa_aura_s *aura = &rsp->aura; 452 + struct rvu *rvu = m->private; 540 453 541 454 seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); 542 455 ··· 557 468 558 469 seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", 559 470 (u64)aura->limit, aura->bp, aura->fc_ena); 471 + 472 + if (!is_rvu_otx2(rvu)) 473 + seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be); 560 474 seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", 561 475 aura->fc_up_crossing, aura->fc_stype); 562 476 seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); ··· 577 485 seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); 578 486 579 487 seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); 488 + if 
(!is_rvu_otx2(rvu)) 489 + seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst); 580 490 } 581 491 582 492 /* Dumps given NPA Pool's context */ 583 493 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) 584 494 { 585 495 struct npa_pool_s *pool = &rsp->pool; 496 + struct rvu *rvu = m->private; 586 497 587 498 seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); 588 499 ··· 607 512 pool->avg_con, pool->fc_ena, pool->fc_stype); 608 513 seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", 609 514 pool->fc_hyst_bits, pool->fc_up_crossing); 515 + if (!is_rvu_otx2(rvu)) 516 + seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be); 610 517 seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); 611 518 612 519 seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); ··· 622 525 seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); 623 526 seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", 624 527 pool->thresh_int_ena, pool->thresh_up); 625 - seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n", 528 + seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n", 626 529 pool->thresh_qint_idx, pool->err_qint_idx); 530 + if (!is_rvu_otx2(rvu)) 531 + seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst); 627 532 } 628 533 629 534 /* Reads aura/pool's ctx from admin queue */ ··· 1009 910 1010 911 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL); 1011 912 913 + static void print_nix_cn10k_sq_ctx(struct seq_file *m, 914 + struct nix_cn10k_sq_ctx_s *sq_ctx) 915 + { 916 + seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n", 917 + sq_ctx->ena, sq_ctx->qint_idx); 918 + seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n", 919 + sq_ctx->substream, sq_ctx->sdp_mcast); 920 + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n", 921 + sq_ctx->cq, sq_ctx->sqe_way_mask); 922 + 923 + seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: 
xoff\t\t\t%d\n", 924 + sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff); 925 + seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n", 926 + sq_ctx->sso_ena, sq_ctx->smq_rr_weight); 927 + seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n", 928 + sq_ctx->default_chan, sq_ctx->sqb_count); 929 + 930 + seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb); 931 + seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub); 932 + seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n", 933 + sq_ctx->sqb_aura, sq_ctx->sq_int); 934 + seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n", 935 + sq_ctx->sq_int_ena, sq_ctx->sqe_stype); 936 + 937 + seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n", 938 + sq_ctx->max_sqe_size, sq_ctx->cq_limit); 939 + seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n", 940 + sq_ctx->mnq_dis, sq_ctx->lmt_dis); 941 + seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n", 942 + sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum); 943 + seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n", 944 + sq_ctx->tail_offset, sq_ctx->smenq_offset); 945 + seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n", 946 + sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld); 947 + 948 + seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); 949 + seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); 950 + seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); 951 + seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", 952 + sq_ctx->smenq_next_sqb); 953 + 954 + seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); 955 + 956 + seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total); 957 + seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n", 958 + sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb); 959 + seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n", 960 + 
sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena); 961 + seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n", 962 + sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); 963 + 964 + seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", 965 + (u64)sq_ctx->scm_lso_rem); 966 + seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); 967 + seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); 968 + seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", 969 + (u64)sq_ctx->dropped_octs); 970 + seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", 971 + (u64)sq_ctx->dropped_pkts); 972 + } 973 + 1012 974 /* Dumps given nix_sq's context */ 1013 975 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) 1014 976 { 1015 977 struct nix_sq_ctx_s *sq_ctx = &rsp->sq; 978 + struct nix_hw *nix_hw = m->private; 979 + struct rvu *rvu = nix_hw->rvu; 1016 980 981 + if (!is_rvu_otx2(rvu)) { 982 + print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); 983 + return; 984 + } 1017 985 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", 1018 986 sq_ctx->sqe_way_mask, sq_ctx->cq); 1019 987 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", ··· 1140 974 (u64)sq_ctx->dropped_pkts); 1141 975 } 1142 976 977 + static void print_nix_cn10k_rq_ctx(struct seq_file *m, 978 + struct nix_cn10k_rq_ctx_s *rq_ctx) 979 + { 980 + seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", 981 + rq_ctx->ena, rq_ctx->sso_ena); 982 + seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", 983 + rq_ctx->ipsech_ena, rq_ctx->ena_wqwd); 984 + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n", 985 + rq_ctx->cq, rq_ctx->lenerr_dis); 986 + seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n", 987 + rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis); 988 + seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n", 989 + rq_ctx->len_il4_dis, rq_ctx->len_il3_dis); 990 + seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: 
len_ol3_dis \t\t%d\n", 991 + rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis); 992 + seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura); 993 + 994 + seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", 995 + rq_ctx->spb_aura, rq_ctx->lpb_aura); 996 + seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura); 997 + seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n", 998 + rq_ctx->sso_grp, rq_ctx->sso_tt); 999 + seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n", 1000 + rq_ctx->pb_caching, rq_ctx->wqe_caching); 1001 + seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", 1002 + rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena); 1003 + seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n", 1004 + rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing); 1005 + seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n", 1006 + rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena); 1007 + 1008 + seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id); 1009 + seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena); 1010 + seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1); 1011 + seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n", 1012 + rq_ctx->wqe_skip, rq_ctx->spb_ena); 1013 + seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n", 1014 + rq_ctx->lpb_sizem1, rq_ctx->first_skip); 1015 + seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n", 1016 + rq_ctx->later_skip, rq_ctx->xqe_imm_size); 1017 + seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n", 1018 + rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split); 1019 + 1020 + seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n", 1021 + rq_ctx->xqe_drop, rq_ctx->xqe_pass); 1022 + seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n", 1023 + rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass); 1024 + seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n", 1025 + 
rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass); 1026 + seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n", 1027 + rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); 1028 + 1029 + seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n", 1030 + rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop); 1031 + seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n", 1032 + rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass); 1033 + seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n", 1034 + rq_ctx->rq_int, rq_ctx->rq_int_ena); 1035 + seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx); 1036 + 1037 + seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n", 1038 + rq_ctx->ltag, rq_ctx->good_utag); 1039 + seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n", 1040 + rq_ctx->bad_utag, rq_ctx->flow_tagw); 1041 + seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n", 1042 + rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena); 1043 + seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n", 1044 + rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp); 1045 + seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip); 1046 + 1047 + seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); 1048 + seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); 1049 + seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); 1050 + seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); 1051 + seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); 1052 + } 1053 + 1143 1054 /* Dumps given nix_rq's context */ 1144 1055 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) 1145 1056 { 1146 1057 struct nix_rq_ctx_s *rq_ctx = &rsp->rq; 1058 + struct nix_hw *nix_hw = m->private; 1059 + struct rvu *rvu = nix_hw->rvu; 1060 + 1061 + if (!is_rvu_otx2(rvu)) { 1062 + print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx); 1063 + return; 1064 + } 1147 1065 1148 1066 
seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n", 1149 1067 rq_ctx->wqe_aura, rq_ctx->substream); ··· 1689 1439 static int cgx_print_stats(struct seq_file *s, int lmac_id) 1690 1440 { 1691 1441 struct cgx_link_user_info linfo; 1442 + struct mac_ops *mac_ops; 1692 1443 void *cgxd = s->private; 1693 1444 u64 ucast, mcast, bcast; 1694 1445 int stat = 0, err = 0; ··· 1701 1450 if (!rvu) 1702 1451 return -ENODEV; 1703 1452 1453 + mac_ops = get_mac_ops(cgxd); 1454 + 1455 + if (!mac_ops) 1456 + return 0; 1457 + 1704 1458 /* Link status */ 1705 1459 seq_puts(s, "\n=======Link Status======\n\n"); 1706 1460 err = cgx_get_link_info(cgxd, lmac_id, &linfo); ··· 1715 1459 linfo.link_up ? "UP" : "DOWN", linfo.speed); 1716 1460 1717 1461 /* Rx stats */ 1718 - seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n"); 1462 + seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n", 1463 + mac_ops->name); 1719 1464 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames"); 1720 1465 if (err) 1721 1466 return err; ··· 1738 1481 return err; 1739 1482 1740 1483 /* Tx stats */ 1741 - seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n"); 1484 + seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n", 1485 + mac_ops->name); 1742 1486 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames"); 1743 1487 if (err) 1744 1488 return err; ··· 1758 1500 return err; 1759 1501 1760 1502 /* Rx stats */ 1761 - seq_puts(s, "\n=======CGX RX_STATS======\n\n"); 1762 - while (stat < CGX_RX_STATS_COUNT) { 1763 - err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat); 1503 + seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name); 1504 + while (stat < mac_ops->rx_stats_cnt) { 1505 + err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat); 1764 1506 if (err) 1765 1507 return err; 1766 - seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat); 1508 + if (is_rvu_otx2(rvu)) 1509 + seq_printf(s, "%s: %llu\n", 
cgx_rx_stats_fields[stat], 1510 + rx_stat); 1511 + else 1512 + seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat], 1513 + rx_stat); 1767 1514 stat++; 1768 1515 } 1769 1516 1770 1517 /* Tx stats */ 1771 1518 stat = 0; 1772 - seq_puts(s, "\n=======CGX TX_STATS======\n\n"); 1773 - while (stat < CGX_TX_STATS_COUNT) { 1774 - err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat); 1519 + seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name); 1520 + while (stat < mac_ops->tx_stats_cnt) { 1521 + err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat); 1775 1522 if (err) 1776 1523 return err; 1777 - seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat); 1778 - stat++; 1524 + 1525 + if (is_rvu_otx2(rvu)) 1526 + seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], 1527 + tx_stat); 1528 + else 1529 + seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat], 1530 + tx_stat); 1531 + stat++; 1779 1532 } 1780 1533 1781 1534 return err; ··· 1816 1547 1817 1548 static void rvu_dbg_cgx_init(struct rvu *rvu) 1818 1549 { 1550 + struct mac_ops *mac_ops; 1551 + unsigned long lmac_bmap; 1552 + int rvu_def_cgx_id = 0; 1819 1553 int i, lmac_id; 1820 1554 char dname[20]; 1821 1555 void *cgx; 1822 1556 1823 - rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root); 1557 + if (!cgx_get_cgxcnt_max()) 1558 + return; 1559 + 1560 + mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); 1561 + if (!mac_ops) 1562 + return; 1563 + 1564 + rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name, 1565 + rvu->rvu_dbg.root); 1824 1566 1825 1567 for (i = 0; i < cgx_get_cgxcnt_max(); i++) { 1826 1568 cgx = rvu_cgx_pdata(i, rvu); 1827 1569 if (!cgx) 1828 1570 continue; 1571 + lmac_bmap = cgx_get_lmac_bmap(cgx); 1829 1572 /* cgx debugfs dir */ 1830 - sprintf(dname, "cgx%d", i); 1573 + sprintf(dname, "%s%d", mac_ops->name, i); 1831 1574 rvu->rvu_dbg.cgx = debugfs_create_dir(dname, 1832 1575 rvu->rvu_dbg.cgx_root); 1833 - for (lmac_id = 0; lmac_id < 
cgx_get_lmac_cnt(cgx); lmac_id++) { 1576 + 1577 + for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) { 1834 1578 /* lmac debugfs dir */ 1835 1579 sprintf(dname, "lmac%d", lmac_id); 1836 1580 rvu->rvu_dbg.lmac = ··· 2410 2128 &rvu_dbg_cpt_err_info_fops); 2411 2129 } 2412 2130 2131 + static const char *rvu_get_dbg_dir_name(struct rvu *rvu) 2132 + { 2133 + if (!is_rvu_otx2(rvu)) 2134 + return "cn10k"; 2135 + else 2136 + return "octeontx2"; 2137 + } 2138 + 2413 2139 void rvu_dbg_init(struct rvu *rvu) 2414 2140 { 2415 - rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); 2141 + rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL); 2416 2142 2417 2143 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, 2418 2144 &rvu_dbg_rsrc_status_fops); 2419 - debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, rvu, 2420 - &rvu_dbg_rvu_pf_cgx_map_fops); 2421 2145 2146 + if (!cgx_get_cgxcnt_max()) 2147 + goto create; 2148 + 2149 + if (is_rvu_otx2(rvu)) 2150 + debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, 2151 + rvu, &rvu_dbg_rvu_pf_cgx_map_fops); 2152 + else 2153 + debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, 2154 + rvu, &rvu_dbg_rvu_pf_cgx_map_fops); 2155 + 2156 + create: 2422 2157 rvu_dbg_npa_init(rvu); 2423 2158 rvu_dbg_nix_init(rvu, BLKADDR_NIX0); 2424 2159
+91 -21
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 16 16 #include "rvu.h" 17 17 #include "npc.h" 18 18 #include "cgx.h" 19 + #include "lmac_common.h" 19 20 20 21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); 21 22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, ··· 215 214 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) 216 215 { 217 216 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); 217 + struct mac_ops *mac_ops; 218 218 int pkind, pf, vf, lbkid; 219 219 u8 cgx_id, lmac_id; 220 220 int err; ··· 235 233 "PF_Func 0x%x: Invalid pkind\n", pcifunc); 236 234 return -EINVAL; 237 235 } 238 - pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0); 236 + pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); 239 237 pfvf->tx_chan_base = pfvf->rx_chan_base; 240 238 pfvf->rx_chan_cnt = 1; 241 239 pfvf->tx_chan_cnt = 1; 242 240 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); 243 241 rvu_npc_set_pkind(rvu, pkind, pfvf); 244 242 243 + mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); 245 244 /* By default we enable pause frames */ 246 245 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) 247 - cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), 248 - lmac_id, true, true); 246 + mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, 247 + rvu), 248 + lmac_id, true, true); 249 249 break; 250 250 case NIX_INTF_TYPE_LBK: 251 251 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; ··· 266 262 * loopback channels.Therefore if odd number of AF VFs are 267 263 * enabled then the last VF remains with no pair. 268 264 */ 269 - pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf); 265 + pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); 270 266 pfvf->tx_chan_base = vf & 0x1 ? 
271 - NIX_CHAN_LBK_CHX(lbkid, vf - 1) : 272 - NIX_CHAN_LBK_CHX(lbkid, vf + 1); 267 + rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : 268 + rvu_nix_chan_lbk(rvu, lbkid, vf + 1); 273 269 pfvf->rx_chan_cnt = 1; 274 270 pfvf->tx_chan_cnt = 1; 275 271 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, ··· 1004 1000 return rvu_nix_aq_enq_inst(rvu, req, rsp); 1005 1001 } 1006 1002 #endif 1003 + /* CN10K mbox handler */ 1004 + int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, 1005 + struct nix_cn10k_aq_enq_req *req, 1006 + struct nix_cn10k_aq_enq_rsp *rsp) 1007 + { 1008 + return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, 1009 + (struct nix_aq_enq_rsp *)rsp); 1010 + } 1007 1011 1008 1012 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, 1009 1013 struct hwctx_disable_req *req, ··· 2547 2535 return 0; 2548 2536 } 2549 2537 2538 + static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) 2539 + { 2540 + /* CN10K supports LBK FIFO size 72 KB */ 2541 + if (rvu->hw->lbk_bufsize == 0x12000) 2542 + *max_mtu = CN10K_LBK_LINK_MAX_FRS; 2543 + else 2544 + *max_mtu = NIC_HW_MAX_FRS; 2545 + } 2546 + 2547 + static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) 2548 + { 2549 + /* RPM supports FIFO len 128 KB */ 2550 + if (rvu_cgx_get_fifolen(rvu) == 0x20000) 2551 + *max_mtu = CN10K_LMAC_LINK_MAX_FRS; 2552 + else 2553 + *max_mtu = NIC_HW_MAX_FRS; 2554 + } 2555 + 2556 + int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, 2557 + struct nix_hw_info *rsp) 2558 + { 2559 + u16 pcifunc = req->hdr.pcifunc; 2560 + int blkaddr; 2561 + 2562 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 2563 + if (blkaddr < 0) 2564 + return NIX_AF_ERR_AF_LF_INVALID; 2565 + 2566 + if (is_afvf(pcifunc)) 2567 + rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); 2568 + else 2569 + rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); 2570 + 2571 + rsp->min_mtu = NIC_HW_MIN_FRS; 2572 + return 0; 2573 + } 2574 + 2550 2575 int rvu_mbox_handler_nix_stats_rst(struct 
rvu *rvu, struct msg_req *req, 2551 2576 struct msg_rsp *rsp) 2552 2577 { ··· 3148 3099 u64 cfg, lmac_fifo_len; 3149 3100 struct nix_hw *nix_hw; 3150 3101 u8 cgx = 0, lmac = 0; 3102 + u16 max_mtu; 3151 3103 3152 3104 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3153 3105 if (blkaddr < 0) ··· 3158 3108 if (!nix_hw) 3159 3109 return -EINVAL; 3160 3110 3161 - if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS) 3111 + if (is_afvf(pcifunc)) 3112 + rvu_get_lbk_link_max_frs(rvu, &max_mtu); 3113 + else 3114 + rvu_get_lmac_link_max_frs(rvu, &max_mtu); 3115 + 3116 + if (!req->sdp_link && req->maxlen > max_mtu) 3162 3117 return NIX_AF_ERR_FRS_INVALID; 3163 3118 3164 3119 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) ··· 3223 3168 3224 3169 /* Update transmit credits for CGX links */ 3225 3170 lmac_fifo_len = 3226 - CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3171 + rvu_cgx_get_fifolen(rvu) / 3172 + cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3227 3173 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); 3228 3174 cfg &= ~(0xFFFFFULL << 12); 3229 3175 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; ··· 3264 3208 return 0; 3265 3209 } 3266 3210 3211 + static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) 3212 + { 3213 + /* CN10k supports 72KB FIFO size and max packet size of 64k */ 3214 + if (rvu->hw->lbk_bufsize == 0x12000) 3215 + return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; 3216 + 3217 + return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ 3218 + } 3219 + 3267 3220 static void nix_link_config(struct rvu *rvu, int blkaddr) 3268 3221 { 3269 3222 struct rvu_hwinfo *hw = rvu->hw; 3270 3223 int cgx, lmac_cnt, slink, link; 3224 + u16 lbk_max_frs, lmac_max_frs; 3271 3225 u64 tx_credits; 3226 + 3227 + rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); 3228 + rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); 3272 3229 3273 3230 /* Set default min/max packet lengths allowed on NIX Rx links. 
3274 3231 * ··· 3289 3220 * as undersize and report them to SW as error pkts, hence 3290 3221 * setting it to 40 bytes. 3291 3222 */ 3292 - for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) { 3223 + for (link = 0; link < hw->cgx_links; link++) { 3293 3224 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3294 - NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); 3225 + ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); 3295 3226 } 3296 3227 3228 + for (link = hw->cgx_links; link < hw->lbk_links; link++) { 3229 + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), 3230 + ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); 3231 + } 3297 3232 if (hw->sdp_links) { 3298 3233 link = hw->cgx_links + hw->lbk_links; 3299 3234 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), ··· 3309 3236 */ 3310 3237 for (cgx = 0; cgx < hw->cgx; cgx++) { 3311 3238 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); 3312 - tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; 3239 + tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - 3240 + lmac_max_frs) / 16; 3313 3241 /* Enable credits and set credit pkt count to max allowed */ 3314 3242 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3315 3243 slink = cgx * hw->lmac_per_cgx; ··· 3324 3250 /* Set Tx credits for LBK link */ 3325 3251 slink = hw->cgx_links; 3326 3252 for (link = slink; link < (slink + hw->lbk_links); link++) { 3327 - tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */ 3253 + tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); 3328 3254 /* Enable credits and set credit pkt count to max allowed */ 3329 3255 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); 3330 3256 rvu_write64(rvu, blkaddr, ··· 3454 3380 err = nix_calibrate_x2p(rvu, blkaddr); 3455 3381 if (err) 3456 3382 return err; 3457 - 3458 - /* Set num of links of each type */ 3459 - cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); 3460 - hw->cgx = (cfg >> 12) & 0xF; 3461 - hw->lmac_per_cgx = (cfg >> 8) & 0xF; 3462 - 
hw->cgx_links = hw->cgx * hw->lmac_per_cgx; 3463 - hw->lbk_links = (cfg >> 24) & 0xF; 3464 - hw->sdp_links = 1; 3465 3383 3466 3384 /* Initialize admin queue */ 3467 3385 err = nix_aq_init(rvu, block); ··· 3689 3623 { 3690 3624 struct rvu_hwinfo *hw = rvu->hw; 3691 3625 struct rvu_block *block; 3692 - int blkaddr; 3626 + int blkaddr, pf; 3693 3627 int nixlf; 3694 3628 u64 cfg; 3629 + 3630 + pf = rvu_get_pf(pcifunc); 3631 + if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) 3632 + return 0; 3695 3633 3696 3634 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); 3697 3635 if (blkaddr < 0)
+2 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
··· 102 102 return -EINVAL; 103 103 } else { 104 104 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); 105 - base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0); 105 + base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0); 106 106 /* CGX mapped functions has maximum of 16 channels */ 107 - end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF); 107 + end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF); 108 108 } 109 109 110 110 if (channel < base || channel > end)
+24
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
··· 44 44 #define RVU_AF_PFME_INT_W1S (0x28c8) 45 45 #define RVU_AF_PFME_INT_ENA_W1S (0x28d0) 46 46 #define RVU_AF_PFME_INT_ENA_W1C (0x28d8) 47 + #define RVU_AF_PFX_BAR4_ADDR(a) (0x5000 | (a) << 4) 48 + #define RVU_AF_PFX_BAR4_CFG (0x5200 | (a) << 4) 49 + #define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4) 50 + #define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4) 51 + #define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4) 47 52 48 53 /* Admin function's privileged PF/VF registers */ 49 54 #define RVU_PRIV_CONST (0x8000000) ··· 105 100 #define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) 106 101 #define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) 107 102 #define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) 103 + #define RVU_PF_VF_MBOX_ADDR (0xC40) 104 + #define RVU_PF_LMTLINE_ADDR (0xC48) 108 105 109 106 /* RVU VF registers */ 110 107 #define RVU_VF_VFPF_MBOX0 (0x00000) ··· 406 399 #define NIX_AF_RX_NPC_MIRROR_RCV (0x4720) 407 400 #define NIX_AF_RX_NPC_MIRROR_DROP (0x4730) 408 401 #define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) 402 + #define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17) 409 403 410 404 #define NIX_PRIV_AF_INT_CFG (0x8000000) 411 405 #define NIX_PRIV_LFX_CFG (0x8000010) 412 406 #define NIX_PRIV_LFX_INT_CFG (0x8000020) 413 407 #define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030) 408 + 409 + #define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0) 410 + #define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16) 414 411 415 412 /* SSO */ 416 413 #define SSO_AF_CONST (0x1000) ··· 648 637 (0x00F00 | (a) << 5 | (b) << 4) 649 638 #define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3) 650 639 #define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3) 640 + 641 + /* LBK */ 642 + #define LBK_CONST (0x10ull) 643 + #define LBK_LINK_CFG_P2X (0x400ull) 644 + #define LBK_LINK_CFG_X2P (0x408ull) 645 + #define LBK_CONST_CHANS GENMASK_ULL(47, 32) 646 + #define LBK_CONST_DST GENMASK_ULL(31, 28) 647 + #define LBK_CONST_SRC GENMASK_ULL(27, 24) 648 + #define LBK_CONST_BUF_SIZE GENMASK_ULL(23, 0) 649 + 
#define LBK_LINK_CFG_RANGE_MASK GENMASK_ULL(19, 16) 650 + #define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6) 651 + #define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0) 652 + 651 653 #endif /* RVU_REG_H */
+180 -424
drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
··· 139 139 140 140 /* NPA admin queue instruction structure */ 141 141 struct npa_aq_inst_s { 142 - #if defined(__BIG_ENDIAN_BITFIELD) 143 - u64 doneint : 1; /* W0 */ 144 - u64 reserved_44_62 : 19; 145 - u64 cindex : 20; 146 - u64 reserved_17_23 : 7; 147 - u64 lf : 9; 148 - u64 ctype : 4; 149 - u64 op : 4; 150 - #else 151 - u64 op : 4; 142 + u64 op : 4; /* W0 */ 152 143 u64 ctype : 4; 153 144 u64 lf : 9; 154 145 u64 reserved_17_23 : 7; 155 146 u64 cindex : 20; 156 147 u64 reserved_44_62 : 19; 157 148 u64 doneint : 1; 158 - #endif 159 149 u64 res_addr; /* W1 */ 160 150 }; 161 151 162 152 /* NPA admin queue result structure */ 163 153 struct npa_aq_res_s { 164 - #if defined(__BIG_ENDIAN_BITFIELD) 165 - u64 reserved_17_63 : 47; /* W0 */ 166 - u64 doneint : 1; 167 - u64 compcode : 8; 168 - u64 ctype : 4; 169 - u64 op : 4; 170 - #else 171 - u64 op : 4; 154 + u64 op : 4; /* W0 */ 172 155 u64 ctype : 4; 173 156 u64 compcode : 8; 174 157 u64 doneint : 1; 175 158 u64 reserved_17_63 : 47; 176 - #endif 177 159 u64 reserved_64_127; /* W1 */ 178 160 }; 179 161 180 162 struct npa_aura_s { 181 163 u64 pool_addr; /* W0 */ 182 - #if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ 183 - u64 avg_level : 8; 184 - u64 reserved_118_119 : 2; 185 - u64 shift : 6; 186 - u64 aura_drop : 8; 187 - u64 reserved_98_103 : 6; 188 - u64 bp_ena : 2; 189 - u64 aura_drop_ena : 1; 190 - u64 pool_drop_ena : 1; 191 - u64 reserved_93 : 1; 192 - u64 avg_con : 9; 193 - u64 pool_way_mask : 16; 194 - u64 pool_caching : 1; 195 - u64 reserved_65 : 2; 196 - u64 ena : 1; 197 - #else 198 - u64 ena : 1; 164 + u64 ena : 1; /* W1 */ 199 165 u64 reserved_65 : 2; 200 166 u64 pool_caching : 1; 201 167 u64 pool_way_mask : 16; ··· 175 209 u64 shift : 6; 176 210 u64 reserved_118_119 : 2; 177 211 u64 avg_level : 8; 178 - #endif 179 - #if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ 180 - u64 reserved_189_191 : 3; 181 - u64 nix1_bpid : 9; 182 - u64 reserved_177_179 : 3; 183 - u64 nix0_bpid : 9; 184 - u64 reserved_164_167 : 4; 185 - u64 
count : 36; 186 - #else 187 - u64 count : 36; 212 + u64 count : 36; /* W2 */ 188 213 u64 reserved_164_167 : 4; 189 214 u64 nix0_bpid : 9; 190 215 u64 reserved_177_179 : 3; 191 216 u64 nix1_bpid : 9; 192 217 u64 reserved_189_191 : 3; 193 - #endif 194 - #if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ 195 - u64 reserved_252_255 : 4; 196 - u64 fc_hyst_bits : 4; 197 - u64 fc_stype : 2; 198 - u64 fc_up_crossing : 1; 199 - u64 fc_ena : 1; 200 - u64 reserved_240_243 : 4; 201 - u64 bp : 8; 202 - u64 reserved_228_231 : 4; 203 - u64 limit : 36; 204 - #else 205 - u64 limit : 36; 218 + u64 limit : 36; /* W3 */ 206 219 u64 reserved_228_231 : 4; 207 220 u64 bp : 8; 208 - u64 reserved_240_243 : 4; 221 + u64 reserved_241_243 : 3; 222 + u64 fc_be : 1; 209 223 u64 fc_ena : 1; 210 224 u64 fc_up_crossing : 1; 211 225 u64 fc_stype : 2; 212 226 u64 fc_hyst_bits : 4; 213 227 u64 reserved_252_255 : 4; 214 - #endif 215 228 u64 fc_addr; /* W4 */ 216 - #if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ 217 - u64 reserved_379_383 : 5; 218 - u64 err_qint_idx : 7; 219 - u64 reserved_371 : 1; 220 - u64 thresh_qint_idx : 7; 221 - u64 reserved_363 : 1; 222 - u64 thresh_up : 1; 223 - u64 thresh_int_ena : 1; 224 - u64 thresh_int : 1; 225 - u64 err_int_ena : 8; 226 - u64 err_int : 8; 227 - u64 update_time : 16; 228 - u64 pool_drop : 8; 229 - #else 230 - u64 pool_drop : 8; 229 + u64 pool_drop : 8; /* W5 */ 231 230 u64 update_time : 16; 232 231 u64 err_int : 8; 233 232 u64 err_int_ena : 8; ··· 204 273 u64 reserved_371 : 1; 205 274 u64 err_qint_idx : 7; 206 275 u64 reserved_379_383 : 5; 207 - #endif 208 - #if defined(__BIG_ENDIAN_BITFIELD) /* W6 */ 209 - u64 reserved_420_447 : 28; 210 - u64 thresh : 36; 211 - #else 212 - u64 thresh : 36; 213 - u64 reserved_420_447 : 28; 214 - #endif 276 + u64 thresh : 36; /* W6*/ 277 + u64 rsvd_423_420 : 4; 278 + u64 fc_msh_dst : 11; 279 + u64 reserved_435_447 : 13; 215 280 u64 reserved_448_511; /* W7 */ 216 281 }; 217 282 218 283 struct npa_pool_s { 219 284 u64 stack_base; /* W0 
*/ 220 - #if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ 221 - u64 reserved_115_127 : 13; 222 - u64 buf_size : 11; 223 - u64 reserved_100_103 : 4; 224 - u64 buf_offset : 12; 225 - u64 stack_way_mask : 16; 226 - u64 reserved_70_71 : 3; 227 - u64 stack_caching : 1; 228 - u64 reserved_66_67 : 2; 229 - u64 nat_align : 1; 230 - u64 ena : 1; 231 - #else 232 285 u64 ena : 1; 233 286 u64 nat_align : 1; 234 287 u64 reserved_66_67 : 2; ··· 223 308 u64 reserved_100_103 : 4; 224 309 u64 buf_size : 11; 225 310 u64 reserved_115_127 : 13; 226 - #endif 227 - #if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ 228 - u64 stack_pages : 32; 229 - u64 stack_max_pages : 32; 230 - #else 231 311 u64 stack_max_pages : 32; 232 312 u64 stack_pages : 32; 233 - #endif 234 - #if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ 235 - u64 reserved_240_255 : 16; 236 - u64 op_pc : 48; 237 - #else 238 313 u64 op_pc : 48; 239 314 u64 reserved_240_255 : 16; 240 - #endif 241 - #if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ 242 - u64 reserved_316_319 : 4; 243 - u64 update_time : 16; 244 - u64 reserved_297_299 : 3; 245 - u64 fc_up_crossing : 1; 246 - u64 fc_hyst_bits : 4; 247 - u64 fc_stype : 2; 248 - u64 fc_ena : 1; 249 - u64 avg_con : 9; 250 - u64 avg_level : 8; 251 - u64 reserved_270_271 : 2; 252 - u64 shift : 6; 253 - u64 reserved_260_263 : 4; 254 - u64 stack_offset : 4; 255 - #else 256 315 u64 stack_offset : 4; 257 316 u64 reserved_260_263 : 4; 258 317 u64 shift : 6; ··· 237 348 u64 fc_stype : 2; 238 349 u64 fc_hyst_bits : 4; 239 350 u64 fc_up_crossing : 1; 240 - u64 reserved_297_299 : 3; 351 + u64 fc_be : 1; 352 + u64 reserved_298_299 : 2; 241 353 u64 update_time : 16; 242 354 u64 reserved_316_319 : 4; 243 - #endif 244 355 u64 fc_addr; /* W5 */ 245 356 u64 ptr_start; /* W6 */ 246 357 u64 ptr_end; /* W7 */ 247 - #if defined(__BIG_ENDIAN_BITFIELD) /* W8 */ 248 - u64 reserved_571_575 : 5; 249 - u64 err_qint_idx : 7; 250 - u64 reserved_563 : 1; 251 - u64 thresh_qint_idx : 7; 252 - u64 reserved_555 : 1; 253 - u64 thresh_up : 
1; 254 - u64 thresh_int_ena : 1; 255 - u64 thresh_int : 1; 256 - u64 err_int_ena : 8; 257 - u64 err_int : 8; 258 - u64 reserved_512_535 : 24; 259 - #else 260 358 u64 reserved_512_535 : 24; 261 359 u64 err_int : 8; 262 360 u64 err_int_ena : 8; ··· 255 379 u64 reserved_563 : 1; 256 380 u64 err_qint_idx : 7; 257 381 u64 reserved_571_575 : 5; 258 - #endif 259 - #if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ 260 - u64 reserved_612_639 : 28; 261 382 u64 thresh : 36; 262 - #else 263 - u64 thresh : 36; 264 - u64 reserved_612_639 : 28; 265 - #endif 383 + u64 rsvd_615_612 : 4; 384 + u64 fc_msh_dst : 11; 385 + u64 reserved_627_639 : 13; 266 386 u64 reserved_640_703; /* W10 */ 267 387 u64 reserved_704_767; /* W11 */ 268 388 u64 reserved_768_831; /* W12 */ ··· 286 414 NIX_AQ_CTYPE_MCE = 0x3, 287 415 NIX_AQ_CTYPE_RSS = 0x4, 288 416 NIX_AQ_CTYPE_DYNO = 0x5, 417 + NIX_AQ_CTYPE_BAND_PROF = 0x6, 289 418 }; 290 419 291 420 /* NIX admin queue instruction opcodes */ ··· 301 428 302 429 /* NIX admin queue instruction structure */ 303 430 struct nix_aq_inst_s { 304 - #if defined(__BIG_ENDIAN_BITFIELD) 305 - u64 doneint : 1; /* W0 */ 306 - u64 reserved_44_62 : 19; 307 - u64 cindex : 20; 308 - u64 reserved_15_23 : 9; 309 - u64 lf : 7; 310 - u64 ctype : 4; 311 - u64 op : 4; 312 - #else 313 431 u64 op : 4; 314 432 u64 ctype : 4; 315 - u64 lf : 7; 316 - u64 reserved_15_23 : 9; 433 + u64 lf : 9; 434 + u64 reserved_17_23 : 7; 317 435 u64 cindex : 20; 318 436 u64 reserved_44_62 : 19; 319 437 u64 doneint : 1; 320 - #endif 321 438 u64 res_addr; /* W1 */ 322 439 }; 323 440 324 441 /* NIX admin queue result structure */ 325 442 struct nix_aq_res_s { 326 - #if defined(__BIG_ENDIAN_BITFIELD) 327 - u64 reserved_17_63 : 47; /* W0 */ 328 - u64 doneint : 1; 329 - u64 compcode : 8; 330 - u64 ctype : 4; 331 - u64 op : 4; 332 - #else 333 443 u64 op : 4; 334 444 u64 ctype : 4; 335 445 u64 compcode : 8; 336 446 u64 doneint : 1; 337 447 u64 reserved_17_63 : 47; 338 - #endif 339 448 u64 reserved_64_127; /* W1 */ 
340 449 }; 341 450 342 451 /* NIX Completion queue context structure */ 343 452 struct nix_cq_ctx_s { 344 453 u64 base; 345 - #if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ 346 - u64 wrptr : 20; 347 - u64 avg_con : 9; 348 - u64 cint_idx : 7; 349 - u64 cq_err : 1; 350 - u64 qint_idx : 7; 351 - u64 rsvd_81_83 : 3; 352 - u64 bpid : 9; 353 - u64 rsvd_69_71 : 3; 354 - u64 bp_ena : 1; 355 - u64 rsvd_64_67 : 4; 356 - #else 357 454 u64 rsvd_64_67 : 4; 358 455 u64 bp_ena : 1; 359 456 u64 rsvd_69_71 : 3; ··· 334 491 u64 cint_idx : 7; 335 492 u64 avg_con : 9; 336 493 u64 wrptr : 20; 337 - #endif 338 - #if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ 339 - u64 update_time : 16; 340 - u64 avg_level : 8; 341 - u64 head : 20; 342 - u64 tail : 20; 343 - #else 344 494 u64 tail : 20; 345 495 u64 head : 20; 346 496 u64 avg_level : 8; 347 497 u64 update_time : 16; 348 - #endif 349 - #if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ 350 - u64 cq_err_int_ena : 8; 351 - u64 cq_err_int : 8; 352 - u64 qsize : 4; 353 - u64 rsvd_233_235 : 3; 354 - u64 caching : 1; 355 - u64 substream : 20; 356 - u64 rsvd_210_211 : 2; 357 - u64 ena : 1; 358 - u64 drop_ena : 1; 359 - u64 drop : 8; 360 - u64 bp : 8; 361 - #else 362 498 u64 bp : 8; 363 499 u64 drop : 8; 364 500 u64 drop_ena : 1; ··· 349 527 u64 qsize : 4; 350 528 u64 cq_err_int : 8; 351 529 u64 cq_err_int_ena : 8; 352 - #endif 530 + }; 531 + 532 + /* CN10K NIX Receive queue context structure */ 533 + struct nix_cn10k_rq_ctx_s { 534 + u64 ena : 1; 535 + u64 sso_ena : 1; 536 + u64 ipsech_ena : 1; 537 + u64 ena_wqwd : 1; 538 + u64 cq : 20; 539 + u64 rsvd_36_24 : 13; 540 + u64 lenerr_dis : 1; 541 + u64 csum_il4_dis : 1; 542 + u64 csum_ol4_dis : 1; 543 + u64 len_il4_dis : 1; 544 + u64 len_il3_dis : 1; 545 + u64 len_ol4_dis : 1; 546 + u64 len_ol3_dis : 1; 547 + u64 wqe_aura : 20; 548 + u64 spb_aura : 20; 549 + u64 lpb_aura : 20; 550 + u64 sso_grp : 10; 551 + u64 sso_tt : 2; 552 + u64 pb_caching : 2; 553 + u64 wqe_caching : 1; 554 + u64 xqe_drop_ena : 1; 555 + u64 
spb_drop_ena : 1; 556 + u64 lpb_drop_ena : 1; 557 + u64 pb_stashing : 1; 558 + u64 ipsecd_drop_ena : 1; 559 + u64 chi_ena : 1; 560 + u64 rsvd_127_125 : 3; 561 + u64 band_prof_id : 10; /* W2 */ 562 + u64 rsvd_138 : 1; 563 + u64 policer_ena : 1; 564 + u64 spb_sizem1 : 6; 565 + u64 wqe_skip : 2; 566 + u64 rsvd_150_148 : 3; 567 + u64 spb_ena : 1; 568 + u64 lpb_sizem1 : 12; 569 + u64 first_skip : 7; 570 + u64 rsvd_171 : 1; 571 + u64 later_skip : 6; 572 + u64 xqe_imm_size : 6; 573 + u64 rsvd_189_184 : 6; 574 + u64 xqe_imm_copy : 1; 575 + u64 xqe_hdr_split : 1; 576 + u64 xqe_drop : 8; /* W3 */ 577 + u64 xqe_pass : 8; 578 + u64 wqe_pool_drop : 8; 579 + u64 wqe_pool_pass : 8; 580 + u64 spb_aura_drop : 8; 581 + u64 spb_aura_pass : 8; 582 + u64 spb_pool_drop : 8; 583 + u64 spb_pool_pass : 8; 584 + u64 lpb_aura_drop : 8; /* W4 */ 585 + u64 lpb_aura_pass : 8; 586 + u64 lpb_pool_drop : 8; 587 + u64 lpb_pool_pass : 8; 588 + u64 rsvd_291_288 : 4; 589 + u64 rq_int : 8; 590 + u64 rq_int_ena : 8; 591 + u64 qint_idx : 7; 592 + u64 rsvd_319_315 : 5; 593 + u64 ltag : 24; /* W5 */ 594 + u64 good_utag : 8; 595 + u64 bad_utag : 8; 596 + u64 flow_tagw : 6; 597 + u64 ipsec_vwqe : 1; 598 + u64 vwqe_ena : 1; 599 + u64 vwqe_wait : 8; 600 + u64 max_vsize_exp : 4; 601 + u64 vwqe_skip : 2; 602 + u64 rsvd_383_382 : 2; 603 + u64 octs : 48; /* W6 */ 604 + u64 rsvd_447_432 : 16; 605 + u64 pkts : 48; /* W7 */ 606 + u64 rsvd_511_496 : 16; 607 + u64 drop_octs : 48; /* W8 */ 608 + u64 rsvd_575_560 : 16; 609 + u64 drop_pkts : 48; /* W9 */ 610 + u64 rsvd_639_624 : 16; 611 + u64 re_pkts : 48; /* W10 */ 612 + u64 rsvd_703_688 : 16; 613 + u64 rsvd_767_704; /* W11 */ 614 + u64 rsvd_831_768; /* W12 */ 615 + u64 rsvd_895_832; /* W13 */ 616 + u64 rsvd_959_896; /* W14 */ 617 + u64 rsvd_1023_960; /* W15 */ 618 + }; 619 + 620 + /* CN10K NIX Send queue context structure */ 621 + struct nix_cn10k_sq_ctx_s { 622 + u64 ena : 1; 623 + u64 qint_idx : 6; 624 + u64 substream : 20; 625 + u64 sdp_mcast : 1; 626 + u64 cq : 20; 
627 + u64 sqe_way_mask : 16; 628 + u64 smq : 10; /* W1 */ 629 + u64 cq_ena : 1; 630 + u64 xoff : 1; 631 + u64 sso_ena : 1; 632 + u64 smq_rr_weight : 14; 633 + u64 default_chan : 12; 634 + u64 sqb_count : 16; 635 + u64 rsvd_120_119 : 2; 636 + u64 smq_rr_count_lb : 7; 637 + u64 smq_rr_count_ub : 25; /* W2 */ 638 + u64 sqb_aura : 20; 639 + u64 sq_int : 8; 640 + u64 sq_int_ena : 8; 641 + u64 sqe_stype : 2; 642 + u64 rsvd_191 : 1; 643 + u64 max_sqe_size : 2; /* W3 */ 644 + u64 cq_limit : 8; 645 + u64 lmt_dis : 1; 646 + u64 mnq_dis : 1; 647 + u64 smq_next_sq : 20; 648 + u64 smq_lso_segnum : 8; 649 + u64 tail_offset : 6; 650 + u64 smenq_offset : 6; 651 + u64 head_offset : 6; 652 + u64 smenq_next_sqb_vld : 1; 653 + u64 smq_pend : 1; 654 + u64 smq_next_sq_vld : 1; 655 + u64 rsvd_255_253 : 3; 656 + u64 next_sqb : 64; /* W4 */ 657 + u64 tail_sqb : 64; /* W5 */ 658 + u64 smenq_sqb : 64; /* W6 */ 659 + u64 smenq_next_sqb : 64; /* W7 */ 660 + u64 head_sqb : 64; /* W8 */ 661 + u64 rsvd_583_576 : 8; /* W9 */ 662 + u64 vfi_lso_total : 18; 663 + u64 vfi_lso_sizem1 : 3; 664 + u64 vfi_lso_sb : 8; 665 + u64 vfi_lso_mps : 14; 666 + u64 vfi_lso_vlan0_ins_ena : 1; 667 + u64 vfi_lso_vlan1_ins_ena : 1; 668 + u64 vfi_lso_vld : 1; 669 + u64 rsvd_639_630 : 10; 670 + u64 scm_lso_rem : 18; /* W10 */ 671 + u64 rsvd_703_658 : 46; 672 + u64 octs : 48; /* W11 */ 673 + u64 rsvd_767_752 : 16; 674 + u64 pkts : 48; /* W12 */ 675 + u64 rsvd_831_816 : 16; 676 + u64 rsvd_895_832 : 64; /* W13 */ 677 + u64 dropped_octs : 48; 678 + u64 rsvd_959_944 : 16; 679 + u64 dropped_pkts : 48; 680 + u64 rsvd_1023_1008 : 16; 353 681 }; 354 682 355 683 /* NIX Receive queue context structure */ 356 684 struct nix_rq_ctx_s { 357 - #if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ 358 - u64 wqe_aura : 20; 359 - u64 substream : 20; 360 - u64 cq : 20; 361 - u64 ena_wqwd : 1; 362 - u64 ipsech_ena : 1; 363 - u64 sso_ena : 1; 364 - u64 ena : 1; 365 - #else 366 685 u64 ena : 1; 367 686 u64 sso_ena : 1; 368 687 u64 ipsech_ena : 1; ··· 
511 548 u64 cq : 20; 512 549 u64 substream : 20; 513 550 u64 wqe_aura : 20; 514 - #endif 515 - #if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ 516 - u64 rsvd_127_122 : 6; 517 - u64 lpb_drop_ena : 1; 518 - u64 spb_drop_ena : 1; 519 - u64 xqe_drop_ena : 1; 520 - u64 wqe_caching : 1; 521 - u64 pb_caching : 2; 522 - u64 sso_tt : 2; 523 - u64 sso_grp : 10; 524 - u64 lpb_aura : 20; 525 - u64 spb_aura : 20; 526 - #else 527 551 u64 spb_aura : 20; 528 552 u64 lpb_aura : 20; 529 553 u64 sso_grp : 10; ··· 521 571 u64 spb_drop_ena : 1; 522 572 u64 lpb_drop_ena : 1; 523 573 u64 rsvd_127_122 : 6; 524 - #endif 525 - #if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ 526 - u64 xqe_hdr_split : 1; 527 - u64 xqe_imm_copy : 1; 528 - u64 rsvd_189_184 : 6; 529 - u64 xqe_imm_size : 6; 530 - u64 later_skip : 6; 531 - u64 rsvd_171 : 1; 532 - u64 first_skip : 7; 533 - u64 lpb_sizem1 : 12; 534 - u64 spb_ena : 1; 535 - u64 rsvd_150_148 : 3; 536 - u64 wqe_skip : 2; 537 - u64 spb_sizem1 : 6; 538 - u64 rsvd_139_128 : 12; 539 - #else 540 - u64 rsvd_139_128 : 12; 574 + u64 rsvd_139_128 : 12; /* W2 */ 541 575 u64 spb_sizem1 : 6; 542 576 u64 wqe_skip : 2; 543 577 u64 rsvd_150_148 : 3; ··· 534 600 u64 rsvd_189_184 : 6; 535 601 u64 xqe_imm_copy : 1; 536 602 u64 xqe_hdr_split : 1; 537 - #endif 538 - #if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ 539 - u64 spb_pool_pass : 8; 540 - u64 spb_pool_drop : 8; 541 - u64 spb_aura_pass : 8; 542 - u64 spb_aura_drop : 8; 543 - u64 wqe_pool_pass : 8; 544 - u64 wqe_pool_drop : 8; 545 - u64 xqe_pass : 8; 546 - u64 xqe_drop : 8; 547 - #else 548 - u64 xqe_drop : 8; 603 + u64 xqe_drop : 8; /* W3*/ 549 604 u64 xqe_pass : 8; 550 605 u64 wqe_pool_drop : 8; 551 606 u64 wqe_pool_pass : 8; ··· 542 619 u64 spb_aura_pass : 8; 543 620 u64 spb_pool_drop : 8; 544 621 u64 spb_pool_pass : 8; 545 - #endif 546 - #if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ 547 - u64 rsvd_319_315 : 5; 548 - u64 qint_idx : 7; 549 - u64 rq_int_ena : 8; 550 - u64 rq_int : 8; 551 - u64 rsvd_291_288 : 4; 552 - u64 
lpb_pool_pass : 8; 553 - u64 lpb_pool_drop : 8; 554 - u64 lpb_aura_pass : 8; 555 - u64 lpb_aura_drop : 8; 556 - #else 557 - u64 lpb_aura_drop : 8; 622 + u64 lpb_aura_drop : 8; /* W4 */ 558 623 u64 lpb_aura_pass : 8; 559 624 u64 lpb_pool_drop : 8; 560 625 u64 lpb_pool_pass : 8; ··· 551 640 u64 rq_int_ena : 8; 552 641 u64 qint_idx : 7; 553 642 u64 rsvd_319_315 : 5; 554 - #endif 555 - #if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ 556 - u64 rsvd_383_366 : 18; 557 - u64 flow_tagw : 6; 558 - u64 bad_utag : 8; 559 - u64 good_utag : 8; 560 - u64 ltag : 24; 561 - #else 562 - u64 ltag : 24; 643 + u64 ltag : 24; /* W5 */ 563 644 u64 good_utag : 8; 564 645 u64 bad_utag : 8; 565 646 u64 flow_tagw : 6; 566 647 u64 rsvd_383_366 : 18; 567 - #endif 568 - #if defined(__BIG_ENDIAN_BITFIELD) /* W6 */ 648 + u64 octs : 48; /* W6 */ 569 649 u64 rsvd_447_432 : 16; 570 - u64 octs : 48; 571 - #else 572 - u64 octs : 48; 573 - u64 rsvd_447_432 : 16; 574 - #endif 575 - #if defined(__BIG_ENDIAN_BITFIELD) /* W7 */ 650 + u64 pkts : 48; /* W7 */ 576 651 u64 rsvd_511_496 : 16; 577 - u64 pkts : 48; 578 - #else 579 - u64 pkts : 48; 580 - u64 rsvd_511_496 : 16; 581 - #endif 582 - #if defined(__BIG_ENDIAN_BITFIELD) /* W8 */ 652 + u64 drop_octs : 48; /* W8 */ 583 653 u64 rsvd_575_560 : 16; 584 - u64 drop_octs : 48; 585 - #else 586 - u64 drop_octs : 48; 587 - u64 rsvd_575_560 : 16; 588 - #endif 589 - #if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ 654 + u64 drop_pkts : 48; /* W9 */ 590 655 u64 rsvd_639_624 : 16; 591 - u64 drop_pkts : 48; 592 - #else 593 - u64 drop_pkts : 48; 594 - u64 rsvd_639_624 : 16; 595 - #endif 596 - #if defined(__BIG_ENDIAN_BITFIELD) /* W10 */ 656 + u64 re_pkts : 48; /* W10 */ 597 657 u64 rsvd_703_688 : 16; 598 - u64 re_pkts : 48; 599 - #else 600 - u64 re_pkts : 48; 601 - u64 rsvd_703_688 : 16; 602 - #endif 603 658 u64 rsvd_767_704; /* W11 */ 604 659 u64 rsvd_831_768; /* W12 */ 605 660 u64 rsvd_895_832; /* W13 */ ··· 588 711 589 712 /* NIX Send queue context structure */ 590 713 struct 
nix_sq_ctx_s { 591 - #if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ 592 - u64 sqe_way_mask : 16; 593 - u64 cq : 20; 594 - u64 sdp_mcast : 1; 595 - u64 substream : 20; 596 - u64 qint_idx : 6; 597 - u64 ena : 1; 598 - #else 599 714 u64 ena : 1; 600 715 u64 qint_idx : 6; 601 716 u64 substream : 20; 602 717 u64 sdp_mcast : 1; 603 718 u64 cq : 20; 604 719 u64 sqe_way_mask : 16; 605 - #endif 606 - #if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ 607 - u64 sqb_count : 16; 608 - u64 default_chan : 12; 609 - u64 smq_rr_quantum : 24; 610 - u64 sso_ena : 1; 611 - u64 xoff : 1; 612 - u64 cq_ena : 1; 613 - u64 smq : 9; 614 - #else 615 720 u64 smq : 9; 616 721 u64 cq_ena : 1; 617 722 u64 xoff : 1; ··· 601 742 u64 smq_rr_quantum : 24; 602 743 u64 default_chan : 12; 603 744 u64 sqb_count : 16; 604 - #endif 605 - #if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ 606 - u64 rsvd_191 : 1; 607 - u64 sqe_stype : 2; 608 - u64 sq_int_ena : 8; 609 - u64 sq_int : 8; 610 - u64 sqb_aura : 20; 611 - u64 smq_rr_count : 25; 612 - #else 613 745 u64 smq_rr_count : 25; 614 746 u64 sqb_aura : 20; 615 747 u64 sq_int : 8; 616 748 u64 sq_int_ena : 8; 617 749 u64 sqe_stype : 2; 618 750 u64 rsvd_191 : 1; 619 - #endif 620 - #if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ 621 - u64 rsvd_255_253 : 3; 622 - u64 smq_next_sq_vld : 1; 623 - u64 smq_pend : 1; 624 - u64 smenq_next_sqb_vld : 1; 625 - u64 head_offset : 6; 626 - u64 smenq_offset : 6; 627 - u64 tail_offset : 6; 628 - u64 smq_lso_segnum : 8; 629 - u64 smq_next_sq : 20; 630 - u64 mnq_dis : 1; 631 - u64 lmt_dis : 1; 632 - u64 cq_limit : 8; 633 - u64 max_sqe_size : 2; 634 - #else 635 751 u64 max_sqe_size : 2; 636 752 u64 cq_limit : 8; 637 753 u64 lmt_dis : 1; ··· 620 786 u64 smq_pend : 1; 621 787 u64 smq_next_sq_vld : 1; 622 788 u64 rsvd_255_253 : 3; 623 - #endif 624 789 u64 next_sqb : 64;/* W4 */ 625 790 u64 tail_sqb : 64;/* W5 */ 626 791 u64 smenq_sqb : 64;/* W6 */ 627 792 u64 smenq_next_sqb : 64;/* W7 */ 628 793 u64 head_sqb : 64;/* W8 */ 629 - #if 
defined(__BIG_ENDIAN_BITFIELD) /* W9 */ 630 - u64 rsvd_639_630 : 10; 631 - u64 vfi_lso_vld : 1; 632 - u64 vfi_lso_vlan1_ins_ena : 1; 633 - u64 vfi_lso_vlan0_ins_ena : 1; 634 - u64 vfi_lso_mps : 14; 635 - u64 vfi_lso_sb : 8; 636 - u64 vfi_lso_sizem1 : 3; 637 - u64 vfi_lso_total : 18; 638 - u64 rsvd_583_576 : 8; 639 - #else 640 794 u64 rsvd_583_576 : 8; 641 795 u64 vfi_lso_total : 18; 642 796 u64 vfi_lso_sizem1 : 3; ··· 634 812 u64 vfi_lso_vlan1_ins_ena : 1; 635 813 u64 vfi_lso_vld : 1; 636 814 u64 rsvd_639_630 : 10; 637 - #endif 638 - #if defined(__BIG_ENDIAN_BITFIELD) /* W10 */ 639 - u64 rsvd_703_658 : 46; 640 - u64 scm_lso_rem : 18; 641 - #else 642 815 u64 scm_lso_rem : 18; 643 816 u64 rsvd_703_658 : 46; 644 - #endif 645 - #if defined(__BIG_ENDIAN_BITFIELD) /* W11 */ 646 - u64 rsvd_767_752 : 16; 647 - u64 octs : 48; 648 - #else 649 817 u64 octs : 48; 650 818 u64 rsvd_767_752 : 16; 651 - #endif 652 - #if defined(__BIG_ENDIAN_BITFIELD) /* W12 */ 653 - u64 rsvd_831_816 : 16; 654 - u64 pkts : 48; 655 - #else 656 819 u64 pkts : 48; 657 820 u64 rsvd_831_816 : 16; 658 - #endif 659 821 u64 rsvd_895_832 : 64;/* W13 */ 660 - #if defined(__BIG_ENDIAN_BITFIELD) /* W14 */ 661 - u64 rsvd_959_944 : 16; 662 - u64 dropped_octs : 48; 663 - #else 664 822 u64 dropped_octs : 48; 665 823 u64 rsvd_959_944 : 16; 666 - #endif 667 - #if defined(__BIG_ENDIAN_BITFIELD) /* W15 */ 668 - u64 rsvd_1023_1008 : 16; 669 - u64 dropped_pkts : 48; 670 - #else 671 824 u64 dropped_pkts : 48; 672 825 u64 rsvd_1023_1008 : 16; 673 - #endif 674 826 }; 675 827 676 828 /* NIX Receive side scaling entry structure*/ 677 829 struct nix_rsse_s { 678 - #if defined(__BIG_ENDIAN_BITFIELD) 679 - uint32_t reserved_20_31 : 12; 680 - uint32_t rq : 20; 681 - #else 682 830 uint32_t rq : 20; 683 831 uint32_t reserved_20_31 : 12; 684 832 685 - #endif 686 833 }; 687 834 688 835 /* NIX receive multicast/mirror entry structure */ 689 836 struct nix_rx_mce_s { 690 - #if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ 691 - uint64_t 
next : 16; 692 - uint64_t pf_func : 16; 693 - uint64_t rsvd_31_24 : 8; 694 - uint64_t index : 20; 695 - uint64_t eol : 1; 696 - uint64_t rsvd_2 : 1; 697 - uint64_t op : 2; 698 - #else 699 837 uint64_t op : 2; 700 838 uint64_t rsvd_2 : 1; 701 839 uint64_t eol : 1; ··· 663 881 uint64_t rsvd_31_24 : 8; 664 882 uint64_t pf_func : 16; 665 883 uint64_t next : 16; 666 - #endif 667 884 }; 668 885 669 886 enum nix_lsoalg { ··· 681 900 }; 682 901 683 902 struct nix_lso_format { 684 - #if defined(__BIG_ENDIAN_BITFIELD) 685 - u64 rsvd_19_63 : 45; 686 - u64 alg : 3; 687 - u64 rsvd_14_15 : 2; 688 - u64 sizem1 : 2; 689 - u64 rsvd_10_11 : 2; 690 - u64 layer : 2; 691 - u64 offset : 8; 692 - #else 693 903 u64 offset : 8; 694 904 u64 layer : 2; 695 905 u64 rsvd_10_11 : 2; ··· 688 916 u64 rsvd_14_15 : 2; 689 917 u64 alg : 3; 690 918 u64 rsvd_19_63 : 45; 691 - #endif 692 919 }; 693 920 694 921 struct nix_rx_flowkey_alg { 695 - #if defined(__BIG_ENDIAN_BITFIELD) 696 - u64 reserved_35_63 :29; 697 - u64 ltype_match :4; 698 - u64 ltype_mask :4; 699 - u64 sel_chan :1; 700 - u64 ena :1; 701 - u64 reserved_24_24 :1; 702 - u64 lid :3; 703 - u64 bytesm1 :5; 704 - u64 hdr_offset :8; 705 - u64 fn_mask :1; 706 - u64 ln_mask :1; 707 - u64 key_offset :6; 708 - #else 709 922 u64 key_offset :6; 710 923 u64 ln_mask :1; 711 924 u64 fn_mask :1; ··· 703 946 u64 ltype_mask :4; 704 947 u64 ltype_match :4; 705 948 u64 reserved_35_63 :29; 706 - #endif 707 949 }; 708 950 709 951 /* NIX VTAG size */
+5 -5
drivers/net/ethernet/marvell/octeontx2/nic/Makefile
··· 3 3 # Makefile for Marvell's OcteonTX2 ethernet device drivers 4 4 # 5 5 6 - obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o 7 - obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o 6 + obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o 7 + obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o 8 8 9 - octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ 10 - otx2_ptp.o otx2_flows.o 11 - octeontx2_nicvf-y := otx2_vf.o 9 + rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ 10 + otx2_ptp.o otx2_flows.o cn10k.o 11 + rvu_nicvf-y := otx2_vf.o 12 12 13 13 ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+181
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Marvell OcteonTx2 RVU Physcial Function ethernet driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + */ 6 + 7 + #include "cn10k.h" 8 + #include "otx2_reg.h" 9 + #include "otx2_struct.h" 10 + 11 + static struct dev_hw_ops otx2_hw_ops = { 12 + .sq_aq_init = otx2_sq_aq_init, 13 + .sqe_flush = otx2_sqe_flush, 14 + .aura_freeptr = otx2_aura_freeptr, 15 + .refill_pool_ptrs = otx2_refill_pool_ptrs, 16 + }; 17 + 18 + static struct dev_hw_ops cn10k_hw_ops = { 19 + .sq_aq_init = cn10k_sq_aq_init, 20 + .sqe_flush = cn10k_sqe_flush, 21 + .aura_freeptr = cn10k_aura_freeptr, 22 + .refill_pool_ptrs = cn10k_refill_pool_ptrs, 23 + }; 24 + 25 + int cn10k_pf_lmtst_init(struct otx2_nic *pf) 26 + { 27 + int size, num_lines; 28 + u64 base; 29 + 30 + if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { 31 + pf->hw_ops = &otx2_hw_ops; 32 + return 0; 33 + } 34 + 35 + pf->hw_ops = &cn10k_hw_ops; 36 + base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + 37 + (MBOX_SIZE * (pf->total_vfs + 1)); 38 + 39 + size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) - 40 + (MBOX_SIZE * (pf->total_vfs + 1)); 41 + 42 + pf->hw.lmt_base = ioremap(base, size); 43 + 44 + if (!pf->hw.lmt_base) { 45 + dev_err(pf->dev, "Unable to map PF LMTST region\n"); 46 + return -ENOMEM; 47 + } 48 + 49 + /* FIXME: Get the num of LMTST lines from LMT table */ 50 + pf->tot_lmt_lines = size / LMT_LINE_SIZE; 51 + num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) / 52 + pf->hw.tx_queues; 53 + /* Number of LMT lines per SQ queues */ 54 + pf->nix_lmt_lines = num_lines > 32 ? 
32 : num_lines; 55 + 56 + pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE; 57 + return 0; 58 + } 59 + 60 + int cn10k_vf_lmtst_init(struct otx2_nic *vf) 61 + { 62 + int size, num_lines; 63 + 64 + if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) { 65 + vf->hw_ops = &otx2_hw_ops; 66 + return 0; 67 + } 68 + 69 + vf->hw_ops = &cn10k_hw_ops; 70 + size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM); 71 + vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev, 72 + PCI_MBOX_BAR_NUM), 73 + size); 74 + if (!vf->hw.lmt_base) { 75 + dev_err(vf->dev, "Unable to map VF LMTST region\n"); 76 + return -ENOMEM; 77 + } 78 + 79 + vf->tot_lmt_lines = size / LMT_LINE_SIZE; 80 + /* LMTST lines per SQ */ 81 + num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) / 82 + vf->hw.tx_queues; 83 + vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; 84 + vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE; 85 + return 0; 86 + } 87 + EXPORT_SYMBOL(cn10k_vf_lmtst_init); 88 + 89 + int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) 90 + { 91 + struct nix_cn10k_aq_enq_req *aq; 92 + struct otx2_nic *pfvf = dev; 93 + struct otx2_snd_queue *sq; 94 + 95 + sq = &pfvf->qset.sq[qidx]; 96 + sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base + 97 + (qidx * pfvf->nix_lmt_size)); 98 + 99 + /* Get memory to put this msg */ 100 + aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); 101 + if (!aq) 102 + return -ENOMEM; 103 + 104 + aq->sq.cq = pfvf->hw.rx_queues + qidx; 105 + aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ 106 + aq->sq.cq_ena = 1; 107 + aq->sq.ena = 1; 108 + /* Only one SMQ is allocated, map all SQ's to that SMQ */ 109 + aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; 110 + /* FIXME: set based on NIX_AF_DWRR_RPM_MTU*/ 111 + aq->sq.smq_rr_weight = pfvf->netdev->mtu; 112 + aq->sq.default_chan = pfvf->hw.tx_chan_base; 113 + aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ 114 + aq->sq.sqb_aura = sqb_aura; 115 + aq->sq.sq_int_ena = NIX_SQINT_BITS; 116 + aq->sq.qint_idx 
= 0; 117 + /* Due pipelining impact minimum 2000 unused SQ CQE's 118 + * need to maintain to avoid CQ overflow. 119 + */ 120 + aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); 121 + 122 + /* Fill AQ info */ 123 + aq->qidx = qidx; 124 + aq->ctype = NIX_AQ_CTYPE_SQ; 125 + aq->op = NIX_AQ_INSTOP_INIT; 126 + 127 + return otx2_sync_mbox_msg(&pfvf->mbox); 128 + } 129 + 130 + #define NPA_MAX_BURST 16 131 + void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) 132 + { 133 + struct otx2_nic *pfvf = dev; 134 + u64 ptrs[NPA_MAX_BURST]; 135 + int num_ptrs = 1; 136 + dma_addr_t bufptr; 137 + 138 + /* Refill pool with new buffers */ 139 + while (cq->pool_ptrs) { 140 + if (otx2_alloc_buffer(pfvf, cq, &bufptr)) { 141 + if (num_ptrs--) 142 + __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs, 143 + num_ptrs, 144 + cq->rbpool->lmt_addr); 145 + break; 146 + } 147 + cq->pool_ptrs--; 148 + ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM; 149 + num_ptrs++; 150 + if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) { 151 + __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs, 152 + num_ptrs, 153 + cq->rbpool->lmt_addr); 154 + num_ptrs = 1; 155 + } 156 + } 157 + } 158 + 159 + void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) 160 + { 161 + struct otx2_nic *pfvf = dev; 162 + int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines); 163 + u64 val = 0, tar_addr = 0; 164 + 165 + /* FIXME: val[0:10] LMT_ID. 166 + * [12:15] no of LMTST - 1 in the burst. 167 + * [19:63] data size of each LMTST in the burst except first. 168 + */ 169 + val = (lmt_id & 0x7FF); 170 + /* Target address for LMTST flush tells HW how many 128bit 171 + * words are present. 172 + * tar_addr[6:4] size of first LMTST - 1 in units of 128b. 173 + */ 174 + tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4; 175 + dma_wmb(); 176 + memcpy(sq->lmt_addr, sq->sqe_base, size); 177 + cn10k_lmt_flush(val, tar_addr); 178 + 179 + sq->head++; 180 + sq->head &= (sq->sqe_cnt - 1); 181 + }
+17
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * Marvell OcteonTx2 RVU Ethernet driver 3 + * 4 + * Copyright (C) 2020 Marvell. 5 + */ 6 + 7 + #ifndef CN10K_H 8 + #define CN10K_H 9 + 10 + #include "otx2_common.h" 11 + 12 + void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); 13 + void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); 14 + int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); 15 + int cn10k_pf_lmtst_init(struct otx2_nic *pf); 16 + int cn10k_vf_lmtst_init(struct otx2_nic *vf); 17 + #endif /* CN10K_H */
+111 -39
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
··· 15 15 #include "otx2_reg.h" 16 16 #include "otx2_common.h" 17 17 #include "otx2_struct.h" 18 + #include "cn10k.h" 18 19 19 20 static void otx2_nix_rq_op_stats(struct queue_stats *stats, 20 21 struct otx2_nic *pfvf, int qidx) ··· 230 229 return -ENOMEM; 231 230 } 232 231 233 - pfvf->max_frs = mtu + OTX2_ETH_HLEN; 234 232 req->maxlen = pfvf->max_frs; 235 233 236 234 err = otx2_sync_mbox_msg(&pfvf->mbox); ··· 526 526 return ret; 527 527 } 528 528 529 + int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, 530 + dma_addr_t *dma) 531 + { 532 + if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) { 533 + struct refill_work *work; 534 + struct delayed_work *dwork; 535 + 536 + work = &pfvf->refill_wrk[cq->cq_idx]; 537 + dwork = &work->pool_refill_work; 538 + /* Schedule a task if no other task is running */ 539 + if (!cq->refill_task_sched) { 540 + cq->refill_task_sched = true; 541 + schedule_delayed_work(dwork, 542 + msecs_to_jiffies(100)); 543 + } 544 + return -ENOMEM; 545 + } 546 + return 0; 547 + } 548 + 529 549 void otx2_tx_timeout(struct net_device *netdev, unsigned int txq) 530 550 { 531 551 struct otx2_nic *pfvf = netdev_priv(netdev); ··· 605 585 /* Set topology e.t.c configuration */ 606 586 if (lvl == NIX_TXSCH_LVL_SMQ) { 607 587 req->reg[0] = NIX_AF_SMQX_CFG(schq); 608 - req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) | 609 - OTX2_MIN_MTU; 588 + req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8) 589 + | OTX2_MIN_MTU; 610 590 611 591 req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | 612 592 (0x2ULL << 36); ··· 748 728 #define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */ 749 729 #define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */ 750 730 751 - /* Send skid of 2000 packets required for CQ size of 4K CQEs. 
*/ 752 - #define SEND_CQ_SKID 2000 753 - 754 731 static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) 755 732 { 756 733 struct otx2_qset *qset = &pfvf->qset; ··· 781 764 return otx2_sync_mbox_msg(&pfvf->mbox); 782 765 } 783 766 767 + int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) 768 + { 769 + struct otx2_nic *pfvf = dev; 770 + struct otx2_snd_queue *sq; 771 + struct nix_aq_enq_req *aq; 772 + 773 + sq = &pfvf->qset.sq[qidx]; 774 + sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); 775 + /* Get memory to put this msg */ 776 + aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); 777 + if (!aq) 778 + return -ENOMEM; 779 + 780 + aq->sq.cq = pfvf->hw.rx_queues + qidx; 781 + aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ 782 + aq->sq.cq_ena = 1; 783 + aq->sq.ena = 1; 784 + /* Only one SMQ is allocated, map all SQ's to that SMQ */ 785 + aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; 786 + aq->sq.smq_rr_quantum = DFLT_RR_QTM; 787 + aq->sq.default_chan = pfvf->hw.tx_chan_base; 788 + aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ 789 + aq->sq.sqb_aura = sqb_aura; 790 + aq->sq.sq_int_ena = NIX_SQINT_BITS; 791 + aq->sq.qint_idx = 0; 792 + /* Due pipelining impact minimum 2000 unused SQ CQE's 793 + * need to maintain to avoid CQ overflow. 
794 + */ 795 + aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); 796 + 797 + /* Fill AQ info */ 798 + aq->qidx = qidx; 799 + aq->ctype = NIX_AQ_CTYPE_SQ; 800 + aq->op = NIX_AQ_INSTOP_INIT; 801 + 802 + return otx2_sync_mbox_msg(&pfvf->mbox); 803 + } 804 + 784 805 static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) 785 806 { 786 807 struct otx2_qset *qset = &pfvf->qset; 787 808 struct otx2_snd_queue *sq; 788 - struct nix_aq_enq_req *aq; 789 809 struct otx2_pool *pool; 790 810 int err; 791 811 ··· 859 805 sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100; 860 806 sq->aura_id = sqb_aura; 861 807 sq->aura_fc_addr = pool->fc_addr->base; 862 - sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); 863 808 sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0)); 864 809 865 810 sq->stats.bytes = 0; 866 811 sq->stats.pkts = 0; 867 812 868 - /* Get memory to put this msg */ 869 - aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); 870 - if (!aq) 871 - return -ENOMEM; 813 + return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); 872 814 873 - aq->sq.cq = pfvf->hw.rx_queues + qidx; 874 - aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ 875 - aq->sq.cq_ena = 1; 876 - aq->sq.ena = 1; 877 - /* Only one SMQ is allocated, map all SQ's to that SMQ */ 878 - aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; 879 - aq->sq.smq_rr_quantum = DFLT_RR_QTM; 880 - aq->sq.default_chan = pfvf->hw.tx_chan_base; 881 - aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ 882 - aq->sq.sqb_aura = sqb_aura; 883 - aq->sq.sq_int_ena = NIX_SQINT_BITS; 884 - aq->sq.qint_idx = 0; 885 - /* Due pipelining impact minimum 2000 unused SQ CQE's 886 - * need to maintain to avoid CQ overflow. 
887 - */ 888 - aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt)); 889 - 890 - /* Fill AQ info */ 891 - aq->qidx = qidx; 892 - aq->ctype = NIX_AQ_CTYPE_SQ; 893 - aq->op = NIX_AQ_INSTOP_INIT; 894 - 895 - return otx2_sync_mbox_msg(&pfvf->mbox); 896 815 } 897 816 898 817 static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) ··· 969 942 } 970 943 return; 971 944 } 972 - otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); 945 + pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); 973 946 cq->pool_ptrs--; 974 947 } 975 948 cq->refill_task_sched = false; ··· 1213 1186 1214 1187 pool->rbsize = buf_size; 1215 1188 1189 + /* Set LMTST addr for NPA batch free */ 1190 + if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) 1191 + pool->lmt_addr = (__force u64 *)((u64)pfvf->hw.npa_lmt_base + 1192 + (pool_id * LMT_LINE_SIZE)); 1193 + 1216 1194 /* Initialize this pool's context via AF */ 1217 1195 aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); 1218 1196 if (!aq) { ··· 1306 1274 for (ptr = 0; ptr < num_sqbs; ptr++) { 1307 1275 if (otx2_alloc_rbuf(pfvf, pool, &bufptr)) 1308 1276 return -ENOMEM; 1309 - otx2_aura_freeptr(pfvf, pool_id, bufptr); 1277 + pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); 1310 1278 sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; 1311 1279 } 1312 1280 } ··· 1356 1324 for (ptr = 0; ptr < num_ptrs; ptr++) { 1357 1325 if (otx2_alloc_rbuf(pfvf, pool, &bufptr)) 1358 1326 return -ENOMEM; 1359 - otx2_aura_freeptr(pfvf, pool_id, 1360 - bufptr + OTX2_HEAD_ROOM); 1327 + pfvf->hw_ops->aura_freeptr(pfvf, pool_id, 1328 + bufptr + OTX2_HEAD_ROOM); 1361 1329 } 1362 1330 } 1363 1331 ··· 1635 1603 cpu = 0; 1636 1604 } 1637 1605 } 1606 + 1607 + u16 otx2_get_max_mtu(struct otx2_nic *pfvf) 1608 + { 1609 + struct nix_hw_info *rsp; 1610 + struct msg_req *req; 1611 + u16 max_mtu; 1612 + int rc; 1613 + 1614 + mutex_lock(&pfvf->mbox.lock); 1615 + 1616 + req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox); 1617 + if (!req) { 1618 + rc = -ENOMEM; 1619 + goto 
out; 1620 + } 1621 + 1622 + rc = otx2_sync_mbox_msg(&pfvf->mbox); 1623 + if (!rc) { 1624 + rsp = (struct nix_hw_info *) 1625 + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 1626 + 1627 + /* HW counts VLAN insertion bytes (8 for double tag) 1628 + * irrespective of whether SQE is requesting to insert VLAN 1629 + * in the packet or not. Hence these 8 bytes have to be 1630 + * discounted from max packet size otherwise HW will throw 1631 + * SMQ errors 1632 + */ 1633 + max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN; 1634 + } 1635 + 1636 + out: 1637 + mutex_unlock(&pfvf->mbox.lock); 1638 + if (rc) { 1639 + dev_warn(pfvf->dev, 1640 + "Failed to get MTU from hardware setting default value(1500)\n"); 1641 + max_mtu = 1500; 1642 + } 1643 + return max_mtu; 1644 + } 1645 + EXPORT_SYMBOL(otx2_get_max_mtu); 1638 1646 1639 1647 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ 1640 1648 int __weak \
+103 -9
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 50 50 #define NIX_LF_ERR_VEC 0x81 51 51 #define NIX_LF_POISON_VEC 0x82 52 52 53 + /* Send skid of 2000 packets required for CQ size of 4K CQEs. */ 54 + #define SEND_CQ_SKID 2000 55 + 53 56 /* RSS configuration */ 54 57 struct otx2_rss_ctx { 55 58 u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE]; ··· 193 190 u8 lso_tsov6_idx; 194 191 u8 lso_udpv4_idx; 195 192 u8 lso_udpv6_idx; 196 - u8 hw_tso; 197 193 198 194 /* MSI-X */ 199 195 u8 cint_cnt; /* CQ interrupt count */ ··· 210 208 u64 cgx_fec_uncorr_blks; 211 209 u8 cgx_links; /* No. of CGX links present in HW */ 212 210 u8 lbk_links; /* No. of LBK links present in HW */ 211 + #define HW_TSO BIT_ULL(0) 212 + #define CN10K_MBOX BIT_ULL(1) 213 + #define CN10K_LMTST BIT_ULL(2) 214 + unsigned long cap_flag; 215 + 216 + #define LMT_LINE_SIZE 128 217 + #define NIX_LMTID_BASE 72 /* RX + TX + XDP */ 218 + void __iomem *lmt_base; 219 + u64 *npa_lmt_base; 220 + u64 *nix_lmt_base; 213 221 }; 214 222 215 223 struct otx2_vf_config { ··· 278 266 struct list_head flow_list; 279 267 }; 280 268 269 + struct dev_hw_ops { 270 + int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); 271 + void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, 272 + int size, int qidx); 273 + void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); 274 + void (*aura_freeptr)(void *dev, int aura, u64 buf); 275 + }; 276 + 281 277 struct otx2_nic { 282 278 void __iomem *reg_base; 283 279 struct net_device *netdev; 280 + struct dev_hw_ops *hw_ops; 284 281 void *iommu_domain; 285 282 u16 max_frs; 286 283 u16 rbsize; /* Receive buffer size */ ··· 338 317 339 318 /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */ 340 319 int nix_blkaddr; 320 + /* LMTST Lines info */ 321 + u16 tot_lmt_lines; 322 + u16 nix_lmt_lines; 323 + u32 nix_lmt_size; 341 324 342 325 struct otx2_ptp *ptp; 343 326 struct hwtstamp_config tstamp; ··· 366 341 (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF); 367 342 } 368 343 344 + /* REVID for PCIe devices. 
345 + * Bits 0..1: minor pass, bit 3..2: major pass 346 + * bits 7..4: midr id 347 + */ 348 + #define PCI_REVISION_ID_96XX 0x00 349 + #define PCI_REVISION_ID_95XX 0x10 350 + #define PCI_REVISION_ID_LOKI 0x20 351 + #define PCI_REVISION_ID_98XX 0x30 352 + #define PCI_REVISION_ID_95XXMM 0x40 353 + 354 + static inline bool is_dev_otx2(struct pci_dev *pdev) 355 + { 356 + u8 midr = pdev->revision & 0xF0; 357 + 358 + return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || 359 + midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX || 360 + midr == PCI_REVISION_ID_95XXMM); 361 + } 362 + 369 363 static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) 370 364 { 371 365 struct otx2_hw *hw = &pfvf->hw; ··· 393 349 pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT; 394 350 pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT; 395 351 396 - hw->hw_tso = true; 352 + __set_bit(HW_TSO, &hw->cap_flag); 397 353 398 354 if (is_96xx_A0(pfvf->pdev)) { 399 - hw->hw_tso = false; 355 + __clear_bit(HW_TSO, &hw->cap_flag); 400 356 401 357 /* Time based irq coalescing is not supported */ 402 358 pfvf->hw.cq_qcount_wait = 0x0; ··· 406 362 */ 407 363 pfvf->hw.rq_skid = 600; 408 364 pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K); 365 + } 366 + if (!is_dev_otx2(pfvf->pdev)) { 367 + __set_bit(CN10K_MBOX, &hw->cap_flag); 368 + __set_bit(CN10K_LMTST, &hw->cap_flag); 409 369 } 410 370 } 411 371 ··· 519 471 } 520 472 521 473 #else 522 - #define otx2_write128(lo, hi, addr) 474 + #define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr) 523 475 #define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; }) 524 476 #endif 477 + 478 + static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura, 479 + u64 *ptrs, u64 num_ptrs, 480 + u64 *lmt_addr) 481 + { 482 + u64 size = 0, count_eot = 0; 483 + u64 tar_addr, val = 0; 484 + 485 + tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0); 486 + /* LMTID is same as AURA Id */ 487 + val = (aura & 0x7FF) | BIT_ULL(63); 
488 + /* Set if [127:64] of last 128bit word has a valid pointer */ 489 + count_eot = (num_ptrs % 2) ? 0ULL : 1ULL; 490 + /* Set AURA ID to free pointer */ 491 + ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF); 492 + /* Target address for LMTST flush tells HW how many 128bit 493 + * words are valid from NPA_LF_AURA_BATCH_FREE0. 494 + * 495 + * tar_addr[6:4] is LMTST size-1 in units of 128b. 496 + */ 497 + if (num_ptrs > 2) { 498 + size = (sizeof(u64) * num_ptrs) / 16; 499 + if (!count_eot) 500 + size++; 501 + tar_addr |= ((size - 1) & 0x7) << 4; 502 + } 503 + memcpy(lmt_addr, ptrs, sizeof(u64) * num_ptrs); 504 + /* Perform LMTST flush */ 505 + cn10k_lmt_flush(val, tar_addr); 506 + } 507 + 508 + static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf) 509 + { 510 + struct otx2_nic *pfvf = dev; 511 + struct otx2_pool *pool; 512 + u64 ptrs[2]; 513 + 514 + pool = &pfvf->qset.pool[aura]; 515 + ptrs[1] = buf; 516 + __cn10k_aura_freeptr(pfvf, aura, ptrs, 2, pool->lmt_addr); 517 + } 525 518 526 519 /* Alloc pointer from pool/aura */ 527 520 static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura) ··· 575 486 } 576 487 577 488 /* Free pointer to a pool/aura */ 578 - static inline void otx2_aura_freeptr(struct otx2_nic *pfvf, 579 - int aura, u64 buf) 489 + static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf) 580 490 { 581 - otx2_write128(buf, (u64)aura | BIT_ULL(63), 582 - otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0)); 491 + struct otx2_nic *pfvf = dev; 492 + void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0); 493 + 494 + otx2_write128(buf, (u64)aura | BIT_ULL(63), addr); 583 495 } 584 496 585 497 static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx) ··· 735 645 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); 736 646 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); 737 647 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); 648 + int 
otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); 649 + int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); 650 + int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, 651 + dma_addr_t *dma); 738 652 739 653 /* RSS configuration APIs*/ 740 654 int otx2_rss_init(struct otx2_nic *pfvf); ··· 798 704 int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); 799 705 int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); 800 706 int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); 801 - 707 + u16 otx2_get_max_mtu(struct otx2_nic *pfvf); 802 708 #endif /* OTX2_COMMON_H */
+63 -10
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 22 22 #include "otx2_txrx.h" 23 23 #include "otx2_struct.h" 24 24 #include "otx2_ptp.h" 25 + #include "cn10k.h" 25 26 #include <rvu_trace.h> 26 27 27 - #define DRV_NAME "octeontx2-nicpf" 28 - #define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver" 28 + #define DRV_NAME "rvu_nicpf" 29 + #define DRV_STRING "Marvell RVU NIC Physical Function Driver" 29 30 30 31 /* Supported devices */ 31 32 static const struct pci_device_id otx2_pf_id_table[] = { ··· 586 585 if (!pf->mbox_pfvf_wq) 587 586 return -ENOMEM; 588 587 589 - base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR)); 590 - hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); 588 + /* On CN10K platform, PF <-> VF mailbox region follows after 589 + * PF <-> AF mailbox region. 590 + */ 591 + if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) 592 + base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + 593 + MBOX_SIZE; 594 + else 595 + base = readq((void __iomem *)((u64)pf->reg_base + 596 + RVU_PF_VF_BAR4_ADDR)); 591 597 598 + hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); 592 599 if (!hwbase) { 593 600 err = -ENOMEM; 594 601 goto free_wq; ··· 1051 1042 * device memory to allow unaligned accesses. 1052 1043 */ 1053 1044 hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), 1054 - pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM)); 1045 + MBOX_SIZE); 1055 1046 if (!hwbase) { 1056 1047 dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); 1057 1048 err = -ENOMEM; ··· 1288 1279 } 1289 1280 } 1290 1281 1282 + static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu) 1283 + { 1284 + int frame_size; 1285 + int total_size; 1286 + int rbuf_size; 1287 + 1288 + /* The data transferred by NIX to memory consists of actual packet 1289 + * plus additional data which has timestamp and/or EDSA/HIGIG2 1290 + * headers if interface is configured in corresponding modes. 
1291 + * NIX transfers entire data using 6 segments/buffers and writes 1292 + * a CQE_RX descriptor with those segment addresses. First segment 1293 + * has additional data prepended to packet. Also software omits a 1294 + * headroom of 128 bytes and sizeof(struct skb_shared_info) in 1295 + * each segment. Hence the total size of memory needed 1296 + * to receive a packet with 'mtu' is: 1297 + * frame size = mtu + additional data; 1298 + * memory = frame_size + (headroom + struct skb_shared_info size) * 6; 1299 + * each receive buffer size = memory / 6; 1300 + */ 1301 + frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; 1302 + total_size = frame_size + (OTX2_HEAD_ROOM + 1303 + OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6; 1304 + rbuf_size = total_size / 6; 1305 + 1306 + return ALIGN(rbuf_size, 2048); 1307 + } 1308 + 1291 1309 static int otx2_init_hw_resources(struct otx2_nic *pf) 1292 1310 { 1293 1311 struct nix_lf_free_req *free_req; ··· 1331 1295 hw->sqpool_cnt = hw->tx_queues; 1332 1296 hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; 1333 1297 1334 - /* Get the size of receive buffers to allocate */ 1335 - pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu + 1336 - OTX2_ETH_HLEN); 1298 + pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; 1299 + 1300 + pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); 1337 1301 1338 1302 mutex_lock(&mbox->lock); 1339 1303 /* NPA init */ ··· 1525 1489 sizeof(struct otx2_rcv_queue), GFP_KERNEL); 1526 1490 if (!qset->rq) 1527 1491 goto err_free_mem; 1492 + 1493 + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { 1494 + /* Reserve LMT lines for NPA AURA batch free */ 1495 + pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base; 1496 + /* Reserve LMT lines for NIX TX */ 1497 + pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base + 1498 + (NIX_LMTID_BASE * LMT_LINE_SIZE)); 1499 + } 1528 1500 1529 1501 err = otx2_init_hw_resources(pf); 1530 1502 if (err) ··· 2372 2328 goto 
err_free_netdev; 2373 2329 } 2374 2330 2331 + otx2_setup_dev_hw_settings(pf); 2332 + 2375 2333 /* Init PF <=> AF mailbox stuff */ 2376 2334 err = otx2_pfaf_mbox_init(pf); 2377 2335 if (err) ··· 2399 2353 if (err) 2400 2354 goto err_detach_rsrc; 2401 2355 2402 - otx2_setup_dev_hw_settings(pf); 2356 + err = cn10k_pf_lmtst_init(pf); 2357 + if (err) 2358 + goto err_detach_rsrc; 2403 2359 2404 2360 /* Assign default mac address */ 2405 2361 otx2_get_mac_from_af(netdev); ··· 2456 2408 2457 2409 /* MTU range: 64 - 9190 */ 2458 2410 netdev->min_mtu = OTX2_MIN_MTU; 2459 - netdev->max_mtu = OTX2_MAX_MTU; 2411 + netdev->max_mtu = otx2_get_max_mtu(pf); 2460 2412 2461 2413 err = register_netdev(netdev); 2462 2414 if (err) { ··· 2486 2438 err_ptp_destroy: 2487 2439 otx2_ptp_destroy(pf); 2488 2440 err_detach_rsrc: 2441 + if (hw->lmt_base) 2442 + iounmap(hw->lmt_base); 2489 2443 otx2_detach_resources(&pf->mbox); 2490 2444 err_disable_mbox_intr: 2491 2445 otx2_disable_mbox_intr(pf); ··· 2647 2597 otx2_ptp_destroy(pf); 2648 2598 otx2_mcam_flow_del(pf); 2649 2599 otx2_detach_resources(&pf->mbox); 2600 + if (pf->hw.lmt_base) 2601 + iounmap(pf->hw.lmt_base); 2602 + 2650 2603 otx2_disable_mbox_intr(pf); 2651 2604 otx2_pfaf_mbox_destroy(pf); 2652 2605 pci_free_irq_vectors(pf->pdev);
+4
drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
··· 44 44 #define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) 45 45 #define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) 46 46 #define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) 47 + #define RVU_PF_VF_MBOX_ADDR (0xC40) 48 + #define RVU_PF_LMTLINE_ADDR (0xC48) 47 49 48 50 /* RVU VF registers */ 49 51 #define RVU_VF_VFPF_MBOX0 (0x00000) ··· 59 57 #define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) 60 58 #define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) 61 59 #define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3) 60 + #define RVU_VF_MBOX_REGION (0xC0000) 62 61 63 62 #define RVU_FUNC_BLKADDR_SHIFT 20 64 63 #define RVU_FUNC_BLKADDR_MASK 0x1FULL ··· 94 91 #define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12) 95 92 #define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12) 96 93 #define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12) 94 + #define NPA_LF_AURA_BATCH_FREE0 (NPA_LFBASE | 0x400) 97 95 98 96 /* NIX LF registers */ 99 97 #define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
+7 -3
drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
··· 142 142 u64 vtag0_ptr : 8; /* W5 */ 143 143 u64 vtag1_ptr : 8; 144 144 u64 flow_key_alg : 5; 145 - u64 rsvd_383_341 : 43; 145 + u64 rsvd_359_341 : 19; 146 + u64 color : 2; 147 + u64 rsvd_383_362 : 22; 146 148 u64 rsvd_447_384; /* W6 */ 147 149 }; 148 150 ··· 220 218 u64 vlan1_ins_tci : 16; 221 219 u64 vlan0_ins_ena : 1; 222 220 u64 vlan1_ins_ena : 1; 223 - u64 rsvd_127_114 : 14; 221 + u64 init_color : 2; 222 + u64 rsvd_127_116 : 12; 224 223 }; 225 224 226 225 struct nix_sqe_sg_s { ··· 240 237 /* NIX send memory subdescriptor structure */ 241 238 struct nix_sqe_mem_s { 242 239 u64 offset : 16; /* W0 */ 243 - u64 rsvd_52_16 : 37; 240 + u64 rsvd_51_16 : 36; 241 + u64 per_lso_seg : 1; 244 242 u64 wmem : 1; 245 243 u64 dsz : 2; 246 244 u64 alg : 4;
+42 -30
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
··· 17 17 #include "otx2_struct.h" 18 18 #include "otx2_txrx.h" 19 19 #include "otx2_ptp.h" 20 + #include "cn10k.h" 20 21 21 22 #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) 22 23 ··· 200 199 sg = (struct nix_rx_sg_s *)start; 201 200 seg_addr = &sg->seg_addr; 202 201 for (seg = 0; seg < sg->segs; seg++, seg_addr++) 203 - otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL); 202 + pfvf->hw_ops->aura_freeptr(pfvf, qidx, 203 + *seg_addr & ~0x07ULL); 204 204 start += sizeof(*sg); 205 205 } 206 206 } ··· 257 255 /* For now ignore all the NPC parser errors and 258 256 * pass the packets to stack. 259 257 */ 260 - if (cqe->sg.segs == 1) 261 - return false; 258 + return false; 262 259 } 263 260 264 261 /* If RXALL is enabled pass on packets to stack. */ 265 - if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL)) 262 + if (pfvf->netdev->features & NETIF_F_RXALL) 266 263 return false; 267 264 268 265 /* Free buffer back to pool */ ··· 276 275 struct nix_cqe_rx_s *cqe) 277 276 { 278 277 struct nix_rx_parse_s *parse = &cqe->parse; 278 + struct nix_rx_sg_s *sg = &cqe->sg; 279 279 struct sk_buff *skb = NULL; 280 + void *end, *start; 281 + u64 *seg_addr; 282 + u16 *seg_size; 283 + int seg; 280 284 281 - if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) { 285 + if (unlikely(parse->errlev || parse->errcode)) { 282 286 if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) 283 287 return; 284 288 } ··· 292 286 if (unlikely(!skb)) 293 287 return; 294 288 295 - otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse); 296 - cq->pool_ptrs++; 297 - 289 + start = (void *)sg; 290 + end = start + ((cqe->parse.desc_sizem1 + 1) * 16); 291 + while (start < end) { 292 + sg = (struct nix_rx_sg_s *)start; 293 + seg_addr = &sg->seg_addr; 294 + seg_size = (void *)sg; 295 + for (seg = 0; seg < sg->segs; seg++, seg_addr++) { 296 + otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg], 297 + parse); 298 + cq->pool_ptrs++; 299 + } 300 + 
start += sizeof(*sg); 301 + } 298 302 otx2_set_rxhash(pfvf, cqe, skb); 299 303 300 304 skb_record_rx_queue(skb, cq->cq_idx); ··· 320 304 { 321 305 struct nix_cqe_rx_s *cqe; 322 306 int processed_cqe = 0; 323 - dma_addr_t bufptr; 324 307 325 308 while (likely(processed_cqe < budget)) { 326 309 cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head); ··· 345 330 346 331 if (unlikely(!cq->pool_ptrs)) 347 332 return 0; 348 - 349 333 /* Refill pool with new buffers */ 350 - while (cq->pool_ptrs) { 351 - if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, &bufptr))) { 352 - struct refill_work *work; 353 - struct delayed_work *dwork; 334 + pfvf->hw_ops->refill_pool_ptrs(pfvf, cq); 354 335 355 - work = &pfvf->refill_wrk[cq->cq_idx]; 356 - dwork = &work->pool_refill_work; 357 - /* Schedule a task if no other task is running */ 358 - if (!cq->refill_task_sched) { 359 - cq->refill_task_sched = true; 360 - schedule_delayed_work(dwork, 361 - msecs_to_jiffies(100)); 362 - } 336 + return processed_cqe; 337 + } 338 + 339 + void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) 340 + { 341 + struct otx2_nic *pfvf = dev; 342 + dma_addr_t bufptr; 343 + 344 + while (cq->pool_ptrs) { 345 + if (otx2_alloc_buffer(pfvf, cq, &bufptr)) 363 346 break; 364 - } 365 347 otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); 366 348 cq->pool_ptrs--; 367 349 } 368 - 369 - return processed_cqe; 370 350 } 371 351 372 352 static int otx2_tx_napi_handler(struct otx2_nic *pfvf, ··· 448 438 return workdone; 449 439 } 450 440 451 - static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size) 441 + void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, 442 + int size, int qidx) 452 443 { 453 444 u64 status; 454 445 ··· 807 796 sqe_hdr->sizem1 = (offset / 16) - 1; 808 797 809 798 /* Flush SQE to HW */ 810 - otx2_sqe_flush(sq, offset); 799 + pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); 811 800 } 812 801 } 813 802 ··· 816 805 { 817 806 int payload_len, last_seg_size; 818 807 819 - if 
(!pfvf->hw.hw_tso) 820 - return false; 821 808 822 809 /* HW has an issue due to which when the payload of the last LSO 823 810 * segment is shorter than 16 bytes, some header fields may not ··· 827 818 payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); 828 819 last_seg_size = payload_len % skb_shinfo(skb)->gso_size; 829 820 if (last_seg_size && last_seg_size < 16) 821 + return false; 822 + 823 + if (!test_bit(HW_TSO, &pfvf->hw.cap_flag)) 830 824 return false; 831 825 832 826 return true; ··· 926 914 netdev_tx_sent_queue(txq, skb->len); 927 915 928 916 /* Flush SQE to HW */ 929 - otx2_sqe_flush(sq, offset); 917 + pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); 930 918 931 919 return true; 932 920 }
+7 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
··· 24 24 25 25 #define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN) 26 26 #define OTX2_MIN_MTU 64 27 - #define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN) 28 27 29 28 #define OTX2_MAX_GSO_SEGS 255 30 29 #define OTX2_MAX_FRAGS_IN_SQE 9 ··· 113 114 struct otx2_pool { 114 115 struct qmem *stack; 115 116 struct qmem *fc_addr; 117 + u64 *lmt_addr; 116 118 u16 rbsize; 117 119 }; 118 120 ··· 156 156 int otx2_napi_handler(struct napi_struct *napi, int budget); 157 157 bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, 158 158 struct sk_buff *skb, u16 qidx); 159 + void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, 160 + int size, int qidx); 161 + void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, 162 + int size, int qidx); 163 + void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); 164 + void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); 159 165 #endif /* OTX2_TXRX_H */
+36 -16
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
··· 7 7 8 8 #include "otx2_common.h" 9 9 #include "otx2_reg.h" 10 + #include "cn10k.h" 10 11 11 - #define DRV_NAME "octeontx2-nicvf" 12 - #define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver" 12 + #define DRV_NAME "rvu_nicvf" 13 + #define DRV_STRING "Marvell RVU NIC Virtual Function Driver" 13 14 14 15 static const struct pci_device_id otx2_vf_id_table[] = { 15 16 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) }, ··· 278 277 vf->mbox_wq = NULL; 279 278 } 280 279 281 - if (mbox->mbox.hwbase) 280 + if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag)) 282 281 iounmap((void __iomem *)mbox->mbox.hwbase); 283 282 284 283 otx2_mbox_destroy(&mbox->mbox); ··· 298 297 if (!vf->mbox_wq) 299 298 return -ENOMEM; 300 299 301 - /* Mailbox is a reserved memory (in RAM) region shared between 302 - * admin function (i.e PF0) and this VF, shouldn't be mapped as 303 - * device memory to allow unaligned accesses. 304 - */ 305 - hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM), 306 - pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM)); 307 - if (!hwbase) { 308 - dev_err(vf->dev, "Unable to map VFAF mailbox region\n"); 309 - err = -ENOMEM; 310 - goto exit; 300 + if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) { 301 + /* For cn10k platform, VF mailbox region is in its BAR2 302 + * register space 303 + */ 304 + hwbase = vf->reg_base + RVU_VF_MBOX_REGION; 305 + } else { 306 + /* Mailbox is a reserved memory (in RAM) region shared between 307 + * admin function (i.e PF0) and this VF, shouldn't be mapped as 308 + * device memory to allow unaligned accesses. 
309 + */ 310 + hwbase = ioremap_wc(pci_resource_start(vf->pdev, 311 + PCI_MBOX_BAR_NUM), 312 + pci_resource_len(vf->pdev, 313 + PCI_MBOX_BAR_NUM)); 314 + if (!hwbase) { 315 + dev_err(vf->dev, "Unable to map VFAF mailbox region\n"); 316 + err = -ENOMEM; 317 + goto exit; 318 + } 311 319 } 312 320 313 321 err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base, ··· 339 329 340 330 return 0; 341 331 exit: 332 + if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag)) 333 + iounmap(hwbase); 342 334 destroy_workqueue(vf->mbox_wq); 343 335 return err; 344 336 } ··· 537 525 goto err_free_irq_vectors; 538 526 } 539 527 528 + otx2_setup_dev_hw_settings(vf); 540 529 /* Init VF <=> PF mailbox stuff */ 541 530 err = otx2vf_vfaf_mbox_init(vf); 542 531 if (err) ··· 561 548 if (err) 562 549 goto err_detach_rsrc; 563 550 564 - otx2_setup_dev_hw_settings(vf); 551 + err = cn10k_vf_lmtst_init(vf); 552 + if (err) 553 + goto err_detach_rsrc; 565 554 566 555 /* Assign default mac address */ 567 556 otx2_get_mac_from_af(netdev); ··· 586 571 587 572 /* MTU range: 68 - 9190 */ 588 573 netdev->min_mtu = OTX2_MIN_MTU; 589 - netdev->max_mtu = OTX2_MAX_MTU; 574 + netdev->max_mtu = otx2_get_max_mtu(vf); 590 575 591 576 INIT_WORK(&vf->reset_task, otx2vf_reset_task); 592 577 ··· 615 600 return 0; 616 601 617 602 err_detach_rsrc: 603 + if (hw->lmt_base) 604 + iounmap(hw->lmt_base); 618 605 otx2_detach_resources(&vf->mbox); 619 606 err_disable_mbox_intr: 620 607 otx2vf_disable_mbox_intr(vf); ··· 645 628 cancel_work_sync(&vf->reset_task); 646 629 unregister_netdev(netdev); 647 630 otx2vf_disable_mbox_intr(vf); 648 - 649 631 otx2_detach_resources(&vf->mbox); 632 + 633 + if (vf->hw.lmt_base) 634 + iounmap(vf->hw.lmt_base); 635 + 650 636 otx2vf_vfaf_mbox_destroy(vf); 651 637 pci_free_irq_vectors(vf->pdev); 652 638 pci_set_drvdata(pdev, NULL);
+8
include/linux/soc/marvell/octeontx2/asm.h
··· 22 22 : [rs]"r" (ioaddr)); \ 23 23 (result); \ 24 24 }) 25 + #define cn10k_lmt_flush(val, addr) \ 26 + ({ \ 27 + __asm__ volatile(".cpu generic+lse\n" \ 28 + "steor %x[rf],[%[rs]]" \ 29 + : [rf]"+r"(val) \ 30 + : [rs]"r"(addr)); \ 31 + }) 25 32 #else 26 33 #define otx2_lmt_flush(ioaddr) ({ 0; }) 34 + #define cn10k_lmt_flush(val, addr) ({ addr = val; }) 27 35 #endif 28 36 29 37 #endif /* __SOC_OTX2_ASM_H */