net: ocelot: add FDMA support

Ethernet frames can be extracted or injected autonomously to or from
the device’s DDR3/DDR3L memory and/or PCIe memory space. Linked list
data structures in memory are used for injecting or extracting Ethernet
frames. The FDMA generates interrupts when frame extraction or
injection is done and when the linked lists need updating.
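
For reference, each element of those linked lists is a DMA control block (DCB); the layout this patch adds in ocelot_fdma.h (see below) is:

	struct ocelot_fdma_dcb {
		u32 llp;	/* DMA address of the next DCB; 0 terminates the list */
		u32 datap;	/* DMA address of the data buffer, word aligned */
		u32 datal;	/* data buffer length, word aligned */
		u32 stat;	/* block offset/length plus SOF/EOF/PD/ABORT status */
	} __packed;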

The FDMA is shared between all the Ethernet ports of the switch and
uses a linked list of descriptors (DCBs) to inject and extract packets.
Before adding descriptors, the FDMA channel must be stopped. Since it
would be inefficient to do that each time a descriptor is added, the
channels are instead restarted only once they have stopped.
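
As a sketch of that restart rule, condensed from ocelot_fdma_tx_cleanup() in the patch below (identifiers are the ones this patch adds; error handling trimmed): the driver only touches the chain once the channel reports itself safe, then re-terminates the list at the last queued DCB and reactivates the channel at the first DCB still pending:

	/* Only reached after the FDMA followed a NULL LLP and stopped */
	if (ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN))
		return;	/* channel not safe yet, retry on the next cleanup */

	/* Terminate the chain at the last DCB that was queued... */
	idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
				   OCELOT_FDMA_TX_RING_SIZE);
	tx_ring->dcbs[idx].llp = 0;

	/* ...then restart the channel at the first unprocessed DCB */
	dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean);
	ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);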

Both channels use a ring-like structure to feed the DCBs to the FDMA.
The head and tail indices are never touched by the hardware and are
handled entirely by the driver. On top of that, page recycling has been
added, mostly taken from the gianfar driver.
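
The head/tail bookkeeping reduces to index arithmetic with explicit wraparound; a minimal restatement of the helpers added in ocelot_fdma.c below (one ring slot is kept unused so a full ring stays distinguishable from an empty one):

	/* Same logic as ocelot_fdma_idx_next()/ocelot_fdma_tx_ring_free() */
	static u16 idx_next(u16 idx, u16 ring_sz)
	{
		return idx == ring_sz - 1 ? 0 : idx + 1;
	}

	static int ring_free(u16 head, u16 tail, u16 ring_sz)
	{
		if (head >= tail)
			return ring_sz - (head - tail) - 1;

		return tail - head - 1;
	}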

Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Co-developed-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Clément Léger <clement.leger@bootlin.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Clément Léger, committed by Jakub Kicinski
753a026c de5841e1

+1095 -4
drivers/net/ethernet/mscc/Makefile (+1)
 mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
 obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
 mscc_ocelot-y := \
+	ocelot_fdma.o \
 	ocelot_vsc7514.o \
 	ocelot_net.o
drivers/net/ethernet/mscc/ocelot_fdma.c (new file, +894)
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Microsemi SoCs FDMA driver
 *
 * Copyright (c) 2021 Microchip
 *
 * Page recycling code is mostly taken from gianfar driver.
 */

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dsa/ocelot.h>
#include <linux/netdevice.h>
#include <linux/of_platform.h>
#include <linux/skbuff.h>

#include "ocelot_fdma.h"
#include "ocelot_qs.h"

DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled);

static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data)
{
	regmap_write(ocelot->targets[FDMA], reg, data);
}

static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg)
{
	u32 retval;

	regmap_read(ocelot->targets[FDMA], reg, &retval);

	return retval;
}

static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx)
{
	return base + idx * sizeof(struct ocelot_fdma_dcb);
}

static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma)
{
	return (dma - base) / sizeof(struct ocelot_fdma_dcb);
}

static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz)
{
	return unlikely(idx == ring_sz - 1) ? 0 : idx + 1;
}

static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz)
{
	return unlikely(idx == 0) ? ring_sz - 1 : idx - 1;
}

static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma)
{
	struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring;

	if (rx_ring->next_to_use >= rx_ring->next_to_clean)
		return OCELOT_FDMA_RX_RING_SIZE -
		       (rx_ring->next_to_use - rx_ring->next_to_clean) - 1;
	else
		return rx_ring->next_to_clean - rx_ring->next_to_use - 1;
}

static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma)
{
	struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;

	if (tx_ring->next_to_use >= tx_ring->next_to_clean)
		return OCELOT_FDMA_TX_RING_SIZE -
		       (tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
	else
		return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
}

static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma)
{
	struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;

	return tx_ring->next_to_clean == tx_ring->next_to_use;
}

static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
				      int chan)
{
	ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma);
	/* Barrier to force memory writes to DCB to be completed before starting
	 * the channel.
	 */
	wmb();
	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
}

static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
{
	unsigned long timeout;
	u32 safe;

	timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
	do {
		safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
		if (safe & BIT(chan))
			return 0;
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
				     dma_addr_t dma_addr,
				     size_t size)
{
	u32 offset = dma_addr & 0x3;

	dcb->llp = 0;
	dcb->datap = ALIGN_DOWN(dma_addr, 4);
	dcb->datal = ALIGN_DOWN(size, 4);
	dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset);
}

static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot,
				      struct ocelot_fdma_rx_buf *rxb)
{
	dma_addr_t mapping;
	struct page *page;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ocelot->dev, mapping))) {
		__free_page(page);
		return false;
	}

	rxb->page = page;
	rxb->page_offset = 0;
	rxb->dma_addr = mapping;

	return true;
}

static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_rx_ring *rx_ring;
	struct ocelot_fdma_rx_buf *rxb;
	struct ocelot_fdma_dcb *dcb;
	dma_addr_t dma_addr;
	int ret = 0;
	u16 idx;

	rx_ring = &fdma->rx_ring;
	idx = rx_ring->next_to_use;

	while (alloc_cnt--) {
		rxb = &rx_ring->bufs[idx];
		/* try reuse page */
		if (unlikely(!rxb->page)) {
			if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) {
				dev_err_ratelimited(ocelot->dev,
						    "Failed to allocate rx\n");
				ret = -ENOMEM;
				break;
			}
		}

		dcb = &rx_ring->dcbs[idx];
		dma_addr = rxb->dma_addr + rxb->page_offset;
		ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE);

		idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
		/* Chain the DCB to the next one */
		dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);
	}

	rx_ring->next_to_use = idx;
	rx_ring->next_to_alloc = idx;

	return ret;
}

static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot,
				       struct ocelot_fdma_tx_buf *tx_buf,
				       struct ocelot_fdma_dcb *dcb,
				       struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = dma_map_single(ocelot->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ocelot->dev, mapping)))
		return false;

	dma_unmap_addr_set(tx_buf, dma_addr, mapping);

	ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE);
	tx_buf->skb = skb;
	dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len);
	dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF;

	return true;
}

static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot)
{
	u32 llp;

	/* Check if the FDMA hits the DCB with LLP == NULL */
	llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN));
	if (unlikely(llp))
		return false;

	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE,
			   BIT(MSCC_FDMA_XTR_CHAN));

	return true;
}

static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring)
{
	struct ocelot_fdma_dcb *dcb;
	unsigned int idx;

	idx = ocelot_fdma_idx_prev(rx_ring->next_to_use,
				   OCELOT_FDMA_RX_RING_SIZE);
	dcb = &rx_ring->dcbs[idx];
	dcb->llp = 0;
}

static void ocelot_fdma_rx_restart(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_rx_ring *rx_ring;
	const u8 chan = MSCC_FDMA_XTR_CHAN;
	dma_addr_t new_llp, dma_base;
	unsigned int idx;
	u32 llp_prev;
	int ret;

	rx_ring = &fdma->rx_ring;
	ret = ocelot_fdma_wait_chan_safe(ocelot, chan);
	if (ret) {
		dev_err_ratelimited(ocelot->dev,
				    "Unable to stop RX channel\n");
		return;
	}

	ocelot_fdma_rx_set_llp(rx_ring);

	/* FDMA stopped on the last DCB that contained a NULL LLP, since
	 * we processed some DCBs in RX, there is free space, and we must set
	 * DCB_LLP to point to the next DCB
	 */
	llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan));
	dma_base = rx_ring->dcbs_dma;

	/* Get the next DMA addr located after LLP == NULL DCB */
	idx = ocelot_fdma_dma_idx(dma_base, llp_prev);
	idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
	new_llp = ocelot_fdma_idx_dma(dma_base, idx);

	/* Finally reactivate the channel */
	ocelot_fdma_activate_chan(ocelot, new_llp, chan);
}

static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat,
				    struct sk_buff *skb, bool first)
{
	int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat);
	struct page *page = rxb->page;

	if (likely(first)) {
		skb_put(skb, size);
	} else {
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				rxb->page_offset, size, OCELOT_FDMA_RX_SIZE);
	}

	/* Try to reuse page */
	if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page)))
		return false;

	/* Change offset to the other half */
	rxb->page_offset ^= OCELOT_FDMA_RX_SIZE;

	page_ref_inc(page);

	return true;
}

static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot,
				      struct ocelot_fdma_rx_buf *old_rxb)
{
	struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring;
	struct ocelot_fdma_rx_buf *new_rxb;

	new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc];
	rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc,
						      OCELOT_FDMA_RX_RING_SIZE);

	/* Copy page reference */
	*new_rxb = *old_rxb;

	/* Sync for use by the device */
	dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr,
					 old_rxb->page_offset,
					 OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE);
}

static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat,
					   struct ocelot_fdma_rx_buf *rxb,
					   struct sk_buff *skb)
{
	bool first = false;

	/* Allocate skb head and data */
	if (likely(!skb)) {
		void *buff_addr = page_address(rxb->page) +
				  rxb->page_offset;

		skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE);
		if (unlikely(!skb)) {
			dev_err_ratelimited(ocelot->dev,
					    "build_skb failed!\n");
			return NULL;
		}
		first = true;
	}

	dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr,
				      rxb->page_offset, OCELOT_FDMA_RX_SIZE,
				      DMA_FROM_DEVICE);

	if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) {
		/* Reuse the free half of the page for the next_to_alloc DCB */
		ocelot_fdma_reuse_rx_page(ocelot, rxb);
	} else {
		/* page cannot be reused, unmap it */
		dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

	/* clear rx buff content */
	rxb->page = NULL;

	return skb;
}

static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb)
{
	struct net_device *ndev;
	void *xfh = skb->data;
	u64 timestamp;
	u64 src_port;

	skb_pull(skb, OCELOT_TAG_LEN);

	ocelot_xfh_get_src_port(xfh, &src_port);
	if (unlikely(src_port >= ocelot->num_phys_ports))
		return false;

	ndev = ocelot_port_to_netdev(ocelot, src_port);
	if (unlikely(!ndev))
		return false;

	pskb_trim(skb, skb->len - ETH_FCS_LEN);

	skb->dev = ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	if (ocelot->ptp) {
		ocelot_xfh_get_rew_val(xfh, &timestamp);
		ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
	}

	if (likely(!skb_defer_rx_timestamp(skb)))
		netif_receive_skb(skb);

	return true;
}

static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_rx_ring *rx_ring;
	struct ocelot_fdma_rx_buf *rxb;
	struct ocelot_fdma_dcb *dcb;
	struct sk_buff *skb;
	int work_done = 0;
	int cleaned_cnt;
	u32 stat;
	u16 idx;

	cleaned_cnt = ocelot_fdma_rx_ring_free(fdma);
	rx_ring = &fdma->rx_ring;
	skb = rx_ring->skb;

	while (budget--) {
		idx = rx_ring->next_to_clean;
		dcb = &rx_ring->dcbs[idx];
		stat = dcb->stat;
		if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0)
			break;

		/* New packet is a start of frame but we already got a skb set,
		 * we probably lost an EOF packet, free skb
		 */
		if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) {
			dev_kfree_skb(skb);
			skb = NULL;
		}

		rxb = &rx_ring->bufs[idx];
		/* Fetch next to clean buffer from the rx_ring */
		skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb);
		if (unlikely(!skb))
			break;

		work_done++;
		cleaned_cnt++;

		idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
		rx_ring->next_to_clean = idx;

		if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT ||
			     stat & MSCC_FDMA_DCB_STAT_PD)) {
			dev_err_ratelimited(ocelot->dev,
					    "DCB aborted or pruned\n");
			dev_kfree_skb(skb);
			skb = NULL;
			continue;
		}

		/* We still need to process the other fragment of the packet
		 * before delivering it to the network stack
		 */
		if (!(stat & MSCC_FDMA_DCB_STAT_EOF))
			continue;

		if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb)))
			dev_kfree_skb(skb);

		skb = NULL;
	}

	rx_ring->skb = skb;

	if (cleaned_cnt)
		ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt);

	return work_done;
}

static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot)
{
	struct ocelot_port_private *priv;
	struct ocelot_port *ocelot_port;
	struct net_device *dev;
	int port;

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		ocelot_port = ocelot->ports[port];
		if (!ocelot_port)
			continue;
		priv = container_of(ocelot_port, struct ocelot_port_private,
				    port);
		dev = priv->dev;

		if (unlikely(netif_queue_stopped(dev)))
			netif_wake_queue(dev);
	}
}

static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_tx_ring *tx_ring;
	struct ocelot_fdma_tx_buf *buf;
	unsigned int new_null_llp_idx;
	struct ocelot_fdma_dcb *dcb;
	bool end_of_list = false;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 dcb_llp;
	u16 ntc;
	int ret;

	tx_ring = &fdma->tx_ring;

	/* Purge the TX packets that have been sent up to the NULL llp or the
	 * end of done list.
	 */
	while (!ocelot_fdma_tx_ring_empty(fdma)) {
		ntc = tx_ring->next_to_clean;
		dcb = &tx_ring->dcbs[ntc];
		if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD))
			break;

		buf = &tx_ring->bufs[ntc];
		skb = buf->skb;
		dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr),
				 skb->len, DMA_TO_DEVICE);
		napi_consume_skb(skb, budget);
		dcb_llp = dcb->llp;

		/* Only update after accessing all dcb fields */
		tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc,
							      OCELOT_FDMA_TX_RING_SIZE);

		/* If we hit the NULL LLP, stop, we might need to reload FDMA */
		if (dcb_llp == 0) {
			end_of_list = true;
			break;
		}
	}

	/* No need to try to wake if nothing was cleaned up. */
	if (ocelot_fdma_tx_ring_free(fdma))
		ocelot_fdma_wakeup_netdev(ocelot);

	/* If there are still some DCBs to be processed by the FDMA or if the
	 * pending list is empty, there is no need to restart the FDMA.
	 */
	if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma))
		return;

	ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN);
	if (ret) {
		dev_warn(ocelot->dev,
			 "Failed to wait for TX channel to stop\n");
		return;
	}

	/* Set NULL LLP to be the last DCB used */
	new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
						OCELOT_FDMA_TX_RING_SIZE);
	dcb = &tx_ring->dcbs[new_null_llp_idx];
	dcb->llp = 0;

	dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean);
	ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
}

static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget)
{
	struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi);
	struct ocelot *ocelot = fdma->ocelot;
	int work_done = 0;
	bool rx_stopped;

	ocelot_fdma_tx_cleanup(ocelot, budget);

	rx_stopped = ocelot_fdma_check_stop_rx(ocelot);

	work_done = ocelot_fdma_rx_get(ocelot, budget);

	if (rx_stopped)
		ocelot_fdma_rx_restart(ocelot);

	if (work_done < budget) {
		napi_complete_done(&fdma->napi, work_done);
		ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
				   BIT(MSCC_FDMA_INJ_CHAN) |
				   BIT(MSCC_FDMA_XTR_CHAN));
	}

	return work_done;
}

static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id)
{
	u32 ident, llp, frm, err, err_code;
	struct ocelot *ocelot = dev_id;

	ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT);
	frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM);
	llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP);

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident);
	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident);
	if (frm || llp) {
		ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
		napi_schedule(&ocelot->fdma->napi);
	}

	err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR);
	if (unlikely(err)) {
		err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE);
		dev_err_ratelimited(ocelot->dev,
				    "Error! chans mask: %#x, code: %#x\n",
				    err, err_code);

		ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err);
		ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code);
	}

	return IRQ_HANDLED;
}

static void ocelot_fdma_send_skb(struct ocelot *ocelot,
				 struct ocelot_fdma *fdma, struct sk_buff *skb)
{
	struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
	struct ocelot_fdma_tx_buf *tx_buf;
	struct ocelot_fdma_dcb *dcb;
	dma_addr_t dma;
	u16 next_idx;

	dcb = &tx_ring->dcbs[tx_ring->next_to_use];
	tx_buf = &tx_ring->bufs[tx_ring->next_to_use];
	if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) {
		dev_kfree_skb_any(skb);
		return;
	}

	next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use,
					OCELOT_FDMA_TX_RING_SIZE);
	skb_tx_timestamp(skb);

	/* If the FDMA TX chan is empty, then enqueue the DCB directly */
	if (ocelot_fdma_tx_ring_empty(fdma)) {
		dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma,
					  tx_ring->next_to_use);
		ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
	} else {
		/* Chain the DCBs */
		dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx);
	}

	tx_ring->next_to_use = next_idx;
}

static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
				   struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0);
	int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	void *ifh;
	int err;

	if (unlikely(needed_headroom || needed_tailroom ||
		     skb_header_cloned(skb))) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev_kfree_skb_any(skb);
			return 1;
		}
	}

	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n",
				    dev->name, err);
		dev_kfree_skb_any(skb);
		return 1;
	}

	ifh = skb_push(skb, OCELOT_TAG_LEN);
	skb_put(skb, ETH_FCS_LEN);
	memset(ifh, 0, OCELOT_TAG_LEN);
	ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));

	return 0;
}

int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
			     struct sk_buff *skb, struct net_device *dev)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	int ret = NETDEV_TX_OK;

	spin_lock(&fdma->tx_ring.xmit_lock);

	if (ocelot_fdma_tx_ring_free(fdma) == 0) {
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev))
		goto out;

	ocelot_fdma_send_skb(ocelot, fdma, skb);

out:
	spin_unlock(&fdma->tx_ring.xmit_lock);

	return ret;
}

static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_rx_ring *rx_ring;
	struct ocelot_fdma_rx_buf *rxb;
	u16 idx;

	rx_ring = &fdma->rx_ring;
	idx = rx_ring->next_to_clean;

	/* Free the pages held in the RX ring */
	while (idx != rx_ring->next_to_use) {
		rxb = &rx_ring->bufs[idx];
		dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		__free_page(rxb->page);
		idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
	}

	if (fdma->rx_ring.skb)
		dev_kfree_skb_any(fdma->rx_ring.skb);
}

static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_tx_ring *tx_ring;
	struct ocelot_fdma_tx_buf *txb;
	struct sk_buff *skb;
	u16 idx;

	tx_ring = &fdma->tx_ring;
	idx = tx_ring->next_to_clean;

	while (idx != tx_ring->next_to_use) {
		txb = &tx_ring->bufs[idx];
		skb = txb->skb;
		dma_unmap_single(ocelot->dev, txb->dma_addr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE);
	}
}

static int ocelot_fdma_rings_alloc(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_dcb *dcbs;
	unsigned int adjust;
	dma_addr_t dcbs_dma;
	int ret;

	/* Create a pool of consistent memory blocks for hardware descriptors */
	fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev,
					      OCELOT_DCBS_HW_ALLOC_SIZE,
					      &fdma->dcbs_dma_base, GFP_KERNEL);
	if (!fdma->dcbs_base)
		return -ENOMEM;

	/* DCBs must be aligned on a 32bit boundary */
	dcbs = fdma->dcbs_base;
	dcbs_dma = fdma->dcbs_dma_base;
	if (!IS_ALIGNED(dcbs_dma, 4)) {
		adjust = dcbs_dma & 0x3;
		dcbs_dma = ALIGN(dcbs_dma, 4);
		dcbs = (void *)dcbs + adjust;
	}

	/* TX queue */
	fdma->tx_ring.dcbs = dcbs;
	fdma->tx_ring.dcbs_dma = dcbs_dma;
	spin_lock_init(&fdma->tx_ring.xmit_lock);

	/* RX queue */
	fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE;
	fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE;
	ret = ocelot_fdma_alloc_rx_buffs(ocelot,
					 ocelot_fdma_tx_ring_free(fdma));
	if (ret) {
		ocelot_fdma_free_rx_ring(ocelot);
		return ret;
	}

	/* Set the last DCB LLP as NULL, this is normally done when restarting
	 * the RX chan, but this is for the first run
	 */
	ocelot_fdma_rx_set_llp(&fdma->rx_ring);

	return 0;
}

void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev)
{
	struct ocelot_fdma *fdma = ocelot->fdma;

	dev->needed_headroom = OCELOT_TAG_LEN;
	dev->needed_tailroom = ETH_FCS_LEN;

	if (fdma->ndev)
		return;

	fdma->ndev = dev;
	netif_napi_add(dev, &fdma->napi, ocelot_fdma_napi_poll,
		       OCELOT_FDMA_WEIGHT);
}

void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev)
{
	struct ocelot_fdma *fdma = ocelot->fdma;

	if (fdma->ndev == dev) {
		netif_napi_del(&fdma->napi);
		fdma->ndev = NULL;
	}
}

void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot)
{
	struct device *dev = ocelot->dev;
	struct ocelot_fdma *fdma;
	int ret;

	fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL);
	if (!fdma)
		return;

	ocelot->fdma = fdma;
	ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);

	fdma->ocelot = ocelot;
	fdma->irq = platform_get_irq_byname(pdev, "fdma");
	ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0,
			       dev_name(dev), ocelot);
	if (ret)
		goto err_free_fdma;

	ret = ocelot_fdma_rings_alloc(ocelot);
	if (ret)
		goto err_free_irq;

	static_branch_enable(&ocelot_fdma_enabled);

	return;

err_free_irq:
	devm_free_irq(dev, fdma->irq, ocelot);
err_free_fdma:
	devm_kfree(dev, fdma);

	ocelot->fdma = NULL;
}

void ocelot_fdma_start(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;

	/* Reconfigure for extraction and injection using DMA */
	ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0);
	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0);

	ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0);

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff);
	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff);

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA,
			   BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA,
			   BIT(MSCC_FDMA_XTR_CHAN));
	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
			   BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));

	napi_enable(&fdma->napi);

	ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma,
				  MSCC_FDMA_XTR_CHAN);
}

void ocelot_fdma_deinit(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
			   BIT(MSCC_FDMA_XTR_CHAN));
	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
			   BIT(MSCC_FDMA_INJ_CHAN));
	napi_synchronize(&fdma->napi);
	napi_disable(&fdma->napi);

	ocelot_fdma_free_rx_ring(ocelot);
	ocelot_fdma_free_tx_ring(ocelot);
}
drivers/net/ethernet/mscc/ocelot_fdma.h (new file, +166)
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
 * Microsemi SoCs FDMA driver
 *
 * Copyright (c) 2021 Microchip
 */
#ifndef _MSCC_OCELOT_FDMA_H_
#define _MSCC_OCELOT_FDMA_H_

#include "ocelot.h"

#define MSCC_FDMA_DCB_STAT_BLOCKO(x)	(((x) << 20) & GENMASK(31, 20))
#define MSCC_FDMA_DCB_STAT_BLOCKO_M	GENMASK(31, 20)
#define MSCC_FDMA_DCB_STAT_BLOCKO_X(x)	(((x) & GENMASK(31, 20)) >> 20)
#define MSCC_FDMA_DCB_STAT_PD		BIT(19)
#define MSCC_FDMA_DCB_STAT_ABORT	BIT(18)
#define MSCC_FDMA_DCB_STAT_EOF		BIT(17)
#define MSCC_FDMA_DCB_STAT_SOF		BIT(16)
#define MSCC_FDMA_DCB_STAT_BLOCKL_M	GENMASK(15, 0)
#define MSCC_FDMA_DCB_STAT_BLOCKL(x)	((x) & GENMASK(15, 0))

#define MSCC_FDMA_DCB_LLP(x)		((x) * 4 + 0x0)
#define MSCC_FDMA_DCB_LLP_PREV(x)	((x) * 4 + 0xA0)
#define MSCC_FDMA_CH_SAFE		0xcc
#define MSCC_FDMA_CH_ACTIVATE		0xd0
#define MSCC_FDMA_CH_DISABLE		0xd4
#define MSCC_FDMA_CH_FORCEDIS		0xd8
#define MSCC_FDMA_EVT_ERR		0x164
#define MSCC_FDMA_EVT_ERR_CODE		0x168
#define MSCC_FDMA_INTR_LLP		0x16c
#define MSCC_FDMA_INTR_LLP_ENA		0x170
#define MSCC_FDMA_INTR_FRM		0x174
#define MSCC_FDMA_INTR_FRM_ENA		0x178
#define MSCC_FDMA_INTR_ENA		0x184
#define MSCC_FDMA_INTR_IDENT		0x188

#define MSCC_FDMA_INJ_CHAN		2
#define MSCC_FDMA_XTR_CHAN		0

#define OCELOT_FDMA_WEIGHT		32

#define OCELOT_FDMA_CH_SAFE_TIMEOUT_US	10

#define OCELOT_FDMA_RX_RING_SIZE	512
#define OCELOT_FDMA_TX_RING_SIZE	128

#define OCELOT_FDMA_RX_DCB_SIZE		(OCELOT_FDMA_RX_RING_SIZE * \
					 sizeof(struct ocelot_fdma_dcb))
#define OCELOT_FDMA_TX_DCB_SIZE		(OCELOT_FDMA_TX_RING_SIZE * \
					 sizeof(struct ocelot_fdma_dcb))
/* +4 allows for word alignment after allocation */
#define OCELOT_DCBS_HW_ALLOC_SIZE	(OCELOT_FDMA_RX_DCB_SIZE + \
					 OCELOT_FDMA_TX_DCB_SIZE + \
					 4)

#define OCELOT_FDMA_RX_SIZE		(PAGE_SIZE / 2)

#define OCELOT_FDMA_SKBFRAG_OVR		(4 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define OCELOT_FDMA_RXB_SIZE		ALIGN_DOWN(OCELOT_FDMA_RX_SIZE - OCELOT_FDMA_SKBFRAG_OVR, 4)
#define OCELOT_FDMA_SKBFRAG_SIZE	(OCELOT_FDMA_RXB_SIZE + OCELOT_FDMA_SKBFRAG_OVR)

DECLARE_STATIC_KEY_FALSE(ocelot_fdma_enabled);

struct ocelot_fdma_dcb {
	u32 llp;
	u32 datap;
	u32 datal;
	u32 stat;
} __packed;

/**
 * struct ocelot_fdma_tx_buf - TX buffer structure
 * @skb: SKB currently used in the corresponding DCB.
 * @dma_addr: SKB DMA mapped address.
 */
struct ocelot_fdma_tx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

/**
 * struct ocelot_fdma_tx_ring - TX ring description of DCBs
 *
 * @dcbs: DCBs allocated for the ring
 * @dcbs_dma: DMA base address of the DCBs
 * @bufs: List of TX buffers associated with the DCBs
 * @xmit_lock: lock for concurrent xmit access
 * @next_to_clean: Next DCB to be cleaned in tx_cleanup
 * @next_to_use: Next available DCB to send SKB
 */
struct ocelot_fdma_tx_ring {
	struct ocelot_fdma_dcb *dcbs;
	dma_addr_t dcbs_dma;
	struct ocelot_fdma_tx_buf bufs[OCELOT_FDMA_TX_RING_SIZE];
	/* Protect concurrent xmit calls */
	spinlock_t xmit_lock;
	u16 next_to_clean;
	u16 next_to_use;
};

/**
 * struct ocelot_fdma_rx_buf - RX buffer structure
 * @page: Struct page used in this buffer
 * @page_offset: Current page offset (either 0 or PAGE_SIZE/2)
 * @dma_addr: DMA address of the page
 */
struct ocelot_fdma_rx_buf {
	struct page *page;
	u32 page_offset;
	dma_addr_t dma_addr;
};

/**
 * struct ocelot_fdma_rx_ring - RX ring description of DCBs
 *
 * @dcbs: DCBs allocated for the ring
 * @dcbs_dma: DMA base address of the DCBs
 * @bufs: List of RX buffers associated with the DCBs
 * @skb: SKB currently received by the netdev
 * @next_to_clean: Next DCB to be cleaned in NAPI polling
 * @next_to_use: Next available DCB to use
 * @next_to_alloc: Next buffer that needs to be allocated (page reuse or alloc)
 */
struct ocelot_fdma_rx_ring {
	struct ocelot_fdma_dcb *dcbs;
	dma_addr_t dcbs_dma;
	struct ocelot_fdma_rx_buf bufs[OCELOT_FDMA_RX_RING_SIZE];
	struct sk_buff *skb;
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;
};

/**
 * struct ocelot_fdma - FDMA context
 *
 * @irq: FDMA interrupt
 * @ndev: Net device used to initialize NAPI
 * @dcbs_base: Memory coherent DCBs
 * @dcbs_dma_base: DMA base address of memory coherent DCBs
 * @tx_ring: Injection ring
 * @rx_ring: Extraction ring
 * @napi: NAPI context
 * @ocelot: Back-pointer to ocelot struct
 */
struct ocelot_fdma {
	int irq;
	struct net_device *ndev;
	struct ocelot_fdma_dcb *dcbs_base;
	dma_addr_t dcbs_dma_base;
	struct ocelot_fdma_tx_ring tx_ring;
	struct ocelot_fdma_rx_ring rx_ring;
	struct napi_struct napi;
	struct ocelot *ocelot;
};

void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot);
void ocelot_fdma_start(struct ocelot *ocelot);
void ocelot_fdma_deinit(struct ocelot *ocelot);
int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
			     struct sk_buff *skb, struct net_device *dev);
void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev);
void ocelot_fdma_netdev_deinit(struct ocelot *ocelot,
			       struct net_device *dev);

#endif
drivers/net/ethernet/mscc/ocelot_net.c (+21 -4)
 #include <net/pkt_cls.h>
 #include "ocelot.h"
 #include "ocelot_vcap.h"
+#include "ocelot_fdma.h"

 #define OCELOT_MAC_QUIRKS	OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
···
 	int port = priv->chip_port;
 	u32 rew_op = 0;

-	if (!ocelot_can_inject(ocelot, 0))
+	if (!static_branch_unlikely(&ocelot_fdma_enabled) &&
+	    !ocelot_can_inject(ocelot, 0))
 		return NETDEV_TX_BUSY;

 	/* Check if timestamping is needed */
···
 		rew_op = ocelot_ptp_rew_op(skb);
 	}

-	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+	if (static_branch_unlikely(&ocelot_fdma_enabled)) {
+		ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev);
+	} else {
+		ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);

-	kfree_skb(skb);
+		consume_skb(skb);
+	}

 	return NETDEV_TX_OK;
 }
···
 	if (err)
 		goto out;

+	if (ocelot->fdma)
+		ocelot_fdma_netdev_init(ocelot, dev);
+
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(ocelot->dev, "register_netdev failed\n");
-		goto out;
+		goto out_fdma_deinit;
 	}

 	return 0;

+out_fdma_deinit:
+	if (ocelot->fdma)
+		ocelot_fdma_netdev_deinit(ocelot, dev);
 out:
 	ocelot->ports[port] = NULL;
 	free_netdev(dev);
···
 	struct ocelot_port_private *priv = container_of(ocelot_port,
 							struct ocelot_port_private,
 							port);
+	struct ocelot *ocelot = ocelot_port->ocelot;
+	struct ocelot_fdma *fdma = ocelot->fdma;

 	unregister_netdev(priv->dev);
+
+	if (fdma)
+		ocelot_fdma_netdev_deinit(ocelot, priv->dev);

 	if (priv->phylink) {
 		rtnl_lock();
drivers/net/ethernet/mscc/ocelot_vsc7514.c (+10)
 #include <soc/mscc/ocelot_vcap.h>
 #include <soc/mscc/ocelot_hsio.h>
 #include <soc/mscc/vsc7514_regs.h>
+#include "ocelot_fdma.h"
 #include "ocelot.h"

 #define VSC7514_VCAP_POLICER_BASE	128
···
 		{ S1, "s1" },
 		{ S2, "s2" },
 		{ PTP, "ptp", 1 },
+		{ FDMA, "fdma", 1 },
 	};

 	if (!np && !pdev->dev.platform_data)
···
 		ocelot->targets[io_target[i].id] = target;
 	}
+
+	if (ocelot->targets[FDMA])
+		ocelot_fdma_init(pdev, ocelot);

 	hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
 	if (IS_ERR(hsio)) {
···
 	if (err)
 		goto out_ocelot_devlink_unregister;

+	if (ocelot->fdma)
+		ocelot_fdma_start(ocelot);
+
 	err = ocelot_devlink_sb_register(ocelot);
 	if (err)
 		goto out_ocelot_release_ports;
···
 {
 	struct ocelot *ocelot = platform_get_drvdata(pdev);

+	if (ocelot->fdma)
+		ocelot_fdma_deinit(ocelot);
 	devlink_unregister(ocelot->devlink);
 	ocelot_deinit_timestamp(ocelot);
 	ocelot_devlink_sb_unregister(ocelot);
include/soc/mscc/ocelot.h (+3)
 	S2,
 	HSIO,
 	PTP,
+	FDMA,
 	GCB,
 	DEV_GMII,
 	TARGET_MAX,
···
 	/* Protects the PTP clock */
 	spinlock_t ptp_clock_lock;
 	struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM];
+
+	struct ocelot_fdma *fdma;
 };

 struct ocelot_policer {