Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-stmmac-Stop-using-hard-coded-callbacks'

Jose Abreu says:

====================
net: stmmac: Stop using hard-coded callbacks

This is a starting point for a cleanup and re-organization of stmmac.

In this series we stop using hard-coded callbacks along the code and use
instead helpers which are defined in a single place ("hwif.h").

This brings several advantages:
1) Less typing :)
2) Guaranteed function pointer check
3) More flexibility

By 2) we stop using the repeated pattern of:
if (priv->hw->mac->some_func)
priv->hw->mac->some_func(...)

I didn't check but I expect the final .ko will be bigger with this series
because *all* of the function pointers are checked.

Anyway, I hope this can make the code more readable and more flexible now.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+726 -577
+16 -18
drivers/net/ethernet/stmicro/stmmac/chain_mode.c
··· 24 24 25 25 #include "stmmac.h" 26 26 27 - static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 27 + static int jumbo_frm(void *p, struct sk_buff *skb, int csum) 28 28 { 29 29 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; 30 30 unsigned int nopaged_len = skb_headlen(skb); ··· 51 51 tx_q->tx_skbuff_dma[entry].buf = des2; 52 52 tx_q->tx_skbuff_dma[entry].len = bmax; 53 53 /* do not close the descriptor and do not set own bit */ 54 - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, 55 - 0, false, skb->len); 54 + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE, 55 + 0, false, skb->len); 56 56 57 57 while (len != 0) { 58 58 tx_q->tx_skbuff[entry] = NULL; ··· 68 68 return -1; 69 69 tx_q->tx_skbuff_dma[entry].buf = des2; 70 70 tx_q->tx_skbuff_dma[entry].len = bmax; 71 - priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, 72 - STMMAC_CHAIN_MODE, 1, 73 - false, skb->len); 71 + stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum, 72 + STMMAC_CHAIN_MODE, 1, false, skb->len); 74 73 len -= bmax; 75 74 i++; 76 75 } else { ··· 82 83 tx_q->tx_skbuff_dma[entry].buf = des2; 83 84 tx_q->tx_skbuff_dma[entry].len = len; 84 85 /* last descriptor can be set now */ 85 - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, 86 - STMMAC_CHAIN_MODE, 1, 87 - true, skb->len); 86 + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, 87 + STMMAC_CHAIN_MODE, 1, true, skb->len); 88 88 len = 0; 89 89 } 90 90 } ··· 93 95 return entry; 94 96 } 95 97 96 - static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) 98 + static unsigned int is_jumbo_frm(int len, int enh_desc) 97 99 { 98 100 unsigned int ret = 0; 99 101 ··· 105 107 return ret; 106 108 } 107 109 108 - static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, 110 + static void init_dma_chain(void *des, dma_addr_t phy_addr, 109 111 unsigned int size, unsigned int extend_desc) 110 112 { 111 113 /* ··· 135 137 } 136 138 } 137 139 138 - static void 
stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) 140 + static void refill_desc3(void *priv_ptr, struct dma_desc *p) 139 141 { 140 142 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr; 141 143 struct stmmac_priv *priv = rx_q->priv_data; ··· 151 153 sizeof(struct dma_desc))); 152 154 } 153 155 154 - static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) 156 + static void clean_desc3(void *priv_ptr, struct dma_desc *p) 155 157 { 156 158 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; 157 159 struct stmmac_priv *priv = tx_q->priv_data; ··· 169 171 } 170 172 171 173 const struct stmmac_mode_ops chain_mode_ops = { 172 - .init = stmmac_init_dma_chain, 173 - .is_jumbo_frm = stmmac_is_jumbo_frm, 174 - .jumbo_frm = stmmac_jumbo_frm, 175 - .refill_desc3 = stmmac_refill_desc3, 176 - .clean_desc3 = stmmac_clean_desc3, 174 + .init = init_dma_chain, 175 + .is_jumbo_frm = is_jumbo_frm, 176 + .jumbo_frm = jumbo_frm, 177 + .refill_desc3 = refill_desc3, 178 + .clean_desc3 = clean_desc3, 177 179 };
+1 -198
drivers/net/ethernet/stmicro/stmmac/common.h
··· 32 32 #endif 33 33 34 34 #include "descs.h" 35 + #include "hwif.h" 35 36 #include "mmc.h" 36 37 37 38 /* Synopsys Core versions */ ··· 378 377 379 378 #define JUMBO_LEN 9000 380 379 381 - /* Descriptors helpers */ 382 - struct stmmac_desc_ops { 383 - /* DMA RX descriptor ring initialization */ 384 - void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode, 385 - int end); 386 - /* DMA TX descriptor ring initialization */ 387 - void (*init_tx_desc) (struct dma_desc *p, int mode, int end); 388 - 389 - /* Invoked by the xmit function to prepare the tx descriptor */ 390 - void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, 391 - bool csum_flag, int mode, bool tx_own, 392 - bool ls, unsigned int tot_pkt_len); 393 - void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, 394 - int len2, bool tx_own, bool ls, 395 - unsigned int tcphdrlen, 396 - unsigned int tcppayloadlen); 397 - /* Set/get the owner of the descriptor */ 398 - void (*set_tx_owner) (struct dma_desc *p); 399 - int (*get_tx_owner) (struct dma_desc *p); 400 - /* Clean the tx descriptor as soon as the tx irq is received */ 401 - void (*release_tx_desc) (struct dma_desc *p, int mode); 402 - /* Clear interrupt on tx frame completion. 
When this bit is 403 - * set an interrupt happens as soon as the frame is transmitted */ 404 - void (*set_tx_ic)(struct dma_desc *p); 405 - /* Last tx segment reports the transmit status */ 406 - int (*get_tx_ls) (struct dma_desc *p); 407 - /* Return the transmit status looking at the TDES1 */ 408 - int (*tx_status) (void *data, struct stmmac_extra_stats *x, 409 - struct dma_desc *p, void __iomem *ioaddr); 410 - /* Get the buffer size from the descriptor */ 411 - int (*get_tx_len) (struct dma_desc *p); 412 - /* Handle extra events on specific interrupts hw dependent */ 413 - void (*set_rx_owner) (struct dma_desc *p); 414 - /* Get the receive frame size */ 415 - int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type); 416 - /* Return the reception status looking at the RDES1 */ 417 - int (*rx_status) (void *data, struct stmmac_extra_stats *x, 418 - struct dma_desc *p); 419 - void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x, 420 - struct dma_extended_desc *p); 421 - /* Set tx timestamp enable bit */ 422 - void (*enable_tx_timestamp) (struct dma_desc *p); 423 - /* get tx timestamp status */ 424 - int (*get_tx_timestamp_status) (struct dma_desc *p); 425 - /* get timestamp value */ 426 - u64(*get_timestamp) (void *desc, u32 ats); 427 - /* get rx timestamp status */ 428 - int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); 429 - /* Display ring */ 430 - void (*display_ring)(void *head, unsigned int size, bool rx); 431 - /* set MSS via context descriptor */ 432 - void (*set_mss)(struct dma_desc *p, unsigned int mss); 433 - }; 434 - 435 380 extern const struct stmmac_desc_ops enh_desc_ops; 436 381 extern const struct stmmac_desc_ops ndesc_ops; 437 382 438 - /* Specific DMA helpers */ 439 - struct stmmac_dma_ops { 440 - /* DMA core initialization */ 441 - int (*reset)(void __iomem *ioaddr); 442 - void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, 443 - u32 dma_tx, u32 dma_rx, int atds); 444 - void (*init_chan)(void 
__iomem *ioaddr, 445 - struct stmmac_dma_cfg *dma_cfg, u32 chan); 446 - void (*init_rx_chan)(void __iomem *ioaddr, 447 - struct stmmac_dma_cfg *dma_cfg, 448 - u32 dma_rx_phy, u32 chan); 449 - void (*init_tx_chan)(void __iomem *ioaddr, 450 - struct stmmac_dma_cfg *dma_cfg, 451 - u32 dma_tx_phy, u32 chan); 452 - /* Configure the AXI Bus Mode Register */ 453 - void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); 454 - /* Dump DMA registers */ 455 - void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); 456 - /* Set tx/rx threshold in the csr6 register 457 - * An invalid value enables the store-and-forward mode */ 458 - void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, 459 - int rxfifosz); 460 - void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, 461 - int fifosz, u8 qmode); 462 - void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, 463 - int fifosz, u8 qmode); 464 - /* To track extra statistic (if supported) */ 465 - void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, 466 - void __iomem *ioaddr); 467 - void (*enable_dma_transmission) (void __iomem *ioaddr); 468 - void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); 469 - void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); 470 - void (*start_tx)(void __iomem *ioaddr, u32 chan); 471 - void (*stop_tx)(void __iomem *ioaddr, u32 chan); 472 - void (*start_rx)(void __iomem *ioaddr, u32 chan); 473 - void (*stop_rx)(void __iomem *ioaddr, u32 chan); 474 - int (*dma_interrupt) (void __iomem *ioaddr, 475 - struct stmmac_extra_stats *x, u32 chan); 476 - /* If supported then get the optional core features */ 477 - void (*get_hw_feature)(void __iomem *ioaddr, 478 - struct dma_features *dma_cap); 479 - /* Program the HW RX Watchdog */ 480 - void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); 481 - void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); 482 - void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); 483 - void 
(*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 484 - void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 485 - void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); 486 - }; 487 - 488 383 struct mac_device_info; 489 - 490 - /* Helpers to program the MAC core */ 491 - struct stmmac_ops { 492 - /* MAC core initialization */ 493 - void (*core_init)(struct mac_device_info *hw, struct net_device *dev); 494 - /* Enable the MAC RX/TX */ 495 - void (*set_mac)(void __iomem *ioaddr, bool enable); 496 - /* Enable and verify that the IPC module is supported */ 497 - int (*rx_ipc)(struct mac_device_info *hw); 498 - /* Enable RX Queues */ 499 - void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); 500 - /* RX Queues Priority */ 501 - void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); 502 - /* TX Queues Priority */ 503 - void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); 504 - /* RX Queues Routing */ 505 - void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, 506 - u32 queue); 507 - /* Program RX Algorithms */ 508 - void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); 509 - /* Program TX Algorithms */ 510 - void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); 511 - /* Set MTL TX queues weight */ 512 - void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, 513 - u32 weight, u32 queue); 514 - /* RX MTL queue to RX dma mapping */ 515 - void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); 516 - /* Configure AV Algorithm */ 517 - void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, 518 - u32 idle_slope, u32 high_credit, u32 low_credit, 519 - u32 queue); 520 - /* Dump MAC registers */ 521 - void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); 522 - /* Handle extra events on specific interrupts hw dependent */ 523 - int (*host_irq_status)(struct mac_device_info *hw, 524 - struct 
stmmac_extra_stats *x); 525 - /* Handle MTL interrupts */ 526 - int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); 527 - /* Multicast filter setting */ 528 - void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); 529 - /* Flow control setting */ 530 - void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, 531 - unsigned int fc, unsigned int pause_time, u32 tx_cnt); 532 - /* Set power management mode (e.g. magic frame) */ 533 - void (*pmt)(struct mac_device_info *hw, unsigned long mode); 534 - /* Set/Get Unicast MAC addresses */ 535 - void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, 536 - unsigned int reg_n); 537 - void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, 538 - unsigned int reg_n); 539 - void (*set_eee_mode)(struct mac_device_info *hw, 540 - bool en_tx_lpi_clockgating); 541 - void (*reset_eee_mode)(struct mac_device_info *hw); 542 - void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); 543 - void (*set_eee_pls)(struct mac_device_info *hw, int link); 544 - void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, 545 - u32 rx_queues, u32 tx_queues); 546 - /* PCS calls */ 547 - void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, 548 - bool loopback); 549 - void (*pcs_rane)(void __iomem *ioaddr, bool restart); 550 - void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); 551 - /* Safety Features */ 552 - int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp); 553 - bool (*safety_feat_irq_status)(struct net_device *ndev, 554 - void __iomem *ioaddr, unsigned int asp, 555 - struct stmmac_safety_stats *stats); 556 - const char *(*safety_feat_dump)(struct stmmac_safety_stats *stats, 557 - int index, unsigned long *count); 558 - }; 559 - 560 - /* PTP and HW Timer helpers */ 561 - struct stmmac_hwtimestamp { 562 - void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); 563 - u32 (*config_sub_second_increment)(void 
__iomem *ioaddr, u32 ptp_clock, 564 - int gmac4); 565 - int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); 566 - int (*config_addend) (void __iomem *ioaddr, u32 addend); 567 - int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, 568 - int add_sub, int gmac4); 569 - u64(*get_systime) (void __iomem *ioaddr); 570 - }; 571 384 572 385 extern const struct stmmac_hwtimestamp stmmac_ptp; 573 386 extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; ··· 403 588 unsigned int reg_mask; /* MII reg mask */ 404 589 unsigned int clk_csr_shift; 405 590 unsigned int clk_csr_mask; 406 - }; 407 - 408 - /* Helpers to manage the descriptors for chain and ring modes */ 409 - struct stmmac_mode_ops { 410 - void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, 411 - unsigned int extend_desc); 412 - unsigned int (*is_jumbo_frm) (int len, int ehn_desc); 413 - int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); 414 - int (*set_16kib_bfsize)(int mtu); 415 - void (*init_desc3)(struct dma_desc *p); 416 - void (*refill_desc3) (void *priv, struct dma_desc *p); 417 - void (*clean_desc3) (void *priv, struct dma_desc *p); 418 591 }; 419 592 420 593 struct mac_device_info {
+2 -2
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
··· 223 223 return 0; 224 224 } 225 225 226 - static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) 226 + static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) 227 227 { 228 228 struct dma_desc *p = (struct dma_desc *)desc; 229 229 u64 ns; ··· 232 232 /* convert high/sec time stamp value to nanosecond */ 233 233 ns += le32_to_cpu(p->des1) * 1000000000ULL; 234 234 235 - return ns; 235 + *ts = ns; 236 236 } 237 237 238 238 static int dwmac4_rx_check_timestamp(void *desc)
+11 -8
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
··· 237 237 return 0; 238 238 } 239 239 240 - bool dwmac5_safety_feat_irq_status(struct net_device *ndev, 240 + int dwmac5_safety_feat_irq_status(struct net_device *ndev, 241 241 void __iomem *ioaddr, unsigned int asp, 242 242 struct stmmac_safety_stats *stats) 243 243 { 244 - bool ret = false, err, corr; 244 + bool err, corr; 245 245 u32 mtl, dma; 246 + int ret = 0; 246 247 247 248 if (!asp) 248 - return false; 249 + return -EINVAL; 249 250 250 251 mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS); 251 252 dma = readl(ioaddr + DMA_SAFETY_INT_STATUS); ··· 283 282 { dwmac5_dma_errors }, 284 283 }; 285 284 286 - const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, 287 - int index, unsigned long *count) 285 + int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, 286 + int index, unsigned long *count, const char **desc) 288 287 { 289 288 int module = index / 32, offset = index % 32; 290 289 unsigned long *ptr = (unsigned long *)stats; 291 290 292 291 if (module >= ARRAY_SIZE(dwmac5_all_errors)) 293 - return NULL; 292 + return -EINVAL; 294 293 if (!dwmac5_all_errors[module].desc[offset].valid) 295 - return NULL; 294 + return -EINVAL; 296 295 if (count) 297 296 *count = *(ptr + index); 298 - return dwmac5_all_errors[module].desc[offset].desc; 297 + if (desc) 298 + *desc = dwmac5_all_errors[module].desc[offset].desc; 299 + return 0; 299 300 }
+3 -3
drivers/net/ethernet/stmicro/stmmac/dwmac5.h
··· 43 43 #define DMA_ECC_INT_STATUS 0x00001088 44 44 45 45 int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); 46 - bool dwmac5_safety_feat_irq_status(struct net_device *ndev, 46 + int dwmac5_safety_feat_irq_status(struct net_device *ndev, 47 47 void __iomem *ioaddr, unsigned int asp, 48 48 struct stmmac_safety_stats *stats); 49 - const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, 50 - int index, unsigned long *count); 49 + int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, 50 + int index, unsigned long *count, const char **desc); 51 51 52 52 #endif /* __DWMAC5_H__ */
+2 -2
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
··· 382 382 return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17; 383 383 } 384 384 385 - static u64 enh_desc_get_timestamp(void *desc, u32 ats) 385 + static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts) 386 386 { 387 387 u64 ns; 388 388 ··· 397 397 ns += le32_to_cpu(p->des3) * 1000000000ULL; 398 398 } 399 399 400 - return ns; 400 + *ts = ns; 401 401 } 402 402 403 403 static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+421
drivers/net/ethernet/stmicro/stmmac/hwif.h
··· 1 + // SPDX-License-Identifier: (GPL-2.0 OR MIT) 2 + // Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. 3 + // stmmac HW Interface Callbacks 4 + 5 + #ifndef __STMMAC_HWIF_H__ 6 + #define __STMMAC_HWIF_H__ 7 + 8 + #define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \ 9 + ({ \ 10 + int __result = -EINVAL; \ 11 + if ((__priv)->hw->__module->__cname) { \ 12 + (__priv)->hw->__module->__cname((__arg0), ##__args); \ 13 + __result = 0; \ 14 + } \ 15 + __result; \ 16 + }) 17 + #define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) \ 18 + ({ \ 19 + int __result = -EINVAL; \ 20 + if ((__priv)->hw->__module->__cname) \ 21 + __result = (__priv)->hw->__module->__cname((__arg0), ##__args); \ 22 + __result; \ 23 + }) 24 + 25 + struct stmmac_extra_stats; 26 + struct stmmac_safety_stats; 27 + struct dma_desc; 28 + struct dma_extended_desc; 29 + 30 + /* Descriptors helpers */ 31 + struct stmmac_desc_ops { 32 + /* DMA RX descriptor ring initialization */ 33 + void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, 34 + int end); 35 + /* DMA TX descriptor ring initialization */ 36 + void (*init_tx_desc)(struct dma_desc *p, int mode, int end); 37 + /* Invoked by the xmit function to prepare the tx descriptor */ 38 + void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len, 39 + bool csum_flag, int mode, bool tx_own, bool ls, 40 + unsigned int tot_pkt_len); 41 + void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, 42 + int len2, bool tx_own, bool ls, unsigned int tcphdrlen, 43 + unsigned int tcppayloadlen); 44 + /* Set/get the owner of the descriptor */ 45 + void (*set_tx_owner)(struct dma_desc *p); 46 + int (*get_tx_owner)(struct dma_desc *p); 47 + /* Clean the tx descriptor as soon as the tx irq is received */ 48 + void (*release_tx_desc)(struct dma_desc *p, int mode); 49 + /* Clear interrupt on tx frame completion. 
When this bit is 50 + * set an interrupt happens as soon as the frame is transmitted */ 51 + void (*set_tx_ic)(struct dma_desc *p); 52 + /* Last tx segment reports the transmit status */ 53 + int (*get_tx_ls)(struct dma_desc *p); 54 + /* Return the transmit status looking at the TDES1 */ 55 + int (*tx_status)(void *data, struct stmmac_extra_stats *x, 56 + struct dma_desc *p, void __iomem *ioaddr); 57 + /* Get the buffer size from the descriptor */ 58 + int (*get_tx_len)(struct dma_desc *p); 59 + /* Handle extra events on specific interrupts hw dependent */ 60 + void (*set_rx_owner)(struct dma_desc *p); 61 + /* Get the receive frame size */ 62 + int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type); 63 + /* Return the reception status looking at the RDES1 */ 64 + int (*rx_status)(void *data, struct stmmac_extra_stats *x, 65 + struct dma_desc *p); 66 + void (*rx_extended_status)(void *data, struct stmmac_extra_stats *x, 67 + struct dma_extended_desc *p); 68 + /* Set tx timestamp enable bit */ 69 + void (*enable_tx_timestamp) (struct dma_desc *p); 70 + /* get tx timestamp status */ 71 + int (*get_tx_timestamp_status) (struct dma_desc *p); 72 + /* get timestamp value */ 73 + void (*get_timestamp)(void *desc, u32 ats, u64 *ts); 74 + /* get rx timestamp status */ 75 + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); 76 + /* Display ring */ 77 + void (*display_ring)(void *head, unsigned int size, bool rx); 78 + /* set MSS via context descriptor */ 79 + void (*set_mss)(struct dma_desc *p, unsigned int mss); 80 + }; 81 + 82 + #define stmmac_init_rx_desc(__priv, __args...) \ 83 + stmmac_do_void_callback(__priv, desc, init_rx_desc, __args) 84 + #define stmmac_init_tx_desc(__priv, __args...) \ 85 + stmmac_do_void_callback(__priv, desc, init_tx_desc, __args) 86 + #define stmmac_prepare_tx_desc(__priv, __args...) \ 87 + stmmac_do_void_callback(__priv, desc, prepare_tx_desc, __args) 88 + #define stmmac_prepare_tso_tx_desc(__priv, __args...) 
\ 89 + stmmac_do_void_callback(__priv, desc, prepare_tso_tx_desc, __args) 90 + #define stmmac_set_tx_owner(__priv, __args...) \ 91 + stmmac_do_void_callback(__priv, desc, set_tx_owner, __args) 92 + #define stmmac_get_tx_owner(__priv, __args...) \ 93 + stmmac_do_callback(__priv, desc, get_tx_owner, __args) 94 + #define stmmac_release_tx_desc(__priv, __args...) \ 95 + stmmac_do_void_callback(__priv, desc, release_tx_desc, __args) 96 + #define stmmac_set_tx_ic(__priv, __args...) \ 97 + stmmac_do_void_callback(__priv, desc, set_tx_ic, __args) 98 + #define stmmac_get_tx_ls(__priv, __args...) \ 99 + stmmac_do_callback(__priv, desc, get_tx_ls, __args) 100 + #define stmmac_tx_status(__priv, __args...) \ 101 + stmmac_do_callback(__priv, desc, tx_status, __args) 102 + #define stmmac_get_tx_len(__priv, __args...) \ 103 + stmmac_do_callback(__priv, desc, get_tx_len, __args) 104 + #define stmmac_set_rx_owner(__priv, __args...) \ 105 + stmmac_do_void_callback(__priv, desc, set_rx_owner, __args) 106 + #define stmmac_get_rx_frame_len(__priv, __args...) \ 107 + stmmac_do_callback(__priv, desc, get_rx_frame_len, __args) 108 + #define stmmac_rx_status(__priv, __args...) \ 109 + stmmac_do_callback(__priv, desc, rx_status, __args) 110 + #define stmmac_rx_extended_status(__priv, __args...) \ 111 + stmmac_do_void_callback(__priv, desc, rx_extended_status, __args) 112 + #define stmmac_enable_tx_timestamp(__priv, __args...) \ 113 + stmmac_do_void_callback(__priv, desc, enable_tx_timestamp, __args) 114 + #define stmmac_get_tx_timestamp_status(__priv, __args...) \ 115 + stmmac_do_callback(__priv, desc, get_tx_timestamp_status, __args) 116 + #define stmmac_get_timestamp(__priv, __args...) \ 117 + stmmac_do_void_callback(__priv, desc, get_timestamp, __args) 118 + #define stmmac_get_rx_timestamp_status(__priv, __args...) \ 119 + stmmac_do_callback(__priv, desc, get_rx_timestamp_status, __args) 120 + #define stmmac_display_ring(__priv, __args...) 
\ 121 + stmmac_do_void_callback(__priv, desc, display_ring, __args) 122 + #define stmmac_set_mss(__priv, __args...) \ 123 + stmmac_do_void_callback(__priv, desc, set_mss, __args) 124 + 125 + struct stmmac_dma_cfg; 126 + struct dma_features; 127 + 128 + /* Specific DMA helpers */ 129 + struct stmmac_dma_ops { 130 + /* DMA core initialization */ 131 + int (*reset)(void __iomem *ioaddr); 132 + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, 133 + u32 dma_tx, u32 dma_rx, int atds); 134 + void (*init_chan)(void __iomem *ioaddr, 135 + struct stmmac_dma_cfg *dma_cfg, u32 chan); 136 + void (*init_rx_chan)(void __iomem *ioaddr, 137 + struct stmmac_dma_cfg *dma_cfg, 138 + u32 dma_rx_phy, u32 chan); 139 + void (*init_tx_chan)(void __iomem *ioaddr, 140 + struct stmmac_dma_cfg *dma_cfg, 141 + u32 dma_tx_phy, u32 chan); 142 + /* Configure the AXI Bus Mode Register */ 143 + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); 144 + /* Dump DMA registers */ 145 + void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); 146 + /* Set tx/rx threshold in the csr6 register 147 + * An invalid value enables the store-and-forward mode */ 148 + void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, 149 + int rxfifosz); 150 + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, 151 + int fifosz, u8 qmode); 152 + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, 153 + int fifosz, u8 qmode); 154 + /* To track extra statistic (if supported) */ 155 + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, 156 + void __iomem *ioaddr); 157 + void (*enable_dma_transmission) (void __iomem *ioaddr); 158 + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); 159 + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); 160 + void (*start_tx)(void __iomem *ioaddr, u32 chan); 161 + void (*stop_tx)(void __iomem *ioaddr, u32 chan); 162 + void (*start_rx)(void __iomem *ioaddr, u32 chan); 163 + void (*stop_rx)(void __iomem 
*ioaddr, u32 chan); 164 + int (*dma_interrupt) (void __iomem *ioaddr, 165 + struct stmmac_extra_stats *x, u32 chan); 166 + /* If supported then get the optional core features */ 167 + void (*get_hw_feature)(void __iomem *ioaddr, 168 + struct dma_features *dma_cap); 169 + /* Program the HW RX Watchdog */ 170 + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); 171 + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); 172 + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); 173 + void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 174 + void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 175 + void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); 176 + }; 177 + 178 + #define stmmac_reset(__priv, __args...) \ 179 + stmmac_do_callback(__priv, dma, reset, __args) 180 + #define stmmac_dma_init(__priv, __args...) \ 181 + stmmac_do_void_callback(__priv, dma, init, __args) 182 + #define stmmac_init_chan(__priv, __args...) \ 183 + stmmac_do_void_callback(__priv, dma, init_chan, __args) 184 + #define stmmac_init_rx_chan(__priv, __args...) \ 185 + stmmac_do_void_callback(__priv, dma, init_rx_chan, __args) 186 + #define stmmac_init_tx_chan(__priv, __args...) \ 187 + stmmac_do_void_callback(__priv, dma, init_tx_chan, __args) 188 + #define stmmac_axi(__priv, __args...) \ 189 + stmmac_do_void_callback(__priv, dma, axi, __args) 190 + #define stmmac_dump_dma_regs(__priv, __args...) \ 191 + stmmac_do_void_callback(__priv, dma, dump_regs, __args) 192 + #define stmmac_dma_mode(__priv, __args...) \ 193 + stmmac_do_void_callback(__priv, dma, dma_mode, __args) 194 + #define stmmac_dma_rx_mode(__priv, __args...) \ 195 + stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args) 196 + #define stmmac_dma_tx_mode(__priv, __args...) \ 197 + stmmac_do_void_callback(__priv, dma, dma_tx_mode, __args) 198 + #define stmmac_dma_diagnostic_fr(__priv, __args...) 
\ 199 + stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args) 200 + #define stmmac_enable_dma_transmission(__priv, __args...) \ 201 + stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args) 202 + #define stmmac_enable_dma_irq(__priv, __args...) \ 203 + stmmac_do_void_callback(__priv, dma, enable_dma_irq, __args) 204 + #define stmmac_disable_dma_irq(__priv, __args...) \ 205 + stmmac_do_void_callback(__priv, dma, disable_dma_irq, __args) 206 + #define stmmac_start_tx(__priv, __args...) \ 207 + stmmac_do_void_callback(__priv, dma, start_tx, __args) 208 + #define stmmac_stop_tx(__priv, __args...) \ 209 + stmmac_do_void_callback(__priv, dma, stop_tx, __args) 210 + #define stmmac_start_rx(__priv, __args...) \ 211 + stmmac_do_void_callback(__priv, dma, start_rx, __args) 212 + #define stmmac_stop_rx(__priv, __args...) \ 213 + stmmac_do_void_callback(__priv, dma, stop_rx, __args) 214 + #define stmmac_dma_interrupt_status(__priv, __args...) \ 215 + stmmac_do_callback(__priv, dma, dma_interrupt, __args) 216 + #define stmmac_get_hw_feature(__priv, __args...) \ 217 + stmmac_do_void_callback(__priv, dma, get_hw_feature, __args) 218 + #define stmmac_rx_watchdog(__priv, __args...) \ 219 + stmmac_do_void_callback(__priv, dma, rx_watchdog, __args) 220 + #define stmmac_set_tx_ring_len(__priv, __args...) \ 221 + stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args) 222 + #define stmmac_set_rx_ring_len(__priv, __args...) \ 223 + stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __args) 224 + #define stmmac_set_rx_tail_ptr(__priv, __args...) \ 225 + stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __args) 226 + #define stmmac_set_tx_tail_ptr(__priv, __args...) \ 227 + stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) 228 + #define stmmac_enable_tso(__priv, __args...) 
\ 229 + stmmac_do_void_callback(__priv, dma, enable_tso, __args) 230 + 231 + struct mac_device_info; 232 + struct net_device; 233 + struct rgmii_adv; 234 + struct stmmac_safety_stats; 235 + 236 + /* Helpers to program the MAC core */ 237 + struct stmmac_ops { 238 + /* MAC core initialization */ 239 + void (*core_init)(struct mac_device_info *hw, struct net_device *dev); 240 + /* Enable the MAC RX/TX */ 241 + void (*set_mac)(void __iomem *ioaddr, bool enable); 242 + /* Enable and verify that the IPC module is supported */ 243 + int (*rx_ipc)(struct mac_device_info *hw); 244 + /* Enable RX Queues */ 245 + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); 246 + /* RX Queues Priority */ 247 + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); 248 + /* TX Queues Priority */ 249 + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); 250 + /* RX Queues Routing */ 251 + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, 252 + u32 queue); 253 + /* Program RX Algorithms */ 254 + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); 255 + /* Program TX Algorithms */ 256 + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); 257 + /* Set MTL TX queues weight */ 258 + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, 259 + u32 weight, u32 queue); 260 + /* RX MTL queue to RX dma mapping */ 261 + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); 262 + /* Configure AV Algorithm */ 263 + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, 264 + u32 idle_slope, u32 high_credit, u32 low_credit, 265 + u32 queue); 266 + /* Dump MAC registers */ 267 + void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); 268 + /* Handle extra events on specific interrupts hw dependent */ 269 + int (*host_irq_status)(struct mac_device_info *hw, 270 + struct stmmac_extra_stats *x); 271 + /* Handle MTL interrupts */ 272 + int 
(*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); 273 + /* Multicast filter setting */ 274 + void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); 275 + /* Flow control setting */ 276 + void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, 277 + unsigned int fc, unsigned int pause_time, u32 tx_cnt); 278 + /* Set power management mode (e.g. magic frame) */ 279 + void (*pmt)(struct mac_device_info *hw, unsigned long mode); 280 + /* Set/Get Unicast MAC addresses */ 281 + void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, 282 + unsigned int reg_n); 283 + void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, 284 + unsigned int reg_n); 285 + void (*set_eee_mode)(struct mac_device_info *hw, 286 + bool en_tx_lpi_clockgating); 287 + void (*reset_eee_mode)(struct mac_device_info *hw); 288 + void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); 289 + void (*set_eee_pls)(struct mac_device_info *hw, int link); 290 + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, 291 + u32 rx_queues, u32 tx_queues); 292 + /* PCS calls */ 293 + void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, 294 + bool loopback); 295 + void (*pcs_rane)(void __iomem *ioaddr, bool restart); 296 + void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); 297 + /* Safety Features */ 298 + int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp); 299 + int (*safety_feat_irq_status)(struct net_device *ndev, 300 + void __iomem *ioaddr, unsigned int asp, 301 + struct stmmac_safety_stats *stats); 302 + int (*safety_feat_dump)(struct stmmac_safety_stats *stats, 303 + int index, unsigned long *count, const char **desc); 304 + }; 305 + 306 + #define stmmac_core_init(__priv, __args...) \ 307 + stmmac_do_void_callback(__priv, mac, core_init, __args) 308 + #define stmmac_mac_set(__priv, __args...) 
\ 309 + stmmac_do_void_callback(__priv, mac, set_mac, __args) 310 + #define stmmac_rx_ipc(__priv, __args...) \ 311 + stmmac_do_callback(__priv, mac, rx_ipc, __args) 312 + #define stmmac_rx_queue_enable(__priv, __args...) \ 313 + stmmac_do_void_callback(__priv, mac, rx_queue_enable, __args) 314 + #define stmmac_rx_queue_prio(__priv, __args...) \ 315 + stmmac_do_void_callback(__priv, mac, rx_queue_prio, __args) 316 + #define stmmac_tx_queue_prio(__priv, __args...) \ 317 + stmmac_do_void_callback(__priv, mac, tx_queue_prio, __args) 318 + #define stmmac_rx_queue_routing(__priv, __args...) \ 319 + stmmac_do_void_callback(__priv, mac, rx_queue_routing, __args) 320 + #define stmmac_prog_mtl_rx_algorithms(__priv, __args...) \ 321 + stmmac_do_void_callback(__priv, mac, prog_mtl_rx_algorithms, __args) 322 + #define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \ 323 + stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args) 324 + #define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \ 325 + stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __args) 326 + #define stmmac_map_mtl_to_dma(__priv, __args...) \ 327 + stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args) 328 + #define stmmac_config_cbs(__priv, __args...) \ 329 + stmmac_do_void_callback(__priv, mac, config_cbs, __args) 330 + #define stmmac_dump_mac_regs(__priv, __args...) \ 331 + stmmac_do_void_callback(__priv, mac, dump_regs, __args) 332 + #define stmmac_host_irq_status(__priv, __args...) \ 333 + stmmac_do_callback(__priv, mac, host_irq_status, __args) 334 + #define stmmac_host_mtl_irq_status(__priv, __args...) \ 335 + stmmac_do_callback(__priv, mac, host_mtl_irq_status, __args) 336 + #define stmmac_set_filter(__priv, __args...) \ 337 + stmmac_do_void_callback(__priv, mac, set_filter, __args) 338 + #define stmmac_flow_ctrl(__priv, __args...) \ 339 + stmmac_do_void_callback(__priv, mac, flow_ctrl, __args) 340 + #define stmmac_pmt(__priv, __args...) 
\ 341 + stmmac_do_void_callback(__priv, mac, pmt, __args) 342 + #define stmmac_set_umac_addr(__priv, __args...) \ 343 + stmmac_do_void_callback(__priv, mac, set_umac_addr, __args) 344 + #define stmmac_get_umac_addr(__priv, __args...) \ 345 + stmmac_do_void_callback(__priv, mac, get_umac_addr, __args) 346 + #define stmmac_set_eee_mode(__priv, __args...) \ 347 + stmmac_do_void_callback(__priv, mac, set_eee_mode, __args) 348 + #define stmmac_reset_eee_mode(__priv, __args...) \ 349 + stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args) 350 + #define stmmac_set_eee_timer(__priv, __args...) \ 351 + stmmac_do_void_callback(__priv, mac, set_eee_timer, __args) 352 + #define stmmac_set_eee_pls(__priv, __args...) \ 353 + stmmac_do_void_callback(__priv, mac, set_eee_pls, __args) 354 + #define stmmac_mac_debug(__priv, __args...) \ 355 + stmmac_do_void_callback(__priv, mac, debug, __args) 356 + #define stmmac_pcs_ctrl_ane(__priv, __args...) \ 357 + stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args) 358 + #define stmmac_pcs_rane(__priv, __args...) \ 359 + stmmac_do_void_callback(__priv, mac, pcs_rane, __args) 360 + #define stmmac_pcs_get_adv_lp(__priv, __args...) \ 361 + stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args) 362 + #define stmmac_safety_feat_config(__priv, __args...) \ 363 + stmmac_do_callback(__priv, mac, safety_feat_config, __args) 364 + #define stmmac_safety_feat_irq_status(__priv, __args...) \ 365 + stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args) 366 + #define stmmac_safety_feat_dump(__priv, __args...) 
\ 367 + stmmac_do_callback(__priv, mac, safety_feat_dump, __args) 368 + 369 + /* PTP and HW Timer helpers */ 370 + struct stmmac_hwtimestamp { 371 + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); 372 + void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, 373 + int gmac4, u32 *ssinc); 374 + int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); 375 + int (*config_addend) (void __iomem *ioaddr, u32 addend); 376 + int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, 377 + int add_sub, int gmac4); 378 + void (*get_systime) (void __iomem *ioaddr, u64 *systime); 379 + }; 380 + 381 + #define stmmac_config_hw_tstamping(__priv, __args...) \ 382 + stmmac_do_void_callback(__priv, ptp, config_hw_tstamping, __args) 383 + #define stmmac_config_sub_second_increment(__priv, __args...) \ 384 + stmmac_do_void_callback(__priv, ptp, config_sub_second_increment, __args) 385 + #define stmmac_init_systime(__priv, __args...) \ 386 + stmmac_do_callback(__priv, ptp, init_systime, __args) 387 + #define stmmac_config_addend(__priv, __args...) \ 388 + stmmac_do_callback(__priv, ptp, config_addend, __args) 389 + #define stmmac_adjust_systime(__priv, __args...) \ 390 + stmmac_do_callback(__priv, ptp, adjust_systime, __args) 391 + #define stmmac_get_systime(__priv, __args...) \ 392 + stmmac_do_void_callback(__priv, ptp, get_systime, __args) 393 + 394 + /* Helpers to manage the descriptors for chain and ring modes */ 395 + struct stmmac_mode_ops { 396 + void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, 397 + unsigned int extend_desc); 398 + unsigned int (*is_jumbo_frm) (int len, int ehn_desc); 399 + int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); 400 + int (*set_16kib_bfsize)(int mtu); 401 + void (*init_desc3)(struct dma_desc *p); 402 + void (*refill_desc3) (void *priv, struct dma_desc *p); 403 + void (*clean_desc3) (void *priv, struct dma_desc *p); 404 + }; 405 + 406 + #define stmmac_mode_init(__priv, __args...) 
\ 407 + stmmac_do_void_callback(__priv, mode, init, __args) 408 + #define stmmac_is_jumbo_frm(__priv, __args...) \ 409 + stmmac_do_callback(__priv, mode, is_jumbo_frm, __args) 410 + #define stmmac_jumbo_frm(__priv, __args...) \ 411 + stmmac_do_callback(__priv, mode, jumbo_frm, __args) 412 + #define stmmac_set_16kib_bfsize(__priv, __args...) \ 413 + stmmac_do_callback(__priv, mode, set_16kib_bfsize, __args) 414 + #define stmmac_init_desc3(__priv, __args...) \ 415 + stmmac_do_void_callback(__priv, mode, init_desc3, __args) 416 + #define stmmac_refill_desc3(__priv, __args...) \ 417 + stmmac_do_void_callback(__priv, mode, refill_desc3, __args) 418 + #define stmmac_clean_desc3(__priv, __args...) \ 419 + stmmac_do_void_callback(__priv, mode, clean_desc3, __args) 420 + 421 + #endif /* __STMMAC_HWIF_H__ */
+2 -2
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
··· 253 253 return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17; 254 254 } 255 255 256 - static u64 ndesc_get_timestamp(void *desc, u32 ats) 256 + static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts) 257 257 { 258 258 struct dma_desc *p = (struct dma_desc *)desc; 259 259 u64 ns; ··· 262 262 /* convert high/sec time stamp value to nanosecond */ 263 263 ns += le32_to_cpu(p->des3) * 1000000000ULL; 264 264 265 - return ns; 265 + *ts = ns; 266 266 } 267 267 268 268 static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
+18 -21
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
··· 24 24 25 25 #include "stmmac.h" 26 26 27 - static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 27 + static int jumbo_frm(void *p, struct sk_buff *skb, int csum) 28 28 { 29 29 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; 30 30 unsigned int nopaged_len = skb_headlen(skb); ··· 58 58 tx_q->tx_skbuff_dma[entry].is_jumbo = true; 59 59 60 60 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 61 - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, 62 - STMMAC_RING_MODE, 0, 63 - false, skb->len); 61 + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, 62 + STMMAC_RING_MODE, 0, false, skb->len); 64 63 tx_q->tx_skbuff[entry] = NULL; 65 64 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 66 65 ··· 78 79 tx_q->tx_skbuff_dma[entry].is_jumbo = true; 79 80 80 81 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 81 - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, 82 - STMMAC_RING_MODE, 1, 83 - true, skb->len); 82 + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, 83 + STMMAC_RING_MODE, 1, true, skb->len); 84 84 } else { 85 85 des2 = dma_map_single(priv->device, skb->data, 86 86 nopaged_len, DMA_TO_DEVICE); ··· 90 92 tx_q->tx_skbuff_dma[entry].len = nopaged_len; 91 93 tx_q->tx_skbuff_dma[entry].is_jumbo = true; 92 94 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 93 - priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, 94 - STMMAC_RING_MODE, 0, 95 - true, skb->len); 95 + stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, 96 + STMMAC_RING_MODE, 0, true, skb->len); 96 97 } 97 98 98 99 tx_q->cur_tx = entry; ··· 99 102 return entry; 100 103 } 101 104 102 - static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) 105 + static unsigned int is_jumbo_frm(int len, int enh_desc) 103 106 { 104 107 unsigned int ret = 0; 105 108 ··· 109 112 return ret; 110 113 } 111 114 112 - static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) 115 + static void refill_desc3(void *priv_ptr, struct dma_desc *p) 113 116 { 114 117 struct 
stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; 115 118 ··· 119 122 } 120 123 121 124 /* In ring mode we need to fill the desc3 because it is used as buffer */ 122 - static void stmmac_init_desc3(struct dma_desc *p) 125 + static void init_desc3(struct dma_desc *p) 123 126 { 124 127 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); 125 128 } 126 129 127 - static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) 130 + static void clean_desc3(void *priv_ptr, struct dma_desc *p) 128 131 { 129 132 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; 130 133 struct stmmac_priv *priv = tx_q->priv_data; ··· 137 140 p->des3 = 0; 138 141 } 139 142 140 - static int stmmac_set_16kib_bfsize(int mtu) 143 + static int set_16kib_bfsize(int mtu) 141 144 { 142 145 int ret = 0; 143 146 if (unlikely(mtu >= BUF_SIZE_8KiB)) ··· 146 149 } 147 150 148 151 const struct stmmac_mode_ops ring_mode_ops = { 149 - .is_jumbo_frm = stmmac_is_jumbo_frm, 150 - .jumbo_frm = stmmac_jumbo_frm, 151 - .refill_desc3 = stmmac_refill_desc3, 152 - .init_desc3 = stmmac_init_desc3, 153 - .clean_desc3 = stmmac_clean_desc3, 154 - .set_16kib_bfsize = stmmac_set_16kib_bfsize, 152 + .is_jumbo_frm = is_jumbo_frm, 153 + .jumbo_frm = jumbo_frm, 154 + .refill_desc3 = refill_desc3, 155 + .init_desc3 = init_desc3, 156 + .clean_desc3 = clean_desc3, 157 + .set_16kib_bfsize = set_16kib_bfsize, 155 158 };
+31 -51
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 291 291 cmd->base.speed = priv->xstats.pcs_speed; 292 292 293 293 /* Get and convert ADV/LP_ADV from the HW AN registers */ 294 - if (!priv->hw->mac->pcs_get_adv_lp) 294 + if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv)) 295 295 return -EOPNOTSUPP; /* should never happen indeed */ 296 - 297 - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv); 298 296 299 297 /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ 300 298 ··· 391 393 ADVERTISED_10baseT_Full); 392 394 393 395 spin_lock(&priv->lock); 394 - 395 - if (priv->hw->mac->pcs_ctrl_ane) 396 - priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, 397 - priv->hw->ps, 0); 398 - 396 + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 399 397 spin_unlock(&priv->lock); 400 398 401 399 return 0; ··· 436 442 437 443 memset(reg_space, 0x0, REG_SPACE_SIZE); 438 444 439 - priv->hw->mac->dump_regs(priv->hw, reg_space); 440 - priv->hw->dma->dump_regs(priv->ioaddr, reg_space); 445 + stmmac_dump_mac_regs(priv, priv->hw, reg_space); 446 + stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); 441 447 /* Copy DMA registers to where ethtool expects them */ 442 448 memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4], 443 449 NUM_DWMAC1000_DMA_REGS * 4); ··· 448 454 struct ethtool_pauseparam *pause) 449 455 { 450 456 struct stmmac_priv *priv = netdev_priv(netdev); 457 + struct rgmii_adv adv_lp; 451 458 452 459 pause->rx_pause = 0; 453 460 pause->tx_pause = 0; 454 461 455 - if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { 456 - struct rgmii_adv adv_lp; 457 - 462 + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { 458 463 pause->autoneg = 1; 459 - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); 460 464 if (!adv_lp.pause) 461 465 return; 462 466 } else { ··· 480 488 u32 tx_cnt = priv->plat->tx_queues_to_use; 481 489 struct phy_device *phy = netdev->phydev; 482 490 int new_pause = FLOW_OFF; 491 + struct rgmii_adv adv_lp; 483 492 484 - if (priv->hw->pcs && 
priv->hw->mac->pcs_get_adv_lp) { 485 - struct rgmii_adv adv_lp; 486 - 493 + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { 487 494 pause->autoneg = 1; 488 - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); 489 495 if (!adv_lp.pause) 490 496 return -EOPNOTSUPP; 491 497 } else { ··· 505 515 return phy_start_aneg(phy); 506 516 } 507 517 508 - priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, 509 - priv->pause, tx_cnt); 518 + stmmac_flow_ctrl(priv, priv->hw, phy->duplex, priv->flow_ctrl, 519 + priv->pause, tx_cnt); 510 520 return 0; 511 521 } 512 522 513 523 static void stmmac_get_ethtool_stats(struct net_device *dev, 514 524 struct ethtool_stats *dummy, u64 *data) 515 525 { 516 - const char *(*dump)(struct stmmac_safety_stats *stats, int index, 517 - unsigned long *count); 518 526 struct stmmac_priv *priv = netdev_priv(dev); 519 527 u32 rx_queues_count = priv->plat->rx_queues_to_use; 520 528 u32 tx_queues_count = priv->plat->tx_queues_to_use; 521 529 unsigned long count; 522 - int i, j = 0; 530 + int i, j = 0, ret; 523 531 524 - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { 525 - dump = priv->hw->mac->safety_feat_dump; 526 - 532 + if (priv->dma_cap.asp) { 527 533 for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { 528 - if (dump(&priv->sstats, i, &count)) 534 + if (!stmmac_safety_feat_dump(priv, &priv->sstats, i, 535 + &count, NULL)) 529 536 data[j++] = count; 530 537 } 531 538 } 532 539 533 540 /* Update the DMA HW counters for dwmac10/100 */ 534 - if (priv->hw->dma->dma_diagnostic_fr) 535 - priv->hw->dma->dma_diagnostic_fr(&dev->stats, 536 - (void *) &priv->xstats, 537 - priv->ioaddr); 538 - else { 541 + ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats, 542 + priv->ioaddr); 543 + if (ret) { 539 544 /* If supported, for new GMAC chips expose the MMC counters */ 540 545 if (priv->dma_cap.rmon) { 541 546 dwmac_mmc_read(priv->mmcaddr, &priv->mmc); ··· 550 565 
priv->xstats.phy_eee_wakeup_error_n = val; 551 566 } 552 567 553 - if ((priv->hw->mac->debug) && 554 - (priv->synopsys_id >= DWMAC_CORE_3_50)) 555 - priv->hw->mac->debug(priv->ioaddr, 556 - (void *)&priv->xstats, 557 - rx_queues_count, tx_queues_count); 568 + if (priv->synopsys_id >= DWMAC_CORE_3_50) 569 + stmmac_mac_debug(priv, priv->ioaddr, 570 + (void *)&priv->xstats, 571 + rx_queues_count, tx_queues_count); 558 572 } 559 573 for (i = 0; i < STMMAC_STATS_LEN; i++) { 560 574 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; ··· 565 581 static int stmmac_get_sset_count(struct net_device *netdev, int sset) 566 582 { 567 583 struct stmmac_priv *priv = netdev_priv(netdev); 568 - const char *(*dump)(struct stmmac_safety_stats *stats, int index, 569 - unsigned long *count); 570 584 int i, len, safety_len = 0; 571 585 572 586 switch (sset) { ··· 573 591 574 592 if (priv->dma_cap.rmon) 575 593 len += STMMAC_MMC_STATS_LEN; 576 - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { 577 - dump = priv->hw->mac->safety_feat_dump; 578 - 594 + if (priv->dma_cap.asp) { 579 595 for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { 580 - if (dump(&priv->sstats, i, NULL)) 596 + if (!stmmac_safety_feat_dump(priv, 597 + &priv->sstats, i, 598 + NULL, NULL)) 581 599 safety_len++; 582 600 } 583 601 ··· 595 613 int i; 596 614 u8 *p = data; 597 615 struct stmmac_priv *priv = netdev_priv(dev); 598 - const char *(*dump)(struct stmmac_safety_stats *stats, int index, 599 - unsigned long *count); 600 616 601 617 switch (stringset) { 602 618 case ETH_SS_STATS: 603 - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { 604 - dump = priv->hw->mac->safety_feat_dump; 619 + if (priv->dma_cap.asp) { 605 620 for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { 606 - const char *desc = dump(&priv->sstats, i, NULL); 607 - 608 - if (desc) { 621 + const char *desc; 622 + if (!stmmac_safety_feat_dump(priv, 623 + &priv->sstats, i, 624 + NULL, &desc)) { 609 625 memcpy(p, desc, 
ETH_GSTRING_LEN); 610 626 p += ETH_GSTRING_LEN; 611 627 } ··· 790 810 priv->tx_coal_frames = ec->tx_max_coalesced_frames; 791 811 priv->tx_coal_timer = ec->tx_coalesce_usecs; 792 812 priv->rx_riwt = rx_riwt; 793 - priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt); 813 + stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); 794 814 795 815 return 0; 796 816 }
+18 -16
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
··· 24 24 #include "common.h" 25 25 #include "stmmac_ptp.h" 26 26 27 - static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) 27 + static void config_hw_tstamping(void __iomem *ioaddr, u32 data) 28 28 { 29 29 writel(data, ioaddr + PTP_TCR); 30 30 } 31 31 32 - static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, 33 - u32 ptp_clock, int gmac4) 32 + static void config_sub_second_increment(void __iomem *ioaddr, 33 + u32 ptp_clock, int gmac4, u32 *ssinc) 34 34 { 35 35 u32 value = readl(ioaddr + PTP_TCR); 36 36 unsigned long data; ··· 57 57 58 58 writel(reg_value, ioaddr + PTP_SSIR); 59 59 60 - return data; 60 + if (ssinc) 61 + *ssinc = data; 61 62 } 62 63 63 - static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) 64 + static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) 64 65 { 65 66 int limit; 66 67 u32 value; ··· 86 85 return 0; 87 86 } 88 87 89 - static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) 88 + static int config_addend(void __iomem *ioaddr, u32 addend) 90 89 { 91 90 u32 value; 92 91 int limit; ··· 110 109 return 0; 111 110 } 112 111 113 - static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, 114 - int add_sub, int gmac4) 112 + static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, 113 + int add_sub, int gmac4) 115 114 { 116 115 u32 value; 117 116 int limit; ··· 153 152 return 0; 154 153 } 155 154 156 - static u64 stmmac_get_systime(void __iomem *ioaddr) 155 + static void get_systime(void __iomem *ioaddr, u64 *systime) 157 156 { 158 157 u64 ns; 159 158 ··· 162 161 /* Get the TSS and convert sec time value to nanosecond */ 163 162 ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; 164 163 165 - return ns; 164 + if (systime) 165 + *systime = ns; 166 166 } 167 167 168 168 const struct stmmac_hwtimestamp stmmac_ptp = { 169 - .config_hw_tstamping = stmmac_config_hw_tstamping, 170 - .init_systime = stmmac_init_systime, 171 - .config_sub_second_increment = 
stmmac_config_sub_second_increment, 172 - .config_addend = stmmac_config_addend, 173 - .adjust_systime = stmmac_adjust_systime, 174 - .get_systime = stmmac_get_systime, 169 + .config_hw_tstamping = config_hw_tstamping, 170 + .init_systime = init_systime, 171 + .config_sub_second_increment = config_sub_second_increment, 172 + .config_addend = config_addend, 173 + .adjust_systime = adjust_systime, 174 + .get_systime = get_systime, 175 175 };
+196 -243
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 50 50 #include <linux/reset.h> 51 51 #include <linux/of_mdio.h> 52 52 #include "dwmac1000.h" 53 + #include "hwif.h" 53 54 54 55 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 55 56 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) ··· 336 335 337 336 /* Check and enter in LPI mode */ 338 337 if (!priv->tx_path_in_lpi_mode) 339 - priv->hw->mac->set_eee_mode(priv->hw, 340 - priv->plat->en_tx_lpi_clockgating); 338 + stmmac_set_eee_mode(priv, priv->hw, 339 + priv->plat->en_tx_lpi_clockgating); 341 340 } 342 341 343 342 /** ··· 348 347 */ 349 348 void stmmac_disable_eee_mode(struct stmmac_priv *priv) 350 349 { 351 - priv->hw->mac->reset_eee_mode(priv->hw); 350 + stmmac_reset_eee_mode(priv, priv->hw); 352 351 del_timer_sync(&priv->eee_ctrl_timer); 353 352 priv->tx_path_in_lpi_mode = false; 354 353 } ··· 411 410 if (priv->eee_active) { 412 411 netdev_dbg(priv->dev, "disable EEE\n"); 413 412 del_timer_sync(&priv->eee_ctrl_timer); 414 - priv->hw->mac->set_eee_timer(priv->hw, 0, 415 - tx_lpi_timer); 413 + stmmac_set_eee_timer(priv, priv->hw, 0, 414 + tx_lpi_timer); 416 415 } 417 416 priv->eee_active = 0; 418 417 spin_unlock_irqrestore(&priv->lock, flags); ··· 427 426 mod_timer(&priv->eee_ctrl_timer, 428 427 STMMAC_LPI_T(eee_timer)); 429 428 430 - priv->hw->mac->set_eee_timer(priv->hw, 431 - STMMAC_DEFAULT_LIT_LS, 432 - tx_lpi_timer); 429 + stmmac_set_eee_timer(priv, priv->hw, 430 + STMMAC_DEFAULT_LIT_LS, tx_lpi_timer); 433 431 } 434 432 /* Set HW EEE according to the speed */ 435 - priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link); 433 + stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link); 436 434 437 435 ret = true; 438 436 spin_unlock_irqrestore(&priv->lock, flags); ··· 464 464 return; 465 465 466 466 /* check tx tstamp status */ 467 - if (priv->hw->desc->get_tx_timestamp_status(p)) { 467 + if (stmmac_get_tx_timestamp_status(priv, p)) { 468 468 /* get the valid tstamp */ 469 - ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 469 + stmmac_get_timestamp(priv, p, 
priv->adv_ts, &ns); 470 470 471 471 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 472 472 shhwtstamp.hwtstamp = ns_to_ktime(ns); ··· 502 502 desc = np; 503 503 504 504 /* Check if timestamp is available */ 505 - if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) { 506 - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 505 + if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { 506 + stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); 507 507 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 508 508 shhwtstamp = skb_hwtstamps(skb); 509 509 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); ··· 707 707 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 708 708 709 709 if (!priv->hwts_tx_en && !priv->hwts_rx_en) 710 - priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); 710 + stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); 711 711 else { 712 712 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | 713 713 tstamp_all | ptp_v2 | ptp_over_ethernet | 714 714 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | 715 715 ts_master_en | snap_type_sel); 716 - priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value); 716 + stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); 717 717 718 718 /* program Sub Second Increment reg */ 719 - sec_inc = priv->hw->ptp->config_sub_second_increment( 720 - priv->ptpaddr, priv->plat->clk_ptp_rate, 721 - priv->plat->has_gmac4); 719 + stmmac_config_sub_second_increment(priv, 720 + priv->ptpaddr, priv->plat->clk_ptp_rate, 721 + priv->plat->has_gmac4, &sec_inc); 722 722 temp = div_u64(1000000000ULL, sec_inc); 723 723 724 724 /* calculate default added value: ··· 728 728 */ 729 729 temp = (u64)(temp << 32); 730 730 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); 731 - priv->hw->ptp->config_addend(priv->ptpaddr, 732 - priv->default_addend); 731 + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); 733 732 734 733 /* initialize 
system time */ 735 734 ktime_get_real_ts64(&now); 736 735 737 736 /* lower 32 bits of tv_sec are safe until y2106 */ 738 - priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, 739 - now.tv_nsec); 737 + stmmac_init_systime(priv, priv->ptpaddr, 738 + (u32)now.tv_sec, now.tv_nsec); 740 739 } 741 740 742 741 return copy_to_user(ifr->ifr_data, &config, ··· 794 795 { 795 796 u32 tx_cnt = priv->plat->tx_queues_to_use; 796 797 797 - priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl, 798 - priv->pause, tx_cnt); 798 + stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, 799 + priv->pause, tx_cnt); 799 800 } 800 801 801 802 /** ··· 1007 1008 head_rx = (void *)rx_q->dma_rx; 1008 1009 1009 1010 /* Display RX ring */ 1010 - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); 1011 + stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true); 1011 1012 } 1012 1013 } 1013 1014 ··· 1028 1029 else 1029 1030 head_tx = (void *)tx_q->dma_tx; 1030 1031 1031 - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); 1032 + stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false); 1032 1033 } 1033 1034 } 1034 1035 ··· 1072 1073 /* Clear the RX descriptors */ 1073 1074 for (i = 0; i < DMA_RX_SIZE; i++) 1074 1075 if (priv->extend_desc) 1075 - priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic, 1076 - priv->use_riwt, priv->mode, 1077 - (i == DMA_RX_SIZE - 1)); 1076 + stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 1077 + priv->use_riwt, priv->mode, 1078 + (i == DMA_RX_SIZE - 1)); 1078 1079 else 1079 - priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i], 1080 - priv->use_riwt, priv->mode, 1081 - (i == DMA_RX_SIZE - 1)); 1080 + stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 1081 + priv->use_riwt, priv->mode, 1082 + (i == DMA_RX_SIZE - 1)); 1082 1083 } 1083 1084 1084 1085 /** ··· 1096 1097 /* Clear the TX descriptors */ 1097 1098 for (i = 0; i < DMA_TX_SIZE; i++) 1098 1099 if (priv->extend_desc) 1099 - priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, 1100 - priv->mode, 
1101 - (i == DMA_TX_SIZE - 1)); 1100 + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, 1101 + priv->mode, (i == DMA_TX_SIZE - 1)); 1102 1102 else 1103 - priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], 1104 - priv->mode, 1105 - (i == DMA_TX_SIZE - 1)); 1103 + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], 1104 + priv->mode, (i == DMA_TX_SIZE - 1)); 1106 1105 } 1107 1106 1108 1107 /** ··· 1161 1164 else 1162 1165 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); 1163 1166 1164 - if ((priv->hw->mode->init_desc3) && 1165 - (priv->dma_buf_sz == BUF_SIZE_16KiB)) 1166 - priv->hw->mode->init_desc3(p); 1167 + if (priv->dma_buf_sz == BUF_SIZE_16KiB) 1168 + stmmac_init_desc3(priv, p); 1167 1169 1168 1170 return 0; 1169 1171 } ··· 1228 1232 { 1229 1233 struct stmmac_priv *priv = netdev_priv(dev); 1230 1234 u32 rx_count = priv->plat->rx_queues_to_use; 1231 - unsigned int bfsize = 0; 1232 1235 int ret = -ENOMEM; 1236 + int bfsize = 0; 1233 1237 int queue; 1234 1238 int i; 1235 1239 1236 - if (priv->hw->mode->set_16kib_bfsize) 1237 - bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); 1240 + bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); 1241 + if (bfsize < 0) 1242 + bfsize = 0; 1238 1243 1239 1244 if (bfsize < BUF_SIZE_16KiB) 1240 1245 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); ··· 1279 1282 /* Setup the chained descriptor addresses */ 1280 1283 if (priv->mode == STMMAC_CHAIN_MODE) { 1281 1284 if (priv->extend_desc) 1282 - priv->hw->mode->init(rx_q->dma_erx, 1283 - rx_q->dma_rx_phy, 1284 - DMA_RX_SIZE, 1); 1285 + stmmac_mode_init(priv, rx_q->dma_erx, 1286 + rx_q->dma_rx_phy, DMA_RX_SIZE, 1); 1285 1287 else 1286 - priv->hw->mode->init(rx_q->dma_rx, 1287 - rx_q->dma_rx_phy, 1288 - DMA_RX_SIZE, 0); 1288 + stmmac_mode_init(priv, rx_q->dma_rx, 1289 + rx_q->dma_rx_phy, DMA_RX_SIZE, 0); 1289 1290 } 1290 1291 } 1291 1292 ··· 1330 1335 /* Setup the chained descriptor addresses */ 1331 1336 if (priv->mode == STMMAC_CHAIN_MODE) { 1332 1337 if (priv->extend_desc) 1333 - 
priv->hw->mode->init(tx_q->dma_etx, 1334 - tx_q->dma_tx_phy, 1335 - DMA_TX_SIZE, 1); 1338 + stmmac_mode_init(priv, tx_q->dma_etx, 1339 + tx_q->dma_tx_phy, DMA_TX_SIZE, 1); 1336 1340 else 1337 - priv->hw->mode->init(tx_q->dma_tx, 1338 - tx_q->dma_tx_phy, 1339 - DMA_TX_SIZE, 0); 1341 + stmmac_mode_init(priv, tx_q->dma_tx, 1342 + tx_q->dma_tx_phy, DMA_TX_SIZE, 0); 1340 1343 } 1341 1344 1342 1345 for (i = 0; i < DMA_TX_SIZE; i++) { ··· 1657 1664 1658 1665 for (queue = 0; queue < rx_queues_count; queue++) { 1659 1666 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 1660 - priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); 1667 + stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 1661 1668 } 1662 1669 } 1663 1670 ··· 1671 1678 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 1672 1679 { 1673 1680 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 1674 - priv->hw->dma->start_rx(priv->ioaddr, chan); 1681 + stmmac_start_rx(priv, priv->ioaddr, chan); 1675 1682 } 1676 1683 1677 1684 /** ··· 1684 1691 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 1685 1692 { 1686 1693 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 1687 - priv->hw->dma->start_tx(priv->ioaddr, chan); 1694 + stmmac_start_tx(priv, priv->ioaddr, chan); 1688 1695 } 1689 1696 1690 1697 /** ··· 1697 1704 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 1698 1705 { 1699 1706 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 1700 - priv->hw->dma->stop_rx(priv->ioaddr, chan); 1707 + stmmac_stop_rx(priv, priv->ioaddr, chan); 1701 1708 } 1702 1709 1703 1710 /** ··· 1710 1717 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 1711 1718 { 1712 1719 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 1713 - priv->hw->dma->stop_tx(priv->ioaddr, chan); 1720 + stmmac_stop_tx(priv, priv->ioaddr, chan); 1714 1721 } 1715 1722 1716 1723 /** ··· 1801 1808 for 
(chan = 0; chan < rx_channels_count; chan++) { 1802 1809 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 1803 1810 1804 - priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, 1805 - rxfifosz, qmode); 1811 + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 1812 + rxfifosz, qmode); 1806 1813 } 1807 1814 1808 1815 for (chan = 0; chan < tx_channels_count; chan++) { 1809 1816 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 1810 1817 1811 - priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, 1812 - txfifosz, qmode); 1818 + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 1819 + txfifosz, qmode); 1813 1820 } 1814 1821 } else { 1815 - priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, 1816 - rxfifosz); 1822 + stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz); 1817 1823 } 1818 1824 } 1819 1825 ··· 1843 1851 else 1844 1852 p = tx_q->dma_tx + entry; 1845 1853 1846 - status = priv->hw->desc->tx_status(&priv->dev->stats, 1847 - &priv->xstats, p, 1848 - priv->ioaddr); 1854 + status = stmmac_tx_status(priv, &priv->dev->stats, 1855 + &priv->xstats, p, priv->ioaddr); 1849 1856 /* Check if the descriptor is owned by the DMA */ 1850 1857 if (unlikely(status & tx_dma_own)) 1851 1858 break; ··· 1882 1891 tx_q->tx_skbuff_dma[entry].map_as_page = false; 1883 1892 } 1884 1893 1885 - if (priv->hw->mode->clean_desc3) 1886 - priv->hw->mode->clean_desc3(tx_q, p); 1894 + stmmac_clean_desc3(priv, tx_q, p); 1887 1895 1888 1896 tx_q->tx_skbuff_dma[entry].last_segment = false; 1889 1897 tx_q->tx_skbuff_dma[entry].is_jumbo = false; ··· 1894 1904 tx_q->tx_skbuff[entry] = NULL; 1895 1905 } 1896 1906 1897 - priv->hw->desc->release_tx_desc(p, priv->mode); 1907 + stmmac_release_tx_desc(priv, p, priv->mode); 1898 1908 1899 1909 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 1900 1910 } ··· 1919 1929 netif_tx_unlock(priv->dev); 1920 1930 } 1921 1931 1922 - static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan) 1923 - { 1924 - 
priv->hw->dma->enable_dma_irq(priv->ioaddr, chan); 1925 - } 1926 - 1927 - static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan) 1928 - { 1929 - priv->hw->dma->disable_dma_irq(priv->ioaddr, chan); 1930 - } 1931 - 1932 1932 /** 1933 1933 * stmmac_tx_err - to manage the tx error 1934 1934 * @priv: driver private structure ··· 1937 1957 dma_free_tx_skbufs(priv, chan); 1938 1958 for (i = 0; i < DMA_TX_SIZE; i++) 1939 1959 if (priv->extend_desc) 1940 - priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, 1941 - priv->mode, 1942 - (i == DMA_TX_SIZE - 1)); 1960 + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, 1961 + priv->mode, (i == DMA_TX_SIZE - 1)); 1943 1962 else 1944 - priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], 1945 - priv->mode, 1946 - (i == DMA_TX_SIZE - 1)); 1963 + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], 1964 + priv->mode, (i == DMA_TX_SIZE - 1)); 1947 1965 tx_q->dirty_tx = 0; 1948 1966 tx_q->cur_tx = 0; 1949 1967 tx_q->mss = 0; ··· 1982 2004 txfifosz /= tx_channels_count; 1983 2005 1984 2006 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 1985 - priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, 1986 - rxfifosz, rxqmode); 1987 - priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, 1988 - txfifosz, txqmode); 2007 + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, 2008 + rxqmode); 2009 + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, 2010 + txqmode); 1989 2011 } else { 1990 - priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, 1991 - rxfifosz); 2012 + stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz); 1992 2013 } 1993 2014 } 1994 2015 1995 2016 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 1996 2017 { 1997 - bool ret = false; 2018 + int ret = false; 1998 2019 1999 2020 /* Safety features are only available in cores >= 5.10 */ 2000 2021 if (priv->synopsys_id < DWMAC_CORE_5_10) 2001 2022 return ret; 2002 - if (priv->hw->mac->safety_feat_irq_status) 2003 - ret = 
priv->hw->mac->safety_feat_irq_status(priv->dev, 2004 - priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2005 - 2006 - if (ret) 2023 + ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2024 + priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2025 + if (ret && (ret != -EINVAL)) { 2007 2026 stmmac_global_err(priv); 2008 - return ret; 2027 + return true; 2028 + } 2029 + 2030 + return false; 2009 2031 } 2010 2032 2011 2033 /** ··· 2032 2054 * all tx queues rather than just a single tx queue. 2033 2055 */ 2034 2056 for (chan = 0; chan < channels_to_check; chan++) 2035 - status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr, 2036 - &priv->xstats, 2037 - chan); 2057 + status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2058 + &priv->xstats, chan); 2038 2059 2039 2060 for (chan = 0; chan < rx_channel_count; chan++) { 2040 2061 if (likely(status[chan] & handle_rx)) { 2041 2062 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; 2042 2063 2043 2064 if (likely(napi_schedule_prep(&rx_q->napi))) { 2044 - stmmac_disable_dma_irq(priv, chan); 2065 + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); 2045 2066 __napi_schedule(&rx_q->napi); 2046 2067 poll_scheduled = true; 2047 2068 } ··· 2061 2084 &priv->rx_queue[0]; 2062 2085 2063 2086 if (likely(napi_schedule_prep(&rx_q->napi))) { 2064 - stmmac_disable_dma_irq(priv, chan); 2087 + stmmac_disable_dma_irq(priv, 2088 + priv->ioaddr, chan); 2065 2089 __napi_schedule(&rx_q->napi); 2066 2090 } 2067 2091 break; ··· 2158 2180 */ 2159 2181 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2160 2182 { 2161 - u32 ret = 0; 2162 - 2163 - if (priv->hw->dma->get_hw_feature) { 2164 - priv->hw->dma->get_hw_feature(priv->ioaddr, 2165 - &priv->dma_cap); 2166 - ret = 1; 2167 - } 2168 - 2169 - return ret; 2183 + return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2170 2184 } 2171 2185 2172 2186 /** ··· 2171 2201 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2172 2202 { 2173 2203 if 
(!is_valid_ether_addr(priv->dev->dev_addr)) { 2174 - priv->hw->mac->get_umac_addr(priv->hw, 2175 - priv->dev->dev_addr, 0); 2204 + stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); 2176 2205 if (!is_valid_ether_addr(priv->dev->dev_addr)) 2177 2206 eth_hw_addr_random(priv->dev); 2178 2207 netdev_info(priv->dev, "device MAC address %pM\n", ··· 2207 2238 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2208 2239 atds = 1; 2209 2240 2210 - ret = priv->hw->dma->reset(priv->ioaddr); 2241 + ret = stmmac_reset(priv, priv->ioaddr); 2211 2242 if (ret) { 2212 2243 dev_err(priv->device, "Failed to reset the dma\n"); 2213 2244 return ret; ··· 2215 2246 2216 2247 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 2217 2248 /* DMA Configuration */ 2218 - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, 2219 - dummy_dma_tx_phy, dummy_dma_rx_phy, atds); 2249 + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, 2250 + dummy_dma_tx_phy, dummy_dma_rx_phy, atds); 2220 2251 2221 2252 /* DMA RX Channel Configuration */ 2222 2253 for (chan = 0; chan < rx_channels_count; chan++) { 2223 2254 rx_q = &priv->rx_queue[chan]; 2224 2255 2225 - priv->hw->dma->init_rx_chan(priv->ioaddr, 2226 - priv->plat->dma_cfg, 2227 - rx_q->dma_rx_phy, chan); 2256 + stmmac_init_rx_chan(priv, priv->ioaddr, 2257 + priv->plat->dma_cfg, rx_q->dma_rx_phy, 2258 + chan); 2228 2259 2229 2260 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2230 2261 (DMA_RX_SIZE * sizeof(struct dma_desc)); 2231 - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, 2232 - rx_q->rx_tail_addr, 2233 - chan); 2262 + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2263 + rx_q->rx_tail_addr, chan); 2234 2264 } 2235 2265 2236 2266 /* DMA TX Channel Configuration */ 2237 2267 for (chan = 0; chan < tx_channels_count; chan++) { 2238 2268 tx_q = &priv->tx_queue[chan]; 2239 2269 2240 - priv->hw->dma->init_chan(priv->ioaddr, 2241 - priv->plat->dma_cfg, 2242 - chan); 2270 + stmmac_init_chan(priv, priv->ioaddr, 2271 + priv->plat->dma_cfg, chan); 
2243 2272 2244 - priv->hw->dma->init_tx_chan(priv->ioaddr, 2245 - priv->plat->dma_cfg, 2246 - tx_q->dma_tx_phy, chan); 2273 + stmmac_init_tx_chan(priv, priv->ioaddr, 2274 + priv->plat->dma_cfg, tx_q->dma_tx_phy, 2275 + chan); 2247 2276 2248 2277 tx_q->tx_tail_addr = tx_q->dma_tx_phy + 2249 2278 (DMA_TX_SIZE * sizeof(struct dma_desc)); 2250 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, 2251 - tx_q->tx_tail_addr, 2252 - chan); 2279 + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2280 + tx_q->tx_tail_addr, chan); 2253 2281 } 2254 2282 } else { 2255 2283 rx_q = &priv->rx_queue[chan]; 2256 2284 tx_q = &priv->tx_queue[chan]; 2257 - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, 2258 - tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); 2285 + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, 2286 + tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); 2259 2287 } 2260 2288 2261 - if (priv->plat->axi && priv->hw->dma->axi) 2262 - priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); 2289 + if (priv->plat->axi) 2290 + stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2263 2291 2264 2292 return ret; 2265 2293 } ··· 2302 2336 u32 chan; 2303 2337 2304 2338 /* set TX ring length */ 2305 - if (priv->hw->dma->set_tx_ring_len) { 2306 - for (chan = 0; chan < tx_channels_count; chan++) 2307 - priv->hw->dma->set_tx_ring_len(priv->ioaddr, 2308 - (DMA_TX_SIZE - 1), chan); 2309 - } 2339 + for (chan = 0; chan < tx_channels_count; chan++) 2340 + stmmac_set_tx_ring_len(priv, priv->ioaddr, 2341 + (DMA_TX_SIZE - 1), chan); 2310 2342 2311 2343 /* set RX ring length */ 2312 - if (priv->hw->dma->set_rx_ring_len) { 2313 - for (chan = 0; chan < rx_channels_count; chan++) 2314 - priv->hw->dma->set_rx_ring_len(priv->ioaddr, 2315 - (DMA_RX_SIZE - 1), chan); 2316 - } 2344 + for (chan = 0; chan < rx_channels_count; chan++) 2345 + stmmac_set_rx_ring_len(priv, priv->ioaddr, 2346 + (DMA_RX_SIZE - 1), chan); 2317 2347 } 2318 2348 2319 2349 /** ··· 2325 2363 2326 2364 for (queue = 0; queue < tx_queues_count; 
queue++) { 2327 2365 weight = priv->plat->tx_queues_cfg[queue].weight; 2328 - priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue); 2366 + stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 2329 2367 } 2330 2368 } 2331 2369 ··· 2346 2384 if (mode_to_use == MTL_QUEUE_DCB) 2347 2385 continue; 2348 2386 2349 - priv->hw->mac->config_cbs(priv->hw, 2387 + stmmac_config_cbs(priv, priv->hw, 2350 2388 priv->plat->tx_queues_cfg[queue].send_slope, 2351 2389 priv->plat->tx_queues_cfg[queue].idle_slope, 2352 2390 priv->plat->tx_queues_cfg[queue].high_credit, ··· 2368 2406 2369 2407 for (queue = 0; queue < rx_queues_count; queue++) { 2370 2408 chan = priv->plat->rx_queues_cfg[queue].chan; 2371 - priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan); 2409 + stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 2372 2410 } 2373 2411 } 2374 2412 ··· 2388 2426 continue; 2389 2427 2390 2428 prio = priv->plat->rx_queues_cfg[queue].prio; 2391 - priv->hw->mac->rx_queue_prio(priv->hw, prio, queue); 2429 + stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 2392 2430 } 2393 2431 } 2394 2432 ··· 2408 2446 continue; 2409 2447 2410 2448 prio = priv->plat->tx_queues_cfg[queue].prio; 2411 - priv->hw->mac->tx_queue_prio(priv->hw, prio, queue); 2449 + stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 2412 2450 } 2413 2451 } 2414 2452 ··· 2429 2467 continue; 2430 2468 2431 2469 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 2432 - priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); 2470 + stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 2433 2471 } 2434 2472 } 2435 2473 ··· 2443 2481 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2444 2482 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2445 2483 2446 - if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight) 2484 + if (tx_queues_count > 1) 2447 2485 stmmac_set_tx_queue_weight(priv); 2448 2486 2449 2487 /* Configure MTL RX algorithms */ 2450 - if (rx_queues_count > 1 && 
priv->hw->mac->prog_mtl_rx_algorithms) 2451 - priv->hw->mac->prog_mtl_rx_algorithms(priv->hw, 2452 - priv->plat->rx_sched_algorithm); 2488 + if (rx_queues_count > 1) 2489 + stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 2490 + priv->plat->rx_sched_algorithm); 2453 2491 2454 2492 /* Configure MTL TX algorithms */ 2455 - if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms) 2456 - priv->hw->mac->prog_mtl_tx_algorithms(priv->hw, 2457 - priv->plat->tx_sched_algorithm); 2493 + if (tx_queues_count > 1) 2494 + stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 2495 + priv->plat->tx_sched_algorithm); 2458 2496 2459 2497 /* Configure CBS in AVB TX queues */ 2460 - if (tx_queues_count > 1 && priv->hw->mac->config_cbs) 2498 + if (tx_queues_count > 1) 2461 2499 stmmac_configure_cbs(priv); 2462 2500 2463 2501 /* Map RX MTL to DMA channels */ 2464 - if (priv->hw->mac->map_mtl_to_dma) 2465 - stmmac_rx_queue_dma_chan_map(priv); 2502 + stmmac_rx_queue_dma_chan_map(priv); 2466 2503 2467 2504 /* Enable MAC RX Queues */ 2468 - if (priv->hw->mac->rx_queue_enable) 2469 - stmmac_mac_enable_rx_queues(priv); 2505 + stmmac_mac_enable_rx_queues(priv); 2470 2506 2471 2507 /* Set RX priorities */ 2472 - if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio) 2508 + if (rx_queues_count > 1) 2473 2509 stmmac_mac_config_rx_queues_prio(priv); 2474 2510 2475 2511 /* Set TX priorities */ 2476 - if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio) 2512 + if (tx_queues_count > 1) 2477 2513 stmmac_mac_config_tx_queues_prio(priv); 2478 2514 2479 2515 /* Set RX routing */ 2480 - if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing) 2516 + if (rx_queues_count > 1) 2481 2517 stmmac_mac_config_rx_queues_routing(priv); 2482 2518 } 2483 2519 2484 2520 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 2485 2521 { 2486 - if (priv->hw->mac->safety_feat_config && priv->dma_cap.asp) { 2522 + if (priv->dma_cap.asp) { 2487 2523 netdev_info(priv->dev, "Enabling Safety 
Features\n"); 2488 - priv->hw->mac->safety_feat_config(priv->ioaddr, 2489 - priv->dma_cap.asp); 2524 + stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); 2490 2525 } else { 2491 2526 netdev_info(priv->dev, "No Safety Features support found\n"); 2492 2527 } ··· 2518 2559 } 2519 2560 2520 2561 /* Copy the MAC addr into the HW */ 2521 - priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); 2562 + stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 2522 2563 2523 2564 /* PS and related bits will be programmed according to the speed */ 2524 2565 if (priv->hw->pcs) { ··· 2534 2575 } 2535 2576 2536 2577 /* Initialize the MAC Core */ 2537 - priv->hw->mac->core_init(priv->hw, dev); 2578 + stmmac_core_init(priv, priv->hw, dev); 2538 2579 2539 2580 /* Initialize MTL*/ 2540 2581 if (priv->synopsys_id >= DWMAC_CORE_4_00) ··· 2544 2585 if (priv->synopsys_id >= DWMAC_CORE_5_10) 2545 2586 stmmac_safety_feat_configuration(priv); 2546 2587 2547 - ret = priv->hw->mac->rx_ipc(priv->hw); 2588 + ret = stmmac_rx_ipc(priv, priv->hw); 2548 2589 if (!ret) { 2549 2590 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 2550 2591 priv->plat->rx_coe = STMMAC_RX_COE_NONE; ··· 2552 2593 } 2553 2594 2554 2595 /* Enable the MAC Rx/Tx */ 2555 - priv->hw->mac->set_mac(priv->ioaddr, true); 2596 + stmmac_mac_set(priv, priv->ioaddr, true); 2556 2597 2557 2598 /* Set the HW DMA mode and the COE */ 2558 2599 stmmac_dma_operation_mode(priv); ··· 2582 2623 2583 2624 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; 2584 2625 2585 - if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { 2586 - priv->rx_riwt = MAX_DMA_RIWT; 2587 - priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt); 2626 + if (priv->use_riwt) { 2627 + ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt); 2628 + if (!ret) 2629 + priv->rx_riwt = MAX_DMA_RIWT; 2588 2630 } 2589 2631 2590 - if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) 2591 - priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, 
priv->hw->ps, 0); 2632 + if (priv->hw->pcs) 2633 + stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0); 2592 2634 2593 2635 /* set TX and RX rings length */ 2594 2636 stmmac_set_rings_length(priv); ··· 2597 2637 /* Enable TSO */ 2598 2638 if (priv->tso) { 2599 2639 for (chan = 0; chan < tx_cnt; chan++) 2600 - priv->hw->dma->enable_tso(priv->ioaddr, 1, chan); 2640 + stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 2601 2641 } 2602 2642 2603 2643 return 0; ··· 2768 2808 free_dma_desc_resources(priv); 2769 2809 2770 2810 /* Disable the MAC Rx/Tx */ 2771 - priv->hw->mac->set_mac(priv->ioaddr, false); 2811 + stmmac_mac_set(priv, priv->ioaddr, false); 2772 2812 2773 2813 netif_carrier_off(dev); 2774 2814 ··· 2811 2851 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 2812 2852 TSO_MAX_BUFF_SIZE : tmp_len; 2813 2853 2814 - priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, 2815 - 0, 1, 2816 - (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 2817 - 0, 0); 2854 + stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 2855 + 0, 1, 2856 + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 2857 + 0, 0); 2818 2858 2819 2859 tmp_len -= TSO_MAX_BUFF_SIZE; 2820 2860 } ··· 2886 2926 /* set new MSS value if needed */ 2887 2927 if (mss != tx_q->mss) { 2888 2928 mss_desc = tx_q->dma_tx + tx_q->cur_tx; 2889 - priv->hw->desc->set_mss(mss_desc, mss); 2929 + stmmac_set_mss(priv, mss_desc, mss); 2890 2930 tx_q->mss = mss; 2891 2931 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2892 2932 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); ··· 2972 3012 STMMAC_COAL_TIMER(priv->tx_coal_timer)); 2973 3013 } else { 2974 3014 priv->tx_count_frames = 0; 2975 - priv->hw->desc->set_tx_ic(desc); 3015 + stmmac_set_tx_ic(priv, desc); 2976 3016 priv->xstats.tx_set_ic_bit++; 2977 3017 } 2978 3018 ··· 2982 3022 priv->hwts_tx_en)) { 2983 3023 /* declare that device is doing timestamping */ 2984 3024 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2985 - priv->hw->desc->enable_tx_timestamp(first); 3025 + 
stmmac_enable_tx_timestamp(priv, first); 2986 3026 } 2987 3027 2988 3028 /* Complete the first descriptor before granting the DMA */ 2989 - priv->hw->desc->prepare_tso_tx_desc(first, 1, 3029 + stmmac_prepare_tso_tx_desc(priv, first, 1, 2990 3030 proto_hdr_len, 2991 3031 pay_len, 2992 3032 1, tx_q->tx_skbuff_dma[first_entry].last_segment, ··· 3000 3040 * sure that MSS's own bit is the last thing written. 3001 3041 */ 3002 3042 dma_wmb(); 3003 - priv->hw->desc->set_tx_owner(mss_desc); 3043 + stmmac_set_tx_owner(priv, mss_desc); 3004 3044 } 3005 3045 3006 3046 /* The own bit must be the latest setting done when prepare the ··· 3014 3054 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3015 3055 tx_q->cur_tx, first, nfrags); 3016 3056 3017 - priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE, 3018 - 0); 3057 + stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); 3019 3058 3020 3059 pr_info(">>> frame to be transmitted: "); 3021 3060 print_pkt(skb->data, skb_headlen(skb)); ··· 3022 3063 3023 3064 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3024 3065 3025 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, 3026 - queue); 3066 + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3027 3067 3028 3068 return NETDEV_TX_OK; 3029 3069 ··· 3094 3136 enh_desc = priv->plat->enh_desc; 3095 3137 /* To program the descriptors according to the size of the frame */ 3096 3138 if (enh_desc) 3097 - is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); 3139 + is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 3098 3140 3099 3141 if (unlikely(is_jumbo) && likely(priv->synopsys_id < 3100 3142 DWMAC_CORE_4_00)) { 3101 - entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion); 3143 + entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 3102 3144 if (unlikely(entry < 0)) 3103 3145 goto dma_map_err; 3104 3146 } ··· 3132 3174 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 3133 
3175 3134 3176 /* Prepare the descriptor and set the own bit too */ 3135 - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, 3136 - priv->mode, 1, last_segment, 3137 - skb->len); 3177 + stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 3178 + priv->mode, 1, last_segment, skb->len); 3138 3179 } 3139 3180 3140 3181 /* Only the last descriptor gets to point to the skb. */ ··· 3160 3203 else 3161 3204 tx_head = (void *)tx_q->dma_tx; 3162 3205 3163 - priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); 3206 + stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); 3164 3207 3165 3208 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 3166 3209 print_pkt(skb->data, skb->len); ··· 3185 3228 STMMAC_COAL_TIMER(priv->tx_coal_timer)); 3186 3229 } else { 3187 3230 priv->tx_count_frames = 0; 3188 - priv->hw->desc->set_tx_ic(desc); 3231 + stmmac_set_tx_ic(priv, desc); 3189 3232 priv->xstats.tx_set_ic_bit++; 3190 3233 } 3191 3234 ··· 3216 3259 priv->hwts_tx_en)) { 3217 3260 /* declare that device is doing timestamping */ 3218 3261 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3219 - priv->hw->desc->enable_tx_timestamp(first); 3262 + stmmac_enable_tx_timestamp(priv, first); 3220 3263 } 3221 3264 3222 3265 /* Prepare the first descriptor setting the OWN bit too */ 3223 - priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, 3224 - csum_insertion, priv->mode, 1, 3225 - last_segment, skb->len); 3266 + stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 3267 + csum_insertion, priv->mode, 1, last_segment, 3268 + skb->len); 3226 3269 3227 3270 /* The own bit must be the latest setting done when prepare the 3228 3271 * descriptor and then barrier is needed to make sure that ··· 3234 3277 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3235 3278 3236 3279 if (priv->synopsys_id < DWMAC_CORE_4_00) 3237 - priv->hw->dma->enable_dma_transmission(priv->ioaddr); 3280 + stmmac_enable_dma_transmission(priv, priv->ioaddr); 3238 3281 else 3239 - 
priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, 3240 - queue); 3282 + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, 3283 + queue); 3241 3284 3242 3285 return NETDEV_TX_OK; 3243 3286 ··· 3327 3370 } else { 3328 3371 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); 3329 3372 } 3330 - if (priv->hw->mode->refill_desc3) 3331 - priv->hw->mode->refill_desc3(rx_q, p); 3373 + 3374 + stmmac_refill_desc3(priv, rx_q, p); 3332 3375 3333 3376 if (rx_q->rx_zeroc_thresh > 0) 3334 3377 rx_q->rx_zeroc_thresh--; ··· 3339 3382 dma_wmb(); 3340 3383 3341 3384 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) 3342 - priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0); 3385 + stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0); 3343 3386 else 3344 - priv->hw->desc->set_rx_owner(p); 3387 + stmmac_set_rx_owner(priv, p); 3345 3388 3346 3389 dma_wmb(); 3347 3390 ··· 3375 3418 else 3376 3419 rx_head = (void *)rx_q->dma_rx; 3377 3420 3378 - priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); 3421 + stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); 3379 3422 } 3380 3423 while (count < limit) { 3381 3424 int status; ··· 3388 3431 p = rx_q->dma_rx + entry; 3389 3432 3390 3433 /* read the status of the incoming frame */ 3391 - status = priv->hw->desc->rx_status(&priv->dev->stats, 3392 - &priv->xstats, p); 3434 + status = stmmac_rx_status(priv, &priv->dev->stats, 3435 + &priv->xstats, p); 3393 3436 /* check if managed by the DMA otherwise go ahead */ 3394 3437 if (unlikely(status & dma_own)) 3395 3438 break; ··· 3406 3449 3407 3450 prefetch(np); 3408 3451 3409 - if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) 3410 - priv->hw->desc->rx_extended_status(&priv->dev->stats, 3411 - &priv->xstats, 3412 - rx_q->dma_erx + 3413 - entry); 3452 + if (priv->extend_desc) 3453 + stmmac_rx_extended_status(priv, &priv->dev->stats, 3454 + &priv->xstats, rx_q->dma_erx + entry); 3414 3455 if (unlikely(status == discard_frame)) { 3415 3456 
priv->dev->stats.rx_errors++; 3416 3457 if (priv->hwts_rx_en && !priv->extend_desc) { ··· 3434 3479 else 3435 3480 des = le32_to_cpu(p->des2); 3436 3481 3437 - frame_len = priv->hw->desc->get_rx_frame_len(p, coe); 3482 + frame_len = stmmac_get_rx_frame_len(priv, p, coe); 3438 3483 3439 3484 /* If frame length is greater than skb buffer size 3440 3485 * (preallocated during init) then the packet is ··· 3571 3616 work_done = stmmac_rx(priv, budget, rx_q->queue_index); 3572 3617 if (work_done < budget) { 3573 3618 napi_complete_done(napi, work_done); 3574 - stmmac_enable_dma_irq(priv, chan); 3619 + stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3575 3620 } 3576 3621 return work_done; 3577 3622 } ··· 3604 3649 { 3605 3650 struct stmmac_priv *priv = netdev_priv(dev); 3606 3651 3607 - priv->hw->mac->set_filter(priv->hw, dev); 3652 + stmmac_set_filter(priv, priv->hw, dev); 3608 3653 } 3609 3654 3610 3655 /** ··· 3677 3722 /* No check needed because rx_coe has been set before and it will be 3678 3723 * fixed in case of issue. 
3679 3724 */ 3680 - priv->hw->mac->rx_ipc(priv->hw); 3725 + stmmac_rx_ipc(priv, priv->hw); 3681 3726 3682 3727 return 0; 3683 3728 } ··· 3721 3766 3722 3767 /* To handle GMAC own interrupts */ 3723 3768 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { 3724 - int status = priv->hw->mac->host_irq_status(priv->hw, 3725 - &priv->xstats); 3769 + int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 3726 3770 3727 3771 if (unlikely(status)) { 3728 3772 /* For LPI we need to save the tx status */ ··· 3736 3782 struct stmmac_rx_queue *rx_q = 3737 3783 &priv->rx_queue[queue]; 3738 3784 3739 - status |= 3740 - priv->hw->mac->host_mtl_irq_status(priv->hw, 3741 - queue); 3785 + status |= stmmac_host_mtl_irq_status(priv, 3786 + priv->hw, queue); 3742 3787 3743 - if (status & CORE_IRQ_MTL_RX_OVERFLOW && 3744 - priv->hw->dma->set_rx_tail_ptr) 3745 - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, 3746 - rx_q->rx_tail_addr, 3747 - queue); 3788 + if (status & CORE_IRQ_MTL_RX_OVERFLOW) 3789 + stmmac_set_rx_tail_ptr(priv, 3790 + priv->ioaddr, 3791 + rx_q->rx_tail_addr, 3792 + queue); 3748 3793 } 3749 3794 } 3750 3795 ··· 3817 3864 if (ret) 3818 3865 return ret; 3819 3866 3820 - priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0); 3867 + stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 3821 3868 3822 3869 return ret; 3823 3870 } ··· 4406 4453 4407 4454 stmmac_stop_all_dma(priv); 4408 4455 4409 - priv->hw->mac->set_mac(priv->ioaddr, false); 4456 + stmmac_mac_set(priv, priv->ioaddr, false); 4410 4457 netif_carrier_off(ndev); 4411 4458 unregister_netdev(ndev); 4412 4459 if (priv->plat->stmmac_rst) ··· 4455 4502 4456 4503 /* Enable Power down mode by programming the PMT regs */ 4457 4504 if (device_may_wakeup(priv->device)) { 4458 - priv->hw->mac->pmt(priv->hw, priv->wolopts); 4505 + stmmac_pmt(priv, priv->hw, priv->wolopts); 4459 4506 priv->irq_wake = 1; 4460 4507 } else { 4461 - priv->hw->mac->set_mac(priv->ioaddr, false); 4508 + stmmac_mac_set(priv, 
priv->ioaddr, false); 4462 4509 pinctrl_pm_select_sleep_state(priv->device); 4463 4510 /* Disable clock in case of PWM is off */ 4464 4511 clk_disable(priv->plat->pclk); ··· 4522 4569 */ 4523 4570 if (device_may_wakeup(priv->device)) { 4524 4571 spin_lock_irqsave(&priv->lock, flags); 4525 - priv->hw->mac->pmt(priv->hw, 0); 4572 + stmmac_pmt(priv, priv->hw, 0); 4526 4573 spin_unlock_irqrestore(&priv->lock, flags); 4527 4574 priv->irq_wake = 0; 4528 4575 } else {
+5 -13
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
··· 49 49 addend = neg_adj ? (addend - diff) : (addend + diff); 50 50 51 51 spin_lock_irqsave(&priv->ptp_lock, flags); 52 - 53 - priv->hw->ptp->config_addend(priv->ptpaddr, addend); 54 - 52 + stmmac_config_addend(priv, priv->ptpaddr, addend); 55 53 spin_unlock_irqrestore(&priv->ptp_lock, flags); 56 54 57 55 return 0; ··· 82 84 nsec = reminder; 83 85 84 86 spin_lock_irqsave(&priv->ptp_lock, flags); 85 - 86 - priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj, 87 - priv->plat->has_gmac4); 88 - 87 + stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, 88 + priv->plat->has_gmac4); 89 89 spin_unlock_irqrestore(&priv->ptp_lock, flags); 90 90 91 91 return 0; ··· 106 110 u64 ns; 107 111 108 112 spin_lock_irqsave(&priv->ptp_lock, flags); 109 - 110 - ns = priv->hw->ptp->get_systime(priv->ptpaddr); 111 - 113 + stmmac_get_systime(priv, priv->ptpaddr, &ns); 112 114 spin_unlock_irqrestore(&priv->ptp_lock, flags); 113 115 114 116 *ts = ns_to_timespec64(ns); ··· 131 137 unsigned long flags; 132 138 133 139 spin_lock_irqsave(&priv->ptp_lock, flags); 134 - 135 - priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec); 136 - 140 + stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec); 137 141 spin_unlock_irqrestore(&priv->ptp_lock, flags); 138 142 139 143 return 0;