Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux-can-next-for-5.5-20191111' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next

Marc Kleine-Budde says:

====================
pull-request: can-next 2019-10-07

this is a pull request for net-next/master consisting of 32 patches.

The first patch is by Gustavo A. R. Silva and removes unused code in the
generic CAN infrastructure.

The next three patches target the mcp251x driver. The one by Andy
Shevchenko removes the legacy platform data support from the driver. The
other two are by Timo Schlüßler and reset the device only when needed,
to prevent glitches on the output when GPIO support is added.

I'm contributing two patches fixing checkpatch warnings in the
c_can_platform and peak_canfd driver.

Stephane Grosjean's patch for the peak_canfd driver adds hw timestamps
support in rx skbs.

The next three patches target the xilinx_can driver. One patch by me to
fix checkpatch warnings, one patch by Anssi Hannula to avoid non-requested
bus error frames, and a patch by YueHaibing that switches the
driver to devm_platform_ioremap_resource().

Pankaj Sharma contributes two patches for the m_can driver, the first
one adds support for one-shot mode, the other adds support for handling
arbitration errors.

Followed by four patches by YueHaibing, switching the grcan, ifi, rcar,
and sun4i drivers to devm_platform_ioremap_resource().

I'm contributing cleanup patches for the rx-offload helper, while Joakim
Zhang's patch prepares the rx-offload helper for CAN-FD support. The rx
offload users flexcan and ti_hecc are converted accordingly.

The remaining twelve patches target the flexcan driver. First Joakim
Zhang switches the driver to devm_platform_ioremap_resource(). The
remaining eleven patches are by me and clean up and abstract the access of
the iflag1 and iflag2 registers, both for RX and TX mailboxes. This is a
preparation for the upcoming CAN-FD support.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+360 -277
+5 -4
arch/arm/mach-pxa/icontrol.c
··· 12 12 13 13 #include <linux/irq.h> 14 14 #include <linux/platform_device.h> 15 + #include <linux/property.h> 15 16 #include <linux/gpio.h> 16 17 17 18 #include <asm/mach-types.h> ··· 23 22 24 23 #include <linux/spi/spi.h> 25 24 #include <linux/spi/pxa2xx_spi.h> 26 - #include <linux/can/platform/mcp251x.h> 27 25 #include <linux/regulator/machine.h> 28 26 29 27 #include "generic.h" ··· 69 69 .gpio_cs = ICONTROL_MCP251x_nCS4 70 70 }; 71 71 72 - static struct mcp251x_platform_data mcp251x_info = { 73 - .oscillator_frequency = 16E6, 72 + static const struct property_entry mcp251x_properties[] = { 73 + PROPERTY_ENTRY_U32("clock-frequency", 16000000), 74 + {} 74 75 }; 75 76 76 77 static struct spi_board_info mcp251x_board_info[] = { ··· 80 79 .max_speed_hz = 6500000, 81 80 .bus_num = 3, 82 81 .chip_select = 0, 83 - .platform_data = &mcp251x_info, 82 + .properties = mcp251x_properties, 84 83 .controller_data = &mcp251x_chip_info1, 85 84 .irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ1) 86 85 },
+5 -4
arch/arm/mach-pxa/zeus.c
··· 13 13 #include <linux/leds.h> 14 14 #include <linux/irq.h> 15 15 #include <linux/pm.h> 16 + #include <linux/property.h> 16 17 #include <linux/gpio.h> 17 18 #include <linux/gpio/machine.h> 18 19 #include <linux/serial_8250.h> ··· 28 27 #include <linux/platform_data/i2c-pxa.h> 29 28 #include <linux/platform_data/pca953x.h> 30 29 #include <linux/apm-emulation.h> 31 - #include <linux/can/platform/mcp251x.h> 32 30 #include <linux/regulator/fixed.h> 33 31 #include <linux/regulator/machine.h> 34 32 ··· 428 428 }, 429 429 }; 430 430 431 - static struct mcp251x_platform_data zeus_mcp2515_pdata = { 432 - .oscillator_frequency = 16*1000*1000, 431 + static const struct property_entry mcp251x_properties[] = { 432 + PROPERTY_ENTRY_U32("clock-frequency", 16000000), 433 + {} 433 434 }; 434 435 435 436 static struct spi_board_info zeus_spi_board_info[] = { 436 437 [0] = { 437 438 .modalias = "mcp2515", 438 - .platform_data = &zeus_mcp2515_pdata, 439 + .properties = mcp251x_properties, 439 440 .irq = PXA_GPIO_TO_IRQ(ZEUS_CAN_GPIO), 440 441 .max_speed_hz = 1*1000*1000, 441 442 .bus_num = 3,
+11 -10
drivers/net/can/c_can/c_can_platform.c
··· 39 39 40 40 #include "c_can.h" 41 41 42 - #define DCAN_RAM_INIT_BIT (1 << 3) 42 + #define DCAN_RAM_INIT_BIT BIT(3) 43 + 43 44 static DEFINE_SPINLOCK(raminit_lock); 44 - /* 45 - * 16-bit c_can registers can be arranged differently in the memory 45 + 46 + /* 16-bit c_can registers can be arranged differently in the memory 46 47 * architecture of different implementations. For example: 16-bit 47 48 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. 48 49 * Handle the same by providing a common read/write interface. ··· 55 54 } 56 55 57 56 static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv, 58 - enum reg index, u16 val) 57 + enum reg index, u16 val) 59 58 { 60 59 writew(val, priv->base + priv->regs[index]); 61 60 } ··· 67 66 } 68 67 69 68 static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv, 70 - enum reg index, u16 val) 69 + enum reg index, u16 val) 71 70 { 72 71 writew(val, priv->base + 2 * priv->regs[index]); 73 72 } ··· 145 144 u32 val; 146 145 147 146 val = priv->read_reg(priv, index); 148 - val |= ((u32) priv->read_reg(priv, index + 1)) << 16; 147 + val |= ((u32)priv->read_reg(priv, index + 1)) << 16; 149 148 150 149 return val; 151 150 } 152 151 153 - static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index, 154 - u32 val) 152 + static void c_can_plat_write_reg32(const struct c_can_priv *priv, 153 + enum reg index, u32 val) 155 154 { 156 155 priv->write_reg(priv, index + 1, val >> 16); 157 156 priv->write_reg(priv, index, val); ··· 162 161 return readl(priv->base + priv->regs[index]); 163 162 } 164 163 165 - static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index, 166 - u32 val) 164 + static void d_can_plat_write_reg32(const struct c_can_priv *priv, 165 + enum reg index, u32 val) 167 166 { 168 167 writel(val, priv->base + priv->regs[index]); 169 168 }
+2 -3
drivers/net/can/dev.c
··· 553 553 554 554 /* send restart message upstream */ 555 555 skb = alloc_can_err_skb(dev, &cf); 556 - if (!skb) { 557 - err = -ENOMEM; 556 + if (!skb) 558 557 goto restart; 559 - } 558 + 560 559 cf->can_id |= CAN_ERR_RESTARTED; 561 560 562 561 netif_rx(skb);
+75 -56
drivers/net/can/flexcan.c
··· 142 142 #define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 143 143 #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 144 144 #define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1) 145 - #define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f) 145 + #define FLEXCAN_IFLAG_MB(x) BIT_ULL(x) 146 146 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) 147 147 #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) 148 148 #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) ··· 277 277 u8 mb_size; 278 278 u8 clk_src; /* clock source of CAN Protocol Engine */ 279 279 280 + u64 rx_mask; 281 + u64 tx_mask; 280 282 u32 reg_ctrl_default; 281 - u32 reg_imask1_default; 282 - u32 reg_imask2_default; 283 283 284 284 struct clk *clk_ipg; 285 285 struct clk *clk_per; ··· 743 743 u32 timestamp; 744 744 int err; 745 745 746 - timestamp = priv->read(&regs->timer) << 16; 747 - 748 746 flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; 749 747 if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { 750 748 tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ? 
··· 762 764 if (likely(new_state == priv->can.state)) 763 765 return; 764 766 767 + timestamp = priv->read(&regs->timer) << 16; 768 + 765 769 skb = alloc_can_err_skb(dev, &cf); 766 770 if (unlikely(!skb)) 767 771 return; ··· 778 778 dev->stats.rx_fifo_errors++; 779 779 } 780 780 781 + static inline u64 flexcan_read64_mask(struct flexcan_priv *priv, void __iomem *addr, u64 mask) 782 + { 783 + u64 reg = 0; 784 + 785 + if (upper_32_bits(mask)) 786 + reg = (u64)priv->read(addr - 4) << 32; 787 + if (lower_32_bits(mask)) 788 + reg |= priv->read(addr); 789 + 790 + return reg & mask; 791 + } 792 + 793 + static inline void flexcan_write64(struct flexcan_priv *priv, u64 val, void __iomem *addr) 794 + { 795 + if (upper_32_bits(val)) 796 + priv->write(upper_32_bits(val), addr - 4); 797 + if (lower_32_bits(val)) 798 + priv->write(lower_32_bits(val), addr); 799 + } 800 + 801 + static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) 802 + { 803 + return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->rx_mask); 804 + } 805 + 806 + static inline u64 flexcan_read_reg_iflag_tx(struct flexcan_priv *priv) 807 + { 808 + return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->tx_mask); 809 + } 810 + 781 811 static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) 782 812 { 783 813 return container_of(offload, struct flexcan_priv, offload); 784 814 } 785 815 786 - static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, 787 - struct can_frame *cf, 788 - u32 *timestamp, unsigned int n) 816 + static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, 817 + unsigned int n, u32 *timestamp, 818 + bool drop) 789 819 { 790 820 struct flexcan_priv *priv = rx_offload_to_priv(offload); 791 821 struct flexcan_regs __iomem *regs = priv->regs; 792 822 struct flexcan_mb __iomem *mb; 823 + struct sk_buff *skb; 824 + struct can_frame *cf; 793 825 u32 reg_ctrl, reg_id, reg_iflag1; 794 826 int i; 827 + 828 + if 
(unlikely(drop)) { 829 + skb = ERR_PTR(-ENOBUFS); 830 + goto mark_as_read; 831 + } 795 832 796 833 mb = flexcan_get_mb(priv, n); 797 834 ··· 843 806 code = reg_ctrl & FLEXCAN_MB_CODE_MASK; 844 807 if ((code != FLEXCAN_MB_CODE_RX_FULL) && 845 808 (code != FLEXCAN_MB_CODE_RX_OVERRUN)) 846 - return 0; 809 + return NULL; 847 810 848 811 if (code == FLEXCAN_MB_CODE_RX_OVERRUN) { 849 812 /* This MB was overrun, we lost data */ ··· 853 816 } else { 854 817 reg_iflag1 = priv->read(&regs->iflag1); 855 818 if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE)) 856 - return 0; 819 + return NULL; 857 820 858 821 reg_ctrl = priv->read(&mb->can_ctrl); 822 + } 823 + 824 + skb = alloc_can_skb(offload->dev, &cf); 825 + if (!skb) { 826 + skb = ERR_PTR(-ENOMEM); 827 + goto mark_as_read; 859 828 } 860 829 861 830 /* increase timstamp to full 32 bit */ ··· 882 839 *(__be32 *)(cf->data + i) = data; 883 840 } 884 841 885 - /* mark as read */ 886 - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 887 - /* Clear IRQ */ 888 - if (n < 32) 889 - priv->write(BIT(n), &regs->iflag1); 890 - else 891 - priv->write(BIT(n - 32), &regs->iflag2); 892 - } else { 842 + mark_as_read: 843 + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) 844 + flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), &regs->iflag1); 845 + else 893 846 priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); 894 - } 895 847 896 848 /* Read the Free Running Timer. 
It is optional but recommended 897 849 * to unlock Mailbox as soon as possible and make it available ··· 894 856 */ 895 857 priv->read(&regs->timer); 896 858 897 - return 1; 898 - } 899 - 900 - 901 - static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) 902 - { 903 - struct flexcan_regs __iomem *regs = priv->regs; 904 - u32 iflag1, iflag2; 905 - 906 - iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default & 907 - ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 908 - iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default; 909 - 910 - return (u64)iflag2 << 32 | iflag1; 859 + return skb; 911 860 } 912 861 913 862 static irqreturn_t flexcan_irq(int irq, void *dev_id) ··· 904 879 struct flexcan_priv *priv = netdev_priv(dev); 905 880 struct flexcan_regs __iomem *regs = priv->regs; 906 881 irqreturn_t handled = IRQ_NONE; 907 - u32 reg_iflag2, reg_esr; 882 + u64 reg_iflag_tx; 883 + u32 reg_esr; 908 884 enum can_state last_state = priv->can.state; 909 885 910 886 /* reception interrupt */ 911 887 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 912 - u64 reg_iflag; 888 + u64 reg_iflag_rx; 913 889 int ret; 914 890 915 - while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) { 891 + while ((reg_iflag_rx = flexcan_read_reg_iflag_rx(priv))) { 916 892 handled = IRQ_HANDLED; 917 893 ret = can_rx_offload_irq_offload_timestamp(&priv->offload, 918 - reg_iflag); 894 + reg_iflag_rx); 919 895 if (!ret) 920 896 break; 921 897 } ··· 939 913 } 940 914 } 941 915 942 - reg_iflag2 = priv->read(&regs->iflag2); 916 + reg_iflag_tx = flexcan_read_reg_iflag_tx(priv); 943 917 944 918 /* transmission complete interrupt */ 945 - if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { 919 + if (reg_iflag_tx & priv->tx_mask) { 946 920 u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl); 947 921 948 922 handled = IRQ_HANDLED; ··· 954 928 /* after sending a RTR frame MB is in RX mode */ 955 929 priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, 956 930 &priv->tx_mb->can_ctrl); 957 
- priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag2); 931 + flexcan_write64(priv, priv->tx_mask, &regs->iflag1); 958 932 netif_wake_queue(dev); 959 933 } 960 934 ··· 1066 1040 struct flexcan_priv *priv = netdev_priv(dev); 1067 1041 struct flexcan_regs __iomem *regs = priv->regs; 1068 1042 u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr; 1043 + u64 reg_imask; 1069 1044 int err, i; 1070 1045 struct flexcan_mb __iomem *mb; 1071 1046 ··· 1241 1214 /* enable interrupts atomically */ 1242 1215 disable_irq(dev->irq); 1243 1216 priv->write(priv->reg_ctrl_default, &regs->ctrl); 1244 - priv->write(priv->reg_imask1_default, &regs->imask1); 1245 - priv->write(priv->reg_imask2_default, &regs->imask2); 1217 + reg_imask = priv->rx_mask | priv->tx_mask; 1218 + priv->write(upper_32_bits(reg_imask), &regs->imask2); 1219 + priv->write(lower_32_bits(reg_imask), &regs->imask1); 1246 1220 enable_irq(dev->irq); 1247 1221 1248 1222 /* print chip status */ ··· 1311 1283 flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO); 1312 1284 priv->tx_mb_idx = priv->mb_count - 1; 1313 1285 priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx); 1314 - 1315 - priv->reg_imask1_default = 0; 1316 - priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 1286 + priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); 1317 1287 1318 1288 priv->offload.mailbox_read = flexcan_mailbox_read; 1319 1289 1320 1290 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { 1321 - u64 imask; 1322 - 1323 1291 priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; 1324 1292 priv->offload.mb_last = priv->mb_count - 2; 1325 1293 1326 - imask = GENMASK_ULL(priv->offload.mb_last, 1327 - priv->offload.mb_first); 1328 - priv->reg_imask1_default |= imask; 1329 - priv->reg_imask2_default |= imask >> 32; 1330 - 1294 + priv->rx_mask = GENMASK_ULL(priv->offload.mb_last, 1295 + priv->offload.mb_first); 1331 1296 err = can_rx_offload_add_timestamp(dev, &priv->offload); 1332 1297 } else { 1333 - 
priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | 1298 + priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | 1334 1299 FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; 1335 1300 err = can_rx_offload_add_fifo(dev, &priv->offload, 1336 1301 FLEXCAN_NAPI_WEIGHT); ··· 1555 1534 struct net_device *dev; 1556 1535 struct flexcan_priv *priv; 1557 1536 struct regulator *reg_xceiver; 1558 - struct resource *mem; 1559 1537 struct clk *clk_ipg = NULL, *clk_per = NULL; 1560 1538 struct flexcan_regs __iomem *regs; 1561 1539 int err, irq; ··· 1589 1569 clock_freq = clk_get_rate(clk_per); 1590 1570 } 1591 1571 1592 - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1593 1572 irq = platform_get_irq(pdev, 0); 1594 1573 if (irq <= 0) 1595 1574 return -ENODEV; 1596 1575 1597 - regs = devm_ioremap_resource(&pdev->dev, mem); 1576 + regs = devm_platform_ioremap_resource(pdev, 0); 1598 1577 if (IS_ERR(regs)) 1599 1578 return PTR_ERR(regs); 1600 1579
+1 -3
drivers/net/can/grcan.c
··· 1652 1652 static int grcan_probe(struct platform_device *ofdev) 1653 1653 { 1654 1654 struct device_node *np = ofdev->dev.of_node; 1655 - struct resource *res; 1656 1655 u32 sysid, ambafreq; 1657 1656 int irq, err; 1658 1657 void __iomem *base; ··· 1671 1672 goto exit_error; 1672 1673 } 1673 1674 1674 - res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); 1675 - base = devm_ioremap_resource(&ofdev->dev, res); 1675 + base = devm_platform_ioremap_resource(ofdev, 0); 1676 1676 if (IS_ERR(base)) { 1677 1677 err = PTR_ERR(base); 1678 1678 goto exit_error;
+1 -3
drivers/net/can/ifi_canfd/ifi_canfd.c
··· 942 942 struct device *dev = &pdev->dev; 943 943 struct net_device *ndev; 944 944 struct ifi_canfd_priv *priv; 945 - struct resource *res; 946 945 void __iomem *addr; 947 946 int irq, ret; 948 947 u32 id, rev; 949 948 950 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 951 - addr = devm_ioremap_resource(dev, res); 949 + addr = devm_platform_ioremap_resource(pdev, 0); 952 950 irq = platform_get_irq(pdev, 0); 953 951 if (IS_ERR(addr) || irq < 0) 954 952 return -EINVAL;
+51 -3
drivers/net/can/m_can/m_can.c
··· 123 123 #define CCCR_CME_CANFD_BRS 0x2 124 124 #define CCCR_TXP BIT(14) 125 125 #define CCCR_TEST BIT(7) 126 + #define CCCR_DAR BIT(6) 126 127 #define CCCR_MON BIT(5) 127 128 #define CCCR_CSR BIT(4) 128 129 #define CCCR_CSA BIT(3) ··· 778 777 return psr && (psr != LEC_UNUSED); 779 778 } 780 779 780 + static inline bool m_can_is_protocol_err(u32 irqstatus) 781 + { 782 + return irqstatus & IR_ERR_LEC_31X; 783 + } 784 + 785 + static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus) 786 + { 787 + struct net_device_stats *stats = &dev->stats; 788 + struct m_can_classdev *cdev = netdev_priv(dev); 789 + struct can_frame *cf; 790 + struct sk_buff *skb; 791 + 792 + /* propagate the error condition to the CAN stack */ 793 + skb = alloc_can_err_skb(dev, &cf); 794 + 795 + /* update tx error stats since there is protocol error */ 796 + stats->tx_errors++; 797 + 798 + /* update arbitration lost status */ 799 + if (cdev->version >= 31 && (irqstatus & IR_PEA)) { 800 + netdev_dbg(dev, "Protocol error in Arbitration fail\n"); 801 + cdev->can.can_stats.arbitration_lost++; 802 + if (skb) { 803 + cf->can_id |= CAN_ERR_LOSTARB; 804 + cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC; 805 + } 806 + } 807 + 808 + if (unlikely(!skb)) { 809 + netdev_dbg(dev, "allocation of skb failed\n"); 810 + return 0; 811 + } 812 + netif_receive_skb(skb); 813 + 814 + return 1; 815 + } 816 + 781 817 static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, 782 818 u32 psr) 783 819 { ··· 828 790 if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && 829 791 is_lec_err(psr)) 830 792 work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED); 793 + 794 + /* handle protocol errors in arbitration phase */ 795 + if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && 796 + m_can_is_protocol_err(irqstatus)) 797 + work_done += m_can_handle_protocol_error(dev, irqstatus); 831 798 832 799 /* other unproccessed error interrupts */ 833 800 m_can_handle_other_err(dev, irqstatus); ··· 
1178 1135 if (cdev->version == 30) { 1179 1136 /* Version 3.0.x */ 1180 1137 1181 - cccr &= ~(CCCR_TEST | CCCR_MON | 1138 + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR | 1182 1139 (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | 1183 1140 (CCCR_CME_MASK << CCCR_CME_SHIFT)); 1184 1141 ··· 1188 1145 } else { 1189 1146 /* Version 3.1.x or 3.2.x */ 1190 1147 cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE | 1191 - CCCR_NISO); 1148 + CCCR_NISO | CCCR_DAR); 1192 1149 1193 1150 /* Only 3.2.x has NISO Bit implemented */ 1194 1151 if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ··· 1207 1164 /* Enable Monitoring (all versions) */ 1208 1165 if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 1209 1166 cccr |= CCCR_MON; 1167 + 1168 + /* Disable Auto Retransmission (all versions) */ 1169 + if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 1170 + cccr |= CCCR_DAR; 1210 1171 1211 1172 /* Write config */ 1212 1173 m_can_write(cdev, M_CAN_CCCR, cccr); ··· 1357 1310 m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 1358 1311 CAN_CTRLMODE_LISTENONLY | 1359 1312 CAN_CTRLMODE_BERR_REPORTING | 1360 - CAN_CTRLMODE_FD; 1313 + CAN_CTRLMODE_FD | 1314 + CAN_CTRLMODE_ONE_SHOT; 1361 1315 1362 1316 /* Set properties depending on M_CAN version */ 1363 1317 switch (m_can_dev->version) {
+19 -6
drivers/net/can/peak_canfd/peak_canfd.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> 2 + /* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> 4 3 * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> 5 4 * 6 5 * Copyright (C) 2016 PEAK System-Technik GmbH ··· 121 122 cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW); 122 123 123 124 cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1, 124 - priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES); 125 + priv->can.ctrlmode & 126 + CAN_CTRLMODE_3_SAMPLES); 125 127 cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1); 126 128 cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1); 127 129 cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1)); ··· 232 232 return pucan_write_cmd(priv); 233 233 } 234 234 235 + static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high) 236 + { 237 + struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); 238 + u64 ts_us; 239 + 240 + ts_us = (u64)le32_to_cpu(ts_high) << 32; 241 + ts_us |= le32_to_cpu(ts_low); 242 + 243 + /* IP core timestamps are µs. 
*/ 244 + hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC); 245 + 246 + return netif_rx(skb); 247 + } 248 + 235 249 /* handle the reception of one CAN frame */ 236 250 static int pucan_handle_can_rx(struct peak_canfd_priv *priv, 237 251 struct pucan_rx_msg *msg) ··· 313 299 stats->rx_bytes += cf->len; 314 300 stats->rx_packets++; 315 301 316 - netif_rx(skb); 302 + pucan_netif_rx(skb, msg->ts_low, msg->ts_high); 317 303 318 304 return 0; 319 305 } ··· 339 325 340 326 /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ 341 327 if (pucan_status_is_rx_barrier(msg)) { 342 - 343 328 if (priv->enable_tx_path) { 344 329 int err = priv->enable_tx_path(priv); 345 330 ··· 406 393 407 394 stats->rx_packets++; 408 395 stats->rx_bytes += cf->can_dlc; 409 - netif_rx(skb); 396 + pucan_netif_rx(skb, msg->ts_low, msg->ts_high); 410 397 411 398 return 0; 412 399 }
+1 -2
drivers/net/can/peak_canfd/peak_canfd_user.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * CAN driver for PEAK System micro-CAN based adapters 2 + /* CAN driver for PEAK System micro-CAN based adapters 4 3 * 5 4 * Copyright (C) 2003-2011 PEAK System-Technik GmbH 6 5 * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+3 -3
drivers/net/can/peak_canfd/peak_pciefd_main.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> 2 + /* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> 4 3 * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> 5 4 * 6 5 * Derived from the PCAN project file driver/src/pcan_pci.c: ··· 840 841 841 842 /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while 842 843 * the probe() function must return a negative errno in case of failure 843 - * (err is unchanged if negative) */ 844 + * (err is unchanged if negative) 845 + */ 844 846 return pcibios_err_to_errno(err); 845 847 } 846 848
+1 -3
drivers/net/can/rcar/rcar_can.c
··· 744 744 { 745 745 struct rcar_can_priv *priv; 746 746 struct net_device *ndev; 747 - struct resource *mem; 748 747 void __iomem *addr; 749 748 u32 clock_select = CLKR_CLKP1; 750 749 int err = -ENODEV; ··· 758 759 goto fail; 759 760 } 760 761 761 - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 762 - addr = devm_ioremap_resource(&pdev->dev, mem); 762 + addr = devm_platform_ioremap_resource(pdev, 0); 763 763 if (IS_ERR(addr)) { 764 764 err = PTR_ERR(addr); 765 765 goto fail;
+1 -3
drivers/net/can/rcar/rcar_canfd.c
··· 1630 1630 1631 1631 static int rcar_canfd_probe(struct platform_device *pdev) 1632 1632 { 1633 - struct resource *mem; 1634 1633 void __iomem *addr; 1635 1634 u32 sts, ch, fcan_freq; 1636 1635 struct rcar_canfd_global *gpriv; ··· 1703 1704 /* CANFD clock is further divided by (1/2) within the IP */ 1704 1705 fcan_freq /= 2; 1705 1706 1706 - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1707 - addr = devm_ioremap_resource(&pdev->dev, mem); 1707 + addr = devm_platform_ioremap_resource(pdev, 0); 1708 1708 if (IS_ERR(addr)) { 1709 1709 err = PTR_ERR(addr); 1710 1710 goto fail_dev;
+46 -76
drivers/net/can/rx-offload.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Copyright (c) 2014 David Jander, Protonic Holland 4 - * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> 2 + /* Copyright (c) 2014 Protonic Holland, 3 + * David Jander 4 + * Copyright (C) 2014-2017 Pengutronix, 5 + * Marc Kleine-Budde <kernel@pengutronix.de> 5 6 */ 6 7 7 8 #include <linux/can/dev.h> ··· 12 11 u32 timestamp; 13 12 }; 14 13 15 - static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb) 14 + static inline struct can_rx_offload_cb * 15 + can_rx_offload_get_cb(struct sk_buff *skb) 16 16 { 17 17 BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb)); 18 18 19 19 return (struct can_rx_offload_cb *)skb->cb; 20 20 } 21 21 22 - static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b) 22 + static inline bool 23 + can_rx_offload_le(struct can_rx_offload *offload, 24 + unsigned int a, unsigned int b) 23 25 { 24 26 if (offload->inc) 25 27 return a <= b; ··· 30 26 return a >= b; 31 27 } 32 28 33 - static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) 29 + static inline unsigned int 30 + can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val) 34 31 { 35 32 if (offload->inc) 36 33 return (*val)++; ··· 41 36 42 37 static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) 43 38 { 44 - struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi); 39 + struct can_rx_offload *offload = container_of(napi, 40 + struct can_rx_offload, 41 + napi); 45 42 struct net_device *dev = offload->dev; 46 43 struct net_device_stats *stats = &dev->stats; 47 44 struct sk_buff *skb; ··· 72 65 return work_done; 73 66 } 74 67 75 - static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, 76 - int (*compare)(struct sk_buff *a, struct sk_buff *b)) 68 + static inline void 69 + __skb_queue_add_sort(struct 
sk_buff_head *head, struct sk_buff *new, 70 + int (*compare)(struct sk_buff *a, struct sk_buff *b)) 77 71 { 78 72 struct sk_buff *pos, *insert = NULL; 79 73 ··· 109 101 cb_a = can_rx_offload_get_cb(a); 110 102 cb_b = can_rx_offload_get_cb(b); 111 103 112 - /* Substract two u32 and return result as int, to keep 104 + /* Subtract two u32 and return result as int, to keep 113 105 * difference steady around the u32 overflow. 114 106 */ 115 107 return cb_b->timestamp - cb_a->timestamp; ··· 139 131 static struct sk_buff * 140 132 can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) 141 133 { 142 - struct sk_buff *skb = NULL, *skb_error = NULL; 134 + struct sk_buff *skb; 143 135 struct can_rx_offload_cb *cb; 144 - struct can_frame *cf; 145 - int ret; 136 + bool drop = false; 137 + u32 timestamp; 146 138 147 - if (likely(skb_queue_len(&offload->skb_queue) < 148 - offload->skb_queue_len_max)) { 149 - skb = alloc_can_skb(offload->dev, &cf); 150 - if (unlikely(!skb)) 151 - skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */ 152 - } else { 153 - skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */ 154 - } 139 + /* If queue is full drop frame */ 140 + if (unlikely(skb_queue_len(&offload->skb_queue) > 141 + offload->skb_queue_len_max)) 142 + drop = true; 155 143 156 - /* If queue is full or skb not available, drop by reading into 157 - * overflow buffer. 158 - */ 159 - if (unlikely(skb_error)) { 160 - struct can_frame cf_overflow; 161 - u32 timestamp; 162 - 163 - ret = offload->mailbox_read(offload, &cf_overflow, 164 - &timestamp, n); 165 - 166 - /* Mailbox was empty. */ 167 - if (unlikely(!ret)) 168 - return NULL; 169 - 170 - /* Mailbox has been read and we're dropping it or 171 - * there was a problem reading the mailbox. 172 - * 173 - * Increment error counters in any case. 174 - */ 175 - offload->dev->stats.rx_dropped++; 176 - offload->dev->stats.rx_fifo_errors++; 177 - 178 - /* There was a problem reading the mailbox, propagate 179 - * error value. 
180 - */ 181 - if (unlikely(ret < 0)) 182 - return ERR_PTR(ret); 183 - 184 - return skb_error; 185 - } 186 - 187 - cb = can_rx_offload_get_cb(skb); 188 - ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); 189 - 144 + skb = offload->mailbox_read(offload, n, &timestamp, drop); 190 145 /* Mailbox was empty. */ 191 - if (unlikely(!ret)) { 192 - kfree_skb(skb); 146 + if (unlikely(!skb)) 193 147 return NULL; 194 - } 195 148 196 - /* There was a problem reading the mailbox, propagate error value. */ 197 - if (unlikely(ret < 0)) { 198 - kfree_skb(skb); 199 - 149 + /* There was a problem reading the mailbox, propagate 150 + * error value. 151 + */ 152 + if (unlikely(IS_ERR(skb))) { 200 153 offload->dev->stats.rx_dropped++; 201 154 offload->dev->stats.rx_fifo_errors++; 202 155 203 - return ERR_PTR(ret); 156 + return skb; 204 157 } 205 158 206 159 /* Mailbox was read. */ 160 + cb = can_rx_offload_get_cb(skb); 161 + cb->timestamp = timestamp; 162 + 207 163 return skb; 208 164 } 209 165 210 - int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending) 166 + int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, 167 + u64 pending) 211 168 { 212 169 struct sk_buff_head skb_queue; 213 170 unsigned int i; ··· 202 229 skb_queue_splice_tail(&skb_queue, &offload->skb_queue); 203 230 spin_unlock_irqrestore(&offload->skb_queue.lock, flags); 204 231 205 - if ((queue_len = skb_queue_len(&offload->skb_queue)) > 206 - (offload->skb_queue_len_max / 8)) 232 + queue_len = skb_queue_len(&offload->skb_queue); 233 + if (queue_len > offload->skb_queue_len_max / 8) 207 234 netdev_dbg(offload->dev, "%s: queue_len=%d\n", 208 235 __func__, queue_len); 209 236 ··· 301 328 } 302 329 EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); 303 330 304 - static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) 331 + static int can_rx_offload_init_queue(struct net_device *dev, 332 + struct can_rx_offload 
*offload, 333 + unsigned int weight) 305 334 { 306 335 offload->dev = dev; 307 336 ··· 312 337 offload->skb_queue_len_max *= 4; 313 338 skb_queue_head_init(&offload->skb_queue); 314 339 315 - can_rx_offload_reset(offload); 316 340 netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight); 317 341 318 342 dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n", ··· 320 346 return 0; 321 347 } 322 348 323 - int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload) 349 + int can_rx_offload_add_timestamp(struct net_device *dev, 350 + struct can_rx_offload *offload) 324 351 { 325 352 unsigned int weight; 326 353 ··· 341 366 } 342 367 EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp); 343 368 344 - int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) 369 + int can_rx_offload_add_fifo(struct net_device *dev, 370 + struct can_rx_offload *offload, unsigned int weight) 345 371 { 346 372 if (!offload->mailbox_read) 347 373 return -EINVAL; ··· 353 377 354 378 void can_rx_offload_enable(struct can_rx_offload *offload) 355 379 { 356 - can_rx_offload_reset(offload); 357 380 napi_enable(&offload->napi); 358 381 } 359 382 EXPORT_SYMBOL_GPL(can_rx_offload_enable); ··· 363 388 skb_queue_purge(&offload->skb_queue); 364 389 } 365 390 EXPORT_SYMBOL_GPL(can_rx_offload_del); 366 - 367 - void can_rx_offload_reset(struct can_rx_offload *offload) 368 - { 369 - } 370 - EXPORT_SYMBOL_GPL(can_rx_offload_reset);
+61 -14
drivers/net/can/spi/mcp251x.c
··· 22 22 #include <linux/can/core.h> 23 23 #include <linux/can/dev.h> 24 24 #include <linux/can/led.h> 25 - #include <linux/can/platform/mcp251x.h> 26 25 #include <linux/clk.h> 27 26 #include <linux/completion.h> 28 27 #include <linux/delay.h> ··· 320 321 mcp251x_spi_trans(spi, 3); 321 322 } 322 323 324 + static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2) 325 + { 326 + struct mcp251x_priv *priv = spi_get_drvdata(spi); 327 + 328 + priv->spi_tx_buf[0] = INSTRUCTION_WRITE; 329 + priv->spi_tx_buf[1] = reg; 330 + priv->spi_tx_buf[2] = v1; 331 + priv->spi_tx_buf[3] = v2; 332 + 333 + mcp251x_spi_trans(spi, 4); 334 + } 335 + 323 336 static void mcp251x_write_bits(struct spi_device *spi, u8 reg, 324 337 u8 mask, u8 val) 325 338 { ··· 466 455 static void mcp251x_hw_sleep(struct spi_device *spi) 467 456 { 468 457 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP); 458 + } 459 + 460 + /* May only be called when device is sleeping! */ 461 + static int mcp251x_hw_wake(struct spi_device *spi) 462 + { 463 + unsigned long timeout; 464 + 465 + /* Force wakeup interrupt to wake device, but don't execute IST */ 466 + disable_irq(spi->irq); 467 + mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF); 468 + 469 + /* Wait for oscillator startup timer after wake up */ 470 + mdelay(MCP251X_OST_DELAY_MS); 471 + 472 + /* Put device into config mode */ 473 + mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF); 474 + 475 + /* Wait for the device to enter config mode */ 476 + timeout = jiffies + HZ; 477 + while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) != 478 + CANCTRL_REQOP_CONF) { 479 + schedule(); 480 + if (time_after(jiffies, timeout)) { 481 + dev_err(&spi->dev, "MCP251x didn't enter in config mode\n"); 482 + return -EBUSY; 483 + } 484 + } 485 + 486 + /* Disable and clear pending interrupts */ 487 + mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00); 488 + enable_irq(spi->irq); 489 + 490 + return 0; 469 491 } 470 492 471 493 static netdev_tx_t 
mcp251x_hard_start_xmit(struct sk_buff *skb, ··· 690 646 mutex_lock(&priv->mcp_lock); 691 647 692 648 /* Disable and clear pending interrupts */ 693 - mcp251x_write_reg(spi, CANINTE, 0x00); 694 - mcp251x_write_reg(spi, CANINTF, 0x00); 649 + mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00); 695 650 696 651 mcp251x_write_reg(spi, TXBCTRL(0), 0); 697 652 mcp251x_clean(net); ··· 758 715 759 716 mutex_lock(&priv->mcp_lock); 760 717 if (priv->after_suspend) { 761 - mcp251x_hw_reset(spi); 762 - mcp251x_setup(net, spi); 718 + if (priv->after_suspend & AFTER_SUSPEND_POWER) { 719 + mcp251x_hw_reset(spi); 720 + mcp251x_setup(net, spi); 721 + } else { 722 + mcp251x_hw_wake(spi); 723 + } 763 724 priv->force_quit = 0; 764 725 if (priv->after_suspend & AFTER_SUSPEND_RESTART) { 765 726 mcp251x_set_normal_mode(spi); ··· 960 913 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); 961 914 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); 962 915 963 - ret = mcp251x_hw_reset(spi); 916 + ret = mcp251x_hw_wake(spi); 964 917 if (ret) 965 918 goto out_free_wq; 966 919 ret = mcp251x_setup(net, spi); ··· 1033 986 static int mcp251x_can_probe(struct spi_device *spi) 1034 987 { 1035 988 const void *match = device_get_match_data(&spi->dev); 1036 - struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev); 1037 989 struct net_device *net; 1038 990 struct mcp251x_priv *priv; 1039 991 struct clk *clk; 1040 - int freq, ret; 992 + u32 freq; 993 + int ret; 1041 994 1042 995 clk = devm_clk_get_optional(&spi->dev, NULL); 1043 996 if (IS_ERR(clk)) 1044 997 return PTR_ERR(clk); 1045 998 1046 999 freq = clk_get_rate(clk); 1047 - if (freq == 0 && pdata) 1048 - freq = pdata->oscillator_frequency; 1000 + if (freq == 0) 1001 + device_property_read_u32(&spi->dev, "clock-frequency", &freq); 1049 1002 1050 1003 /* Sanity check */ 1051 1004 if (freq < 1000000 || freq > 25000000) ··· 1202 1155 1203 1156 if (priv->after_suspend & AFTER_SUSPEND_POWER) 1204 1157 mcp251x_power_enable(priv->power, 1); 
1205 - 1206 - if (priv->after_suspend & AFTER_SUSPEND_UP) { 1158 + if (priv->after_suspend & AFTER_SUSPEND_UP) 1207 1159 mcp251x_power_enable(priv->transceiver, 1); 1160 + 1161 + if (priv->after_suspend & (AFTER_SUSPEND_POWER | AFTER_SUSPEND_UP)) 1208 1162 queue_work(priv->wq, &priv->restart_work); 1209 - } else { 1163 + else 1210 1164 priv->after_suspend = 0; 1211 - } 1212 1165 1213 1166 priv->force_quit = 0; 1214 1167 enable_irq(spi->irq);
+1 -3
drivers/net/can/sun4i_can.c
··· 771 771 static int sun4ican_probe(struct platform_device *pdev) 772 772 { 773 773 struct device_node *np = pdev->dev.of_node; 774 - struct resource *mem; 775 774 struct clk *clk; 776 775 void __iomem *addr; 777 776 int err, irq; ··· 790 791 goto exit; 791 792 } 792 793 793 - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 794 - addr = devm_ioremap_resource(&pdev->dev, mem); 794 + addr = devm_platform_ioremap_resource(pdev, 0); 795 795 if (IS_ERR(addr)) { 796 796 err = -EBUSY; 797 797 goto exit;
+20 -6
drivers/net/can/ti_hecc.c
··· 535 535 return container_of(offload, struct ti_hecc_priv, offload); 536 536 } 537 537 538 - static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload, 539 - struct can_frame *cf, 540 - u32 *timestamp, unsigned int mbxno) 538 + static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload, 539 + unsigned int mbxno, u32 *timestamp, 540 + bool drop) 541 541 { 542 542 struct ti_hecc_priv *priv = rx_offload_to_priv(offload); 543 + struct sk_buff *skb; 544 + struct can_frame *cf; 543 545 u32 data, mbx_mask; 544 - int ret = 1; 545 546 546 547 mbx_mask = BIT(mbxno); 548 + 549 + if (unlikely(drop)) { 550 + skb = ERR_PTR(-ENOBUFS); 551 + goto mark_as_read; 552 + } 553 + 554 + skb = alloc_can_skb(offload->dev, &cf); 555 + if (unlikely(!skb)) { 556 + skb = ERR_PTR(-ENOMEM); 557 + goto mark_as_read; 558 + } 559 + 547 560 data = hecc_read_mbx(priv, mbxno, HECC_CANMID); 548 561 if (data & HECC_CANMID_IDE) 549 562 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; ··· 591 578 */ 592 579 if (unlikely(mbxno == HECC_RX_LAST_MBOX && 593 580 hecc_read(priv, HECC_CANRML) & mbx_mask)) 594 - ret = -ENOBUFS; 581 + skb = ERR_PTR(-ENOBUFS); 595 582 583 + mark_as_read: 596 584 hecc_write(priv, HECC_CANRMP, mbx_mask); 597 585 598 - return ret; 586 + return skb; 599 587 } 600 588 601 589 static int ti_hecc_error(struct net_device *ndev, int int_status,
+53 -49
drivers/net/can/xilinx_can.c
··· 194 194 */ 195 195 struct xcan_priv { 196 196 struct can_priv can; 197 - spinlock_t tx_lock; 197 + spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */ 198 198 unsigned int tx_head; 199 199 unsigned int tx_tail; 200 200 unsigned int tx_max; ··· 400 400 XCAN_SR_CONFIG_MASK; 401 401 if (!is_config_mode) { 402 402 netdev_alert(ndev, 403 - "BUG! Cannot set bittiming - CAN is not in config mode\n"); 403 + "BUG! Cannot set bittiming - CAN is not in config mode\n"); 404 404 return -EPERM; 405 405 } 406 406 ··· 470 470 if (err < 0) 471 471 return err; 472 472 473 - /* Enable interrupts */ 473 + /* Enable interrupts 474 + * 475 + * We enable the ERROR interrupt even with 476 + * CAN_CTRLMODE_BERR_REPORTING disabled as there is no 477 + * dedicated interrupt for a state change to 478 + * ERROR_WARNING/ERROR_PASSIVE. 479 + */ 474 480 ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK | 475 481 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | 476 482 XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | ··· 488 482 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 489 483 490 484 /* Check whether it is loopback mode or normal mode */ 491 - if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 485 + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) 492 486 reg_msr = XCAN_MSR_LBACK_MASK; 493 - } else { 487 + else 494 488 reg_msr = 0x0; 495 - } 496 489 497 490 /* enable the first extended filter, if any, as cores with extended 498 491 * filtering default to non-receipt if all filters are disabled ··· 986 981 { 987 982 struct xcan_priv *priv = netdev_priv(ndev); 988 983 struct net_device_stats *stats = &ndev->stats; 989 - struct can_frame *cf; 990 - struct sk_buff *skb; 984 + struct can_frame cf = { }; 991 985 u32 err_status; 992 - 993 - skb = alloc_can_err_skb(ndev, &cf); 994 986 995 987 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); 996 988 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); ··· 998 996 /* Leave device in Config Mode in bus-off state */ 999 997 priv->write_reg(priv, 
XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); 1000 998 can_bus_off(ndev); 1001 - if (skb) 1002 - cf->can_id |= CAN_ERR_BUSOFF; 999 + cf.can_id |= CAN_ERR_BUSOFF; 1003 1000 } else { 1004 1001 enum can_state new_state = xcan_current_error_state(ndev); 1005 1002 1006 1003 if (new_state != priv->can.state) 1007 - xcan_set_error_state(ndev, new_state, skb ? cf : NULL); 1004 + xcan_set_error_state(ndev, new_state, &cf); 1008 1005 } 1009 1006 1010 1007 /* Check for Arbitration lost interrupt */ 1011 1008 if (isr & XCAN_IXR_ARBLST_MASK) { 1012 1009 priv->can.can_stats.arbitration_lost++; 1013 - if (skb) { 1014 - cf->can_id |= CAN_ERR_LOSTARB; 1015 - cf->data[0] = CAN_ERR_LOSTARB_UNSPEC; 1016 - } 1010 + cf.can_id |= CAN_ERR_LOSTARB; 1011 + cf.data[0] = CAN_ERR_LOSTARB_UNSPEC; 1017 1012 } 1018 1013 1019 1014 /* Check for RX FIFO Overflow interrupt */ 1020 1015 if (isr & XCAN_IXR_RXOFLW_MASK) { 1021 1016 stats->rx_over_errors++; 1022 1017 stats->rx_errors++; 1023 - if (skb) { 1024 - cf->can_id |= CAN_ERR_CRTL; 1025 - cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; 1026 - } 1018 + cf.can_id |= CAN_ERR_CRTL; 1019 + cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; 1027 1020 } 1028 1021 1029 1022 /* Check for RX Match Not Finished interrupt */ ··· 1026 1029 stats->rx_dropped++; 1027 1030 stats->rx_errors++; 1028 1031 netdev_err(ndev, "RX match not finished, frame discarded\n"); 1029 - if (skb) { 1030 - cf->can_id |= CAN_ERR_CRTL; 1031 - cf->data[1] |= CAN_ERR_CRTL_UNSPEC; 1032 - } 1032 + cf.can_id |= CAN_ERR_CRTL; 1033 + cf.data[1] |= CAN_ERR_CRTL_UNSPEC; 1033 1034 } 1034 1035 1035 1036 /* Check for error interrupt */ 1036 1037 if (isr & XCAN_IXR_ERROR_MASK) { 1037 - if (skb) 1038 - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 1038 + bool berr_reporting = false; 1039 + 1040 + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { 1041 + berr_reporting = true; 1042 + cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 1043 + } 1039 1044 1040 1045 /* Check for Ack error interrupt */ 1041 1046 if (err_status 
& XCAN_ESR_ACKER_MASK) { 1042 1047 stats->tx_errors++; 1043 - if (skb) { 1044 - cf->can_id |= CAN_ERR_ACK; 1045 - cf->data[3] = CAN_ERR_PROT_LOC_ACK; 1048 + if (berr_reporting) { 1049 + cf.can_id |= CAN_ERR_ACK; 1050 + cf.data[3] = CAN_ERR_PROT_LOC_ACK; 1046 1051 } 1047 1052 } 1048 1053 1049 1054 /* Check for Bit error interrupt */ 1050 1055 if (err_status & XCAN_ESR_BERR_MASK) { 1051 1056 stats->tx_errors++; 1052 - if (skb) { 1053 - cf->can_id |= CAN_ERR_PROT; 1054 - cf->data[2] = CAN_ERR_PROT_BIT; 1057 + if (berr_reporting) { 1058 + cf.can_id |= CAN_ERR_PROT; 1059 + cf.data[2] = CAN_ERR_PROT_BIT; 1055 1060 } 1056 1061 } 1057 1062 1058 1063 /* Check for Stuff error interrupt */ 1059 1064 if (err_status & XCAN_ESR_STER_MASK) { 1060 1065 stats->rx_errors++; 1061 - if (skb) { 1062 - cf->can_id |= CAN_ERR_PROT; 1063 - cf->data[2] = CAN_ERR_PROT_STUFF; 1066 + if (berr_reporting) { 1067 + cf.can_id |= CAN_ERR_PROT; 1068 + cf.data[2] = CAN_ERR_PROT_STUFF; 1064 1069 } 1065 1070 } 1066 1071 1067 1072 /* Check for Form error interrupt */ 1068 1073 if (err_status & XCAN_ESR_FMER_MASK) { 1069 1074 stats->rx_errors++; 1070 - if (skb) { 1071 - cf->can_id |= CAN_ERR_PROT; 1072 - cf->data[2] = CAN_ERR_PROT_FORM; 1075 + if (berr_reporting) { 1076 + cf.can_id |= CAN_ERR_PROT; 1077 + cf.data[2] = CAN_ERR_PROT_FORM; 1073 1078 } 1074 1079 } 1075 1080 1076 1081 /* Check for CRC error interrupt */ 1077 1082 if (err_status & XCAN_ESR_CRCER_MASK) { 1078 1083 stats->rx_errors++; 1079 - if (skb) { 1080 - cf->can_id |= CAN_ERR_PROT; 1081 - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; 1084 + if (berr_reporting) { 1085 + cf.can_id |= CAN_ERR_PROT; 1086 + cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; 1082 1087 } 1083 1088 } 1084 1089 priv->can.can_stats.bus_error++; 1085 1090 } 1086 1091 1087 - if (skb) { 1088 - stats->rx_packets++; 1089 - stats->rx_bytes += cf->can_dlc; 1090 - netif_rx(skb); 1092 + if (cf.can_id) { 1093 + struct can_frame *skb_cf; 1094 + struct sk_buff *skb = alloc_can_err_skb(ndev, 
&skb_cf); 1095 + 1096 + if (skb) { 1097 + skb_cf->can_id |= cf.can_id; 1098 + memcpy(skb_cf->data, cf.data, CAN_ERR_DLC); 1099 + stats->rx_packets++; 1100 + stats->rx_bytes += CAN_ERR_DLC; 1101 + netif_rx(skb); 1102 + } 1091 1103 } 1092 1104 1093 1105 netdev_dbg(ndev, "%s: error status register:0x%x\n", ··· 1657 1651 */ 1658 1652 static int xcan_probe(struct platform_device *pdev) 1659 1653 { 1660 - struct resource *res; /* IO mem resources */ 1661 1654 struct net_device *ndev; 1662 1655 struct xcan_priv *priv; 1663 1656 const struct of_device_id *of_id; ··· 1668 1663 const char *hw_tx_max_property; 1669 1664 1670 1665 /* Get the virtual base address for the device */ 1671 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1672 - addr = devm_ioremap_resource(&pdev->dev, res); 1666 + addr = devm_platform_ioremap_resource(pdev, 0); 1673 1667 if (IS_ERR(addr)) { 1674 1668 ret = PTR_ERR(addr); 1675 1669 goto err;
-22
include/linux/can/platform/mcp251x.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _CAN_PLATFORM_MCP251X_H 3 - #define _CAN_PLATFORM_MCP251X_H 4 - 5 - /* 6 - * 7 - * CAN bus driver for Microchip 251x CAN Controller with SPI Interface 8 - * 9 - */ 10 - 11 - #include <linux/spi/spi.h> 12 - 13 - /* 14 - * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data 15 - * @oscillator_frequency: - oscillator frequency in Hz 16 - */ 17 - 18 - struct mcp251x_platform_data { 19 - unsigned long oscillator_frequency; 20 - }; 21 - 22 - #endif /* !_CAN_PLATFORM_MCP251X_H */
+3 -4
include/linux/can/rx-offload.h
··· 15 15 struct can_rx_offload { 16 16 struct net_device *dev; 17 17 18 - unsigned int (*mailbox_read)(struct can_rx_offload *offload, 19 - struct can_frame *cf, 20 - u32 *timestamp, unsigned int mb); 18 + struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload, 19 + unsigned int mb, u32 *timestamp, 20 + bool drop); 21 21 22 22 struct sk_buff_head skb_queue; 23 23 u32 skb_queue_len_max; ··· 44 44 unsigned int idx, u32 timestamp); 45 45 int can_rx_offload_queue_tail(struct can_rx_offload *offload, 46 46 struct sk_buff *skb); 47 - void can_rx_offload_reset(struct can_rx_offload *offload); 48 47 void can_rx_offload_del(struct can_rx_offload *offload); 49 48 void can_rx_offload_enable(struct can_rx_offload *offload); 50 49