Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux-can-next-for-6.6-20230719' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next

Marc Kleine-Budde says:

====================
pull-request: can-next 2023-07-19

The first 2 patches are by Judith Mendez, target the m_can driver and
add hrtimer based polling support for TI AM62x SoCs, where the
interrupt of the MCU domain's m_can cores is not routed to the Cortex
A53 core.

A patch by Rob Herring converts the grcan driver to use the correct DT
include files.

Michal Simek and Srinivas Neeli add support for optional reset control
to the xilinx_can driver.

The next 2 patches are by Jimmy Assarsson and add support for new
Kvaser pciefd devices to the kvaser_pciefd driver.

Mao Zhu's patch for the ucan driver removes a repeated word from a
comment.

* tag 'linux-can-next-for-6.6-20230719' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next:
can: ucan: Remove repeated word
can: kvaser_pciefd: Add support for new Kvaser pciefd devices
can: kvaser_pciefd: Move hardware specific constants and functions into a driver_data struct
can: Explicitly include correct DT includes
can: xilinx_can: Add support for controller reset
dt-bindings: can: xilinx_can: Add reset description
can: m_can: Add hrtimer to generate software interrupt
dt-bindings: net: can: Remove interrupt properties for MCAN
====================

Link: https://lore.kernel.org/r/20230719072348.525039-1-mkl@pengutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+331 -90
+18 -2
Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
··· 122 122 - compatible 123 123 - reg 124 124 - reg-names 125 - - interrupts 126 - - interrupt-names 127 125 - clocks 128 126 - clock-names 129 127 - bosch,mram-cfg ··· 130 132 131 133 examples: 132 134 - | 135 + // Example with interrupts 133 136 #include <dt-bindings/clock/imx6sx-clock.h> 134 137 can@20e8000 { 135 138 compatible = "bosch,m_can"; ··· 138 139 reg-names = "m_can", "message_ram"; 139 140 interrupts = <0 114 0x04>, <0 114 0x04>; 140 141 interrupt-names = "int0", "int1"; 142 + clocks = <&clks IMX6SX_CLK_CANFD>, 143 + <&clks IMX6SX_CLK_CANFD>; 144 + clock-names = "hclk", "cclk"; 145 + bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>; 146 + 147 + can-transceiver { 148 + max-bitrate = <5000000>; 149 + }; 150 + }; 151 + 152 + - | 153 + // Example with timer polling 154 + #include <dt-bindings/clock/imx6sx-clock.h> 155 + can@20e8000 { 156 + compatible = "bosch,m_can"; 157 + reg = <0x020e8000 0x4000>, <0x02298000 0x4000>; 158 + reg-names = "m_can", "message_ram"; 141 159 clocks = <&clks IMX6SX_CLK_CANFD>, 142 160 <&clks IMX6SX_CLK_CANFD>; 143 161 clock-names = "hclk", "cclk";
+3
Documentation/devicetree/bindings/net/can/xilinx,can.yaml
··· 46 46 $ref: /schemas/types.yaml#/definitions/uint32 47 47 description: CAN Tx mailbox buffer count (CAN FD) 48 48 49 + resets: 50 + maxItems: 1 51 + 49 52 required: 50 53 - compatible 51 54 - reg
+5
drivers/net/can/Kconfig
··· 160 160 Kvaser PCIEcan 4xHS 161 161 Kvaser PCIEcan 2xHS v2 162 162 Kvaser PCIEcan HS v2 163 + Kvaser PCIEcan 1xCAN v3 164 + Kvaser PCIEcan 2xCAN v3 165 + Kvaser PCIEcan 4xCAN v2 163 166 Kvaser Mini PCI Express HS v2 164 167 Kvaser Mini PCI Express 2xHS v2 168 + Kvaser Mini PCI Express 1xCAN v3 169 + Kvaser Mini PCI Express 2xCAN v3 165 170 166 171 config CAN_SLCAN 167 172 tristate "Serial / USB serial CAN Adaptors (slcan)"
+2 -1
drivers/net/can/grcan.c
··· 30 30 #include <linux/ethtool.h> 31 31 #include <linux/io.h> 32 32 #include <linux/can/dev.h> 33 + #include <linux/platform_device.h> 33 34 #include <linux/spinlock.h> 34 - #include <linux/of_platform.h> 35 + #include <linux/of.h> 35 36 #include <linux/of_irq.h> 36 37 37 38 #include <linux/dma-mapping.h>
+230 -77
drivers/net/can/kvaser_pciefd.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2 2 /* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 3 3 * Parts of this driver are based on the following: 4 - * - Kvaser linux pciefd driver (version 5.25) 4 + * - Kvaser linux pciefd driver (version 5.42) 5 5 * - PEAK linux canfd driver 6 6 */ 7 7 ··· 33 33 #define KVASER_PCIEFD_DMA_SIZE (4U * 1024U) 34 34 35 35 #define KVASER_PCIEFD_VENDOR 0x1a07 36 + /* Altera based devices */ 36 37 #define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d 37 38 #define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e 38 39 #define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f 39 40 #define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010 40 41 #define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011 41 42 42 - /* PCIe IRQ registers */ 43 - #define KVASER_PCIEFD_IRQ_REG 0x40 44 - #define KVASER_PCIEFD_IEN_REG 0x50 45 - /* DMA address translation map register base */ 46 - #define KVASER_PCIEFD_DMA_MAP_BASE 0x1000 47 - /* Loopback control register */ 48 - #define KVASER_PCIEFD_LOOP_REG 0x1f000 49 - /* System identification and information registers */ 50 - #define KVASER_PCIEFD_SYSID_BASE 0x1f020 51 - #define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8) 52 - #define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc) 53 - #define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10) 54 - #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) 55 - /* Shared receive buffer registers */ 56 - #define KVASER_PCIEFD_SRB_BASE 0x1f200 57 - #define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4) 58 - #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) 59 - #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) 60 - #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) 61 - #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210) 62 - #define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214) 63 - #define 
KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218) 43 + /* SmartFusion2 based devices */ 44 + #define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012 45 + #define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013 46 + #define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014 47 + #define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015 48 + #define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016 49 + 50 + /* Altera SerDes Enable 64-bit DMA address translation */ 51 + #define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0) 52 + 53 + /* SmartFusion2 SerDes LSB address translation mask */ 54 + #define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12) 55 + 64 56 /* Kvaser KCAN CAN controller registers */ 65 - #define KVASER_PCIEFD_KCAN0_BASE 0x10000 66 - #define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000 67 57 #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 68 58 #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 69 59 #define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 ··· 67 77 #define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 68 78 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 69 79 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 70 - 71 - /* PCI interrupt fields */ 72 - #define KVASER_PCIEFD_IRQ_SRB BIT(4) 73 - #define KVASER_PCIEFD_IRQ_ALL_MASK GENMASK(4, 0) 74 - 75 - /* Enable 64-bit DMA address translation */ 76 - #define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0) 80 + /* System identification and information registers */ 81 + #define KVASER_PCIEFD_SYSID_VERSION_REG 0x8 82 + #define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc 83 + #define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10 84 + #define KVASER_PCIEFD_SYSID_BUILD_REG 0x14 85 + /* Shared receive buffer FIFO registers */ 86 + #define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4 87 + /* Shared receive buffer registers */ 88 + #define KVASER_PCIEFD_SRB_CMD_REG 0x0 89 + #define KVASER_PCIEFD_SRB_IEN_REG 0x04 90 + #define KVASER_PCIEFD_SRB_IRQ_REG 0x0c 91 + #define KVASER_PCIEFD_SRB_STAT_REG 0x10 92 + #define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14 93 + #define KVASER_PCIEFD_SRB_CTRL_REG 0x18 77 94 78 95 /* System 
build information fields */ 79 96 #define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24) ··· 250 253 /* KCAN Error detected packet, second word */ 251 254 #define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) 252 255 256 + /* Macros for calculating addresses of registers */ 257 + #define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \ 258 + ((pcie)->reg_base + (pcie)->driver_data->address_offset->block) 259 + #define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \ 260 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien)) 261 + #define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \ 262 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq)) 263 + #define KVASER_PCIEFD_SERDES_ADDR(pcie) \ 264 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes)) 265 + #define KVASER_PCIEFD_SYSID_ADDR(pcie) \ 266 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid)) 267 + #define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \ 268 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback)) 269 + #define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \ 270 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo)) 271 + #define KVASER_PCIEFD_SRB_ADDR(pcie) \ 272 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb)) 273 + #define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \ 274 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0)) 275 + #define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \ 276 + (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1)) 277 + #define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \ 278 + (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie))) 279 + #define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \ 280 + (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie))) 281 + 253 282 struct kvaser_pciefd; 283 + static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, 284 + dma_addr_t addr, int index); 285 + static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, 286 + dma_addr_t addr, int index); 287 + 288 + struct kvaser_pciefd_address_offset { 289 + u32 serdes; 290 + u32 pci_ien; 291 + u32 pci_irq; 292 + u32 
sysid; 293 + u32 loopback; 294 + u32 kcan_srb_fifo; 295 + u32 kcan_srb; 296 + u32 kcan_ch0; 297 + u32 kcan_ch1; 298 + }; 299 + 300 + struct kvaser_pciefd_dev_ops { 301 + void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie, 302 + dma_addr_t addr, int index); 303 + }; 304 + 305 + struct kvaser_pciefd_irq_mask { 306 + u32 kcan_rx0; 307 + u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 308 + u32 all; 309 + }; 310 + 311 + struct kvaser_pciefd_driver_data { 312 + const struct kvaser_pciefd_address_offset *address_offset; 313 + const struct kvaser_pciefd_irq_mask *irq_mask; 314 + const struct kvaser_pciefd_dev_ops *ops; 315 + }; 316 + 317 + static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = { 318 + .serdes = 0x1000, 319 + .pci_ien = 0x50, 320 + .pci_irq = 0x40, 321 + .sysid = 0x1f020, 322 + .loopback = 0x1f000, 323 + .kcan_srb_fifo = 0x1f200, 324 + .kcan_srb = 0x1f400, 325 + .kcan_ch0 = 0x10000, 326 + .kcan_ch1 = 0x11000, 327 + }; 328 + 329 + static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = { 330 + .serdes = 0x280c8, 331 + .pci_ien = 0x102004, 332 + .pci_irq = 0x102008, 333 + .sysid = 0x100000, 334 + .loopback = 0x103000, 335 + .kcan_srb_fifo = 0x120000, 336 + .kcan_srb = 0x121000, 337 + .kcan_ch0 = 0x140000, 338 + .kcan_ch1 = 0x142000, 339 + }; 340 + 341 + static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = { 342 + .kcan_rx0 = BIT(4), 343 + .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) }, 344 + .all = GENMASK(4, 0), 345 + }; 346 + 347 + static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = { 348 + .kcan_rx0 = BIT(4), 349 + .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) }, 350 + .all = GENMASK(19, 16) | BIT(4), 351 + }; 352 + 353 + static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = { 354 + .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera, 355 + }; 356 + 357 + static const struct kvaser_pciefd_dev_ops 
kvaser_pciefd_sf2_dev_ops = { 358 + .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2, 359 + }; 360 + 361 + static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = { 362 + .address_offset = &kvaser_pciefd_altera_address_offset, 363 + .irq_mask = &kvaser_pciefd_altera_irq_mask, 364 + .ops = &kvaser_pciefd_altera_dev_ops, 365 + }; 366 + 367 + static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = { 368 + .address_offset = &kvaser_pciefd_sf2_address_offset, 369 + .irq_mask = &kvaser_pciefd_sf2_irq_mask, 370 + .ops = &kvaser_pciefd_sf2_dev_ops, 371 + }; 254 372 255 373 struct kvaser_pciefd_can { 256 374 struct can_priv can; ··· 385 273 struct pci_dev *pci; 386 274 void __iomem *reg_base; 387 275 struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 276 + const struct kvaser_pciefd_driver_data *driver_data; 388 277 void *dma_data[KVASER_PCIEFD_DMA_COUNT]; 389 278 u8 nr_channels; 390 279 u32 bus_freq; ··· 418 305 static struct pci_device_id kvaser_pciefd_id_table[] = { 419 306 { 420 307 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID), 308 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 421 309 }, 422 310 { 423 311 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID), 312 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 424 313 }, 425 314 { 426 315 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID), 316 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 427 317 }, 428 318 { 429 319 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID), 320 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 430 321 }, 431 322 { 432 323 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID), 324 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 325 + }, 326 + { 327 + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID), 328 + .driver_data = 
(kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 329 + }, 330 + { 331 + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID), 332 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 333 + }, 334 + { 335 + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID), 336 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 337 + }, 338 + { 339 + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID), 340 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 341 + }, 342 + { 343 + PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID), 344 + .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 433 345 }, 434 346 { 435 347 0, ··· 921 783 can = netdev_priv(netdev); 922 784 netdev->netdev_ops = &kvaser_pciefd_netdev_ops; 923 785 netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops; 924 - can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE + 925 - i * KVASER_PCIEFD_KCAN_BASE_OFFSET; 786 + can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i); 926 787 can->kv_pcie = pcie; 927 788 can->cmd_seq = 0; 928 789 can->err_rep_cnt = 0; ··· 1002 865 return 0; 1003 866 } 1004 867 1005 - static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie, 1006 - dma_addr_t addr, int offset) 868 + static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, 869 + dma_addr_t addr, int index) 1007 870 { 871 + void __iomem *serdes_base; 1008 872 u32 word1, word2; 1009 873 1010 874 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1011 - word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT; 875 + word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT; 1012 876 word2 = addr >> 32; 1013 877 #else 1014 878 word1 = addr; 1015 879 word2 = 0; 1016 880 #endif 1017 - iowrite32(word1, pcie->reg_base + offset); 1018 - iowrite32(word2, pcie->reg_base + offset + 4); 881 + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; 882 + iowrite32(word1, serdes_base); 883 + iowrite32(word2, serdes_base + 0x4); 884 + } 
885 + 886 + static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, 887 + dma_addr_t addr, int index) 888 + { 889 + void __iomem *serdes_base; 890 + u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK; 891 + u32 msb = 0x0; 892 + 893 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 894 + msb = addr >> 32; 895 + #endif 896 + serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index; 897 + iowrite32(lsb, serdes_base); 898 + iowrite32(msb, serdes_base + 0x4); 1019 899 } 1020 900 1021 901 static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) ··· 1043 889 dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; 1044 890 1045 891 /* Disable the DMA */ 1046 - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); 892 + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1047 893 for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { 1048 - unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i; 1049 - 1050 894 pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev, 1051 895 KVASER_PCIEFD_DMA_SIZE, 1052 896 &dma_addr[i], ··· 1055 903 KVASER_PCIEFD_DMA_SIZE); 1056 904 return -ENOMEM; 1057 905 } 1058 - kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset); 906 + pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i); 1059 907 } 1060 908 1061 909 /* Reset Rx FIFO, and both DMA buffers */ 1062 910 iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | 1063 911 KVASER_PCIEFD_SRB_CMD_RDB1, 1064 - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 912 + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1065 913 /* Empty Rx FIFO */ 1066 914 srb_packet_count = 1067 915 FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK, 1068 - ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG)); 916 + ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + 917 + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG)); 1069 918 while (srb_packet_count) { 1070 919 /* Drop current packet in FIFO */ 1071 - ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG); 920 + 
ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG); 1072 921 srb_packet_count--; 1073 922 } 1074 923 1075 - srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); 924 + srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); 1076 925 if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { 1077 926 dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); 1078 927 return -EIO; ··· 1081 928 1082 929 /* Enable the DMA */ 1083 930 iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, 1084 - pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); 931 + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1085 932 1086 933 return 0; 1087 934 } ··· 1090 937 { 1091 938 u32 version, srb_status, build; 1092 939 1093 - version = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG); 940 + version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG); 1094 941 pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS, 1095 942 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version)); 1096 943 1097 - build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG); 944 + build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG); 1098 945 dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n", 1099 946 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version), 1100 947 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version), 1101 948 FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build)); 1102 949 1103 - srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG); 950 + srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); 1104 951 if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { 1105 952 dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n"); 1106 953 return -ENODEV; 1107 954 } 1108 955 1109 - pcie->bus_freq = ioread32(pcie->reg_base + 1110 - KVASER_PCIEFD_SYSID_BUSFREQ_REG); 1111 - pcie->freq = ioread32(pcie->reg_base + 
KVASER_PCIEFD_SYSID_CANFREQ_REG); 956 + pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG); 957 + pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG); 1112 958 pcie->freq_to_ticks_div = pcie->freq / 1000000; 1113 959 if (pcie->freq_to_ticks_div == 0) 1114 960 pcie->freq_to_ticks_div = 1; 1115 961 /* Turn off all loopback functionality */ 1116 - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG); 962 + iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie)); 1117 963 1118 964 return 0; 1119 965 } ··· 1582 1430 1583 1431 static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) 1584 1432 { 1585 - u32 irq; 1433 + u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); 1586 1434 1587 - irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); 1588 1435 if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { 1589 1436 kvaser_pciefd_read_buffer(pcie, 0); 1590 1437 /* Reset DMA buffer 0 */ 1591 1438 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, 1592 - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1439 + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1593 1440 } 1594 1441 1595 1442 if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { 1596 1443 kvaser_pciefd_read_buffer(pcie, 1); 1597 1444 /* Reset DMA buffer 1 */ 1598 1445 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, 1599 - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1446 + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1600 1447 } 1601 1448 1602 1449 if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || ··· 1604 1453 irq & KVASER_PCIEFD_SRB_IRQ_DUF1) 1605 1454 dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); 1606 1455 1607 - iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); 1456 + iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); 1608 1457 } 1609 1458 1610 1459 static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) ··· 1630 1479 static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) 1631 
1480 { 1632 1481 struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; 1633 - u32 board_irq; 1482 + const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; 1483 + u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); 1634 1484 int i; 1635 1485 1636 - board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG); 1637 - 1638 - if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MASK)) 1486 + if (!(board_irq & irq_mask->all)) 1639 1487 return IRQ_NONE; 1640 1488 1641 - if (board_irq & KVASER_PCIEFD_IRQ_SRB) 1489 + if (board_irq & irq_mask->kcan_rx0) 1642 1490 kvaser_pciefd_receive_irq(pcie); 1643 1491 1644 1492 for (i = 0; i < pcie->nr_channels; i++) { ··· 1648 1498 } 1649 1499 1650 1500 /* Check that mask matches channel (i) IRQ mask */ 1651 - if (board_irq & (1 << i)) 1501 + if (board_irq & irq_mask->kcan_tx[i]) 1652 1502 kvaser_pciefd_transmit_irq(pcie->can[i]); 1653 1503 } 1654 1504 ··· 1675 1525 { 1676 1526 int err; 1677 1527 struct kvaser_pciefd *pcie; 1528 + const struct kvaser_pciefd_irq_mask *irq_mask; 1529 + void __iomem *irq_en_base; 1678 1530 1679 1531 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); 1680 1532 if (!pcie) ··· 1684 1532 1685 1533 pci_set_drvdata(pdev, pcie); 1686 1534 pcie->pci = pdev; 1535 + pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data; 1536 + irq_mask = pcie->driver_data->irq_mask; 1687 1537 1688 1538 err = pci_enable_device(pdev); 1689 1539 if (err) ··· 1721 1567 goto err_teardown_can_ctrls; 1722 1568 1723 1569 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, 1724 - pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG); 1570 + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); 1725 1571 1726 1572 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | 1727 1573 KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | 1728 1574 KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, 1729 - pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG); 1575 + 
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); 1730 1576 1731 1577 /* Enable PCI interrupts */ 1732 - iowrite32(KVASER_PCIEFD_IRQ_ALL_MASK, 1733 - pcie->reg_base + KVASER_PCIEFD_IEN_REG); 1734 - 1578 + irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); 1579 + iowrite32(irq_mask->all, irq_en_base); 1735 1580 /* Ready the DMA buffers */ 1736 1581 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, 1737 - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1582 + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1738 1583 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, 1739 - pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG); 1584 + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1740 1585 1741 1586 err = kvaser_pciefd_reg_candev(pcie); 1742 1587 if (err) ··· 1745 1592 1746 1593 err_free_irq: 1747 1594 /* Disable PCI interrupts */ 1748 - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); 1595 + iowrite32(0, irq_en_base); 1749 1596 free_irq(pcie->pci->irq, pcie); 1750 1597 1751 1598 err_teardown_can_ctrls: 1752 1599 kvaser_pciefd_teardown_can_ctrls(pcie); 1753 - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); 1600 + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1754 1601 pci_clear_master(pdev); 1755 1602 1756 1603 err_pci_iounmap: ··· 1789 1636 kvaser_pciefd_remove_all_ctrls(pcie); 1790 1637 1791 1638 /* Disable interrupts */ 1792 - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG); 1793 - iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); 1639 + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1640 + iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); 1794 1641 1795 1642 free_irq(pcie->pci->irq, pcie); 1796 1643
+31 -1
drivers/net/can/m_can/m_can.c
··· 11 11 #include <linux/bitfield.h> 12 12 #include <linux/can/dev.h> 13 13 #include <linux/ethtool.h> 14 + #include <linux/hrtimer.h> 14 15 #include <linux/interrupt.h> 15 16 #include <linux/io.h> 16 17 #include <linux/iopoll.h> ··· 308 307 /* E1 */ 309 308 #define TX_EVENT_MM_MASK GENMASK(31, 24) 310 309 #define TX_EVENT_TXTS_MASK GENMASK(15, 0) 310 + 311 + /* Hrtimer polling interval */ 312 + #define HRTIMER_POLL_INTERVAL_MS 1 311 313 312 314 /* The ID and DLC registers are adjacent in M_CAN FIFO memory, 313 315 * and we can save a (potentially slow) bus round trip by combining ··· 1418 1414 1419 1415 m_can_enable_all_interrupts(cdev); 1420 1416 1417 + if (!dev->irq) { 1418 + dev_dbg(cdev->dev, "Start hrtimer\n"); 1419 + hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS), 1420 + HRTIMER_MODE_REL_PINNED); 1421 + } 1422 + 1421 1423 return 0; 1422 1424 } 1423 1425 ··· 1577 1567 static void m_can_stop(struct net_device *dev) 1578 1568 { 1579 1569 struct m_can_classdev *cdev = netdev_priv(dev); 1570 + 1571 + if (!dev->irq) { 1572 + dev_dbg(cdev->dev, "Stop hrtimer\n"); 1573 + hrtimer_cancel(&cdev->hrtimer); 1574 + } 1580 1575 1581 1576 /* disable all interrupts */ 1582 1577 m_can_disable_all_interrupts(cdev); ··· 1808 1793 return NETDEV_TX_OK; 1809 1794 } 1810 1795 1796 + static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer) 1797 + { 1798 + struct m_can_classdev *cdev = container_of(timer, struct 1799 + m_can_classdev, hrtimer); 1800 + 1801 + m_can_isr(0, cdev->net); 1802 + 1803 + hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS)); 1804 + 1805 + return HRTIMER_RESTART; 1806 + } 1807 + 1811 1808 static int m_can_open(struct net_device *dev) 1812 1809 { 1813 1810 struct m_can_classdev *cdev = netdev_priv(dev); ··· 1858 1831 err = request_threaded_irq(dev->irq, NULL, m_can_isr, 1859 1832 IRQF_ONESHOT, 1860 1833 dev->name, dev); 1861 - } else { 1834 + } else if (dev->irq) { 1862 1835 err = request_irq(dev->irq, m_can_isr, 
IRQF_SHARED, dev->name, 1863 1836 dev); 1864 1837 } ··· 2053 2026 if (ret) 2054 2027 goto clk_disable; 2055 2028 } 2029 + 2030 + if (!cdev->net->irq) 2031 + cdev->hrtimer.function = &hrtimer_callback; 2056 2032 2057 2033 ret = m_can_dev_setup(cdev); 2058 2034 if (ret)
+3
drivers/net/can/m_can/m_can.h
··· 15 15 #include <linux/device.h> 16 16 #include <linux/dma-mapping.h> 17 17 #include <linux/freezer.h> 18 + #include <linux/hrtimer.h> 18 19 #include <linux/interrupt.h> 19 20 #include <linux/io.h> 20 21 #include <linux/iopoll.h> ··· 94 93 int is_peripheral; 95 94 96 95 struct mram_cfg mcfg[MRAM_CFG_NUM]; 96 + 97 + struct hrtimer hrtimer; 97 98 }; 98 99 99 100 struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv);
+17 -4
drivers/net/can/m_can/m_can_platform.c
··· 5 5 // 6 6 // Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ 7 7 8 + #include <linux/hrtimer.h> 8 9 #include <linux/phy/phy.h> 9 10 #include <linux/platform_device.h> 10 11 ··· 83 82 void __iomem *addr; 84 83 void __iomem *mram_addr; 85 84 struct phy *transceiver; 86 - int irq, ret = 0; 85 + int irq = 0, ret = 0; 87 86 88 87 mcan_class = m_can_class_allocate_dev(&pdev->dev, 89 88 sizeof(struct m_can_plat_priv)); ··· 97 96 goto probe_fail; 98 97 99 98 addr = devm_platform_ioremap_resource_byname(pdev, "m_can"); 100 - irq = platform_get_irq_byname(pdev, "int0"); 101 - if (IS_ERR(addr) || irq < 0) { 102 - ret = -EINVAL; 99 + if (IS_ERR(addr)) { 100 + ret = PTR_ERR(addr); 103 101 goto probe_fail; 102 + } 103 + 104 + if (device_property_present(mcan_class->dev, "interrupts") || 105 + device_property_present(mcan_class->dev, "interrupt-names")) { 106 + irq = platform_get_irq_byname(pdev, "int0"); 107 + if (irq < 0) { 108 + ret = irq; 109 + goto probe_fail; 110 + } 111 + } else { 112 + dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer"); 113 + hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC, 114 + HRTIMER_MODE_REL_PINNED); 104 115 } 105 116 106 117 /* message ram could be shared */
+1 -1
drivers/net/can/usb/ucan.c
··· 284 284 */ 285 285 spinlock_t echo_skb_lock; 286 286 287 - /* usb device information information */ 287 + /* usb device information */ 288 288 u8 intf_index; 289 289 u8 in_ep_addr; 290 290 u8 out_ep_addr;
+21 -4
drivers/net/can/xilinx_can.c
··· 30 30 #include <linux/can/error.h> 31 31 #include <linux/phy/phy.h> 32 32 #include <linux/pm_runtime.h> 33 + #include <linux/reset.h> 33 34 34 35 #define DRIVER_NAME "xilinx_can" 35 36 ··· 201 200 * @can_clk: Pointer to struct clk 202 201 * @devtype: Device type specific constants 203 202 * @transceiver: Optional pointer to associated CAN transceiver 203 + * @rstc: Pointer to reset control 204 204 */ 205 205 struct xcan_priv { 206 206 struct can_priv can; ··· 220 218 struct clk *can_clk; 221 219 struct xcan_devtype_data devtype; 222 220 struct phy *transceiver; 221 + struct reset_control *rstc; 223 222 }; 224 223 225 224 /* CAN Bittiming constants as per Xilinx CAN specs */ ··· 1802 1799 priv->can.do_get_berr_counter = xcan_get_berr_counter; 1803 1800 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 1804 1801 CAN_CTRLMODE_BERR_REPORTING; 1802 + priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); 1803 + if (IS_ERR(priv->rstc)) { 1804 + dev_err(&pdev->dev, "Cannot get CAN reset.\n"); 1805 + ret = PTR_ERR(priv->rstc); 1806 + goto err_free; 1807 + } 1808 + 1809 + ret = reset_control_reset(priv->rstc); 1810 + if (ret) 1811 + goto err_free; 1805 1812 1806 1813 if (devtype->cantype == XAXI_CANFD) { 1807 1814 priv->can.data_bittiming_const = ··· 1840 1827 /* Get IRQ for the device */ 1841 1828 ret = platform_get_irq(pdev, 0); 1842 1829 if (ret < 0) 1843 - goto err_free; 1830 + goto err_reset; 1844 1831 1845 1832 ndev->irq = ret; 1846 1833 ··· 1856 1843 if (IS_ERR(priv->can_clk)) { 1857 1844 ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk), 1858 1845 "device clock not found\n"); 1859 - goto err_free; 1846 + goto err_reset; 1860 1847 } 1861 1848 1862 1849 priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); 1863 1850 if (IS_ERR(priv->bus_clk)) { 1864 1851 ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk), 1865 1852 "bus clock not found\n"); 1866 - goto err_free; 1853 + goto err_reset; 1867 1854 } 1868 1855 1869 1856 
transceiver = devm_phy_optional_get(&pdev->dev, NULL); 1870 1857 if (IS_ERR(transceiver)) { 1871 1858 ret = PTR_ERR(transceiver); 1872 1859 dev_err_probe(&pdev->dev, ret, "failed to get phy\n"); 1873 - goto err_free; 1860 + goto err_reset; 1874 1861 } 1875 1862 priv->transceiver = transceiver; 1876 1863 ··· 1917 1904 err_disableclks: 1918 1905 pm_runtime_put(priv->dev); 1919 1906 pm_runtime_disable(&pdev->dev); 1907 + err_reset: 1908 + reset_control_assert(priv->rstc); 1920 1909 err_free: 1921 1910 free_candev(ndev); 1922 1911 err: ··· 1935 1920 static void xcan_remove(struct platform_device *pdev) 1936 1921 { 1937 1922 struct net_device *ndev = platform_get_drvdata(pdev); 1923 + struct xcan_priv *priv = netdev_priv(ndev); 1938 1924 1939 1925 unregister_candev(ndev); 1940 1926 pm_runtime_disable(&pdev->dev); 1927 + reset_control_assert(priv->rstc); 1941 1928 free_candev(ndev); 1942 1929 } 1943 1930