Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v6.16-rc2 1910 lines 58 kB view raw
1// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause 2/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 3 * Parts of this driver are based on the following: 4 * - Kvaser linux pciefd driver (version 5.42) 5 * - PEAK linux canfd driver 6 */ 7 8#include <linux/bitfield.h> 9#include <linux/can/dev.h> 10#include <linux/device.h> 11#include <linux/ethtool.h> 12#include <linux/iopoll.h> 13#include <linux/kernel.h> 14#include <linux/minmax.h> 15#include <linux/module.h> 16#include <linux/netdevice.h> 17#include <linux/pci.h> 18#include <linux/timer.h> 19#include <net/netdev_queues.h> 20 21MODULE_LICENSE("Dual BSD/GPL"); 22MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); 23MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); 24 25#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" 26 27#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) 28#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) 29#define KVASER_PCIEFD_MAX_ERR_REP 256U 30#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U 31#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL 32#define KVASER_PCIEFD_DMA_COUNT 2U 33#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U) 34 35#define KVASER_PCIEFD_VENDOR 0x1a07 36 37/* Altera based devices */ 38#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d 39#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e 40#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f 41#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010 42#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011 43 44/* SmartFusion2 based devices */ 45#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012 46#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013 47#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014 48#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015 49#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016 50 51/* Xilinx based devices */ 52#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017 53#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019 54 55/* Altera SerDes Enable 64-bit DMA address translation */ 56#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0) 57 58/* SmartFusion2 SerDes LSB address translation mask */ 59#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12) 60 61/* Xilinx SerDes LSB address translation mask */ 62#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12) 63 64/* Kvaser KCAN CAN controller registers */ 65#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 66#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 67#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 68#define KVASER_PCIEFD_KCAN_CMD_REG 0x400 69#define KVASER_PCIEFD_KCAN_IEN_REG 0x408 70#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 71#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414 72#define KVASER_PCIEFD_KCAN_STAT_REG 0x418 73#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c 74#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 75#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 76#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 77#define KVASER_PCIEFD_KCAN_PWM_REG 0x430 78/* System identification and information registers */ 79#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8 80#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc 81#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10 82#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14 83/* Shared receive buffer FIFO registers */ 84#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4 85/* Shared receive buffer registers */ 86#define KVASER_PCIEFD_SRB_CMD_REG 0x0 87#define KVASER_PCIEFD_SRB_IEN_REG 0x04 88#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c 89#define KVASER_PCIEFD_SRB_STAT_REG 0x10 90#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14 91#define KVASER_PCIEFD_SRB_CTRL_REG 0x18 92 93/* System build information fields */ 94#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24) 95#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16) 96#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0) 97#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1) 98 99/* Reset DMA buffer 0, 1 and FIFO offset */ 100#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) 101#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) 102#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) 103 104/* DMA underflow, buffer 0 and 1 */ 105#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) 106#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) 107/* DMA overflow, buffer 0 and 1 */ 108#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) 109#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) 110/* DMA packet done, buffer 0 and 1 */ 111#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) 112#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) 113 114/* Got DMA support */ 115#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) 116/* DMA idle */ 117#define KVASER_PCIEFD_SRB_STAT_DI BIT(15) 118 119/* SRB current packet level */ 120#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0) 121 122/* DMA Enable */ 123#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) 124 125/* KCAN CTRL packet types */ 126#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29) 127#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4 128#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5 129 130/* Command sequence number */ 131#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16) 132/* Command bits */ 133#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0) 134/* Abort, flush and reset */ 135#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) 136/* Request status packet */ 137#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) 138 139/* Transmitter unaligned */ 140#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) 141/* Tx FIFO empty */ 142#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) 143/* Tx FIFO overflow */ 144#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) 145/* Tx buffer flush done */ 146#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) 147/* Abort done */ 148#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) 149/* Rx FIFO overflow */ 150#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) 151/* FDF bit when controller is in classic CAN mode */ 152#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) 153/* Bus parameter protection error */ 154#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) 155/* Tx FIFO unaligned end */ 156#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) 157/* Tx FIFO unaligned read */ 158#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) 159 160/* Tx FIFO size */ 161#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16) 162/* Tx FIFO current packet level */ 163#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0) 164 165/* Current status packet sequence number */ 166#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24) 167/* Controller got CAN FD capability */ 168#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) 169/* Controller got one-shot capability */ 170#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) 171/* Controller in reset mode */ 172#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) 173/* Reset mode request */ 174#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) 175/* Bus off */ 176#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) 177/* Idle state. Controller in reset mode and no abort or flush pending */ 178#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) 179/* Abort request */ 180#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) 181/* Controller is bus off */ 182#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \ 183 (KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \ 184 KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM) 185 186/* Classic CAN mode */ 187#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) 188/* Active error flag enable. Clear to force error passive */ 189#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) 190/* Acknowledgment packet type */ 191#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) 192/* CAN FD non-ISO */ 193#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) 194/* Error packet enable */ 195#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) 196/* Listen only mode */ 197#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) 198/* Reset mode */ 199#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) 200 201/* BTRN and BTRD fields */ 202#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26) 203#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17) 204#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13) 205#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0) 206 207/* PWM Control fields */ 208#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16) 209#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0) 210 211/* KCAN packet type IDs */ 212#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0 213#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1 214#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2 215#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3 216#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4 217#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5 218#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6 219#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8 220#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9 221 222/* Common KCAN packet definitions, second word */ 223#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28) 224#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25) 225#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0) 226 227/* KCAN Transmit/Receive data packet, first word */ 228#define KVASER_PCIEFD_RPACKET_IDE BIT(30) 229#define KVASER_PCIEFD_RPACKET_RTR BIT(29) 230#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0) 231/* KCAN Transmit data packet, second word */ 232#define KVASER_PCIEFD_TPACKET_AREQ BIT(31) 233#define KVASER_PCIEFD_TPACKET_SMS BIT(16) 234/* KCAN Transmit/Receive data packet, second word */ 235#define KVASER_PCIEFD_RPACKET_FDF BIT(15) 236#define KVASER_PCIEFD_RPACKET_BRS BIT(14) 237#define KVASER_PCIEFD_RPACKET_ESI BIT(13) 238#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8) 239 240/* KCAN Transmit acknowledge packet, first word */ 241#define KVASER_PCIEFD_APACKET_NACK BIT(11) 242#define KVASER_PCIEFD_APACKET_ABL BIT(10) 243#define KVASER_PCIEFD_APACKET_CT BIT(9) 244#define KVASER_PCIEFD_APACKET_FLU BIT(8) 245 246/* KCAN Status packet, first word */ 247#define KVASER_PCIEFD_SPACK_RMCD BIT(22) 248#define KVASER_PCIEFD_SPACK_IRM BIT(21) 249#define KVASER_PCIEFD_SPACK_IDET BIT(20) 250#define KVASER_PCIEFD_SPACK_BOFF BIT(16) 251#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8) 252#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0) 253/* KCAN Status packet, second word */ 254#define KVASER_PCIEFD_SPACK_EPLR BIT(24) 255#define KVASER_PCIEFD_SPACK_EWLR BIT(23) 256#define KVASER_PCIEFD_SPACK_AUTO BIT(21) 257 258/* KCAN Error detected packet, second word */ 259#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) 260 261/* Macros for calculating addresses of registers */ 262#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \ 263 ((pcie)->reg_base + (pcie)->driver_data->address_offset->block) 264#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \ 265 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien)) 266#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \ 267 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq)) 268#define KVASER_PCIEFD_SERDES_ADDR(pcie) \ 269 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes)) 270#define KVASER_PCIEFD_SYSID_ADDR(pcie) \ 271 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid)) 272#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \ 273 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback)) 274#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \ 275 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo)) 276#define KVASER_PCIEFD_SRB_ADDR(pcie) \ 277 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb)) 278#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \ 279 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0)) 280#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \ 281 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1)) 282#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \ 283 (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie))) 284#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \ 285 (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie))) 286 287struct kvaser_pciefd; 288static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, 289 dma_addr_t addr, int index); 290static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, 291 dma_addr_t addr, int index); 292static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie, 293 dma_addr_t addr, int index); 294 295struct kvaser_pciefd_address_offset { 296 u32 serdes; 297 u32 pci_ien; 298 u32 pci_irq; 299 u32 sysid; 300 u32 loopback; 301 u32 kcan_srb_fifo; 302 u32 kcan_srb; 303 u32 kcan_ch0; 304 u32 kcan_ch1; 305}; 306 307struct kvaser_pciefd_dev_ops { 308 void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie, 309 dma_addr_t addr, int index); 310}; 311 312struct kvaser_pciefd_irq_mask { 313 u32 kcan_rx0; 314 u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 315 u32 all; 316}; 317 318struct kvaser_pciefd_driver_data { 319 const struct kvaser_pciefd_address_offset *address_offset; 320 const struct kvaser_pciefd_irq_mask *irq_mask; 321 const struct kvaser_pciefd_dev_ops *ops; 322}; 323 324static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = { 325 .serdes = 0x1000, 326 .pci_ien = 0x50, 327 .pci_irq = 0x40, 328 .sysid = 0x1f020, 329 .loopback = 0x1f000, 330 .kcan_srb_fifo = 0x1f200, 331 .kcan_srb = 0x1f400, 332 .kcan_ch0 = 0x10000, 333 .kcan_ch1 = 0x11000, 334}; 335 336static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = { 337 .serdes = 0x280c8, 338 .pci_ien = 0x102004, 339 .pci_irq = 0x102008, 340 .sysid = 0x100000, 341 .loopback = 0x103000, 342 .kcan_srb_fifo = 0x120000, 343 .kcan_srb = 0x121000, 344 .kcan_ch0 = 0x140000, 345 .kcan_ch1 = 0x142000, 346}; 347 348static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = { 349 .serdes = 0x00208, 350 .pci_ien = 0x102004, 351 .pci_irq = 0x102008, 352 .sysid = 0x100000, 353 .loopback = 0x103000, 354 .kcan_srb_fifo = 0x120000, 355 .kcan_srb = 0x121000, 356 .kcan_ch0 = 0x140000, 357 .kcan_ch1 = 0x142000, 358}; 359 360static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = { 361 .kcan_rx0 = BIT(4), 362 .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) }, 363 .all = GENMASK(4, 0), 364}; 365 366static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = { 367 .kcan_rx0 = BIT(4), 368 .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) }, 369 .all = GENMASK(19, 16) | BIT(4), 370}; 371 372static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = { 373 .kcan_rx0 = BIT(4), 374 .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) }, 375 .all = GENMASK(23, 16) | BIT(4), 376}; 377 378static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = { 379 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera, 380}; 381 382static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = { 383 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2, 384}; 385 386static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = { 387 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx, 388}; 389 390static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = { 391 .address_offset = &kvaser_pciefd_altera_address_offset, 392 .irq_mask = &kvaser_pciefd_altera_irq_mask, 393 .ops = &kvaser_pciefd_altera_dev_ops, 394}; 395 396static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = { 397 .address_offset = &kvaser_pciefd_sf2_address_offset, 398 .irq_mask = &kvaser_pciefd_sf2_irq_mask, 399 .ops = &kvaser_pciefd_sf2_dev_ops, 400}; 401 402static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = { 403 .address_offset = &kvaser_pciefd_xilinx_address_offset, 404 .irq_mask = &kvaser_pciefd_xilinx_irq_mask, 405 .ops = &kvaser_pciefd_xilinx_dev_ops, 406}; 407 408struct kvaser_pciefd_can { 409 struct can_priv can; 410 struct kvaser_pciefd *kv_pcie; 411 void __iomem *reg_base; 412 struct can_berr_counter bec; 413 u8 cmd_seq; 414 u8 tx_max_count; 415 u8 tx_idx; 416 u8 ack_idx; 417 int err_rep_cnt; 418 unsigned int completed_tx_pkts; 419 unsigned int completed_tx_bytes; 420 spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ 421 struct timer_list bec_poll_timer; 422 struct completion start_comp, flush_comp; 423}; 424 425struct kvaser_pciefd { 426 struct pci_dev *pci; 427 void __iomem *reg_base; 428 struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 429 const struct kvaser_pciefd_driver_data *driver_data; 430 void *dma_data[KVASER_PCIEFD_DMA_COUNT]; 431 u8 nr_channels; 432 u32 bus_freq; 433 u32 freq; 434 u32 freq_to_ticks_div; 435}; 436 437struct kvaser_pciefd_rx_packet { 438 u32 header[2]; 439 u64 timestamp; 440}; 441 442struct kvaser_pciefd_tx_packet { 443 u32 header[2]; 444 u8 data[64]; 445}; 446 447static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { 448 .name = KVASER_PCIEFD_DRV_NAME, 449 .tseg1_min = 1, 450 .tseg1_max = 512, 451 .tseg2_min = 1, 452 .tseg2_max = 32, 453 .sjw_max = 16, 454 .brp_min = 1, 455 .brp_max = 8192, 456 .brp_inc = 1, 457}; 458 459static struct pci_device_id kvaser_pciefd_id_table[] = { 460 { 461 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID), 462 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 463 }, 464 { 465 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID), 466 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 467 }, 468 { 469 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID), 470 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 471 }, 472 { 473 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID), 474 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 475 }, 476 { 477 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID), 478 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 479 }, 480 { 481 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID), 482 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 483 }, 484 { 485 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID), 486 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 487 }, 488 { 489 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID), 490 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 491 }, 492 { 493 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID), 494 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 495 }, 496 { 497 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID), 498 .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, 499 }, 500 { 501 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID), 502 .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data, 503 }, 504 { 505 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID), 506 .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data, 507 }, 508 { 509 0, 510 }, 511}; 512MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); 513 514static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd) 515{ 516 iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) | 517 FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq), 518 can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); 519} 520 521static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) 522{ 523 kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ); 524} 525 526static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can) 527{ 528 kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT); 529} 530 531static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) 532{ 533 u32 mode; 534 unsigned long irq; 535 536 spin_lock_irqsave(&can->lock, irq); 537 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 538 if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { 539 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; 540 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 541 } 542 spin_unlock_irqrestore(&can->lock, irq); 543} 544 545static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) 546{ 547 u32 mode; 548 unsigned long irq; 549 550 spin_lock_irqsave(&can->lock, irq); 551 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 552 mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; 553 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 554 spin_unlock_irqrestore(&can->lock, irq); 555} 556 557static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) 558{ 559 u32 msk; 560 561 msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | 562 KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | 563 KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | 564 KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | 565 KVASER_PCIEFD_KCAN_IRQ_TAR; 566 567 iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 568} 569 570static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie, 571 struct sk_buff *skb, u64 timestamp) 572{ 573 skb_hwtstamps(skb)->hwtstamp = 574 ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div)); 575} 576 577static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) 578{ 579 u32 mode; 580 unsigned long irq; 581 582 spin_lock_irqsave(&can->lock, irq); 583 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 584 if (can->can.ctrlmode & CAN_CTRLMODE_FD) { 585 mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; 586 if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 587 mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; 588 else 589 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; 590 } else { 591 mode |= KVASER_PCIEFD_KCAN_MODE_CCM; 592 mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; 593 } 594 595 if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 596 mode |= KVASER_PCIEFD_KCAN_MODE_LOM; 597 else 598 mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM; 599 mode |= KVASER_PCIEFD_KCAN_MODE_EEN; 600 mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; 601 /* Use ACK packet type */ 602 mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; 603 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; 604 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 605 606 spin_unlock_irqrestore(&can->lock, irq); 607} 608 609static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) 610{ 611 u32 status; 612 unsigned long irq; 613 614 spin_lock_irqsave(&can->lock, irq); 615 iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 616 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, 617 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 618 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); 619 if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { 620 /* If controller is already idle, run abort, flush and reset */ 621 kvaser_pciefd_abort_flush_reset(can); 622 } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { 623 u32 mode; 624 625 /* Put controller in reset mode */ 626 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 627 mode |= KVASER_PCIEFD_KCAN_MODE_RM; 628 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 629 } 630 spin_unlock_irqrestore(&can->lock, irq); 631} 632 633static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) 634{ 635 u32 mode; 636 unsigned long irq; 637 638 timer_delete(&can->bec_poll_timer); 639 if (!completion_done(&can->flush_comp)) 640 kvaser_pciefd_start_controller_flush(can); 641 642 if (!wait_for_completion_timeout(&can->flush_comp, 643 KVASER_PCIEFD_WAIT_TIMEOUT)) { 644 netdev_err(can->can.dev, "Timeout during bus on flush\n"); 645 return -ETIMEDOUT; 646 } 647 648 spin_lock_irqsave(&can->lock, irq); 649 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 650 iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 651 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, 652 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 653 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 654 mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; 655 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 656 spin_unlock_irqrestore(&can->lock, irq); 657 658 if (!wait_for_completion_timeout(&can->start_comp, 659 KVASER_PCIEFD_WAIT_TIMEOUT)) { 660 netdev_err(can->can.dev, "Timeout during bus on reset\n"); 661 return -ETIMEDOUT; 662 } 663 /* Reset interrupt handling */ 664 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 665 iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 666 667 kvaser_pciefd_set_tx_irq(can); 668 kvaser_pciefd_setup_controller(can); 669 can->can.state = CAN_STATE_ERROR_ACTIVE; 670 netif_wake_queue(can->can.dev); 671 can->bec.txerr = 0; 672 can->bec.rxerr = 0; 673 can->err_rep_cnt = 0; 674 675 return 0; 676} 677 678static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) 679{ 680 u8 top; 681 u32 pwm_ctrl; 682 unsigned long irq; 683 684 spin_lock_irqsave(&can->lock, irq); 685 pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 686 top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl); 687 /* Set duty cycle to zero */ 688 pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top); 689 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 690 spin_unlock_irqrestore(&can->lock, irq); 691} 692 693static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) 694{ 695 int top, trigger; 696 u32 pwm_ctrl; 697 unsigned long irq; 698 699 kvaser_pciefd_pwm_stop(can); 700 spin_lock_irqsave(&can->lock, irq); 701 /* Set frequency to 500 KHz */ 702 top = can->kv_pcie->bus_freq / (2 * 500000) - 1; 703 704 pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top); 705 pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top); 706 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 707 708 /* Set duty cycle to 95 */ 709 trigger = (100 * top - 95 * (top + 1) + 50) / 100; 710 pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger); 711 pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top); 712 iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); 713 spin_unlock_irqrestore(&can->lock, irq); 714} 715 716static int kvaser_pciefd_open(struct net_device *netdev) 717{ 718 int ret; 719 struct kvaser_pciefd_can *can = netdev_priv(netdev); 720 721 can->tx_idx = 0; 722 can->ack_idx = 0; 723 724 ret = open_candev(netdev); 725 if (ret) 726 return ret; 727 728 ret = kvaser_pciefd_bus_on(can); 729 if (ret) { 730 close_candev(netdev); 731 return ret; 732 } 733 734 return 0; 735} 736 737static int kvaser_pciefd_stop(struct net_device *netdev) 738{ 739 struct kvaser_pciefd_can *can = netdev_priv(netdev); 740 int ret = 0; 741 742 /* Don't interrupt ongoing flush */ 743 if (!completion_done(&can->flush_comp)) 744 kvaser_pciefd_start_controller_flush(can); 745 746 if (!wait_for_completion_timeout(&can->flush_comp, 747 KVASER_PCIEFD_WAIT_TIMEOUT)) { 748 netdev_err(can->can.dev, "Timeout during stop\n"); 749 ret = -ETIMEDOUT; 750 } else { 751 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 752 timer_delete(&can->bec_poll_timer); 753 } 754 can->can.state = CAN_STATE_STOPPED; 755 netdev_reset_queue(netdev); 756 close_candev(netdev); 757 758 return ret; 759} 760 761static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can) 762{ 763 return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx)); 764} 765 766static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, 767 struct can_priv *can, u8 seq, 768 struct sk_buff *skb) 769{ 770 struct canfd_frame *cf = (struct canfd_frame *)skb->data; 771 int packet_size; 772 773 memset(p, 0, sizeof(*p)); 774 if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT) 775 p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; 776 777 if (cf->can_id & CAN_RTR_FLAG) 778 p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; 779 780 if (cf->can_id & CAN_EFF_FLAG) 781 p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; 782 783 p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id); 784 p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; 785 786 if (can_is_canfd_skb(skb)) { 787 p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, 788 can_fd_len2dlc(cf->len)); 789 p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; 790 if (cf->flags & CANFD_BRS) 791 p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; 792 if (cf->flags & CANFD_ESI) 793 p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; 794 } else { 795 p->header[1] |= 796 FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, 797 can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode)); 798 } 799 800 p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq); 801 802 packet_size = cf->len; 803 memcpy(p->data, cf->data, packet_size); 804 805 return DIV_ROUND_UP(packet_size, 4); 806} 807 808static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, 809 struct net_device *netdev) 810{ 811 struct kvaser_pciefd_can *can = netdev_priv(netdev); 812 struct kvaser_pciefd_tx_packet packet; 813 unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1); 814 unsigned int frame_len; 815 int nr_words; 816 817 if (can_dev_dropped_skb(netdev, skb)) 818 return NETDEV_TX_OK; 819 if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1)) 820 return NETDEV_TX_BUSY; 821 822 nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb); 823 824 /* Prepare and save echo skb in internal slot */ 825 WRITE_ONCE(can->can.echo_skb[seq], NULL); 826 frame_len = can_skb_get_frame_len(skb); 827 can_put_echo_skb(skb, netdev, seq, frame_len); 828 netdev_sent_queue(netdev, frame_len); 829 WRITE_ONCE(can->tx_idx, can->tx_idx + 1); 830 831 /* Write header to fifo */ 832 iowrite32(packet.header[0], 833 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); 834 iowrite32(packet.header[1], 835 can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); 836 837 if (nr_words) { 838 u32 data_last = ((u32 *)packet.data)[nr_words - 1]; 839 840 /* Write data to fifo, except last word */ 841 iowrite32_rep(can->reg_base + 842 KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, 843 nr_words - 1); 844 /* Write last word to end of fifo */ 845 __raw_writel(data_last, can->reg_base + 846 KVASER_PCIEFD_KCAN_FIFO_LAST_REG); 847 } else { 848 /* Complete write to fifo */ 849 __raw_writel(0, can->reg_base + 850 KVASER_PCIEFD_KCAN_FIFO_LAST_REG); 851 } 852 853 netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1); 854 855 return NETDEV_TX_OK; 856} 857 858static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) 859{ 860 u32 mode, test, btrn; 861 unsigned long irq_flags; 862 int ret; 863 struct can_bittiming *bt; 864 865 if (data) 866 bt = &can->can.fd.data_bittiming; 867 else 868 bt = &can->can.bittiming; 869 870 btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) | 871 FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) | 872 FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) | 873 FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1); 874 875 spin_lock_irqsave(&can->lock, irq_flags); 876 mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 877 /* Put the circuit in reset mode */ 878 iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, 879 can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 880 881 /* Can only set bittiming if in reset mode */ 882 ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, 883 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10); 884 if (ret) { 885 spin_unlock_irqrestore(&can->lock, irq_flags); 886 return -EBUSY; 887 } 888 889 if (data) 890 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); 891 else 892 iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); 893 /* Restore previous reset mode status */ 894 iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 895 spin_unlock_irqrestore(&can->lock, irq_flags); 896 897 return 0; 898} 899 900static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) 901{ 902 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false); 903} 904 905static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) 906{ 907 return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true); 908} 909 910static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) 911{ 912 struct kvaser_pciefd_can *can = netdev_priv(ndev); 913 int ret = 0; 914 915 switch (mode) { 916 case CAN_MODE_START: 917 if (!can->can.restart_ms) 918 ret = kvaser_pciefd_bus_on(can); 919 break; 920 default: 921 return -EOPNOTSUPP; 922 } 923 924 return ret; 925} 926 927static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, 928 struct can_berr_counter *bec) 929{ 930 struct kvaser_pciefd_can *can = netdev_priv(ndev); 931 932 bec->rxerr = can->bec.rxerr; 933 bec->txerr = can->bec.txerr; 934 935 return 0; 936} 937 938static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) 939{ 940 struct kvaser_pciefd_can *can = timer_container_of(can, data, 941 bec_poll_timer); 942 943 kvaser_pciefd_enable_err_gen(can); 944 kvaser_pciefd_request_status(can); 945 can->err_rep_cnt = 0; 946} 947 948static const struct net_device_ops kvaser_pciefd_netdev_ops = { 949 .ndo_open = kvaser_pciefd_open, 950 .ndo_stop = kvaser_pciefd_stop, 951 .ndo_eth_ioctl = can_eth_ioctl_hwts, 952 .ndo_start_xmit = kvaser_pciefd_start_xmit, 953 .ndo_change_mtu = can_change_mtu, 954}; 955 956static const struct ethtool_ops kvaser_pciefd_ethtool_ops = { 957 .get_ts_info = can_ethtool_op_get_ts_info_hwts, 958}; 959 960static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) 961{ 962 int i; 963 964 for (i = 0; i < pcie->nr_channels; i++) { 965 struct net_device *netdev; 966 struct kvaser_pciefd_can *can; 967 u32 status, tx_nr_packets_max; 968 969 netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), 970 roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT)); 971 if (!netdev) 972 return -ENOMEM; 973 974 can = netdev_priv(netdev); 975 netdev->netdev_ops = &kvaser_pciefd_netdev_ops; 976 netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops; 977 can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i); 978 can->kv_pcie = pcie; 979 can->cmd_seq = 0; 980 can->err_rep_cnt = 0; 981 can->completed_tx_pkts = 0; 982 can->completed_tx_bytes = 0; 983 can->bec.txerr = 0; 984 can->bec.rxerr = 0; 985 986 init_completion(&can->start_comp); 987 init_completion(&can->flush_comp); 988 timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0); 989 990 /* Disable Bus load reporting */ 991 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); 992 993 tx_nr_packets_max = 994 FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK, 995 ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); 996 can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); 997 998 can->can.clock.freq = pcie->freq; 999 spin_lock_init(&can->lock); 1000 1001 can->can.bittiming_const = &kvaser_pciefd_bittiming_const; 1002 can->can.fd.data_bittiming_const = &kvaser_pciefd_bittiming_const; 1003 can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; 1004 can->can.fd.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming; 1005 can->can.do_set_mode = kvaser_pciefd_set_mode; 1006 can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; 1007 can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | 1008 CAN_CTRLMODE_FD | 1009 CAN_CTRLMODE_FD_NON_ISO | 1010 CAN_CTRLMODE_CC_LEN8_DLC | 1011 CAN_CTRLMODE_BERR_REPORTING; 1012 1013 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); 1014 if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) { 1015 dev_err(&pcie->pci->dev, 1016 "CAN FD not supported as expected %d\n", i); 1017 1018 free_candev(netdev); 1019 return -ENODEV; 1020 } 1021 1022 if (status & KVASER_PCIEFD_KCAN_STAT_CAP) 1023 can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; 1024 1025 netdev->flags |= IFF_ECHO; 1026 SET_NETDEV_DEV(netdev, &pcie->pci->dev); 1027 1028 iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 1029 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, 1030 can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1031 1032 pcie->can[i] = can; 1033 kvaser_pciefd_pwm_start(can); 1034 } 1035 1036 return 0; 1037} 1038 1039static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) 1040{ 1041 int i; 1042 1043 for (i = 0; i < pcie->nr_channels; i++) { 1044 int ret = register_candev(pcie->can[i]->can.dev); 1045 1046 if (ret) { 1047 int j; 1048 1049 /* Unregister all successfully registered devices. */ 1050 for (j = 0; j < i; j++) 1051 unregister_candev(pcie->can[j]->can.dev); 1052 return ret; 1053 } 1054 } 1055 1056 return 0; 1057} 1058 1059static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, 1060 dma_addr_t addr, int index) 1061{ 1062 void __iomem *serdes_base; 1063 u32 word1, word2; 1064 1065 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) { 1066 word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT; 1067 word2 = upper_32_bits(addr); 1068 } else { 1069 word1 = addr; 1070 word2 = 0; 1071 } 1072 serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; 1073 iowrite32(word1, serdes_base); 1074 iowrite32(word2, serdes_base + 0x4); 1075} 1076 1077static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, 1078 dma_addr_t addr, int index) 1079{ 1080 void __iomem *serdes_base; 1081 u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK; 1082 u32 msb = 0x0; 1083 1084 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 1085 msb = upper_32_bits(addr); 1086 1087 serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index; 1088 iowrite32(lsb, serdes_base); 1089 iowrite32(msb, serdes_base + 0x4); 1090} 1091 1092static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie, 1093 dma_addr_t addr, int index) 1094{ 1095 void __iomem *serdes_base; 1096 u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK; 1097 u32 msb = 0x0; 1098 1099 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 1100 msb = upper_32_bits(addr); 1101 1102 serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; 1103 iowrite32(msb, serdes_base); 1104 iowrite32(lsb, serdes_base + 0x4); 1105} 1106 1107static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) 1108{ 1109 int i; 1110 u32 srb_status; 1111 u32 srb_packet_count; 1112 dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; 1113 1114 /* Disable the DMA */ 1115 iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1116 1117 dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64)); 1118 1119 for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { 1120 pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev, 1121 KVASER_PCIEFD_DMA_SIZE, 1122 &dma_addr[i], 1123 GFP_KERNEL); 1124 1125 if (!pcie->dma_data[i] || !dma_addr[i]) { 1126 dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n", 1127 KVASER_PCIEFD_DMA_SIZE); 1128 return -ENOMEM; 1129 } 1130 pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i); 1131 } 1132 1133 /* Reset Rx FIFO, and both DMA buffers */ 1134 iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | 1135 KVASER_PCIEFD_SRB_CMD_RDB1, 1136 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1137 /* Empty Rx FIFO */ 1138 srb_packet_count = 1139 FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK, 1140 ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + 1141 KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG)); 1142 while (srb_packet_count) { 1143 /* Drop current packet in FIFO */ 1144 ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG); 1145 srb_packet_count--; 1146 } 1147 1148 srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); 1149 if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { 1150 dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); 1151 return -EIO; 1152 } 1153 1154 /* Enable the DMA */ 1155 iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, 1156 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1157 1158 return 0; 1159} 1160 1161static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) 1162{ 1163 u32 version, srb_status, build; 1164 1165 version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG); 1166 pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS, 1167 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version)); 1168 1169 build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG); 1170 dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n", 1171 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version), 1172 FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version), 1173 FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build)); 1174 1175 srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); 1176 if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { 1177 dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n"); 1178 return -ENODEV; 1179 } 1180 1181 pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG); 1182 pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG); 1183 pcie->freq_to_ticks_div = pcie->freq / 1000000; 1184 if (pcie->freq_to_ticks_div == 0) 1185 pcie->freq_to_ticks_div = 1; 1186 /* Turn off all loopback functionality */ 1187 iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie)); 1188 1189 return 0; 1190} 1191 1192static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, 1193 struct kvaser_pciefd_rx_packet *p, 1194 __le32 *data) 1195{ 1196 struct sk_buff *skb; 1197 struct canfd_frame *cf; 1198 struct can_priv *priv; 1199 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); 1200 u8 dlc; 1201 1202 if (ch_id >= pcie->nr_channels) 1203 return -EIO; 1204 1205 priv = &pcie->can[ch_id]->can; 1206 dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]); 1207 1208 if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) { 1209 skb = alloc_canfd_skb(priv->dev, &cf); 1210 if (!skb) { 1211 priv->dev->stats.rx_dropped++; 1212 return 0; 1213 } 1214 1215 cf->len = can_fd_dlc2len(dlc); 1216 if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS) 1217 cf->flags |= CANFD_BRS; 1218 if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI) 1219 cf->flags |= CANFD_ESI; 1220 } else { 1221 skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); 1222 if (!skb) { 1223 priv->dev->stats.rx_dropped++; 1224 return 0; 1225 } 1226 can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode); 1227 } 1228 1229 cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]); 1230 if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) 1231 cf->can_id |= CAN_EFF_FLAG; 1232 1233 if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) { 1234 cf->can_id |= CAN_RTR_FLAG; 1235 } else { 1236 memcpy(cf->data, data, cf->len); 1237 priv->dev->stats.rx_bytes += cf->len; 1238 } 1239 priv->dev->stats.rx_packets++; 1240 kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); 1241 1242 netif_rx(skb); 1243 1244 return 0; 1245} 1246 1247static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, 1248 const struct can_berr_counter *bec, 1249 struct can_frame *cf, 1250 enum can_state new_state, 1251 enum can_state tx_state, 1252 enum can_state rx_state) 1253{ 1254 enum can_state old_state; 1255 1256 old_state = can->can.state; 1257 can_change_state(can->can.dev, cf, tx_state, rx_state); 1258 1259 if (new_state == CAN_STATE_BUS_OFF) { 1260 struct net_device *ndev = can->can.dev; 1261 unsigned long irq_flags; 1262 1263 spin_lock_irqsave(&can->lock, irq_flags); 1264 netif_stop_queue(can->can.dev); 1265 spin_unlock_irqrestore(&can->lock, irq_flags); 1266 /* Prevent CAN controller from auto recover from bus off */ 1267 if (!can->can.restart_ms) { 1268 kvaser_pciefd_start_controller_flush(can); 1269 can_bus_off(ndev); 1270 } 1271 } 1272 if (old_state == CAN_STATE_BUS_OFF && 1273 new_state == CAN_STATE_ERROR_ACTIVE && 1274 can->can.restart_ms) { 1275 can->can.can_stats.restarts++; 1276 if (cf) 1277 cf->can_id |= CAN_ERR_RESTARTED; 1278 } 1279 if (cf && new_state != CAN_STATE_BUS_OFF) { 1280 cf->can_id |= CAN_ERR_CNT; 1281 cf->data[6] = bec->txerr; 1282 cf->data[7] = bec->rxerr; 1283 } 1284} 1285 1286static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p, 1287 struct can_berr_counter *bec, 1288 enum can_state *new_state, 1289 enum can_state *tx_state, 1290 enum can_state *rx_state) 1291{ 1292 if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF || 1293 p->header[0] & KVASER_PCIEFD_SPACK_IRM) 1294 *new_state = CAN_STATE_BUS_OFF; 1295 else if (bec->txerr >= 255 || bec->rxerr >= 255) 1296 *new_state = CAN_STATE_BUS_OFF; 1297 else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR) 1298 *new_state = CAN_STATE_ERROR_PASSIVE; 1299 else if (bec->txerr >= 128 || bec->rxerr >= 128) 1300 *new_state = CAN_STATE_ERROR_PASSIVE; 1301 else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR) 1302 *new_state = CAN_STATE_ERROR_WARNING; 1303 else if (bec->txerr >= 96 || bec->rxerr >= 96) 1304 *new_state = CAN_STATE_ERROR_WARNING; 1305 else 1306 *new_state = CAN_STATE_ERROR_ACTIVE; 1307 1308 *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0; 1309 *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0; 1310} 1311 1312static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, 1313 struct kvaser_pciefd_rx_packet *p) 1314{ 1315 struct can_berr_counter bec; 1316 enum can_state old_state, new_state, tx_state, rx_state; 1317 struct net_device *ndev = can->can.dev; 1318 struct sk_buff *skb = NULL; 1319 struct can_frame *cf = NULL; 1320 1321 old_state = can->can.state; 1322 1323 bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]); 1324 bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]); 1325 1326 kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state); 1327 if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 1328 skb = alloc_can_err_skb(ndev, &cf); 1329 if (new_state != old_state) { 1330 kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state); 1331 } 1332 1333 can->err_rep_cnt++; 1334 can->can.can_stats.bus_error++; 1335 if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) 1336 ndev->stats.tx_errors++; 1337 else 1338 ndev->stats.rx_errors++; 1339 1340 can->bec.txerr = bec.txerr; 1341 can->bec.rxerr = bec.rxerr; 1342 1343 if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { 1344 if (!skb) { 1345 netdev_warn(ndev, "No memory left for err_skb\n"); 1346 ndev->stats.rx_dropped++; 1347 return -ENOMEM; 1348 } 1349 kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp); 1350 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT; 1351 cf->data[6] = bec.txerr; 1352 cf->data[7] = bec.rxerr; 1353 netif_rx(skb); 1354 } 1355 1356 return 0; 1357} 1358 1359static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, 1360 struct kvaser_pciefd_rx_packet *p) 1361{ 1362 struct kvaser_pciefd_can *can; 1363 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); 1364 1365 if (ch_id >= pcie->nr_channels) 1366 return -EIO; 1367 1368 can = pcie->can[ch_id]; 1369 kvaser_pciefd_rx_error_frame(can, p); 1370 if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) 1371 /* Do not report more errors, until bec_poll_timer expires */ 1372 kvaser_pciefd_disable_err_gen(can); 1373 /* Start polling the error counters */ 1374 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); 1375 1376 return 0; 1377} 1378 1379static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, 1380 struct kvaser_pciefd_rx_packet *p) 1381{ 1382 struct can_berr_counter bec; 1383 enum can_state old_state, new_state, tx_state, rx_state; 1384 int ret = 0; 1385 1386 old_state = can->can.state; 1387 1388 bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]); 1389 bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]); 1390 1391 kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state); 1392 if (new_state != old_state) { 1393 struct net_device *ndev = can->can.dev; 1394 struct sk_buff *skb; 1395 struct can_frame *cf; 1396 1397 skb = alloc_can_err_skb(ndev, &cf); 1398 kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state); 1399 if (skb) { 1400 kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp); 1401 netif_rx(skb); 1402 } else { 1403 ndev->stats.rx_dropped++; 1404 netdev_warn(ndev, "No memory left for err_skb\n"); 1405 ret = -ENOMEM; 1406 } 1407 } 1408 can->bec.txerr = bec.txerr; 1409 can->bec.rxerr = bec.rxerr; 1410 /* Check if we need to poll the error counters */ 1411 if (bec.txerr || bec.rxerr) 1412 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); 1413 1414 return ret; 1415} 1416 1417static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, 1418 struct kvaser_pciefd_rx_packet *p) 1419{ 1420 struct kvaser_pciefd_can *can; 1421 u8 cmdseq; 1422 u32 status; 1423 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); 1424 1425 if (ch_id >= pcie->nr_channels) 1426 return -EIO; 1427 1428 can = pcie->can[ch_id]; 1429 1430 status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); 1431 cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status); 1432 1433 /* Reset done, start abort and flush */ 1434 if (p->header[0] & KVASER_PCIEFD_SPACK_IRM && 1435 p->header[0] & KVASER_PCIEFD_SPACK_RMCD && 1436 p->header[1] & KVASER_PCIEFD_SPACK_AUTO && 1437 cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) && 1438 status & KVASER_PCIEFD_KCAN_STAT_IDLE) { 1439 iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, 1440 can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 1441 kvaser_pciefd_abort_flush_reset(can); 1442 } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && 1443 p->header[0] & KVASER_PCIEFD_SPACK_IRM && 1444 cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) && 1445 status & KVASER_PCIEFD_KCAN_STAT_IDLE) { 1446 /* Reset detected, send end of flush if no packet are in FIFO */ 1447 u8 count; 1448 1449 count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, 1450 ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); 1451 if (!count) 1452 iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK, 1453 KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH), 1454 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); 1455 } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) && 1456 cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) { 1457 /* Response to status request received */ 1458 kvaser_pciefd_handle_status_resp(can, p); 1459 if (can->can.state != CAN_STATE_BUS_OFF && 1460 can->can.state != CAN_STATE_ERROR_ACTIVE) { 1461 mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); 1462 } 1463 } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD && 1464 !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) { 1465 /* Reset to bus on detected */ 1466 if (!completion_done(&can->start_comp)) 1467 complete(&can->start_comp); 1468 } 1469 1470 return 0; 1471} 1472 1473static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, 1474 struct kvaser_pciefd_rx_packet *p) 1475{ 1476 struct sk_buff *skb; 1477 struct can_frame *cf; 1478 1479 skb = alloc_can_err_skb(can->can.dev, &cf); 1480 can->can.dev->stats.tx_errors++; 1481 if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) { 1482 if (skb) 1483 cf->can_id |= CAN_ERR_LOSTARB; 1484 can->can.can_stats.arbitration_lost++; 1485 } else if (skb) { 1486 cf->can_id |= CAN_ERR_ACK; 1487 } 1488 1489 if (skb) { 1490 cf->can_id |= CAN_ERR_BUSERROR; 1491 kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp); 1492 netif_rx(skb); 1493 } else { 1494 can->can.dev->stats.rx_dropped++; 1495 netdev_warn(can->can.dev, "No memory left for err_skb\n"); 1496 } 1497} 1498 1499static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, 1500 struct kvaser_pciefd_rx_packet *p) 1501{ 1502 struct kvaser_pciefd_can *can; 1503 bool one_shot_fail = false; 1504 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); 1505 1506 if (ch_id >= pcie->nr_channels) 1507 return -EIO; 1508 1509 can = pcie->can[ch_id]; 1510 /* Ignore control packet ACK */ 1511 if (p->header[0] & KVASER_PCIEFD_APACKET_CT) 1512 return 0; 1513 1514 if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) { 1515 kvaser_pciefd_handle_nack_packet(can, p); 1516 one_shot_fail = true; 1517 } 1518 1519 if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { 1520 netdev_dbg(can->can.dev, "Packet was flushed\n"); 1521 } else { 1522 int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]); 1523 unsigned int len, frame_len = 0; 1524 struct sk_buff *skb; 1525 1526 if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1))) 1527 return 0; 1528 skb = can->can.echo_skb[echo_idx]; 1529 if (!skb) 1530 return 0; 1531 kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); 1532 len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len); 1533 1534 /* Pairs with barrier in kvaser_pciefd_start_xmit() */ 1535 smp_store_release(&can->ack_idx, can->ack_idx + 1); 1536 can->completed_tx_pkts++; 1537 can->completed_tx_bytes += frame_len; 1538 1539 if (!one_shot_fail) { 1540 can->can.dev->stats.tx_bytes += len; 1541 can->can.dev->stats.tx_packets++; 1542 } 1543 } 1544 1545 return 0; 1546} 1547 1548static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, 1549 struct kvaser_pciefd_rx_packet *p) 1550{ 1551 struct kvaser_pciefd_can *can; 1552 u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); 1553 1554 if (ch_id >= pcie->nr_channels) 1555 return -EIO; 1556 1557 can = pcie->can[ch_id]; 1558 1559 if (!completion_done(&can->flush_comp)) 1560 complete(&can->flush_comp); 1561 1562 return 0; 1563} 1564 1565static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, 1566 int dma_buf) 1567{ 1568 __le32 *buffer = pcie->dma_data[dma_buf]; 1569 __le64 timestamp; 1570 struct kvaser_pciefd_rx_packet packet; 1571 struct kvaser_pciefd_rx_packet *p = &packet; 1572 u8 type; 1573 int pos = *start_pos; 1574 int size; 1575 int ret = 0; 1576 1577 size = le32_to_cpu(buffer[pos++]); 1578 if (!size) { 1579 *start_pos = 0; 1580 return 0; 1581 } 1582 1583 p->header[0] = le32_to_cpu(buffer[pos++]); 1584 p->header[1] = le32_to_cpu(buffer[pos++]); 1585 1586 /* Read 64-bit timestamp */ 1587 memcpy(&timestamp, &buffer[pos], sizeof(__le64)); 1588 pos += 2; 1589 p->timestamp = le64_to_cpu(timestamp); 1590 1591 type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]); 1592 switch (type) { 1593 case KVASER_PCIEFD_PACK_TYPE_DATA: 1594 ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]); 1595 if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { 1596 u8 data_len; 1597 1598 data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, 1599 p->header[1])); 1600 pos += DIV_ROUND_UP(data_len, 4); 1601 } 1602 break; 1603 1604 case KVASER_PCIEFD_PACK_TYPE_ACK: 1605 ret = kvaser_pciefd_handle_ack_packet(pcie, p); 1606 break; 1607 1608 case KVASER_PCIEFD_PACK_TYPE_STATUS: 1609 ret = kvaser_pciefd_handle_status_packet(pcie, p); 1610 break; 1611 1612 case KVASER_PCIEFD_PACK_TYPE_ERROR: 1613 ret = kvaser_pciefd_handle_error_packet(pcie, p); 1614 break; 1615 1616 case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK: 1617 ret = kvaser_pciefd_handle_eflush_packet(pcie, p); 1618 break; 1619 1620 case KVASER_PCIEFD_PACK_TYPE_ACK_DATA: 1621 case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD: 1622 case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK: 1623 case KVASER_PCIEFD_PACK_TYPE_TXRQ: 1624 dev_info(&pcie->pci->dev, 1625 "Received unexpected packet type 0x%08X\n", type); 1626 break; 1627 1628 default: 1629 dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type); 1630 ret = -EIO; 1631 break; 1632 } 1633 1634 if (ret) 1635 return ret; 1636 1637 /* Position does not point to the end of the package, 1638 * corrupted packet size? 1639 */ 1640 if (unlikely((*start_pos + size) != pos)) 1641 return -EIO; 1642 1643 /* Point to the next packet header, if any */ 1644 *start_pos = pos; 1645 1646 return ret; 1647} 1648 1649static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) 1650{ 1651 int pos = 0; 1652 int res = 0; 1653 unsigned int i; 1654 1655 do { 1656 res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); 1657 } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); 1658 1659 /* Report ACKs in this buffer to BQL en masse for correct periods */ 1660 for (i = 0; i < pcie->nr_channels; ++i) { 1661 struct kvaser_pciefd_can *can = pcie->can[i]; 1662 1663 if (!can->completed_tx_pkts) 1664 continue; 1665 netif_subqueue_completed_wake(can->can.dev, 0, 1666 can->completed_tx_pkts, 1667 can->completed_tx_bytes, 1668 kvaser_pciefd_tx_avail(can), 1); 1669 can->completed_tx_pkts = 0; 1670 can->completed_tx_bytes = 0; 1671 } 1672 1673 return res; 1674} 1675 1676static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) 1677{ 1678 void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG; 1679 u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); 1680 1681 iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); 1682 1683 if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { 1684 kvaser_pciefd_read_buffer(pcie, 0); 1685 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */ 1686 } 1687 1688 if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { 1689 kvaser_pciefd_read_buffer(pcie, 1); 1690 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */ 1691 } 1692 1693 if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || 1694 irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || 1695 irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || 1696 irq & KVASER_PCIEFD_SRB_IRQ_DUF1)) 1697 dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); 1698} 1699 1700static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) 1701{ 1702 u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 1703 1704 if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) 1705 netdev_err(can->can.dev, "Tx FIFO overflow\n"); 1706 1707 if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) 1708 netdev_err(can->can.dev, 1709 "Fail to change bittiming, when not in reset mode\n"); 1710 1711 if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC) 1712 netdev_err(can->can.dev, "CAN FD frame in CAN mode\n"); 1713 1714 if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF) 1715 netdev_err(can->can.dev, "Rx FIFO overflow\n"); 1716 1717 iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 1718} 1719 1720static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) 1721{ 1722 struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; 1723 const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; 1724 u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); 1725 int i; 1726 1727 if (!(pci_irq & irq_mask->all)) 1728 return IRQ_NONE; 1729 1730 iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); 1731 1732 if (pci_irq & irq_mask->kcan_rx0) 1733 kvaser_pciefd_receive_irq(pcie); 1734 1735 for (i = 0; i < pcie->nr_channels; i++) { 1736 if (pci_irq & irq_mask->kcan_tx[i]) 1737 kvaser_pciefd_transmit_irq(pcie->can[i]); 1738 } 1739 1740 iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); 1741 1742 return IRQ_HANDLED; 1743} 1744 1745static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) 1746{ 1747 int i; 1748 1749 for (i = 0; i < pcie->nr_channels; i++) { 1750 struct kvaser_pciefd_can *can = pcie->can[i]; 1751 1752 if (can) { 1753 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1754 kvaser_pciefd_pwm_stop(can); 1755 free_candev(can->can.dev); 1756 } 1757 } 1758} 1759 1760static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie) 1761{ 1762 unsigned int i; 1763 1764 /* Masking PCI_IRQ is insufficient as running ISR will unmask it */ 1765 iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); 1766 for (i = 0; i < pcie->nr_channels; ++i) 1767 iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1768} 1769 1770static int kvaser_pciefd_probe(struct pci_dev *pdev, 1771 const struct pci_device_id *id) 1772{ 1773 int ret; 1774 struct kvaser_pciefd *pcie; 1775 const struct kvaser_pciefd_irq_mask *irq_mask; 1776 1777 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); 1778 if (!pcie) 1779 return -ENOMEM; 1780 1781 pci_set_drvdata(pdev, pcie); 1782 pcie->pci = pdev; 1783 pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data; 1784 irq_mask = pcie->driver_data->irq_mask; 1785 1786 ret = pci_enable_device(pdev); 1787 if (ret) 1788 return ret; 1789 1790 ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); 1791 if (ret) 1792 goto err_disable_pci; 1793 1794 pcie->reg_base = pci_iomap(pdev, 0, 0); 1795 if (!pcie->reg_base) { 1796 ret = -ENOMEM; 1797 goto err_release_regions; 1798 } 1799 1800 ret = kvaser_pciefd_setup_board(pcie); 1801 if (ret) 1802 goto err_pci_iounmap; 1803 1804 ret = kvaser_pciefd_setup_dma(pcie); 1805 if (ret) 1806 goto err_pci_iounmap; 1807 1808 pci_set_master(pdev); 1809 1810 ret = kvaser_pciefd_setup_can_ctrls(pcie); 1811 if (ret) 1812 goto err_teardown_can_ctrls; 1813 1814 ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI); 1815 if (ret < 0) { 1816 dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n"); 1817 goto err_teardown_can_ctrls; 1818 } 1819 1820 ret = pci_irq_vector(pcie->pci, 0); 1821 if (ret < 0) 1822 goto err_pci_free_irq_vectors; 1823 1824 pcie->pci->irq = ret; 1825 1826 ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, 1827 IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); 1828 if (ret) { 1829 dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq); 1830 goto err_pci_free_irq_vectors; 1831 } 1832 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, 1833 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); 1834 1835 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | 1836 KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | 1837 KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, 1838 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); 1839 1840 /* Enable PCI interrupts */ 1841 iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); 1842 /* Ready the DMA buffers */ 1843 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, 1844 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1845 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, 1846 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1847 1848 ret = kvaser_pciefd_reg_candev(pcie); 1849 if (ret) 1850 goto err_free_irq; 1851 1852 return 0; 1853 1854err_free_irq: 1855 kvaser_pciefd_disable_irq_srcs(pcie); 1856 free_irq(pcie->pci->irq, pcie); 1857 1858err_pci_free_irq_vectors: 1859 pci_free_irq_vectors(pcie->pci); 1860 1861err_teardown_can_ctrls: 1862 kvaser_pciefd_teardown_can_ctrls(pcie); 1863 iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1864 pci_clear_master(pdev); 1865 1866err_pci_iounmap: 1867 pci_iounmap(pdev, pcie->reg_base); 1868 1869err_release_regions: 1870 pci_release_regions(pdev); 1871 1872err_disable_pci: 1873 pci_disable_device(pdev); 1874 1875 return ret; 1876} 1877 1878static void kvaser_pciefd_remove(struct pci_dev *pdev) 1879{ 1880 struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); 1881 unsigned int i; 1882 1883 for (i = 0; i < pcie->nr_channels; ++i) { 1884 struct kvaser_pciefd_can *can = pcie->can[i]; 1885 1886 unregister_candev(can->can.dev); 1887 timer_delete(&can->bec_poll_timer); 1888 kvaser_pciefd_pwm_stop(can); 1889 } 1890 1891 kvaser_pciefd_disable_irq_srcs(pcie); 1892 free_irq(pcie->pci->irq, pcie); 1893 pci_free_irq_vectors(pcie->pci); 1894 1895 for (i = 0; i < pcie->nr_channels; ++i) 1896 free_candev(pcie->can[i]->can.dev); 1897 1898 pci_iounmap(pdev, pcie->reg_base); 1899 pci_release_regions(pdev); 1900 pci_disable_device(pdev); 1901} 1902 1903static struct pci_driver kvaser_pciefd = { 1904 .name = KVASER_PCIEFD_DRV_NAME, 1905 .id_table = kvaser_pciefd_id_table, 1906 .probe = kvaser_pciefd_probe, 1907 .remove = kvaser_pciefd_remove, 1908}; 1909 1910module_pci_driver(kvaser_pciefd)