Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux-can-next-for-4.12-20170425' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next

Marc Kleine-Budde says:

====================
pull-request: can-next 2017-04-25

this is a pull request of 21 patches for net-next/master.

There are 4 patches by Stephane Grosjean for the PEAK PCAN-PCIe FD
CAN-FD boards. The next 7 patches are by Mario Huettel, which add
support for M_CAN IP version >= v3.1.x to the m_can driver. A patch by
Remigiusz Kołłątaj adds support for the Microchip CAN BUS Analyzer. 8
patches by Oliver Hartkopp complete the initial CAN network namespace
support. Wei Yongjun's patch for the ti_hecc driver fixes the return
value check in the probe function.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+3889 -373
+19
drivers/net/can/Kconfig
··· 9 9 This driver can also be built as a module. If so, the module 10 10 will be called vcan. 11 11 12 + config CAN_VXCAN 13 + tristate "Virtual CAN Tunnel (vxcan)" 14 + ---help--- 15 + Similar to the virtual ethernet driver veth, vxcan implements a 16 + local CAN traffic tunnel between two virtual CAN network devices. 17 + When creating a vxcan, two vxcan devices are created as pair. 18 + When one end receives the packet it appears on its pair and vice 19 + versa. The vxcan can be used for cross namespace communication. 20 + 21 + In opposite to vcan loopback devices the vxcan only forwards CAN 22 + frames to its pair and does *not* provide a local echo of sent 23 + CAN frames. To disable a potential echo in af_can.c the vxcan driver 24 + announces IFF_ECHO in the interface flags. To have a clean start 25 + in each namespace the CAN GW hop counter is set to zero. 26 + 27 + This driver can also be built as a module. If so, the module 28 + will be called vxcan. 29 + 12 30 config CAN_SLCAN 13 31 tristate "Serial / USB serial CAN Adaptors (slcan)" 14 32 depends on TTY ··· 160 142 source "drivers/net/can/ifi_canfd/Kconfig" 161 143 source "drivers/net/can/m_can/Kconfig" 162 144 source "drivers/net/can/mscan/Kconfig" 145 + source "drivers/net/can/peak_canfd/Kconfig" 163 146 source "drivers/net/can/rcar/Kconfig" 164 147 source "drivers/net/can/sja1000/Kconfig" 165 148 source "drivers/net/can/softing/Kconfig"
+2
drivers/net/can/Makefile
··· 3 3 # 4 4 5 5 obj-$(CONFIG_CAN_VCAN) += vcan.o 6 + obj-$(CONFIG_CAN_VXCAN) += vxcan.o 6 7 obj-$(CONFIG_CAN_SLCAN) += slcan.o 7 8 8 9 obj-$(CONFIG_CAN_DEV) += can-dev.o ··· 27 26 obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o 28 27 obj-$(CONFIG_CAN_MSCAN) += mscan/ 29 28 obj-$(CONFIG_CAN_M_CAN) += m_can/ 29 + obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/ 30 30 obj-$(CONFIG_CAN_SJA1000) += sja1000/ 31 31 obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o 32 32 obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
+586 -168
drivers/net/can/m_can/m_can.c
··· 23 23 #include <linux/of.h> 24 24 #include <linux/of_device.h> 25 25 #include <linux/platform_device.h> 26 - 26 + #include <linux/iopoll.h> 27 27 #include <linux/can/dev.h> 28 28 29 29 /* napi related */ ··· 37 37 M_CAN_CREL = 0x0, 38 38 M_CAN_ENDN = 0x4, 39 39 M_CAN_CUST = 0x8, 40 - M_CAN_FBTP = 0xc, 40 + M_CAN_DBTP = 0xc, 41 41 M_CAN_TEST = 0x10, 42 42 M_CAN_RWD = 0x14, 43 43 M_CAN_CCCR = 0x18, 44 - M_CAN_BTP = 0x1c, 44 + M_CAN_NBTP = 0x1c, 45 45 M_CAN_TSCC = 0x20, 46 46 M_CAN_TSCV = 0x24, 47 47 M_CAN_TOCC = 0x28, 48 48 M_CAN_TOCV = 0x2c, 49 49 M_CAN_ECR = 0x40, 50 50 M_CAN_PSR = 0x44, 51 + /* TDCR Register only available for version >=3.1.x */ 52 + M_CAN_TDCR = 0x48, 51 53 M_CAN_IR = 0x50, 52 54 M_CAN_IE = 0x54, 53 55 M_CAN_ILS = 0x58, ··· 107 105 MRAM_CFG_NUM, 108 106 }; 109 107 110 - /* Fast Bit Timing & Prescaler Register (FBTP) */ 111 - #define FBTR_FBRP_MASK 0x1f 112 - #define FBTR_FBRP_SHIFT 16 113 - #define FBTR_FTSEG1_SHIFT 8 114 - #define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT) 115 - #define FBTR_FTSEG2_SHIFT 4 116 - #define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT) 117 - #define FBTR_FSJW_SHIFT 0 118 - #define FBTR_FSJW_MASK 0x3 108 + /* Core Release Register (CREL) */ 109 + #define CREL_REL_SHIFT 28 110 + #define CREL_REL_MASK (0xF << CREL_REL_SHIFT) 111 + #define CREL_STEP_SHIFT 24 112 + #define CREL_STEP_MASK (0xF << CREL_STEP_SHIFT) 113 + #define CREL_SUBSTEP_SHIFT 20 114 + #define CREL_SUBSTEP_MASK (0xF << CREL_SUBSTEP_SHIFT) 115 + 116 + /* Data Bit Timing & Prescaler Register (DBTP) */ 117 + #define DBTP_TDC BIT(23) 118 + #define DBTP_DBRP_SHIFT 16 119 + #define DBTP_DBRP_MASK (0x1f << DBTP_DBRP_SHIFT) 120 + #define DBTP_DTSEG1_SHIFT 8 121 + #define DBTP_DTSEG1_MASK (0x1f << DBTP_DTSEG1_SHIFT) 122 + #define DBTP_DTSEG2_SHIFT 4 123 + #define DBTP_DTSEG2_MASK (0xf << DBTP_DTSEG2_SHIFT) 124 + #define DBTP_DSJW_SHIFT 0 125 + #define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT) 119 126 120 127 /* Test Register (TEST) */ 121 - #define TEST_LBCK 
BIT(4) 128 + #define TEST_LBCK BIT(4) 122 129 123 130 /* CC Control Register(CCCR) */ 124 - #define CCCR_TEST BIT(7) 125 131 #define CCCR_CMR_MASK 0x3 126 132 #define CCCR_CMR_SHIFT 10 127 133 #define CCCR_CMR_CANFD 0x1 ··· 140 130 #define CCCR_CME_CAN 0 141 131 #define CCCR_CME_CANFD 0x1 142 132 #define CCCR_CME_CANFD_BRS 0x2 133 + #define CCCR_TXP BIT(14) 143 134 #define CCCR_TEST BIT(7) 144 135 #define CCCR_MON BIT(5) 136 + #define CCCR_CSR BIT(4) 137 + #define CCCR_CSA BIT(3) 138 + #define CCCR_ASM BIT(2) 145 139 #define CCCR_CCE BIT(1) 146 140 #define CCCR_INIT BIT(0) 147 141 #define CCCR_CANFD 0x10 142 + /* for version >=3.1.x */ 143 + #define CCCR_EFBI BIT(13) 144 + #define CCCR_PXHD BIT(12) 145 + #define CCCR_BRSE BIT(9) 146 + #define CCCR_FDOE BIT(8) 147 + /* only for version >=3.2.x */ 148 + #define CCCR_NISO BIT(15) 148 149 149 - /* Bit Timing & Prescaler Register (BTP) */ 150 - #define BTR_BRP_MASK 0x3ff 151 - #define BTR_BRP_SHIFT 16 152 - #define BTR_TSEG1_SHIFT 8 153 - #define BTR_TSEG1_MASK (0x3f << BTR_TSEG1_SHIFT) 154 - #define BTR_TSEG2_SHIFT 4 155 - #define BTR_TSEG2_MASK (0xf << BTR_TSEG2_SHIFT) 156 - #define BTR_SJW_SHIFT 0 157 - #define BTR_SJW_MASK 0xf 150 + /* Nominal Bit Timing & Prescaler Register (NBTP) */ 151 + #define NBTP_NSJW_SHIFT 25 152 + #define NBTP_NSJW_MASK (0x7f << NBTP_NSJW_SHIFT) 153 + #define NBTP_NBRP_SHIFT 16 154 + #define NBTP_NBRP_MASK (0x1ff << NBTP_NBRP_SHIFT) 155 + #define NBTP_NTSEG1_SHIFT 8 156 + #define NBTP_NTSEG1_MASK (0xff << NBTP_NTSEG1_SHIFT) 157 + #define NBTP_NTSEG2_SHIFT 0 158 + #define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT) 158 159 159 160 /* Error Counter Register(ECR) */ 160 161 #define ECR_RP BIT(15) ··· 182 161 183 162 /* Interrupt Register(IR) */ 184 163 #define IR_ALL_INT 0xffffffff 164 + 165 + /* Renamed bits for versions > 3.1.x */ 166 + #define IR_ARA BIT(29) 167 + #define IR_PED BIT(28) 168 + #define IR_PEA BIT(27) 169 + 170 + /* Bits for version 3.0.x */ 185 171 #define IR_STE BIT(31) 186 
172 #define IR_FOE BIT(30) 187 173 #define IR_ACKE BIT(29) ··· 222 194 #define IR_RF0W BIT(1) 223 195 #define IR_RF0N BIT(0) 224 196 #define IR_ERR_STATE (IR_BO | IR_EW | IR_EP) 225 - #define IR_ERR_LEC (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) 226 - #define IR_ERR_BUS (IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \ 197 + 198 + /* Interrupts for version 3.0.x */ 199 + #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) 200 + #define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \ 227 201 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ 228 202 IR_RF1L | IR_RF0L) 229 - #define IR_ERR_ALL (IR_ERR_STATE | IR_ERR_BUS) 203 + #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X) 204 + /* Interrupts for version >= 3.1.x */ 205 + #define IR_ERR_LEC_31X (IR_PED | IR_PEA) 206 + #define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \ 207 + IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ 208 + IR_RF1L | IR_RF0L) 209 + #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X) 230 210 231 211 /* Interrupt Line Select (ILS) */ 232 212 #define ILS_ALL_INT0 0x0 233 213 #define ILS_ALL_INT1 0xFFFFFFFF 234 214 235 215 /* Interrupt Line Enable (ILE) */ 236 - #define ILE_EINT0 BIT(0) 237 216 #define ILE_EINT1 BIT(1) 217 + #define ILE_EINT0 BIT(0) 238 218 239 219 /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ 240 - #define RXFC_FWM_OFF 24 241 - #define RXFC_FWM_MASK 0x7f 242 - #define RXFC_FWM_1 (1 << RXFC_FWM_OFF) 243 - #define RXFC_FS_OFF 16 244 - #define RXFC_FS_MASK 0x7f 220 + #define RXFC_FWM_SHIFT 24 221 + #define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) 222 + #define RXFC_FS_SHIFT 16 223 + #define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) 245 224 246 225 /* Rx FIFO 0/1 Status (RXF0S/RXF1S) */ 247 226 #define RXFS_RFL BIT(25) 248 227 #define RXFS_FF BIT(24) 249 - #define RXFS_FPI_OFF 16 228 + #define RXFS_FPI_SHIFT 16 250 229 #define RXFS_FPI_MASK 0x3f0000 251 - #define RXFS_FGI_OFF 8 230 + #define RXFS_FGI_SHIFT 8 252 231 #define RXFS_FGI_MASK 
0x3f00 253 232 #define RXFS_FFL_MASK 0x7f 254 233 ··· 264 229 #define M_CAN_RXESC_64BYTES 0x777 265 230 266 231 /* Tx Buffer Configuration(TXBC) */ 267 - #define TXBC_NDTB_OFF 16 268 - #define TXBC_NDTB_MASK 0x3f 232 + #define TXBC_NDTB_SHIFT 16 233 + #define TXBC_NDTB_MASK (0x3f << TXBC_NDTB_SHIFT) 234 + #define TXBC_TFQS_SHIFT 24 235 + #define TXBC_TFQS_MASK (0x3f << TXBC_TFQS_SHIFT) 236 + 237 + /* Tx FIFO/Queue Status (TXFQS) */ 238 + #define TXFQS_TFQF BIT(21) 239 + #define TXFQS_TFQPI_SHIFT 16 240 + #define TXFQS_TFQPI_MASK (0x1f << TXFQS_TFQPI_SHIFT) 241 + #define TXFQS_TFGI_SHIFT 8 242 + #define TXFQS_TFGI_MASK (0x1f << TXFQS_TFGI_SHIFT) 243 + #define TXFQS_TFFL_SHIFT 0 244 + #define TXFQS_TFFL_MASK (0x3f << TXFQS_TFFL_SHIFT) 269 245 270 246 /* Tx Buffer Element Size Configuration(TXESC) */ 271 247 #define TXESC_TBDS_8BYTES 0x0 272 248 #define TXESC_TBDS_64BYTES 0x7 273 249 274 - /* Tx Event FIFO Con.guration (TXEFC) */ 275 - #define TXEFC_EFS_OFF 16 276 - #define TXEFC_EFS_MASK 0x3f 250 + /* Tx Event FIFO Configuration (TXEFC) */ 251 + #define TXEFC_EFS_SHIFT 16 252 + #define TXEFC_EFS_MASK (0x3f << TXEFC_EFS_SHIFT) 253 + 254 + /* Tx Event FIFO Status (TXEFS) */ 255 + #define TXEFS_TEFL BIT(25) 256 + #define TXEFS_EFF BIT(24) 257 + #define TXEFS_EFGI_SHIFT 8 258 + #define TXEFS_EFGI_MASK (0x1f << TXEFS_EFGI_SHIFT) 259 + #define TXEFS_EFFL_SHIFT 0 260 + #define TXEFS_EFFL_MASK (0x3f << TXEFS_EFFL_SHIFT) 261 + 262 + /* Tx Event FIFO Acknowledge (TXEFA) */ 263 + #define TXEFA_EFAI_SHIFT 0 264 + #define TXEFA_EFAI_MASK (0x1f << TXEFA_EFAI_SHIFT) 277 265 278 266 /* Message RAM Configuration (in bytes) */ 279 267 #define SIDF_ELEMENT_SIZE 4 280 268 #define XIDF_ELEMENT_SIZE 8 281 269 #define RXF0_ELEMENT_SIZE 72 282 270 #define RXF1_ELEMENT_SIZE 72 283 - #define RXB_ELEMENT_SIZE 16 271 + #define RXB_ELEMENT_SIZE 72 284 272 #define TXE_ELEMENT_SIZE 8 285 273 #define TXB_ELEMENT_SIZE 72 286 274 ··· 319 261 #define RX_BUF_RTR BIT(29) 320 262 /* R1 */ 321 263 #define 
RX_BUF_ANMF BIT(31) 322 - #define RX_BUF_EDL BIT(21) 264 + #define RX_BUF_FDF BIT(21) 323 265 #define RX_BUF_BRS BIT(20) 324 266 325 267 /* Tx Buffer Element */ 326 - /* R0 */ 268 + /* T0 */ 269 + #define TX_BUF_ESI BIT(31) 327 270 #define TX_BUF_XTD BIT(30) 328 271 #define TX_BUF_RTR BIT(29) 272 + /* T1 */ 273 + #define TX_BUF_EFC BIT(23) 274 + #define TX_BUF_FDF BIT(21) 275 + #define TX_BUF_BRS BIT(20) 276 + #define TX_BUF_MM_SHIFT 24 277 + #define TX_BUF_MM_MASK (0xff << TX_BUF_MM_SHIFT) 278 + 279 + /* Tx event FIFO Element */ 280 + /* E1 */ 281 + #define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT 282 + #define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT) 329 283 330 284 /* address offset and element number for each FIFO/Buffer in the Message RAM */ 331 285 struct mram_cfg { ··· 355 285 struct clk *cclk; 356 286 void __iomem *base; 357 287 u32 irqstatus; 288 + int version; 358 289 359 290 /* message ram configuration */ 360 291 void __iomem *mram_base; ··· 385 314 { 386 315 writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off + 387 316 fpi * TXB_ELEMENT_SIZE + offset); 317 + } 318 + 319 + static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv, 320 + u32 fgi, 321 + u32 offset) { 322 + return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off + 323 + fgi * TXE_ELEMENT_SIZE + offset); 324 + } 325 + 326 + static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv) 327 + { 328 + return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF); 388 329 } 389 330 390 331 static inline void m_can_config_endisable(const struct m_can_priv *priv, ··· 432 349 433 350 static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv) 434 351 { 435 - m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1); 352 + /* Only interrupt line 0 is used in this driver */ 353 + m_can_write(priv, M_CAN_ILE, ILE_EINT0); 436 354 } 437 355 438 356 static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) ··· 451 367 int i; 452 368 453 369 /* calculate the fifo 
get index for where to read data */ 454 - fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; 370 + fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT; 455 371 dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); 456 - if (dlc & RX_BUF_EDL) 372 + if (dlc & RX_BUF_FDF) 457 373 skb = alloc_canfd_skb(dev, &cf); 458 374 else 459 375 skb = alloc_can_skb(dev, (struct can_frame **)&cf); ··· 462 378 return; 463 379 } 464 380 465 - if (dlc & RX_BUF_EDL) 381 + if (dlc & RX_BUF_FDF) 466 382 cf->len = can_dlc2len((dlc >> 16) & 0x0F); 467 383 else 468 384 cf->len = get_can_dlc((dlc >> 16) & 0x0F); ··· 478 394 netdev_dbg(dev, "ESI Error\n"); 479 395 } 480 396 481 - if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) { 397 + if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) { 482 398 cf->can_id |= CAN_RTR_FLAG; 483 399 } else { 484 400 if (dlc & RX_BUF_BRS) ··· 616 532 617 533 ecr = m_can_read(priv, M_CAN_ECR); 618 534 bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; 619 - bec->txerr = ecr & ECR_TEC_MASK; 535 + bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT; 620 536 621 537 return 0; 622 538 } ··· 807 723 if (irqstatus & IR_ERR_STATE) 808 724 work_done += m_can_handle_state_errors(dev, psr); 809 725 810 - if (irqstatus & IR_ERR_BUS) 726 + if (irqstatus & IR_ERR_BUS_30X) 811 727 work_done += m_can_handle_bus_errors(dev, irqstatus, psr); 812 728 813 729 if (irqstatus & IR_RF0N) ··· 820 736 821 737 end: 822 738 return work_done; 739 + } 740 + 741 + static void m_can_echo_tx_event(struct net_device *dev) 742 + { 743 + u32 txe_count = 0; 744 + u32 m_can_txefs; 745 + u32 fgi = 0; 746 + int i = 0; 747 + unsigned int msg_mark; 748 + 749 + struct m_can_priv *priv = netdev_priv(dev); 750 + struct net_device_stats *stats = &dev->stats; 751 + 752 + /* read tx event fifo status */ 753 + m_can_txefs = m_can_read(priv, M_CAN_TXEFS); 754 + 755 + /* Get Tx Event fifo element count */ 756 + txe_count = (m_can_txefs & TXEFS_EFFL_MASK) 757 + >> TXEFS_EFFL_SHIFT; 758 + 759 + /* Get and process all sent elements */ 
760 + for (i = 0; i < txe_count; i++) { 761 + /* retrieve get index */ 762 + fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK) 763 + >> TXEFS_EFGI_SHIFT; 764 + 765 + /* get message marker */ 766 + msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) & 767 + TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; 768 + 769 + /* ack txe element */ 770 + m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK & 771 + (fgi << TXEFA_EFAI_SHIFT))); 772 + 773 + /* update stats */ 774 + stats->tx_bytes += can_get_echo_skb(dev, msg_mark); 775 + stats->tx_packets++; 776 + } 823 777 } 824 778 825 779 static irqreturn_t m_can_isr(int irq, void *dev_id) ··· 880 758 * - state change IRQ 881 759 * - bus error IRQ and bus error reporting 882 760 */ 883 - if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) { 761 + if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) { 884 762 priv->irqstatus = ir; 885 763 m_can_disable_all_interrupts(priv); 886 764 napi_schedule(&priv->napi); 887 765 } 888 766 889 - /* transmission complete interrupt */ 890 - if (ir & IR_TC) { 891 - stats->tx_bytes += can_get_echo_skb(dev, 0); 892 - stats->tx_packets++; 893 - can_led_event(dev, CAN_LED_EVENT_TX); 894 - netif_wake_queue(dev); 767 + if (priv->version == 30) { 768 + if (ir & IR_TC) { 769 + /* Transmission Complete Interrupt*/ 770 + stats->tx_bytes += can_get_echo_skb(dev, 0); 771 + stats->tx_packets++; 772 + can_led_event(dev, CAN_LED_EVENT_TX); 773 + netif_wake_queue(dev); 774 + } 775 + } else { 776 + if (ir & IR_TEFN) { 777 + /* New TX FIFO Element arrived */ 778 + m_can_echo_tx_event(dev); 779 + can_led_event(dev, CAN_LED_EVENT_TX); 780 + if (netif_queue_stopped(dev) && 781 + !m_can_tx_fifo_full(priv)) 782 + netif_wake_queue(dev); 783 + } 895 784 } 896 785 897 786 return IRQ_HANDLED; 898 787 } 899 788 900 - static const struct can_bittiming_const m_can_bittiming_const = { 789 + static const struct can_bittiming_const m_can_bittiming_const_30X = { 901 790 .name = KBUILD_MODNAME, 902 791 .tseg1_min = 2, /* Time segment 1 = prop_seg + 
phase_seg1 */ 903 792 .tseg1_max = 64, ··· 920 787 .brp_inc = 1, 921 788 }; 922 789 923 - static const struct can_bittiming_const m_can_data_bittiming_const = { 790 + static const struct can_bittiming_const m_can_data_bittiming_const_30X = { 924 791 .name = KBUILD_MODNAME, 925 792 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ 926 793 .tseg1_max = 16, 927 794 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ 928 795 .tseg2_max = 8, 929 796 .sjw_max = 4, 797 + .brp_min = 1, 798 + .brp_max = 32, 799 + .brp_inc = 1, 800 + }; 801 + 802 + static const struct can_bittiming_const m_can_bittiming_const_31X = { 803 + .name = KBUILD_MODNAME, 804 + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ 805 + .tseg1_max = 256, 806 + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ 807 + .tseg2_max = 128, 808 + .sjw_max = 128, 809 + .brp_min = 1, 810 + .brp_max = 512, 811 + .brp_inc = 1, 812 + }; 813 + 814 + static const struct can_bittiming_const m_can_data_bittiming_const_31X = { 815 + .name = KBUILD_MODNAME, 816 + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ 817 + .tseg1_max = 32, 818 + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ 819 + .tseg2_max = 16, 820 + .sjw_max = 16, 930 821 .brp_min = 1, 931 822 .brp_max = 32, 932 823 .brp_inc = 1, ··· 968 811 sjw = bt->sjw - 1; 969 812 tseg1 = bt->prop_seg + bt->phase_seg1 - 1; 970 813 tseg2 = bt->phase_seg2 - 1; 971 - reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | 972 - (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); 973 - m_can_write(priv, M_CAN_BTP, reg_btp); 814 + reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) | 815 + (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT); 816 + m_can_write(priv, M_CAN_NBTP, reg_btp); 974 817 975 818 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { 976 819 brp = dbt->brp - 1; 977 820 sjw = dbt->sjw - 1; 978 821 tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; 979 822 tseg2 = dbt->phase_seg2 - 1; 980 - reg_btp = (brp << 
FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) | 981 - (tseg1 << FBTR_FTSEG1_SHIFT) | 982 - (tseg2 << FBTR_FTSEG2_SHIFT); 983 - m_can_write(priv, M_CAN_FBTP, reg_btp); 823 + reg_btp = (brp << DBTP_DBRP_SHIFT) | (sjw << DBTP_DSJW_SHIFT) | 824 + (tseg1 << DBTP_DTSEG1_SHIFT) | 825 + (tseg2 << DBTP_DTSEG2_SHIFT); 826 + m_can_write(priv, M_CAN_DBTP, reg_btp); 984 827 } 985 828 986 829 return 0; ··· 991 834 * - configure rx fifo 992 835 * - accept non-matching frame into fifo 0 993 836 * - configure tx buffer 837 + * - >= v3.1.x: TX FIFO is used 994 838 * - configure mode 995 839 * - setup bittiming 996 840 */ ··· 1008 850 /* Accept Non-matching Frames Into FIFO 0 */ 1009 851 m_can_write(priv, M_CAN_GFC, 0x0); 1010 852 1011 - /* only support one Tx Buffer currently */ 1012 - m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | 1013 - priv->mcfg[MRAM_TXB].off); 853 + if (priv->version == 30) { 854 + /* only support one Tx Buffer currently */ 855 + m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) | 856 + priv->mcfg[MRAM_TXB].off); 857 + } else { 858 + /* TX FIFO is used for newer IP Core versions */ 859 + m_can_write(priv, M_CAN_TXBC, 860 + (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) | 861 + (priv->mcfg[MRAM_TXB].off)); 862 + } 1014 863 1015 864 /* support 64 bytes payload */ 1016 865 m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); 1017 866 1018 - m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | 1019 - priv->mcfg[MRAM_TXE].off); 867 + /* TX Event FIFO */ 868 + if (priv->version == 30) { 869 + m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) | 870 + priv->mcfg[MRAM_TXE].off); 871 + } else { 872 + /* Full TX Event FIFO is used */ 873 + m_can_write(priv, M_CAN_TXEFC, 874 + ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT) 875 + & TXEFC_EFS_MASK) | 876 + priv->mcfg[MRAM_TXE].off); 877 + } 1020 878 1021 879 /* rx fifo configuration, blocking mode, fifo size 1 */ 1022 880 m_can_write(priv, M_CAN_RXF0C, 1023 - (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) | 1024 - 
RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off); 881 + (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) | 882 + priv->mcfg[MRAM_RXF0].off); 1025 883 1026 884 m_can_write(priv, M_CAN_RXF1C, 1027 - (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) | 1028 - RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); 885 + (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) | 886 + priv->mcfg[MRAM_RXF1].off); 1029 887 1030 888 cccr = m_can_read(priv, M_CAN_CCCR); 1031 - cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | 1032 - (CCCR_CME_MASK << CCCR_CME_SHIFT)); 1033 889 test = m_can_read(priv, M_CAN_TEST); 1034 890 test &= ~TEST_LBCK; 891 + if (priv->version == 30) { 892 + /* Version 3.0.x */ 1035 893 1036 - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 1037 - cccr |= CCCR_MON; 894 + cccr &= ~(CCCR_TEST | CCCR_MON | 895 + (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | 896 + (CCCR_CME_MASK << CCCR_CME_SHIFT)); 1038 897 898 + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 899 + cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; 900 + 901 + } else { 902 + /* Version 3.1.x or 3.2.x */ 903 + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE); 904 + 905 + /* Only 3.2.x has NISO Bit implemented */ 906 + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 907 + cccr |= CCCR_NISO; 908 + 909 + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 910 + cccr |= (CCCR_BRSE | CCCR_FDOE); 911 + } 912 + 913 + /* Loopback Mode */ 1039 914 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 1040 - cccr |= CCCR_TEST; 915 + cccr |= CCCR_TEST | CCCR_MON; 1041 916 test |= TEST_LBCK; 1042 917 } 1043 918 1044 - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 1045 - cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; 919 + /* Enable Monitoring (all versions) */ 920 + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 921 + cccr |= CCCR_MON; 1046 922 923 + /* Write config */ 1047 924 m_can_write(priv, M_CAN_CCCR, cccr); 1048 925 m_can_write(priv, M_CAN_TEST, test); 1049 926 1050 - /* enable interrupts */ 927 + /* Enable interrupts */ 1051 928 
m_can_write(priv, M_CAN_IR, IR_ALL_INT); 1052 929 if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) 1053 - m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC); 930 + if (priv->version == 30) 931 + m_can_write(priv, M_CAN_IE, IR_ALL_INT & 932 + ~(IR_ERR_LEC_30X)); 933 + else 934 + m_can_write(priv, M_CAN_IE, IR_ALL_INT & 935 + ~(IR_ERR_LEC_31X)); 1054 936 else 1055 937 m_can_write(priv, M_CAN_IE, IR_ALL_INT); 1056 938 ··· 1134 936 free_candev(dev); 1135 937 } 1136 938 1137 - static struct net_device *alloc_m_can_dev(void) 939 + /* Checks core release number of M_CAN 940 + * returns 0 if an unsupported device is detected 941 + * else it returns the release and step coded as: 942 + * return value = 10 * <release> + 1 * <step> 943 + */ 944 + static int m_can_check_core_release(void __iomem *m_can_base) 945 + { 946 + u32 crel_reg; 947 + u8 rel; 948 + u8 step; 949 + int res; 950 + struct m_can_priv temp_priv = { 951 + .base = m_can_base 952 + }; 953 + 954 + /* Read Core Release Version and split into version number 955 + * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1; 956 + */ 957 + crel_reg = m_can_read(&temp_priv, M_CAN_CREL); 958 + rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT); 959 + step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT); 960 + 961 + if (rel == 3) { 962 + /* M_CAN v3.x.y: create return value */ 963 + res = 30 + step; 964 + } else { 965 + /* Unsupported M_CAN version */ 966 + res = 0; 967 + } 968 + 969 + return res; 970 + } 971 + 972 + /* Selectable Non ISO support only in version 3.2.x 973 + * This function checks if the bit is writable. 
974 + */ 975 + static bool m_can_niso_supported(const struct m_can_priv *priv) 976 + { 977 + u32 cccr_reg, cccr_poll; 978 + int niso_timeout; 979 + 980 + m_can_config_endisable(priv, true); 981 + cccr_reg = m_can_read(priv, M_CAN_CCCR); 982 + cccr_reg |= CCCR_NISO; 983 + m_can_write(priv, M_CAN_CCCR, cccr_reg); 984 + 985 + niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll, 986 + (cccr_poll == cccr_reg), 0, 10); 987 + 988 + /* Clear NISO */ 989 + cccr_reg &= ~(CCCR_NISO); 990 + m_can_write(priv, M_CAN_CCCR, cccr_reg); 991 + 992 + m_can_config_endisable(priv, false); 993 + 994 + /* return false if time out (-ETIMEDOUT), else return true */ 995 + return !niso_timeout; 996 + } 997 + 998 + static struct net_device *alloc_m_can_dev(struct platform_device *pdev, 999 + void __iomem *addr, u32 tx_fifo_size) 1138 1000 { 1139 1001 struct net_device *dev; 1140 1002 struct m_can_priv *priv; 1003 + int m_can_version; 1004 + unsigned int echo_buffer_count; 1141 1005 1142 - dev = alloc_candev(sizeof(*priv), 1); 1143 - if (!dev) 1144 - return NULL; 1006 + m_can_version = m_can_check_core_release(addr); 1007 + /* return if unsupported version */ 1008 + if (!m_can_version) { 1009 + dev = NULL; 1010 + goto return_dev; 1011 + } 1145 1012 1013 + /* If version < 3.1.x, then only one echo buffer is used */ 1014 + echo_buffer_count = ((m_can_version == 30) 1015 + ? 
1U 1016 + : (unsigned int)tx_fifo_size); 1017 + 1018 + dev = alloc_candev(sizeof(*priv), echo_buffer_count); 1019 + if (!dev) { 1020 + dev = NULL; 1021 + goto return_dev; 1022 + } 1146 1023 priv = netdev_priv(dev); 1147 1024 netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); 1148 1025 1026 + /* Shared properties of all M_CAN versions */ 1027 + priv->version = m_can_version; 1149 1028 priv->dev = dev; 1150 - priv->can.bittiming_const = &m_can_bittiming_const; 1151 - priv->can.data_bittiming_const = &m_can_data_bittiming_const; 1029 + priv->base = addr; 1152 1030 priv->can.do_set_mode = m_can_set_mode; 1153 1031 priv->can.do_get_berr_counter = m_can_get_berr_counter; 1154 1032 1155 - /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */ 1156 - can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); 1157 - 1158 - /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */ 1033 + /* Set M_CAN supported operations */ 1159 1034 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 1160 1035 CAN_CTRLMODE_LISTENONLY | 1161 1036 CAN_CTRLMODE_BERR_REPORTING | 1162 1037 CAN_CTRLMODE_FD; 1163 1038 1039 + /* Set properties depending on M_CAN version */ 1040 + switch (priv->version) { 1041 + case 30: 1042 + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ 1043 + can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); 1044 + priv->can.bittiming_const = &m_can_bittiming_const_30X; 1045 + priv->can.data_bittiming_const = 1046 + &m_can_data_bittiming_const_30X; 1047 + break; 1048 + case 31: 1049 + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ 1050 + can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); 1051 + priv->can.bittiming_const = &m_can_bittiming_const_31X; 1052 + priv->can.data_bittiming_const = 1053 + &m_can_data_bittiming_const_31X; 1054 + break; 1055 + case 32: 1056 + priv->can.bittiming_const = &m_can_bittiming_const_31X; 1057 + priv->can.data_bittiming_const = 1058 + &m_can_data_bittiming_const_31X; 1059 + 
priv->can.ctrlmode_supported |= (m_can_niso_supported(priv) 1060 + ? CAN_CTRLMODE_FD_NON_ISO 1061 + : 0); 1062 + break; 1063 + default: 1064 + /* Unsupported device: free candev */ 1065 + free_m_can_dev(dev); 1066 + dev_err(&pdev->dev, "Unsupported version number: %2d", 1067 + priv->version); 1068 + dev = NULL; 1069 + break; 1070 + } 1071 + 1072 + return_dev: 1164 1073 return dev; 1165 1074 } 1166 1075 ··· 1345 1040 return 0; 1346 1041 } 1347 1042 1043 + static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx) 1044 + { 1045 + struct m_can_priv *priv = netdev_priv(dev); 1046 + /*get wrap around for loopback skb index */ 1047 + unsigned int wrap = priv->can.echo_skb_max; 1048 + int next_idx; 1049 + 1050 + /* calculate next index */ 1051 + next_idx = (++putidx >= wrap ? 0 : putidx); 1052 + 1053 + /* check if occupied */ 1054 + return !!priv->can.echo_skb[next_idx]; 1055 + } 1056 + 1348 1057 static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, 1349 1058 struct net_device *dev) 1350 1059 { 1351 1060 struct m_can_priv *priv = netdev_priv(dev); 1352 1061 struct canfd_frame *cf = (struct canfd_frame *)skb->data; 1353 - u32 id, cccr; 1062 + u32 id, cccr, fdflags; 1354 1063 int i; 1064 + int putidx; 1355 1065 1356 1066 if (can_dropped_invalid_skb(dev, skb)) 1357 1067 return NETDEV_TX_OK; 1358 1068 1359 - netif_stop_queue(dev); 1360 - 1069 + /* Generate ID field for TX buffer Element */ 1070 + /* Common to all supported M_CAN versions */ 1361 1071 if (cf->can_id & CAN_EFF_FLAG) { 1362 1072 id = cf->can_id & CAN_EFF_MASK; 1363 1073 id |= TX_BUF_XTD; ··· 1383 1063 if (cf->can_id & CAN_RTR_FLAG) 1384 1064 id |= TX_BUF_RTR; 1385 1065 1386 - /* message ram configuration */ 1387 - m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); 1388 - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); 1066 + if (priv->version == 30) { 1067 + netif_stop_queue(dev); 1389 1068 1390 - for (i = 0; i < cf->len; i += 4) 1391 - m_can_fifo_write(priv, 0, 
M_CAN_FIFO_DATA(i / 4), 1392 - *(u32 *)(cf->data + i)); 1069 + /* message ram configuration */ 1070 + m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); 1071 + m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, 1072 + can_len2dlc(cf->len) << 16); 1393 1073 1394 - can_put_echo_skb(skb, dev, 0); 1074 + for (i = 0; i < cf->len; i += 4) 1075 + m_can_fifo_write(priv, 0, 1076 + M_CAN_FIFO_DATA(i / 4), 1077 + *(u32 *)(cf->data + i)); 1395 1078 1396 - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { 1397 - cccr = m_can_read(priv, M_CAN_CCCR); 1398 - cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); 1399 - if (can_is_canfd_skb(skb)) { 1400 - if (cf->flags & CANFD_BRS) 1401 - cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT; 1402 - else 1403 - cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT; 1404 - } else { 1405 - cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; 1079 + can_put_echo_skb(skb, dev, 0); 1080 + 1081 + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { 1082 + cccr = m_can_read(priv, M_CAN_CCCR); 1083 + cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); 1084 + if (can_is_canfd_skb(skb)) { 1085 + if (cf->flags & CANFD_BRS) 1086 + cccr |= CCCR_CMR_CANFD_BRS << 1087 + CCCR_CMR_SHIFT; 1088 + else 1089 + cccr |= CCCR_CMR_CANFD << 1090 + CCCR_CMR_SHIFT; 1091 + } else { 1092 + cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; 1093 + } 1094 + m_can_write(priv, M_CAN_CCCR, cccr); 1406 1095 } 1407 - m_can_write(priv, M_CAN_CCCR, cccr); 1408 - } 1096 + m_can_write(priv, M_CAN_TXBTIE, 0x1); 1097 + m_can_write(priv, M_CAN_TXBAR, 0x1); 1098 + /* End of xmit function for version 3.0.x */ 1099 + } else { 1100 + /* Transmit routine for version >= v3.1.x */ 1409 1101 1410 - /* enable first TX buffer to start transfer */ 1411 - m_can_write(priv, M_CAN_TXBTIE, 0x1); 1412 - m_can_write(priv, M_CAN_TXBAR, 0x1); 1102 + /* Check if FIFO full */ 1103 + if (m_can_tx_fifo_full(priv)) { 1104 + /* This shouldn't happen */ 1105 + netif_stop_queue(dev); 1106 + netdev_warn(dev, 1107 + "TX queue active although FIFO is full."); 1108 + return NETDEV_TX_BUSY; 1109 + 
} 1110 + 1111 + /* get put index for frame */ 1112 + putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) 1113 + >> TXFQS_TFQPI_SHIFT); 1114 + /* Write ID Field to FIFO Element */ 1115 + m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id); 1116 + 1117 + /* get CAN FD configuration of frame */ 1118 + fdflags = 0; 1119 + if (can_is_canfd_skb(skb)) { 1120 + fdflags |= TX_BUF_FDF; 1121 + if (cf->flags & CANFD_BRS) 1122 + fdflags |= TX_BUF_BRS; 1123 + } 1124 + 1125 + /* Construct DLC Field. Also contains CAN-FD configuration 1126 + * use put index of fifo as message marker 1127 + * it is used in TX interrupt for 1128 + * sending the correct echo frame 1129 + */ 1130 + m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC, 1131 + ((putidx << TX_BUF_MM_SHIFT) & 1132 + TX_BUF_MM_MASK) | 1133 + (can_len2dlc(cf->len) << 16) | 1134 + fdflags | TX_BUF_EFC); 1135 + 1136 + for (i = 0; i < cf->len; i += 4) 1137 + m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4), 1138 + *(u32 *)(cf->data + i)); 1139 + 1140 + /* Push loopback echo. 
1141 + * Will be looped back on TX interrupt based on message marker 1142 + */ 1143 + can_put_echo_skb(skb, dev, putidx); 1144 + 1145 + /* Enable TX FIFO element to start transfer */ 1146 + m_can_write(priv, M_CAN_TXBAR, (1 << putidx)); 1147 + 1148 + /* stop network queue if fifo full */ 1149 + if (m_can_tx_fifo_full(priv) || 1150 + m_can_next_echo_skb_occupied(dev, putidx)) 1151 + netif_stop_queue(dev); 1152 + } 1413 1153 1414 1154 return NETDEV_TX_OK; 1415 1155 } ··· 1489 1109 return register_candev(dev); 1490 1110 } 1491 1111 1492 - static int m_can_of_parse_mram(struct platform_device *pdev, 1493 - struct m_can_priv *priv) 1112 + static void m_can_of_parse_mram(struct m_can_priv *priv, 1113 + const u32 *mram_config_vals) 1494 1114 { 1495 - struct device_node *np = pdev->dev.of_node; 1496 - struct resource *res; 1497 - void __iomem *addr; 1498 - u32 out_val[MRAM_CFG_LEN]; 1499 - int i, start, end, ret; 1115 + int i, start, end; 1500 1116 1501 - /* message ram could be shared */ 1502 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); 1503 - if (!res) 1504 - return -ENODEV; 1505 - 1506 - addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 1507 - if (!addr) 1508 - return -ENOMEM; 1509 - 1510 - /* get message ram configuration */ 1511 - ret = of_property_read_u32_array(np, "bosch,mram-cfg", 1512 - out_val, sizeof(out_val) / 4); 1513 - if (ret) { 1514 - dev_err(&pdev->dev, "can not get message ram configuration\n"); 1515 - return -ENODEV; 1516 - } 1517 - 1518 - priv->mram_base = addr; 1519 - priv->mcfg[MRAM_SIDF].off = out_val[0]; 1520 - priv->mcfg[MRAM_SIDF].num = out_val[1]; 1117 + priv->mcfg[MRAM_SIDF].off = mram_config_vals[0]; 1118 + priv->mcfg[MRAM_SIDF].num = mram_config_vals[1]; 1521 1119 priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off + 1522 1120 priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; 1523 - priv->mcfg[MRAM_XIDF].num = out_val[2]; 1121 + priv->mcfg[MRAM_XIDF].num = mram_config_vals[2]; 1524 1122 
priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off + 1525 1123 priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; 1526 - priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK; 1124 + priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] & 1125 + (RXFC_FS_MASK >> RXFC_FS_SHIFT); 1527 1126 priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off + 1528 1127 priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; 1529 - priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK; 1128 + priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] & 1129 + (RXFC_FS_MASK >> RXFC_FS_SHIFT); 1530 1130 priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off + 1531 1131 priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; 1532 - priv->mcfg[MRAM_RXB].num = out_val[5]; 1132 + priv->mcfg[MRAM_RXB].num = mram_config_vals[5]; 1533 1133 priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off + 1534 1134 priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; 1535 - priv->mcfg[MRAM_TXE].num = out_val[6]; 1135 + priv->mcfg[MRAM_TXE].num = mram_config_vals[6]; 1536 1136 priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off + 1537 1137 priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; 1538 - priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK; 1138 + priv->mcfg[MRAM_TXB].num = mram_config_vals[7] & 1139 + (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT); 1539 1140 1540 - dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", 1141 + dev_dbg(priv->device, 1142 + "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", 1541 1143 priv->mram_base, 1542 1144 priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num, 1543 1145 priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num, ··· 1538 1176 for (i = start; i < end; i += 4) 1539 1177 writel(0x0, priv->mram_base + i); 1540 1178 1541 - return 0; 1542 1179 } 1543 1180 1544 1181 static int m_can_plat_probe(struct platform_device *pdev) ··· 1546 1185 struct m_can_priv *priv; 1547 1186 struct resource *res; 
1548 1187 void __iomem *addr; 1188 + void __iomem *mram_addr; 1549 1189 struct clk *hclk, *cclk; 1550 1190 int irq, ret; 1191 + struct device_node *np; 1192 + u32 mram_config_vals[MRAM_CFG_LEN]; 1193 + u32 tx_fifo_size; 1194 + 1195 + np = pdev->dev.of_node; 1551 1196 1552 1197 hclk = devm_clk_get(&pdev->dev, "hclk"); 1553 1198 cclk = devm_clk_get(&pdev->dev, "cclk"); 1199 + 1554 1200 if (IS_ERR(hclk) || IS_ERR(cclk)) { 1555 - dev_err(&pdev->dev, "no clock find\n"); 1556 - return -ENODEV; 1201 + dev_err(&pdev->dev, "no clock found\n"); 1202 + ret = -ENODEV; 1203 + goto failed_ret; 1557 1204 } 1205 + 1206 + /* Enable clocks. Necessary to read Core Release in order to determine 1207 + * M_CAN version 1208 + */ 1209 + ret = clk_prepare_enable(hclk); 1210 + if (ret) 1211 + goto disable_hclk_ret; 1212 + 1213 + ret = clk_prepare_enable(cclk); 1214 + if (ret) 1215 + goto disable_cclk_ret; 1558 1216 1559 1217 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); 1560 1218 addr = devm_ioremap_resource(&pdev->dev, res); 1561 1219 irq = platform_get_irq_byname(pdev, "int0"); 1562 - if (IS_ERR(addr) || irq < 0) 1563 - return -EINVAL; 1220 + 1221 + if (IS_ERR(addr) || irq < 0) { 1222 + ret = -EINVAL; 1223 + goto disable_cclk_ret; 1224 + } 1225 + 1226 + /* message ram could be shared */ 1227 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); 1228 + if (!res) { 1229 + ret = -ENODEV; 1230 + goto disable_cclk_ret; 1231 + } 1232 + 1233 + mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 1234 + if (!mram_addr) { 1235 + ret = -ENOMEM; 1236 + goto disable_cclk_ret; 1237 + } 1238 + 1239 + /* get message ram configuration */ 1240 + ret = of_property_read_u32_array(np, "bosch,mram-cfg", 1241 + mram_config_vals, 1242 + sizeof(mram_config_vals) / 4); 1243 + if (ret) { 1244 + dev_err(&pdev->dev, "Could not get Message RAM configuration."); 1245 + goto disable_cclk_ret; 1246 + } 1247 + 1248 + /* Get TX FIFO size 1249 + * Defines the 
total amount of echo buffers for loopback 1250 + */ 1251 + tx_fifo_size = mram_config_vals[7]; 1564 1252 1565 1253 /* allocate the m_can device */ 1566 - dev = alloc_m_can_dev(); 1567 - if (!dev) 1568 - return -ENOMEM; 1569 - 1254 + dev = alloc_m_can_dev(pdev, addr, tx_fifo_size); 1255 + if (!dev) { 1256 + ret = -ENOMEM; 1257 + goto disable_cclk_ret; 1258 + } 1570 1259 priv = netdev_priv(dev); 1571 1260 dev->irq = irq; 1572 - priv->base = addr; 1573 1261 priv->device = &pdev->dev; 1574 1262 priv->hclk = hclk; 1575 1263 priv->cclk = cclk; 1576 1264 priv->can.clock.freq = clk_get_rate(cclk); 1265 + priv->mram_base = mram_addr; 1577 1266 1578 - ret = m_can_of_parse_mram(pdev, priv); 1579 - if (ret) 1580 - goto failed_free_dev; 1267 + m_can_of_parse_mram(priv, mram_config_vals); 1581 1268 1582 1269 platform_set_drvdata(pdev, dev); 1583 1270 SET_NETDEV_DEV(dev, &pdev->dev); ··· 1639 1230 1640 1231 devm_can_led_init(dev); 1641 1232 1642 - dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", 1643 - KBUILD_MODNAME, priv->base, dev->irq); 1233 + dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n", 1234 + KBUILD_MODNAME, dev->irq, priv->version); 1644 1235 1645 - return 0; 1236 + /* Probe finished 1237 + * Stop clocks. They will be reactivated once the M_CAN device is opened 1238 + */ 1239 + 1240 + goto disable_cclk_ret; 1646 1241 1647 1242 failed_free_dev: 1648 1243 free_m_can_dev(dev); 1244 + disable_cclk_ret: 1245 + clk_disable_unprepare(cclk); 1246 + disable_hclk_ret: 1247 + clk_disable_unprepare(hclk); 1248 + failed_ret: 1649 1249 return ret; 1650 1250 } 1651 1251
+13
drivers/net/can/peak_canfd/Kconfig
··· 1 + config CAN_PEAK_PCIEFD 2 + depends on PCI 3 + tristate "PEAK-System PCAN-PCIe FD cards" 4 + ---help--- 5 + This driver adds support for the PEAK-System PCI Express FD 6 + CAN-FD cards family. 7 + These 1x or 2x CAN-FD channels cards offer CAN 2.0 a/b as well as 8 + CAN-FD access to the CAN bus. Besides the nominal bitrate of up to 9 + 1 Mbit/s, the data bytes of CAN-FD frames can be transmitted with 10 + up to 12 Mbit/s. A galvanic isolation of the CAN ports protects the 11 + electronics of the card and the respective computer against 12 + disturbances of up to 500 Volts. The PCAN-PCI Express FD can be 13 + operated with ambient temperatures in a range of -40 to +85 °C.
+5
drivers/net/can/peak_canfd/Makefile
··· 1 + # 2 + # Makefile for the PEAK-System CAN-FD IP module drivers 3 + # 4 + obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_pciefd.o 5 + peak_pciefd-y := peak_pciefd_main.o peak_canfd.o
+801
drivers/net/can/peak_canfd/peak_canfd.c
··· 1 + /* 2 + * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> 3 + * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> 4 + * 5 + * Copyright (C) 2016 PEAK System-Technik GmbH 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the version 2 of the GNU General Public License 9 + * as published by the Free Software Foundation 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + */ 16 + 17 + #include <linux/can.h> 18 + #include <linux/can/dev.h> 19 + 20 + #include "peak_canfd_user.h" 21 + 22 + /* internal IP core cache size (used as default echo skbs max number) */ 23 + #define PCANFD_ECHO_SKB_MAX 24 24 + 25 + /* bittiming ranges of the PEAK-System PC CAN-FD interfaces */ 26 + static const struct can_bittiming_const peak_canfd_nominal_const = { 27 + .name = "peak_canfd", 28 + .tseg1_min = 1, 29 + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), 30 + .tseg2_min = 1, 31 + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), 32 + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), 33 + .brp_min = 1, 34 + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), 35 + .brp_inc = 1, 36 + }; 37 + 38 + static const struct can_bittiming_const peak_canfd_data_const = { 39 + .name = "peak_canfd", 40 + .tseg1_min = 1, 41 + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), 42 + .tseg2_min = 1, 43 + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), 44 + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), 45 + .brp_min = 1, 46 + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), 47 + .brp_inc = 1, 48 + }; 49 + 50 + static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv) 51 + { 52 + priv->cmd_len = 0; 53 + return priv; 54 + } 55 + 56 + static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op) 57 + { 58 + struct 
pucan_command *cmd; 59 + 60 + if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen) 61 + return NULL; 62 + 63 + cmd = priv->cmd_buffer + priv->cmd_len; 64 + 65 + /* reset all unused bit to default */ 66 + memset(cmd, 0, sizeof(*cmd)); 67 + 68 + cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op); 69 + priv->cmd_len += sizeof(*cmd); 70 + 71 + return cmd; 72 + } 73 + 74 + static int pucan_write_cmd(struct peak_canfd_priv *priv) 75 + { 76 + int err; 77 + 78 + if (priv->pre_cmd) { 79 + err = priv->pre_cmd(priv); 80 + if (err) 81 + return err; 82 + } 83 + 84 + err = priv->write_cmd(priv); 85 + if (err) 86 + return err; 87 + 88 + if (priv->post_cmd) 89 + err = priv->post_cmd(priv); 90 + 91 + return err; 92 + } 93 + 94 + /* uCAN commands interface functions */ 95 + static int pucan_set_reset_mode(struct peak_canfd_priv *priv) 96 + { 97 + pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE); 98 + return pucan_write_cmd(priv); 99 + } 100 + 101 + static int pucan_set_normal_mode(struct peak_canfd_priv *priv) 102 + { 103 + int err; 104 + 105 + pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE); 106 + err = pucan_write_cmd(priv); 107 + if (!err) 108 + priv->can.state = CAN_STATE_ERROR_ACTIVE; 109 + 110 + return err; 111 + } 112 + 113 + static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv) 114 + { 115 + int err; 116 + 117 + pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE); 118 + err = pucan_write_cmd(priv); 119 + if (!err) 120 + priv->can.state = CAN_STATE_ERROR_ACTIVE; 121 + 122 + return err; 123 + } 124 + 125 + static int pucan_set_timing_slow(struct peak_canfd_priv *priv, 126 + const struct can_bittiming *pbt) 127 + { 128 + struct pucan_timing_slow *cmd; 129 + 130 + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW); 131 + 132 + cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1, 133 + priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES); 134 + cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1); 135 + 
cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1); 136 + cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1)); 137 + 138 + cmd->ewl = 96; /* default */ 139 + 140 + netdev_dbg(priv->ndev, 141 + "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n", 142 + le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t); 143 + 144 + return pucan_write_cmd(priv); 145 + } 146 + 147 + static int pucan_set_timing_fast(struct peak_canfd_priv *priv, 148 + const struct can_bittiming *pbt) 149 + { 150 + struct pucan_timing_fast *cmd; 151 + 152 + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST); 153 + 154 + cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1); 155 + cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1); 156 + cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 - 1); 157 + cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1)); 158 + 159 + netdev_dbg(priv->ndev, 160 + "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n", 161 + le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw); 162 + 163 + return pucan_write_cmd(priv); 164 + } 165 + 166 + static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask) 167 + { 168 + struct pucan_std_filter *cmd; 169 + 170 + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER); 171 + 172 + /* all the 11-bits CAN ID values are represented by one bit in a 173 + * 64 rows array of 32 bits: the upper 6 bits of the CAN ID select the 174 + * row while the lowest 5 bits select the bit in that row. 
175 + * 176 + * bit filter 177 + * 1 passed 178 + * 0 discarded 179 + */ 180 + 181 + /* select the row */ 182 + cmd->idx = row; 183 + 184 + /* set/unset bits in the row */ 185 + cmd->mask = cpu_to_le32(mask); 186 + 187 + return pucan_write_cmd(priv); 188 + } 189 + 190 + static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags) 191 + { 192 + struct pucan_tx_abort *cmd; 193 + 194 + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT); 195 + 196 + cmd->flags = cpu_to_le16(flags); 197 + 198 + return pucan_write_cmd(priv); 199 + } 200 + 201 + static int pucan_clr_err_counters(struct peak_canfd_priv *priv) 202 + { 203 + struct pucan_wr_err_cnt *cmd; 204 + 205 + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT); 206 + 207 + cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE); 208 + cmd->tx_counter = 0; 209 + cmd->rx_counter = 0; 210 + 211 + return pucan_write_cmd(priv); 212 + } 213 + 214 + static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask) 215 + { 216 + struct pucan_options *cmd; 217 + 218 + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION); 219 + 220 + cmd->options = cpu_to_le16(opt_mask); 221 + 222 + return pucan_write_cmd(priv); 223 + } 224 + 225 + static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask) 226 + { 227 + struct pucan_options *cmd; 228 + 229 + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION); 230 + 231 + cmd->options = cpu_to_le16(opt_mask); 232 + 233 + return pucan_write_cmd(priv); 234 + } 235 + 236 + static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv) 237 + { 238 + pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER); 239 + 240 + return pucan_write_cmd(priv); 241 + } 242 + 243 + /* handle the reception of one CAN frame */ 244 + static int pucan_handle_can_rx(struct peak_canfd_priv *priv, 245 + struct pucan_rx_msg *msg) 246 + { 247 + struct net_device_stats *stats = &priv->ndev->stats; 248 + struct canfd_frame *cf; 249 
+ struct sk_buff *skb; 250 + const u16 rx_msg_flags = le16_to_cpu(msg->flags); 251 + u8 cf_len; 252 + 253 + if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) 254 + cf_len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(msg))); 255 + else 256 + cf_len = get_can_dlc(pucan_msg_get_dlc(msg)); 257 + 258 + /* if this frame is an echo, */ 259 + if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && 260 + !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { 261 + int n; 262 + unsigned long flags; 263 + 264 + spin_lock_irqsave(&priv->echo_lock, flags); 265 + n = can_get_echo_skb(priv->ndev, msg->client); 266 + spin_unlock_irqrestore(&priv->echo_lock, flags); 267 + 268 + /* count bytes of the echo instead of skb */ 269 + stats->tx_bytes += cf_len; 270 + stats->tx_packets++; 271 + 272 + if (n) { 273 + /* restart tx queue only if a slot is free */ 274 + netif_wake_queue(priv->ndev); 275 + } 276 + 277 + return 0; 278 + } 279 + 280 + /* otherwise, it should be pushed into rx fifo */ 281 + if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { 282 + /* CANFD frame case */ 283 + skb = alloc_canfd_skb(priv->ndev, &cf); 284 + if (!skb) 285 + return -ENOMEM; 286 + 287 + if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH) 288 + cf->flags |= CANFD_BRS; 289 + 290 + if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND) 291 + cf->flags |= CANFD_ESI; 292 + } else { 293 + /* CAN 2.0 frame case */ 294 + skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf); 295 + if (!skb) 296 + return -ENOMEM; 297 + } 298 + 299 + cf->can_id = le32_to_cpu(msg->can_id); 300 + cf->len = cf_len; 301 + 302 + if (rx_msg_flags & PUCAN_MSG_EXT_ID) 303 + cf->can_id |= CAN_EFF_FLAG; 304 + 305 + if (rx_msg_flags & PUCAN_MSG_RTR) 306 + cf->can_id |= CAN_RTR_FLAG; 307 + else 308 + memcpy(cf->data, msg->d, cf->len); 309 + 310 + stats->rx_bytes += cf->len; 311 + stats->rx_packets++; 312 + 313 + netif_rx(skb); 314 + 315 + return 0; 316 + } 317 + 318 + /* handle rx/tx error counters notification */ 319 + static int pucan_handle_error(struct peak_canfd_priv *priv, 320 + 
struct pucan_error_msg *msg) 321 + { 322 + priv->bec.txerr = msg->tx_err_cnt; 323 + priv->bec.rxerr = msg->rx_err_cnt; 324 + 325 + return 0; 326 + } 327 + 328 + /* handle status notification */ 329 + static int pucan_handle_status(struct peak_canfd_priv *priv, 330 + struct pucan_status_msg *msg) 331 + { 332 + struct net_device *ndev = priv->ndev; 333 + struct net_device_stats *stats = &ndev->stats; 334 + struct can_frame *cf; 335 + struct sk_buff *skb; 336 + 337 + /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ 338 + if (pucan_status_is_rx_barrier(msg)) { 339 + unsigned long flags; 340 + 341 + if (priv->enable_tx_path) { 342 + int err = priv->enable_tx_path(priv); 343 + 344 + if (err) 345 + return err; 346 + } 347 + 348 + /* restart network queue only if echo skb array is free */ 349 + spin_lock_irqsave(&priv->echo_lock, flags); 350 + 351 + if (!priv->can.echo_skb[priv->echo_idx]) { 352 + spin_unlock_irqrestore(&priv->echo_lock, flags); 353 + 354 + netif_wake_queue(ndev); 355 + } else { 356 + spin_unlock_irqrestore(&priv->echo_lock, flags); 357 + } 358 + 359 + return 0; 360 + } 361 + 362 + skb = alloc_can_err_skb(ndev, &cf); 363 + 364 + /* test state error bits according to their priority */ 365 + if (pucan_status_is_busoff(msg)) { 366 + netdev_dbg(ndev, "Bus-off entry status\n"); 367 + priv->can.state = CAN_STATE_BUS_OFF; 368 + priv->can.can_stats.bus_off++; 369 + can_bus_off(ndev); 370 + if (skb) 371 + cf->can_id |= CAN_ERR_BUSOFF; 372 + 373 + } else if (pucan_status_is_passive(msg)) { 374 + netdev_dbg(ndev, "Error passive status\n"); 375 + priv->can.state = CAN_STATE_ERROR_PASSIVE; 376 + priv->can.can_stats.error_passive++; 377 + if (skb) { 378 + cf->can_id |= CAN_ERR_CRTL; 379 + cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? 
380 + CAN_ERR_CRTL_TX_PASSIVE : 381 + CAN_ERR_CRTL_RX_PASSIVE; 382 + cf->data[6] = priv->bec.txerr; 383 + cf->data[7] = priv->bec.rxerr; 384 + } 385 + 386 + } else if (pucan_status_is_warning(msg)) { 387 + netdev_dbg(ndev, "Error warning status\n"); 388 + priv->can.state = CAN_STATE_ERROR_WARNING; 389 + priv->can.can_stats.error_warning++; 390 + if (skb) { 391 + cf->can_id |= CAN_ERR_CRTL; 392 + cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? 393 + CAN_ERR_CRTL_TX_WARNING : 394 + CAN_ERR_CRTL_RX_WARNING; 395 + cf->data[6] = priv->bec.txerr; 396 + cf->data[7] = priv->bec.rxerr; 397 + } 398 + 399 + } else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) { 400 + /* back to ERROR_ACTIVE */ 401 + netdev_dbg(ndev, "Error active status\n"); 402 + can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE, 403 + CAN_STATE_ERROR_ACTIVE); 404 + } else { 405 + dev_kfree_skb(skb); 406 + return 0; 407 + } 408 + 409 + if (!skb) { 410 + stats->rx_dropped++; 411 + return -ENOMEM; 412 + } 413 + 414 + stats->rx_packets++; 415 + stats->rx_bytes += cf->can_dlc; 416 + netif_rx(skb); 417 + 418 + return 0; 419 + } 420 + 421 + /* handle uCAN Rx overflow notification */ 422 + static int pucan_handle_cache_critical(struct peak_canfd_priv *priv) 423 + { 424 + struct net_device_stats *stats = &priv->ndev->stats; 425 + struct can_frame *cf; 426 + struct sk_buff *skb; 427 + 428 + stats->rx_over_errors++; 429 + stats->rx_errors++; 430 + 431 + skb = alloc_can_err_skb(priv->ndev, &cf); 432 + if (!skb) { 433 + stats->rx_dropped++; 434 + return -ENOMEM; 435 + } 436 + 437 + cf->can_id |= CAN_ERR_CRTL; 438 + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 439 + 440 + cf->data[6] = priv->bec.txerr; 441 + cf->data[7] = priv->bec.rxerr; 442 + 443 + stats->rx_bytes += cf->can_dlc; 444 + stats->rx_packets++; 445 + netif_rx(skb); 446 + 447 + return 0; 448 + } 449 + 450 + /* handle a single uCAN message */ 451 + int peak_canfd_handle_msg(struct peak_canfd_priv *priv, 452 + struct pucan_rx_msg *msg) 453 + { 454 + u16 
msg_type = le16_to_cpu(msg->type); 455 + int msg_size = le16_to_cpu(msg->size); 456 + int err; 457 + 458 + if (!msg_size || !msg_type) { 459 + /* null packet found: end of list */ 460 + goto exit; 461 + } 462 + 463 + switch (msg_type) { 464 + case PUCAN_MSG_CAN_RX: 465 + err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg); 466 + break; 467 + case PUCAN_MSG_ERROR: 468 + err = pucan_handle_error(priv, (struct pucan_error_msg *)msg); 469 + break; 470 + case PUCAN_MSG_STATUS: 471 + err = pucan_handle_status(priv, (struct pucan_status_msg *)msg); 472 + break; 473 + case PUCAN_MSG_CACHE_CRITICAL: 474 + err = pucan_handle_cache_critical(priv); 475 + break; 476 + default: 477 + err = 0; 478 + } 479 + 480 + if (err < 0) 481 + return err; 482 + 483 + exit: 484 + return msg_size; 485 + } 486 + 487 + /* handle a list of rx_count messages from rx_msg memory address */ 488 + int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, 489 + struct pucan_rx_msg *msg_list, int msg_count) 490 + { 491 + void *msg_ptr = msg_list; 492 + int i, msg_size; 493 + 494 + for (i = 0; i < msg_count; i++) { 495 + msg_size = peak_canfd_handle_msg(priv, msg_ptr); 496 + 497 + /* a null packet can be found at the end of a list */ 498 + if (msg_size <= 0) 499 + break; 500 + 501 + msg_ptr += msg_size; 502 + } 503 + 504 + if (msg_size < 0) 505 + return msg_size; 506 + 507 + return i; 508 + } 509 + 510 + static int peak_canfd_start(struct peak_canfd_priv *priv) 511 + { 512 + int err; 513 + 514 + err = pucan_clr_err_counters(priv); 515 + if (err) 516 + goto err_exit; 517 + 518 + priv->echo_idx = 0; 519 + 520 + priv->bec.txerr = 0; 521 + priv->bec.rxerr = 0; 522 + 523 + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 524 + err = pucan_set_listen_only_mode(priv); 525 + else 526 + err = pucan_set_normal_mode(priv); 527 + 528 + err_exit: 529 + return err; 530 + } 531 + 532 + static void peak_canfd_stop(struct peak_canfd_priv *priv) 533 + { 534 + int err; 535 + 536 + /* go back to RESET mode */ 
537 + err = pucan_set_reset_mode(priv); 538 + if (err) { 539 + netdev_err(priv->ndev, "channel %u reset failed\n", 540 + priv->index); 541 + } else { 542 + /* abort last Tx (MUST be done in RESET mode only!) */ 543 + pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH); 544 + } 545 + } 546 + 547 + static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode) 548 + { 549 + struct peak_canfd_priv *priv = netdev_priv(ndev); 550 + 551 + switch (mode) { 552 + case CAN_MODE_START: 553 + peak_canfd_start(priv); 554 + netif_wake_queue(ndev); 555 + break; 556 + default: 557 + return -EOPNOTSUPP; 558 + } 559 + 560 + return 0; 561 + } 562 + 563 + static int peak_canfd_get_berr_counter(const struct net_device *ndev, 564 + struct can_berr_counter *bec) 565 + { 566 + struct peak_canfd_priv *priv = netdev_priv(ndev); 567 + 568 + *bec = priv->bec; 569 + return 0; 570 + } 571 + 572 + static int peak_canfd_open(struct net_device *ndev) 573 + { 574 + struct peak_canfd_priv *priv = netdev_priv(ndev); 575 + int i, err = 0; 576 + 577 + err = open_candev(ndev); 578 + if (err) { 579 + netdev_err(ndev, "open_candev() failed, error %d\n", err); 580 + goto err_exit; 581 + } 582 + 583 + err = pucan_set_reset_mode(priv); 584 + if (err) 585 + goto err_close; 586 + 587 + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { 588 + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 589 + err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO); 590 + else 591 + err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO); 592 + 593 + if (err) 594 + goto err_close; 595 + } 596 + 597 + /* set option: get rx/tx error counters */ 598 + err = pucan_set_options(priv, PUCAN_OPTION_ERROR); 599 + if (err) 600 + goto err_close; 601 + 602 + /* accept all standard CAN ID */ 603 + for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++) 604 + pucan_set_std_filter(priv, i, 0xffffffff); 605 + 606 + err = peak_canfd_start(priv); 607 + if (err) 608 + goto err_close; 609 + 610 + /* receiving the RB status says when Tx path is ready */ 611 + 
err = pucan_setup_rx_barrier(priv); 612 + if (!err) 613 + goto err_exit; 614 + 615 + err_close: 616 + close_candev(ndev); 617 + err_exit: 618 + return err; 619 + } 620 + 621 + static int peak_canfd_set_bittiming(struct net_device *ndev) 622 + { 623 + struct peak_canfd_priv *priv = netdev_priv(ndev); 624 + 625 + return pucan_set_timing_slow(priv, &priv->can.bittiming); 626 + } 627 + 628 + static int peak_canfd_set_data_bittiming(struct net_device *ndev) 629 + { 630 + struct peak_canfd_priv *priv = netdev_priv(ndev); 631 + 632 + return pucan_set_timing_fast(priv, &priv->can.data_bittiming); 633 + } 634 + 635 + static int peak_canfd_close(struct net_device *ndev) 636 + { 637 + struct peak_canfd_priv *priv = netdev_priv(ndev); 638 + 639 + netif_stop_queue(ndev); 640 + peak_canfd_stop(priv); 641 + close_candev(ndev); 642 + 643 + return 0; 644 + } 645 + 646 + static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, 647 + struct net_device *ndev) 648 + { 649 + struct peak_canfd_priv *priv = netdev_priv(ndev); 650 + struct net_device_stats *stats = &ndev->stats; 651 + struct canfd_frame *cf = (struct canfd_frame *)skb->data; 652 + struct pucan_tx_msg *msg; 653 + u16 msg_size, msg_flags; 654 + unsigned long flags; 655 + bool should_stop_tx_queue; 656 + int room_left; 657 + u8 can_dlc; 658 + 659 + if (can_dropped_invalid_skb(ndev, skb)) 660 + return NETDEV_TX_OK; 661 + 662 + msg_size = ALIGN(sizeof(*msg) + cf->len, 4); 663 + msg = priv->alloc_tx_msg(priv, msg_size, &room_left); 664 + 665 + /* should never happen except under bus-off condition and (auto-)restart 666 + * mechanism 667 + */ 668 + if (!msg) { 669 + stats->tx_dropped++; 670 + netif_stop_queue(ndev); 671 + return NETDEV_TX_BUSY; 672 + } 673 + 674 + msg->size = cpu_to_le16(msg_size); 675 + msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); 676 + msg_flags = 0; 677 + 678 + if (cf->can_id & CAN_EFF_FLAG) { 679 + msg_flags |= PUCAN_MSG_EXT_ID; 680 + msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK); 681 + } else { 682 
+ msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK); 683 + } 684 + 685 + if (can_is_canfd_skb(skb)) { 686 + /* CAN FD frame format */ 687 + can_dlc = can_len2dlc(cf->len); 688 + 689 + msg_flags |= PUCAN_MSG_EXT_DATA_LEN; 690 + 691 + if (cf->flags & CANFD_BRS) 692 + msg_flags |= PUCAN_MSG_BITRATE_SWITCH; 693 + 694 + if (cf->flags & CANFD_ESI) 695 + msg_flags |= PUCAN_MSG_ERROR_STATE_IND; 696 + } else { 697 + /* CAN 2.0 frame format */ 698 + can_dlc = cf->len; 699 + 700 + if (cf->can_id & CAN_RTR_FLAG) 701 + msg_flags |= PUCAN_MSG_RTR; 702 + } 703 + 704 + /* always ask loopback for echo management */ 705 + msg_flags |= PUCAN_MSG_LOOPED_BACK; 706 + 707 + /* set driver specific bit to differentiate with application loopback */ 708 + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) 709 + msg_flags |= PUCAN_MSG_SELF_RECEIVE; 710 + 711 + msg->flags = cpu_to_le16(msg_flags); 712 + msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, can_dlc); 713 + memcpy(msg->d, cf->data, cf->len); 714 + 715 + /* struct msg client field is used as an index in the echo skbs ring */ 716 + msg->client = priv->echo_idx; 717 + 718 + spin_lock_irqsave(&priv->echo_lock, flags); 719 + 720 + /* prepare and save echo skb in internal slot */ 721 + can_put_echo_skb(skb, ndev, priv->echo_idx); 722 + 723 + /* move echo index to the next slot */ 724 + priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max; 725 + 726 + /* if next slot is not free, stop network queue (no slot free in echo 727 + * skb ring means that the controller did not write these frames on 728 + * the bus: no need to continue). 
729 + */ 730 + should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); 731 + 732 + spin_unlock_irqrestore(&priv->echo_lock, flags); 733 + 734 + /* write the skb on the interface */ 735 + priv->write_tx_msg(priv, msg); 736 + 737 + /* stop network tx queue if not enough room to save one more msg too */ 738 + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 739 + should_stop_tx_queue |= (room_left < 740 + (sizeof(*msg) + CANFD_MAX_DLEN)); 741 + else 742 + should_stop_tx_queue |= (room_left < 743 + (sizeof(*msg) + CAN_MAX_DLEN)); 744 + 745 + if (should_stop_tx_queue) 746 + netif_stop_queue(ndev); 747 + 748 + return NETDEV_TX_OK; 749 + } 750 + 751 + static const struct net_device_ops peak_canfd_netdev_ops = { 752 + .ndo_open = peak_canfd_open, 753 + .ndo_stop = peak_canfd_close, 754 + .ndo_start_xmit = peak_canfd_start_xmit, 755 + .ndo_change_mtu = can_change_mtu, 756 + }; 757 + 758 + struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, 759 + int echo_skb_max) 760 + { 761 + struct net_device *ndev; 762 + struct peak_canfd_priv *priv; 763 + 764 + /* we DO support local echo */ 765 + if (echo_skb_max < 0) 766 + echo_skb_max = PCANFD_ECHO_SKB_MAX; 767 + 768 + /* allocate the candev object */ 769 + ndev = alloc_candev(sizeof_priv, echo_skb_max); 770 + if (!ndev) 771 + return NULL; 772 + 773 + priv = netdev_priv(ndev); 774 + 775 + /* complete now socket-can initialization side */ 776 + priv->can.state = CAN_STATE_STOPPED; 777 + priv->can.bittiming_const = &peak_canfd_nominal_const; 778 + priv->can.data_bittiming_const = &peak_canfd_data_const; 779 + 780 + priv->can.do_set_mode = peak_canfd_set_mode; 781 + priv->can.do_get_berr_counter = peak_canfd_get_berr_counter; 782 + priv->can.do_set_bittiming = peak_canfd_set_bittiming; 783 + priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming; 784 + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 785 + CAN_CTRLMODE_LISTENONLY | 786 + CAN_CTRLMODE_3_SAMPLES | 787 + CAN_CTRLMODE_FD | 788 + 
CAN_CTRLMODE_FD_NON_ISO | 789 + CAN_CTRLMODE_BERR_REPORTING; 790 + 791 + priv->ndev = ndev; 792 + priv->index = index; 793 + priv->cmd_len = 0; 794 + spin_lock_init(&priv->echo_lock); 795 + 796 + ndev->flags |= IFF_ECHO; 797 + ndev->netdev_ops = &peak_canfd_netdev_ops; 798 + ndev->dev_id = index; 799 + 800 + return ndev; 801 + }
+55
drivers/net/can/peak_canfd/peak_canfd_user.h
··· 1 + /* 2 + * CAN driver for PEAK System micro-CAN based adapters 3 + * 4 + * Copyright (C) 2003-2011 PEAK System-Technik GmbH 5 + * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License as published 9 + * by the Free Software Foundation; version 2 of the License. 10 + * 11 + * This program is distributed in the hope that it will be useful, but 12 + * WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 + * General Public License for more details. 15 + */ 16 + #ifndef PEAK_CANFD_USER_H 17 + #define PEAK_CANFD_USER_H 18 + 19 + #include <linux/can/dev/peak_canfd.h> 20 + 21 + #define PCANFD_ECHO_SKB_DEF -1 22 + 23 + /* data structure private to each uCAN interface */ 24 + struct peak_canfd_priv { 25 + struct can_priv can; /* socket-can private data */ 26 + struct net_device *ndev; /* network device */ 27 + int index; /* channel index */ 28 + 29 + struct can_berr_counter bec; /* rx/tx err counters */ 30 + 31 + int echo_idx; /* echo skb free slot index */ 32 + spinlock_t echo_lock; 33 + 34 + int cmd_len; 35 + void *cmd_buffer; 36 + int cmd_maxlen; 37 + 38 + int (*pre_cmd)(struct peak_canfd_priv *priv); 39 + int (*write_cmd)(struct peak_canfd_priv *priv); 40 + int (*post_cmd)(struct peak_canfd_priv *priv); 41 + 42 + int (*enable_tx_path)(struct peak_canfd_priv *priv); 43 + void *(*alloc_tx_msg)(struct peak_canfd_priv *priv, u16 msg_size, 44 + int *room_left); 45 + int (*write_tx_msg)(struct peak_canfd_priv *priv, 46 + struct pucan_tx_msg *msg); 47 + }; 48 + 49 + struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, 50 + int echo_skb_max); 51 + int peak_canfd_handle_msg(struct peak_canfd_priv *priv, 52 + struct pucan_rx_msg *msg); 53 + int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, 54 + struct pucan_rx_msg 
*rx_msg, int rx_count); 55 + #endif
+842
drivers/net/can/peak_canfd/peak_pciefd_main.c
··· 1 + /* 2 + * Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com> 3 + * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com> 4 + * 5 + * Derived from the PCAN project file driver/src/pcan_pci.c: 6 + * 7 + * Copyright (C) 2001-2006 PEAK System-Technik GmbH 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the version 2 of the GNU General Public License 11 + * as published by the Free Software Foundation 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + */ 18 + 19 + #include <linux/kernel.h> 20 + #include <linux/module.h> 21 + #include <linux/interrupt.h> 22 + #include <linux/netdevice.h> 23 + #include <linux/delay.h> 24 + #include <linux/pci.h> 25 + #include <linux/io.h> 26 + #include <linux/can.h> 27 + #include <linux/can/dev.h> 28 + 29 + #include "peak_canfd_user.h" 30 + 31 + MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); 32 + MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards"); 33 + MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards"); 34 + MODULE_LICENSE("GPL v2"); 35 + 36 + #define PCIEFD_DRV_NAME "peak_pciefd" 37 + 38 + #define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */ 39 + #define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */ 40 + 41 + /* PEAK PCIe board access description */ 42 + #define PCIEFD_BAR0_SIZE (64 * 1024) 43 + #define PCIEFD_RX_DMA_SIZE (4 * 1024) 44 + #define PCIEFD_TX_DMA_SIZE (4 * 1024) 45 + 46 + #define PCIEFD_TX_PAGE_SIZE (2 * 1024) 47 + 48 + /* System Control Registers */ 49 + #define PCIEFD_REG_SYS_CTL_SET 0x0000 /* set bits */ 50 + #define PCIEFD_REG_SYS_CTL_CLR 0x0004 /* clear bits */ 51 + 52 + /* Version info registers */ 53 + #define PCIEFD_REG_SYS_VER1 0x0040 /* 
version reg #1 */ 54 + #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ 55 + 56 + /* System Control Registers Bits */ 57 + #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ 58 + #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ 59 + 60 + /* CAN-FD channel addresses */ 61 + #define PCIEFD_CANX_OFF(c) (((c) + 1) * 0x1000) 62 + 63 + #define PCIEFD_ECHO_SKB_MAX PCANFD_ECHO_SKB_DEF 64 + 65 + /* CAN-FD channel registers */ 66 + #define PCIEFD_REG_CAN_MISC 0x0000 /* Misc. control */ 67 + #define PCIEFD_REG_CAN_CLK_SEL 0x0008 /* Clock selector */ 68 + #define PCIEFD_REG_CAN_CMD_PORT_L 0x0010 /* 64-bits command port */ 69 + #define PCIEFD_REG_CAN_CMD_PORT_H 0x0014 70 + #define PCIEFD_REG_CAN_TX_REQ_ACC 0x0020 /* Tx request accumulator */ 71 + #define PCIEFD_REG_CAN_TX_CTL_SET 0x0030 /* Tx control set register */ 72 + #define PCIEFD_REG_CAN_TX_CTL_CLR 0x0038 /* Tx control clear register */ 73 + #define PCIEFD_REG_CAN_TX_DMA_ADDR_L 0x0040 /* 64-bits addr for Tx DMA */ 74 + #define PCIEFD_REG_CAN_TX_DMA_ADDR_H 0x0044 75 + #define PCIEFD_REG_CAN_RX_CTL_SET 0x0050 /* Rx control set register */ 76 + #define PCIEFD_REG_CAN_RX_CTL_CLR 0x0058 /* Rx control clear register */ 77 + #define PCIEFD_REG_CAN_RX_CTL_WRT 0x0060 /* Rx control write register */ 78 + #define PCIEFD_REG_CAN_RX_CTL_ACK 0x0068 /* Rx control ACK register */ 79 + #define PCIEFD_REG_CAN_RX_DMA_ADDR_L 0x0070 /* 64-bits addr for Rx DMA */ 80 + #define PCIEFD_REG_CAN_RX_DMA_ADDR_H 0x0074 81 + 82 + /* CAN-FD channel misc register bits */ 83 + #define CANFD_MISC_TS_RST 0x00000001 /* timestamp cnt rst */ 84 + 85 + /* CAN-FD channel Clock SELector Source & DIVider */ 86 + #define CANFD_CLK_SEL_DIV_MASK 0x00000007 87 + #define CANFD_CLK_SEL_DIV_60MHZ 0x00000000 /* SRC=240MHz only */ 88 + #define CANFD_CLK_SEL_DIV_40MHZ 0x00000001 /* SRC=240MHz only */ 89 + #define CANFD_CLK_SEL_DIV_30MHZ 0x00000002 /* SRC=240MHz only */ 90 + #define CANFD_CLK_SEL_DIV_24MHZ 0x00000003 /* SRC=240MHz only */ 91 + 
#define CANFD_CLK_SEL_DIV_20MHZ 0x00000004 /* SRC=240MHz only */ 92 + 93 + #define CANFD_CLK_SEL_SRC_MASK 0x00000008 /* 0=80MHz, 1=240MHz */ 94 + #define CANFD_CLK_SEL_SRC_240MHZ 0x00000008 95 + #define CANFD_CLK_SEL_SRC_80MHZ (~CANFD_CLK_SEL_SRC_240MHZ & \ 96 + CANFD_CLK_SEL_SRC_MASK) 97 + 98 + #define CANFD_CLK_SEL_20MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ 99 + CANFD_CLK_SEL_DIV_20MHZ) 100 + #define CANFD_CLK_SEL_24MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ 101 + CANFD_CLK_SEL_DIV_24MHZ) 102 + #define CANFD_CLK_SEL_30MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ 103 + CANFD_CLK_SEL_DIV_30MHZ) 104 + #define CANFD_CLK_SEL_40MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ 105 + CANFD_CLK_SEL_DIV_40MHZ) 106 + #define CANFD_CLK_SEL_60MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ 107 + CANFD_CLK_SEL_DIV_60MHZ) 108 + #define CANFD_CLK_SEL_80MHZ (CANFD_CLK_SEL_SRC_80MHZ) 109 + 110 + /* CAN-FD channel Rx/Tx control register bits */ 111 + #define CANFD_CTL_UNC_BIT 0x00010000 /* Uncached DMA mem */ 112 + #define CANFD_CTL_RST_BIT 0x00020000 /* reset DMA action */ 113 + #define CANFD_CTL_IEN_BIT 0x00040000 /* IRQ enable */ 114 + 115 + /* Rx IRQ Count and Time Limits */ 116 + #define CANFD_CTL_IRQ_CL_DEF 16 /* Rx msg max nb per IRQ in Rx DMA */ 117 + #define CANFD_CTL_IRQ_TL_DEF 10 /* Time before IRQ if < CL (x100 µs) */ 118 + 119 + #define CANFD_OPTIONS_SET (CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD) 120 + 121 + /* Tx anticipation window (link logical address should be aligned on 2K 122 + * boundary) 123 + */ 124 + #define PCIEFD_TX_PAGE_COUNT (PCIEFD_TX_DMA_SIZE / PCIEFD_TX_PAGE_SIZE) 125 + 126 + #define CANFD_MSG_LNK_TX 0x1001 /* Tx msgs link */ 127 + 128 + /* 32-bits IRQ status fields, heading Rx DMA area */ 129 + static inline int pciefd_irq_tag(u32 irq_status) 130 + { 131 + return irq_status & 0x0000000f; 132 + } 133 + 134 + static inline int pciefd_irq_rx_cnt(u32 irq_status) 135 + { 136 + return (irq_status & 0x000007f0) >> 4; 137 + } 138 + 139 + static inline int pciefd_irq_is_lnk(u32 irq_status) 140 + { 141 + return 
irq_status & 0x00010000; 142 + } 143 + 144 + /* Rx record */ 145 + struct pciefd_rx_dma { 146 + __le32 irq_status; 147 + __le32 sys_time_low; 148 + __le32 sys_time_high; 149 + struct pucan_rx_msg msg[0]; 150 + } __packed __aligned(4); 151 + 152 + /* Tx Link record */ 153 + struct pciefd_tx_link { 154 + __le16 size; 155 + __le16 type; 156 + __le32 laddr_lo; 157 + __le32 laddr_hi; 158 + } __packed __aligned(4); 159 + 160 + /* Tx page descriptor */ 161 + struct pciefd_page { 162 + void *vbase; /* page virtual address */ 163 + dma_addr_t lbase; /* page logical address */ 164 + u32 offset; 165 + u32 size; 166 + }; 167 + 168 + #define CANFD_IRQ_SET 0x00000001 169 + #define CANFD_TX_PATH_SET 0x00000002 170 + 171 + /* CAN-FD channel object */ 172 + struct pciefd_board; 173 + struct pciefd_can { 174 + struct peak_canfd_priv ucan; /* must be the first member */ 175 + void __iomem *reg_base; /* channel config base addr */ 176 + struct pciefd_board *board; /* reverse link */ 177 + 178 + struct pucan_command pucan_cmd; /* command buffer */ 179 + 180 + dma_addr_t rx_dma_laddr; /* DMA virtual and logical addr */ 181 + void *rx_dma_vaddr; /* for Rx and Tx areas */ 182 + dma_addr_t tx_dma_laddr; 183 + void *tx_dma_vaddr; 184 + 185 + struct pciefd_page tx_pages[PCIEFD_TX_PAGE_COUNT]; 186 + u16 tx_pages_free; /* free Tx pages counter */ 187 + u16 tx_page_index; /* current page used for Tx */ 188 + spinlock_t tx_lock; 189 + 190 + u32 irq_status; 191 + u32 irq_tag; /* next irq tag */ 192 + }; 193 + 194 + /* PEAK-PCIe FD board object */ 195 + struct pciefd_board { 196 + void __iomem *reg_base; 197 + struct pci_dev *pci_dev; 198 + int can_count; 199 + spinlock_t cmd_lock; /* 64-bits cmds must be atomic */ 200 + struct pciefd_can *can[0]; /* array of network devices */ 201 + }; 202 + 203 + /* supported device ids. 
*/ 204 + static const struct pci_device_id peak_pciefd_tbl[] = { 205 + {PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, 206 + {0,} 207 + }; 208 + 209 + MODULE_DEVICE_TABLE(pci, peak_pciefd_tbl); 210 + 211 + /* read a 32 bits value from a SYS block register */ 212 + static inline u32 pciefd_sys_readreg(const struct pciefd_board *priv, u16 reg) 213 + { 214 + return readl(priv->reg_base + reg); 215 + } 216 + 217 + /* write a 32 bits value into a SYS block register */ 218 + static inline void pciefd_sys_writereg(const struct pciefd_board *priv, 219 + u32 val, u16 reg) 220 + { 221 + writel(val, priv->reg_base + reg); 222 + } 223 + 224 + /* read a 32 bits value from CAN-FD block register */ 225 + static inline u32 pciefd_can_readreg(const struct pciefd_can *priv, u16 reg) 226 + { 227 + return readl(priv->reg_base + reg); 228 + } 229 + 230 + /* write a 32 bits value into a CAN-FD block register */ 231 + static inline void pciefd_can_writereg(const struct pciefd_can *priv, 232 + u32 val, u16 reg) 233 + { 234 + writel(val, priv->reg_base + reg); 235 + } 236 + 237 + /* give a channel logical Rx DMA address to the board */ 238 + static void pciefd_can_setup_rx_dma(struct pciefd_can *priv) 239 + { 240 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 241 + const u32 dma_addr_h = (u32)(priv->rx_dma_laddr >> 32); 242 + #else 243 + const u32 dma_addr_h = 0; 244 + #endif 245 + 246 + /* (DMA must be reset for Rx) */ 247 + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_SET); 248 + 249 + /* write the logical address of the Rx DMA area for this channel */ 250 + pciefd_can_writereg(priv, (u32)priv->rx_dma_laddr, 251 + PCIEFD_REG_CAN_RX_DMA_ADDR_L); 252 + pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_RX_DMA_ADDR_H); 253 + 254 + /* also indicates that Rx DMA is cacheable */ 255 + pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, PCIEFD_REG_CAN_RX_CTL_CLR); 256 + } 257 + 258 + /* clear channel logical Rx DMA address from the board */ 259 + static void 
pciefd_can_clear_rx_dma(struct pciefd_can *priv) 260 + { 261 + /* DMA must be reset for Rx */ 262 + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_SET); 263 + 264 + /* clear the logical address of the Rx DMA area for this channel */ 265 + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_L); 266 + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_H); 267 + } 268 + 269 + /* give a channel logical Tx DMA address to the board */ 270 + static void pciefd_can_setup_tx_dma(struct pciefd_can *priv) 271 + { 272 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 273 + const u32 dma_addr_h = (u32)(priv->tx_dma_laddr >> 32); 274 + #else 275 + const u32 dma_addr_h = 0; 276 + #endif 277 + 278 + /* (DMA must be reset for Tx) */ 279 + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_SET); 280 + 281 + /* write the logical address of the Tx DMA area for this channel */ 282 + pciefd_can_writereg(priv, (u32)priv->tx_dma_laddr, 283 + PCIEFD_REG_CAN_TX_DMA_ADDR_L); 284 + pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_TX_DMA_ADDR_H); 285 + 286 + /* also indicates that Tx DMA is cacheable */ 287 + pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, PCIEFD_REG_CAN_TX_CTL_CLR); 288 + } 289 + 290 + /* clear channel logical Tx DMA address from the board */ 291 + static void pciefd_can_clear_tx_dma(struct pciefd_can *priv) 292 + { 293 + /* DMA must be reset for Tx */ 294 + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_SET); 295 + 296 + /* clear the logical address of the Tx DMA area for this channel */ 297 + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_L); 298 + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_H); 299 + } 300 + 301 + static void pciefd_can_ack_rx_dma(struct pciefd_can *priv) 302 + { 303 + /* read value of current IRQ tag and inc it for next one */ 304 + priv->irq_tag = le32_to_cpu(*(__le32 *)priv->rx_dma_vaddr); 305 + priv->irq_tag++; 306 + priv->irq_tag &= 0xf; 307 + 308 + /* write the next IRQ tag 
for this CAN */ 309 + pciefd_can_writereg(priv, priv->irq_tag, PCIEFD_REG_CAN_RX_CTL_ACK); 310 + } 311 + 312 + /* IRQ handler */ 313 + static irqreturn_t pciefd_irq_handler(int irq, void *arg) 314 + { 315 + struct pciefd_can *priv = arg; 316 + struct pciefd_rx_dma *rx_dma = priv->rx_dma_vaddr; 317 + 318 + /* INTA mode only to sync with PCIe transaction */ 319 + if (!pci_dev_msi_enabled(priv->board->pci_dev)) 320 + (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1); 321 + 322 + /* read IRQ status from the first 32-bits of the Rx DMA area */ 323 + priv->irq_status = le32_to_cpu(rx_dma->irq_status); 324 + 325 + /* check if this (shared) IRQ is for this CAN */ 326 + if (pciefd_irq_tag(priv->irq_status) != priv->irq_tag) 327 + return IRQ_NONE; 328 + 329 + /* handle rx messages (if any) */ 330 + peak_canfd_handle_msgs_list(&priv->ucan, 331 + rx_dma->msg, 332 + pciefd_irq_rx_cnt(priv->irq_status)); 333 + 334 + /* handle tx link interrupt (if any) */ 335 + if (pciefd_irq_is_lnk(priv->irq_status)) { 336 + unsigned long flags; 337 + 338 + spin_lock_irqsave(&priv->tx_lock, flags); 339 + priv->tx_pages_free++; 340 + spin_unlock_irqrestore(&priv->tx_lock, flags); 341 + 342 + /* wake producer up */ 343 + netif_wake_queue(priv->ucan.ndev); 344 + } 345 + 346 + /* re-enable Rx DMA transfer for this CAN */ 347 + pciefd_can_ack_rx_dma(priv); 348 + 349 + return IRQ_HANDLED; 350 + } 351 + 352 + static int pciefd_enable_tx_path(struct peak_canfd_priv *ucan) 353 + { 354 + struct pciefd_can *priv = (struct pciefd_can *)ucan; 355 + int i; 356 + 357 + /* initialize the Tx pages descriptors */ 358 + priv->tx_pages_free = PCIEFD_TX_PAGE_COUNT - 1; 359 + priv->tx_page_index = 0; 360 + 361 + priv->tx_pages[0].vbase = priv->tx_dma_vaddr; 362 + priv->tx_pages[0].lbase = priv->tx_dma_laddr; 363 + 364 + for (i = 0; i < PCIEFD_TX_PAGE_COUNT; i++) { 365 + priv->tx_pages[i].offset = 0; 366 + priv->tx_pages[i].size = PCIEFD_TX_PAGE_SIZE - 367 + sizeof(struct pciefd_tx_link); 368 + if (i) { 369 
+ priv->tx_pages[i].vbase = 370 + priv->tx_pages[i - 1].vbase + 371 + PCIEFD_TX_PAGE_SIZE; 372 + priv->tx_pages[i].lbase = 373 + priv->tx_pages[i - 1].lbase + 374 + PCIEFD_TX_PAGE_SIZE; 375 + } 376 + } 377 + 378 + /* setup Tx DMA addresses into IP core */ 379 + pciefd_can_setup_tx_dma(priv); 380 + 381 + /* start (TX_RST=0) Tx Path */ 382 + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_CLR); 383 + 384 + return 0; 385 + } 386 + 387 + /* board specific CANFD command pre-processing */ 388 + static int pciefd_pre_cmd(struct peak_canfd_priv *ucan) 389 + { 390 + struct pciefd_can *priv = (struct pciefd_can *)ucan; 391 + u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd); 392 + int err; 393 + 394 + /* pre-process command */ 395 + switch (cmd) { 396 + case PUCAN_CMD_NORMAL_MODE: 397 + case PUCAN_CMD_LISTEN_ONLY_MODE: 398 + 399 + if (ucan->can.state == CAN_STATE_BUS_OFF) 400 + break; 401 + 402 + /* going into operational mode: setup IRQ handler */ 403 + err = request_irq(priv->board->pci_dev->irq, 404 + pciefd_irq_handler, 405 + IRQF_SHARED, 406 + PCIEFD_DRV_NAME, 407 + priv); 408 + if (err) 409 + return err; 410 + 411 + /* setup Rx DMA address */ 412 + pciefd_can_setup_rx_dma(priv); 413 + 414 + /* setup max count of msgs per IRQ */ 415 + pciefd_can_writereg(priv, (CANFD_CTL_IRQ_TL_DEF) << 8 | 416 + CANFD_CTL_IRQ_CL_DEF, 417 + PCIEFD_REG_CAN_RX_CTL_WRT); 418 + 419 + /* clear DMA RST for Rx (Rx start) */ 420 + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, 421 + PCIEFD_REG_CAN_RX_CTL_CLR); 422 + 423 + /* reset timestamps */ 424 + pciefd_can_writereg(priv, !CANFD_MISC_TS_RST, 425 + PCIEFD_REG_CAN_MISC); 426 + 427 + /* do an initial ACK */ 428 + pciefd_can_ack_rx_dma(priv); 429 + 430 + /* enable IRQ for this CAN after having set next irq_tag */ 431 + pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, 432 + PCIEFD_REG_CAN_RX_CTL_SET); 433 + 434 + /* Tx path will be setup as soon as RX_BARRIER is received */ 435 + break; 436 + default: 437 + break; 438 + } 439 + 440 + 
return 0; 441 + } 442 + 443 + /* write a command */ 444 + static int pciefd_write_cmd(struct peak_canfd_priv *ucan) 445 + { 446 + struct pciefd_can *priv = (struct pciefd_can *)ucan; 447 + unsigned long flags; 448 + 449 + /* 64-bits command is atomic */ 450 + spin_lock_irqsave(&priv->board->cmd_lock, flags); 451 + 452 + pciefd_can_writereg(priv, *(u32 *)ucan->cmd_buffer, 453 + PCIEFD_REG_CAN_CMD_PORT_L); 454 + pciefd_can_writereg(priv, *(u32 *)(ucan->cmd_buffer + 4), 455 + PCIEFD_REG_CAN_CMD_PORT_H); 456 + 457 + spin_unlock_irqrestore(&priv->board->cmd_lock, flags); 458 + 459 + return 0; 460 + } 461 + 462 + /* board specific CANFD command post-processing */ 463 + static int pciefd_post_cmd(struct peak_canfd_priv *ucan) 464 + { 465 + struct pciefd_can *priv = (struct pciefd_can *)ucan; 466 + u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd); 467 + 468 + switch (cmd) { 469 + case PUCAN_CMD_RESET_MODE: 470 + 471 + if (ucan->can.state == CAN_STATE_STOPPED) 472 + break; 473 + 474 + /* controller now in reset mode: */ 475 + 476 + /* stop and reset DMA addresses in Tx/Rx engines */ 477 + pciefd_can_clear_tx_dma(priv); 478 + pciefd_can_clear_rx_dma(priv); 479 + 480 + /* disable IRQ for this CAN */ 481 + pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, 482 + PCIEFD_REG_CAN_RX_CTL_CLR); 483 + 484 + free_irq(priv->board->pci_dev->irq, priv); 485 + 486 + ucan->can.state = CAN_STATE_STOPPED; 487 + 488 + break; 489 + } 490 + 491 + return 0; 492 + } 493 + 494 + static void *pciefd_alloc_tx_msg(struct peak_canfd_priv *ucan, u16 msg_size, 495 + int *room_left) 496 + { 497 + struct pciefd_can *priv = (struct pciefd_can *)ucan; 498 + struct pciefd_page *page = priv->tx_pages + priv->tx_page_index; 499 + unsigned long flags; 500 + void *msg; 501 + 502 + spin_lock_irqsave(&priv->tx_lock, flags); 503 + 504 + if (page->offset + msg_size > page->size) { 505 + struct pciefd_tx_link *lk; 506 + 507 + /* not enough space in this page: try another one */ 508 + if (!priv->tx_pages_free) { 509 + 
spin_unlock_irqrestore(&priv->tx_lock, flags); 510 + 511 + /* Tx overflow */ 512 + return NULL; 513 + } 514 + 515 + priv->tx_pages_free--; 516 + 517 + /* keep address of the very last free slot of current page */ 518 + lk = page->vbase + page->offset; 519 + 520 + /* next, move on a new free page */ 521 + priv->tx_page_index = (priv->tx_page_index + 1) % 522 + PCIEFD_TX_PAGE_COUNT; 523 + page = priv->tx_pages + priv->tx_page_index; 524 + 525 + /* put link record to this new page at the end of prev one */ 526 + lk->size = cpu_to_le16(sizeof(*lk)); 527 + lk->type = cpu_to_le16(CANFD_MSG_LNK_TX); 528 + lk->laddr_lo = cpu_to_le32(page->lbase); 529 + 530 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 531 + lk->laddr_hi = cpu_to_le32(page->lbase >> 32); 532 + #else 533 + lk->laddr_hi = 0; 534 + #endif 535 + /* next msgs will be put from the begininng of this new page */ 536 + page->offset = 0; 537 + } 538 + 539 + *room_left = priv->tx_pages_free * page->size; 540 + 541 + spin_unlock_irqrestore(&priv->tx_lock, flags); 542 + 543 + msg = page->vbase + page->offset; 544 + 545 + /* give back room left in the tx ring */ 546 + *room_left += page->size - (page->offset + msg_size); 547 + 548 + return msg; 549 + } 550 + 551 + static int pciefd_write_tx_msg(struct peak_canfd_priv *ucan, 552 + struct pucan_tx_msg *msg) 553 + { 554 + struct pciefd_can *priv = (struct pciefd_can *)ucan; 555 + struct pciefd_page *page = priv->tx_pages + priv->tx_page_index; 556 + 557 + /* this slot is now reserved for writing the frame */ 558 + page->offset += le16_to_cpu(msg->size); 559 + 560 + /* tell the board a frame has been written in Tx DMA area */ 561 + pciefd_can_writereg(priv, 1, PCIEFD_REG_CAN_TX_REQ_ACC); 562 + 563 + return 0; 564 + } 565 + 566 + /* probe for CAN-FD channel #pciefd_board->can_count */ 567 + static int pciefd_can_probe(struct pciefd_board *pciefd) 568 + { 569 + struct net_device *ndev; 570 + struct pciefd_can *priv; 571 + u32 clk; 572 + int err; 573 + 574 + /* allocate the candev 
object with default isize of echo skbs ring */ 575 + ndev = alloc_peak_canfd_dev(sizeof(*priv), pciefd->can_count, 576 + PCIEFD_ECHO_SKB_MAX); 577 + if (!ndev) { 578 + dev_err(&pciefd->pci_dev->dev, 579 + "failed to alloc candev object\n"); 580 + goto failure; 581 + } 582 + 583 + priv = netdev_priv(ndev); 584 + 585 + /* fill-in candev private object: */ 586 + 587 + /* setup PCIe-FD own callbacks */ 588 + priv->ucan.pre_cmd = pciefd_pre_cmd; 589 + priv->ucan.write_cmd = pciefd_write_cmd; 590 + priv->ucan.post_cmd = pciefd_post_cmd; 591 + priv->ucan.enable_tx_path = pciefd_enable_tx_path; 592 + priv->ucan.alloc_tx_msg = pciefd_alloc_tx_msg; 593 + priv->ucan.write_tx_msg = pciefd_write_tx_msg; 594 + 595 + /* setup PCIe-FD own command buffer */ 596 + priv->ucan.cmd_buffer = &priv->pucan_cmd; 597 + priv->ucan.cmd_maxlen = sizeof(priv->pucan_cmd); 598 + 599 + priv->board = pciefd; 600 + 601 + /* CAN config regs block address */ 602 + priv->reg_base = pciefd->reg_base + PCIEFD_CANX_OFF(priv->ucan.index); 603 + 604 + /* allocate non-cacheable DMA'able 4KB memory area for Rx */ 605 + priv->rx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev, 606 + PCIEFD_RX_DMA_SIZE, 607 + &priv->rx_dma_laddr, 608 + GFP_KERNEL); 609 + if (!priv->rx_dma_vaddr) { 610 + dev_err(&pciefd->pci_dev->dev, 611 + "Rx dmam_alloc_coherent(%u) failure\n", 612 + PCIEFD_RX_DMA_SIZE); 613 + goto err_free_candev; 614 + } 615 + 616 + /* allocate non-cacheable DMA'able 4KB memory area for Tx */ 617 + priv->tx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev, 618 + PCIEFD_TX_DMA_SIZE, 619 + &priv->tx_dma_laddr, 620 + GFP_KERNEL); 621 + if (!priv->tx_dma_vaddr) { 622 + dev_err(&pciefd->pci_dev->dev, 623 + "Tx dmaim_alloc_coherent(%u) failure\n", 624 + PCIEFD_TX_DMA_SIZE); 625 + goto err_free_candev; 626 + } 627 + 628 + /* CAN clock in RST mode */ 629 + pciefd_can_writereg(priv, CANFD_MISC_TS_RST, PCIEFD_REG_CAN_MISC); 630 + 631 + /* read current clock value */ 632 + clk = pciefd_can_readreg(priv, 
PCIEFD_REG_CAN_CLK_SEL); 633 + switch (clk) { 634 + case CANFD_CLK_SEL_20MHZ: 635 + priv->ucan.can.clock.freq = 20 * 1000 * 1000; 636 + break; 637 + case CANFD_CLK_SEL_24MHZ: 638 + priv->ucan.can.clock.freq = 24 * 1000 * 1000; 639 + break; 640 + case CANFD_CLK_SEL_30MHZ: 641 + priv->ucan.can.clock.freq = 30 * 1000 * 1000; 642 + break; 643 + case CANFD_CLK_SEL_40MHZ: 644 + priv->ucan.can.clock.freq = 40 * 1000 * 1000; 645 + break; 646 + case CANFD_CLK_SEL_60MHZ: 647 + priv->ucan.can.clock.freq = 60 * 1000 * 1000; 648 + break; 649 + default: 650 + pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, 651 + PCIEFD_REG_CAN_CLK_SEL); 652 + 653 + /* fallthough */ 654 + case CANFD_CLK_SEL_80MHZ: 655 + priv->ucan.can.clock.freq = 80 * 1000 * 1000; 656 + break; 657 + } 658 + 659 + ndev->irq = pciefd->pci_dev->irq; 660 + 661 + SET_NETDEV_DEV(ndev, &pciefd->pci_dev->dev); 662 + 663 + err = register_candev(ndev); 664 + if (err) { 665 + dev_err(&pciefd->pci_dev->dev, 666 + "couldn't register CAN device: %d\n", err); 667 + goto err_free_candev; 668 + } 669 + 670 + spin_lock_init(&priv->tx_lock); 671 + 672 + /* save the object address in the board structure */ 673 + pciefd->can[pciefd->can_count] = priv; 674 + 675 + dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n", 676 + ndev->name, priv->reg_base, pciefd->pci_dev->irq); 677 + 678 + return 0; 679 + 680 + err_free_candev: 681 + free_candev(ndev); 682 + 683 + failure: 684 + return -ENOMEM; 685 + } 686 + 687 + /* remove a CAN-FD channel by releasing all of its resources */ 688 + static void pciefd_can_remove(struct pciefd_can *priv) 689 + { 690 + /* unregister (close) the can device to go back to RST mode first */ 691 + unregister_candev(priv->ucan.ndev); 692 + 693 + /* finally, free the candev object */ 694 + free_candev(priv->ucan.ndev); 695 + } 696 + 697 + /* remove all CAN-FD channels by releasing their own resources */ 698 + static void pciefd_can_remove_all(struct pciefd_board *pciefd) 699 + { 700 + while 
(pciefd->can_count > 0) 701 + pciefd_can_remove(pciefd->can[--pciefd->can_count]); 702 + } 703 + 704 + /* probe for the entire device */ 705 + static int peak_pciefd_probe(struct pci_dev *pdev, 706 + const struct pci_device_id *ent) 707 + { 708 + struct pciefd_board *pciefd; 709 + int err, can_count; 710 + u16 sub_sys_id; 711 + u8 hw_ver_major; 712 + u8 hw_ver_minor; 713 + u8 hw_ver_sub; 714 + u32 v2; 715 + 716 + err = pci_enable_device(pdev); 717 + if (err) 718 + return err; 719 + err = pci_request_regions(pdev, PCIEFD_DRV_NAME); 720 + if (err) 721 + goto err_disable_pci; 722 + 723 + /* the number of channels depends on sub-system id */ 724 + err = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sub_sys_id); 725 + if (err) 726 + goto err_release_regions; 727 + 728 + dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n", 729 + pdev->vendor, pdev->device, sub_sys_id); 730 + 731 + if (sub_sys_id >= 0x0012) 732 + can_count = 4; 733 + else if (sub_sys_id >= 0x0010) 734 + can_count = 3; 735 + else if (sub_sys_id >= 0x0004) 736 + can_count = 2; 737 + else 738 + can_count = 1; 739 + 740 + /* allocate board structure object */ 741 + pciefd = devm_kzalloc(&pdev->dev, sizeof(*pciefd) + 742 + can_count * sizeof(*pciefd->can), 743 + GFP_KERNEL); 744 + if (!pciefd) { 745 + err = -ENOMEM; 746 + goto err_release_regions; 747 + } 748 + 749 + /* initialize the board structure */ 750 + pciefd->pci_dev = pdev; 751 + spin_lock_init(&pciefd->cmd_lock); 752 + 753 + /* save the PCI BAR0 virtual address for further system regs access */ 754 + pciefd->reg_base = pci_iomap(pdev, 0, PCIEFD_BAR0_SIZE); 755 + if (!pciefd->reg_base) { 756 + dev_err(&pdev->dev, "failed to map PCI resource #0\n"); 757 + err = -ENOMEM; 758 + goto err_release_regions; 759 + } 760 + 761 + /* read the firmware version number */ 762 + v2 = pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER2); 763 + 764 + hw_ver_major = (v2 & 0x0000f000) >> 12; 765 + hw_ver_minor = (v2 & 0x00000f00) >> 8; 766 + hw_ver_sub = (v2 & 0x000000f0) 
>> 4; 767 + 768 + dev_info(&pdev->dev, 769 + "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, 770 + hw_ver_major, hw_ver_minor, hw_ver_sub); 771 + 772 + /* stop system clock */ 773 + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, 774 + PCIEFD_REG_SYS_CTL_CLR); 775 + 776 + pci_set_master(pdev); 777 + 778 + /* create now the corresponding channels objects */ 779 + while (pciefd->can_count < can_count) { 780 + err = pciefd_can_probe(pciefd); 781 + if (err) 782 + goto err_free_canfd; 783 + 784 + pciefd->can_count++; 785 + } 786 + 787 + /* set system timestamps counter in RST mode */ 788 + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST, 789 + PCIEFD_REG_SYS_CTL_SET); 790 + 791 + /* wait a bit (read cycle) */ 792 + (void)pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER1); 793 + 794 + /* free all clocks */ 795 + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST, 796 + PCIEFD_REG_SYS_CTL_CLR); 797 + 798 + /* start system clock */ 799 + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, 800 + PCIEFD_REG_SYS_CTL_SET); 801 + 802 + /* remember the board structure address in the device user data */ 803 + pci_set_drvdata(pdev, pciefd); 804 + 805 + return 0; 806 + 807 + err_free_canfd: 808 + pciefd_can_remove_all(pciefd); 809 + 810 + pci_iounmap(pdev, pciefd->reg_base); 811 + 812 + err_release_regions: 813 + pci_release_regions(pdev); 814 + 815 + err_disable_pci: 816 + pci_disable_device(pdev); 817 + 818 + return err; 819 + } 820 + 821 + /* free the board structure object, as well as its resources: */ 822 + static void peak_pciefd_remove(struct pci_dev *pdev) 823 + { 824 + struct pciefd_board *pciefd = pci_get_drvdata(pdev); 825 + 826 + /* release CAN-FD channels resources */ 827 + pciefd_can_remove_all(pciefd); 828 + 829 + pci_iounmap(pdev, pciefd->reg_base); 830 + 831 + pci_release_regions(pdev); 832 + pci_disable_device(pdev); 833 + } 834 + 835 + static struct pci_driver peak_pciefd_driver = { 836 + .name = PCIEFD_DRV_NAME, 837 + .id_table = peak_pciefd_tbl, 838 + 
.probe = peak_pciefd_probe, 839 + .remove = peak_pciefd_remove, 840 + }; 841 + 842 + module_pci_driver(peak_pciefd_driver);
+6 -6
drivers/net/can/ti_hecc.c
··· 898 898 } 899 899 900 900 priv->base = devm_ioremap_resource(&pdev->dev, res); 901 - if (!priv->base) { 901 + if (IS_ERR(priv->base)) { 902 902 dev_err(&pdev->dev, "hecc ioremap failed\n"); 903 - return -ENOMEM; 903 + return PTR_ERR(priv->base); 904 904 } 905 905 906 906 /* handle hecc-ram memory */ ··· 911 911 } 912 912 913 913 priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res); 914 - if (!priv->hecc_ram) { 914 + if (IS_ERR(priv->hecc_ram)) { 915 915 dev_err(&pdev->dev, "hecc-ram ioremap failed\n"); 916 - return -ENOMEM; 916 + return PTR_ERR(priv->hecc_ram); 917 917 } 918 918 919 919 /* handle mbx memory */ ··· 924 924 } 925 925 926 926 priv->mbx = devm_ioremap_resource(&pdev->dev, res); 927 - if (!priv->mbx) { 927 + if (IS_ERR(priv->mbx)) { 928 928 dev_err(&pdev->dev, "mbx ioremap failed\n"); 929 - return -ENOMEM; 929 + return PTR_ERR(priv->mbx); 930 930 } 931 931 932 932 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+6
drivers/net/can/usb/Kconfig
··· 81 81 This driver supports the USB2CAN interface 82 82 from 8 devices (http://www.8devices.com). 83 83 84 + config CAN_MCBA_USB 85 + tristate "Microchip CAN BUS Analyzer interface" 86 + ---help--- 87 + This driver supports the CAN BUS Analyzer interface 88 + from Microchip (http://www.microchip.com/development-tools/). 89 + 84 90 endmenu
+1
drivers/net/can/usb/Makefile
··· 8 8 obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o 9 9 obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ 10 10 obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o 11 + obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
+904
drivers/net/can/usb/mcba_usb.c
··· 1 + /* SocketCAN driver for Microchip CAN BUS Analyzer Tool 2 + * 3 + * Copyright (C) 2017 Mobica Limited 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published 7 + * by the Free Software Foundation; version 2 of the License. 8 + * 9 + * This program is distributed in the hope that it will be useful, but 10 + * WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 + * General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along 15 + * with this program. 16 + * 17 + * This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c 18 + */ 19 + 20 + #include <asm/unaligned.h> 21 + #include <linux/can.h> 22 + #include <linux/can/dev.h> 23 + #include <linux/can/error.h> 24 + #include <linux/can/led.h> 25 + #include <linux/module.h> 26 + #include <linux/netdevice.h> 27 + #include <linux/signal.h> 28 + #include <linux/slab.h> 29 + #include <linux/usb.h> 30 + 31 + /* vendor and product id */ 32 + #define MCBA_MODULE_NAME "mcba_usb" 33 + #define MCBA_VENDOR_ID 0x04d8 34 + #define MCBA_PRODUCT_ID 0x0a30 35 + 36 + /* driver constants */ 37 + #define MCBA_MAX_RX_URBS 20 38 + #define MCBA_MAX_TX_URBS 20 39 + #define MCBA_CTX_FREE MCBA_MAX_TX_URBS 40 + 41 + /* RX buffer must be bigger than msg size since at the 42 + * beggining USB messages are stacked. 
43 + */ 44 + #define MCBA_USB_RX_BUFF_SIZE 64 45 + #define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg)) 46 + 47 + /* MCBA endpoint numbers */ 48 + #define MCBA_USB_EP_IN 1 49 + #define MCBA_USB_EP_OUT 1 50 + 51 + /* Microchip command id */ 52 + #define MBCA_CMD_RECEIVE_MESSAGE 0xE3 53 + #define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5 54 + #define MBCA_CMD_I_AM_ALIVE_FROM_USB 0xF7 55 + #define MBCA_CMD_CHANGE_BIT_RATE 0xA1 56 + #define MBCA_CMD_TRANSMIT_MESSAGE_EV 0xA3 57 + #define MBCA_CMD_SETUP_TERMINATION_RESISTANCE 0xA8 58 + #define MBCA_CMD_READ_FW_VERSION 0xA9 59 + #define MBCA_CMD_NOTHING_TO_SEND 0xFF 60 + #define MBCA_CMD_TRANSMIT_MESSAGE_RSP 0xE2 61 + 62 + #define MCBA_VER_REQ_USB 1 63 + #define MCBA_VER_REQ_CAN 2 64 + 65 + #define MCBA_SIDL_EXID_MASK 0x8 66 + #define MCBA_DLC_MASK 0xf 67 + #define MCBA_DLC_RTR_MASK 0x40 68 + 69 + #define MCBA_CAN_STATE_WRN_TH 95 70 + #define MCBA_CAN_STATE_ERR_PSV_TH 127 71 + 72 + #define MCBA_TERMINATION_DISABLED CAN_TERMINATION_DISABLED 73 + #define MCBA_TERMINATION_ENABLED 120 74 + 75 + struct mcba_usb_ctx { 76 + struct mcba_priv *priv; 77 + u32 ndx; 78 + u8 dlc; 79 + bool can; 80 + }; 81 + 82 + /* Structure to hold all of our device specific stuff */ 83 + struct mcba_priv { 84 + struct can_priv can; /* must be the first member */ 85 + struct sk_buff *echo_skb[MCBA_MAX_TX_URBS]; 86 + struct mcba_usb_ctx tx_context[MCBA_MAX_TX_URBS]; 87 + struct usb_device *udev; 88 + struct net_device *netdev; 89 + struct usb_anchor tx_submitted; 90 + struct usb_anchor rx_submitted; 91 + struct can_berr_counter bec; 92 + bool usb_ka_first_pass; 93 + bool can_ka_first_pass; 94 + bool can_speed_check; 95 + atomic_t free_ctx_cnt; 96 + }; 97 + 98 + /* CAN frame */ 99 + struct __packed mcba_usb_msg_can { 100 + u8 cmd_id; 101 + __be16 eid; 102 + __be16 sid; 103 + u8 dlc; 104 + u8 data[8]; 105 + u8 timestamp[4]; 106 + u8 checksum; 107 + }; 108 + 109 + /* command frame */ 110 + struct __packed mcba_usb_msg { 111 + u8 cmd_id; 112 + u8 
unused[18]; 113 + }; 114 + 115 + struct __packed mcba_usb_msg_ka_usb { 116 + u8 cmd_id; 117 + u8 termination_state; 118 + u8 soft_ver_major; 119 + u8 soft_ver_minor; 120 + u8 unused[15]; 121 + }; 122 + 123 + struct __packed mcba_usb_msg_ka_can { 124 + u8 cmd_id; 125 + u8 tx_err_cnt; 126 + u8 rx_err_cnt; 127 + u8 rx_buff_ovfl; 128 + u8 tx_bus_off; 129 + __be16 can_bitrate; 130 + __le16 rx_lost; 131 + u8 can_stat; 132 + u8 soft_ver_major; 133 + u8 soft_ver_minor; 134 + u8 debug_mode; 135 + u8 test_complete; 136 + u8 test_result; 137 + u8 unused[4]; 138 + }; 139 + 140 + struct __packed mcba_usb_msg_change_bitrate { 141 + u8 cmd_id; 142 + __be16 bitrate; 143 + u8 unused[16]; 144 + }; 145 + 146 + struct __packed mcba_usb_msg_termination { 147 + u8 cmd_id; 148 + u8 termination; 149 + u8 unused[17]; 150 + }; 151 + 152 + struct __packed mcba_usb_msg_fw_ver { 153 + u8 cmd_id; 154 + u8 pic; 155 + u8 unused[17]; 156 + }; 157 + 158 + static const struct usb_device_id mcba_usb_table[] = { 159 + { USB_DEVICE(MCBA_VENDOR_ID, MCBA_PRODUCT_ID) }, 160 + {} /* Terminating entry */ 161 + }; 162 + 163 + MODULE_DEVICE_TABLE(usb, mcba_usb_table); 164 + 165 + static const u16 mcba_termination[] = { MCBA_TERMINATION_DISABLED, 166 + MCBA_TERMINATION_ENABLED }; 167 + 168 + static const u32 mcba_bitrate[] = { 20000, 33333, 50000, 80000, 83333, 169 + 100000, 125000, 150000, 175000, 200000, 170 + 225000, 250000, 275000, 300000, 500000, 171 + 625000, 800000, 1000000 }; 172 + 173 + static inline void mcba_init_ctx(struct mcba_priv *priv) 174 + { 175 + int i = 0; 176 + 177 + for (i = 0; i < MCBA_MAX_TX_URBS; i++) { 178 + priv->tx_context[i].ndx = MCBA_CTX_FREE; 179 + priv->tx_context[i].priv = priv; 180 + } 181 + 182 + atomic_set(&priv->free_ctx_cnt, ARRAY_SIZE(priv->tx_context)); 183 + } 184 + 185 + static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv, 186 + struct can_frame *cf) 187 + { 188 + int i = 0; 189 + struct mcba_usb_ctx *ctx = NULL; 190 + 191 + for (i = 0; i < 
MCBA_MAX_TX_URBS; i++) { 192 + if (priv->tx_context[i].ndx == MCBA_CTX_FREE) { 193 + ctx = &priv->tx_context[i]; 194 + ctx->ndx = i; 195 + 196 + if (cf) { 197 + ctx->can = true; 198 + ctx->dlc = cf->can_dlc; 199 + } else { 200 + ctx->can = false; 201 + ctx->dlc = 0; 202 + } 203 + 204 + atomic_dec(&priv->free_ctx_cnt); 205 + break; 206 + } 207 + } 208 + 209 + if (!atomic_read(&priv->free_ctx_cnt)) 210 + /* That was the last free ctx. Slow down tx path */ 211 + netif_stop_queue(priv->netdev); 212 + 213 + return ctx; 214 + } 215 + 216 + /* mcba_usb_free_ctx and mcba_usb_get_free_ctx are executed by different 217 + * threads. The order of execution in below function is important. 218 + */ 219 + static inline void mcba_usb_free_ctx(struct mcba_usb_ctx *ctx) 220 + { 221 + /* Increase number of free ctxs before freeing ctx */ 222 + atomic_inc(&ctx->priv->free_ctx_cnt); 223 + 224 + ctx->ndx = MCBA_CTX_FREE; 225 + 226 + /* Wake up the queue once ctx is marked free */ 227 + netif_wake_queue(ctx->priv->netdev); 228 + } 229 + 230 + static void mcba_usb_write_bulk_callback(struct urb *urb) 231 + { 232 + struct mcba_usb_ctx *ctx = urb->context; 233 + struct net_device *netdev; 234 + 235 + WARN_ON(!ctx); 236 + 237 + netdev = ctx->priv->netdev; 238 + 239 + /* free up our allocated buffer */ 240 + usb_free_coherent(urb->dev, urb->transfer_buffer_length, 241 + urb->transfer_buffer, urb->transfer_dma); 242 + 243 + if (ctx->can) { 244 + if (!netif_device_present(netdev)) 245 + return; 246 + 247 + netdev->stats.tx_packets++; 248 + netdev->stats.tx_bytes += ctx->dlc; 249 + 250 + can_led_event(netdev, CAN_LED_EVENT_TX); 251 + can_get_echo_skb(netdev, ctx->ndx); 252 + } 253 + 254 + if (urb->status) 255 + netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); 256 + 257 + /* Release the context */ 258 + mcba_usb_free_ctx(ctx); 259 + } 260 + 261 + /* Send data to device */ 262 + static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv, 263 + struct mcba_usb_msg *usb_msg, 264 + struct 
mcba_usb_ctx *ctx) 265 + { 266 + struct urb *urb; 267 + u8 *buf; 268 + int err; 269 + 270 + /* create a URB, and a buffer for it, and copy the data to the URB */ 271 + urb = usb_alloc_urb(0, GFP_ATOMIC); 272 + if (!urb) 273 + return -ENOMEM; 274 + 275 + buf = usb_alloc_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, GFP_ATOMIC, 276 + &urb->transfer_dma); 277 + if (!buf) { 278 + err = -ENOMEM; 279 + goto nomembuf; 280 + } 281 + 282 + memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE); 283 + 284 + usb_fill_bulk_urb(urb, priv->udev, 285 + usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf, 286 + MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback, 287 + ctx); 288 + 289 + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 290 + usb_anchor_urb(urb, &priv->tx_submitted); 291 + 292 + err = usb_submit_urb(urb, GFP_ATOMIC); 293 + if (unlikely(err)) 294 + goto failed; 295 + 296 + /* Release our reference to this URB, the USB core will eventually free 297 + * it entirely. 298 + */ 299 + usb_free_urb(urb); 300 + 301 + return 0; 302 + 303 + failed: 304 + usb_unanchor_urb(urb); 305 + usb_free_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, buf, 306 + urb->transfer_dma); 307 + 308 + if (err == -ENODEV) 309 + netif_device_detach(priv->netdev); 310 + else 311 + netdev_warn(priv->netdev, "failed tx_urb %d\n", err); 312 + 313 + nomembuf: 314 + usb_free_urb(urb); 315 + 316 + return err; 317 + } 318 + 319 + /* Send data to device */ 320 + static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, 321 + struct net_device *netdev) 322 + { 323 + struct mcba_priv *priv = netdev_priv(netdev); 324 + struct can_frame *cf = (struct can_frame *)skb->data; 325 + struct mcba_usb_ctx *ctx = NULL; 326 + struct net_device_stats *stats = &priv->netdev->stats; 327 + u16 sid; 328 + int err; 329 + struct mcba_usb_msg_can usb_msg = { 330 + .cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV 331 + }; 332 + 333 + if (can_dropped_invalid_skb(netdev, skb)) 334 + return NETDEV_TX_OK; 335 + 336 + ctx = mcba_usb_get_free_ctx(priv, cf); 337 + 
if (!ctx) 338 + return NETDEV_TX_BUSY; 339 + 340 + can_put_echo_skb(skb, priv->netdev, ctx->ndx); 341 + 342 + if (cf->can_id & CAN_EFF_FLAG) { 343 + /* SIDH | SIDL | EIDH | EIDL 344 + * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0 345 + */ 346 + sid = MCBA_SIDL_EXID_MASK; 347 + /* store 28-18 bits */ 348 + sid |= (cf->can_id & 0x1ffc0000) >> 13; 349 + /* store 17-16 bits */ 350 + sid |= (cf->can_id & 0x30000) >> 16; 351 + put_unaligned_be16(sid, &usb_msg.sid); 352 + 353 + /* store 15-0 bits */ 354 + put_unaligned_be16(cf->can_id & 0xffff, &usb_msg.eid); 355 + } else { 356 + /* SIDH | SIDL 357 + * 10 - 3 | 2 1 0 x x x x x 358 + */ 359 + put_unaligned_be16((cf->can_id & CAN_SFF_MASK) << 5, 360 + &usb_msg.sid); 361 + usb_msg.eid = 0; 362 + } 363 + 364 + usb_msg.dlc = cf->can_dlc; 365 + 366 + memcpy(usb_msg.data, cf->data, usb_msg.dlc); 367 + 368 + if (cf->can_id & CAN_RTR_FLAG) 369 + usb_msg.dlc |= MCBA_DLC_RTR_MASK; 370 + 371 + err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx); 372 + if (err) 373 + goto xmit_failed; 374 + 375 + return NETDEV_TX_OK; 376 + 377 + xmit_failed: 378 + can_free_echo_skb(priv->netdev, ctx->ndx); 379 + mcba_usb_free_ctx(ctx); 380 + dev_kfree_skb(skb); 381 + stats->tx_dropped++; 382 + 383 + return NETDEV_TX_OK; 384 + } 385 + 386 + /* Send cmd to device */ 387 + static void mcba_usb_xmit_cmd(struct mcba_priv *priv, 388 + struct mcba_usb_msg *usb_msg) 389 + { 390 + struct mcba_usb_ctx *ctx = NULL; 391 + int err; 392 + 393 + ctx = mcba_usb_get_free_ctx(priv, NULL); 394 + if (!ctx) { 395 + netdev_err(priv->netdev, 396 + "Lack of free ctx. 
Sending (%d) cmd aborted", 397 + usb_msg->cmd_id); 398 + 399 + return; 400 + } 401 + 402 + err = mcba_usb_xmit(priv, usb_msg, ctx); 403 + if (err) 404 + netdev_err(priv->netdev, "Failed to send cmd (%d)", 405 + usb_msg->cmd_id); 406 + } 407 + 408 + static void mcba_usb_xmit_change_bitrate(struct mcba_priv *priv, u16 bitrate) 409 + { 410 + struct mcba_usb_msg_change_bitrate usb_msg = { 411 + .cmd_id = MBCA_CMD_CHANGE_BIT_RATE 412 + }; 413 + 414 + put_unaligned_be16(bitrate, &usb_msg.bitrate); 415 + 416 + mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); 417 + } 418 + 419 + static void mcba_usb_xmit_read_fw_ver(struct mcba_priv *priv, u8 pic) 420 + { 421 + struct mcba_usb_msg_fw_ver usb_msg = { 422 + .cmd_id = MBCA_CMD_READ_FW_VERSION, 423 + .pic = pic 424 + }; 425 + 426 + mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); 427 + } 428 + 429 + static void mcba_usb_process_can(struct mcba_priv *priv, 430 + struct mcba_usb_msg_can *msg) 431 + { 432 + struct can_frame *cf; 433 + struct sk_buff *skb; 434 + struct net_device_stats *stats = &priv->netdev->stats; 435 + u16 sid; 436 + 437 + skb = alloc_can_skb(priv->netdev, &cf); 438 + if (!skb) 439 + return; 440 + 441 + sid = get_unaligned_be16(&msg->sid); 442 + 443 + if (sid & MCBA_SIDL_EXID_MASK) { 444 + /* SIDH | SIDL | EIDH | EIDL 445 + * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0 446 + */ 447 + cf->can_id = CAN_EFF_FLAG; 448 + 449 + /* store 28-18 bits */ 450 + cf->can_id |= (sid & 0xffe0) << 13; 451 + /* store 17-16 bits */ 452 + cf->can_id |= (sid & 3) << 16; 453 + /* store 15-0 bits */ 454 + cf->can_id |= get_unaligned_be16(&msg->eid); 455 + } else { 456 + /* SIDH | SIDL 457 + * 10 - 3 | 2 1 0 x x x x x 458 + */ 459 + cf->can_id = (sid & 0xffe0) >> 5; 460 + } 461 + 462 + if (msg->dlc & MCBA_DLC_RTR_MASK) 463 + cf->can_id |= CAN_RTR_FLAG; 464 + 465 + cf->can_dlc = get_can_dlc(msg->dlc & MCBA_DLC_MASK); 466 + 467 + memcpy(cf->data, msg->data, cf->can_dlc); 468 + 469 + stats->rx_packets++; 470 + 
stats->rx_bytes += cf->can_dlc; 471 + 472 + can_led_event(priv->netdev, CAN_LED_EVENT_RX); 473 + netif_rx(skb); 474 + } 475 + 476 + static void mcba_usb_process_ka_usb(struct mcba_priv *priv, 477 + struct mcba_usb_msg_ka_usb *msg) 478 + { 479 + if (unlikely(priv->usb_ka_first_pass)) { 480 + netdev_info(priv->netdev, "PIC USB version %hhu.%hhu\n", 481 + msg->soft_ver_major, msg->soft_ver_minor); 482 + 483 + priv->usb_ka_first_pass = false; 484 + } 485 + 486 + if (msg->termination_state) 487 + priv->can.termination = MCBA_TERMINATION_ENABLED; 488 + else 489 + priv->can.termination = MCBA_TERMINATION_DISABLED; 490 + } 491 + 492 + static u32 convert_can2host_bitrate(struct mcba_usb_msg_ka_can *msg) 493 + { 494 + const u32 bitrate = get_unaligned_be16(&msg->can_bitrate); 495 + 496 + if ((bitrate == 33) || (bitrate == 83)) 497 + return bitrate * 1000 + 333; 498 + else 499 + return bitrate * 1000; 500 + } 501 + 502 + static void mcba_usb_process_ka_can(struct mcba_priv *priv, 503 + struct mcba_usb_msg_ka_can *msg) 504 + { 505 + if (unlikely(priv->can_ka_first_pass)) { 506 + netdev_info(priv->netdev, "PIC CAN version %hhu.%hhu\n", 507 + msg->soft_ver_major, msg->soft_ver_minor); 508 + 509 + priv->can_ka_first_pass = false; 510 + } 511 + 512 + if (unlikely(priv->can_speed_check)) { 513 + const u32 bitrate = convert_can2host_bitrate(msg); 514 + 515 + priv->can_speed_check = false; 516 + 517 + if (bitrate != priv->can.bittiming.bitrate) 518 + netdev_err( 519 + priv->netdev, 520 + "Wrong bitrate reported by the device (%u). 
Expected %u", 521 + bitrate, priv->can.bittiming.bitrate); 522 + } 523 + 524 + priv->bec.txerr = msg->tx_err_cnt; 525 + priv->bec.rxerr = msg->rx_err_cnt; 526 + 527 + if (msg->tx_bus_off) 528 + priv->can.state = CAN_STATE_BUS_OFF; 529 + 530 + else if ((priv->bec.txerr > MCBA_CAN_STATE_ERR_PSV_TH) || 531 + (priv->bec.rxerr > MCBA_CAN_STATE_ERR_PSV_TH)) 532 + priv->can.state = CAN_STATE_ERROR_PASSIVE; 533 + 534 + else if ((priv->bec.txerr > MCBA_CAN_STATE_WRN_TH) || 535 + (priv->bec.rxerr > MCBA_CAN_STATE_WRN_TH)) 536 + priv->can.state = CAN_STATE_ERROR_WARNING; 537 + } 538 + 539 + static void mcba_usb_process_rx(struct mcba_priv *priv, 540 + struct mcba_usb_msg *msg) 541 + { 542 + switch (msg->cmd_id) { 543 + case MBCA_CMD_I_AM_ALIVE_FROM_CAN: 544 + mcba_usb_process_ka_can(priv, 545 + (struct mcba_usb_msg_ka_can *)msg); 546 + break; 547 + 548 + case MBCA_CMD_I_AM_ALIVE_FROM_USB: 549 + mcba_usb_process_ka_usb(priv, 550 + (struct mcba_usb_msg_ka_usb *)msg); 551 + break; 552 + 553 + case MBCA_CMD_RECEIVE_MESSAGE: 554 + mcba_usb_process_can(priv, (struct mcba_usb_msg_can *)msg); 555 + break; 556 + 557 + case MBCA_CMD_NOTHING_TO_SEND: 558 + /* Side effect of communication between PIC_USB and PIC_CAN. 559 + * PIC_CAN is telling us that it has nothing to send 560 + */ 561 + break; 562 + 563 + case MBCA_CMD_TRANSMIT_MESSAGE_RSP: 564 + /* Transmission response from the device containing timestamp */ 565 + break; 566 + 567 + default: 568 + netdev_warn(priv->netdev, "Unsupported msg (0x%hhX)", 569 + msg->cmd_id); 570 + break; 571 + } 572 + } 573 + 574 + /* Callback for reading data from device 575 + * 576 + * Check urb status, call read function and resubmit urb read operation. 
577 + */ 578 + static void mcba_usb_read_bulk_callback(struct urb *urb) 579 + { 580 + 	struct mcba_priv *priv = urb->context; 581 + 	struct net_device *netdev; 582 + 	int retval; 583 + 	int pos = 0; 584 + 
 585 + 	netdev = priv->netdev; 586 + 
 587 + 	if (!netif_device_present(netdev)) 588 + 		return; 589 + 
 590 + 	switch (urb->status) { 591 + 	case 0: /* success */ 592 + 		break; 593 + 
 594 + 	case -ENOENT: 595 + 	case -ESHUTDOWN: 596 + 		return; 597 + 
 598 + 	default: 599 + 		netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status); 600 + 
 601 + 		goto resubmit_urb; 602 + 	} 603 + 
 604 + 	while (pos < urb->actual_length) { 605 + 		struct mcba_usb_msg *msg; 606 + 
 607 + 		if (pos + sizeof(struct mcba_usb_msg) > urb->actual_length) { 608 + 			netdev_err(priv->netdev, "format error\n"); 609 + 			break; 610 + 		} 611 + 
 612 + 		msg = (struct mcba_usb_msg *)(urb->transfer_buffer + pos); 613 + 		mcba_usb_process_rx(priv, msg); 614 + 
 615 + 		pos += sizeof(struct mcba_usb_msg); 616 + 	} 617 + 
 618 + resubmit_urb: 619 + 	/* Resubmit on the IN endpoint, consistent with the initial submission
 620 + 	 * in mcba_usb_start(); MCBA_USB_EP_OUT only worked here by accident
 621 + 	 * because both endpoint numbers happen to be 1.
 622 + 	 */ 623 + 	usb_fill_bulk_urb(urb, priv->udev, 624 + 			  usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), 625 + 			  urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE, 626 + 			  mcba_usb_read_bulk_callback, priv); 627 + 
 628 + 	retval = usb_submit_urb(urb, GFP_ATOMIC); 629 + 
 630 + 	if (retval == -ENODEV) 631 + 		netif_device_detach(netdev); 632 + 	else if (retval) 633 + 		netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", 634 + 			   retval); 635 + } 636 + 
 637 + /* Start USB device */ 638 + static int mcba_usb_start(struct mcba_priv *priv) 639 + { 640 + 	struct net_device *netdev = priv->netdev; 641 + 	int err, i; 642 + 
 643 + 	mcba_init_ctx(priv); 644 + 
 645 + 	for (i = 0; i < MCBA_MAX_RX_URBS; i++) { 646 + 		struct urb *urb = NULL; 647 + 		u8 *buf; 648 + 
 649 + 		/* create a URB, and a buffer for it */ 650 + 		urb = usb_alloc_urb(0, GFP_KERNEL); 651 + 		if (!urb) { 652 + 			err = -ENOMEM; 653 + 			break; 654 + 		} 655 + 
 656 + 		buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, 657 + 					 GFP_KERNEL, &urb->transfer_dma); 658 + 		if (!buf) { 659 + 
netdev_err(netdev, "No memory left for USB buffer\n"); 657 + usb_free_urb(urb); 658 + err = -ENOMEM; 659 + break; 660 + } 661 + 662 + usb_fill_bulk_urb(urb, priv->udev, 663 + usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), 664 + buf, MCBA_USB_RX_BUFF_SIZE, 665 + mcba_usb_read_bulk_callback, priv); 666 + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 667 + usb_anchor_urb(urb, &priv->rx_submitted); 668 + 669 + err = usb_submit_urb(urb, GFP_KERNEL); 670 + if (err) { 671 + usb_unanchor_urb(urb); 672 + usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, 673 + buf, urb->transfer_dma); 674 + usb_free_urb(urb); 675 + break; 676 + } 677 + 678 + /* Drop reference, USB core will take care of freeing it */ 679 + usb_free_urb(urb); 680 + } 681 + 682 + /* Did we submit any URBs */ 683 + if (i == 0) { 684 + netdev_warn(netdev, "couldn't setup read URBs\n"); 685 + return err; 686 + } 687 + 688 + /* Warn if we've couldn't transmit all the URBs */ 689 + if (i < MCBA_MAX_RX_URBS) 690 + netdev_warn(netdev, "rx performance may be slow\n"); 691 + 692 + mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_USB); 693 + mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_CAN); 694 + 695 + return err; 696 + } 697 + 698 + /* Open USB device */ 699 + static int mcba_usb_open(struct net_device *netdev) 700 + { 701 + struct mcba_priv *priv = netdev_priv(netdev); 702 + int err; 703 + 704 + /* common open */ 705 + err = open_candev(netdev); 706 + if (err) 707 + return err; 708 + 709 + priv->can_speed_check = true; 710 + priv->can.state = CAN_STATE_ERROR_ACTIVE; 711 + 712 + can_led_event(netdev, CAN_LED_EVENT_OPEN); 713 + netif_start_queue(netdev); 714 + 715 + return 0; 716 + } 717 + 718 + static void mcba_urb_unlink(struct mcba_priv *priv) 719 + { 720 + usb_kill_anchored_urbs(&priv->rx_submitted); 721 + usb_kill_anchored_urbs(&priv->tx_submitted); 722 + } 723 + 724 + /* Close USB device */ 725 + static int mcba_usb_close(struct net_device *netdev) 726 + { 727 + struct mcba_priv *priv = netdev_priv(netdev); 728 + 
729 + priv->can.state = CAN_STATE_STOPPED; 730 + 731 + netif_stop_queue(netdev); 732 + 733 + /* Stop polling */ 734 + mcba_urb_unlink(priv); 735 + 736 + close_candev(netdev); 737 + can_led_event(netdev, CAN_LED_EVENT_STOP); 738 + 739 + return 0; 740 + } 741 + 742 + /* Set network device mode 743 + * 744 + * Maybe we should leave this function empty, because the device 745 + * set mode variable with open command. 746 + */ 747 + static int mcba_net_set_mode(struct net_device *netdev, enum can_mode mode) 748 + { 749 + return 0; 750 + } 751 + 752 + static int mcba_net_get_berr_counter(const struct net_device *netdev, 753 + struct can_berr_counter *bec) 754 + { 755 + struct mcba_priv *priv = netdev_priv(netdev); 756 + 757 + bec->txerr = priv->bec.txerr; 758 + bec->rxerr = priv->bec.rxerr; 759 + 760 + return 0; 761 + } 762 + 763 + static const struct net_device_ops mcba_netdev_ops = { 764 + .ndo_open = mcba_usb_open, 765 + .ndo_stop = mcba_usb_close, 766 + .ndo_start_xmit = mcba_usb_start_xmit, 767 + }; 768 + 769 + /* Microchip CANBUS has hardcoded bittiming values by default. 
770 + * This function sends request via USB to change the speed and align bittiming 771 + * values for presentation purposes only 772 + */ 773 + static int mcba_net_set_bittiming(struct net_device *netdev) 774 + { 775 + struct mcba_priv *priv = netdev_priv(netdev); 776 + const u16 bitrate_kbps = priv->can.bittiming.bitrate / 1000; 777 + 778 + mcba_usb_xmit_change_bitrate(priv, bitrate_kbps); 779 + 780 + return 0; 781 + } 782 + 783 + static int mcba_set_termination(struct net_device *netdev, u16 term) 784 + { 785 + struct mcba_priv *priv = netdev_priv(netdev); 786 + struct mcba_usb_msg_termination usb_msg = { 787 + .cmd_id = MBCA_CMD_SETUP_TERMINATION_RESISTANCE 788 + }; 789 + 790 + if (term == MCBA_TERMINATION_ENABLED) 791 + usb_msg.termination = 1; 792 + else 793 + usb_msg.termination = 0; 794 + 795 + mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); 796 + 797 + return 0; 798 + } 799 + 800 + static int mcba_usb_probe(struct usb_interface *intf, 801 + const struct usb_device_id *id) 802 + { 803 + struct net_device *netdev; 804 + struct mcba_priv *priv; 805 + int err = -ENOMEM; 806 + struct usb_device *usbdev = interface_to_usbdev(intf); 807 + 808 + netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS); 809 + if (!netdev) { 810 + dev_err(&intf->dev, "Couldn't alloc candev\n"); 811 + return -ENOMEM; 812 + } 813 + 814 + priv = netdev_priv(netdev); 815 + 816 + priv->udev = usbdev; 817 + priv->netdev = netdev; 818 + priv->usb_ka_first_pass = true; 819 + priv->can_ka_first_pass = true; 820 + priv->can_speed_check = false; 821 + 822 + init_usb_anchor(&priv->rx_submitted); 823 + init_usb_anchor(&priv->tx_submitted); 824 + 825 + usb_set_intfdata(intf, priv); 826 + 827 + /* Init CAN device */ 828 + priv->can.state = CAN_STATE_STOPPED; 829 + priv->can.termination_const = mcba_termination; 830 + priv->can.termination_const_cnt = ARRAY_SIZE(mcba_termination); 831 + priv->can.bitrate_const = mcba_bitrate; 832 + priv->can.bitrate_const_cnt = 
ARRAY_SIZE(mcba_bitrate); 833 + 834 + priv->can.do_set_termination = mcba_set_termination; 835 + priv->can.do_set_mode = mcba_net_set_mode; 836 + priv->can.do_get_berr_counter = mcba_net_get_berr_counter; 837 + priv->can.do_set_bittiming = mcba_net_set_bittiming; 838 + 839 + netdev->netdev_ops = &mcba_netdev_ops; 840 + 841 + netdev->flags |= IFF_ECHO; /* we support local echo */ 842 + 843 + SET_NETDEV_DEV(netdev, &intf->dev); 844 + 845 + err = register_candev(netdev); 846 + if (err) { 847 + netdev_err(netdev, "couldn't register CAN device: %d\n", err); 848 + 849 + goto cleanup_free_candev; 850 + } 851 + 852 + devm_can_led_init(netdev); 853 + 854 + /* Start USB dev only if we have successfully registered CAN device */ 855 + err = mcba_usb_start(priv); 856 + if (err) { 857 + if (err == -ENODEV) 858 + netif_device_detach(priv->netdev); 859 + 860 + netdev_warn(netdev, "couldn't start device: %d\n", err); 861 + 862 + goto cleanup_unregister_candev; 863 + } 864 + 865 + dev_info(&intf->dev, "Microchip CAN BUS analizer connected\n"); 866 + 867 + return 0; 868 + 869 + cleanup_unregister_candev: 870 + unregister_candev(priv->netdev); 871 + 872 + cleanup_free_candev: 873 + free_candev(netdev); 874 + 875 + return err; 876 + } 877 + 878 + /* Called by the usb core when driver is unloaded or device is removed */ 879 + static void mcba_usb_disconnect(struct usb_interface *intf) 880 + { 881 + struct mcba_priv *priv = usb_get_intfdata(intf); 882 + 883 + usb_set_intfdata(intf, NULL); 884 + 885 + netdev_info(priv->netdev, "device disconnected\n"); 886 + 887 + unregister_candev(priv->netdev); 888 + free_candev(priv->netdev); 889 + 890 + mcba_urb_unlink(priv); 891 + } 892 + 893 + static struct usb_driver mcba_usb_driver = { 894 + .name = MCBA_MODULE_NAME, 895 + .probe = mcba_usb_probe, 896 + .disconnect = mcba_usb_disconnect, 897 + .id_table = mcba_usb_table, 898 + }; 899 + 900 + module_usb_driver(mcba_usb_driver); 901 + 902 + MODULE_AUTHOR("Remigiusz Kołłątaj 
<remigiusz.kollataj@mobica.com>"); 903 + MODULE_DESCRIPTION("SocketCAN driver for Microchip CAN BUS Analyzer Tool"); 904 + MODULE_LICENSE("GPL v2");
+75 -11
drivers/net/can/usb/peak_usb/pcan_ucan.h include/linux/can/dev/peak_canfd.h
··· 23 23 #define PUCAN_CMD_LISTEN_ONLY_MODE 0x003 24 24 #define PUCAN_CMD_TIMING_SLOW 0x004 25 25 #define PUCAN_CMD_TIMING_FAST 0x005 26 + #define PUCAN_CMD_SET_STD_FILTER 0x006 27 + #define PUCAN_CMD_RESERVED2 0x007 26 28 #define PUCAN_CMD_FILTER_STD 0x008 27 29 #define PUCAN_CMD_TX_ABORT 0x009 28 30 #define PUCAN_CMD_WR_ERR_CNT 0x00a 29 31 #define PUCAN_CMD_SET_EN_OPTION 0x00b 30 32 #define PUCAN_CMD_CLR_DIS_OPTION 0x00c 33 + #define PUCAN_CMD_RX_BARRIER 0x010 31 34 #define PUCAN_CMD_END_OF_COLLECTION 0x3ff 32 35 33 36 /* uCAN received messages list */ ··· 38 35 #define PUCAN_MSG_ERROR 0x0002 39 36 #define PUCAN_MSG_STATUS 0x0003 40 37 #define PUCAN_MSG_BUSLOAD 0x0004 38 + 39 + #define PUCAN_MSG_CACHE_CRITICAL 0x0102 40 + 41 + /* uCAN transmitted messages */ 41 42 #define PUCAN_MSG_CAN_TX 0x1000 42 43 43 44 /* uCAN command common header */ ··· 49 42 __le16 opcode_channel; 50 43 u16 args[3]; 51 44 }; 45 + 46 + /* return the opcode from the opcode_channel field of a command */ 47 + static inline u16 pucan_cmd_get_opcode(struct pucan_command *c) 48 + { 49 + return le16_to_cpu(c->opcode_channel) & 0x3ff; 50 + } 52 51 53 52 #define PUCAN_TSLOW_BRP_BITS 10 54 53 #define PUCAN_TSLOW_TSGEG1_BITS 8 ··· 119 106 120 107 __le16 idx; 121 108 __le32 mask; /* CAN-ID bitmask in idx range */ 109 + }; 110 + 111 + #define PUCAN_FLTSTD_ROW_IDX_MAX ((1 << PUCAN_FLTSTD_ROW_IDX_BITS) - 1) 112 + 113 + /* uCAN SET_STD_FILTER command fields */ 114 + struct __packed pucan_std_filter { 115 + __le16 opcode_channel; 116 + 117 + u8 unused; 118 + u8 idx; 119 + __le32 mask; /* CAN-ID bitmask in idx range */ 120 + }; 121 + 122 + /* uCAN TX_ABORT commands fields */ 123 + #define PUCAN_TX_ABORT_FLUSH 0x0001 124 + 125 + struct __packed pucan_tx_abort { 126 + __le16 opcode_channel; 127 + 128 + __le16 flags; 129 + u32 unused; 122 130 }; 123 131 124 132 /* uCAN WR_ERR_CNT command fields */ ··· 218 184 u8 rx_err_cnt; 219 185 }; 220 186 187 + static inline int pucan_error_get_channel(const struct 
pucan_error_msg *msg) 188 + { 189 + return msg->channel_type_d & 0x0f; 190 + } 191 + 192 + #define PUCAN_RX_BARRIER 0x10 221 193 #define PUCAN_BUS_PASSIVE 0x20 222 194 #define PUCAN_BUS_WARNING 0x40 223 195 #define PUCAN_BUS_BUSOFF 0x80 ··· 236 196 u8 channel_p_w_b; 237 197 u8 unused[3]; 238 198 }; 199 + 200 + static inline int pucan_status_get_channel(const struct pucan_status_msg *msg) 201 + { 202 + return msg->channel_p_w_b & 0x0f; 203 + } 204 + 205 + static inline int pucan_status_is_rx_barrier(const struct pucan_status_msg *msg) 206 + { 207 + return msg->channel_p_w_b & PUCAN_RX_BARRIER; 208 + } 209 + 210 + static inline int pucan_status_is_passive(const struct pucan_status_msg *msg) 211 + { 212 + return msg->channel_p_w_b & PUCAN_BUS_PASSIVE; 213 + } 214 + 215 + static inline int pucan_status_is_warning(const struct pucan_status_msg *msg) 216 + { 217 + return msg->channel_p_w_b & PUCAN_BUS_WARNING; 218 + } 219 + 220 + static inline int pucan_status_is_busoff(const struct pucan_status_msg *msg) 221 + { 222 + return msg->channel_p_w_b & PUCAN_BUS_BUSOFF; 223 + } 239 224 240 225 /* uCAN transmitted message format */ 241 226 #define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4)) ··· 278 213 }; 279 214 280 215 /* build the cmd opcode_channel field with respect to the correct endianness */ 281 - static inline __le16 pucan_cmd_opcode_channel(struct peak_usb_device *dev, 282 - int opcode) 216 + static inline __le16 pucan_cmd_opcode_channel(int index, int opcode) 283 217 { 284 - return cpu_to_le16(((dev->ctrl_idx) << 12) | ((opcode) & 0x3ff)); 218 + return cpu_to_le16(((index) << 12) | ((opcode) & 0x3ff)); 285 219 } 286 220 287 221 /* return the channel number part from any received message channel_dlc field */ 288 - static inline int pucan_msg_get_channel(struct pucan_rx_msg *rm) 222 + static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg) 289 223 { 290 - return rm->channel_dlc & 0xf; 224 + return msg->channel_dlc & 0xf; 291 225 } 292 226 293 
227 /* return the dlc value from any received message channel_dlc field */ 294 - static inline int pucan_msg_get_dlc(struct pucan_rx_msg *rm) 228 + static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg) 295 229 { 296 - return rm->channel_dlc >> 4; 230 + return msg->channel_dlc >> 4; 297 231 } 298 232 299 - static inline int pucan_ermsg_get_channel(struct pucan_error_msg *em) 233 + static inline int pucan_ermsg_get_channel(const struct pucan_error_msg *msg) 300 234 { 301 - return em->channel_type_d & 0x0f; 235 + return msg->channel_type_d & 0x0f; 302 236 } 303 237 304 - static inline int pucan_stmsg_get_channel(struct pucan_status_msg *sm) 238 + static inline int pucan_stmsg_get_channel(const struct pucan_status_msg *msg) 305 239 { 306 - return sm->channel_p_w_b & 0x0f; 240 + return msg->channel_p_w_b & 0x0f; 307 241 } 308 242 309 243 #endif
+13 -12
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
··· 19 19 #include <linux/can.h> 20 20 #include <linux/can/dev.h> 21 21 #include <linux/can/error.h> 22 + #include <linux/can/dev/peak_canfd.h> 22 23 23 24 #include "pcan_usb_core.h" 24 25 #include "pcan_usb_pro.h" 25 - #include "pcan_ucan.h" 26 26 27 27 MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter"); 28 28 MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter"); ··· 238 238 239 239 /* 1st, reset error counters: */ 240 240 prc = (struct pucan_wr_err_cnt *)pc; 241 - prc->opcode_channel = pucan_cmd_opcode_channel(dev, 241 + prc->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 242 242 PUCAN_CMD_WR_ERR_CNT); 243 243 244 244 /* select both counters */ ··· 257 257 258 258 puo->opcode_channel = 259 259 (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ? 260 - pucan_cmd_opcode_channel(dev, 260 + pucan_cmd_opcode_channel(dev->ctrl_idx, 261 261 PUCAN_CMD_CLR_DIS_OPTION) : 262 - pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION); 262 + pucan_cmd_opcode_channel(dev->ctrl_idx, 263 + PUCAN_CMD_SET_EN_OPTION); 263 264 264 265 puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO); 265 266 ··· 275 274 276 275 /* next, go back to operational mode */ 277 276 cmd = (struct pucan_command *)pc; 278 - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 277 + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 279 278 (dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ? 
280 279 PUCAN_CMD_LISTEN_ONLY_MODE : 281 280 PUCAN_CMD_NORMAL_MODE); ··· 297 296 struct pucan_command *cmd = (struct pucan_command *)pc; 298 297 299 298 /* build cmd to go back to reset mode */ 300 - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 299 + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 301 300 PUCAN_CMD_RESET_MODE); 302 301 l = sizeof(struct pucan_command); 303 302 } ··· 333 332 } 334 333 335 334 for (i = idx; i < n; i++, cmd++) { 336 - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 335 + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 337 336 PUCAN_CMD_FILTER_STD); 338 337 cmd->idx = cpu_to_le16(i); 339 338 cmd->mask = cpu_to_le32(mask); ··· 353 352 { 354 353 struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev); 355 354 356 - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 355 + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 357 356 (onoff) ? PUCAN_CMD_SET_EN_OPTION : 358 357 PUCAN_CMD_CLR_DIS_OPTION); 359 358 ··· 369 368 { 370 369 struct pcan_ufd_led *cmd = pcan_usb_fd_cmd_buffer(dev); 371 370 372 - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 371 + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 373 372 PCAN_UFD_CMD_LED_SET); 374 373 cmd->mode = led_mode; 375 374 ··· 383 382 { 384 383 struct pcan_ufd_clock *cmd = pcan_usb_fd_cmd_buffer(dev); 385 384 386 - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 385 + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 387 386 PCAN_UFD_CMD_CLK_SET); 388 387 cmd->mode = clk_mode; 389 388 ··· 397 396 { 398 397 struct pucan_timing_slow *cmd = pcan_usb_fd_cmd_buffer(dev); 399 398 400 - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 399 + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 401 400 PUCAN_CMD_TIMING_SLOW); 402 401 cmd->sjw_t = PUCAN_TSLOW_SJW_T(bt->sjw - 1, 403 402 dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES); ··· 418 417 { 419 418 struct pucan_timing_fast *cmd = 
pcan_usb_fd_cmd_buffer(dev); 420 419 421 - cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 420 + cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, 422 421 PUCAN_CMD_TIMING_FAST); 423 422 cmd->sjw = PUCAN_TFAST_SJW(bt->sjw - 1); 424 423 cmd->tseg2 = PUCAN_TFAST_TSEG2(bt->phase_seg2 - 1);
+5 -2
drivers/net/can/vcan.c
··· 1 1 /* 2 2 * vcan.c - Virtual CAN interface 3 3 * 4 - * Copyright (c) 2002-2007 Volkswagen Group Electronic Research 4 + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research 5 5 * All rights reserved. 6 6 * 7 7 * Redistribution and use in source and binary forms, with or without ··· 50 50 #include <linux/slab.h> 51 51 #include <net/rtnetlink.h> 52 52 53 + #define DRV_NAME "vcan" 54 + 53 55 MODULE_DESCRIPTION("virtual CAN interface"); 54 56 MODULE_LICENSE("Dual BSD/GPL"); 55 57 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>"); 58 + MODULE_ALIAS_RTNL_LINK(DRV_NAME); 56 59 57 60 58 61 /* ··· 167 164 } 168 165 169 166 static struct rtnl_link_ops vcan_link_ops __read_mostly = { 170 - .kind = "vcan", 167 + .kind = DRV_NAME, 171 168 .setup = vcan_setup, 172 169 }; 173 170
+316
drivers/net/can/vxcan.c
··· 1 + /* 2 + * vxcan.c - Virtual CAN Tunnel for cross namespace communication 3 + * 4 + * This code is derived from drivers/net/can/vcan.c for the virtual CAN 5 + * specific parts and from drivers/net/veth.c to implement the netlink API 6 + * for network interface pairs in a common and established way. 7 + * 8 + * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the version 2 of the GNU General Public License 12 + * as published by the Free Software Foundation 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 + * GNU General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
21 + */ 22 + 23 + #include <linux/module.h> 24 + #include <linux/init.h> 25 + #include <linux/netdevice.h> 26 + #include <linux/if_arp.h> 27 + #include <linux/if_ether.h> 28 + #include <linux/can.h> 29 + #include <linux/can/dev.h> 30 + #include <linux/can/skb.h> 31 + #include <linux/can/vxcan.h> 32 + #include <linux/slab.h> 33 + #include <net/rtnetlink.h> 34 + 35 + #define DRV_NAME "vxcan" 36 + 37 + MODULE_DESCRIPTION("Virtual CAN Tunnel"); 38 + MODULE_LICENSE("GPL"); 39 + MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>"); 40 + MODULE_ALIAS_RTNL_LINK(DRV_NAME); 41 + 42 + struct vxcan_priv { 43 + struct net_device __rcu *peer; 44 + }; 45 + 46 + static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) 47 + { 48 + struct vxcan_priv *priv = netdev_priv(dev); 49 + struct net_device *peer; 50 + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 51 + struct net_device_stats *peerstats, *srcstats = &dev->stats; 52 + 53 + if (can_dropped_invalid_skb(dev, skb)) 54 + return NETDEV_TX_OK; 55 + 56 + rcu_read_lock(); 57 + peer = rcu_dereference(priv->peer); 58 + if (unlikely(!peer)) { 59 + kfree_skb(skb); 60 + dev->stats.tx_dropped++; 61 + goto out_unlock; 62 + } 63 + 64 + skb = can_create_echo_skb(skb); 65 + if (!skb) 66 + goto out_unlock; 67 + 68 + /* reset CAN GW hop counter */ 69 + skb->csum_start = 0; 70 + skb->pkt_type = PACKET_BROADCAST; 71 + skb->dev = peer; 72 + skb->ip_summed = CHECKSUM_UNNECESSARY; 73 + 74 + if (netif_rx_ni(skb) == NET_RX_SUCCESS) { 75 + srcstats->tx_packets++; 76 + srcstats->tx_bytes += cfd->len; 77 + peerstats = &peer->stats; 78 + peerstats->rx_packets++; 79 + peerstats->rx_bytes += cfd->len; 80 + } 81 + 82 + out_unlock: 83 + rcu_read_unlock(); 84 + return NETDEV_TX_OK; 85 + } 86 + 87 + 88 + static int vxcan_open(struct net_device *dev) 89 + { 90 + struct vxcan_priv *priv = netdev_priv(dev); 91 + struct net_device *peer = rtnl_dereference(priv->peer); 92 + 93 + if (!peer) 94 + return -ENOTCONN; 95 + 96 + if 
(peer->flags & IFF_UP) { 97 + netif_carrier_on(dev); 98 + netif_carrier_on(peer); 99 + } 100 + return 0; 101 + } 102 + 103 + static int vxcan_close(struct net_device *dev) 104 + { 105 + struct vxcan_priv *priv = netdev_priv(dev); 106 + struct net_device *peer = rtnl_dereference(priv->peer); 107 + 108 + netif_carrier_off(dev); 109 + if (peer) 110 + netif_carrier_off(peer); 111 + 112 + return 0; 113 + } 114 + 115 + static int vxcan_get_iflink(const struct net_device *dev) 116 + { 117 + struct vxcan_priv *priv = netdev_priv(dev); 118 + struct net_device *peer; 119 + int iflink; 120 + 121 + rcu_read_lock(); 122 + peer = rcu_dereference(priv->peer); 123 + iflink = peer ? peer->ifindex : 0; 124 + rcu_read_unlock(); 125 + 126 + return iflink; 127 + } 128 + 129 + static int vxcan_change_mtu(struct net_device *dev, int new_mtu) 130 + { 131 + /* Do not allow changing the MTU while running */ 132 + if (dev->flags & IFF_UP) 133 + return -EBUSY; 134 + 135 + if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU) 136 + return -EINVAL; 137 + 138 + dev->mtu = new_mtu; 139 + return 0; 140 + } 141 + 142 + static const struct net_device_ops vxcan_netdev_ops = { 143 + .ndo_open = vxcan_open, 144 + .ndo_stop = vxcan_close, 145 + .ndo_start_xmit = vxcan_xmit, 146 + .ndo_get_iflink = vxcan_get_iflink, 147 + .ndo_change_mtu = vxcan_change_mtu, 148 + }; 149 + 150 + static void vxcan_setup(struct net_device *dev) 151 + { 152 + dev->type = ARPHRD_CAN; 153 + dev->mtu = CAN_MTU; 154 + dev->hard_header_len = 0; 155 + dev->addr_len = 0; 156 + dev->tx_queue_len = 0; 157 + dev->flags = (IFF_NOARP|IFF_ECHO); 158 + dev->netdev_ops = &vxcan_netdev_ops; 159 + dev->destructor = free_netdev; 160 + } 161 + 162 + /* forward declaration for rtnl_create_link() */ 163 + static struct rtnl_link_ops vxcan_link_ops; 164 + 165 + static int vxcan_newlink(struct net *net, struct net_device *dev, 166 + struct nlattr *tb[], struct nlattr *data[]) 167 + { 168 + struct vxcan_priv *priv; 169 + struct net_device *peer; 170 + 
struct net *peer_net; 171 + 172 + struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb; 173 + char ifname[IFNAMSIZ]; 174 + unsigned char name_assign_type; 175 + struct ifinfomsg *ifmp = NULL; 176 + int err; 177 + 178 + /* register peer device */ 179 + if (data && data[VXCAN_INFO_PEER]) { 180 + struct nlattr *nla_peer; 181 + 182 + nla_peer = data[VXCAN_INFO_PEER]; 183 + ifmp = nla_data(nla_peer); 184 + err = rtnl_nla_parse_ifla(peer_tb, 185 + nla_data(nla_peer) + 186 + sizeof(struct ifinfomsg), 187 + nla_len(nla_peer) - 188 + sizeof(struct ifinfomsg), 189 + NULL); 190 + if (err < 0) 191 + return err; 192 + 193 + tbp = peer_tb; 194 + } 195 + 196 + if (tbp[IFLA_IFNAME]) { 197 + nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 198 + name_assign_type = NET_NAME_USER; 199 + } else { 200 + snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); 201 + name_assign_type = NET_NAME_ENUM; 202 + } 203 + 204 + peer_net = rtnl_link_get_net(net, tbp); 205 + if (IS_ERR(peer_net)) 206 + return PTR_ERR(peer_net); 207 + 208 + peer = rtnl_create_link(peer_net, ifname, name_assign_type, 209 + &vxcan_link_ops, tbp); 210 + if (IS_ERR(peer)) { 211 + put_net(peer_net); 212 + return PTR_ERR(peer); 213 + } 214 + 215 + if (ifmp && dev->ifindex) 216 + peer->ifindex = ifmp->ifi_index; 217 + 218 + err = register_netdevice(peer); 219 + put_net(peer_net); 220 + peer_net = NULL; 221 + if (err < 0) { 222 + free_netdev(peer); 223 + return err; 224 + } 225 + 226 + netif_carrier_off(peer); 227 + 228 + err = rtnl_configure_link(peer, ifmp); 229 + if (err < 0) { 230 + unregister_netdevice(peer); 231 + return err; 232 + } 233 + 234 + /* register first device */ 235 + if (tb[IFLA_IFNAME]) 236 + nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); 237 + else 238 + snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); 239 + 240 + err = register_netdevice(dev); 241 + if (err < 0) { 242 + unregister_netdevice(peer); 243 + return err; 244 + } 245 + 246 + netif_carrier_off(dev); 247 + 248 + /* cross link the device pair */ 249 + priv = 
netdev_priv(dev); 250 + rcu_assign_pointer(priv->peer, peer); 251 + 252 + priv = netdev_priv(peer); 253 + rcu_assign_pointer(priv->peer, dev); 254 + 255 + return 0; 256 + } 257 + 258 + static void vxcan_dellink(struct net_device *dev, struct list_head *head) 259 + { 260 + struct vxcan_priv *priv; 261 + struct net_device *peer; 262 + 263 + priv = netdev_priv(dev); 264 + peer = rtnl_dereference(priv->peer); 265 + 266 + /* Note : dellink() is called from default_device_exit_batch(), 267 + * before a rcu_synchronize() point. The devices are guaranteed 268 + * not being freed before one RCU grace period. 269 + */ 270 + RCU_INIT_POINTER(priv->peer, NULL); 271 + unregister_netdevice_queue(dev, head); 272 + 273 + if (peer) { 274 + priv = netdev_priv(peer); 275 + RCU_INIT_POINTER(priv->peer, NULL); 276 + unregister_netdevice_queue(peer, head); 277 + } 278 + } 279 + 280 + static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = { 281 + [VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) }, 282 + }; 283 + 284 + static struct net *vxcan_get_link_net(const struct net_device *dev) 285 + { 286 + struct vxcan_priv *priv = netdev_priv(dev); 287 + struct net_device *peer = rtnl_dereference(priv->peer); 288 + 289 + return peer ? dev_net(peer) : dev_net(dev); 290 + } 291 + 292 + static struct rtnl_link_ops vxcan_link_ops = { 293 + .kind = DRV_NAME, 294 + .priv_size = sizeof(struct vxcan_priv), 295 + .setup = vxcan_setup, 296 + .newlink = vxcan_newlink, 297 + .dellink = vxcan_dellink, 298 + .policy = vxcan_policy, 299 + .maxtype = VXCAN_INFO_MAX, 300 + .get_link_net = vxcan_get_link_net, 301 + }; 302 + 303 + static __init int vxcan_init(void) 304 + { 305 + pr_info("vxcan: Virtual CAN Tunnel driver\n"); 306 + 307 + return rtnl_link_register(&vxcan_link_ops); 308 + } 309 + 310 + static __exit void vxcan_exit(void) 311 + { 312 + rtnl_link_unregister(&vxcan_link_ops); 313 + } 314 + 315 + module_init(vxcan_init); 316 + module_exit(vxcan_exit);
+2 -2
include/linux/can/core.h
··· 5 5 * 6 6 * Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> 7 7 * Urs Thuermann <urs.thuermann@volkswagen.de> 8 - * Copyright (c) 2002-2007 Volkswagen Group Electronic Research 8 + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research 9 9 * All rights reserved. 10 10 * 11 11 */ ··· 17 17 #include <linux/skbuff.h> 18 18 #include <linux/netdevice.h> 19 19 20 - #define CAN_VERSION "20120528" 20 + #define CAN_VERSION "20170425" 21 21 22 22 /* increment this number each time you change some user-space interface */ 23 23 #define CAN_ABI_VERSION "9"
+9
include/net/netns/can.h
··· 8 8 #include <linux/spinlock.h> 9 9 10 10 struct dev_rcv_lists; 11 + struct s_stats; 12 + struct s_pstats; 11 13 12 14 struct netns_can { 13 15 #if IS_ENABLED(CONFIG_PROC_FS) ··· 23 21 struct proc_dir_entry *pde_rcvlist_sff; 24 22 struct proc_dir_entry *pde_rcvlist_eff; 25 23 struct proc_dir_entry *pde_rcvlist_err; 24 + struct proc_dir_entry *bcmproc_dir; 26 25 #endif 27 26 28 27 /* receive filters subscribed for 'all' CAN devices */ 29 28 struct dev_rcv_lists *can_rx_alldev_list; 30 29 spinlock_t can_rcvlists_lock; 30 + struct timer_list can_stattimer;/* timer for statistics update */ 31 + struct s_stats *can_stats; /* packet statistics */ 32 + struct s_pstats *can_pstats; /* receive list statistics */ 33 + 34 + /* CAN GW per-net gateway jobs */ 35 + struct hlist_head cgw_list; 31 36 }; 32 37 33 38 #endif /* __NETNS_CAN_H__ */
+12
include/uapi/linux/can/vxcan.h
··· 1 + #ifndef _UAPI_CAN_VXCAN_H 2 + #define _UAPI_CAN_VXCAN_H 3 + 4 + enum { 5 + VXCAN_INFO_UNSPEC, 6 + VXCAN_INFO_PEER, 7 + 8 + __VXCAN_INFO_MAX 9 + #define VXCAN_INFO_MAX (__VXCAN_INFO_MAX - 1) 10 + }; 11 + 12 + #endif
+40 -37
net/can/af_can.c
··· 2 2 * af_can.c - Protocol family CAN core module 3 3 * (used by different CAN protocol modules) 4 4 * 5 - * Copyright (c) 2002-2007 Volkswagen Group Electronic Research 5 + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research 6 6 * All rights reserved. 7 7 * 8 8 * Redistribution and use in source and binary forms, with or without ··· 75 75 module_param(stats_timer, int, S_IRUGO); 76 76 MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); 77 77 78 - static int can_net_id; 79 - 80 78 static struct kmem_cache *rcv_cache __read_mostly; 81 79 82 80 /* table of registered CAN protocols */ 83 81 static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; 84 82 static DEFINE_MUTEX(proto_tab_lock); 85 - 86 - struct timer_list can_stattimer; /* timer for statistics update */ 87 - struct s_stats can_stats; /* packet statistics */ 88 - struct s_pstats can_pstats; /* receive list statistics */ 89 83 90 84 static atomic_t skbcounter = ATOMIC_INIT(0); 91 85 ··· 217 223 { 218 224 struct sk_buff *newskb = NULL; 219 225 struct canfd_frame *cfd = (struct canfd_frame *)skb->data; 226 + struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats; 220 227 int err = -EINVAL; 221 228 222 229 if (skb->len == CAN_MTU) { ··· 306 311 netif_rx_ni(newskb); 307 312 308 313 /* update statistics */ 309 - can_stats.tx_frames++; 310 - can_stats.tx_frames_delta++; 314 + can_stats->tx_frames++; 315 + can_stats->tx_frames_delta++; 311 316 312 317 return 0; 313 318 ··· 465 470 struct receiver *r; 466 471 struct hlist_head *rl; 467 472 struct dev_rcv_lists *d; 473 + struct s_pstats *can_pstats = net->can.can_pstats; 468 474 int err = 0; 469 475 470 476 /* insert new receiver (dev,canid,mask) -> (func,data) */ ··· 497 501 hlist_add_head_rcu(&r->list, rl); 498 502 d->entries++; 499 503 500 - can_pstats.rcv_entries++; 501 - if (can_pstats.rcv_entries_max < can_pstats.rcv_entries) 502 - can_pstats.rcv_entries_max = can_pstats.rcv_entries; 504 + 
can_pstats->rcv_entries++; 505 + if (can_pstats->rcv_entries_max < can_pstats->rcv_entries) 506 + can_pstats->rcv_entries_max = can_pstats->rcv_entries; 503 507 } else { 504 508 kmem_cache_free(rcv_cache, r); 505 509 err = -ENODEV; ··· 541 545 { 542 546 struct receiver *r = NULL; 543 547 struct hlist_head *rl; 548 + struct s_pstats *can_pstats = net->can.can_pstats; 544 549 struct dev_rcv_lists *d; 545 550 546 551 if (dev && dev->type != ARPHRD_CAN) ··· 588 591 hlist_del_rcu(&r->list); 589 592 d->entries--; 590 593 591 - if (can_pstats.rcv_entries > 0) 592 - can_pstats.rcv_entries--; 594 + if (can_pstats->rcv_entries > 0) 595 + can_pstats->rcv_entries--; 593 596 594 597 /* remove device structure requested by NETDEV_UNREGISTER */ 595 598 if (d->remove_on_zero_entries && !d->entries) { ··· 683 686 static void can_receive(struct sk_buff *skb, struct net_device *dev) 684 687 { 685 688 struct dev_rcv_lists *d; 689 + struct net *net = dev_net(dev); 690 + struct s_stats *can_stats = net->can.can_stats; 686 691 int matches; 687 692 688 693 /* update statistics */ 689 - can_stats.rx_frames++; 690 - can_stats.rx_frames_delta++; 694 + can_stats->rx_frames++; 695 + can_stats->rx_frames_delta++; 691 696 692 697 /* create non-zero unique skb identifier together with *skb */ 693 698 while (!(can_skb_prv(skb)->skbcnt)) ··· 698 699 rcu_read_lock(); 699 700 700 701 /* deliver the packet to sockets listening on all devices */ 701 - matches = can_rcv_filter(dev_net(dev)->can.can_rx_alldev_list, skb); 702 + matches = can_rcv_filter(net->can.can_rx_alldev_list, skb); 702 703 703 704 /* find receive list for this device */ 704 - d = find_dev_rcv_lists(dev_net(dev), dev); 705 + d = find_dev_rcv_lists(net, dev); 705 706 if (d) 706 707 matches += can_rcv_filter(d, skb); 707 708 ··· 711 712 consume_skb(skb); 712 713 713 714 if (matches > 0) { 714 - can_stats.matches++; 715 - can_stats.matches_delta++; 715 + can_stats->matches++; 716 + can_stats->matches_delta++; 716 717 } 717 718 } 718 719 
··· 877 878 net->can.can_rx_alldev_list = 878 879 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); 879 880 880 - if (IS_ENABLED(CONFIG_PROC_FS)) 881 + net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL); 882 + net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL); 883 + 884 + if (IS_ENABLED(CONFIG_PROC_FS)) { 885 + /* the statistics are updated every second (timer triggered) */ 886 + if (stats_timer) { 887 + setup_timer(&net->can.can_stattimer, can_stat_update, 888 + (unsigned long)net); 889 + mod_timer(&net->can.can_stattimer, 890 + round_jiffies(jiffies + HZ)); 891 + } 892 + net->can.can_stats->jiffies_init = jiffies; 881 893 can_init_proc(net); 894 + } 882 895 883 896 return 0; 884 897 } ··· 899 888 { 900 889 struct net_device *dev; 901 890 902 - if (IS_ENABLED(CONFIG_PROC_FS)) 891 + if (IS_ENABLED(CONFIG_PROC_FS)) { 903 892 can_remove_proc(net); 893 + if (stats_timer) 894 + del_timer_sync(&net->can.can_stattimer); 895 + } 904 896 905 897 /* remove created dev_rcv_lists from still registered CAN devices */ 906 898 rcu_read_lock(); ··· 917 903 } 918 904 } 919 905 rcu_read_unlock(); 906 + 907 + kfree(net->can.can_rx_alldev_list); 908 + kfree(net->can.can_stats); 909 + kfree(net->can.can_pstats); 920 910 } 921 911 922 912 /* ··· 951 933 static struct pernet_operations can_pernet_ops __read_mostly = { 952 934 .init = can_pernet_init, 953 935 .exit = can_pernet_exit, 954 - .id = &can_net_id, 955 - .size = 0, 956 936 }; 957 937 958 938 static __init int can_init(void) ··· 968 952 if (!rcv_cache) 969 953 return -ENOMEM; 970 954 971 - if (IS_ENABLED(CONFIG_PROC_FS)) { 972 - if (stats_timer) { 973 - /* the statistics are updated every second (timer triggered) */ 974 - setup_timer(&can_stattimer, can_stat_update, 0); 975 - mod_timer(&can_stattimer, round_jiffies(jiffies + HZ)); 976 - } 977 - } 978 - 979 955 register_pernet_subsys(&can_pernet_ops); 980 956 981 957 /* protocol register */ ··· 981 973 982 974 static __exit void can_exit(void) 983 
975 { 984 - if (IS_ENABLED(CONFIG_PROC_FS)) { 985 - if (stats_timer) 986 - del_timer_sync(&can_stattimer); 987 - } 988 - 989 976 /* protocol unregister */ 990 977 dev_remove_pack(&canfd_packet); 991 978 dev_remove_pack(&can_packet);
-9
net/can/af_can.h
··· 110 110 unsigned long rcv_entries_max; 111 111 }; 112 112 113 - /* receive filters subscribed for 'all' CAN devices */ 114 - extern struct dev_rcv_lists can_rx_alldev_list; 115 - 116 113 /* function prototypes for the CAN networklayer procfs (proc.c) */ 117 114 void can_init_proc(struct net *net); 118 115 void can_remove_proc(struct net *net); 119 116 void can_stat_update(unsigned long data); 120 - 121 - /* structures and variables from af_can.c needed in proc.c for reading */ 122 - extern struct timer_list can_stattimer; /* timer for statistics update */ 123 - extern struct s_stats can_stats; /* packet statistics */ 124 - extern struct s_pstats can_pstats; /* receive list statistics */ 125 - extern struct hlist_head can_rx_dev_list; /* rx dispatcher structures */ 126 117 127 118 #endif /* AF_CAN_H */
+57 -33
net/can/bcm.c
··· 1 1 /* 2 2 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content 3 3 * 4 - * Copyright (c) 2002-2016 Volkswagen Group Electronic Research 4 + * Copyright (c) 2002-2017 Volkswagen Group Electronic Research 5 5 * All rights reserved. 6 6 * 7 7 * Redistribution and use in source and binary forms, with or without ··· 77 77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ 78 78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) 79 79 80 - #define CAN_BCM_VERSION "20161123" 80 + #define CAN_BCM_VERSION "20170425" 81 81 82 82 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); 83 83 MODULE_LICENSE("Dual BSD/GPL"); ··· 118 118 struct net_device *rx_reg_dev; 119 119 }; 120 120 121 - static struct proc_dir_entry *proc_dir; 122 - 123 121 struct bcm_sock { 124 122 struct sock sk; 125 123 int bound; ··· 147 149 /* 148 150 * procfs functions 149 151 */ 150 - static char *bcm_proc_getifname(char *result, int ifindex) 152 + static char *bcm_proc_getifname(struct net *net, char *result, int ifindex) 151 153 { 152 154 struct net_device *dev; 153 155 ··· 155 157 return "any"; 156 158 157 159 rcu_read_lock(); 158 - dev = dev_get_by_index_rcu(&init_net, ifindex); 160 + dev = dev_get_by_index_rcu(net, ifindex); 159 161 if (dev) 160 162 strcpy(result, dev->name); 161 163 else ··· 168 170 static int bcm_proc_show(struct seq_file *m, void *v) 169 171 { 170 172 char ifname[IFNAMSIZ]; 171 - struct sock *sk = (struct sock *)m->private; 173 + struct net *net = m->private; 174 + struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode); 172 175 struct bcm_sock *bo = bcm_sk(sk); 173 176 struct bcm_op *op; 174 177 ··· 177 178 seq_printf(m, " / sk %pK", sk); 178 179 seq_printf(m, " / bo %pK", bo); 179 180 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); 180 - seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); 181 + seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); 181 182 seq_printf(m, " <<<\n"); 182 183 183 184 
list_for_each_entry(op, &bo->rx_ops, list) { ··· 189 190 continue; 190 191 191 192 seq_printf(m, "rx_op: %03X %-5s ", op->can_id, 192 - bcm_proc_getifname(ifname, op->ifindex)); 193 + bcm_proc_getifname(net, ifname, op->ifindex)); 193 194 194 195 if (op->flags & CAN_FD_FRAME) 195 196 seq_printf(m, "(%u)", op->nframes); ··· 218 219 list_for_each_entry(op, &bo->tx_ops, list) { 219 220 220 221 seq_printf(m, "tx_op: %03X %s ", op->can_id, 221 - bcm_proc_getifname(ifname, op->ifindex)); 222 + bcm_proc_getifname(net, ifname, op->ifindex)); 222 223 223 224 if (op->flags & CAN_FD_FRAME) 224 225 seq_printf(m, "(%u) ", op->nframes); ··· 241 242 242 243 static int bcm_proc_open(struct inode *inode, struct file *file) 243 244 { 244 - return single_open(file, bcm_proc_show, PDE_DATA(inode)); 245 + return single_open_net(inode, file, bcm_proc_show); 245 246 } 246 247 247 248 static const struct file_operations bcm_proc_fops = { ··· 266 267 if (!op->ifindex) 267 268 return; 268 269 269 - dev = dev_get_by_index(&init_net, op->ifindex); 270 + dev = dev_get_by_index(sock_net(op->sk), op->ifindex); 270 271 if (!dev) { 271 272 /* RFC: should this bcm_op remove itself here? 
*/ 272 273 return; ··· 763 764 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) 764 765 { 765 766 if (op->rx_reg_dev == dev) { 766 - can_rx_unregister(&init_net, dev, op->can_id, 767 + can_rx_unregister(dev_net(dev), dev, op->can_id, 767 768 REGMASK(op->can_id), bcm_rx_handler, op); 768 769 769 770 /* mark as removed subscription */ ··· 799 800 if (op->rx_reg_dev) { 800 801 struct net_device *dev; 801 802 802 - dev = dev_get_by_index(&init_net, 803 + dev = dev_get_by_index(sock_net(op->sk), 803 804 op->ifindex); 804 805 if (dev) { 805 806 bcm_rx_unreg(dev, op); ··· 807 808 } 808 809 } 809 810 } else 810 - can_rx_unregister(&init_net, NULL, op->can_id, 811 + can_rx_unregister(sock_net(op->sk), NULL, 812 + op->can_id, 811 813 REGMASK(op->can_id), 812 814 bcm_rx_handler, op); 813 815 ··· 1220 1220 if (ifindex) { 1221 1221 struct net_device *dev; 1222 1222 1223 - dev = dev_get_by_index(&init_net, ifindex); 1223 + dev = dev_get_by_index(sock_net(sk), ifindex); 1224 1224 if (dev) { 1225 - err = can_rx_register(&init_net, dev, 1225 + err = can_rx_register(sock_net(sk), dev, 1226 1226 op->can_id, 1227 1227 REGMASK(op->can_id), 1228 1228 bcm_rx_handler, op, ··· 1233 1233 } 1234 1234 1235 1235 } else 1236 - err = can_rx_register(&init_net, NULL, op->can_id, 1236 + err = can_rx_register(sock_net(sk), NULL, op->can_id, 1237 1237 REGMASK(op->can_id), 1238 1238 bcm_rx_handler, op, "bcm", sk); 1239 1239 if (err) { ··· 1273 1273 return err; 1274 1274 } 1275 1275 1276 - dev = dev_get_by_index(&init_net, ifindex); 1276 + dev = dev_get_by_index(sock_net(sk), ifindex); 1277 1277 if (!dev) { 1278 1278 kfree_skb(skb); 1279 1279 return -ENODEV; ··· 1338 1338 if (ifindex) { 1339 1339 struct net_device *dev; 1340 1340 1341 - dev = dev_get_by_index(&init_net, ifindex); 1341 + dev = dev_get_by_index(sock_net(sk), ifindex); 1342 1342 if (!dev) 1343 1343 return -ENODEV; 1344 1344 ··· 1419 1419 struct bcm_op *op; 1420 1420 int notify_enodev = 0; 1421 1421 1422 - if 
(!net_eq(dev_net(dev), &init_net)) 1422 + if (!net_eq(dev_net(dev), sock_net(sk))) 1423 1423 return NOTIFY_DONE; 1424 1424 1425 1425 if (dev->type != ARPHRD_CAN) ··· 1491 1491 static int bcm_release(struct socket *sock) 1492 1492 { 1493 1493 struct sock *sk = sock->sk; 1494 + struct net *net = sock_net(sk); 1494 1495 struct bcm_sock *bo; 1495 1496 struct bcm_op *op, *next; 1496 1497 ··· 1523 1522 if (op->rx_reg_dev) { 1524 1523 struct net_device *dev; 1525 1524 1526 - dev = dev_get_by_index(&init_net, op->ifindex); 1525 + dev = dev_get_by_index(net, op->ifindex); 1527 1526 if (dev) { 1528 1527 bcm_rx_unreg(dev, op); 1529 1528 dev_put(dev); 1530 1529 } 1531 1530 } 1532 1531 } else 1533 - can_rx_unregister(&init_net, NULL, op->can_id, 1532 + can_rx_unregister(net, NULL, op->can_id, 1534 1533 REGMASK(op->can_id), 1535 1534 bcm_rx_handler, op); 1536 1535 ··· 1538 1537 } 1539 1538 1540 1539 /* remove procfs entry */ 1541 - if (proc_dir && bo->bcm_proc_read) 1542 - remove_proc_entry(bo->procname, proc_dir); 1540 + if (net->can.bcmproc_dir && bo->bcm_proc_read) 1541 + remove_proc_entry(bo->procname, net->can.bcmproc_dir); 1543 1542 1544 1543 /* remove device reference */ 1545 1544 if (bo->bound) { ··· 1562 1561 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; 1563 1562 struct sock *sk = sock->sk; 1564 1563 struct bcm_sock *bo = bcm_sk(sk); 1564 + struct net *net = sock_net(sk); 1565 1565 int ret = 0; 1566 1566 1567 1567 if (len < sizeof(*addr)) ··· 1579 1577 if (addr->can_ifindex) { 1580 1578 struct net_device *dev; 1581 1579 1582 - dev = dev_get_by_index(&init_net, addr->can_ifindex); 1580 + dev = dev_get_by_index(net, addr->can_ifindex); 1583 1581 if (!dev) { 1584 1582 ret = -ENODEV; 1585 1583 goto fail; ··· 1598 1596 bo->ifindex = 0; 1599 1597 } 1600 1598 1601 - if (proc_dir) { 1599 + if (net->can.bcmproc_dir) { 1602 1600 /* unique socket address as filename */ 1603 1601 sprintf(bo->procname, "%lu", sock_i_ino(sk)); 1604 1602 bo->bcm_proc_read = 
proc_create_data(bo->procname, 0644, 1605 - proc_dir, 1603 + net->can.bcmproc_dir, 1606 1604 &bcm_proc_fops, sk); 1607 1605 if (!bo->bcm_proc_read) { 1608 1606 ret = -ENOMEM; ··· 1689 1687 .prot = &bcm_proto, 1690 1688 }; 1691 1689 1690 + static int canbcm_pernet_init(struct net *net) 1691 + { 1692 + /* create /proc/net/can-bcm directory */ 1693 + if (IS_ENABLED(CONFIG_PROC_FS)) { 1694 + net->can.bcmproc_dir = 1695 + proc_net_mkdir(net, "can-bcm", net->proc_net); 1696 + } 1697 + 1698 + return 0; 1699 + } 1700 + 1701 + static void canbcm_pernet_exit(struct net *net) 1702 + { 1703 + /* remove /proc/net/can-bcm directory */ 1704 + if (IS_ENABLED(CONFIG_PROC_FS)) { 1705 + if (net->can.bcmproc_dir) 1706 + remove_proc_entry("can-bcm", net->proc_net); 1707 + } 1708 + } 1709 + 1710 + static struct pernet_operations canbcm_pernet_ops __read_mostly = { 1711 + .init = canbcm_pernet_init, 1712 + .exit = canbcm_pernet_exit, 1713 + }; 1714 + 1692 1715 static int __init bcm_module_init(void) 1693 1716 { 1694 1717 int err; ··· 1726 1699 return err; 1727 1700 } 1728 1701 1729 - /* create /proc/net/can-bcm directory */ 1730 - proc_dir = proc_mkdir("can-bcm", init_net.proc_net); 1702 + register_pernet_subsys(&canbcm_pernet_ops); 1731 1703 return 0; 1732 1704 } 1733 1705 1734 1706 static void __exit bcm_module_exit(void) 1735 1707 { 1736 1708 can_proto_unregister(&bcm_can_proto); 1737 - 1738 - if (proc_dir) 1739 - remove_proc_entry("can-bcm", init_net.proc_net); 1709 + unregister_pernet_subsys(&canbcm_pernet_ops); 1740 1710 } 1741 1711 1742 1712 module_init(bcm_module_init);
+44 -28
net/can/gw.c
··· 1 1 /* 2 2 * gw.c - CAN frame Gateway/Router/Bridge with netlink interface 3 3 * 4 - * Copyright (c) 2011 Volkswagen Group Electronic Research 4 + * Copyright (c) 2017 Volkswagen Group Electronic Research 5 5 * All rights reserved. 6 6 * 7 7 * Redistribution and use in source and binary forms, with or without ··· 59 59 #include <net/net_namespace.h> 60 60 #include <net/sock.h> 61 61 62 - #define CAN_GW_VERSION "20130117" 62 + #define CAN_GW_VERSION "20170425" 63 63 #define CAN_GW_NAME "can-gw" 64 64 65 65 MODULE_DESCRIPTION("PF_CAN netlink gateway"); ··· 79 79 __stringify(CGW_MAX_HOPS) " hops, " 80 80 "default: " __stringify(CGW_DEFAULT_HOPS) ")"); 81 81 82 - static HLIST_HEAD(cgw_list); 83 82 static struct notifier_block notifier; 84 - 85 83 static struct kmem_cache *cgw_cache __read_mostly; 86 84 87 85 /* structure that contains the (on-the-fly) CAN frame modifications */ ··· 436 438 gwj->handled_frames++; 437 439 } 438 440 439 - static inline int cgw_register_filter(struct cgw_job *gwj) 441 + static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) 440 442 { 441 - return can_rx_register(&init_net, gwj->src.dev, gwj->ccgw.filter.can_id, 443 + return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id, 442 444 gwj->ccgw.filter.can_mask, can_can_gw_rcv, 443 445 gwj, "gw", NULL); 444 446 } 445 447 446 - static inline void cgw_unregister_filter(struct cgw_job *gwj) 448 + static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj) 447 449 { 448 - can_rx_unregister(&init_net, gwj->src.dev, gwj->ccgw.filter.can_id, 450 + can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id, 449 451 gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj); 450 452 } 451 453 ··· 453 455 unsigned long msg, void *ptr) 454 456 { 455 457 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 458 + struct net *net = dev_net(dev); 456 459 457 - if (!net_eq(dev_net(dev), &init_net)) 458 - return NOTIFY_DONE; 459 460 if (dev->type != ARPHRD_CAN) 
460 461 return NOTIFY_DONE; 461 462 ··· 465 468 466 469 ASSERT_RTNL(); 467 470 468 - hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { 471 + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { 469 472 470 473 if (gwj->src.dev == dev || gwj->dst.dev == dev) { 471 474 hlist_del(&gwj->list); 472 - cgw_unregister_filter(gwj); 475 + cgw_unregister_filter(net, gwj); 473 476 kmem_cache_free(cgw_cache, gwj); 474 477 } 475 478 } ··· 589 592 /* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */ 590 593 static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) 591 594 { 595 + struct net *net = sock_net(skb->sk); 592 596 struct cgw_job *gwj = NULL; 593 597 int idx = 0; 594 598 int s_idx = cb->args[0]; 595 599 596 600 rcu_read_lock(); 597 - hlist_for_each_entry_rcu(gwj, &cgw_list, list) { 601 + hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) { 598 602 if (idx < s_idx) 599 603 goto cont; 600 604 ··· 810 812 static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, 811 813 struct netlink_ext_ack *extack) 812 814 { 815 + struct net *net = sock_net(skb->sk); 813 816 struct rtcanmsg *r; 814 817 struct cgw_job *gwj; 815 818 struct cf_mod mod; ··· 841 842 ASSERT_RTNL(); 842 843 843 844 /* check for updating an existing job with identical uid */ 844 - hlist_for_each_entry(gwj, &cgw_list, list) { 845 + hlist_for_each_entry(gwj, &net->can.cgw_list, list) { 845 846 846 847 if (gwj->mod.uid != mod.uid) 847 848 continue; ··· 879 880 880 881 err = -ENODEV; 881 882 882 - gwj->src.dev = __dev_get_by_index(&init_net, gwj->ccgw.src_idx); 883 + gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx); 883 884 884 885 if (!gwj->src.dev) 885 886 goto out; ··· 887 888 if (gwj->src.dev->type != ARPHRD_CAN) 888 889 goto out; 889 890 890 - gwj->dst.dev = __dev_get_by_index(&init_net, gwj->ccgw.dst_idx); 891 + gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx); 891 892 892 893 if (!gwj->dst.dev) 893 894 goto out; ··· 897 
898 898 899 ASSERT_RTNL(); 899 900 900 - err = cgw_register_filter(gwj); 901 + err = cgw_register_filter(net, gwj); 901 902 if (!err) 902 - hlist_add_head_rcu(&gwj->list, &cgw_list); 903 + hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); 903 904 out: 904 905 if (err) 905 906 kmem_cache_free(cgw_cache, gwj); ··· 907 908 return err; 908 909 } 909 910 910 - static void cgw_remove_all_jobs(void) 911 + static void cgw_remove_all_jobs(struct net *net) 911 912 { 912 913 struct cgw_job *gwj = NULL; 913 914 struct hlist_node *nx; 914 915 915 916 ASSERT_RTNL(); 916 917 917 - hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { 918 + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { 918 919 hlist_del(&gwj->list); 919 - cgw_unregister_filter(gwj); 920 + cgw_unregister_filter(net, gwj); 920 921 kmem_cache_free(cgw_cache, gwj); 921 922 } 922 923 } ··· 924 925 static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, 925 926 struct netlink_ext_ack *extack) 926 927 { 928 + struct net *net = sock_net(skb->sk); 927 929 struct cgw_job *gwj = NULL; 928 930 struct hlist_node *nx; 929 931 struct rtcanmsg *r; ··· 953 953 954 954 /* two interface indices both set to 0 => remove all entries */ 955 955 if (!ccgw.src_idx && !ccgw.dst_idx) { 956 - cgw_remove_all_jobs(); 956 + cgw_remove_all_jobs(net); 957 957 return 0; 958 958 } 959 959 ··· 962 962 ASSERT_RTNL(); 963 963 964 964 /* remove only the first matching entry */ 965 - hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { 965 + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { 966 966 967 967 if (gwj->flags != r->flags) 968 968 continue; ··· 985 985 continue; 986 986 987 987 hlist_del(&gwj->list); 988 - cgw_unregister_filter(gwj); 988 + cgw_unregister_filter(net, gwj); 989 989 kmem_cache_free(cgw_cache, gwj); 990 990 err = 0; 991 991 break; ··· 993 993 994 994 return err; 995 995 } 996 + 997 + static int __net_init cangw_pernet_init(struct net *net) 998 + { 999 + 
INIT_HLIST_HEAD(&net->can.cgw_list); 1000 + return 0; 1001 + } 1002 + 1003 + static void __net_exit cangw_pernet_exit(struct net *net) 1004 + { 1005 + rtnl_lock(); 1006 + cgw_remove_all_jobs(net); 1007 + rtnl_unlock(); 1008 + } 1009 + 1010 + static struct pernet_operations cangw_pernet_ops = { 1011 + .init = cangw_pernet_init, 1012 + .exit = cangw_pernet_exit, 1013 + }; 996 1014 997 1015 static __init int cgw_module_init(void) 998 1016 { ··· 1020 1002 pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n", 1021 1003 max_hops); 1022 1004 1005 + register_pernet_subsys(&cangw_pernet_ops); 1023 1006 cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job), 1024 1007 0, 0, NULL); 1025 1008 ··· 1050 1031 1051 1032 unregister_netdevice_notifier(&notifier); 1052 1033 1053 - rtnl_lock(); 1054 - cgw_remove_all_jobs(); 1055 - rtnl_unlock(); 1056 - 1034 + unregister_pernet_subsys(&cangw_pernet_ops); 1057 1035 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 1058 1036 1059 1037 kmem_cache_destroy(cgw_cache);
+76 -65
net/can/proc.c
··· 75 75 * af_can statistics stuff 76 76 */ 77 77 78 - static void can_init_stats(void) 78 + static void can_init_stats(struct net *net) 79 79 { 80 + struct s_stats *can_stats = net->can.can_stats; 81 + struct s_pstats *can_pstats = net->can.can_pstats; 80 82 /* 81 83 * This memset function is called from a timer context (when 82 84 * can_stattimer is active which is the default) OR in a process 83 85 * context (reading the proc_fs when can_stattimer is disabled). 84 86 */ 85 - memset(&can_stats, 0, sizeof(can_stats)); 86 - can_stats.jiffies_init = jiffies; 87 + memset(can_stats, 0, sizeof(struct s_stats)); 88 + can_stats->jiffies_init = jiffies; 87 89 88 - can_pstats.stats_reset++; 90 + can_pstats->stats_reset++; 89 91 90 92 if (user_reset) { 91 93 user_reset = 0; 92 - can_pstats.user_reset++; 94 + can_pstats->user_reset++; 93 95 } 94 96 } 95 97 ··· 117 115 118 116 void can_stat_update(unsigned long data) 119 117 { 118 + struct net *net = (struct net *)data; 119 + struct s_stats *can_stats = net->can.can_stats; 120 120 unsigned long j = jiffies; /* snapshot */ 121 121 122 122 /* restart counting in timer context on user request */ 123 123 if (user_reset) 124 - can_init_stats(); 124 + can_init_stats(net); 125 125 126 126 /* restart counting on jiffies overflow */ 127 - if (j < can_stats.jiffies_init) 128 - can_init_stats(); 127 + if (j < can_stats->jiffies_init) 128 + can_init_stats(net); 129 129 130 130 /* prevent overflow in calc_rate() */ 131 - if (can_stats.rx_frames > (ULONG_MAX / HZ)) 132 - can_init_stats(); 131 + if (can_stats->rx_frames > (ULONG_MAX / HZ)) 132 + can_init_stats(net); 133 133 134 134 /* prevent overflow in calc_rate() */ 135 - if (can_stats.tx_frames > (ULONG_MAX / HZ)) 136 - can_init_stats(); 135 + if (can_stats->tx_frames > (ULONG_MAX / HZ)) 136 + can_init_stats(net); 137 137 138 138 /* matches overflow - very improbable */ 139 - if (can_stats.matches > (ULONG_MAX / 100)) 140 - can_init_stats(); 139 + if (can_stats->matches > (ULONG_MAX / 
100)) 140 + can_init_stats(net); 141 141 142 142 /* calc total values */ 143 - if (can_stats.rx_frames) 144 - can_stats.total_rx_match_ratio = (can_stats.matches * 100) / 145 - can_stats.rx_frames; 143 + if (can_stats->rx_frames) 144 + can_stats->total_rx_match_ratio = (can_stats->matches * 100) / 145 + can_stats->rx_frames; 146 146 147 - can_stats.total_tx_rate = calc_rate(can_stats.jiffies_init, j, 148 - can_stats.tx_frames); 149 - can_stats.total_rx_rate = calc_rate(can_stats.jiffies_init, j, 150 - can_stats.rx_frames); 147 + can_stats->total_tx_rate = calc_rate(can_stats->jiffies_init, j, 148 + can_stats->tx_frames); 149 + can_stats->total_rx_rate = calc_rate(can_stats->jiffies_init, j, 150 + can_stats->rx_frames); 151 151 152 152 /* calc current values */ 153 - if (can_stats.rx_frames_delta) 154 - can_stats.current_rx_match_ratio = 155 - (can_stats.matches_delta * 100) / 156 - can_stats.rx_frames_delta; 153 + if (can_stats->rx_frames_delta) 154 + can_stats->current_rx_match_ratio = 155 + (can_stats->matches_delta * 100) / 156 + can_stats->rx_frames_delta; 157 157 158 - can_stats.current_tx_rate = calc_rate(0, HZ, can_stats.tx_frames_delta); 159 - can_stats.current_rx_rate = calc_rate(0, HZ, can_stats.rx_frames_delta); 158 + can_stats->current_tx_rate = calc_rate(0, HZ, can_stats->tx_frames_delta); 159 + can_stats->current_rx_rate = calc_rate(0, HZ, can_stats->rx_frames_delta); 160 160 161 161 /* check / update maximum values */ 162 - if (can_stats.max_tx_rate < can_stats.current_tx_rate) 163 - can_stats.max_tx_rate = can_stats.current_tx_rate; 162 + if (can_stats->max_tx_rate < can_stats->current_tx_rate) 163 + can_stats->max_tx_rate = can_stats->current_tx_rate; 164 164 165 - if (can_stats.max_rx_rate < can_stats.current_rx_rate) 166 - can_stats.max_rx_rate = can_stats.current_rx_rate; 165 + if (can_stats->max_rx_rate < can_stats->current_rx_rate) 166 + can_stats->max_rx_rate = can_stats->current_rx_rate; 167 167 168 - if (can_stats.max_rx_match_ratio < 
can_stats.current_rx_match_ratio) 169 - can_stats.max_rx_match_ratio = can_stats.current_rx_match_ratio; 168 + if (can_stats->max_rx_match_ratio < can_stats->current_rx_match_ratio) 169 + can_stats->max_rx_match_ratio = can_stats->current_rx_match_ratio; 170 170 171 171 /* clear values for 'current rate' calculation */ 172 - can_stats.tx_frames_delta = 0; 173 - can_stats.rx_frames_delta = 0; 174 - can_stats.matches_delta = 0; 172 + can_stats->tx_frames_delta = 0; 173 + can_stats->rx_frames_delta = 0; 174 + can_stats->matches_delta = 0; 175 175 176 176 /* restart timer (one second) */ 177 - mod_timer(&can_stattimer, round_jiffies(jiffies + HZ)); 177 + mod_timer(&net->can.can_stattimer, round_jiffies(jiffies + HZ)); 178 178 } 179 179 180 180 /* ··· 210 206 211 207 static int can_stats_proc_show(struct seq_file *m, void *v) 212 208 { 209 + struct net *net = m->private; 210 + struct s_stats *can_stats = net->can.can_stats; 211 + struct s_pstats *can_pstats = net->can.can_pstats; 212 + 213 213 seq_putc(m, '\n'); 214 - seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats.tx_frames); 215 - seq_printf(m, " %8ld received frames (RXF)\n", can_stats.rx_frames); 216 - seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats.matches); 214 + seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats->tx_frames); 215 + seq_printf(m, " %8ld received frames (RXF)\n", can_stats->rx_frames); 216 + seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats->matches); 217 217 218 218 seq_putc(m, '\n'); 219 219 220 - if (can_stattimer.function == can_stat_update) { 220 + if (net->can.can_stattimer.function == can_stat_update) { 221 221 seq_printf(m, " %8ld %% total match ratio (RXMR)\n", 222 - can_stats.total_rx_match_ratio); 222 + can_stats->total_rx_match_ratio); 223 223 224 224 seq_printf(m, " %8ld frames/s total tx rate (TXR)\n", 225 - can_stats.total_tx_rate); 225 + can_stats->total_tx_rate); 226 226 seq_printf(m, " %8ld frames/s total rx rate (RXR)\n", 227 - 
can_stats.total_rx_rate); 227 + can_stats->total_rx_rate); 228 228 229 229 seq_putc(m, '\n'); 230 230 231 231 seq_printf(m, " %8ld %% current match ratio (CRXMR)\n", 232 - can_stats.current_rx_match_ratio); 232 + can_stats->current_rx_match_ratio); 233 233 234 234 seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n", 235 - can_stats.current_tx_rate); 235 + can_stats->current_tx_rate); 236 236 seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n", 237 - can_stats.current_rx_rate); 237 + can_stats->current_rx_rate); 238 238 239 239 seq_putc(m, '\n'); 240 240 241 241 seq_printf(m, " %8ld %% max match ratio (MRXMR)\n", 242 - can_stats.max_rx_match_ratio); 242 + can_stats->max_rx_match_ratio); 243 243 244 244 seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n", 245 - can_stats.max_tx_rate); 245 + can_stats->max_tx_rate); 246 246 seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n", 247 - can_stats.max_rx_rate); 247 + can_stats->max_rx_rate); 248 248 249 249 seq_putc(m, '\n'); 250 250 } 251 251 252 252 seq_printf(m, " %8ld current receive list entries (CRCV)\n", 253 - can_pstats.rcv_entries); 253 + can_pstats->rcv_entries); 254 254 seq_printf(m, " %8ld maximum receive list entries (MRCV)\n", 255 - can_pstats.rcv_entries_max); 255 + can_pstats->rcv_entries_max); 256 256 257 - if (can_pstats.stats_reset) 257 + if (can_pstats->stats_reset) 258 258 seq_printf(m, "\n %8ld statistic resets (STR)\n", 259 - can_pstats.stats_reset); 259 + can_pstats->stats_reset); 260 260 261 - if (can_pstats.user_reset) 261 + if (can_pstats->user_reset) 262 262 seq_printf(m, " %8ld user statistic resets (USTR)\n", 263 - can_pstats.user_reset); 263 + can_pstats->user_reset); 264 264 265 265 seq_putc(m, '\n'); 266 266 return 0; ··· 272 264 273 265 static int can_stats_proc_open(struct inode *inode, struct file *file) 274 266 { 275 - return single_open(file, can_stats_proc_show, NULL); 267 + return single_open_net(inode, file, can_stats_proc_show); 276 268 } 277 269 278 270 static const struct 
file_operations can_stats_proc_fops = { ··· 285 277 286 278 static int can_reset_stats_proc_show(struct seq_file *m, void *v) 287 279 { 280 + struct net *net = m->private; 281 + struct s_pstats *can_pstats = net->can.can_pstats; 282 + struct s_stats *can_stats = net->can.can_stats; 283 + 288 284 user_reset = 1; 289 285 290 - if (can_stattimer.function == can_stat_update) { 286 + if (net->can.can_stattimer.function == can_stat_update) { 291 287 seq_printf(m, "Scheduled statistic reset #%ld.\n", 292 - can_pstats.stats_reset + 1); 293 - 288 + can_pstats->stats_reset + 1); 294 289 } else { 295 - if (can_stats.jiffies_init != jiffies) 296 - can_init_stats(); 290 + if (can_stats->jiffies_init != jiffies) 291 + can_init_stats(net); 297 292 298 293 seq_printf(m, "Performed statistic reset #%ld.\n", 299 - can_pstats.stats_reset); 294 + can_pstats->stats_reset); 300 295 } 301 296 return 0; 302 297 } 303 298 304 299 static int can_reset_stats_proc_open(struct inode *inode, struct file *file) 305 300 { 306 - return single_open(file, can_reset_stats_proc_show, NULL); 301 + return single_open_net(inode, file, can_reset_stats_proc_show); 307 302 } 308 303 309 304 static const struct file_operations can_reset_stats_proc_fops = { ··· 325 314 326 315 static int can_version_proc_open(struct inode *inode, struct file *file) 327 316 { 328 - return single_open(file, can_version_proc_show, NULL); 317 + return single_open_net(inode, file, can_version_proc_show); 329 318 } 330 319 331 320 static const struct file_operations can_version_proc_fops = {