Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux-can-next-for-5.13-20210330' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next

Marc Kleine-Budde says:

====================
pull-request: can-next 2021-03-30

this is a pull request of 39 patches for net-next/master.

The first two patches update the MAINTAINERS file. One is by me and
removes Dan Murphy from the m_can and tcan4x5x entries. The other one is
by Pankaj Sharma and updates the maintainership of the m-can mmio
driver.

The next three patches are by me and update the CAN echo skb handling.

Vincent Mailhol provides 5 patches where Transmitter Delay
Compensation is added and the CAN bittiming calculation is cleaned up.

The next patch is by me and adds a missing HAS_IOMEM to the grcan
driver.

Michal Simek's patch for the xilinx driver adds dev_err_probe()
support.

Arnd Bergmann's patch for the ucan driver fixes a compiler warning.

Stephane Grosjean provides 3 patches for the peak USB drivers, which
add ethtool set_phys_id and CAN one-shot mode.

Xulin Sun's patch removes an unneeded return check in the m-can
driver. Torin Cooper-Bennun provides 3 patches for the m-can driver
that add rx-offload support to ensure that skbs are sent from softirq
context. Wan Jiabing's patch for the tcan4x5x driver removes a
duplicate include.

The next 6 patches are by me and target the mcp251xfd driver. They add
devcoredump support, simplify the UINC handling, and add HW timestamp
support.

The remaining 12 patches target the c_can driver. The first 6 are by
me and do generic checkpatch related cleanup work. Dario Binacchi's
patches bring some cleanups and increase the number of usable message
objects from 16 to 64.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1068 -251
+1 -9
MAINTAINERS
··· 10910 10910 F: drivers/media/radio/radio-maxiradio* 10911 10911 10912 10912 MCAN MMIO DEVICE DRIVER 10913 - M: Dan Murphy <dmurphy@ti.com> 10914 - M: Pankaj Sharma <pankj.sharma@samsung.com> 10913 + M: Chandrasekar Ramakrishnan <rcsekar@samsung.com> 10915 10914 L: linux-can@vger.kernel.org 10916 10915 S: Maintained 10917 10916 F: Documentation/devicetree/bindings/net/can/bosch,m_can.yaml ··· 17981 17982 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 17982 17983 S: Odd Fixes 17983 17984 F: sound/soc/codecs/tas571x* 17984 - 17985 - TI TCAN4X5X DEVICE DRIVER 17986 - M: Dan Murphy <dmurphy@ti.com> 17987 - L: linux-can@vger.kernel.org 17988 - S: Maintained 17989 - F: Documentation/devicetree/bindings/net/can/tcan4x5x.txt 17990 - F: drivers/net/can/m_can/tcan4x5x* 17991 17985 17992 17986 TI TRF7970A NFC DRIVER 17993 17987 M: Mark Greer <mgreer@animalcreek.com>
+1 -1
drivers/net/can/Kconfig
··· 103 103 104 104 config CAN_GRCAN 105 105 tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" 106 - depends on OF && HAS_DMA 106 + depends on OF && HAS_DMA && HAS_IOMEM 107 107 help 108 108 Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. 109 109 Note that the driver supports little endian, even though little
+77 -76
drivers/net/can/c_can/c_can.c
··· 132 132 /* For the high buffers we clear the interrupt bit and newdat */ 133 133 #define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT) 134 134 135 - 136 135 /* Receive setup of message objects */ 137 136 #define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL) 138 137 ··· 160 161 161 162 #define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB) 162 163 163 - /* 164 - * Use IF1 for RX and IF2 for TX 165 - */ 164 + /* Use IF1 for RX and IF2 for TX */ 166 165 #define IF_RX 0 167 166 #define IF_TX 1 168 167 ··· 169 172 170 173 /* Wait for ~1 sec for INIT bit */ 171 174 #define INIT_WAIT_MS 1000 172 - 173 - /* napi related */ 174 - #define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM 175 175 176 176 /* c_can lec values */ 177 177 enum c_can_lec_type { ··· 183 189 LEC_MASK = LEC_UNUSED, 184 190 }; 185 191 186 - /* 187 - * c_can error types: 192 + /* c_can error types: 188 193 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported 189 194 */ 190 195 enum c_can_bus_error_types { ··· 246 253 udelay(1); 247 254 } 248 255 netdev_err(dev, "Updating object timed out\n"); 249 - 250 256 } 251 257 252 258 static inline void c_can_object_get(struct net_device *dev, int iface, ··· 260 268 c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj); 261 269 } 262 270 263 - /* 264 - * Note: According to documentation clearing TXIE while MSGVAL is set 271 + /* Note: According to documentation clearing TXIE while MSGVAL is set 265 272 * is not allowed, but works nicely on C/DCAN. And that lowers the I/O 266 273 * load significantly. 
267 274 */ ··· 276 285 { 277 286 struct c_can_priv *priv = netdev_priv(dev); 278 287 279 - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0); 280 - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0); 288 + priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), 0); 281 289 c_can_inval_tx_object(dev, iface, obj); 282 290 } 283 291 ··· 299 309 if (!rtr) 300 310 arb |= IF_ARB_TRANSMIT; 301 311 302 - /* 303 - * If we change the DIR bit, we need to invalidate the buffer 312 + /* If we change the DIR bit, we need to invalidate the buffer 304 313 * first, i.e. clear the MSGVAL flag in the arbiter. 305 314 */ 306 315 if (rtr != (bool)test_bit(idx, &priv->tx_dir)) { 307 - u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST; 316 + u32 obj = idx + priv->msg_obj_tx_first; 308 317 309 318 c_can_inval_msg_object(dev, iface, obj); 310 319 change_bit(idx, &priv->tx_dir); ··· 436 447 437 448 if (can_dropped_invalid_skb(dev, skb)) 438 449 return NETDEV_TX_OK; 439 - /* 440 - * This is not a FIFO. C/D_CAN sends out the buffers 450 + /* This is not a FIFO. C/D_CAN sends out the buffers 441 451 * prioritized. The lowest buffer number wins. 442 452 */ 443 453 idx = fls(atomic_read(&priv->tx_active)); 444 - obj = idx + C_CAN_MSG_OBJ_TX_FIRST; 454 + obj = idx + priv->msg_obj_tx_first; 445 455 446 456 /* If this is the last buffer, stop the xmit queue */ 447 - if (idx == C_CAN_MSG_OBJ_TX_NUM - 1) 457 + if (idx == priv->msg_obj_tx_num - 1) 448 458 netif_stop_queue(dev); 449 - /* 450 - * Store the message in the interface so we can call 459 + /* Store the message in the interface so we can call 451 460 * can_put_echo_skb(). We must do this before we enable 452 461 * transmit as we might race against do_tx(). 
453 462 */ ··· 454 467 can_put_echo_skb(skb, dev, idx, 0); 455 468 456 469 /* Update the active bits */ 457 - atomic_add((1 << idx), &priv->tx_active); 470 + atomic_add(BIT(idx), &priv->tx_active); 458 471 /* Start transmission */ 459 472 c_can_object_put(dev, IF_TX, obj, IF_COMM_TX); 460 473 ··· 498 511 reg_brpe = brpe & BRP_EXT_BRPE_MASK; 499 512 500 513 netdev_info(dev, 501 - "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); 514 + "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); 502 515 503 516 ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG); 504 517 ctrl_save &= ~CONTROL_INIT; ··· 514 527 return c_can_wait_for_ctrl_init(dev, priv, 0); 515 528 } 516 529 517 - /* 518 - * Configure C_CAN message objects for Tx and Rx purposes: 530 + /* Configure C_CAN message objects for Tx and Rx purposes: 519 531 * C_CAN provides a total of 32 message objects that can be configured 520 532 * either for Tx or Rx purposes. Here the first 16 message objects are used as 521 533 * a reception FIFO. The end of reception FIFO is signified by the EoB bit ··· 524 538 */ 525 539 static void c_can_configure_msg_objects(struct net_device *dev) 526 540 { 541 + struct c_can_priv *priv = netdev_priv(dev); 527 542 int i; 528 543 529 544 /* first invalidate all message objects */ 530 - for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++) 545 + for (i = priv->msg_obj_rx_first; i <= priv->msg_obj_num; i++) 531 546 c_can_inval_msg_object(dev, IF_RX, i); 532 547 533 548 /* setup receive message objects */ 534 - for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) 549 + for (i = priv->msg_obj_rx_first; i < priv->msg_obj_rx_last; i++) 535 550 c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV); 536 551 537 - c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0, 552 + c_can_setup_receive_object(dev, IF_RX, priv->msg_obj_rx_last, 0, 0, 538 553 IF_MCONT_RCV_EOB); 539 554 } 540 555 ··· 559 572 return 0; 560 573 } 561 574 562 - /* 563 - * Configure C_CAN 
chip: 575 + /* Configure C_CAN chip: 564 576 * - enable/disable auto-retransmission 565 577 * - set operating mode 566 578 * - configure message objects ··· 700 714 struct net_device_stats *stats = &dev->stats; 701 715 u32 idx, obj, pkts = 0, bytes = 0, pend, clr; 702 716 703 - clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG); 717 + if (priv->msg_obj_tx_last > 32) 718 + pend = priv->read_reg32(priv, C_CAN_INTPND3_REG); 719 + else 720 + pend = priv->read_reg(priv, C_CAN_INTPND2_REG); 721 + clr = pend; 704 722 705 723 while ((idx = ffs(pend))) { 706 724 idx--; 707 - pend &= ~(1 << idx); 708 - obj = idx + C_CAN_MSG_OBJ_TX_FIRST; 725 + pend &= ~BIT(idx); 726 + obj = idx + priv->msg_obj_tx_first; 727 + 728 + /* We use IF_RX interface instead of IF_TX because we 729 + * are called from c_can_poll(), which runs inside 730 + * NAPI. We are not trasmitting. 731 + */ 709 732 c_can_inval_tx_object(dev, IF_RX, obj); 710 733 can_get_echo_skb(dev, idx, NULL); 711 734 bytes += priv->dlc[idx]; ··· 724 729 /* Clear the bits in the tx_active mask */ 725 730 atomic_sub(clr, &priv->tx_active); 726 731 727 - if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1))) 732 + if (clr & BIT(priv->msg_obj_tx_num - 1)) 728 733 netif_wake_queue(dev); 729 734 730 735 if (pkts) { ··· 734 739 } 735 740 } 736 741 737 - /* 738 - * If we have a gap in the pending bits, that means we either 742 + /* If we have a gap in the pending bits, that means we either 739 743 * raced with the hardware or failed to readout all upper 740 744 * objects in the last run due to quota limit. 741 745 */ 742 - static u32 c_can_adjust_pending(u32 pend) 746 + static u32 c_can_adjust_pending(u32 pend, u32 rx_mask) 743 747 { 744 748 u32 weight, lasts; 745 749 746 - if (pend == RECEIVE_OBJECT_BITS) 750 + if (pend == rx_mask) 747 751 return pend; 748 752 749 - /* 750 - * If the last set bit is larger than the number of pending 753 + /* If the last set bit is larger than the number of pending 751 754 * bits we have a gap. 
752 755 */ 753 756 weight = hweight32(pend); ··· 755 762 if (lasts == weight) 756 763 return pend; 757 764 758 - /* 759 - * Find the first set bit after the gap. We walk backwards 765 + /* Find the first set bit after the gap. We walk backwards 760 766 * from the last set bit. 761 767 */ 762 - for (lasts--; pend & (1 << (lasts - 1)); lasts--); 768 + for (lasts--; pend & BIT(lasts - 1); lasts--) 769 + ; 763 770 764 - return pend & ~((1 << lasts) - 1); 771 + return pend & ~GENMASK(lasts - 1, 0); 765 772 } 766 773 767 774 static inline void c_can_rx_object_get(struct net_device *dev, 768 775 struct c_can_priv *priv, u32 obj) 769 776 { 770 - c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); 777 + c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); 771 778 } 772 779 773 780 static inline void c_can_rx_finalize(struct net_device *dev, ··· 796 803 continue; 797 804 } 798 805 799 - /* 800 - * This really should not happen, but this covers some 806 + /* This really should not happen, but this covers some 801 807 * odd HW behaviour. Do not remove that unless you 802 808 * want to brick your machine. 803 809 */ ··· 817 825 818 826 static inline u32 c_can_get_pending(struct c_can_priv *priv) 819 827 { 820 - u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); 828 + u32 pend; 829 + 830 + if (priv->msg_obj_rx_last > 16) 831 + pend = priv->read_reg32(priv, C_CAN_NEWDAT1_REG); 832 + else 833 + pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); 821 834 822 835 return pend; 823 836 } 824 837 825 - /* 826 - * theory of operation: 838 + /* theory of operation: 827 839 * 828 840 * c_can core saves a received CAN message into the first free message 829 841 * object it finds free (starting with the lowest). Bits NEWDAT and 830 842 * INTPND are set for this message object indicating that a new message 831 - * has arrived. To work-around this issue, we keep two groups of message 832 - * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. 843 + * has arrived. 
833 844 * 834 845 * We clear the newdat bit right away. 835 846 * ··· 843 848 struct c_can_priv *priv = netdev_priv(dev); 844 849 u32 pkts = 0, pend = 0, toread, n; 845 850 846 - /* 847 - * It is faster to read only one 16bit register. This is only possible 848 - * for a maximum number of 16 objects. 849 - */ 850 - BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16, 851 - "Implementation does not support more message objects than 16"); 852 - 853 851 while (quota > 0) { 854 852 if (!pend) { 855 853 pend = c_can_get_pending(priv); 856 854 if (!pend) 857 855 break; 858 - /* 859 - * If the pending field has a gap, handle the 856 + /* If the pending field has a gap, handle the 860 857 * bits above the gap first. 861 858 */ 862 - toread = c_can_adjust_pending(pend); 859 + toread = c_can_adjust_pending(pend, 860 + priv->msg_obj_rx_mask); 863 861 } else { 864 862 toread = pend; 865 863 } ··· 871 883 } 872 884 873 885 static int c_can_handle_state_change(struct net_device *dev, 874 - enum c_can_bus_error_types error_type) 886 + enum c_can_bus_error_types error_type) 875 887 { 876 888 unsigned int reg_err_counter; 877 889 unsigned int rx_err_passive; ··· 967 979 struct can_frame *cf; 968 980 struct sk_buff *skb; 969 981 970 - /* 971 - * early exit if no lec update or no error. 982 + /* early exit if no lec update or no error. 972 983 * no lec update means that no CAN bus event has been detected 973 984 * since CPU wrote 0x7 value to status reg. 
974 985 */ ··· 986 999 if (unlikely(!skb)) 987 1000 return 0; 988 1001 989 - /* 990 - * check for 'last error code' which tells us the 1002 + /* check for 'last error code' which tells us the 991 1003 * type of the last error to occur on the CAN bus 992 1004 */ 993 1005 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; ··· 1035 1049 1036 1050 /* Only read the status register if a status interrupt was pending */ 1037 1051 if (atomic_xchg(&priv->sie_pending, 0)) { 1038 - priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); 1052 + priv->last_status = priv->read_reg(priv, C_CAN_STS_REG); 1053 + curr = priv->last_status; 1039 1054 /* Ack status on C_CAN. D_CAN is self clearing */ 1040 1055 if (priv->type != BOSCH_D_CAN) 1041 1056 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); ··· 1134 1147 1135 1148 /* register interrupt handler */ 1136 1149 err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name, 1137 - dev); 1150 + dev); 1138 1151 if (err < 0) { 1139 1152 netdev_err(dev, "failed to request interrupt\n"); 1140 1153 goto exit_irq_fail; ··· 1182 1195 return 0; 1183 1196 } 1184 1197 1185 - struct net_device *alloc_c_can_dev(void) 1198 + struct net_device *alloc_c_can_dev(int msg_obj_num) 1186 1199 { 1187 1200 struct net_device *dev; 1188 1201 struct c_can_priv *priv; 1202 + int msg_obj_tx_num = msg_obj_num / 2; 1189 1203 1190 - dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM); 1204 + dev = alloc_candev(struct_size(priv, dlc, msg_obj_tx_num), 1205 + msg_obj_tx_num); 1191 1206 if (!dev) 1192 1207 return NULL; 1193 1208 1194 1209 priv = netdev_priv(dev); 1195 - netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); 1210 + priv->msg_obj_num = msg_obj_num; 1211 + priv->msg_obj_rx_num = msg_obj_num - msg_obj_tx_num; 1212 + priv->msg_obj_rx_first = 1; 1213 + priv->msg_obj_rx_last = 1214 + priv->msg_obj_rx_first + priv->msg_obj_rx_num - 1; 1215 + priv->msg_obj_rx_mask = GENMASK(priv->msg_obj_rx_num - 1, 0); 1216 + 1217 + 
priv->msg_obj_tx_num = msg_obj_tx_num; 1218 + priv->msg_obj_tx_first = priv->msg_obj_rx_last + 1; 1219 + priv->msg_obj_tx_last = 1220 + priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1; 1221 + 1222 + netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num); 1196 1223 1197 1224 priv->dev = dev; 1198 1225 priv->can.bittiming_const = &c_can_bittiming_const; ··· 1240 1239 /* Wait for the PDA bit to get set */ 1241 1240 time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); 1242 1241 while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && 1243 - time_after(time_out, jiffies)) 1242 + time_after(time_out, jiffies)) 1244 1243 cpu_relax(); 1245 1244 1246 1245 if (time_after(jiffies, time_out)) ··· 1281 1280 /* Wait for the PDA bit to get clear */ 1282 1281 time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); 1283 1282 while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && 1284 - time_after(time_out, jiffies)) 1283 + time_after(time_out, jiffies)) 1285 1284 cpu_relax(); 1286 1285 1287 1286 if (time_after(jiffies, time_out)) {
+18 -24
drivers/net/can/c_can/c_can.h
··· 22 22 #ifndef C_CAN_H 23 23 #define C_CAN_H 24 24 25 - /* message object split */ 26 - #define C_CAN_NO_OF_OBJECTS 32 27 - #define C_CAN_MSG_OBJ_RX_NUM 16 28 - #define C_CAN_MSG_OBJ_TX_NUM 16 29 - 30 - #define C_CAN_MSG_OBJ_RX_FIRST 1 31 - #define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \ 32 - C_CAN_MSG_OBJ_RX_NUM - 1) 33 - 34 - #define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1) 35 - #define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \ 36 - C_CAN_MSG_OBJ_TX_NUM - 1) 37 - 38 - #define C_CAN_MSG_OBJ_RX_SPLIT 9 39 - #define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) 40 - #define RECEIVE_OBJECT_BITS 0x0000ffff 41 - 42 25 enum reg { 43 26 C_CAN_CTRL_REG = 0, 44 27 C_CAN_CTRL_EX_REG, ··· 59 76 C_CAN_NEWDAT2_REG, 60 77 C_CAN_INTPND1_REG, 61 78 C_CAN_INTPND2_REG, 79 + C_CAN_INTPND3_REG, 62 80 C_CAN_MSGVAL1_REG, 63 81 C_CAN_MSGVAL2_REG, 64 82 C_CAN_FUNCTION_REG, ··· 121 137 [C_CAN_NEWDAT2_REG] = 0x9E, 122 138 [C_CAN_INTPND1_REG] = 0xB0, 123 139 [C_CAN_INTPND2_REG] = 0xB2, 140 + [C_CAN_INTPND3_REG] = 0xB4, 124 141 [C_CAN_MSGVAL1_REG] = 0xC4, 125 142 [C_CAN_MSGVAL2_REG] = 0xC6, 126 143 [C_CAN_IF1_COMREQ_REG] = 0x100, ··· 161 176 162 177 struct c_can_driver_data { 163 178 enum c_can_dev_id id; 179 + unsigned int msg_obj_num; 164 180 165 181 /* RAMINIT register description. Optional. 
*/ 166 182 const struct raminit_bits *raminit_bits; /* Array of START/DONE bit positions */ ··· 183 197 struct napi_struct napi; 184 198 struct net_device *dev; 185 199 struct device *device; 200 + unsigned int msg_obj_num; 201 + unsigned int msg_obj_rx_num; 202 + unsigned int msg_obj_tx_num; 203 + unsigned int msg_obj_rx_first; 204 + unsigned int msg_obj_rx_last; 205 + unsigned int msg_obj_tx_first; 206 + unsigned int msg_obj_tx_last; 207 + u32 msg_obj_rx_mask; 186 208 atomic_t tx_active; 187 209 atomic_t sie_pending; 188 210 unsigned long tx_dir; 189 211 int last_status; 190 - u16 (*read_reg) (const struct c_can_priv *priv, enum reg index); 191 - void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val); 192 - u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index); 193 - void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val); 212 + u16 (*read_reg)(const struct c_can_priv *priv, enum reg index); 213 + void (*write_reg)(const struct c_can_priv *priv, enum reg index, u16 val); 214 + u32 (*read_reg32)(const struct c_can_priv *priv, enum reg index); 215 + void (*write_reg32)(const struct c_can_priv *priv, enum reg index, u32 val); 194 216 void __iomem *base; 195 217 const u16 *regs; 196 218 void *priv; /* for board-specific data */ 197 219 enum c_can_dev_id type; 198 220 struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */ 199 - void (*raminit) (const struct c_can_priv *priv, bool enable); 221 + void (*raminit)(const struct c_can_priv *priv, bool enable); 200 222 u32 comm_rcv_high; 201 223 u32 rxmasked; 202 - u32 dlc[C_CAN_MSG_OBJ_TX_NUM]; 224 + u32 dlc[]; 203 225 }; 204 226 205 - struct net_device *alloc_c_can_dev(void); 227 + struct net_device *alloc_c_can_dev(int msg_obj_num); 206 228 void free_c_can_dev(struct net_device *dev); 207 229 int register_c_can_dev(struct net_device *dev); 208 230 void unregister_c_can_dev(struct net_device *dev);
+17 -14
drivers/net/can/c_can/c_can_pci.c
··· 31 31 struct c_can_pci_data { 32 32 /* Specify if is C_CAN or D_CAN */ 33 33 enum c_can_dev_id type; 34 + /* Number of message objects */ 35 + unsigned int msg_obj_num; 34 36 /* Set the register alignment in the memory */ 35 37 enum c_can_pci_reg_align reg_align; 36 38 /* Set the frequency */ ··· 43 41 void (*init)(const struct c_can_priv *priv, bool enable); 44 42 }; 45 43 46 - /* 47 - * 16-bit c_can registers can be arranged differently in the memory 44 + /* 16-bit c_can registers can be arranged differently in the memory 48 45 * architecture of different implementations. For example: 16-bit 49 46 * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. 50 47 * Handle the same by providing a common read/write interface. 51 48 */ 52 49 static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv, 53 - enum reg index) 50 + enum reg index) 54 51 { 55 52 return readw(priv->base + priv->regs[index]); 56 53 } 57 54 58 55 static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv, 59 - enum reg index, u16 val) 56 + enum reg index, u16 val) 60 57 { 61 58 writew(val, priv->base + priv->regs[index]); 62 59 } 63 60 64 61 static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv, 65 - enum reg index) 62 + enum reg index) 66 63 { 67 64 return readw(priv->base + 2 * priv->regs[index]); 68 65 } 69 66 70 67 static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv, 71 - enum reg index, u16 val) 68 + enum reg index, u16 val) 72 69 { 73 70 writew(val, priv->base + 2 * priv->regs[index]); 74 71 } ··· 89 88 u32 val; 90 89 91 90 val = priv->read_reg(priv, index); 92 - val |= ((u32) priv->read_reg(priv, index + 1)) << 16; 91 + val |= ((u32)priv->read_reg(priv, index + 1)) << 16; 93 92 94 93 return val; 95 94 } 96 95 97 96 static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index, 98 - u32 val) 97 + u32 val) 99 98 { 100 99 priv->write_reg(priv, index + 1, val >> 16); 101 
100 priv->write_reg(priv, index, val); ··· 143 142 pci_resource_len(pdev, c_can_pci_data->bar)); 144 143 if (!addr) { 145 144 dev_err(&pdev->dev, 146 - "device has no PCI memory resources, " 147 - "failing adapter\n"); 145 + "device has no PCI memory resources, failing adapter\n"); 148 146 ret = -ENOMEM; 149 147 goto out_release_regions; 150 148 } 151 149 152 150 /* allocate the c_can device */ 153 - dev = alloc_c_can_dev(); 151 + dev = alloc_c_can_dev(c_can_pci_data->msg_obj_num); 154 152 if (!dev) { 155 153 ret = -ENOMEM; 156 154 goto out_iounmap; ··· 217 217 } 218 218 219 219 dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", 220 - KBUILD_MODNAME, priv->regs, dev->irq); 220 + KBUILD_MODNAME, priv->regs, dev->irq); 221 221 222 222 return 0; 223 223 ··· 252 252 pci_disable_device(pdev); 253 253 } 254 254 255 - static const struct c_can_pci_data c_can_sta2x11= { 255 + static const struct c_can_pci_data c_can_sta2x11 = { 256 256 .type = BOSCH_C_CAN, 257 + .msg_obj_num = 32, 257 258 .reg_align = C_CAN_REG_ALIGN_32, 258 259 .freq = 52000000, /* 52 Mhz */ 259 260 .bar = 0, ··· 262 261 263 262 static const struct c_can_pci_data c_can_pch = { 264 263 .type = BOSCH_C_CAN, 264 + .msg_obj_num = 32, 265 265 .reg_align = C_CAN_REG_32, 266 266 .freq = 50000000, /* 50 MHz */ 267 267 .init = c_can_pci_reset_pch, ··· 271 269 272 270 #define C_CAN_ID(_vend, _dev, _driverdata) { \ 273 271 PCI_DEVICE(_vend, _dev), \ 274 - .driver_data = (unsigned long)&_driverdata, \ 272 + .driver_data = (unsigned long)&(_driverdata), \ 275 273 } 276 274 277 275 static const struct pci_device_id c_can_pci_tbl[] = { ··· 281 279 c_can_pch), 282 280 {}, 283 281 }; 282 + 284 283 static struct pci_driver c_can_pci_driver = { 285 284 .name = KBUILD_MODNAME, 286 285 .id_table = c_can_pci_tbl,
+5 -1
drivers/net/can/c_can/c_can_platform.c
··· 193 193 194 194 static const struct c_can_driver_data c_can_drvdata = { 195 195 .id = BOSCH_C_CAN, 196 + .msg_obj_num = 32, 196 197 }; 197 198 198 199 static const struct c_can_driver_data d_can_drvdata = { 199 200 .id = BOSCH_D_CAN, 201 + .msg_obj_num = 32, 200 202 }; 201 203 202 204 static const struct raminit_bits dra7_raminit_bits[] = { ··· 208 206 209 207 static const struct c_can_driver_data dra7_dcan_drvdata = { 210 208 .id = BOSCH_D_CAN, 209 + .msg_obj_num = 64, 211 210 .raminit_num = ARRAY_SIZE(dra7_raminit_bits), 212 211 .raminit_bits = dra7_raminit_bits, 213 212 .raminit_pulse = true, ··· 221 218 222 219 static const struct c_can_driver_data am3352_dcan_drvdata = { 223 220 .id = BOSCH_D_CAN, 221 + .msg_obj_num = 64, 224 222 .raminit_num = ARRAY_SIZE(am3352_raminit_bits), 225 223 .raminit_bits = am3352_raminit_bits, 226 224 }; ··· 298 294 } 299 295 300 296 /* allocate the c_can device */ 301 - dev = alloc_c_can_dev(); 297 + dev = alloc_c_can_dev(drvdata->msg_obj_num); 302 298 if (!dev) { 303 299 ret = -ENOMEM; 304 300 goto exit;
+26 -2
drivers/net/can/dev/bittiming.c
··· 81 81 if (bt->sample_point) { 82 82 sample_point_nominal = bt->sample_point; 83 83 } else { 84 - if (bt->bitrate > 800000) 84 + if (bt->bitrate > 800 * CAN_KBPS) 85 85 sample_point_nominal = 750; 86 - else if (bt->bitrate > 500000) 86 + else if (bt->bitrate > 500 * CAN_KBPS) 87 87 sample_point_nominal = 800; 88 88 else 89 89 sample_point_nominal = 875; ··· 173 173 (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2)); 174 174 175 175 return 0; 176 + } 177 + 178 + void can_calc_tdco(struct net_device *dev) 179 + { 180 + struct can_priv *priv = netdev_priv(dev); 181 + const struct can_bittiming *dbt = &priv->data_bittiming; 182 + struct can_tdc *tdc = &priv->tdc; 183 + const struct can_tdc_const *tdc_const = priv->tdc_const; 184 + 185 + if (!tdc_const) 186 + return; 187 + 188 + /* As specified in ISO 11898-1 section 11.3.3 "Transmitter 189 + * delay compensation" (TDC) is only applicable if data BRP is 190 + * one or two. 191 + */ 192 + if (dbt->brp == 1 || dbt->brp == 2) { 193 + /* Reuse "normal" sample point and convert it to time quanta */ 194 + u32 sample_point_in_tq = can_bit_time(dbt) * dbt->sample_point / 1000; 195 + 196 + tdc->tdco = min(sample_point_in_tq, tdc_const->tdco_max); 197 + } else { 198 + tdc->tdco = 0; 199 + } 176 200 } 177 201 #endif /* CONFIG_CAN_CALC_BITTIMING */ 178 202
+13 -14
drivers/net/can/dev/netlink.c
··· 8 8 #include <net/rtnetlink.h> 9 9 10 10 static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { 11 - [IFLA_CAN_STATE] = { .type = NLA_U32 }, 12 - [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, 13 - [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 }, 14 - [IFLA_CAN_RESTART] = { .type = NLA_U32 }, 15 - [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) }, 16 - [IFLA_CAN_BITTIMING_CONST] 17 - = { .len = sizeof(struct can_bittiming_const) }, 18 - [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, 19 - [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, 20 - [IFLA_CAN_DATA_BITTIMING] 21 - = { .len = sizeof(struct can_bittiming) }, 22 - [IFLA_CAN_DATA_BITTIMING_CONST] 23 - = { .len = sizeof(struct can_bittiming_const) }, 24 - [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, 11 + [IFLA_CAN_STATE] = { .type = NLA_U32 }, 12 + [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, 13 + [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 }, 14 + [IFLA_CAN_RESTART] = { .type = NLA_U32 }, 15 + [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) }, 16 + [IFLA_CAN_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, 17 + [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) }, 18 + [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) }, 19 + [IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) }, 20 + [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, 21 + [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, 25 22 }; 26 23 27 24 static int can_validate(struct nlattr *tb[], struct nlattr *data[], ··· 185 188 } 186 189 187 190 memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); 191 + 192 + can_calc_tdco(dev); 188 193 189 194 if (priv->do_set_data_bittiming) { 190 195 /* Finally, set the bit-timing registers */
+22 -5
drivers/net/can/dev/skb.c
··· 45 45 BUG_ON(idx >= priv->echo_skb_max); 46 46 47 47 /* check flag whether this packet has to be looped back */ 48 - if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK || 48 + if (!(dev->flags & IFF_ECHO) || 49 49 (skb->protocol != htons(ETH_P_CAN) && 50 50 skb->protocol != htons(ETH_P_CANFD))) { 51 51 kfree_skb(skb); ··· 58 58 return -ENOMEM; 59 59 60 60 /* make settings for echo to reduce code in irq context */ 61 - skb->pkt_type = PACKET_BROADCAST; 62 61 skb->ip_summed = CHECKSUM_UNNECESSARY; 63 62 skb->dev = dev; 64 63 ··· 110 111 111 112 priv->echo_skb[idx] = NULL; 112 113 114 + if (skb->pkt_type == PACKET_LOOPBACK) { 115 + skb->pkt_type = PACKET_BROADCAST; 116 + } else { 117 + dev_consume_skb_any(skb); 118 + return NULL; 119 + } 120 + 113 121 return skb; 114 122 } 115 123 ··· 153 147 * 154 148 * The function is typically called when TX failed. 155 149 */ 156 - void can_free_echo_skb(struct net_device *dev, unsigned int idx) 150 + void can_free_echo_skb(struct net_device *dev, unsigned int idx, 151 + unsigned int *frame_len_ptr) 157 152 { 158 153 struct can_priv *priv = netdev_priv(dev); 159 154 160 - BUG_ON(idx >= priv->echo_skb_max); 155 + if (idx >= priv->echo_skb_max) { 156 + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", 157 + __func__, idx, priv->echo_skb_max); 158 + return; 159 + } 161 160 162 161 if (priv->echo_skb[idx]) { 163 - dev_kfree_skb_any(priv->echo_skb[idx]); 162 + struct sk_buff *skb = priv->echo_skb[idx]; 163 + struct can_skb_priv *can_skb_priv = can_skb_prv(skb); 164 + 165 + if (frame_len_ptr) 166 + *frame_len_ptr = can_skb_priv->frame_len; 167 + 168 + dev_kfree_skb_any(skb); 164 169 priv->echo_skb[idx] = NULL; 165 170 } 166 171 }
+1 -1
drivers/net/can/grcan.c
··· 520 520 can_get_echo_skb(dev, i, NULL); 521 521 } else { 522 522 /* For cleanup of untransmitted messages */ 523 - can_free_echo_skb(dev, i); 523 + can_free_echo_skb(dev, i, NULL); 524 524 } 525 525 526 526 priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
+138 -22
drivers/net/can/m_can/m_can.c
··· 8 8 * https://github.com/linux-can/can-doc/tree/master/m_can 9 9 */ 10 10 11 + #include <linux/bitfield.h> 11 12 #include <linux/interrupt.h> 12 13 #include <linux/io.h> 13 14 #include <linux/kernel.h> ··· 148 147 #define NBTP_NTSEG1_MASK (0xff << NBTP_NTSEG1_SHIFT) 149 148 #define NBTP_NTSEG2_SHIFT 0 150 149 #define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT) 150 + 151 + /* Timestamp Counter Configuration Register (TSCC) */ 152 + #define TSCC_TCP_MASK GENMASK(19, 16) 153 + #define TSCC_TSS_MASK GENMASK(1, 0) 154 + #define TSCC_TSS_DISABLE 0x0 155 + #define TSCC_TSS_INTERNAL 0x1 156 + #define TSCC_TSS_EXTERNAL 0x2 157 + 158 + /* Timestamp Counter Value Register (TSCV) */ 159 + #define TSCV_TSC_MASK GENMASK(15, 0) 151 160 152 161 /* Error Counter Register(ECR) */ 153 162 #define ECR_RP BIT(15) ··· 313 302 #define RX_BUF_ANMF BIT(31) 314 303 #define RX_BUF_FDF BIT(21) 315 304 #define RX_BUF_BRS BIT(20) 305 + #define RX_BUF_RXTS_MASK GENMASK(15, 0) 316 306 317 307 /* Tx Buffer Element */ 318 308 /* T0 */ ··· 331 319 /* E1 */ 332 320 #define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT 333 321 #define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT) 322 + #define TX_EVENT_TXTS_MASK GENMASK(15, 0) 334 323 335 324 static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg) 336 325 { ··· 426 413 m_can_write(cdev, M_CAN_ILE, 0x0); 427 414 } 428 415 416 + /* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit 417 + * width. 
418 + */ 419 + static u32 m_can_get_timestamp(struct m_can_classdev *cdev) 420 + { 421 + u32 tscv; 422 + u32 tsc; 423 + 424 + tscv = m_can_read(cdev, M_CAN_TSCV); 425 + tsc = FIELD_GET(TSCV_TSC_MASK, tscv); 426 + 427 + return (tsc << 16); 428 + } 429 + 429 430 static void m_can_clean(struct net_device *net) 430 431 { 431 432 struct m_can_classdev *cdev = netdev_priv(net); ··· 452 425 putidx = ((m_can_read(cdev, M_CAN_TXFQS) & 453 426 TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT); 454 427 455 - can_free_echo_skb(cdev->net, putidx); 428 + can_free_echo_skb(cdev->net, putidx, NULL); 456 429 cdev->tx_skb = NULL; 457 430 } 431 + } 432 + 433 + /* For peripherals, pass skb to rx-offload, which will push skb from 434 + * napi. For non-peripherals, RX is done in napi already, so push 435 + * directly. timestamp is used to ensure good skb ordering in 436 + * rx-offload and is ignored for non-peripherals. 437 + */ 438 + static void m_can_receive_skb(struct m_can_classdev *cdev, 439 + struct sk_buff *skb, 440 + u32 timestamp) 441 + { 442 + if (cdev->is_peripheral) 443 + can_rx_offload_queue_sorted(&cdev->offload, skb, timestamp); 444 + else 445 + netif_receive_skb(skb); 458 446 } 459 447 460 448 static void m_can_read_fifo(struct net_device *dev, u32 rxfs) ··· 479 437 struct canfd_frame *cf; 480 438 struct sk_buff *skb; 481 439 u32 id, fgi, dlc; 440 + u32 timestamp = 0; 482 441 int i; 483 442 484 443 /* calculate the fifo get index for where to read data */ ··· 528 485 stats->rx_packets++; 529 486 stats->rx_bytes += cf->len; 530 487 531 - netif_receive_skb(skb); 488 + timestamp = FIELD_GET(RX_BUF_RXTS_MASK, dlc); 489 + 490 + m_can_receive_skb(cdev, skb, timestamp); 532 491 } 533 492 534 493 static int m_can_do_rx_poll(struct net_device *dev, int quota) ··· 561 516 562 517 static int m_can_handle_lost_msg(struct net_device *dev) 563 518 { 519 + struct m_can_classdev *cdev = netdev_priv(dev); 564 520 struct net_device_stats *stats = &dev->stats; 565 521 struct sk_buff *skb; 566 522 
struct can_frame *frame; 523 + u32 timestamp = 0; 567 524 568 525 netdev_err(dev, "msg lost in rxf0\n"); 569 526 ··· 579 532 frame->can_id |= CAN_ERR_CRTL; 580 533 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 581 534 582 - netif_receive_skb(skb); 535 + if (cdev->is_peripheral) 536 + timestamp = m_can_get_timestamp(cdev); 537 + 538 + m_can_receive_skb(cdev, skb, timestamp); 583 539 584 540 return 1; 585 541 } ··· 594 544 struct net_device_stats *stats = &dev->stats; 595 545 struct can_frame *cf; 596 546 struct sk_buff *skb; 547 + u32 timestamp = 0; 597 548 598 549 cdev->can.can_stats.bus_error++; 599 550 stats->rx_errors++; ··· 640 589 641 590 stats->rx_packets++; 642 591 stats->rx_bytes += cf->len; 643 - netif_receive_skb(skb); 592 + 593 + if (cdev->is_peripheral) 594 + timestamp = m_can_get_timestamp(cdev); 595 + 596 + m_can_receive_skb(cdev, skb, timestamp); 644 597 645 598 return 1; 646 599 } ··· 702 647 struct sk_buff *skb; 703 648 struct can_berr_counter bec; 704 649 unsigned int ecr; 650 + u32 timestamp = 0; 705 651 706 652 switch (new_state) { 707 653 case CAN_STATE_ERROR_WARNING: ··· 764 708 765 709 stats->rx_packets++; 766 710 stats->rx_bytes += cf->len; 767 - netif_receive_skb(skb); 711 + 712 + if (cdev->is_peripheral) 713 + timestamp = m_can_get_timestamp(cdev); 714 + 715 + m_can_receive_skb(cdev, skb, timestamp); 768 716 769 717 return 1; 770 718 } ··· 833 773 struct m_can_classdev *cdev = netdev_priv(dev); 834 774 struct can_frame *cf; 835 775 struct sk_buff *skb; 776 + u32 timestamp = 0; 836 777 837 778 /* propagate the error condition to the CAN stack */ 838 779 skb = alloc_can_err_skb(dev, &cf); ··· 855 794 netdev_dbg(dev, "allocation of skb failed\n"); 856 795 return 0; 857 796 } 858 - netif_receive_skb(skb); 797 + 798 + if (cdev->is_peripheral) 799 + timestamp = m_can_get_timestamp(cdev); 800 + 801 + m_can_receive_skb(cdev, skb, timestamp); 859 802 860 803 return 1; 861 804 } ··· 960 895 return work_done; 961 896 } 962 897 898 + /* Echo tx skb and 
update net stats. Peripherals use rx-offload for 899 + * echo. timestamp is used for peripherals to ensure correct ordering 900 + * by rx-offload, and is ignored for non-peripherals. 901 + */ 902 + static void m_can_tx_update_stats(struct m_can_classdev *cdev, 903 + unsigned int msg_mark, 904 + u32 timestamp) 905 + { 906 + struct net_device *dev = cdev->net; 907 + struct net_device_stats *stats = &dev->stats; 908 + 909 + if (cdev->is_peripheral) 910 + stats->tx_bytes += 911 + can_rx_offload_get_echo_skb(&cdev->offload, 912 + msg_mark, 913 + timestamp, 914 + NULL); 915 + else 916 + stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL); 917 + 918 + stats->tx_packets++; 919 + } 920 + 963 921 static void m_can_echo_tx_event(struct net_device *dev) 964 922 { 965 923 u32 txe_count = 0; ··· 992 904 unsigned int msg_mark; 993 905 994 906 struct m_can_classdev *cdev = netdev_priv(dev); 995 - struct net_device_stats *stats = &dev->stats; 996 907 997 908 /* read tx event fifo status */ 998 909 m_can_txefs = m_can_read(cdev, M_CAN_TXEFS); ··· 1001 914 1002 915 /* Get and process all sent elements */ 1003 916 for (i = 0; i < txe_count; i++) { 917 + u32 txe, timestamp = 0; 918 + 1004 919 /* retrieve get index */ 1005 920 fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >> 1006 921 TXEFS_EFGI_SHIFT; 1007 922 1008 - /* get message marker */ 1009 - msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) & 1010 - TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; 923 + /* get message marker, timestamp */ 924 + txe = m_can_txe_fifo_read(cdev, fgi, 4); 925 + msg_mark = (txe & TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT; 926 + timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe); 1011 927 1012 928 /* ack txe element */ 1013 929 m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK & 1014 930 (fgi << TXEFA_EFAI_SHIFT))); 1015 931 1016 932 /* update stats */ 1017 - stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL); 1018 - stats->tx_packets++; 933 + m_can_tx_update_stats(cdev, msg_mark, timestamp); 1019 934 } 
1020 935 } 1021 936 ··· 1025 936 { 1026 937 struct net_device *dev = (struct net_device *)dev_id; 1027 938 struct m_can_classdev *cdev = netdev_priv(dev); 1028 - struct net_device_stats *stats = &dev->stats; 1029 939 u32 ir; 1030 940 1031 941 if (pm_runtime_suspended(cdev->dev)) ··· 1057 969 if (cdev->version == 30) { 1058 970 if (ir & IR_TC) { 1059 971 /* Transmission Complete Interrupt*/ 1060 - stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); 1061 - stats->tx_packets++; 972 + u32 timestamp = 0; 973 + 974 + if (cdev->is_peripheral) 975 + timestamp = m_can_get_timestamp(cdev); 976 + m_can_tx_update_stats(cdev, 0, timestamp); 977 + 1062 978 can_led_event(dev, CAN_LED_EVENT_TX); 1063 979 netif_wake_queue(dev); 1064 980 } ··· 1200 1108 * - >= v3.1.x: TX FIFO is used 1201 1109 * - configure mode 1202 1110 * - setup bittiming 1111 + * - configure timestamp generation 1203 1112 */ 1204 1113 static void m_can_chip_config(struct net_device *dev) 1205 1114 { ··· 1311 1218 1312 1219 /* set bittiming params */ 1313 1220 m_can_set_bittiming(dev); 1221 + 1222 + /* enable internal timestamp generation, with a prescalar of 16. 
The 1223 + * prescalar is applied to the nominal bit timing */ 1224 + m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf)); 1314 1225 1315 1226 m_can_config_endisable(cdev, false); 1316 1227 ··· 1523 1426 cdev->tx_wq = NULL; 1524 1427 } 1525 1428 1429 + if (cdev->is_peripheral) 1430 + can_rx_offload_disable(&cdev->offload); 1431 + 1526 1432 close_candev(dev); 1527 1433 can_led_event(dev, CAN_LED_EVENT_STOP); 1528 1434 ··· 1724 1624 goto exit_disable_clks; 1725 1625 } 1726 1626 1627 + if (cdev->is_peripheral) 1628 + can_rx_offload_enable(&cdev->offload); 1629 + 1727 1630 /* register interrupt handler */ 1728 1631 if (cdev->is_peripheral) { 1729 1632 cdev->tx_skb = NULL; ··· 1768 1665 if (cdev->is_peripheral) 1769 1666 destroy_workqueue(cdev->tx_wq); 1770 1667 out_wq_fail: 1668 + if (cdev->is_peripheral) 1669 + can_rx_offload_disable(&cdev->offload); 1771 1670 close_candev(dev); 1772 1671 exit_disable_clks: 1773 1672 m_can_clk_stop(cdev); ··· 1892 1787 } 1893 1788 1894 1789 class_dev = netdev_priv(net_dev); 1895 - if (!class_dev) { 1896 - dev_err(dev, "Failed to init netdev cdevate"); 1897 - goto out; 1898 - } 1899 - 1900 1790 class_dev->net = net_dev; 1901 1791 class_dev->dev = dev; 1902 1792 SET_NETDEV_DEV(net_dev, dev); ··· 1918 1818 return ret; 1919 1819 } 1920 1820 1821 + if (cdev->is_peripheral) { 1822 + ret = can_rx_offload_add_manual(cdev->net, &cdev->offload, 1823 + M_CAN_NAPI_WEIGHT); 1824 + if (ret) 1825 + goto clk_disable; 1826 + } 1827 + 1921 1828 ret = m_can_dev_setup(cdev); 1922 1829 if (ret) 1923 - goto clk_disable; 1830 + goto rx_offload_del; 1924 1831 1925 1832 ret = register_m_can_dev(cdev->net); 1926 1833 if (ret) { 1927 1834 dev_err(cdev->dev, "registering %s failed (err=%d)\n", 1928 1835 cdev->net->name, ret); 1929 - goto clk_disable; 1836 + goto rx_offload_del; 1930 1837 } 1931 1838 1932 1839 devm_can_led_init(cdev->net); ··· 1946 1839 /* Probe finished 1947 1840 * Stop clocks. 
They will be reactivated once the M_CAN device is opened 1948 1841 */ 1842 + m_can_clk_stop(cdev); 1843 + 1844 + return 0; 1845 + 1846 + rx_offload_del: 1847 + if (cdev->is_peripheral) 1848 + can_rx_offload_del(&cdev->offload); 1949 1849 clk_disable: 1950 1850 m_can_clk_stop(cdev); 1951 1851 ··· 1962 1848 1963 1849 void m_can_class_unregister(struct m_can_classdev *cdev) 1964 1850 { 1851 + if (cdev->is_peripheral) 1852 + can_rx_offload_del(&cdev->offload); 1965 1853 unregister_candev(cdev->net); 1966 1854 } 1967 1855 EXPORT_SYMBOL_GPL(m_can_class_unregister);
+2
drivers/net/can/m_can/m_can.h
··· 8 8 9 9 #include <linux/can/core.h> 10 10 #include <linux/can/led.h> 11 + #include <linux/can/rx-offload.h> 11 12 #include <linux/completion.h> 12 13 #include <linux/device.h> 13 14 #include <linux/dma-mapping.h> ··· 72 71 73 72 struct m_can_classdev { 74 73 struct can_priv can; 74 + struct can_rx_offload offload; 75 75 struct napi_struct napi; 76 76 struct net_device *net; 77 77 struct device *dev;
-1
drivers/net/can/m_can/tcan4x5x.h
··· 11 11 12 12 #include <linux/gpio/consumer.h> 13 13 #include <linux/regmap.h> 14 - #include <linux/regmap.h> 15 14 #include <linux/regulator/consumer.h> 16 15 #include <linux/spi/spi.h> 17 16
+1 -1
drivers/net/can/rcar/rcar_can.c
··· 217 217 int i; 218 218 219 219 for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++) 220 - can_free_echo_skb(ndev, i); 220 + can_free_echo_skb(ndev, i, NULL); 221 221 } 222 222 223 223 static void rcar_can_error(struct net_device *ndev)
+1 -1
drivers/net/can/rcar/rcar_canfd.c
··· 617 617 u32 i; 618 618 619 619 for (i = 0; i < RCANFD_FIFO_DEPTH; i++) 620 - can_free_echo_skb(ndev, i); 620 + can_free_echo_skb(ndev, i, NULL); 621 621 } 622 622 623 623 static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
+1 -1
drivers/net/can/sja1000/sja1000.c
··· 525 525 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && 526 526 !(status & SR_TCS)) { 527 527 stats->tx_errors++; 528 - can_free_echo_skb(dev, 0); 528 + can_free_echo_skb(dev, 0, NULL); 529 529 } else { 530 530 /* transmission complete */ 531 531 stats->tx_bytes +=
+1 -1
drivers/net/can/spi/hi311x.c
··· 179 179 net->stats.tx_errors++; 180 180 dev_kfree_skb(priv->tx_skb); 181 181 if (priv->tx_len) 182 - can_free_echo_skb(priv->net, 0); 182 + can_free_echo_skb(priv->net, 0, NULL); 183 183 priv->tx_skb = NULL; 184 184 priv->tx_len = 0; 185 185 }
+1 -1
drivers/net/can/spi/mcp251x.c
··· 276 276 net->stats.tx_errors++; 277 277 dev_kfree_skb(priv->tx_skb); 278 278 if (priv->tx_len) 279 - can_free_echo_skb(priv->net, 0); 279 + can_free_echo_skb(priv->net, 0, NULL); 280 280 priv->tx_skb = NULL; 281 281 priv->tx_len = 0; 282 282 }
+1
drivers/net/can/spi/mcp251xfd/Kconfig
··· 3 3 config CAN_MCP251XFD 4 4 tristate "Microchip MCP251xFD SPI CAN controllers" 5 5 select REGMAP 6 + select WANT_DEV_COREDUMP 6 7 help 7 8 Driver for the Microchip MCP251XFD SPI FD-CAN controller 8 9 family.
+3
drivers/net/can/spi/mcp251xfd/Makefile
··· 6 6 mcp251xfd-objs += mcp251xfd-core.o 7 7 mcp251xfd-objs += mcp251xfd-crc16.o 8 8 mcp251xfd-objs += mcp251xfd-regmap.o 9 + mcp251xfd-objs += mcp251xfd-timestamp.o 10 + 11 + mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o
+60 -44
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
··· 2 2 // 3 3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver 4 4 // 5 - // Copyright (c) 2019, 2020 Pengutronix, 6 - // Marc Kleine-Budde <kernel@pengutronix.de> 5 + // Copyright (c) 2019, 2020, 2021 Pengutronix, 6 + // Marc Kleine-Budde <kernel@pengutronix.de> 7 7 // 8 8 // Based on: 9 9 // ··· 16 16 #include <linux/clk.h> 17 17 #include <linux/device.h> 18 18 #include <linux/module.h> 19 - #include <linux/netdevice.h> 20 19 #include <linux/of.h> 21 20 #include <linux/of_device.h> 22 21 #include <linux/pm_runtime.h> ··· 329 330 struct mcp251xfd_tx_ring *tx_ring; 330 331 struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL; 331 332 struct mcp251xfd_tx_obj *tx_obj; 333 + struct spi_transfer *xfer; 332 334 u32 val; 333 335 u16 addr; 334 336 u8 len; ··· 347 347 addr, val, val); 348 348 349 349 for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) { 350 - struct spi_transfer *xfer; 351 - 352 350 xfer = &tef_ring->uinc_xfer[j]; 353 351 xfer->tx_buf = &tef_ring->uinc_buf; 354 352 xfer->len = len; ··· 354 356 xfer->cs_change_delay.value = 0; 355 357 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 356 358 } 359 + 360 + /* "cs_change == 1" on the last transfer results in an active 361 + * chip select after the complete SPI message. This causes the 362 + * controller to interpret the next register access as 363 + * data. Set "cs_change" of the last transfer to "0" to 364 + * properly deactivate the chip select at the end of the 365 + * message. 
366 + */ 367 + xfer->cs_change = 0; 357 368 358 369 /* TX */ 359 370 tx_ring = priv->tx; ··· 404 397 addr, val, val); 405 398 406 399 for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { 407 - struct spi_transfer *xfer; 408 - 409 400 xfer = &rx_ring->uinc_xfer[j]; 410 401 xfer->tx_buf = &rx_ring->uinc_buf; 411 402 xfer->len = len; ··· 411 406 xfer->cs_change_delay.value = 0; 412 407 xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; 413 408 } 409 + 410 + /* "cs_change == 1" on the last transfer results in an 411 + * active chip select after the complete SPI 412 + * message. This causes the controller to interpret 413 + * the next register access as data. Set "cs_change" 414 + * of the last transfer to "0" to properly deactivate 415 + * the chip select at the end of the message. 416 + */ 417 + xfer->cs_change = 0; 414 418 } 415 419 } 416 420 ··· 1111 1097 return 0; 1112 1098 1113 1099 out_chip_stop: 1100 + mcp251xfd_dump(priv); 1114 1101 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 1115 1102 1116 1103 return err; ··· 1265 1250 const struct mcp251xfd_hw_tef_obj *hw_tef_obj) 1266 1251 { 1267 1252 struct net_device_stats *stats = &priv->ndev->stats; 1268 - u32 seq, seq_masked, tef_tail_masked; 1253 + struct sk_buff *skb; 1254 + u32 seq, seq_masked, tef_tail_masked, tef_tail; 1269 1255 1270 1256 seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, 1271 1257 hw_tef_obj->flags); ··· 1282 1266 if (seq_masked != tef_tail_masked) 1283 1267 return mcp251xfd_handle_tefif_recover(priv, seq); 1284 1268 1269 + tef_tail = mcp251xfd_get_tef_tail(priv); 1270 + skb = priv->can.echo_skb[tef_tail]; 1271 + if (skb) 1272 + mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts); 1285 1273 stats->tx_bytes += 1286 1274 can_rx_offload_get_echo_skb(&priv->offload, 1287 - mcp251xfd_get_tef_tail(priv), 1275 + tef_tail, 1288 1276 hw_tef_obj->ts, NULL); 1289 1277 stats->tx_packets++; 1290 1278 priv->tef->tail++; ··· 1385 1365 if (len) { 1386 1366 struct mcp251xfd_tef_ring *ring = priv->tef; 
1387 1367 struct mcp251xfd_tx_ring *tx_ring = priv->tx; 1388 - struct spi_transfer *last_xfer; 1368 + int offset; 1389 1369 1390 1370 /* Increment the TEF FIFO tail pointer 'len' times in 1391 1371 * a single SPI message. 1392 1372 * 1393 1373 * Note: 1394 - * 1395 - * "cs_change == 1" on the last transfer results in an 1396 - * active chip select after the complete SPI 1397 - * message. This causes the controller to interpret 1398 - * the next register access as data. Temporary set 1399 - * "cs_change" of the last transfer to "0" to properly 1400 - * deactivate the chip select at the end of the 1401 - * message. 1374 + * Calculate offset, so that the SPI transfer ends on 1375 + * the last message of the uinc_xfer array, which has 1376 + * "cs_change == 0", to properly deactivate the chip 1377 + * select. 1402 1378 */ 1403 - last_xfer = &ring->uinc_xfer[len - 1]; 1404 - last_xfer->cs_change = 0; 1405 - err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len); 1406 - last_xfer->cs_change = 1; 1379 + offset = ARRAY_SIZE(ring->uinc_xfer) - len; 1380 + err = spi_sync_transfer(priv->spi, 1381 + ring->uinc_xfer + offset, len); 1407 1382 if (err) 1408 1383 return err; 1409 1384 ··· 1447 1432 } 1448 1433 1449 1434 static void 1450 - mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, 1435 + mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv, 1451 1436 const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, 1452 1437 struct sk_buff *skb) 1453 1438 { ··· 1490 1475 1491 1476 if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) 1492 1477 memcpy(cfd->data, hw_rx_obj->data, cfd->len); 1478 + 1479 + mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts); 1493 1480 } 1494 1481 1495 1482 static int ··· 1552 1535 return err; 1553 1536 1554 1537 while ((len = mcp251xfd_get_rx_linear_len(ring))) { 1555 - struct spi_transfer *last_xfer; 1538 + int offset; 1556 1539 1557 1540 rx_tail = mcp251xfd_get_rx_tail(ring); 1558 1541 ··· 1573 1556 * single SPI message. 
1574 1557 * 1575 1558 * Note: 1576 - * 1577 - * "cs_change == 1" on the last transfer results in an 1578 - * active chip select after the complete SPI 1579 - * message. This causes the controller to interpret 1580 - * the next register access as data. Temporary set 1581 - * "cs_change" of the last transfer to "0" to properly 1582 - * deactivate the chip select at the end of the 1583 - * message. 1559 + * Calculate offset, so that the SPI transfer ends on 1560 + * the last message of the uinc_xfer array, which has 1561 + * "cs_change == 0", to properly deactivate the chip 1562 + * select. 1584 1563 */ 1585 - last_xfer = &ring->uinc_xfer[len - 1]; 1586 - last_xfer->cs_change = 0; 1587 - err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len); 1588 - last_xfer->cs_change = 1; 1564 + offset = ARRAY_SIZE(ring->uinc_xfer) - len; 1565 + err = spi_sync_transfer(priv->spi, 1566 + ring->uinc_xfer + offset, len); 1589 1567 if (err) 1590 1568 return err; 1591 1569 ··· 1604 1592 return 0; 1605 1593 } 1606 1594 1607 - static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv, 1608 - u32 *timestamp) 1609 - { 1610 - return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp); 1611 - } 1612 - 1613 1595 static struct sk_buff * 1614 - mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv, 1596 + mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv, 1615 1597 struct can_frame **cf, u32 *timestamp) 1616 1598 { 1599 + struct sk_buff *skb; 1617 1600 int err; 1618 1601 1619 1602 err = mcp251xfd_get_timestamp(priv, timestamp); 1620 1603 if (err) 1621 1604 return NULL; 1622 1605 1623 - return alloc_can_err_skb(priv->ndev, cf); 1606 + skb = alloc_can_err_skb(priv->ndev, cf); 1607 + if (skb) 1608 + mcp251xfd_skb_set_timestamp(priv, skb, *timestamp); 1609 + 1610 + return skb; 1624 1611 } 1625 1612 1626 1613 static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) ··· 1771 1760 if (!cf) 1772 1761 return 0; 1773 1762 1763 + 
mcp251xfd_skb_set_timestamp(priv, skb, timestamp); 1774 1764 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 1775 1765 if (err) 1776 1766 stats->rx_fifo_errors++; ··· 2289 2277 out_fail: 2290 2278 netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", 2291 2279 err, priv->regs_status.intf); 2280 + mcp251xfd_dump(priv); 2292 2281 mcp251xfd_chip_interrupts_disable(priv); 2293 2282 2294 2283 return handled; ··· 2506 2493 if (err) 2507 2494 goto out_transceiver_disable; 2508 2495 2496 + mcp251xfd_timestamp_init(priv); 2509 2497 can_rx_offload_enable(&priv->offload); 2510 2498 2511 2499 err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, ··· 2527 2513 free_irq(spi->irq, priv); 2528 2514 out_can_rx_offload_disable: 2529 2515 can_rx_offload_disable(&priv->offload); 2516 + mcp251xfd_timestamp_stop(priv); 2530 2517 out_transceiver_disable: 2531 2518 mcp251xfd_transceiver_disable(priv); 2532 2519 out_mcp251xfd_ring_free: ··· 2549 2534 mcp251xfd_chip_interrupts_disable(priv); 2550 2535 free_irq(ndev->irq, priv); 2551 2536 can_rx_offload_disable(&priv->offload); 2537 + mcp251xfd_timestamp_stop(priv); 2552 2538 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); 2553 2539 mcp251xfd_transceiver_disable(priv); 2554 2540 mcp251xfd_ring_free(priv);
+285
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // 3 + // mcp251xfd - Microchip MCP251xFD Family CAN controller driver 4 + // 5 + // Copyright (c) 2020, 2021 Pengutronix, 6 + // Marc Kleine-Budde <kernel@pengutronix.de> 7 + // Copyright (C) 2015-2018 Etnaviv Project 8 + // 9 + 10 + #include <linux/devcoredump.h> 11 + 12 + #include "mcp251xfd.h" 13 + #include "mcp251xfd-dump.h" 14 + 15 + struct mcp251xfd_dump_iter { 16 + void *start; 17 + struct mcp251xfd_dump_object_header *hdr; 18 + void *data; 19 + }; 20 + 21 + struct mcp251xfd_dump_reg_space { 22 + u16 base; 23 + u16 size; 24 + }; 25 + 26 + struct mcp251xfd_dump_ring { 27 + enum mcp251xfd_dump_object_ring_key key; 28 + u32 val; 29 + }; 30 + 31 + static const struct mcp251xfd_dump_reg_space mcp251xfd_dump_reg_space[] = { 32 + { 33 + .base = MCP251XFD_REG_CON, 34 + .size = MCP251XFD_REG_FLTOBJ(32) - MCP251XFD_REG_CON, 35 + }, { 36 + .base = MCP251XFD_RAM_START, 37 + .size = MCP251XFD_RAM_SIZE, 38 + }, { 39 + .base = MCP251XFD_REG_OSC, 40 + .size = MCP251XFD_REG_DEVID - MCP251XFD_REG_OSC, 41 + }, 42 + }; 43 + 44 + static void mcp251xfd_dump_header(struct mcp251xfd_dump_iter *iter, 45 + enum mcp251xfd_dump_object_type object_type, 46 + const void *data_end) 47 + { 48 + struct mcp251xfd_dump_object_header *hdr = iter->hdr; 49 + unsigned int len; 50 + 51 + len = data_end - iter->data; 52 + if (!len) 53 + return; 54 + 55 + hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC); 56 + hdr->type = cpu_to_le32(object_type); 57 + hdr->offset = cpu_to_le32(iter->data - iter->start); 58 + hdr->len = cpu_to_le32(len); 59 + 60 + iter->hdr++; 61 + iter->data += len; 62 + } 63 + 64 + static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv, 65 + struct mcp251xfd_dump_iter *iter) 66 + { 67 + const int val_bytes = regmap_get_val_bytes(priv->map_rx); 68 + struct mcp251xfd_dump_object_reg *reg = iter->data; 69 + unsigned int i, j; 70 + int err; 71 + 72 + for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) { 73 + const 
struct mcp251xfd_dump_reg_space *reg_space; 74 + void *buf; 75 + 76 + reg_space = &mcp251xfd_dump_reg_space[i]; 77 + 78 + buf = kmalloc(reg_space->size, GFP_KERNEL); 79 + if (!buf) 80 + goto out; 81 + 82 + err = regmap_bulk_read(priv->map_reg, reg_space->base, 83 + buf, reg_space->size / val_bytes); 84 + if (err) { 85 + kfree(buf); 86 + continue; 87 + } 88 + 89 + for (j = 0; j < reg_space->size; j += sizeof(u32), reg++) { 90 + reg->reg = cpu_to_le32(reg_space->base + j); 91 + reg->val = cpu_to_le32p(buf + j); 92 + } 93 + 94 + kfree(buf); 95 + } 96 + 97 + out: 98 + mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg); 99 + } 100 + 101 + static void mcp251xfd_dump_ring(struct mcp251xfd_dump_iter *iter, 102 + enum mcp251xfd_dump_object_type object_type, 103 + const struct mcp251xfd_dump_ring *dump_ring, 104 + unsigned int len) 105 + { 106 + struct mcp251xfd_dump_object_reg *reg = iter->data; 107 + unsigned int i; 108 + 109 + for (i = 0; i < len; i++, reg++) { 110 + reg->reg = cpu_to_le32(dump_ring[i].key); 111 + reg->val = cpu_to_le32(dump_ring[i].val); 112 + } 113 + 114 + mcp251xfd_dump_header(iter, object_type, reg); 115 + } 116 + 117 + static void mcp251xfd_dump_tef_ring(const struct mcp251xfd_priv *priv, 118 + struct mcp251xfd_dump_iter *iter) 119 + { 120 + const struct mcp251xfd_tef_ring *tef = priv->tef; 121 + const struct mcp251xfd_tx_ring *tx = priv->tx; 122 + const struct mcp251xfd_dump_ring dump_ring[] = { 123 + { 124 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, 125 + .val = tef->head, 126 + }, { 127 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, 128 + .val = tef->tail, 129 + }, { 130 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, 131 + .val = 0, 132 + }, { 133 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, 134 + .val = 0, 135 + }, { 136 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, 137 + .val = 0, 138 + }, { 139 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, 140 + .val = tx->obj_num, 141 + }, { 142 + .key = 
MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, 143 + .val = sizeof(struct mcp251xfd_hw_tef_obj), 144 + }, 145 + }; 146 + 147 + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TEF, 148 + dump_ring, ARRAY_SIZE(dump_ring)); 149 + } 150 + 151 + static void mcp251xfd_dump_rx_ring_one(const struct mcp251xfd_priv *priv, 152 + struct mcp251xfd_dump_iter *iter, 153 + const struct mcp251xfd_rx_ring *rx) 154 + { 155 + const struct mcp251xfd_dump_ring dump_ring[] = { 156 + { 157 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, 158 + .val = rx->head, 159 + }, { 160 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, 161 + .val = rx->tail, 162 + }, { 163 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, 164 + .val = rx->base, 165 + }, { 166 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, 167 + .val = rx->nr, 168 + }, { 169 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, 170 + .val = rx->fifo_nr, 171 + }, { 172 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, 173 + .val = rx->obj_num, 174 + }, { 175 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, 176 + .val = rx->obj_size, 177 + }, 178 + }; 179 + 180 + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_RX, 181 + dump_ring, ARRAY_SIZE(dump_ring)); 182 + } 183 + 184 + static void mcp251xfd_dump_rx_ring(const struct mcp251xfd_priv *priv, 185 + struct mcp251xfd_dump_iter *iter) 186 + { 187 + struct mcp251xfd_rx_ring *rx_ring; 188 + unsigned int i; 189 + 190 + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) 191 + mcp251xfd_dump_rx_ring_one(priv, iter, rx_ring); 192 + } 193 + 194 + static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv, 195 + struct mcp251xfd_dump_iter *iter) 196 + { 197 + const struct mcp251xfd_tx_ring *tx = priv->tx; 198 + const struct mcp251xfd_dump_ring dump_ring[] = { 199 + { 200 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, 201 + .val = tx->head, 202 + }, { 203 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, 204 + .val = tx->tail, 205 + }, { 206 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, 207 + .val = tx->base, 208 
+ }, { 209 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, 210 + .val = 0, 211 + }, { 212 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, 213 + .val = MCP251XFD_TX_FIFO, 214 + }, { 215 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, 216 + .val = tx->obj_num, 217 + }, { 218 + .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, 219 + .val = tx->obj_size, 220 + }, 221 + }; 222 + 223 + mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TX, 224 + dump_ring, ARRAY_SIZE(dump_ring)); 225 + } 226 + 227 + static void mcp251xfd_dump_end(const struct mcp251xfd_priv *priv, 228 + struct mcp251xfd_dump_iter *iter) 229 + { 230 + struct mcp251xfd_dump_object_header *hdr = iter->hdr; 231 + 232 + hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC); 233 + hdr->type = cpu_to_le32(MCP251XFD_DUMP_OBJECT_TYPE_END); 234 + hdr->offset = cpu_to_le32(0); 235 + hdr->len = cpu_to_le32(0); 236 + 237 + /* provoke NULL pointer access, if used after END object */ 238 + iter->hdr = NULL; 239 + } 240 + 241 + void mcp251xfd_dump(const struct mcp251xfd_priv *priv) 242 + { 243 + struct mcp251xfd_dump_iter iter; 244 + unsigned int rings_num, obj_num; 245 + unsigned int file_size = 0; 246 + unsigned int i; 247 + 248 + /* register space + end marker */ 249 + obj_num = 2; 250 + 251 + /* register space */ 252 + for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) 253 + file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) * 254 + sizeof(struct mcp251xfd_dump_object_reg); 255 + 256 + /* TEF ring, RX ring, TX rings */ 257 + rings_num = 1 + priv->rx_ring_num + 1; 258 + obj_num += rings_num; 259 + file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX * 260 + sizeof(struct mcp251xfd_dump_object_reg); 261 + 262 + /* size of the headers */ 263 + file_size += sizeof(*iter.hdr) * obj_num; 264 + 265 + /* allocate the file in vmalloc memory, it's likely to be big */ 266 + iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | 267 + __GFP_ZERO | __GFP_NORETRY); 268 + if (!iter.start) { 269 + 
netdev_warn(priv->ndev, "Failed to allocate devcoredump file.\n"); 270 + return; 271 + } 272 + 273 + /* point the data member after the headers */ 274 + iter.hdr = iter.start; 275 + iter.data = &iter.hdr[obj_num]; 276 + 277 + mcp251xfd_dump_registers(priv, &iter); 278 + mcp251xfd_dump_tef_ring(priv, &iter); 279 + mcp251xfd_dump_rx_ring(priv, &iter); 280 + mcp251xfd_dump_tx_ring(priv, &iter); 281 + mcp251xfd_dump_end(priv, &iter); 282 + 283 + dev_coredumpv(&priv->spi->dev, iter.start, 284 + iter.data - iter.start, GFP_KERNEL); 285 + }
+45
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * mcp251xfd - Microchip MCP251xFD Family CAN controller driver 4 + * 5 + * Copyright (c) 2019, 2020, 2021 Pengutronix, 6 + * Marc Kleine-Budde <kernel@pengutronix.de> 7 + */ 8 + 9 + #ifndef _MCP251XFD_DUMP_H 10 + #define _MCP251XFD_DUMP_H 11 + 12 + #define MCP251XFD_DUMP_MAGIC 0x1825434d 13 + 14 + enum mcp251xfd_dump_object_type { 15 + MCP251XFD_DUMP_OBJECT_TYPE_REG, 16 + MCP251XFD_DUMP_OBJECT_TYPE_TEF, 17 + MCP251XFD_DUMP_OBJECT_TYPE_RX, 18 + MCP251XFD_DUMP_OBJECT_TYPE_TX, 19 + MCP251XFD_DUMP_OBJECT_TYPE_END = -1, 20 + }; 21 + 22 + enum mcp251xfd_dump_object_ring_key { 23 + MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, 24 + MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, 25 + MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, 26 + MCP251XFD_DUMP_OBJECT_RING_KEY_NR, 27 + MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, 28 + MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, 29 + MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, 30 + __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX, 31 + }; 32 + 33 + struct mcp251xfd_dump_object_header { 34 + __le32 magic; 35 + __le32 type; 36 + __le32 offset; 37 + __le32 len; 38 + }; 39 + 40 + struct mcp251xfd_dump_object_reg { 41 + __le32 reg; 42 + __le32 val; 43 + }; 44 + 45 + #endif
+71
drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // 3 + // mcp251xfd - Microchip MCP251xFD Family CAN controller driver 4 + // 5 + // Copyright (c) 2021 Pengutronix, 6 + // Marc Kleine-Budde <kernel@pengutronix.de> 7 + // 8 + 9 + #include <linux/clocksource.h> 10 + #include <linux/workqueue.h> 11 + 12 + #include "mcp251xfd.h" 13 + 14 + static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc) 15 + { 16 + struct mcp251xfd_priv *priv; 17 + u32 timestamp = 0; 18 + int err; 19 + 20 + priv = container_of(cc, struct mcp251xfd_priv, cc); 21 + err = mcp251xfd_get_timestamp(priv, &timestamp); 22 + if (err) 23 + netdev_err(priv->ndev, 24 + "Error %d while reading timestamp. HW timestamps may be inaccurate.", 25 + err); 26 + 27 + return timestamp; 28 + } 29 + 30 + static void mcp251xfd_timestamp_work(struct work_struct *work) 31 + { 32 + struct delayed_work *delayed_work = to_delayed_work(work); 33 + struct mcp251xfd_priv *priv; 34 + 35 + priv = container_of(delayed_work, struct mcp251xfd_priv, timestamp); 36 + timecounter_read(&priv->tc); 37 + 38 + schedule_delayed_work(&priv->timestamp, 39 + MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); 40 + } 41 + 42 + void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv, 43 + struct sk_buff *skb, u32 timestamp) 44 + { 45 + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); 46 + u64 ns; 47 + 48 + ns = timecounter_cyc2time(&priv->tc, timestamp); 49 + hwtstamps->hwtstamp = ns_to_ktime(ns); 50 + } 51 + 52 + void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) 53 + { 54 + struct cyclecounter *cc = &priv->cc; 55 + 56 + cc->read = mcp251xfd_timestamp_read; 57 + cc->mask = CYCLECOUNTER_MASK(32); 58 + cc->shift = 1; 59 + cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); 60 + 61 + timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); 62 + 63 + INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work); 64 + schedule_delayed_work(&priv->timestamp, 65 + MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); 66 
+ } 67 + 68 + void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv) 69 + { 70 + cancel_delayed_work_sync(&priv->timestamp); 71 + }
+28
drivers/net/can/spi/mcp251xfd/mcp251xfd.h
··· 15 15 #include <linux/can/rx-offload.h> 16 16 #include <linux/gpio/consumer.h> 17 17 #include <linux/kernel.h> 18 + #include <linux/netdevice.h> 18 19 #include <linux/regmap.h> 19 20 #include <linux/regulator/consumer.h> 20 21 #include <linux/spi/spi.h> 22 + #include <linux/timecounter.h> 23 + #include <linux/workqueue.h> 21 24 22 25 /* MPC251x registers */ 23 26 ··· 397 394 #define MCP251XFD_SYSCLOCK_HZ_MAX 40000000 398 395 #define MCP251XFD_SYSCLOCK_HZ_MIN 1000000 399 396 #define MCP251XFD_SPICLOCK_HZ_MAX 20000000 397 + #define MCP251XFD_TIMESTAMP_WORK_DELAY_SEC 45 398 + static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC < 399 + CYCLECOUNTER_MASK(32) / MCP251XFD_SYSCLOCK_HZ_MAX / 2); 400 400 #define MCP251XFD_OSC_PLL_MULTIPLIER 10 401 401 #define MCP251XFD_OSC_STAB_SLEEP_US (3 * USEC_PER_MSEC) 402 402 #define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US) ··· 601 595 struct mcp251xfd_ecc ecc; 602 596 struct mcp251xfd_regs_status regs_status; 603 597 598 + struct cyclecounter cc; 599 + struct timecounter tc; 600 + struct delayed_work timestamp; 601 + 604 602 struct gpio_desc *rx_int; 605 603 struct clk *clk; 606 604 struct regulator *reg_vdd; ··· 737 727 return data; 738 728 } 739 729 730 + static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv, 731 + u32 *timestamp) 732 + { 733 + return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp); 734 + } 735 + 740 736 static inline u16 mcp251xfd_get_tef_obj_addr(u8 n) 741 737 { 742 738 return MCP251XFD_RAM_START + ··· 853 837 u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size, 854 838 const void *data, size_t data_size); 855 839 u16 mcp251xfd_crc16_compute(const void *data, size_t data_size); 840 + void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv, 841 + struct sk_buff *skb, u32 timestamp); 842 + void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); 843 + void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); 844 + 845 + #if 
IS_ENABLED(CONFIG_DEV_COREDUMP) 846 + void mcp251xfd_dump(const struct mcp251xfd_priv *priv); 847 + #else 848 + static inline void mcp251xfd_dump(const struct mcp251xfd_priv *priv) 849 + { 850 + } 851 + #endif 856 852 857 853 #endif
+1 -1
drivers/net/can/usb/ems_usb.c
··· 807 807 808 808 err = usb_submit_urb(urb, GFP_ATOMIC); 809 809 if (unlikely(err)) { 810 - can_free_echo_skb(netdev, context->echo_index); 810 + can_free_echo_skb(netdev, context->echo_index, NULL); 811 811 812 812 usb_unanchor_urb(urb); 813 813 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+2 -2
drivers/net/can/usb/esd_usb2.c
··· 360 360 can_get_echo_skb(netdev, context->echo_index, NULL); 361 361 } else { 362 362 stats->tx_errors++; 363 - can_free_echo_skb(netdev, context->echo_index); 363 + can_free_echo_skb(netdev, context->echo_index, NULL); 364 364 } 365 365 366 366 /* Release context */ ··· 793 793 794 794 err = usb_submit_urb(urb, GFP_ATOMIC); 795 795 if (err) { 796 - can_free_echo_skb(netdev, context->echo_index); 796 + can_free_echo_skb(netdev, context->echo_index, NULL); 797 797 798 798 atomic_dec(&priv->active_tx_jobs); 799 799 usb_unanchor_urb(urb);
+1 -1
drivers/net/can/usb/gs_usb.c
··· 533 533 if (unlikely(rc)) { /* usb send failed */ 534 534 atomic_dec(&dev->active_tx_urbs); 535 535 536 - can_free_echo_skb(netdev, idx); 536 + can_free_echo_skb(netdev, idx, NULL); 537 537 gs_free_tx_context(txc); 538 538 539 539 usb_unanchor_urb(urb);
+1 -1
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
··· 593 593 if (unlikely(err)) { 594 594 spin_lock_irqsave(&priv->tx_contexts_lock, flags); 595 595 596 - can_free_echo_skb(netdev, context->echo_index); 596 + can_free_echo_skb(netdev, context->echo_index, NULL); 597 597 context->echo_index = dev->max_tx_urbs; 598 598 --priv->active_tx_contexts; 599 599 netif_wake_queue(netdev);
+1 -1
drivers/net/can/usb/mcba_usb.c
··· 364 364 return NETDEV_TX_OK; 365 365 366 366 xmit_failed: 367 - can_free_echo_skb(priv->netdev, ctx->ndx); 367 + can_free_echo_skb(priv->netdev, ctx->ndx, NULL); 368 368 mcba_usb_free_ctx(ctx); 369 369 dev_kfree_skb(skb); 370 370 stats->tx_dropped++;
+47
drivers/net/can/usb/peak_usb/pcan_usb.c
··· 11 11 #include <linux/netdevice.h> 12 12 #include <linux/usb.h> 13 13 #include <linux/module.h> 14 + #include <linux/ethtool.h> 14 15 15 16 #include <linux/can.h> 16 17 #include <linux/can/dev.h> ··· 41 40 #define PCAN_USB_CMD_REGISTER 9 42 41 #define PCAN_USB_CMD_EXT_VCC 10 43 42 #define PCAN_USB_CMD_ERR_FR 11 43 + #define PCAN_USB_CMD_LED 12 44 44 45 45 /* PCAN_USB_CMD_SET_BUS number arg */ 46 46 #define PCAN_USB_BUS_XCVER 2 ··· 248 246 }; 249 247 250 248 return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args); 249 + } 250 + 251 + static int pcan_usb_set_led(struct peak_usb_device *dev, u8 onoff) 252 + { 253 + u8 args[PCAN_USB_CMD_ARGS_LEN] = { 254 + [0] = !!onoff, 255 + }; 256 + 257 + return pcan_usb_send_cmd(dev, PCAN_USB_CMD_LED, PCAN_USB_SET, args); 251 258 } 252 259 253 260 /* ··· 982 971 return 0; 983 972 } 984 973 974 + static int pcan_usb_set_phys_id(struct net_device *netdev, 975 + enum ethtool_phys_id_state state) 976 + { 977 + struct peak_usb_device *dev = netdev_priv(netdev); 978 + int err = 0; 979 + 980 + switch (state) { 981 + case ETHTOOL_ID_ACTIVE: 982 + /* call ON/OFF twice a second */ 983 + return 2; 984 + 985 + case ETHTOOL_ID_OFF: 986 + err = pcan_usb_set_led(dev, 0); 987 + break; 988 + 989 + case ETHTOOL_ID_ON: 990 + fallthrough; 991 + 992 + case ETHTOOL_ID_INACTIVE: 993 + /* restore LED default */ 994 + err = pcan_usb_set_led(dev, 1); 995 + break; 996 + 997 + default: 998 + break; 999 + } 1000 + 1001 + return err; 1002 + } 1003 + 1004 + static const struct ethtool_ops pcan_usb_ethtool_ops = { 1005 + .set_phys_id = pcan_usb_set_phys_id, 1006 + }; 1007 + 985 1008 /* 986 1009 * describe the PCAN-USB adapter 987 1010 */ ··· 1045 1000 1046 1001 /* size of device private data */ 1047 1002 .sizeof_dev_private = sizeof(struct pcan_usb), 1003 + 1004 + .ethtool_ops = &pcan_usb_ethtool_ops, 1048 1005 1049 1006 /* timestamps usage */ 1050 1007 .ts_used_bits = 16,
+5 -1
drivers/net/can/usb/peak_usb/pcan_usb_core.c
··· 14 14 #include <linux/module.h> 15 15 #include <linux/netdevice.h> 16 16 #include <linux/usb.h> 17 + #include <linux/ethtool.h> 17 18 18 19 #include <linux/can.h> 19 20 #include <linux/can/dev.h> ··· 372 371 373 372 err = usb_submit_urb(urb, GFP_ATOMIC); 374 373 if (err) { 375 - can_free_echo_skb(netdev, context->echo_index); 374 + can_free_echo_skb(netdev, context->echo_index, NULL); 376 375 377 376 usb_unanchor_urb(urb); 378 377 ··· 820 819 netdev->netdev_ops = &peak_usb_netdev_ops; 821 820 822 821 netdev->flags |= IFF_ECHO; /* we support local echo */ 822 + 823 + /* add ethtool support */ 824 + netdev->ethtool_ops = peak_usb_adapter->ethtool_ops; 823 825 824 826 init_usb_anchor(&dev->rx_submitted); 825 827
+2
drivers/net/can/usb/peak_usb/pcan_usb_core.h
··· 46 46 const struct can_bittiming_const * const data_bittiming_const; 47 47 unsigned int ctrl_count; 48 48 49 + const struct ethtool_ops *ethtool_ops; 50 + 49 51 int (*intf_probe)(struct usb_interface *intf); 50 52 51 53 int (*dev_init)(struct peak_usb_device *dev);
+42 -4
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
··· 7 7 #include <linux/netdevice.h> 8 8 #include <linux/usb.h> 9 9 #include <linux/module.h> 10 + #include <linux/ethtool.h> 10 11 11 12 #include <linux/can.h> 12 13 #include <linux/can/dev.h> ··· 774 773 tx_msg_flags |= PUCAN_MSG_RTR; 775 774 } 776 775 776 + /* Single-Shot frame */ 777 + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 778 + tx_msg_flags |= PUCAN_MSG_SINGLE_SHOT; 779 + 777 780 tx_msg->flags = cpu_to_le16(tx_msg_flags); 778 781 tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, dlc); 779 782 memcpy(tx_msg->d, cfd->data, cfd->len); ··· 1011 1006 } 1012 1007 } 1013 1008 1009 + /* blink LED's */ 1010 + static int pcan_usb_fd_set_phys_id(struct net_device *netdev, 1011 + enum ethtool_phys_id_state state) 1012 + { 1013 + struct peak_usb_device *dev = netdev_priv(netdev); 1014 + int err = 0; 1015 + 1016 + switch (state) { 1017 + case ETHTOOL_ID_ACTIVE: 1018 + err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_FAST); 1019 + break; 1020 + case ETHTOOL_ID_INACTIVE: 1021 + err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF); 1022 + break; 1023 + default: 1024 + break; 1025 + } 1026 + 1027 + return err; 1028 + } 1029 + 1030 + static const struct ethtool_ops pcan_usb_fd_ethtool_ops = { 1031 + .set_phys_id = pcan_usb_fd_set_phys_id, 1032 + }; 1033 + 1014 1034 /* describes the PCAN-USB FD adapter */ 1015 1035 static const struct can_bittiming_const pcan_usb_fd_const = { 1016 1036 .name = "pcan_usb_fd", ··· 1067 1037 .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, 1068 1038 .ctrlmode_supported = CAN_CTRLMODE_FD | 1069 1039 CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | 1070 - CAN_CTRLMODE_CC_LEN8_DLC, 1040 + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, 1071 1041 .clock = { 1072 1042 .freq = PCAN_UFD_CRYSTAL_HZ, 1073 1043 }, ··· 1076 1046 1077 1047 /* size of device private data */ 1078 1048 .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), 1049 + 1050 + .ethtool_ops = &pcan_usb_fd_ethtool_ops, 1079 1051 1080 1052 /* timestamps usage */ 1081 1053 
.ts_used_bits = 32, ··· 1142 1110 .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, 1143 1111 .ctrlmode_supported = CAN_CTRLMODE_FD | 1144 1112 CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | 1145 - CAN_CTRLMODE_CC_LEN8_DLC, 1113 + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, 1146 1114 .clock = { 1147 1115 .freq = PCAN_UFD_CRYSTAL_HZ, 1148 1116 }, ··· 1151 1119 1152 1120 /* size of device private data */ 1153 1121 .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), 1122 + 1123 + .ethtool_ops = &pcan_usb_fd_ethtool_ops, 1154 1124 1155 1125 /* timestamps usage */ 1156 1126 .ts_used_bits = 32, ··· 1217 1183 .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, 1218 1184 .ctrlmode_supported = CAN_CTRLMODE_FD | 1219 1185 CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | 1220 - CAN_CTRLMODE_CC_LEN8_DLC, 1186 + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, 1221 1187 .clock = { 1222 1188 .freq = PCAN_UFD_CRYSTAL_HZ, 1223 1189 }, ··· 1226 1192 1227 1193 /* size of device private data */ 1228 1194 .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), 1195 + 1196 + .ethtool_ops = &pcan_usb_fd_ethtool_ops, 1229 1197 1230 1198 /* timestamps usage */ 1231 1199 .ts_used_bits = 32, ··· 1292 1256 .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, 1293 1257 .ctrlmode_supported = CAN_CTRLMODE_FD | 1294 1258 CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | 1295 - CAN_CTRLMODE_CC_LEN8_DLC, 1259 + CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, 1296 1260 .clock = { 1297 1261 .freq = PCAN_UFD_CRYSTAL_HZ, 1298 1262 }, ··· 1301 1265 1302 1266 /* size of device private data */ 1303 1267 .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), 1268 + 1269 + .ethtool_ops = &pcan_usb_fd_ethtool_ops, 1304 1270 1305 1271 /* timestamps usage */ 1306 1272 .ts_used_bits = 32,
+42 -4
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
··· 9 9 #include <linux/netdevice.h> 10 10 #include <linux/usb.h> 11 11 #include <linux/module.h> 12 + #include <linux/ethtool.h> 12 13 13 14 #include <linux/can.h> 14 15 #include <linux/can/dev.h> ··· 37 36 38 37 #define PCAN_USBPRO_RTR 0x01 39 38 #define PCAN_USBPRO_EXT 0x02 39 + #define PCAN_USBPRO_SS 0x08 40 40 41 41 #define PCAN_USBPRO_CMD_BUFFER_SIZE 512 42 42 ··· 778 776 779 777 flags = 0; 780 778 if (cf->can_id & CAN_EFF_FLAG) 781 - flags |= 0x02; 779 + flags |= PCAN_USBPRO_EXT; 782 780 if (cf->can_id & CAN_RTR_FLAG) 783 - flags |= 0x01; 781 + flags |= PCAN_USBPRO_RTR; 782 + 783 + /* Single-Shot frame */ 784 + if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) 785 + flags |= PCAN_USBPRO_SS; 784 786 785 787 pcan_msg_add_rec(&usb_msg, data_type, 0, flags, len, cf->can_id, 786 788 cf->data); ··· 912 906 usb_if->dev[dev->ctrl_idx] = dev; 913 907 914 908 /* set LED in default state (end of init phase) */ 915 - pcan_usb_pro_set_led(dev, 0, 1); 909 + pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1); 916 910 917 911 kfree(bi); 918 912 kfree(fi); ··· 996 990 return 0; 997 991 } 998 992 993 + static int pcan_usb_pro_set_phys_id(struct net_device *netdev, 994 + enum ethtool_phys_id_state state) 995 + { 996 + struct peak_usb_device *dev = netdev_priv(netdev); 997 + int err = 0; 998 + 999 + switch (state) { 1000 + case ETHTOOL_ID_ACTIVE: 1001 + /* fast blinking forever */ 1002 + err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_BLINK_FAST, 1003 + 0xffffffff); 1004 + break; 1005 + 1006 + case ETHTOOL_ID_INACTIVE: 1007 + /* restore LED default */ 1008 + err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1); 1009 + break; 1010 + 1011 + default: 1012 + break; 1013 + } 1014 + 1015 + return err; 1016 + } 1017 + 1018 + static const struct ethtool_ops pcan_usb_pro_ethtool_ops = { 1019 + .set_phys_id = pcan_usb_pro_set_phys_id, 1020 + }; 1021 + 999 1022 /* 1000 1023 * describe the PCAN-USB Pro adapter 1001 1024 */ ··· 1044 1009 .name = "PCAN-USB Pro", 1045 1010 .device_id = 
PCAN_USBPRO_PRODUCT_ID, 1046 1011 .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT, 1047 - .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, 1012 + .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | 1013 + CAN_CTRLMODE_ONE_SHOT, 1048 1014 .clock = { 1049 1015 .freq = PCAN_USBPRO_CRYSTAL_HZ, 1050 1016 }, ··· 1053 1017 1054 1018 /* size of device private data */ 1055 1019 .sizeof_dev_private = sizeof(struct pcan_usb_pro_device), 1020 + 1021 + .ethtool_ops = &pcan_usb_pro_ethtool_ops, 1056 1022 1057 1023 /* timestamps usage */ 1058 1024 .ts_used_bits = 32,
+6
drivers/net/can/usb/peak_usb/pcan_usb_pro.h
··· 115 115 __le32 serial_num; 116 116 }; 117 117 118 + #define PCAN_USBPRO_LED_DEVICE 0x00 119 + #define PCAN_USBPRO_LED_BLINK_FAST 0x01 120 + #define PCAN_USBPRO_LED_BLINK_SLOW 0x02 121 + #define PCAN_USBPRO_LED_ON 0x03 122 + #define PCAN_USBPRO_LED_OFF 0x04 123 + 118 124 struct __packed pcan_usb_pro_setled { 119 125 u8 data_type; 120 126 u8 channel;
+4 -4
drivers/net/can/usb/ucan.c
··· 246 246 */ 247 247 struct ucan_tx_complete_entry_t can_tx_complete_msg[0]; 248 248 } __aligned(0x4) msg; 249 - } __packed; 249 + } __packed __aligned(0x4); 250 250 251 251 /* Macros to calculate message lengths */ 252 252 #define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg) ··· 675 675 can_get_echo_skb(up->netdev, echo_index, NULL); 676 676 } else { 677 677 up->netdev->stats.tx_dropped++; 678 - can_free_echo_skb(up->netdev, echo_index); 678 + can_free_echo_skb(up->netdev, echo_index, NULL); 679 679 } 680 680 spin_unlock_irqrestore(&up->echo_skb_lock, flags); 681 681 } ··· 843 843 844 844 /* update counters an cleanup */ 845 845 spin_lock_irqsave(&up->echo_skb_lock, flags); 846 - can_free_echo_skb(up->netdev, context - up->context_array); 846 + can_free_echo_skb(up->netdev, context - up->context_array, NULL); 847 847 spin_unlock_irqrestore(&up->echo_skb_lock, flags); 848 848 849 849 up->netdev->stats.tx_dropped++; ··· 1157 1157 * frees the skb 1158 1158 */ 1159 1159 spin_lock_irqsave(&up->echo_skb_lock, flags); 1160 - can_free_echo_skb(up->netdev, echo_index); 1160 + can_free_echo_skb(up->netdev, echo_index, NULL); 1161 1161 spin_unlock_irqrestore(&up->echo_skb_lock, flags); 1162 1162 1163 1163 if (ret == -ENODEV) {
+1 -1
drivers/net/can/usb/usb_8dev.c
··· 691 691 return NETDEV_TX_BUSY; 692 692 693 693 failed: 694 - can_free_echo_skb(netdev, context->echo_index); 694 + can_free_echo_skb(netdev, context->echo_index, NULL); 695 695 696 696 usb_unanchor_urb(urb); 697 697 usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+4 -6
drivers/net/can/xilinx_can.c
··· 1772 1772 /* Getting the CAN can_clk info */ 1773 1773 priv->can_clk = devm_clk_get(&pdev->dev, "can_clk"); 1774 1774 if (IS_ERR(priv->can_clk)) { 1775 - if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER) 1776 - dev_err(&pdev->dev, "Device clock not found.\n"); 1777 - ret = PTR_ERR(priv->can_clk); 1775 + ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk), 1776 + "device clock not found\n"); 1778 1777 goto err_free; 1779 1778 } 1780 1779 1781 1780 priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); 1782 1781 if (IS_ERR(priv->bus_clk)) { 1783 - if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER) 1784 - dev_err(&pdev->dev, "bus clock not found\n"); 1785 - ret = PTR_ERR(priv->bus_clk); 1782 + ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk), 1783 + "bus clock not found\n"); 1786 1784 goto err_free; 1787 1785 } 1788 1786
+79
include/linux/can/bittiming.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> 3 + * Copyright (c) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> 3 4 */ 4 5 5 6 #ifndef _CAN_BITTIMING_H ··· 11 10 12 11 #define CAN_SYNC_SEG 1 13 12 13 + 14 + /* Kilobits and Megabits per second */ 15 + #define CAN_KBPS 1000UL 16 + #define CAN_MBPS 1000000UL 17 + 18 + /* Megahertz */ 19 + #define CAN_MHZ 1000000UL 20 + 21 + /* 22 + * struct can_tdc - CAN FD Transmission Delay Compensation parameters 23 + * 24 + * At high bit rates, the propagation delay from the TX pin to the RX 25 + * pin of the transceiver causes measurement errors: the sample point 26 + * on the RX pin might occur on the previous bit. 27 + * 28 + * To solve this issue, ISO 11898-1 introduces in section 11.3.3 29 + * "Transmitter delay compensation" a SSP (Secondary Sample Point) 30 + * equal to the distance, in time quanta, from the start of the bit 31 + * time on the TX pin to the actual measurement on the RX pin. 32 + * 33 + * This structure contains the parameters to calculate that SSP. 34 + * 35 + * @tdcv: Transmitter Delay Compensation Value. Distance, in time 36 + * quanta, from when the bit is sent on the TX pin to when it is 37 + * received on the RX pin of the transmitter. Possible options: 38 + * 39 + * O: automatic mode. The controller dynamically measure @tdcv 40 + * for each transmitted CAN FD frame. 41 + * 42 + * Other values: manual mode. Use the fixed provided value. 43 + * 44 + * @tdco: Transmitter Delay Compensation Offset. Offset value, in time 45 + * quanta, defining the distance between the start of the bit 46 + * reception on the RX pin of the transceiver and the SSP 47 + * position such as SSP = @tdcv + @tdco. 48 + * 49 + * If @tdco is zero, then TDC is disabled and both @tdcv and 50 + * @tdcf should be ignored. 51 + * 52 + * @tdcf: Transmitter Delay Compensation Filter window. 
Defines the 53 + * minimum value for the SSP position in time quanta. If SSP is 54 + * less than @tdcf, then no delay compensations occur and the 55 + * normal sampling point is used instead. The feature is enabled 56 + * if and only if @tdcv is set to zero (automatic mode) and @tdcf 57 + * is configured to a value greater than @tdco. 58 + */ 59 + struct can_tdc { 60 + u32 tdcv; 61 + u32 tdco; 62 + u32 tdcf; 63 + }; 64 + 65 + /* 66 + * struct can_tdc_const - CAN hardware-dependent constant for 67 + * Transmission Delay Compensation 68 + * 69 + * @tdcv_max: Transmitter Delay Compensation Value maximum value. 70 + * Should be set to zero if the controller does not support 71 + * manual mode for tdcv. 72 + * @tdco_max: Transmitter Delay Compensation Offset maximum value. 73 + * Should not be zero. If the controller does not support TDC, 74 + * then the pointer to this structure should be NULL. 75 + * @tdcf_max: Transmitter Delay Compensation Filter window maximum 76 + * value. Should be set to zero if the controller does not 77 + * support this feature. 78 + */ 79 + struct can_tdc_const { 80 + u32 tdcv_max; 81 + u32 tdco_max; 82 + u32 tdcf_max; 83 + }; 84 + 14 85 #ifdef CONFIG_CAN_CALC_BITTIMING 15 86 int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, 16 87 const struct can_bittiming_const *btc); 88 + 89 + void can_calc_tdco(struct net_device *dev); 17 90 #else /* !CONFIG_CAN_CALC_BITTIMING */ 18 91 static inline int 19 92 can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, ··· 95 20 { 96 21 netdev_err(dev, "bit-timing calculation not available\n"); 97 22 return -EINVAL; 23 + } 24 + 25 + static inline void can_calc_tdco(struct net_device *dev) 26 + { 98 27 } 99 28 #endif /* CONFIG_CAN_CALC_BITTIMING */ 100 29
+9 -5
include/linux/can/dev.h
··· 39 39 struct net_device *dev; 40 40 struct can_device_stats can_stats; 41 41 42 - struct can_bittiming bittiming, data_bittiming; 43 42 const struct can_bittiming_const *bittiming_const, 44 43 *data_bittiming_const; 45 - const u16 *termination_const; 46 - unsigned int termination_const_cnt; 47 - u16 termination; 48 - const u32 *bitrate_const; 44 + struct can_bittiming bittiming, data_bittiming; 45 + const struct can_tdc_const *tdc_const; 46 + struct can_tdc tdc; 47 + 49 48 unsigned int bitrate_const_cnt; 49 + const u32 *bitrate_const; 50 50 const u32 *data_bitrate_const; 51 51 unsigned int data_bitrate_const_cnt; 52 52 u32 bitrate_max; 53 53 struct can_clock clock; 54 + 55 + unsigned int termination_const_cnt; 56 + const u16 *termination_const; 57 + u16 termination; 54 58 55 59 enum can_state state; 56 60
+2 -1
include/linux/can/skb.h
··· 23 23 u8 *len_ptr, unsigned int *frame_len_ptr); 24 24 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx, 25 25 unsigned int *frame_len_ptr); 26 - void can_free_echo_skb(struct net_device *dev, unsigned int idx); 26 + void can_free_echo_skb(struct net_device *dev, unsigned int idx, 27 + unsigned int *frame_len_ptr); 27 28 struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); 28 29 struct sk_buff *alloc_canfd_skb(struct net_device *dev, 29 30 struct canfd_frame **cfd);