Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux-can-fixes-for-5.4-20191105' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can

Marc Kleine-Budde says:

====================
pull-request: can 2019-11-05

this is a pull request of 33 patches for net/master.

The first patch, by Wen Yang, adds a missing of_node_put() to the CAN device
infrastructure.

Navid Emamdoost's patch for the gs_usb driver fixes a memory leak in the
gs_can_open() error path.

Johan Hovold provides two patches, one for the mcba_usb, the other for the
usb_8dev driver. Both fix a use-after-free after USB-disconnect.

Joakim Zhang's patch improves the flexcan driver: the ECC mechanism is now
completely disabled instead of just masking the interrupts.

The next three patches all target the peak_usb driver. Stephane Grosjean's
patch fixes a potential out-of-sync while decoding packets, Johan Hovold's
patch fixes a slab info leak, Jeroen Hofstee's patch adds missing reporting of
bus off recovery events.

Followed by three patches for the c_can driver. Kurt Van Dijck's patch fixes
the detection of potentially missing status IRQs, and Jeroen Hofstee's patches
add a chip reset on open and add missing reporting of bus off recovery events.

Appana Durga Kedareswara rao's patch for the xilinx driver fixes the flags
field initialization for axi CAN.

The next seven patches target the rx-offload helper; they are by me and Jeroen
Hofstee. The error handling in case of a queue overflow is fixed, removing a
memory leak. Further, the error handling in case of queue overflow and skb OOM
is cleaned up.

The next two patches are by me and target the flexcan and ti_hecc drivers. In
case of an error during can_rx_offload_queue_sorted(), the error counters in
the drivers are incremented.

Jeroen Hofstee provides six patches for the ti_hecc driver, which properly stop
the device in ifdown, improve the rx-offload support (which hit mainline in
v5.4-rc1), and add missing FIFO overflow and state change reporting.

The following four patches target the j1939 protocol. Colin Ian King's patch
fixes a memory leak in the j1939_sk_errqueue() handling. Three patches by
Oleksij Rempel fix a memory leak on socket release and fix the EOMA packet in
the transport protocol.

Timo Schlüßler's patch fixes a potential race condition in the mcp251x driver
when resuming after suspend.

The last patch is by Yegor Yefremov and updates the SPDX-License-Identifier to
v3.0.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+371 -140
+64 -7
drivers/net/can/c_can/c_can.c
··· 52 52 #define CONTROL_EX_PDR BIT(8) 53 53 54 54 /* control register */ 55 + #define CONTROL_SWR BIT(15) 55 56 #define CONTROL_TEST BIT(7) 56 57 #define CONTROL_CCE BIT(6) 57 58 #define CONTROL_DISABLE_AR BIT(5) ··· 97 96 #define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT) 98 97 #define BTR_TSEG2_SHIFT 12 99 98 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) 99 + 100 + /* interrupt register */ 101 + #define INT_STS_PENDING 0x8000 100 102 101 103 /* brp extension register */ 102 104 #define BRP_EXT_BRPE_MASK 0x0f ··· 573 569 IF_MCONT_RCV_EOB); 574 570 } 575 571 572 + static int c_can_software_reset(struct net_device *dev) 573 + { 574 + struct c_can_priv *priv = netdev_priv(dev); 575 + int retry = 0; 576 + 577 + if (priv->type != BOSCH_D_CAN) 578 + return 0; 579 + 580 + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT); 581 + while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) { 582 + msleep(20); 583 + if (retry++ > 100) { 584 + netdev_err(dev, "CCTRL: software reset failed\n"); 585 + return -EIO; 586 + } 587 + } 588 + 589 + return 0; 590 + } 591 + 576 592 /* 577 593 * Configure C_CAN chip: 578 594 * - enable/disable auto-retransmission ··· 602 578 static int c_can_chip_config(struct net_device *dev) 603 579 { 604 580 struct c_can_priv *priv = netdev_priv(dev); 581 + int err; 582 + 583 + err = c_can_software_reset(dev); 584 + if (err) 585 + return err; 605 586 606 587 /* enable automatic retransmission */ 607 588 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); ··· 915 886 struct can_berr_counter bec; 916 887 917 888 switch (error_type) { 889 + case C_CAN_NO_ERROR: 890 + priv->can.state = CAN_STATE_ERROR_ACTIVE; 891 + break; 918 892 case C_CAN_ERROR_WARNING: 919 893 /* error warning state */ 920 894 priv->can.can_stats.error_warning++; ··· 948 916 ERR_CNT_RP_SHIFT; 949 917 950 918 switch (error_type) { 919 + case C_CAN_NO_ERROR: 920 + /* error warning state */ 921 + cf->can_id |= CAN_ERR_CRTL; 922 + cf->data[1] = CAN_ERR_CRTL_ACTIVE; 
923 + cf->data[6] = bec.txerr; 924 + cf->data[7] = bec.rxerr; 925 + break; 951 926 case C_CAN_ERROR_WARNING: 952 927 /* error warning state */ 953 928 cf->can_id |= CAN_ERR_CRTL; ··· 1068 1029 u16 curr, last = priv->last_status; 1069 1030 int work_done = 0; 1070 1031 1071 - priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); 1072 - /* Ack status on C_CAN. D_CAN is self clearing */ 1073 - if (priv->type != BOSCH_D_CAN) 1074 - priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 1032 + /* Only read the status register if a status interrupt was pending */ 1033 + if (atomic_xchg(&priv->sie_pending, 0)) { 1034 + priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); 1035 + /* Ack status on C_CAN. D_CAN is self clearing */ 1036 + if (priv->type != BOSCH_D_CAN) 1037 + priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 1038 + } else { 1039 + /* no change detected ... */ 1040 + curr = last; 1041 + } 1075 1042 1076 1043 /* handle state changes */ 1077 1044 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { ··· 1099 1054 /* handle bus recovery events */ 1100 1055 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { 1101 1056 netdev_dbg(dev, "left bus off state\n"); 1102 - priv->can.state = CAN_STATE_ERROR_ACTIVE; 1057 + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE); 1103 1058 } 1059 + 1104 1060 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { 1105 1061 netdev_dbg(dev, "left error passive state\n"); 1106 - priv->can.state = CAN_STATE_ERROR_ACTIVE; 1062 + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING); 1063 + } 1064 + 1065 + if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) { 1066 + netdev_dbg(dev, "left error warning state\n"); 1067 + work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR); 1107 1068 } 1108 1069 1109 1070 /* handle lec errors on the bus */ ··· 1134 1083 { 1135 1084 struct net_device *dev = (struct net_device *)dev_id; 1136 1085 struct c_can_priv *priv = netdev_priv(dev); 1086 + int 
reg_int; 1137 1087 1138 - if (!priv->read_reg(priv, C_CAN_INT_REG)) 1088 + reg_int = priv->read_reg(priv, C_CAN_INT_REG); 1089 + if (!reg_int) 1139 1090 return IRQ_NONE; 1091 + 1092 + /* save for later use */ 1093 + if (reg_int & INT_STS_PENDING) 1094 + atomic_set(&priv->sie_pending, 1); 1140 1095 1141 1096 /* disable all interrupts and schedule the NAPI */ 1142 1097 c_can_irq_control(priv, false);
+1
drivers/net/can/c_can/c_can.h
··· 198 198 struct net_device *dev; 199 199 struct device *device; 200 200 atomic_t tx_active; 201 + atomic_t sie_pending; 201 202 unsigned long tx_dir; 202 203 int last_status; 203 204 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
+1
drivers/net/can/dev.c
··· 848 848 return; 849 849 850 850 ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); 851 + of_node_put(dn); 851 852 if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) 852 853 netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n"); 853 854 }
+9 -2
drivers/net/can/flexcan.c
··· 677 677 struct can_frame *cf; 678 678 bool rx_errors = false, tx_errors = false; 679 679 u32 timestamp; 680 + int err; 680 681 681 682 timestamp = priv->read(&regs->timer) << 16; 682 683 ··· 726 725 if (tx_errors) 727 726 dev->stats.tx_errors++; 728 727 729 - can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 728 + err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 729 + if (err) 730 + dev->stats.rx_fifo_errors++; 730 731 } 731 732 732 733 static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) ··· 741 738 int flt; 742 739 struct can_berr_counter bec; 743 740 u32 timestamp; 741 + int err; 744 742 745 743 timestamp = priv->read(&regs->timer) << 16; 746 744 ··· 773 769 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 774 770 can_bus_off(dev); 775 771 776 - can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 772 + err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 773 + if (err) 774 + dev->stats.rx_fifo_errors++; 777 775 } 778 776 779 777 static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) ··· 1194 1188 reg_mecr = priv->read(&regs->mecr); 1195 1189 reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; 1196 1190 priv->write(reg_mecr, &regs->mecr); 1191 + reg_mecr |= FLEXCAN_MECR_ECCDIS; 1197 1192 reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | 1198 1193 FLEXCAN_MECR_FANCEI_MSK); 1199 1194 priv->write(reg_mecr, &regs->mecr);
+85 -17
drivers/net/can/rx-offload.c
··· 107 107 return cb_b->timestamp - cb_a->timestamp; 108 108 } 109 109 110 - static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) 110 + /** 111 + * can_rx_offload_offload_one() - Read one CAN frame from HW 112 + * @offload: pointer to rx_offload context 113 + * @n: number of mailbox to read 114 + * 115 + * The task of this function is to read a CAN frame from mailbox @n 116 + * from the device and return the mailbox's content as a struct 117 + * sk_buff. 118 + * 119 + * If the struct can_rx_offload::skb_queue exceeds the maximal queue 120 + * length (struct can_rx_offload::skb_queue_len_max) or no skb can be 121 + * allocated, the mailbox contents is discarded by reading it into an 122 + * overflow buffer. This way the mailbox is marked as free by the 123 + * driver. 124 + * 125 + * Return: A pointer to skb containing the CAN frame on success. 126 + * 127 + * NULL if the mailbox @n is empty. 128 + * 129 + * ERR_PTR() in case of an error 130 + */ 131 + static struct sk_buff * 132 + can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) 111 133 { 112 - struct sk_buff *skb = NULL; 134 + struct sk_buff *skb = NULL, *skb_error = NULL; 113 135 struct can_rx_offload_cb *cb; 114 136 struct can_frame *cf; 115 137 int ret; 116 138 117 - /* If queue is full or skb not available, read to discard mailbox */ 118 - if (likely(skb_queue_len(&offload->skb_queue) <= 119 - offload->skb_queue_len_max)) 139 + if (likely(skb_queue_len(&offload->skb_queue) < 140 + offload->skb_queue_len_max)) { 120 141 skb = alloc_can_skb(offload->dev, &cf); 142 + if (unlikely(!skb)) 143 + skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */ 144 + } else { 145 + skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */ 146 + } 121 147 122 - if (!skb) { 148 + /* If queue is full or skb not available, drop by reading into 149 + * overflow buffer. 
150 + */ 151 + if (unlikely(skb_error)) { 123 152 struct can_frame cf_overflow; 124 153 u32 timestamp; 125 154 126 155 ret = offload->mailbox_read(offload, &cf_overflow, 127 156 &timestamp, n); 128 - if (ret) 129 - offload->dev->stats.rx_dropped++; 130 157 131 - return NULL; 158 + /* Mailbox was empty. */ 159 + if (unlikely(!ret)) 160 + return NULL; 161 + 162 + /* Mailbox has been read and we're dropping it or 163 + * there was a problem reading the mailbox. 164 + * 165 + * Increment error counters in any case. 166 + */ 167 + offload->dev->stats.rx_dropped++; 168 + offload->dev->stats.rx_fifo_errors++; 169 + 170 + /* There was a problem reading the mailbox, propagate 171 + * error value. 172 + */ 173 + if (unlikely(ret < 0)) 174 + return ERR_PTR(ret); 175 + 176 + return skb_error; 132 177 } 133 178 134 179 cb = can_rx_offload_get_cb(skb); 135 180 ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); 136 - if (!ret) { 181 + 182 + /* Mailbox was empty. */ 183 + if (unlikely(!ret)) { 137 184 kfree_skb(skb); 138 185 return NULL; 139 186 } 140 187 188 + /* There was a problem reading the mailbox, propagate error value. */ 189 + if (unlikely(ret < 0)) { 190 + kfree_skb(skb); 191 + 192 + offload->dev->stats.rx_dropped++; 193 + offload->dev->stats.rx_fifo_errors++; 194 + 195 + return ERR_PTR(ret); 196 + } 197 + 198 + /* Mailbox was read. 
*/ 141 199 return skb; 142 200 } 143 201 ··· 215 157 continue; 216 158 217 159 skb = can_rx_offload_offload_one(offload, i); 218 - if (!skb) 219 - break; 160 + if (IS_ERR_OR_NULL(skb)) 161 + continue; 220 162 221 163 __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); 222 164 } ··· 246 188 struct sk_buff *skb; 247 189 int received = 0; 248 190 249 - while ((skb = can_rx_offload_offload_one(offload, 0))) { 191 + while (1) { 192 + skb = can_rx_offload_offload_one(offload, 0); 193 + if (IS_ERR(skb)) 194 + continue; 195 + if (!skb) 196 + break; 197 + 250 198 skb_queue_tail(&offload->skb_queue, skb); 251 199 received++; 252 200 } ··· 271 207 unsigned long flags; 272 208 273 209 if (skb_queue_len(&offload->skb_queue) > 274 - offload->skb_queue_len_max) 275 - return -ENOMEM; 210 + offload->skb_queue_len_max) { 211 + kfree_skb(skb); 212 + return -ENOBUFS; 213 + } 276 214 277 215 cb = can_rx_offload_get_cb(skb); 278 216 cb->timestamp = timestamp; ··· 316 250 struct sk_buff *skb) 317 251 { 318 252 if (skb_queue_len(&offload->skb_queue) > 319 - offload->skb_queue_len_max) 320 - return -ENOMEM; 253 + offload->skb_queue_len_max) { 254 + kfree_skb(skb); 255 + return -ENOBUFS; 256 + } 321 257 322 258 skb_queue_tail(&offload->skb_queue, skb); 323 259 can_rx_offload_schedule(offload);
+1 -1
drivers/net/can/spi/mcp251x.c
··· 717 717 if (priv->after_suspend) { 718 718 mcp251x_hw_reset(spi); 719 719 mcp251x_setup(net, spi); 720 + priv->force_quit = 0; 720 721 if (priv->after_suspend & AFTER_SUSPEND_RESTART) { 721 722 mcp251x_set_normal_mode(spi); 722 723 } else if (priv->after_suspend & AFTER_SUSPEND_UP) { ··· 729 728 mcp251x_hw_sleep(spi); 730 729 } 731 730 priv->after_suspend = 0; 732 - priv->force_quit = 0; 733 731 } 734 732 735 733 if (priv->restart_tx) {
+150 -86
drivers/net/can/ti_hecc.c
··· 73 73 */ 74 74 #define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) 75 75 #define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) 76 + #define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX) 76 77 77 78 /* TI HECC module registers */ 78 79 #define HECC_CANME 0x0 /* Mailbox enable */ ··· 83 82 #define HECC_CANTA 0x10 /* Transmission acknowledge */ 84 83 #define HECC_CANAA 0x14 /* Abort acknowledge */ 85 84 #define HECC_CANRMP 0x18 /* Receive message pending */ 86 - #define HECC_CANRML 0x1C /* Remote message lost */ 85 + #define HECC_CANRML 0x1C /* Receive message lost */ 87 86 #define HECC_CANRFP 0x20 /* Remote frame pending */ 88 87 #define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */ 89 88 #define HECC_CANMC 0x28 /* Master control */ ··· 150 149 #define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\ 151 150 HECC_CANES_CRCE | HECC_CANES_SE |\ 152 151 HECC_CANES_ACKE) 152 + #define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\ 153 + HECC_CANES_EP | HECC_CANES_EW) 153 154 154 155 #define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */ 155 156 ··· 385 382 hecc_set_bit(priv, HECC_CANMIM, mbx_mask); 386 383 } 387 384 388 - /* Prevent message over-write & Enable interrupts */ 389 - hecc_write(priv, HECC_CANOPC, HECC_SET_REG); 385 + /* Enable tx interrupts */ 386 + hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1); 387 + 388 + /* Prevent message over-write to create a rx fifo, but not for 389 + * the lowest priority mailbox, since that allows detecting 390 + * overflows instead of the hardware silently dropping the 391 + * messages. 
392 + */ 393 + mbx_mask = ~BIT(HECC_RX_LAST_MBOX); 394 + hecc_write(priv, HECC_CANOPC, mbx_mask); 395 + 396 + /* Enable interrupts */ 390 397 if (priv->use_hecc1int) { 391 398 hecc_write(priv, HECC_CANMIL, HECC_SET_REG); 392 399 hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | ··· 412 399 static void ti_hecc_stop(struct net_device *ndev) 413 400 { 414 401 struct ti_hecc_priv *priv = netdev_priv(ndev); 402 + 403 + /* Disable the CPK; stop sending, erroring and acking */ 404 + hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR); 415 405 416 406 /* Disable interrupts and disable mailboxes */ 417 407 hecc_write(priv, HECC_CANGIM, 0); ··· 524 508 hecc_set_bit(priv, HECC_CANME, mbx_mask); 525 509 spin_unlock_irqrestore(&priv->mbx_lock, flags); 526 510 527 - hecc_clear_bit(priv, HECC_CANMD, mbx_mask); 528 - hecc_set_bit(priv, HECC_CANMIM, mbx_mask); 529 511 hecc_write(priv, HECC_CANTRS, mbx_mask); 530 512 531 513 return NETDEV_TX_OK; ··· 540 526 u32 *timestamp, unsigned int mbxno) 541 527 { 542 528 struct ti_hecc_priv *priv = rx_offload_to_priv(offload); 543 - u32 data; 529 + u32 data, mbx_mask; 530 + int ret = 1; 544 531 532 + mbx_mask = BIT(mbxno); 545 533 data = hecc_read_mbx(priv, mbxno, HECC_CANMID); 546 534 if (data & HECC_CANMID_IDE) 547 535 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; ··· 564 548 565 549 *timestamp = hecc_read_stamp(priv, mbxno); 566 550 567 - return 1; 551 + /* Check for FIFO overrun. 552 + * 553 + * All but the last RX mailbox have activated overwrite 554 + * protection. So skip check for overrun, if we're not 555 + * handling the last RX mailbox. 556 + * 557 + * As the overwrite protection for the last RX mailbox is 558 + * disabled, the CAN core might update while we're reading 559 + * it. This means the skb might be inconsistent. 560 + * 561 + * Return an error to let rx-offload discard this CAN frame. 
562 + */ 563 + if (unlikely(mbxno == HECC_RX_LAST_MBOX && 564 + hecc_read(priv, HECC_CANRML) & mbx_mask)) 565 + ret = -ENOBUFS; 566 + 567 + hecc_write(priv, HECC_CANRMP, mbx_mask); 568 + 569 + return ret; 568 570 } 569 571 570 572 static int ti_hecc_error(struct net_device *ndev, int int_status, ··· 592 558 struct can_frame *cf; 593 559 struct sk_buff *skb; 594 560 u32 timestamp; 595 - 596 - /* propagate the error condition to the can stack */ 597 - skb = alloc_can_err_skb(ndev, &cf); 598 - if (!skb) { 599 - if (printk_ratelimit()) 600 - netdev_err(priv->ndev, 601 - "%s: alloc_can_err_skb() failed\n", 602 - __func__); 603 - return -ENOMEM; 604 - } 605 - 606 - if (int_status & HECC_CANGIF_WLIF) { /* warning level int */ 607 - if ((int_status & HECC_CANGIF_BOIF) == 0) { 608 - priv->can.state = CAN_STATE_ERROR_WARNING; 609 - ++priv->can.can_stats.error_warning; 610 - cf->can_id |= CAN_ERR_CRTL; 611 - if (hecc_read(priv, HECC_CANTEC) > 96) 612 - cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; 613 - if (hecc_read(priv, HECC_CANREC) > 96) 614 - cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; 615 - } 616 - hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW); 617 - netdev_dbg(priv->ndev, "Error Warning interrupt\n"); 618 - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); 619 - } 620 - 621 - if (int_status & HECC_CANGIF_EPIF) { /* error passive int */ 622 - if ((int_status & HECC_CANGIF_BOIF) == 0) { 623 - priv->can.state = CAN_STATE_ERROR_PASSIVE; 624 - ++priv->can.can_stats.error_passive; 625 - cf->can_id |= CAN_ERR_CRTL; 626 - if (hecc_read(priv, HECC_CANTEC) > 127) 627 - cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; 628 - if (hecc_read(priv, HECC_CANREC) > 127) 629 - cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; 630 - } 631 - hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP); 632 - netdev_dbg(priv->ndev, "Error passive interrupt\n"); 633 - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); 634 - } 635 - 636 - /* Need to check busoff condition in error status register too to 637 - * ensure warning interrupts 
don't hog the system 638 - */ 639 - if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) { 640 - priv->can.state = CAN_STATE_BUS_OFF; 641 - cf->can_id |= CAN_ERR_BUSOFF; 642 - hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO); 643 - hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); 644 - /* Disable all interrupts in bus-off to avoid int hog */ 645 - hecc_write(priv, HECC_CANGIM, 0); 646 - ++priv->can.can_stats.bus_off; 647 - can_bus_off(ndev); 648 - } 561 + int err; 649 562 650 563 if (err_status & HECC_BUS_ERROR) { 564 + /* propagate the error condition to the can stack */ 565 + skb = alloc_can_err_skb(ndev, &cf); 566 + if (!skb) { 567 + if (net_ratelimit()) 568 + netdev_err(priv->ndev, 569 + "%s: alloc_can_err_skb() failed\n", 570 + __func__); 571 + return -ENOMEM; 572 + } 573 + 651 574 ++priv->can.can_stats.bus_error; 652 575 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 653 - if (err_status & HECC_CANES_FE) { 654 - hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); 576 + if (err_status & HECC_CANES_FE) 655 577 cf->data[2] |= CAN_ERR_PROT_FORM; 656 - } 657 - if (err_status & HECC_CANES_BE) { 658 - hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE); 578 + if (err_status & HECC_CANES_BE) 659 579 cf->data[2] |= CAN_ERR_PROT_BIT; 660 - } 661 - if (err_status & HECC_CANES_SE) { 662 - hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE); 580 + if (err_status & HECC_CANES_SE) 663 581 cf->data[2] |= CAN_ERR_PROT_STUFF; 664 - } 665 - if (err_status & HECC_CANES_CRCE) { 666 - hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); 582 + if (err_status & HECC_CANES_CRCE) 667 583 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; 668 - } 669 - if (err_status & HECC_CANES_ACKE) { 670 - hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); 584 + if (err_status & HECC_CANES_ACKE) 671 585 cf->data[3] = CAN_ERR_PROT_LOC_ACK; 672 - } 586 + 587 + timestamp = hecc_read(priv, HECC_CANLNT); 588 + err = can_rx_offload_queue_sorted(&priv->offload, skb, 589 + timestamp); 590 + if (err) 591 + 
ndev->stats.rx_fifo_errors++; 592 + } 593 + 594 + hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS); 595 + 596 + return 0; 597 + } 598 + 599 + static void ti_hecc_change_state(struct net_device *ndev, 600 + enum can_state rx_state, 601 + enum can_state tx_state) 602 + { 603 + struct ti_hecc_priv *priv = netdev_priv(ndev); 604 + struct can_frame *cf; 605 + struct sk_buff *skb; 606 + u32 timestamp; 607 + int err; 608 + 609 + skb = alloc_can_err_skb(priv->ndev, &cf); 610 + if (unlikely(!skb)) { 611 + priv->can.state = max(tx_state, rx_state); 612 + return; 613 + } 614 + 615 + can_change_state(priv->ndev, cf, tx_state, rx_state); 616 + 617 + if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) { 618 + cf->data[6] = hecc_read(priv, HECC_CANTEC); 619 + cf->data[7] = hecc_read(priv, HECC_CANREC); 673 620 } 674 621 675 622 timestamp = hecc_read(priv, HECC_CANLNT); 676 - can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 677 - 678 - return 0; 623 + err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 624 + if (err) 625 + ndev->stats.rx_fifo_errors++; 679 626 } 680 627 681 628 static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) ··· 666 651 struct net_device_stats *stats = &ndev->stats; 667 652 u32 mbxno, mbx_mask, int_status, err_status, stamp; 668 653 unsigned long flags, rx_pending; 654 + u32 handled = 0; 669 655 670 656 int_status = hecc_read(priv, 671 657 priv->use_hecc1int ? 
··· 676 660 return IRQ_NONE; 677 661 678 662 err_status = hecc_read(priv, HECC_CANES); 679 - if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO | 680 - HECC_CANES_EP | HECC_CANES_EW)) 663 + if (unlikely(err_status & HECC_CANES_FLAGS)) 681 664 ti_hecc_error(ndev, int_status, err_status); 665 + 666 + if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) { 667 + enum can_state rx_state, tx_state; 668 + u32 rec = hecc_read(priv, HECC_CANREC); 669 + u32 tec = hecc_read(priv, HECC_CANTEC); 670 + 671 + if (int_status & HECC_CANGIF_WLIF) { 672 + handled |= HECC_CANGIF_WLIF; 673 + rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0; 674 + tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0; 675 + netdev_dbg(priv->ndev, "Error Warning interrupt\n"); 676 + ti_hecc_change_state(ndev, rx_state, tx_state); 677 + } 678 + 679 + if (int_status & HECC_CANGIF_EPIF) { 680 + handled |= HECC_CANGIF_EPIF; 681 + rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0; 682 + tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0; 683 + netdev_dbg(priv->ndev, "Error passive interrupt\n"); 684 + ti_hecc_change_state(ndev, rx_state, tx_state); 685 + } 686 + 687 + if (int_status & HECC_CANGIF_BOIF) { 688 + handled |= HECC_CANGIF_BOIF; 689 + rx_state = CAN_STATE_BUS_OFF; 690 + tx_state = CAN_STATE_BUS_OFF; 691 + netdev_dbg(priv->ndev, "Bus off interrupt\n"); 692 + 693 + /* Disable all interrupts */ 694 + hecc_write(priv, HECC_CANGIM, 0); 695 + can_bus_off(ndev); 696 + ti_hecc_change_state(ndev, rx_state, tx_state); 697 + } 698 + } else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) { 699 + enum can_state new_state, tx_state, rx_state; 700 + u32 rec = hecc_read(priv, HECC_CANREC); 701 + u32 tec = hecc_read(priv, HECC_CANTEC); 702 + 703 + if (rec >= 128 || tec >= 128) 704 + new_state = CAN_STATE_ERROR_PASSIVE; 705 + else if (rec >= 96 || tec >= 96) 706 + new_state = CAN_STATE_ERROR_WARNING; 707 + else 708 + new_state = CAN_STATE_ERROR_ACTIVE; 709 + 710 + if (new_state < priv->can.state) { 711 + 
rx_state = rec >= tec ? new_state : 0; 712 + tx_state = rec <= tec ? new_state : 0; 713 + ti_hecc_change_state(ndev, rx_state, tx_state); 714 + } 715 + } 682 716 683 717 if (int_status & HECC_CANGIF_GMIF) { 684 718 while (priv->tx_tail - priv->tx_head > 0) { ··· 736 670 mbx_mask = BIT(mbxno); 737 671 if (!(mbx_mask & hecc_read(priv, HECC_CANTA))) 738 672 break; 739 - hecc_clear_bit(priv, HECC_CANMIM, mbx_mask); 740 673 hecc_write(priv, HECC_CANTA, mbx_mask); 741 674 spin_lock_irqsave(&priv->mbx_lock, flags); 742 675 hecc_clear_bit(priv, HECC_CANME, mbx_mask); ··· 760 695 while ((rx_pending = hecc_read(priv, HECC_CANRMP))) { 761 696 can_rx_offload_irq_offload_timestamp(&priv->offload, 762 697 rx_pending); 763 - hecc_write(priv, HECC_CANRMP, rx_pending); 764 698 } 765 699 } 766 700 767 701 /* clear all interrupt conditions - read back to avoid spurious ints */ 768 702 if (priv->use_hecc1int) { 769 - hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); 703 + hecc_write(priv, HECC_CANGIF1, handled); 770 704 int_status = hecc_read(priv, HECC_CANGIF1); 771 705 } else { 772 - hecc_write(priv, HECC_CANGIF0, HECC_SET_REG); 706 + hecc_write(priv, HECC_CANGIF0, handled); 773 707 int_status = hecc_read(priv, HECC_CANGIF0); 774 708 } 775 709 ··· 941 877 942 878 priv->offload.mailbox_read = ti_hecc_mailbox_read; 943 879 priv->offload.mb_first = HECC_RX_FIRST_MBOX; 944 - priv->offload.mb_last = HECC_MAX_TX_MBOX; 880 + priv->offload.mb_last = HECC_RX_LAST_MBOX; 945 881 err = can_rx_offload_add_timestamp(ndev, &priv->offload); 946 882 if (err) { 947 883 dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
+1
drivers/net/can/usb/gs_usb.c
··· 623 623 rc); 624 624 625 625 usb_unanchor_urb(urb); 626 + usb_free_urb(urb); 626 627 break; 627 628 } 628 629
+1 -2
drivers/net/can/usb/mcba_usb.c
··· 876 876 netdev_info(priv->netdev, "device disconnected\n"); 877 877 878 878 unregister_candev(priv->netdev); 879 - free_candev(priv->netdev); 880 - 881 879 mcba_urb_unlink(priv); 880 + free_candev(priv->netdev); 882 881 } 883 882 884 883 static struct usb_driver mcba_usb_driver = {
+22 -10
drivers/net/can/usb/peak_usb/pcan_usb.c
··· 100 100 u8 *end; 101 101 u8 rec_cnt; 102 102 u8 rec_idx; 103 - u8 rec_data_idx; 103 + u8 rec_ts_idx; 104 104 struct net_device *netdev; 105 105 struct pcan_usb *pdev; 106 106 }; ··· 436 436 } 437 437 if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) { 438 438 /* no error (back to active state) */ 439 - mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; 440 - return 0; 439 + new_state = CAN_STATE_ERROR_ACTIVE; 440 + break; 441 441 } 442 442 break; 443 443 ··· 460 460 } 461 461 462 462 if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) { 463 - /* no error (back to active state) */ 464 - mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; 465 - return 0; 463 + /* no error (back to warning state) */ 464 + new_state = CAN_STATE_ERROR_WARNING; 465 + break; 466 466 } 467 467 break; 468 468 ··· 499 499 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING | 500 500 CAN_ERR_CRTL_RX_WARNING; 501 501 mc->pdev->dev.can.can_stats.error_warning++; 502 + break; 503 + 504 + case CAN_STATE_ERROR_ACTIVE: 505 + cf->can_id |= CAN_ERR_CRTL; 506 + cf->data[1] = CAN_ERR_CRTL_ACTIVE; 502 507 break; 503 508 504 509 default: ··· 552 547 mc->ptr += PCAN_USB_CMD_ARGS; 553 548 554 549 if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { 555 - int err = pcan_usb_decode_ts(mc, !mc->rec_idx); 550 + int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx); 556 551 557 552 if (err) 558 553 return err; 554 + 555 + /* Next packet in the buffer will have a timestamp on a single 556 + * byte 557 + */ 558 + mc->rec_ts_idx++; 559 559 } 560 560 561 561 switch (f) { ··· 642 632 643 633 cf->can_dlc = get_can_dlc(rec_len); 644 634 645 - /* first data packet timestamp is a word */ 646 - if (pcan_usb_decode_ts(mc, !mc->rec_data_idx)) 635 + /* Only first packet timestamp is a word */ 636 + if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx)) 647 637 goto decode_failed; 638 + 639 + /* Next packet in the buffer will have a timestamp on a single byte */ 640 + mc->rec_ts_idx++; 648 641 649 642 /* read data */ 650 643 memset(cf->data, 0x0, sizeof(cf->data)); ··· 701 
688 /* handle normal can frames here */ 702 689 } else { 703 690 err = pcan_usb_decode_data(&mc, sl); 704 - mc.rec_data_idx++; 705 691 } 706 692 } 707 693
+1 -1
drivers/net/can/usb/peak_usb/pcan_usb_core.c
··· 750 750 dev = netdev_priv(netdev); 751 751 752 752 /* allocate a buffer large enough to send commands */ 753 - dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); 753 + dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); 754 754 if (!dev->cmd_buf) { 755 755 err = -ENOMEM; 756 756 goto lbl_free_candev;
+1 -2
drivers/net/can/usb/usb_8dev.c
··· 996 996 netdev_info(priv->netdev, "device disconnected\n"); 997 997 998 998 unregister_netdev(priv->netdev); 999 - free_candev(priv->netdev); 1000 - 1001 999 unlink_all_urbs(priv); 1000 + free_candev(priv->netdev); 1002 1001 } 1003 1002 1004 1003 }
-1
drivers/net/can/xilinx_can.c
··· 1599 1599 1600 1600 static const struct xcan_devtype_data xcan_axi_data = { 1601 1601 .cantype = XAXI_CAN, 1602 - .flags = XCAN_FLAG_TXFEMP, 1603 1602 .bittiming_const = &xcan_bittiming_const, 1604 1603 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, 1605 1604 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
+1 -1
include/uapi/linux/can.h
··· 1 - /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1 + /* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */ 2 2 /* 3 3 * linux/can.h 4 4 *
+1 -1
include/uapi/linux/can/bcm.h
··· 1 - /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1 + /* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */ 2 2 /* 3 3 * linux/can/bcm.h 4 4 *
+1 -1
include/uapi/linux/can/error.h
··· 1 - /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1 + /* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */ 2 2 /* 3 3 * linux/can/error.h 4 4 *
+1 -1
include/uapi/linux/can/gw.h
··· 1 - /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1 + /* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */ 2 2 /* 3 3 * linux/can/gw.h 4 4 *
+1 -1
include/uapi/linux/can/j1939.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 + /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ 2 2 /* 3 3 * j1939.h 4 4 *
+1 -1
include/uapi/linux/can/netlink.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 + /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ 2 2 /* 3 3 * linux/can/netlink.h 4 4 *
+1 -1
include/uapi/linux/can/raw.h
··· 1 - /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1 + /* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */ 2 2 /* 3 3 * linux/can/raw.h 4 4 *
+1 -1
include/uapi/linux/can/vxcan.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1 + /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ 2 2 #ifndef _UAPI_CAN_VXCAN_H 3 3 #define _UAPI_CAN_VXCAN_H 4 4
+7 -2
net/can/j1939/socket.c
··· 580 580 j1939_netdev_stop(priv); 581 581 } 582 582 583 + kfree(jsk->filters); 583 584 sock_orphan(sk); 584 585 sock->sk = NULL; 585 586 ··· 910 909 memset(serr, 0, sizeof(*serr)); 911 910 switch (type) { 912 911 case J1939_ERRQUEUE_ACK: 913 - if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) 912 + if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) { 913 + kfree_skb(skb); 914 914 return; 915 + } 915 916 916 917 serr->ee.ee_errno = ENOMSG; 917 918 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; ··· 921 918 state = "ACK"; 922 919 break; 923 920 case J1939_ERRQUEUE_SCHED: 924 - if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) 921 + if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) { 922 + kfree_skb(skb); 925 923 return; 924 + } 926 925 927 926 serr->ee.ee_errno = ENOMSG; 928 927 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+19 -1
net/can/j1939/transport.c
··· 1273 1273 static void 1274 1274 j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb) 1275 1275 { 1276 + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); 1277 + const u8 *dat; 1278 + int len; 1279 + 1276 1280 if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) 1277 1281 return; 1282 + 1283 + dat = skb->data; 1284 + 1285 + if (skcb->addr.type == J1939_ETP) 1286 + len = j1939_etp_ctl_to_size(dat); 1287 + else 1288 + len = j1939_tp_ctl_to_size(dat); 1289 + 1290 + if (session->total_message_size != len) { 1291 + netdev_warn_once(session->priv->ndev, 1292 + "%s: 0x%p: Incorrect size. Expected: %i; got: %i.\n", 1293 + __func__, session, session->total_message_size, 1294 + len); 1295 + } 1278 1296 1279 1297 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); 1280 1298 ··· 1450 1432 skcb = j1939_skb_to_cb(skb); 1451 1433 memcpy(skcb, rel_skcb, sizeof(*skcb)); 1452 1434 1453 - session = j1939_session_new(priv, skb, skb->len); 1435 + session = j1939_session_new(priv, skb, size); 1454 1436 if (!session) { 1455 1437 kfree_skb(skb); 1456 1438 return NULL;