Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux-can-fixes-for-6.15-20250506' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can

Marc Kleine-Budde says:

====================
pull-request: can 2025-05-06

The first patch is by Antonios Salios and adds a missing
spin_lock_init() to the m_can driver.

The next 3 patches are by me and fix the unregistration order in the
mcp251xfd, rockchip_canfd and m_can drivers.

The last patch is by Oliver Hartkopp and fixes RCU and BH
locking/handling in the CAN gw protocol.

* tag 'linux-can-fixes-for-6.15-20250506' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can:
can: gw: fix RCU/BH usage in cgw_create_job()
can: mcan: m_can_class_unregister(): fix order of unregistration calls
can: rockchip_canfd: rkcanfd_remove(): fix order of unregistration calls
can: mcp251xfd: mcp251xfd_remove(): fix order of unregistration calls
can: mcp251xfd: fix TDC setting for low data bit rates
can: m_can: m_can_class_allocate_dev(): initialize spin lock on device probe
====================

Link: https://patch.msgid.link/20250506135939.652543-1-mkl@pengutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+127 -71
+2 -1
drivers/net/can/m_can/m_can.c
··· 2379 2379 SET_NETDEV_DEV(net_dev, dev); 2380 2380 2381 2381 m_can_of_parse_mram(class_dev, mram_config_vals); 2382 + spin_lock_init(&class_dev->tx_handling_spinlock); 2382 2383 out: 2383 2384 return class_dev; 2384 2385 } ··· 2463 2462 2464 2463 void m_can_class_unregister(struct m_can_classdev *cdev) 2465 2464 { 2465 + unregister_candev(cdev->net); 2466 2466 if (cdev->is_peripheral) 2467 2467 can_rx_offload_del(&cdev->offload); 2468 - unregister_candev(cdev->net); 2469 2468 } 2470 2469 EXPORT_SYMBOL_GPL(m_can_class_unregister); 2471 2470
+1 -1
drivers/net/can/rockchip/rockchip_canfd-core.c
··· 937 937 struct rkcanfd_priv *priv = platform_get_drvdata(pdev); 938 938 struct net_device *ndev = priv->ndev; 939 939 940 - can_rx_offload_del(&priv->offload); 941 940 rkcanfd_unregister(priv); 941 + can_rx_offload_del(&priv->offload); 942 942 free_candev(ndev); 943 943 } 944 944
+33 -9
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
··· 75 75 .brp_inc = 1, 76 76 }; 77 77 78 + /* The datasheet of the mcp2518fd (DS20006027B) specifies a range of 79 + * [-64,63] for TDCO, indicating a relative TDCO. 80 + * 81 + * Manual tests have shown, that using a relative TDCO configuration 82 + * results in bus off, while an absolute configuration works. 83 + * 84 + * For TDCO use the max value (63) from the data sheet, but 0 as the 85 + * minimum. 86 + */ 87 + static const struct can_tdc_const mcp251xfd_tdc_const = { 88 + .tdcv_min = 0, 89 + .tdcv_max = 63, 90 + .tdco_min = 0, 91 + .tdco_max = 63, 92 + .tdcf_min = 0, 93 + .tdcf_max = 0, 94 + }; 95 + 78 96 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) 79 97 { 80 98 switch (model) { ··· 528 510 { 529 511 const struct can_bittiming *bt = &priv->can.bittiming; 530 512 const struct can_bittiming *dbt = &priv->can.data_bittiming; 531 - u32 val = 0; 532 - s8 tdco; 513 + u32 tdcmod, val = 0; 533 514 int err; 534 515 535 516 /* CAN Control Register ··· 592 575 return err; 593 576 594 577 /* Transmitter Delay Compensation */ 595 - tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1), 596 - -64, 63); 597 - val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, 598 - MCP251XFD_REG_TDC_TDCMOD_AUTO) | 599 - FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco); 578 + if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) 579 + tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO; 580 + else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) 581 + tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL; 582 + else 583 + tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED; 584 + 585 + val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) | 586 + FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) | 587 + FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco); 600 588 601 589 return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); 602 590 } ··· 2105 2083 priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; 2106 2084 priv->can.bittiming_const = 
&mcp251xfd_bittiming_const; 2107 2085 priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; 2086 + priv->can.tdc_const = &mcp251xfd_tdc_const; 2108 2087 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 2109 2088 CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | 2110 2089 CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | 2111 - CAN_CTRLMODE_CC_LEN8_DLC; 2090 + CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO | 2091 + CAN_CTRLMODE_TDC_MANUAL; 2112 2092 set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); 2113 2093 priv->ndev = ndev; 2114 2094 priv->spi = spi; ··· 2198 2174 struct mcp251xfd_priv *priv = spi_get_drvdata(spi); 2199 2175 struct net_device *ndev = priv->ndev; 2200 2176 2201 - can_rx_offload_del(&priv->offload); 2202 2177 mcp251xfd_unregister(priv); 2178 + can_rx_offload_del(&priv->offload); 2203 2179 spi->max_speed_hz = priv->spi_max_speed_hz_orig; 2204 2180 free_candev(ndev); 2205 2181 }
+91 -60
net/can/gw.c
··· 130 130 u32 handled_frames; 131 131 u32 dropped_frames; 132 132 u32 deleted_frames; 133 - struct cf_mod mod; 133 + struct cf_mod __rcu *cf_mod; 134 134 union { 135 135 /* CAN frame data source */ 136 136 struct net_device *dev; ··· 459 459 struct cgw_job *gwj = (struct cgw_job *)data; 460 460 struct canfd_frame *cf; 461 461 struct sk_buff *nskb; 462 + struct cf_mod *mod; 462 463 int modidx = 0; 463 464 464 465 /* process strictly Classic CAN or CAN FD frames */ ··· 507 506 * When there is at least one modification function activated, 508 507 * we need to copy the skb as we want to modify skb->data. 509 508 */ 510 - if (gwj->mod.modfunc[0]) 509 + mod = rcu_dereference(gwj->cf_mod); 510 + if (mod->modfunc[0]) 511 511 nskb = skb_copy(skb, GFP_ATOMIC); 512 512 else 513 513 nskb = skb_clone(skb, GFP_ATOMIC); ··· 531 529 cf = (struct canfd_frame *)nskb->data; 532 530 533 531 /* perform preprocessed modification functions if there are any */ 534 - while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) 535 - (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); 532 + while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx]) 533 + (*mod->modfunc[modidx++])(cf, mod); 536 534 537 535 /* Has the CAN frame been modified? */ 538 536 if (modidx) { ··· 548 546 } 549 547 550 548 /* check for checksum updates */ 551 - if (gwj->mod.csumfunc.crc8) 552 - (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); 549 + if (mod->csumfunc.crc8) 550 + (*mod->csumfunc.crc8)(cf, &mod->csum.crc8); 553 551 554 - if (gwj->mod.csumfunc.xor) 555 - (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); 552 + if (mod->csumfunc.xor) 553 + (*mod->csumfunc.xor)(cf, &mod->csum.xor); 556 554 } 557 555 558 556 /* clear the skb timestamp if not configured the other way */ ··· 583 581 { 584 582 struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu); 585 583 584 + /* cgw_job::cf_mod is always accessed from the same cgw_job object within 585 + * the same RCU read section. 
Once cgw_job is scheduled for removal, 586 + * cf_mod can also be removed without mandating an additional grace period. 587 + */ 588 + kfree(rcu_access_pointer(gwj->cf_mod)); 586 589 kmem_cache_free(cgw_cache, gwj); 590 + } 591 + 592 + /* Return cgw_job::cf_mod with RTNL protected section */ 593 + static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj) 594 + { 595 + return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked()); 587 596 } 588 597 589 598 static int cgw_notifier(struct notifier_block *nb, ··· 629 616 { 630 617 struct rtcanmsg *rtcan; 631 618 struct nlmsghdr *nlh; 619 + struct cf_mod *mod; 632 620 633 621 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags); 634 622 if (!nlh) ··· 664 650 goto cancel; 665 651 } 666 652 653 + mod = cgw_job_cf_mod(gwj); 667 654 if (gwj->flags & CGW_FLAGS_CAN_FD) { 668 655 struct cgw_fdframe_mod mb; 669 656 670 - if (gwj->mod.modtype.and) { 671 - memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); 672 - mb.modtype = gwj->mod.modtype.and; 657 + if (mod->modtype.and) { 658 + memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); 659 + mb.modtype = mod->modtype.and; 673 660 if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) 674 661 goto cancel; 675 662 } 676 663 677 - if (gwj->mod.modtype.or) { 678 - memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); 679 - mb.modtype = gwj->mod.modtype.or; 664 + if (mod->modtype.or) { 665 + memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); 666 + mb.modtype = mod->modtype.or; 680 667 if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) 681 668 goto cancel; 682 669 } 683 670 684 - if (gwj->mod.modtype.xor) { 685 - memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); 686 - mb.modtype = gwj->mod.modtype.xor; 671 + if (mod->modtype.xor) { 672 + memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); 673 + mb.modtype = mod->modtype.xor; 687 674 if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) 688 675 goto cancel; 689 676 } 690 677 691 - if (gwj->mod.modtype.set) { 692 - 
memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); 693 - mb.modtype = gwj->mod.modtype.set; 678 + if (mod->modtype.set) { 679 + memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); 680 + mb.modtype = mod->modtype.set; 694 681 if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) 695 682 goto cancel; 696 683 } 697 684 } else { 698 685 struct cgw_frame_mod mb; 699 686 700 - if (gwj->mod.modtype.and) { 701 - memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); 702 - mb.modtype = gwj->mod.modtype.and; 687 + if (mod->modtype.and) { 688 + memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); 689 + mb.modtype = mod->modtype.and; 703 690 if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) 704 691 goto cancel; 705 692 } 706 693 707 - if (gwj->mod.modtype.or) { 708 - memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); 709 - mb.modtype = gwj->mod.modtype.or; 694 + if (mod->modtype.or) { 695 + memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); 696 + mb.modtype = mod->modtype.or; 710 697 if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) 711 698 goto cancel; 712 699 } 713 700 714 - if (gwj->mod.modtype.xor) { 715 - memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); 716 - mb.modtype = gwj->mod.modtype.xor; 701 + if (mod->modtype.xor) { 702 + memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); 703 + mb.modtype = mod->modtype.xor; 717 704 if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) 718 705 goto cancel; 719 706 } 720 707 721 - if (gwj->mod.modtype.set) { 722 - memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); 723 - mb.modtype = gwj->mod.modtype.set; 708 + if (mod->modtype.set) { 709 + memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); 710 + mb.modtype = mod->modtype.set; 724 711 if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) 725 712 goto cancel; 726 713 } 727 714 } 728 715 729 - if (gwj->mod.uid) { 730 - if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0) 716 + if (mod->uid) { 717 + if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0) 731 718 goto cancel; 732 
719 } 733 720 734 - if (gwj->mod.csumfunc.crc8) { 721 + if (mod->csumfunc.crc8) { 735 722 if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, 736 - &gwj->mod.csum.crc8) < 0) 723 + &mod->csum.crc8) < 0) 737 724 goto cancel; 738 725 } 739 726 740 - if (gwj->mod.csumfunc.xor) { 727 + if (mod->csumfunc.xor) { 741 728 if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, 742 - &gwj->mod.csum.xor) < 0) 729 + &mod->csum.xor) < 0) 743 730 goto cancel; 744 731 } 745 732 ··· 1074 1059 struct net *net = sock_net(skb->sk); 1075 1060 struct rtcanmsg *r; 1076 1061 struct cgw_job *gwj; 1077 - struct cf_mod mod; 1062 + struct cf_mod *mod; 1078 1063 struct can_can_gw ccgw; 1079 1064 u8 limhops = 0; 1080 1065 int err = 0; ··· 1093 1078 if (r->gwtype != CGW_TYPE_CAN_CAN) 1094 1079 return -EINVAL; 1095 1080 1096 - err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); 1097 - if (err < 0) 1098 - return err; 1081 + mod = kmalloc(sizeof(*mod), GFP_KERNEL); 1082 + if (!mod) 1083 + return -ENOMEM; 1099 1084 1100 - if (mod.uid) { 1085 + err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); 1086 + if (err < 0) 1087 + goto out_free_cf; 1088 + 1089 + if (mod->uid) { 1101 1090 ASSERT_RTNL(); 1102 1091 1103 1092 /* check for updating an existing job with identical uid */ 1104 1093 hlist_for_each_entry(gwj, &net->can.cgw_list, list) { 1105 - if (gwj->mod.uid != mod.uid) 1094 + struct cf_mod *old_cf; 1095 + 1096 + old_cf = cgw_job_cf_mod(gwj); 1097 + if (old_cf->uid != mod->uid) 1106 1098 continue; 1107 1099 1108 1100 /* interfaces & filters must be identical */ 1109 - if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) 1110 - return -EINVAL; 1101 + if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) { 1102 + err = -EINVAL; 1103 + goto out_free_cf; 1104 + } 1111 1105 1112 - /* update modifications with disabled softirq & quit */ 1113 - local_bh_disable(); 1114 - memcpy(&gwj->mod, &mod, sizeof(mod)); 1115 - local_bh_enable(); 1106 + rcu_assign_pointer(gwj->cf_mod, mod); 1107 + 
kfree_rcu_mightsleep(old_cf); 1116 1108 return 0; 1117 1109 } 1118 1110 } 1119 1111 1120 1112 /* ifindex == 0 is not allowed for job creation */ 1121 - if (!ccgw.src_idx || !ccgw.dst_idx) 1122 - return -ENODEV; 1113 + if (!ccgw.src_idx || !ccgw.dst_idx) { 1114 + err = -ENODEV; 1115 + goto out_free_cf; 1116 + } 1123 1117 1124 1118 gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL); 1125 - if (!gwj) 1126 - return -ENOMEM; 1119 + if (!gwj) { 1120 + err = -ENOMEM; 1121 + goto out_free_cf; 1122 + } 1127 1123 1128 1124 gwj->handled_frames = 0; 1129 1125 gwj->dropped_frames = 0; ··· 1144 1118 gwj->limit_hops = limhops; 1145 1119 1146 1120 /* insert already parsed information */ 1147 - memcpy(&gwj->mod, &mod, sizeof(mod)); 1121 + RCU_INIT_POINTER(gwj->cf_mod, mod); 1148 1122 memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw)); 1149 1123 1150 1124 err = -ENODEV; ··· 1178 1152 if (!err) 1179 1153 hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); 1180 1154 out: 1181 - if (err) 1155 + if (err) { 1182 1156 kmem_cache_free(cgw_cache, gwj); 1183 - 1157 + out_free_cf: 1158 + kfree(mod); 1159 + } 1184 1160 return err; 1185 1161 } 1186 1162 ··· 1242 1214 1243 1215 /* remove only the first matching entry */ 1244 1216 hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { 1217 + struct cf_mod *cf_mod; 1218 + 1245 1219 if (gwj->flags != r->flags) 1246 1220 continue; 1247 1221 1248 1222 if (gwj->limit_hops != limhops) 1249 1223 continue; 1250 1224 1225 + cf_mod = cgw_job_cf_mod(gwj); 1251 1226 /* we have a match when uid is enabled and identical */ 1252 - if (gwj->mod.uid || mod.uid) { 1253 - if (gwj->mod.uid != mod.uid) 1227 + if (cf_mod->uid || mod.uid) { 1228 + if (cf_mod->uid != mod.uid) 1254 1229 continue; 1255 1230 } else { 1256 1231 /* no uid => check for identical modifications */ 1257 - if (memcmp(&gwj->mod, &mod, sizeof(mod))) 1232 + if (memcmp(cf_mod, &mod, sizeof(mod))) 1258 1233 continue; 1259 1234 } 1260 1235