Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux-can-fixes-for-6.8-20240214' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can

Marc Kleine-Budde says:

====================
pull-request: can 2024-02-14

This is a pull request of 3 patches for net/master.

The first patch is by Ziqi Zhao and targets the CAN J1939 protocol; it
fixes a potential deadlock by replacing the spinlock with an rwlock.

Oleksij Rempel's patch adds a missing spin_lock_bh() to prevent a
potential Use-After-Free in the CAN J1939's
setsockopt(SO_J1939_FILTER).

Maxime Jayat contributes a patch to fix the transceiver delay
compensation (TDCO) calculation, which is needed for higher CAN-FD bit
rates (usually 2Mbit/s).

* tag 'linux-can-fixes-for-6.8-20240214' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can:
can: netlink: Fix TDCO calculation using the old data bittiming
can: j1939: Fix UAF in j1939_sk_match_filter during setsockopt(SO_J1939_FILTER)
can: j1939: prevent deadlock by changing j1939_socks_lock to rwlock
====================

Link: https://lore.kernel.org/r/20240214140348.2412776-1-mkl@pengutronix.de
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+34 -19
+1 -1
drivers/net/can/dev/netlink.c
@@ -346,7 +346,7 @@
                        /* Neither of TDC parameters nor TDC flags are
                         * provided: do calculation
                         */
-                       can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
+                       can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
                                      &priv->ctrlmode, priv->ctrlmode_supported);
                } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
                   * turned off. TDC is disabled: do nothing
+2 -1
net/can/j1939/j1939-priv.h
@@ -86,7 +86,7 @@
        unsigned int tp_max_packet_size;

        /* lock for j1939_socks list */
-       spinlock_t j1939_socks_lock;
+       rwlock_t j1939_socks_lock;
        struct list_head j1939_socks;

        struct kref rx_kref;
@@ -301,6 +301,7 @@

        int ifindex;
        struct j1939_addr addr;
+       spinlock_t filters_lock;
        struct j1939_filter *filters;
        int nfilters;
        pgn_t pgn_rx_filter;
+1 -1
net/can/j1939/main.c
@@ -274,7 +274,7 @@
                return ERR_PTR(-ENOMEM);

        j1939_tp_init(priv);
-       spin_lock_init(&priv->j1939_socks_lock);
+       rwlock_init(&priv->j1939_socks_lock);
        INIT_LIST_HEAD(&priv->j1939_socks);

        mutex_lock(&j1939_netdev_lock);
+30 -16
net/can/j1939/socket.c
@@
        jsk->state |= J1939_SOCK_BOUND;
        j1939_priv_get(priv);

-       spin_lock_bh(&priv->j1939_socks_lock);
+       write_lock_bh(&priv->j1939_socks_lock);
        list_add_tail(&jsk->list, &priv->j1939_socks);
-       spin_unlock_bh(&priv->j1939_socks_lock);
+       write_unlock_bh(&priv->j1939_socks_lock);
 }

 static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
 {
-       spin_lock_bh(&priv->j1939_socks_lock);
+       write_lock_bh(&priv->j1939_socks_lock);
        list_del_init(&jsk->list);
-       spin_unlock_bh(&priv->j1939_socks_lock);
+       write_unlock_bh(&priv->j1939_socks_lock);

        j1939_priv_put(priv);
        jsk->state &= ~J1939_SOCK_BOUND;
@@
 static bool j1939_sk_match_filter(struct j1939_sock *jsk,
                                   const struct j1939_sk_buff_cb *skcb)
 {
-       const struct j1939_filter *f = jsk->filters;
-       int nfilter = jsk->nfilters;
+       const struct j1939_filter *f;
+       int nfilter;
+
+       spin_lock_bh(&jsk->filters_lock);
+
+       f = jsk->filters;
+       nfilter = jsk->nfilters;

        if (!nfilter)
                /* receive all when no filters are assigned */
-               return true;
+               goto filter_match_found;

        for (; nfilter; ++f, --nfilter) {
                if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
@@
                        continue;
                if ((skcb->addr.src_name & f->name_mask) != f->name)
                        continue;
-               return true;
+               goto filter_match_found;
        }
+
+       spin_unlock_bh(&jsk->filters_lock);
        return false;
+
+filter_match_found:
+       spin_unlock_bh(&jsk->filters_lock);
+       return true;
 }

 static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
@@
        struct j1939_sock *jsk;
        bool match = false;

-       spin_lock_bh(&priv->j1939_socks_lock);
+       read_lock_bh(&priv->j1939_socks_lock);
        list_for_each_entry(jsk, &priv->j1939_socks, list) {
                match = j1939_sk_recv_match_one(jsk, skcb);
                if (match)
                        break;
        }
-       spin_unlock_bh(&priv->j1939_socks_lock);
+       read_unlock_bh(&priv->j1939_socks_lock);

        return match;
 }
@@
 {
        struct j1939_sock *jsk;

-       spin_lock_bh(&priv->j1939_socks_lock);
+       read_lock_bh(&priv->j1939_socks_lock);
        list_for_each_entry(jsk, &priv->j1939_socks, list) {
                j1939_sk_recv_one(jsk, skb);
        }
-       spin_unlock_bh(&priv->j1939_socks_lock);
+       read_unlock_bh(&priv->j1939_socks_lock);
 }

 static void j1939_sk_sock_destruct(struct sock *sk)
@@
        atomic_set(&jsk->skb_pending, 0);
        spin_lock_init(&jsk->sk_session_queue_lock);
        INIT_LIST_HEAD(&jsk->sk_session_queue);
+       spin_lock_init(&jsk->filters_lock);

        /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
        sock_set_flag(sk, SOCK_RCU_FREE);
@@
        }

        lock_sock(&jsk->sk);
+       spin_lock_bh(&jsk->filters_lock);
        ofilters = jsk->filters;
        jsk->filters = filters;
        jsk->nfilters = count;
+       spin_unlock_bh(&jsk->filters_lock);
        release_sock(&jsk->sk);
        kfree(ofilters);
        return 0;
@@
        }

        /* spread RX notifications to all sockets subscribed to this session */
-       spin_lock_bh(&priv->j1939_socks_lock);
+       read_lock_bh(&priv->j1939_socks_lock);
        list_for_each_entry(jsk, &priv->j1939_socks, list) {
                if (j1939_sk_recv_match_one(jsk, &session->skcb))
                        __j1939_sk_errqueue(session, &jsk->sk, type);
        }
-       spin_unlock_bh(&priv->j1939_socks_lock);
+       read_unlock_bh(&priv->j1939_socks_lock);
 };

 void j1939_sk_send_loop_abort(struct sock *sk, int err)
@@
        struct j1939_sock *jsk;
        int error_code = ENETDOWN;

-       spin_lock_bh(&priv->j1939_socks_lock);
+       read_lock_bh(&priv->j1939_socks_lock);
        list_for_each_entry(jsk, &priv->j1939_socks, list) {
                jsk->sk.sk_err = error_code;
                if (!sock_flag(&jsk->sk, SOCK_DEAD))
@@

                j1939_sk_queue_drop_all(priv, jsk, error_code);
        }
-       spin_unlock_bh(&priv->j1939_socks_lock);
+       read_unlock_bh(&priv->j1939_socks_lock);
 }

 static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,